From 5d81fc319c0a4ac220b1ac5dfd7cec27aa92cc02 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Nov 09 2021 09:56:18 +0000 Subject: import 389-ds-base-1.4.3.23-10.module+el8.5.0+12398+47000435 --- diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata index 9c5f2b7..9ce4a90 100644 --- a/.389-ds-base.metadata +++ b/.389-ds-base.metadata @@ -1,2 +1,3 @@ -90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2 +c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2 +22b1ef11852864027e184bb4bee56286b855b703 SOURCES/vendor-1.4.3.23-2.tar.gz diff --git a/.gitignore b/.gitignore index 9745926..3e96486 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ -SOURCES/389-ds-base-1.4.3.16.tar.bz2 +SOURCES/389-ds-base-1.4.3.23.tar.bz2 SOURCES/jemalloc-5.2.1.tar.bz2 +SOURCES/vendor-1.4.3.23-2.tar.gz diff --git a/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch b/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch deleted file mode 100644 index 1b08b52..0000000 --- a/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch +++ /dev/null @@ -1,159 +0,0 @@ -From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 11 Nov 2020 08:59:18 -0500 -Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN - -Bug Description: Adding an entry with an escaped leading space leads to many - problems. Mainly id2entry can get corrupted during an - import of such an entry, and the entryrdn index is not - updated correctly - -Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact. - -Relates: https://github.com/389ds/389-ds-base/issues/4383 - -Reviewed by: firstyear, progier, and tbordaz (Thanks!!!) ---- - .../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++- - ldap/servers/slapd/dn.c | 8 +- - 2 files changed, 77 insertions(+), 6 deletions(-) - -diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py -index 543718689..7939a99a7 100644 ---- a/dirsrvtests/tests/suites/syntax/acceptance_test.py -+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -7,13 +7,12 @@ - # --- END COPYRIGHT BLOCK --- - - import ldap --import logging - import pytest - import os - from lib389.schema import Schema - from lib389.config import Config - from lib389.idm.user import UserAccounts --from lib389.idm.group import Groups -+from lib389.idm.group import Group, Groups - from lib389._constants import DEFAULT_SUFFIX - from lib389.topologies import log, topology_st as topo - -@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo): - 4. Success - """ - -- # Create group -+ # Create group - groups = Groups(topo.standalone, DEFAULT_SUFFIX) - group = groups.create(properties={'cn': ' test'}) - -@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo): - groups.list() - - -+@pytest.mark.parametrize("props, rawdn", [ -+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"), -+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")]) -+def test_dn_syntax_spaces_delete(topo, props, rawdn): -+ """Test that an entry with a space as the first character in the DN can be -+ deleted without error. We also want to make sure the indexes are properly -+ updated by repeatedly adding and deleting the entry, and that the entry cache -+ is properly maintained. -+ -+ :id: b993f37c-c2b0-4312-992c-a9048ff98965 -+ :parametrized: yes -+ :setup: Standalone Instance -+ :steps: -+ 1. Create a group with a DN that has a space as the first/last -+ character. -+ 2. Delete group -+ 3. Add group -+ 4. Modify group -+ 5. Restart server and modify entry -+ 6. Delete group -+ 7. Add group back -+ 8. Delete group using specific DN -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. Success -+ 8. Success -+ """ -+ -+ # Create group -+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ -+ # Delete group (verifies DN/RDN parsing works and cache is correct) -+ group.delete() -+ -+ # Add group again (verifies entryrdn index was properly updated) -+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ -+ # Modify the group (verifies dn/rdn parsing is correct) -+ group.replace('description', 'escaped space group') -+ -+ # Restart the server. This will pull the entry from the database and -+ # convert it into a cache entry, which is different than how a client -+ # first adds an entry and is put into the cache before being written to -+ # disk. -+ topo.standalone.restart() -+ -+ # Make sure we can modify the entry (verifies cache entry was created -+ # correctly) -+ group.replace('description', 'escaped space group after restart') -+ -+ # Make sure it can still be deleted (verifies cache again). -+ group.delete() -+ -+ # Add it back so we can delete it using a specific DN (sanity test to verify -+ # another DN/RDN parsing variation). -+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ group = Group(topo.standalone, dn=rawdn) -+ group.delete() -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c -index 2af3f38fc..3980b897f 100644 ---- a/ldap/servers/slapd/dn.c -+++ b/ldap/servers/slapd/dn.c -@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) - s++; - } - } -- } else if (s + 2 < ends && -- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { -+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { - /* esc hexpair ==> real character */ - int n = slapi_hexchar2int(*(s + 1)); - int n2 = slapi_hexchar2int(*(s + 2)); -@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) - if (n == 0) { /* don't change \00 */ - *d++ = *++s; - *d++ = *++s; -+ } else if (n == 32) { /* leave \20 (space) intact */ -+ *d++ = *s; -+ *d++ = *++s; -+ *d++ = *++s; -+ s++; - } else { - *d++ = n; - s += 3; --- -2.26.2 - diff --git a/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch b/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch new file mode 100644 index 0000000..1400b43 --- /dev/null +++ b/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch @@ -0,0 +1,1370 @@ +From 5d730f7e9f1e857bc886556db0229607b8d536d2 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 6 May 2021 18:54:20 +0200 +Subject: [PATCH 01/12] Issue 4747 - Remove unstable/unstatus tests from PRCI + (#4748) + +Bug description: + Some tests (17) in the tests suite (dirsrvtest/tests/suites) + are failing although there is no regression. + It needs (long) investigations to status if failures + are due to a bug in the tests or in DS core. + Until those investigations are completes, test suites + loose a large part of its value to detect regression. + Indeed those failing tests may hide a real regression. + +Fix description: + Flag failing tests with pytest.mark.flaky(max_runs=2, min_passes=1) + Additional action will be to create upstream 17 ticket to + status on each failing tests + +relates: https://github.com/389ds/389-ds-base/issues/4747 + +Reviewed by: Simon Pichugin, Viktor Ashirov (many thanks for your +reviews and help) + +Platforms tested: F33 +--- + .github/workflows/pytest.yml | 84 +++++ + dirsrvtests/tests/suites/acl/keywords_test.py | 16 +- + .../tests/suites/clu/dsctl_acceptance_test.py | 56 --- + .../tests/suites/clu/repl_monitor_test.py | 2 + + .../dynamic_plugins/dynamic_plugins_test.py | 8 +- + .../suites/fourwaymmr/fourwaymmr_test.py | 3 +- + .../suites/healthcheck/health_config_test.py | 1 + + .../suites/healthcheck/health_sync_test.py | 2 + + .../tests/suites/import/import_test.py | 23 +- + .../tests/suites/indexes/regression_test.py | 63 ++++ + .../paged_results/paged_results_test.py | 3 +- + .../tests/suites/password/regression_test.py | 2 + + .../tests/suites/plugins/accpol_test.py | 20 +- + .../suites/plugins/managed_entry_test.py | 351 ++++++++++++++++++ + .../tests/suites/plugins/memberof_test.py | 3 +- + .../suites/replication/cleanallruv_test.py | 8 +- + .../suites/replication/encryption_cl5_test.py | 8 +- + .../tests/suites/retrocl/basic_test.py | 292 --------------- + 18 files changed, 576 insertions(+), 369 deletions(-) + create mode 100644 .github/workflows/pytest.yml + delete mode 100644 dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py + create mode 100644 dirsrvtests/tests/suites/plugins/managed_entry_test.py + delete mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py + +diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml +new file mode 100644 +index 000000000..015794d96 +--- /dev/null ++++ b/.github/workflows/pytest.yml +@@ -0,0 +1,84 @@ ++name: Test ++ ++on: [push, pull_request] ++ ++jobs: ++ build: ++ name: Build ++ runs-on: ubuntu-20.04 ++ container: ++ image: quay.io/389ds/ci-images:test ++ outputs: ++ matrix: ${{ steps.set-matrix.outputs.matrix }} ++ steps: ++ - name: Checkout ++ uses: actions/checkout@v2 ++ ++ - name: Get a list of all test suites ++ id: set-matrix ++ run: echo "::set-output name=matrix::$(python3 .github/scripts/generate_matrix.py)" ++ ++ - name: Build RPMs ++ run: cd $GITHUB_WORKSPACE && SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms ++ ++ - name: Tar build artifacts ++ run: tar -cvf dist.tar dist/ ++ ++ - name: Upload RPMs ++ uses: actions/upload-artifact@v2 ++ with: ++ name: rpms ++ path: dist.tar ++ ++ test: ++ name: Test ++ runs-on: ubuntu-20.04 ++ needs: build ++ strategy: ++ fail-fast: false ++ matrix: ${{ fromJson(needs.build.outputs.matrix) }} ++ ++ steps: ++ - name: Checkout ++ uses: actions/checkout@v2 ++ ++ - name: Install dependencies ++ run: | ++ sudo apt update -y ++ sudo apt install -y docker.io containerd runc ++ ++ sudo cp .github/daemon.json /etc/docker/daemon.json ++ ++ sudo systemctl unmask docker ++ sudo systemctl start docker ++ ++ - name: Download RPMs ++ uses: actions/download-artifact@master ++ with: ++ name: rpms ++ ++ - name: Extract RPMs ++ run: tar xvf dist.tar ++ ++ - name: Run pytest in a container ++ run: | ++ set -x ++ CID=$(sudo docker run -d -h server.example.com --privileged --rm -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test) ++ sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" ++ sudo docker exec $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml -v dirsrvtests/tests/suites/${{ matrix.suite }} ++ ++ - name: Make the results file readable by all ++ if: always() ++ run: ++ sudo chmod -f a+r pytest.xml ++ ++ - name: Sanitize filename ++ run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV ++ ++ - name: Upload pytest test results ++ if: always() ++ uses: actions/upload-artifact@v2 ++ with: ++ name: pytest-${{ env.PYTEST_SUITE }} ++ path: pytest.xml ++ +diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py +index 0174152e3..c5e989f3b 100644 +--- a/dirsrvtests/tests/suites/acl/keywords_test.py ++++ b/dirsrvtests/tests/suites/acl/keywords_test.py +@@ -216,7 +216,8 @@ def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_us + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_any_machine( + topo, add_user, aci_of_user + ): +@@ -245,6 +246,8 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + topo, add_user, aci_of_user + ): +@@ -276,7 +279,8 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_some_network_only( + topo, add_user, aci_of_user + ): +@@ -306,7 +310,8 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. + +@@ -332,7 +337,8 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( + topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. +@@ -418,6 +424,8 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50378 + @pytest.mark.bz1710848 + @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) +diff --git a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py +deleted file mode 100644 +index a0f89defd..000000000 +--- a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py ++++ /dev/null +@@ -1,56 +0,0 @@ +-# --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2021 Red Hat, Inc. +-# All rights reserved. +-# +-# License: GPL (version 3 or any later version). +-# See LICENSE for details. +-# --- END COPYRIGHT BLOCK --- +- +-import logging +-import pytest +-import os +-from lib389._constants import * +-from lib389.topologies import topology_st as topo +- +-log = logging.getLogger(__name__) +- +- +-def test_custom_path(topo): +- """Test that a custom path, backup directory, is correctly used by lib389 +- when the server is stopped. +- +- :id: 8659e209-ee83-477e-8183-1d2f555669ea +- :setup: Standalone Instance +- :steps: +- 1. Get the LDIF directory +- 2. Change the server's backup directory to the LDIF directory +- 3. Stop the server, and perform a backup +- 4. Backup was written to LDIF directory +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- """ +- +- # Get LDIF dir +- ldif_dir = topo.standalone.get_ldif_dir() +- +- # Set backup directory to LDIF directory +- topo.standalone.config.replace('nsslapd-bakdir', ldif_dir) +- +- # Stop the server and take a backup +- topo.standalone.stop() +- topo.standalone.db2bak(None) +- +- # Verify backup was written to LDIF directory +- backups = os.listdir(ldif_dir) +- assert len(backups) +- +- +-if __name__ == '__main__': +- # Run isolated +- # -s for DEBUG mode +- CURRENT_FILE = os.path.realpath(__file__) +- pytest.main(["-s", CURRENT_FILE]) +- +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index 9428edb26..3cf6343c8 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -90,6 +90,8 @@ def get_hostnames_from_log(port1, port2): + host_m2 = match.group(2) + return (host_m1, host_m2) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50545 + @pytest.mark.bz1739718 + @pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") +diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py +index b61daed74..7558cc03d 100644 +--- a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py ++++ b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py +@@ -68,7 +68,8 @@ def check_replicas(topology_m2): + + log.info('Data is consistent across the replicas.\n') + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_acceptance(topology_m2): + """Exercise each plugin and its main features, while + changing the configuration without restarting the server. +@@ -140,7 +141,8 @@ def test_acceptance(topology_m2): + ############################################################################ + check_replicas(topology_m2) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_memory_corruption(topology_m2): + """Check the plugins for memory corruption issues while + dynamic plugins option is enabled +@@ -242,6 +244,8 @@ def test_memory_corruption(topology_m2): + ############################################################################ + check_replicas(topology_m2) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.tier2 + def test_stress(topology_m2): + """Test plugins while under a big load. Perform the test 5 times +diff --git a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py +index 5b0754a2e..c5a746ebb 100644 +--- a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py ++++ b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py +@@ -144,7 +144,8 @@ def test_delete_a_few_entries_in_m4(topo_m4, _cleanupentris): + topo_m4.ms["supplier4"], topo_m4.ms["supplier3"], 30 + ) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_replicated_multivalued_entries(topo_m4): + """ + Replicated multivalued entries are ordered the same way on all consumers +diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +index 3d102e859..f470c05c6 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +@@ -337,6 +337,7 @@ def test_healthcheck_low_disk_space(topology_st): + os.remove(file) + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50791 + @pytest.mark.bz1843567 + @pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") +diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py +index 75bbfd35c..74df1b322 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py +@@ -70,6 +70,8 @@ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searc + @pytest.mark.ds50873 + @pytest.mark.bz1685160 + @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): + """Check if HealthCheck returns DSREPLLE0003 code + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index defe447d5..119b097f1 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -14,6 +14,7 @@ import os + import pytest + import time + import glob ++import logging + from lib389.topologies import topology_st as topo + from lib389._constants import DEFAULT_SUFFIX, TaskWarning + from lib389.dbgen import dbgen_users +@@ -28,6 +29,12 @@ from lib389.idm.account import Accounts + + pytestmark = pytest.mark.tier1 + ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) + + def _generate_ldif(topo, no_no): + """ +@@ -349,7 +356,8 @@ def _toggle_private_import_mem(request, topo): + ('nsslapd-db-private-import-mem', 'off')) + request.addfinalizer(finofaci) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + """With nsslapd-db-private-import-mem: on is faster import. + +@@ -381,16 +389,19 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + config = LDBMConfig(topo.standalone) + # Measure offline import time duration total_time1 +- total_time1 = _import_offline(topo, 20) ++ total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 +- total_time2 = _import_offline(topo, 20) ++ total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 ++ log.info("total_time1 = %f" % total_time1) ++ log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 ++ + # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), +@@ -398,14 +409,16 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time1 +- total_time1 = _import_offline(topo, 20) ++ total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 +- total_time2 = _import_offline(topo, 20) ++ total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 ++ log.info("toral_time1 = %f" % total_time1) ++ log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 + + +diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py +index 1a71f16e9..ed0c8885f 100644 +--- a/dirsrvtests/tests/suites/indexes/regression_test.py ++++ b/dirsrvtests/tests/suites/indexes/regression_test.py +@@ -19,6 +19,68 @@ from lib389.topologies import topology_st as topo + pytestmark = pytest.mark.tier1 + + ++@pytest.fixture(scope="function") ++def add_a_group_with_users(request, topo): ++ """ ++ Add a group and users, which are members of this group. ++ """ ++ groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn=None) ++ group = groups.create(properties={'cn': 'test_group'}) ++ users_list = [] ++ users_num = 100 ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) ++ for num in range(users_num): ++ USER_NAME = f'test_{num}' ++ user = users.create(properties={ ++ 'uid': USER_NAME, ++ 'sn': USER_NAME, ++ 'cn': USER_NAME, ++ 'uidNumber': f'{num}', ++ 'gidNumber': f'{num}', ++ 'homeDirectory': f'/home/{USER_NAME}' ++ }) ++ users_list.append(user) ++ group.add_member(user.dn) ++ ++ def fin(): ++ """ ++ Removes group and users. ++ """ ++ # If the server crashed, start it again to do the cleanup ++ if not topo.standalone.status(): ++ topo.standalone.start() ++ for user in users_list: ++ user.delete() ++ group.delete() ++ ++ request.addfinalizer(fin) ++ ++ ++@pytest.fixture(scope="function") ++def set_small_idlistscanlimit(request, topo): ++ """ ++ Set nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer ++ """ ++ db_cfg = DatabaseConfig(topo.standalone) ++ old_idlistscanlimit = db_cfg.get_attr_vals_utf8('nsslapd-idlistscanlimit') ++ db_cfg.set([('nsslapd-idlistscanlimit', '100')]) ++ topo.standalone.restart() ++ ++ def fin(): ++ """ ++ Set nsslapd-idlistscanlimit back to the default value ++ """ ++ # If the server crashed, start it again to do the cleanup ++ if not topo.standalone.status(): ++ topo.standalone.start() ++ db_cfg.set([('nsslapd-idlistscanlimit', old_idlistscanlimit)]) ++ topo.standalone.restart() ++ ++ request.addfinalizer(fin) ++ ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented") + def test_reindex_task_creates_abandoned_index_file(topo): + """ + Recreating an index for the same attribute but changing +@@ -123,3 +185,4 @@ if __name__ == "__main__": + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +index 9fdceb165..0b45b7d96 100644 +--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py ++++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +@@ -506,7 +506,8 @@ def test_search_with_timelimit(topology_st, create_user): + finally: + del_users(users_list) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.parametrize('aci_subject', + ('dns = "{}"'.format(HOSTNAME), + 'ip = "{}"'.format(IP_ADDRESS))) +diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py +index 251834421..8f1facb6d 100644 +--- a/dirsrvtests/tests/suites/password/regression_test.py ++++ b/dirsrvtests/tests/suites/password/regression_test.py +@@ -215,6 +215,8 @@ def test_global_vs_local(topo, passw_policy, create_user, user_pasw): + # reset password + create_user.set('userPassword', PASSWORD) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds49789 + def test_unhashed_pw_switch(topo_supplier): + """Check that nsslapd-unhashed-pw-switch works corrently +diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py +index 73e2e54d1..77975c747 100644 +--- a/dirsrvtests/tests/suites/plugins/accpol_test.py ++++ b/dirsrvtests/tests/suites/plugins/accpol_test.py +@@ -520,7 +520,8 @@ def test_glinact_limit(topology_st, accpol_global): + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') + del_users(topology_st, suffix, subtree, userid, nousrs) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnologin_attr(topology_st, accpol_global): + """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present + +@@ -610,7 +611,8 @@ def test_glnologin_attr(topology_st, accpol_global): + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnoalt_stattr(topology_st, accpol_global): + """Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1 + +@@ -656,6 +658,8 @@ def test_glnoalt_stattr(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glattr_modtime(topology_st, accpol_global): + """Verify if user account can be inactivated based on modifyTimeStamp attribute + +@@ -705,6 +709,8 @@ def test_glattr_modtime(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnoalt_nologin(topology_st, accpol_global): + """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO + +@@ -763,6 +769,8 @@ def test_glnoalt_nologin(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glinact_nsact(topology_st, accpol_global): + """Verify if user account can be activated using ns-activate.pl script. + +@@ -812,6 +820,8 @@ def test_glinact_nsact(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glinact_acclock(topology_st, accpol_global): + """Verify if user account is activated when account is unlocked by passwordlockoutduration. + +@@ -868,6 +878,8 @@ def test_glinact_acclock(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnact_pwexp(topology_st, accpol_global): + """Verify if user account is activated when password is reset after password is expired + +@@ -951,6 +963,8 @@ def test_glnact_pwexp(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_locact_inact(topology_st, accpol_local): + """Verify if user account is inactivated when accountInactivityLimit is exceeded. + +@@ -995,6 +1009,8 @@ def test_locact_inact(topology_st, accpol_local): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_locinact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when moved from ou=groups to ou=people subtree. + +diff --git a/dirsrvtests/tests/suites/plugins/managed_entry_test.py b/dirsrvtests/tests/suites/plugins/managed_entry_test.py +new file mode 100644 +index 000000000..662044ccd +--- /dev/null ++++ b/dirsrvtests/tests/suites/plugins/managed_entry_test.py +@@ -0,0 +1,351 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import pytest ++import time ++from lib389.topologies import topology_st as topo ++from lib389.idm.user import UserAccount, UserAccounts ++from lib389.idm.account import Account, Accounts ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.idm.group import Groups ++from lib389.config import Config ++from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit ++from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate ++from lib389.idm.nscontainer import nsContainers ++from lib389.idm.domain import Domain ++from lib389.tasks import Entry ++import ldap ++ ++pytestmark = pytest.mark.tier1 ++USER_PASSWORD = 'password' ++ ++ ++@pytest.fixture(scope="module") ++def _create_inital(topo): ++ """ ++ Will create entries for this module ++ """ ++ meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) ++ mep_template1 = meps.create( ++ properties={'cn': 'UPG Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup', ++ 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split( ++ '|')}) ++ conf_mep = MEPConfigs(topo.standalone) ++ conf_mep.create(properties={'cn': 'UPG Definition1', 'originScope': f'cn=Users,{DEFAULT_SUFFIX}', ++ 'originFilter': 'objectclass=posixaccount', ++ 'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}', ++ 'managedTemplate': mep_template1.dn}) ++ container = nsContainers(topo.standalone, DEFAULT_SUFFIX) ++ for cn in ['Users', 'Groups']: ++ container.create(properties={'cn': cn}) ++ ++ ++def test_binddn_tracking(topo, _create_inital): ++ """Test Managed Entries basic functionality ++ ++ :id: ea2ddfd4-aaec-11ea-8416-8c16451d917b ++ :setup: Standalone Instance ++ :steps: ++ 1. Set nsslapd-plugin-binddn-tracking attribute under cn=config ++ 2. Add user ++ 3. Managed Entry Plugin runs against managed entries upon any update without validating ++ 4. verify creation of User Private Group with its time stamp value ++ 5. Modify the SN attribute which is not mapped with managed entry ++ 6. run ModRDN operation and check the User Private group ++ 7. Check the time stamp of UPG should be changed now ++ 8. Check the creatorsname should be user dn and internalCreatorsname should be plugin name ++ 9. Check if a managed group entry was created ++ :expected results: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ """ ++ config = Config(topo.standalone) ++ # set nsslapd-plugin-binddn-tracking attribute under cn=config ++ config.replace('nsslapd-plugin-binddn-tracking', 'on') ++ # Add user ++ user = UserAccounts(topo.standalone, f'cn=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}' ++ entry = Account(topo.standalone, f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}') ++ # Managed Entry Plugin runs against managed entries upon any update without validating ++ # verify creation of User Private Group with its time stamp value ++ stamp1 = entry.get_attr_val_utf8('modifyTimestamp') ++ user.replace('sn', 'NewSN_modified') ++ stamp2 = entry.get_attr_val_utf8('modifyTimestamp') ++ # Modify the SN attribute which is not mapped with managed entry ++ # Check the time stamp of UPG should not be changed ++ assert stamp1 == stamp2 ++ time.sleep(1) ++ # run ModRDN operation and check the User Private group ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='cn=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}' ++ entry = Account(topo.standalone, f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}') ++ stamp3 = entry.get_attr_val_utf8('modifyTimestamp') ++ # Check the time stamp of UPG should be changed now ++ assert stamp2 != stamp3 ++ time.sleep(1) ++ user.replace('gidNumber', '1') ++ stamp4 = entry.get_attr_val_utf8('modifyTimestamp') ++ assert stamp4 != stamp3 ++ # Check the creatorsname should be user dn and internalCreatorsname should be plugin name ++ assert entry.get_attr_val_utf8('creatorsname') == 'cn=directory manager' ++ assert entry.get_attr_val_utf8('internalCreatorsname') == 'cn=Managed Entries,cn=plugins,cn=config' ++ assert entry.get_attr_val_utf8('modifiersname') == 'cn=directory manager' ++ user.delete() ++ config.replace('nsslapd-plugin-binddn-tracking', 'off') ++ ++ ++class WithObjectClass(Account): ++ def __init__(self, instance, dn=None): ++ super(WithObjectClass, self).__init__(instance, dn) ++ self._rdn_attribute = 'uid' ++ self._create_objectclasses = ['top', 'person', 'inetorgperson'] ++ ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++def test_mentry01(topo, _create_inital): ++ """Test Managed Entries basic functionality ++ ++ :id: 9b87493b-0493-46f9-8364-6099d0e5d806 ++ :setup: Standalone Instance ++ :steps: ++ 1. Check the plug-in status ++ 2. Add Template and definition entry ++ 3. Add our org units ++ 4. Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ 5. Disable the plug-in and check the status ++ 6. Enable the plug-in and check the status the plug-in is disabled and creation of UPG should fail ++ 7. Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ 8. Add users, run ModRDN operation and check the User Private group ++ 9. Add users, run LDAPMODIFY to change the gidNumber and check the User Private group ++ 10. Checking whether creation of User Private group fails for existing group entry ++ 11. Checking whether adding of posixAccount objectClass to existing user creates UPG ++ 12. Running ModRDN operation and checking the user private groups mepManagedBy attribute ++ 13. Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG ++ 14. Change the RDN of template entry, DSA Unwilling to perform error expected ++ 15. Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted ++ :expected results: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ 10. Success ++ 11. Success ++ 12. Success ++ 13. Success ++ 14. Fail(Unwilling to perform ) ++ 15. Success ++ """ ++ # Check the plug-in status ++ mana = ManagedEntriesPlugin(topo.standalone) ++ assert mana.status() ++ # Add Template and definition entry ++ org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) ++ org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}') ++ meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) ++ mep_template1 = meps.create(properties={ ++ 'cn': 'UPG Template1', ++ 'mepRDNAttr': 'cn', ++ 'mepStaticAttr': 'objectclass: posixGroup', ++ 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) ++ conf_mep = MEPConfigs(topo.standalone) ++ mep_config = conf_mep.create(properties={ ++ 'cn': 'UPG Definition2', ++ 'originScope': org1.dn, ++ 'originFilter': 'objectclass=posixaccount', ++ 'managedBase': org2.dn, ++ 'managedTemplate': mep_template1.dn}) ++ # Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' ++ # Disable the plug-in and check the status ++ mana.disable() ++ user.delete() ++ topo.standalone.restart() ++ # Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ # Enable the plug-in and check the status ++ mana.enable() ++ user.delete() ++ topo.standalone.restart() ++ # Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' ++ # Add users, run ModRDN operation and check the User Private group ++ # Add users, run LDAPMODIFY to change the gidNumber and check the User Private group ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}' ++ user.replace('gidNumber', '20209') ++ entry = Account(topo.standalone, f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') ++ assert entry.get_attr_val_utf8('gidNumber') == '20209' ++ user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309')) ++ assert entry.get_attr_val_utf8('gidNumber') == '31309' ++ user.delete() ++ # Checking whether creation of User Private group fails for existing group entry ++ grp = Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'}) ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ entry.status() ++ user.delete() ++ # Checking whether adding of posixAccount objectClass to existing user creates UPG ++ # Add Users without posixAccount objectClass ++ users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}') ++ user_properties1 = {'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': 'sasa@sasa.com', 'telephoneNumber': '123'} ++ user = users.create(properties=user_properties1) ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ # Add posixAccount objectClass ++ user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']), ++ ('homeDirectory', '/home/ok'), ++ ('uidNumber', '61603'), ('gidNumber', '61603')) ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') ++ # Add inetuser objectClass ++ user.replace_many( ++ ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', ++ 'organizationalPerson', 'nsMemberOf', 'nsAccount', ++ 'person', 'mepOriginEntry', 'inetuser']), ++ ('memberOf', entry.dn)) ++ assert entry.status() ++ user.delete() ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') ++ # Add groupofNames objectClass ++ user.replace_many( ++ ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', ++ 'organizationalPerson', 'nsMemberOf', 'nsAccount', ++ 'person', 'mepOriginEntry', 'groupofNames']), ++ ('memberOf', user.dn)) ++ assert entry.status() ++ # Running ModRDN operation and checking the user private groups mepManagedBy attribute ++ user.replace('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}') ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}' ++ # Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG ++ user.remove('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}') ++ user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}' ++ # Change the RDN of template entry, DSA Unwilling to perform error expected ++ mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}') ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com') ++ # Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted ++ before = user.get_attr_val_utf8('mepManagedEntry') ++ user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') != before ++ ++ ++def test_managed_entry_removal(topo): ++ """Check that we can't remove managed entry manually ++ ++ :id: cf9c5be5-97ef-46fc-b199-8346acf4c296 ++ :setup: Standalone Instance ++ :steps: ++ 1. Enable the plugin ++ 2. Restart the instance ++ 3. Add our org units ++ 4. Set up config entry and template entry for the org units ++ 5. Add an entry that meets the MEP scope ++ 6. Check if a managed group entry was created ++ 7. Try to remove the entry while bound as Admin (non-DM) ++ 8. Remove the entry while bound as DM ++ 9. Check that the managing entry can be deleted too ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Should fail ++ 8. Success ++ 9. Success ++ """ ++ ++ inst = topo.standalone ++ ++ # Add ACI so we can test that non-DM user can't delete managed entry ++ domain = Domain(inst, DEFAULT_SUFFIX) ++ ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" ++ ACI_TARGETATTR = "(targetattr = *)" ++ ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " ++ ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ++ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT ++ domain.add('aci', ACI_BODY) ++ ++ # stop the plugin, and start it ++ plugin = ManagedEntriesPlugin(inst) ++ plugin.disable() ++ plugin.enable() ++ ++ # Add our org units ++ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ++ ou_people = ous.create(properties={'ou': 'managed_people'}) ++ ou_groups = ous.create(properties={'ou': 'managed_groups'}) ++ ++ mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) ++ mep_template1 = mep_templates.create(properties={ ++ 'cn': 'MEP template', ++ 'mepRDNAttr': 'cn', ++ 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), ++ 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') ++ }) ++ mep_configs = MEPConfigs(inst) ++ mep_configs.create(properties={'cn': 'config', ++ 'originScope': ou_people.dn, ++ 'originFilter': 'objectclass=posixAccount', ++ 'managedBase': ou_groups.dn, ++ 'managedTemplate': mep_template1.dn}) ++ inst.restart() ++ ++ # Add an entry that meets the MEP scope ++ test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) ++ managing_entry = test_users_m1.create_test_user(1001) ++ managing_entry.reset_password(USER_PASSWORD) ++ user_bound_conn = managing_entry.bind(USER_PASSWORD) ++ ++ # Get the managed entry ++ managed_groups = Groups(inst, ou_groups.dn, rdn=None) ++ managed_entry = managed_groups.get(managing_entry.rdn) ++ ++ # Check that the managed entry was created ++ assert managed_entry.exists() ++ ++ # Try to remove the entry while bound as Admin (non-DM) ++ managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) ++ managed_entry_user_conn = managed_groups_user_conn.get(managed_entry.rdn) ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ managed_entry_user_conn.delete() ++ assert managed_entry_user_conn.exists() ++ ++ # Remove the entry while bound as DM ++ managed_entry.delete() ++ assert not managed_entry.exists() ++ ++ # Check that the managing entry can be deleted too ++ managing_entry.delete() ++ assert not managing_entry.exists() ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py +index bc99eef7d..d3b32c856 100644 +--- a/dirsrvtests/tests/suites/plugins/memberof_test.py ++++ b/dirsrvtests/tests/suites/plugins/memberof_test.py +@@ -2655,7 +2655,8 @@ def test_complex_group_scenario_9(topology_st): + verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_memberof_auto_add_oc(topology_st): + """Test the auto add objectclass (OC) feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. +diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py +index 5610e3c19..f0cd99cfc 100644 +--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py ++++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py +@@ -223,7 +223,7 @@ def test_clean(topology_m4, m4rid): + + log.info('test_clean PASSED, restoring supplier 4...') + +- ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_clean_restart(topology_m4, m4rid): + """Check that cleanallruv task works properly after a restart + +@@ -295,6 +295,7 @@ def test_clean_restart(topology_m4, m4rid): + log.info('test_clean_restart PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_clean_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + +@@ -353,6 +354,7 @@ def test_clean_force(topology_m4, m4rid): + log.info('test_clean_force PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort(topology_m4, m4rid): + """Test the abort task basic functionality + +@@ -408,6 +410,7 @@ def test_abort(topology_m4, m4rid): + log.info('test_abort PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort_restart(topology_m4, m4rid): + """Test the abort task can handle a restart, and then resume + +@@ -486,6 +489,7 @@ def test_abort_restart(topology_m4, m4rid): + log.info('test_abort_restart PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort_certify(topology_m4, m4rid): + """Test the abort task with a replica-certify-all option + +@@ -555,6 +559,7 @@ def test_abort_certify(topology_m4, m4rid): + log.info('test_abort_certify PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_stress_clean(topology_m4, m4rid): + """Put each server(m1 - m4) under a stress, and perform the entire clean process + +@@ -641,6 +646,7 @@ def test_stress_clean(topology_m4, m4rid): + ldbm_config.set('nsslapd-readonly', 'off') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_multiple_tasks_with_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + +diff --git a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py +index 7ae7e1b13..b69863f53 100644 +--- a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py ++++ b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py +@@ -73,10 +73,10 @@ def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_enc + assert user_pw_attr in entry, 'Changelog entry does not contain clear text password' + assert count, 'Operation type and DN of the entry not matched in changelog' + +- +-@pytest.mark.parametrize("encryption", ["AES", "3DES"]) +-def test_algorithm_unhashed(topology_with_tls, encryption): +- """Check encryption algowithm AES and 3DES. ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++def test_algorithm_unhashed(topology_with_tls): ++ """Check encryption algorithm AES + And check unhashed#user#password attribute for encryption. + + :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +deleted file mode 100644 +index 112c73cb9..000000000 +--- a/dirsrvtests/tests/suites/retrocl/basic_test.py ++++ /dev/null +@@ -1,292 +0,0 @@ +-# --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2021 Red Hat, Inc. +-# All rights reserved. +-# +-# License: GPL (version 3 or any later version). +-# See LICENSE for details. +-# --- END COPYRIGHT BLOCK --- +- +-import logging +-import ldap +-import time +-import pytest +-from lib389.topologies import topology_st +-from lib389.plugins import RetroChangelogPlugin +-from lib389._constants import * +-from lib389.utils import * +-from lib389.tasks import * +-from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance +-from lib389.cli_base.dsrc import dsrc_arg_concat +-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add +-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts +- +-pytestmark = pytest.mark.tier1 +- +-USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX +-USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX +-USER_PW = 'password' +-ATTR_HOMEPHONE = 'homePhone' +-ATTR_CARLICENSE = 'carLicense' +- +-log = logging.getLogger(__name__) +- +-def test_retrocl_exclude_attr_add(topology_st): +- """ Test exclude attribute feature of the retrocl plugin for add operation +- +- :id: 3481650f-2070-45ef-9600-2500cfc51559 +- +- :setup: Standalone instance +- +- :steps: +- 1. Enable dynamic plugins +- 2. Confige retro changelog plugin +- 3. Add an entry +- 4. Ensure entry attrs are in the changelog +- 5. Exclude an attr +- 6. Add another entry +- 7. Ensure excluded attr is not in the changelog +- +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- 5. Success +- 6. Success +- 7. Success +- """ +- +- st = topology_st.standalone +- +- log.info('Enable dynamic plugins') +- try: +- st.config.set('nsslapd-dynamic-plugins', 'on') +- except ldap.LDAPError as e: +- ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) +- assert False +- +- log.info('Configure retrocl plugin') +- rcl = RetroChangelogPlugin(st) +- rcl.disable() +- rcl.enable() +- rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') +- +- log.info('Restarting instance') +- try: +- st.restart() +- except ldap.LDAPError as e: +- ldap.error('Failed to restart instance ' + e.args[0]['desc']) +- assert False +- +- users = UserAccounts(st, DEFAULT_SUFFIX) +- +- log.info('Adding user1') +- try: +- user1 = users.create(properties={ +- 'sn': '1', +- 'cn': 'user 1', +- 'uid': 'user1', +- 'uidNumber': '11', +- 'gidNumber': '111', +- 'givenname': 'user1', +- 'homePhone': '0861234567', +- 'carLicense': '131D16674', +- 'mail': 'user1@whereever.com', +- 'homeDirectory': '/home/user1', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user1") +- +- log.info('Verify homePhone and carLicense attrs are in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE in clstr +- assert ATTR_CARLICENSE in clstr +- +- log.info('Excluding attribute ' + ATTR_HOMEPHONE) +- args = FakeArgs() +- args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] +- args.instance = 'standalone1' +- args.basedn = None +- args.binddn = None +- args.starttls = False +- args.pwdfile = None +- args.bindpw = None +- args.prompt = False +- args.exclude_attrs = ATTR_HOMEPHONE +- args.func = retrochangelog_add +- dsrc_inst = dsrc_arg_concat(args, None) +- inst = connect_instance(dsrc_inst, False, args) +- result = args.func(inst, None, log, args) +- disconnect_instance(inst) +- assert result is None +- +- log.info("5s delay for retrocl plugin to restart") +- time.sleep(5) +- +- log.info('Adding user2') +- try: +- user2 = users.create(properties={ +- 'sn': '2', +- 'cn': 'user 2', +- 'uid': 'user2', +- 'uidNumber': '22', +- 'gidNumber': '222', +- 'givenname': 'user2', +- 'homePhone': '0879088363', +- 'carLicense': '04WX11038', +- 'mail': 'user2@whereever.com', +- 'homeDirectory': '/home/user2', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user2") +- +- log.info('Verify homePhone attr is not in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE not in clstr +- assert ATTR_CARLICENSE in clstr +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- +-def test_retrocl_exclude_attr_mod(topology_st): +- """ Test exclude attribute feature of the retrocl plugin for mod operation +- +- :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 +- +- :setup: Standalone instance +- +- :steps: +- 1. Enable dynamic plugins +- 2. Confige retro changelog plugin +- 3. Add user1 entry +- 4. Ensure entry attrs are in the changelog +- 5. Exclude an attr +- 6. Modify user1 entry +- 7. Ensure excluded attr is not in the changelog +- +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- 5. Success +- 6. Success +- 7. Success +- """ +- +- st = topology_st.standalone +- +- log.info('Enable dynamic plugins') +- try: +- st.config.set('nsslapd-dynamic-plugins', 'on') +- except ldap.LDAPError as e: +- ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) +- assert False +- +- log.info('Configure retrocl plugin') +- rcl = RetroChangelogPlugin(st) +- rcl.disable() +- rcl.enable() +- rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') +- +- log.info('Restarting instance') +- try: +- st.restart() +- except ldap.LDAPError as e: +- ldap.error('Failed to restart instance ' + e.args[0]['desc']) +- assert False +- +- users = UserAccounts(st, DEFAULT_SUFFIX) +- +- log.info('Adding user1') +- try: +- user1 = users.create(properties={ +- 'sn': '1', +- 'cn': 'user 1', +- 'uid': 'user1', +- 'uidNumber': '11', +- 'gidNumber': '111', +- 'givenname': 'user1', +- 'homePhone': '0861234567', +- 'carLicense': '131D16674', +- 'mail': 'user1@whereever.com', +- 'homeDirectory': '/home/user1', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user1") +- +- log.info('Verify homePhone and carLicense attrs are in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE in clstr +- assert ATTR_CARLICENSE in clstr +- +- log.info('Excluding attribute ' + ATTR_CARLICENSE) +- args = FakeArgs() +- args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] +- args.instance = 'standalone1' +- args.basedn = None +- args.binddn = None +- args.starttls = False +- args.pwdfile = None +- args.bindpw = None +- args.prompt = False +- args.exclude_attrs = ATTR_CARLICENSE +- args.func = retrochangelog_add +- dsrc_inst = dsrc_arg_concat(args, None) +- inst = connect_instance(dsrc_inst, False, args) +- result = args.func(inst, None, log, args) +- disconnect_instance(inst) +- assert result is None +- +- log.info("5s delay for retrocl plugin to restart") +- time.sleep(5) +- +- log.info('Modify user1 carLicense attribute') +- try: +- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) +- except ldap.LDAPError as e: +- log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) +- assert False +- +- log.info('Verify carLicense attr is not in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- assert len(cllist) > 0 +- # There will be 2 entries in the changelog for this user, we are only +- #interested in the second one, the modify operation. +- if cllist[1].hasAttr('changes'): +- clstr = (cllist[1].getValue('changes')).decode() +- assert ATTR_CARLICENSE not in clstr +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- +-if __name__ == '__main__': +- # Run isolated +- # -s for DEBUG mode +- CURRENT_FILE = os.path.realpath(__file__) +- pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.3 + diff --git a/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch b/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch new file mode 100644 index 0000000..1b86463 --- /dev/null +++ b/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch @@ -0,0 +1,322 @@ +From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Tue, 27 Apr 2021 17:00:15 +0100 +Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro + changelog (#4723) + +Description: When the retro changelog plugin is enabled it writes the + added/modified values to the "cn-changelog" suffix. In + some cases an entries attribute values can be of a + sensitive nature and should be excluded. This RFE adds + functionality that will allow an admin exclude certain + attributes from the retro changelog DB. + +Relates: https://github.com/389ds/389-ds-base/issues/4701 + +Reviewed by: mreynolds389, droideck (Thanks folks) +--- + .../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++ + 1 file changed, 292 insertions(+) + create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py + +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +new file mode 100644 +index 000000000..112c73cb9 +--- /dev/null ++++ b/dirsrvtests/tests/suites/retrocl/basic_test.py +@@ -0,0 +1,292 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2021 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import logging ++import ldap ++import time ++import pytest ++from lib389.topologies import topology_st ++from lib389.plugins import RetroChangelogPlugin ++from lib389._constants import * ++from lib389.utils import * ++from lib389.tasks import * ++from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance ++from lib389.cli_base.dsrc import dsrc_arg_concat ++from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add ++from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts ++ ++pytestmark = pytest.mark.tier1 ++ ++USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX ++USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX ++USER_PW = 'password' ++ATTR_HOMEPHONE = 'homePhone' ++ATTR_CARLICENSE = 'carLicense' ++ ++log = logging.getLogger(__name__) ++ ++def test_retrocl_exclude_attr_add(topology_st): ++ """ Test exclude attribute feature of the retrocl plugin for add operation ++ ++ :id: 3481650f-2070-45ef-9600-2500cfc51559 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Enable dynamic plugins ++ 2. Confige retro changelog plugin ++ 3. Add an entry ++ 4. Ensure entry attrs are in the changelog ++ 5. Exclude an attr ++ 6. Add another entry ++ 7. Ensure excluded attr is not in the changelog ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ """ ++ ++ st = topology_st.standalone ++ ++ log.info('Enable dynamic plugins') ++ try: ++ st.config.set('nsslapd-dynamic-plugins', 'on') ++ except ldap.LDAPError as e: ++ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) ++ assert False ++ ++ log.info('Configure retrocl plugin') ++ rcl = RetroChangelogPlugin(st) ++ rcl.disable() ++ rcl.enable() ++ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') ++ ++ log.info('Restarting instance') ++ try: ++ st.restart() ++ except ldap.LDAPError as e: ++ ldap.error('Failed to restart instance ' + e.args[0]['desc']) ++ assert False ++ ++ users = UserAccounts(st, DEFAULT_SUFFIX) ++ ++ log.info('Adding user1') ++ try: ++ user1 = users.create(properties={ ++ 'sn': '1', ++ 'cn': 'user 1', ++ 'uid': 'user1', ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'givenname': 'user1', ++ 'homePhone': '0861234567', ++ 'carLicense': '131D16674', ++ 'mail': 'user1@whereever.com', ++ 'homeDirectory': '/home/user1', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user1") ++ ++ log.info('Verify homePhone and carLicense attrs are in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE in clstr ++ assert ATTR_CARLICENSE in clstr ++ ++ log.info('Excluding attribute ' + ATTR_HOMEPHONE) ++ args = FakeArgs() ++ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] ++ args.instance = 'standalone1' ++ args.basedn = None ++ args.binddn = None ++ args.starttls = False ++ args.pwdfile = None ++ args.bindpw = None ++ args.prompt = False ++ args.exclude_attrs = ATTR_HOMEPHONE ++ args.func = retrochangelog_add ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, False, args) ++ result = args.func(inst, None, log, args) ++ disconnect_instance(inst) ++ assert result is None ++ ++ log.info("5s delay for retrocl plugin to restart") ++ time.sleep(5) ++ ++ log.info('Adding user2') ++ try: ++ user2 = users.create(properties={ ++ 'sn': '2', ++ 'cn': 'user 2', ++ 'uid': 'user2', ++ 'uidNumber': '22', ++ 'gidNumber': '222', ++ 'givenname': 'user2', ++ 'homePhone': '0879088363', ++ 'carLicense': '04WX11038', ++ 'mail': 'user2@whereever.com', ++ 'homeDirectory': '/home/user2', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user2") ++ ++ log.info('Verify homePhone attr is not in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE not in clstr ++ assert ATTR_CARLICENSE in clstr ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ ++def test_retrocl_exclude_attr_mod(topology_st): ++ """ Test exclude attribute feature of the retrocl plugin for mod operation ++ ++ :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Enable dynamic plugins ++ 2. Confige retro changelog plugin ++ 3. Add user1 entry ++ 4. Ensure entry attrs are in the changelog ++ 5. Exclude an attr ++ 6. Modify user1 entry ++ 7. Ensure excluded attr is not in the changelog ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ """ ++ ++ st = topology_st.standalone ++ ++ log.info('Enable dynamic plugins') ++ try: ++ st.config.set('nsslapd-dynamic-plugins', 'on') ++ except ldap.LDAPError as e: ++ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) ++ assert False ++ ++ log.info('Configure retrocl plugin') ++ rcl = RetroChangelogPlugin(st) ++ rcl.disable() ++ rcl.enable() ++ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') ++ ++ log.info('Restarting instance') ++ try: ++ st.restart() ++ except ldap.LDAPError as e: ++ ldap.error('Failed to restart instance ' + e.args[0]['desc']) ++ assert False ++ ++ users = UserAccounts(st, DEFAULT_SUFFIX) ++ ++ log.info('Adding user1') ++ try: ++ user1 = users.create(properties={ ++ 'sn': '1', ++ 'cn': 'user 1', ++ 'uid': 'user1', ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'givenname': 'user1', ++ 'homePhone': '0861234567', ++ 'carLicense': '131D16674', ++ 'mail': 'user1@whereever.com', ++ 'homeDirectory': '/home/user1', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user1") ++ ++ log.info('Verify homePhone and carLicense attrs are in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE in clstr ++ assert ATTR_CARLICENSE in clstr ++ ++ log.info('Excluding attribute ' + ATTR_CARLICENSE) ++ args = FakeArgs() ++ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] ++ args.instance = 'standalone1' ++ args.basedn = None ++ args.binddn = None ++ args.starttls = False ++ args.pwdfile = None ++ args.bindpw = None ++ args.prompt = False ++ args.exclude_attrs = ATTR_CARLICENSE ++ args.func = retrochangelog_add ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, False, args) ++ result = args.func(inst, None, log, args) ++ disconnect_instance(inst) ++ assert result is None ++ ++ log.info("5s delay for retrocl plugin to restart") ++ time.sleep(5) ++ ++ log.info('Modify user1 carLicense attribute') ++ try: ++ st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) ++ except ldap.LDAPError as e: ++ log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) ++ assert False ++ ++ log.info('Verify carLicense attr is not in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ assert len(cllist) > 0 ++ # There will be 2 entries in the changelog for this user, we are only ++ #interested in the second one, the modify operation. ++ if cllist[1].hasAttr('changes'): ++ clstr = (cllist[1].getValue('changes')).decode() ++ assert ATTR_CARLICENSE not in clstr ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.3 + diff --git a/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch b/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch deleted file mode 100644 index e82fdf8..0000000 --- a/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch +++ /dev/null @@ -1,232 +0,0 @@ -From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 3 Nov 2020 12:18:50 +0100 -Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line - initialization - second version (#4399) - -Bug description: -Keep alive entry is not created on target master after on line initialization, -and its RUVelement stays empty until a direct update is issued on that master - -Fix description: -The patch allows a consumer (configured as a master) to create (if it did not -exist before) the consumer's keep alive entry. It creates it at the end of a -replication session at a time we are sure the changelog exists and will not -be reset. It allows a consumer to have RUVelement with csn in the RUV at the -first incoming replication session. - -That is basically lkrispen's proposal with an associated pytest testcase - -Second version changes: - - moved the testcase to suites/replication/regression_test.py - - set up the topology from a 2 master topology then - reinitialized the replicas from an ldif without replication metadata - rather than using the cli. - - search for keepalive entries using search_s instead of getEntry - - add a comment about keep alive entries purpose - -last commit: - - wait that ruv are in sync before checking keep alive entries - -Reviewed by: droideck, Firstyear - -Platforms tested: F32 - -relates: #2058 ---- - .../suites/replication/regression_test.py | 130 ++++++++++++++++++ - .../plugins/replication/repl5_replica.c | 14 ++ - ldap/servers/plugins/replication/repl_extop.c | 4 + - 3 files changed, 148 insertions(+) - -diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py -index 844d762b9..14b9d6a44 100644 ---- a/dirsrvtests/tests/suites/replication/regression_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_test.py -@@ -98,6 +98,30 @@ def _move_ruv(ldif_file): - for dn, entry in ldif_list: - ldif_writer.unparse(dn, entry) - -+def _remove_replication_data(ldif_file): -+ """ Remove the replication data from ldif file: -+ db2lif without -r includes some of the replica data like -+ - nsUniqueId -+ - keepalive entries -+ This function filters the ldif fil to remove these data -+ """ -+ -+ with open(ldif_file) as f: -+ parser = ldif.LDIFRecordList(f) -+ parser.parse() -+ -+ ldif_list = parser.all_records -+ # Iterate on a copy of the ldif entry list -+ for dn, entry in ldif_list[:]: -+ if dn.startswith('cn=repl keep alive'): -+ ldif_list.remove((dn,entry)) -+ else: -+ entry.pop('nsUniqueId') -+ with open(ldif_file, 'w') as f: -+ ldif_writer = ldif.LDIFWriter(f) -+ for dn, entry in ldif_list: -+ ldif_writer.unparse(dn, entry) -+ - - @pytest.fixture(scope="module") - def topo_with_sigkill(request): -@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2): - assert len(m1entries) == len(m2entries) - - -+def get_keepalive_entries(instance,replica): -+ # Returns the keep alive entries that exists with the suffix of the server instance -+ try: -+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, -+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", -+ ['cn', 'nsUniqueId', 'modifierTimestamp']) -+ except ldap.LDAPError as e: -+ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e))) -+ assert False -+ # No error, so lets log the keepalive entries -+ if log.isEnabledFor(logging.DEBUG): -+ for ret in entries: -+ log.debug("Found keepalive entry:\n"+str(ret)); -+ return entries -+ -+def verify_keepalive_entries(topo, expected): -+ #Check that keep alive entries exists (or not exists) for every masters on every masters -+ #Note: The testing method is quite basic: counting that there is one keepalive entry per master. -+ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but -+ # not for the general case as keep alive associated with no more existing master may exists -+ # (for example after: db2ldif / demote a master / ldif2db / init other masters) -+ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries -+ # should be done. -+ for masterId in topo.ms: -+ master=topo.ms[masterId] -+ for replica in Replicas(master).list(): -+ if (replica.get_role() != ReplicaRole.MASTER): -+ continue -+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}' -+ log.debug(f'Checking keepAliveEntries on {replica_info}') -+ keepaliveEntries = get_keepalive_entries(master, replica); -+ expectedCount = len(topo.ms) if expected else 0 -+ foundCount = len(keepaliveEntries) -+ if (foundCount == expectedCount): -+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') -+ else: -+ log.error(f'{foundCount} Keepalive entries are found ' -+ f'while {expectedCount} were expected on {replica_info}.') -+ assert False -+ -+ -+def test_online_init_should_create_keepalive_entries(topo_m2): -+ """Check that keep alive entries are created when initializinf a master from another one -+ -+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe -+ :setup: Two masters replication setup -+ :steps: -+ 1. Generate ldif without replication data -+ 2 Init both masters from that ldif -+ 3 Check that keep alive entries does not exists -+ 4 Perform on line init of master2 from master1 -+ 5 Check that keep alive entries exists -+ :expectedresults: -+ 1. No error while generating ldif -+ 2. No error while importing the ldif file -+ 3. No keepalive entrie should exists on any masters -+ 4. No error while initializing master2 -+ 5. All keepalive entries should exist on every masters -+ -+ """ -+ -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ m1 = topo_m2.ms["master1"] -+ m2 = topo_m2.ms["master2"] -+ # Step 1: Generate ldif without replication data -+ m1.stop() -+ m2.stop() -+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=ldif_file, encrypt=False) -+ # Remove replication metadata that are still in the ldif -+ _remove_replication_data(ldif_file) -+ -+ # Step 2: Init both masters from that ldif -+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m1.start() -+ m2.start() -+ -+ """ Replica state is now as if CLI setup has been done using: -+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master -+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master -+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" -+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" -+ dsconf master1 repl-agmt create --suffix "${SUFFIX}" -+ dsconf master2 repl-agmt create --suffix "${SUFFIX}" -+ """ -+ -+ # Step 3: No keepalive entrie should exists on any masters -+ verify_keepalive_entries(topo_m2, False) -+ -+ # Step 4: Perform on line init of master2 from master1 -+ agmt = Agreements(m1).list()[0] -+ agmt.begin_reinit() -+ (done, error) = agmt.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 5: All keepalive entries should exists on every masters -+ # Verify the keep alive entry once replication is in sync -+ # (that is the step that fails when bug is not fixed) -+ repl.wait_for_ruv(m2,m1) -+ verify_keepalive_entries(topo_m2, True); -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index f01782330..f0ea0f8ef 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -373,6 +373,20 @@ replica_destroy(void **arg) - slapi_ch_free((void **)arg); - } - -+/****************************************************************************** -+ ******************** REPLICATION KEEP ALIVE ENTRIES ************************** -+ ****************************************************************************** -+ * They are subentries of the replicated suffix and there is one per master. * -+ * These entries exist only to trigger a change that get replicated over the * -+ * topology. * -+ * Their main purpose is to generate records in the changelog and they are * -+ * updated from time to time by fractional replication to insure that at * -+ * least a change must be replicated by FR after a great number of not * -+ * replicated changes are found in the changelog. The interest is that the * -+ * fractional RUV get then updated so less changes need to be walked in the * -+ * changelog when searching for the first change to send * -+ ******************************************************************************/ -+ - #define KEEP_ALIVE_ATTR "keepalivetimestamp" - #define KEEP_ALIVE_ENTRY "repl keep alive" - #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s" -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index 14c8e0bcc..af486f730 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) - */ - if (cl5GetState() == CL5_STATE_OPEN) { - replica_log_ruv_elements(r); -+ /* now that the changelog is open and started, we can alos cretae the -+ * keep alive entry without risk that db and cl will not match -+ */ -+ replica_subentry_check(replica_get_root(r), replica_get_rid(r)); - } - - /* ONREPL code that dealt with new RUV, etc was moved into the code --- -2.26.2 - diff --git a/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch b/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch new file mode 100644 index 0000000..67ccf0c --- /dev/null +++ b/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch @@ -0,0 +1,5307 @@ +From eff14f0c884f3d2f541e3be6d9df86087177a76d Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Mon, 16 Mar 2020 14:59:56 +1000 +Subject: [PATCH 03/12] Ticket 137 - Implement EntryUUID plugin + +Bug Description: This implements EntryUUID - A plugin that generates +uuid's on attributes, which can be used by external applications to +associate an entry uniquely. + +Fix Description: This change is quite large as it contains multiple parts: + +* Schema for entryuuid. + ldap/schema/02common.ldif + ldap/schema/03entryuuid.ldif +* Documentation of the plugin design + src/README.md +* A rust plugin api. + src/slapi_r_plugin/Cargo.toml + src/slapi_r_plugin/README.md + src/slapi_r_plugin/build.rs + src/slapi_r_plugin/src/backend.rs + src/slapi_r_plugin/src/ber.rs + src/slapi_r_plugin/src/constants.rs + src/slapi_r_plugin/src/dn.rs + src/slapi_r_plugin/src/entry.rs + src/slapi_r_plugin/src/error.rs + src/slapi_r_plugin/src/init.c + src/slapi_r_plugin/src/lib.rs + src/slapi_r_plugin/src/log.rs + src/slapi_r_plugin/src/macros.rs + src/slapi_r_plugin/src/pblock.rs + src/slapi_r_plugin/src/plugin.rs + src/slapi_r_plugin/src/search.rs + src/slapi_r_plugin/src/syntax_plugin.rs + src/slapi_r_plugin/src/task.rs + src/slapi_r_plugin/src/value.rs +* An entry uuid syntax plugin, that has functional indexing + src/plugins/entryuuid_syntax/Cargo.toml + src/plugins/entryuuid_syntax/src/lib.rs +* A entry uuid plugin that generates entryuuid's and has a fixup task. + src/plugins/entryuuid/Cargo.toml + src/plugins/entryuuid/src/lib.rs +* Supporting changes in the server core to enable and provide apis for the plugins. + ldap/servers/slapd/config.c + ldap/servers/slapd/entry.c + ldap/servers/slapd/fedse.c +* A test suite for for the entryuuid plugin + dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif + dirsrvtests/tests/suites/entryuuid/basic_test.py +* Supporting changes in lib389 + src/lib389/lib389/_constants.py + src/lib389/lib389/backend.py + src/lib389/lib389/instance/setup.py + src/lib389/lib389/plugins.py + src/lib389/lib389/tasks.py +* Changes to support building the plugins + Makefile.am + configure.ac +* Execution of cargo fmt on the tree, causing some clean up of files. + src/Cargo.lock + src/Cargo.toml + src/librnsslapd/build.rs + src/librnsslapd/src/lib.rs + src/librslapd/Cargo.toml + src/librslapd/build.rs + src/librslapd/src/lib.rs + src/libsds/sds/lib.rs + src/libsds/sds/tqueue.rs + src/slapd/src/error.rs + src/slapd/src/fernet.rs + src/slapd/src/lib.rs + +https://pagure.io/389-ds-base/issue/137 + +Author: William Brown + +Review by: mreynolds, lkrispenz (Thanks) +--- + Makefile.am | 96 +- + ...ocalhost-userRoot-2020_03_30_13_14_47.ldif | 233 +++++ + .../tests/suites/entryuuid/basic_test.py | 226 +++++ + ldap/schema/02common.ldif | 1 + + ldap/schema/03entryuuid.ldif | 16 + + ldap/servers/slapd/config.c | 17 + + ldap/servers/slapd/entry.c | 12 + + ldap/servers/slapd/fedse.c | 28 + + src/Cargo.lock | 241 +++-- + src/Cargo.toml | 11 +- + src/README.md | 0 + src/lib389/lib389/_constants.py | 1 + + src/lib389/lib389/backend.py | 2 +- + src/lib389/lib389/instance/setup.py | 14 + + src/lib389/lib389/plugins.py | 30 + + src/lib389/lib389/tasks.py | 14 + + src/librnsslapd/build.rs | 19 +- + src/librnsslapd/src/lib.rs | 16 +- + src/librslapd/Cargo.toml | 4 - + src/librslapd/build.rs | 19 +- + src/librslapd/src/lib.rs | 11 +- + src/libsds/sds/lib.rs | 2 - + src/libsds/sds/tqueue.rs | 23 +- + src/plugins/entryuuid/Cargo.toml | 21 + + src/plugins/entryuuid/src/lib.rs | 196 ++++ + src/plugins/entryuuid_syntax/Cargo.toml | 21 + + src/plugins/entryuuid_syntax/src/lib.rs | 145 +++ + src/slapd/src/error.rs | 2 - + src/slapd/src/fernet.rs | 31 +- + src/slapd/src/lib.rs | 3 - + src/slapi_r_plugin/Cargo.toml | 19 + + src/slapi_r_plugin/README.md | 216 +++++ + src/slapi_r_plugin/build.rs | 8 + + src/slapi_r_plugin/src/backend.rs | 71 ++ + src/slapi_r_plugin/src/ber.rs | 90 ++ + src/slapi_r_plugin/src/constants.rs | 203 +++++ + src/slapi_r_plugin/src/dn.rs | 108 +++ + src/slapi_r_plugin/src/entry.rs | 92 ++ + src/slapi_r_plugin/src/error.rs | 61 ++ + src/slapi_r_plugin/src/init.c | 8 + + src/slapi_r_plugin/src/lib.rs | 36 + + src/slapi_r_plugin/src/log.rs | 87 ++ + src/slapi_r_plugin/src/macros.rs | 835 ++++++++++++++++++ + src/slapi_r_plugin/src/pblock.rs | 275 ++++++ + src/slapi_r_plugin/src/plugin.rs | 117 +++ + src/slapi_r_plugin/src/search.rs | 127 +++ + src/slapi_r_plugin/src/syntax_plugin.rs | 169 ++++ + src/slapi_r_plugin/src/task.rs | 148 ++++ + src/slapi_r_plugin/src/value.rs | 235 +++++ + 49 files changed, 4213 insertions(+), 147 deletions(-) + create mode 100644 dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif + create mode 100644 dirsrvtests/tests/suites/entryuuid/basic_test.py + create mode 100644 ldap/schema/03entryuuid.ldif + create mode 100644 src/README.md + create mode 100644 src/plugins/entryuuid/Cargo.toml + create mode 100644 src/plugins/entryuuid/src/lib.rs + create mode 100644 src/plugins/entryuuid_syntax/Cargo.toml + create mode 100644 src/plugins/entryuuid_syntax/src/lib.rs + create mode 100644 src/slapi_r_plugin/Cargo.toml + create mode 100644 src/slapi_r_plugin/README.md + create mode 100644 src/slapi_r_plugin/build.rs + create mode 100644 src/slapi_r_plugin/src/backend.rs + create mode 100644 src/slapi_r_plugin/src/ber.rs + create mode 100644 src/slapi_r_plugin/src/constants.rs + create mode 100644 src/slapi_r_plugin/src/dn.rs + create mode 100644 src/slapi_r_plugin/src/entry.rs + create mode 100644 src/slapi_r_plugin/src/error.rs + create mode 100644 src/slapi_r_plugin/src/init.c + create mode 100644 src/slapi_r_plugin/src/lib.rs + create mode 100644 src/slapi_r_plugin/src/log.rs + create mode 100644 src/slapi_r_plugin/src/macros.rs + create mode 100644 src/slapi_r_plugin/src/pblock.rs + create mode 100644 src/slapi_r_plugin/src/plugin.rs + create mode 100644 src/slapi_r_plugin/src/search.rs + create mode 100644 src/slapi_r_plugin/src/syntax_plugin.rs + create mode 100644 src/slapi_r_plugin/src/task.rs + create mode 100644 src/slapi_r_plugin/src/value.rs + +diff --git a/Makefile.am b/Makefile.am +index 668a095da..627953850 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -38,6 +38,7 @@ if RUST_ENABLE + RUST_ON = 1 + CARGO_FLAGS = @cargo_defs@ + RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ ++# -L@abs_top_builddir@/rs/@rust_target_dir@ + RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil + RUST_DEFINES = -DRUST_ENABLE + if RUST_ENABLE_OFFLINE +@@ -298,7 +299,7 @@ clean-local: + -rm -rf $(abs_top_builddir)/html + -rm -rf $(abs_top_builddir)/man/man3 + if RUST_ENABLE +- CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/libsds/Cargo.toml ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/Cargo.toml + endif + + dberrstrs.h: Makefile +@@ -416,6 +417,11 @@ serverplugin_LTLIBRARIES = libacl-plugin.la \ + $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \ + $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN) + ++if RUST_ENABLE ++serverplugin_LTLIBRARIES += libentryuuid-plugin.la libentryuuid-syntax-plugin.la ++endif ++ ++ + noinst_LIBRARIES = libavl.a + + dist_noinst_HEADERS = \ +@@ -757,6 +763,10 @@ systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \ + $(srcdir)/ldap/schema/60nss-ldap.ldif \ + $(LIBACCTPOLICY_SCHEMA) + ++if RUST_ENABLE ++systemschema_DATA += $(srcdir)/ldap/schema/03entryuuid.ldif ++endif ++ + schema_DATA = $(srcdir)/ldap/schema/99user.ldif + + libexec_SCRIPTS = +@@ -1227,7 +1237,7 @@ libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS) + + if RUST_ENABLE + +-noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la ++noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la libentryuuid.la libentryuuid_syntax.la + + ### Why does this exist? + # +@@ -1252,6 +1262,8 @@ librsds_la_EXTRA = src/libsds/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librsds.a: $(librsds_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) + +@@ -1268,6 +1280,7 @@ librslapd_la_EXTRA = src/librslapd/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) +@@ -1288,6 +1301,7 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) +@@ -1295,8 +1309,64 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + # The header needs the lib build first. + rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + ++libslapi_r_plugin_SOURCES = \ ++ src/slapi_r_plugin/src/backend.rs \ ++ src/slapi_r_plugin/src/ber.rs \ ++ src/slapi_r_plugin/src/constants.rs \ ++ src/slapi_r_plugin/src/dn.rs \ ++ src/slapi_r_plugin/src/entry.rs \ ++ src/slapi_r_plugin/src/error.rs \ ++ src/slapi_r_plugin/src/log.rs \ ++ src/slapi_r_plugin/src/macros.rs \ ++ src/slapi_r_plugin/src/pblock.rs \ ++ src/slapi_r_plugin/src/plugin.rs \ ++ src/slapi_r_plugin/src/search.rs \ ++ src/slapi_r_plugin/src/syntax_plugin.rs \ ++ src/slapi_r_plugin/src/task.rs \ ++ src/slapi_r_plugin/src/value.rs \ ++ src/slapi_r_plugin/src/lib.rs ++ ++# Build rust ns-slapd components as a library. ++ENTRYUUID_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a ++ ++libentryuuid_la_SOURCES = \ ++ src/plugins/entryuuid/Cargo.toml \ ++ src/plugins/entryuuid/src/lib.rs \ ++ $(libslapi_r_plugin_SOURCES) ++ ++libentryuuid_la_EXTRA = src/plugin/entryuuid/Cargo.lock ++ ++@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a: $(libentryuuid_la_SOURCES) libslapd.la libentryuuid.la ++ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ ++ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid/Cargo.toml \ ++ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) ++ cp $(ENTRYUUID_LIB) @abs_top_builddir@/.libs/libentryuuid.a ++ ++ENTRYUUID_SYNTAX_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a ++ ++libentryuuid_syntax_la_SOURCES = \ ++ src/plugins/entryuuid_syntax/Cargo.toml \ ++ src/plugins/entryuuid_syntax/src/lib.rs \ ++ $(libslapi_r_plugin_SOURCES) ++ ++libentryuuid_syntax_la_EXTRA = src/plugin/entryuuid_syntax/Cargo.lock ++ ++@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a: $(libentryuuid_syntax_la_SOURCES) libslapd.la libentryuuid_syntax.la ++ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ ++ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid_syntax/Cargo.toml \ ++ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) ++ cp $(ENTRYUUID_SYNTAX_LIB) @abs_top_builddir@/.libs/libentryuuid_syntax.a ++ + EXTRA_DIST = $(librsds_la_SOURCES) $(librsds_la_EXTRA) \ + $(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \ ++ $(libentryuuid_la_SOURCES) $(libentryuuid_la_EXTRA) \ ++ $(libentryuuid_syntax_la_SOURCES) $(libentryuuid_syntax_la_EXTRA) \ + $(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA) + + ## Run rust tests +@@ -1306,13 +1376,17 @@ else + check-local: + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml + endif +@@ -1735,6 +1809,24 @@ libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) + libderef_plugin_la_DEPENDENCIES = libslapd.la + libderef_plugin_la_LDFLAGS = -avoid-version + ++if RUST_ENABLE ++#------------------------ ++# libentryuuid-syntax-plugin ++#----------------------- ++libentryuuid_syntax_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c ++libentryuuid_syntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid_syntax ++libentryuuid_syntax_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_SYNTAX_LIB) ++libentryuuid_syntax_plugin_la_LDFLAGS = -avoid-version ++ ++#------------------------ ++# libentryuuid-plugin ++#----------------------- ++libentryuuid_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c ++libentryuuid_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid ++libentryuuid_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_LIB) ++libentryuuid_plugin_la_LDFLAGS = -avoid-version ++endif ++ + #------------------------ + # libpbe-plugin + #----------------------- +diff --git a/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif +new file mode 100644 +index 000000000..b64090af7 +--- /dev/null ++++ b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif +@@ -0,0 +1,233 @@ ++version: 1 ++ ++# entry-id: 1 ++dn: dc=example,dc=com ++objectClass: top ++objectClass: domain ++dc: example ++description: dc=example,dc=com ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015542Z ++modifyTimestamp: 20200325015542Z ++nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda ++aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas ++ s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search ++ , compare)(userdn="ldap:///anyone");) ++aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa ++ lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa ++ re)(userdn="ldap:///anyone");) ++ ++# entry-id: 2 ++dn: cn=389_ds_system,dc=example,dc=com ++objectClass: top ++objectClass: nscontainer ++objectClass: ldapsubentry ++cn: 389_ds_system ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015542Z ++modifyTimestamp: 20200325015542Z ++nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 3 ++dn: ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: groups ++aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob ++ jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab ++ le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone") ++ ;) ++aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version ++ 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: ++ ///cn=group_modify,ou=permissions,dc=example,dc=com");) ++aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta ++ rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin ++ to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi ++ n,ou=permissions,dc=example,dc=com");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 4 ++dn: ou=people,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: people ++aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa ++ me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || ++ memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar ++ getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user ++ read"; allow (read, search, compare)(userdn="ldap:///anyone");) ++aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") ++ (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// ++ /self");) ++aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= ++ "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena ++ ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" ++ );) ++aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns ++ Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, ++ compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= ++ com");) ++aci: (targetattr="uid || description || displayName || loginShell || uidNumber ++ || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam ++ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec ++ tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ ++ e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl ++ e,dc=com");) ++aci: (targetattr="uid || description || displayName || loginShell || uidNumber ++ || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam ++ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec ++ tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a ++ llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, ++ dc=com");) ++aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub ++ licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use ++ r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, ++ ou=permissions,dc=example,dc=com");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 5 ++dn: ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: permissions ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 6 ++dn: ou=services,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: services ++aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | ++ | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; ++ acl "Enable anyone service account read"; allow (read, search, compare)(userd ++ n="ldap:///anyone");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325015544Z ++nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 7 ++dn: uid=demo_user,ou=people,dc=example,dc=com ++objectClass: top ++objectClass: nsPerson ++objectClass: nsAccount ++objectClass: nsOrgPerson ++objectClass: posixAccount ++uid: demo_user ++cn: Demo User ++displayName: Demo User ++legalName: Demo User Name ++uidNumber: 99998 ++gidNumber: 99998 ++homeDirectory: /var/empty ++loginShell: /bin/false ++nsAccountLock: true ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325061615Z ++nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda ++entryUUID: 973e1bbf-ba9c-45d4-b01b-ff7371fd9008 ++ ++# entry-id: 8 ++dn: cn=demo_group,ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: posixGroup ++objectClass: nsMemberOf ++cn: demo_group ++gidNumber: 99999 ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325015544Z ++nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda ++entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 ++ ++# entry-id: 9 ++dn: cn=group_admin,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: group_admin ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 10 ++dn: cn=group_modify,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: group_modify ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 11 ++dn: cn=user_admin,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_admin ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 12 ++dn: cn=user_modify,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_modify ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015546Z ++modifyTimestamp: 20200325015546Z ++nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 13 ++dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_passwd_reset ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015546Z ++modifyTimestamp: 20200325015546Z ++nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 14 ++dn: cn=user_private_read,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_private_read ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015547Z ++modifyTimestamp: 20200325015547Z ++nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda ++ +diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py +new file mode 100644 +index 000000000..beb73701d +--- /dev/null ++++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py +@@ -0,0 +1,226 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import ldap ++import pytest ++import time ++import shutil ++from lib389.idm.user import nsUserAccounts, UserAccounts ++from lib389.idm.account import Accounts ++from lib389.topologies import topology_st as topology ++from lib389.backend import Backends ++from lib389.paths import Paths ++from lib389.utils import ds_is_older ++from lib389._constants import * ++from lib389.plugins import EntryUUIDPlugin ++ ++default_paths = Paths() ++ ++pytestmark = pytest.mark.tier1 ++ ++DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/entryuuid/') ++IMPORT_UUID_A = "973e1bbf-ba9c-45d4-b01b-ff7371fd9008" ++UUID_BETWEEN = "eeeeeeee-0000-0000-0000-000000000000" ++IMPORT_UUID_B = "f6df8fe9-6b30-46aa-aa13-f0bf755371e8" ++UUID_MIN = "00000000-0000-0000-0000-000000000000" ++UUID_MAX = "ffffffff-ffff-ffff-ffff-ffffffffffff" ++ ++def _entryuuid_import_and_search(topology): ++ # 1 ++ ldif_dir = topology.standalone.get_ldif_dir() ++ target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-2020_03_30_13_14_47.ldif') ++ import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-2020_03_30_13_14_47.ldif') ++ shutil.copyfile(import_ldif, target_ldif) ++ ++ be = Backends(topology.standalone).get('userRoot') ++ task = be.import_ldif([target_ldif]) ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ ++ accounts = Accounts(topology.standalone, DEFAULT_SUFFIX) ++ # 2 - positive eq test ++ r2 = accounts.filter("(entryUUID=%s)" % IMPORT_UUID_A) ++ assert(len(r2) == 1) ++ r3 = accounts.filter("(entryuuid=%s)" % IMPORT_UUID_B) ++ assert(len(r3) == 1) ++ # 3 - negative eq test ++ r4 = accounts.filter("(entryuuid=%s)" % UUID_MAX) ++ assert(len(r4) == 0) ++ # 4 - le search ++ r5 = accounts.filter("(entryuuid<=%s)" % UUID_BETWEEN) ++ assert(len(r5) == 1) ++ # 5 - ge search ++ r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN) ++ assert(len(r6) == 1) ++ # 6 - le 0 search ++ r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN) ++ assert(len(r7) == 0) ++ # 7 - ge f search ++ r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX) ++ assert(len(r8) == 0) ++ # 8 - export db ++ task = be.export_ldif() ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ ++ ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_indexed_import_and_search(topology): ++ """ Test that an ldif of entries containing entryUUID's can be indexed and searched ++ correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ++ ordering, so we check these are correct. ++ ++ :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Import the db from the ldif ++ 2. EQ search for an entryuuid (match) ++ 3. EQ search for an entryuuid that does not exist ++ 4. LE search for an entryuuid lower (1 res) ++ 5. GE search for an entryuuid greater (1 res) ++ 6. LE for the 0 uuid (0 res) ++ 7. GE for the f uuid (0 res) ++ 8. export the db to ldif ++ ++ :expectedresults: ++ 1. Success ++ 2. 1 match ++ 3. 0 match ++ 4. 1 match ++ 5. 1 match ++ 6. 0 match ++ 7. 0 match ++ 8. success ++ """ ++ # Assert that the index correctly exists. ++ be = Backends(topology.standalone).get('userRoot') ++ indexes = be.get_indexes() ++ indexes.ensure_state(properties={ ++ 'cn': 'entryUUID', ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': ['eq', 'pres'], ++ }) ++ _entryuuid_import_and_search(topology) ++ ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_unindexed_import_and_search(topology): ++ """ Test that an ldif of entries containing entryUUID's can be UNindexed searched ++ correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ++ ordering, so we check these are correct. ++ ++ :id: b652b54d-f009-464b-b5bd-299a33f97243 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Import the db from the ldif ++ 2. EQ search for an entryuuid (match) ++ 3. EQ search for an entryuuid that does not exist ++ 4. LE search for an entryuuid lower (1 res) ++ 5. GE search for an entryuuid greater (1 res) ++ 6. LE for the 0 uuid (0 res) ++ 7. GE for the f uuid (0 res) ++ 8. export the db to ldif ++ ++ :expectedresults: ++ 1. Success ++ 2. 1 match ++ 3. 0 match ++ 4. 1 match ++ 5. 1 match ++ 6. 0 match ++ 7. 0 match ++ 8. success ++ """ ++ # Assert that the index does NOT exist for this test. ++ be = Backends(topology.standalone).get('userRoot') ++ indexes = be.get_indexes() ++ try: ++ idx = indexes.get('entryUUID') ++ idx.delete() ++ except ldap.NO_SUCH_OBJECT: ++ # It's already not present, move along, nothing to see here. ++ pass ++ _entryuuid_import_and_search(topology) ++ ++# Test entryUUID generation ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_generation_on_add(topology): ++ """ Test that when an entry is added, the entryuuid is added. ++ ++ :id: a7439b0a-dcee-4cd6-b8ef-771476c0b4f6 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Create a new entry in the db ++ 2. Check it has an entry uuid ++ ++ :expectedresults: ++ 1. Success ++ 2. An entry uuid is present ++ """ ++ # Step one - create a user! ++ account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user() ++ # Step two - does it have an entryuuid? ++ euuid = account.get_attr_val_utf8('entryUUID') ++ print(euuid) ++ assert(euuid is not None) ++ ++# Test fixup task ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_fixup_task(topology): ++ """Test that when an entries without UUID's can have one generated via ++ the fixup process. ++ ++ :id: ad42bba2-ffb2-4c22-a37d-cbe7bcf73d6b ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Disable the entryuuid plugin ++ 2. Create an entry ++ 3. Enable the entryuuid plugin ++ 4. Run the fixup ++ 5. Assert the entryuuid now exists ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Suddenly EntryUUID! ++ """ ++ # 1. Disable the plugin ++ plug = EntryUUIDPlugin(topology.standalone) ++ plug.disable() ++ topology.standalone.restart() ++ ++ # 2. create the account ++ account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user(uid=2000) ++ euuid = account.get_attr_val_utf8('entryUUID') ++ assert(euuid is None) ++ ++ # 3. enable the plugin ++ plug.enable() ++ topology.standalone.restart() ++ ++ # 4. run the fix up ++ # For now set the log level to high! ++ topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) ++ task = plug.fixup(DEFAULT_SUFFIX) ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) ++ ++ # 5. Assert the uuid. ++ euuid = account.get_attr_val_utf8('entryUUID') ++ assert(euuid is not None) ++ +diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif +index 57e6be3b3..3b0ad0a97 100644 +--- a/ldap/schema/02common.ldif ++++ b/ldap/schema/02common.ldif +@@ -11,6 +11,7 @@ + # + # Core schema, highly recommended but not required to start the Directory Server itself. + # ++# + dn: cn=schema + # + # attributes +diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif +new file mode 100644 +index 000000000..cbde981fe +--- /dev/null ++++ b/ldap/schema/03entryuuid.ldif +@@ -0,0 +1,16 @@ ++# ++# BEGIN COPYRIGHT BLOCK ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# END COPYRIGHT BLOCK ++# ++# Core schema, highly recommended but not required to start the Directory Server itself. ++# ++dn: cn=schema ++# ++# attributes ++# ++attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c +index 7e1618e79..bf5476272 100644 +--- a/ldap/servers/slapd/config.c ++++ b/ldap/servers/slapd/config.c +@@ -35,6 +35,10 @@ extern char *slapd_SSL3ciphers; + extern char *localuser; + char *rel2abspath(char *); + ++/* ++ * WARNING - this can only bootstrap PASSWORD and SYNTAX plugins! ++ * see fedse.c instead! ++ */ + static char *bootstrap_plugins[] = { + "dn: cn=PBKDF2_SHA256,cn=Password Storage Schemes,cn=plugins,cn=config\n" + "objectclass: top\n" +@@ -45,6 +49,19 @@ static char *bootstrap_plugins[] = { + "nsslapd-plugintype: pwdstoragescheme\n" + "nsslapd-pluginenabled: on", + ++ "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid_syntax\n" ++ "nsslapd-pluginpath: libentryuuid-syntax-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n" ++ "nsslapd-plugintype: syntax\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid_syntax\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid_syntax\n", ++ + NULL + }; + +diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c +index 7697e2b88..9ae9523e2 100644 +--- a/ldap/servers/slapd/entry.c ++++ b/ldap/servers/slapd/entry.c +@@ -2882,6 +2882,18 @@ slapi_entry_attr_get_bool(const Slapi_Entry *e, const char *type) + return slapi_entry_attr_get_bool_ext(e, type, PR_FALSE); + } + ++const struct slapi_value ** ++slapi_entry_attr_get_valuearray(const Slapi_Entry *e, const char *attrname) ++{ ++ Slapi_Attr *attr; ++ ++ if (slapi_entry_attr_find(e, attrname, &attr) != 0) { ++ return NULL; ++ } ++ ++ return attr->a_present_values.va; ++} ++ + /* + * Extract a single value from an entry (as a string). You do not need + * to free the returned string value. +diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 3b076eb17..0d645f909 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -119,6 +119,34 @@ static const char *internal_entries[] = + "cn:SNMP\n" + "nsSNMPEnabled: on\n", + ++#ifdef RUST_ENABLE ++ "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid_syntax\n" ++ "nsslapd-pluginpath: libentryuuid-syntax-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n" ++ "nsslapd-plugintype: syntax\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid_syntax\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid_syntax\n", ++ ++ "dn: cn=entryuuid,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid\n" ++ "nsslapd-pluginpath: libentryuuid-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_plugin_init\n" ++ "nsslapd-plugintype: betxnpreoperation\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid\n", ++#endif ++ + "dn: cn=Password Storage Schemes,cn=plugins,cn=config\n" + "objectclass: top\n" + "objectclass: nsContainer\n" +diff --git a/src/Cargo.lock b/src/Cargo.lock +index ce3c7ed27..33d7b8f23 100644 +--- a/src/Cargo.lock ++++ b/src/Cargo.lock +@@ -28,12 +28,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + + [[package]] + name = "base64" +-version = "0.10.1" ++version = "0.13.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +-dependencies = [ +- "byteorder", +-] ++checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + + [[package]] + name = "bitflags" +@@ -43,9 +40,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + + [[package]] + name = "byteorder" +-version = "1.4.2" ++version = "1.4.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" ++checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + + [[package]] + name = "cbindgen" +@@ -66,15 +63,12 @@ dependencies = [ + + [[package]] + name = "cc" +-version = "1.0.66" ++version = "1.0.67" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +- +-[[package]] +-name = "cfg-if" +-version = "0.1.10" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" ++checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" ++dependencies = [ ++ "jobserver", ++] + + [[package]] + name = "cfg-if" +@@ -97,16 +91,39 @@ dependencies = [ + "vec_map", + ] + ++[[package]] ++name = "entryuuid" ++version = "0.1.0" ++dependencies = [ ++ "cc", ++ "libc", ++ "paste", ++ "slapi_r_plugin", ++ "uuid", ++] ++ ++[[package]] ++name = "entryuuid_syntax" ++version = "0.1.0" ++dependencies = [ ++ "cc", ++ "libc", ++ "paste", ++ "slapi_r_plugin", ++ "uuid", ++] ++ + [[package]] + name = "fernet" +-version = "0.1.3" ++version = "0.1.4" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e7ac567fd75ce6bc28b68e63b5beaa3ce34f56bafd1122f64f8647c822e38a8b" ++checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" + dependencies = [ + "base64", + "byteorder", + "getrandom", + "openssl", ++ "zeroize", + ] + + [[package]] +@@ -126,20 +143,20 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + + [[package]] + name = "getrandom" +-version = "0.1.16" ++version = "0.2.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" ++checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" + dependencies = [ +- "cfg-if 1.0.0", ++ "cfg-if", + "libc", + "wasi", + ] + + [[package]] + name = "hermit-abi" +-version = "0.1.17" ++version = "0.1.18" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" ++checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" + dependencies = [ + "libc", + ] +@@ -150,6 +167,15 @@ version = "0.4.7" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + ++[[package]] ++name = "jobserver" ++version = "0.1.22" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" ++dependencies = [ ++ "libc", ++] ++ + [[package]] + name = "lazy_static" + version = "1.4.0" +@@ -158,9 +184,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + + [[package]] + name = "libc" +-version = "0.2.82" ++version = "0.2.94" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" ++checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" + + [[package]] + name = "librnsslapd" +@@ -182,32 +208,38 @@ dependencies = [ + + [[package]] + name = "log" +-version = "0.4.11" ++version = "0.4.14" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" ++checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" + dependencies = [ +- "cfg-if 0.1.10", ++ "cfg-if", + ] + ++[[package]] ++name = "once_cell" ++version = "1.7.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" ++ + [[package]] + name = "openssl" +-version = "0.10.32" ++version = "0.10.34" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" ++checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" + dependencies = [ + "bitflags", +- "cfg-if 1.0.0", ++ "cfg-if", + "foreign-types", +- "lazy_static", + "libc", ++ "once_cell", + "openssl-sys", + ] + + [[package]] + name = "openssl-sys" +-version = "0.9.60" ++version = "0.9.63" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" ++checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" + dependencies = [ + "autocfg", + "cc", +@@ -216,6 +248,25 @@ dependencies = [ + "vcpkg", + ] + ++[[package]] ++name = "paste" ++version = "0.1.18" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" ++dependencies = [ ++ "paste-impl", ++ "proc-macro-hack", ++] ++ ++[[package]] ++name = "paste-impl" ++version = "0.1.18" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" ++dependencies = [ ++ "proc-macro-hack", ++] ++ + [[package]] + name = "pkg-config" + version = "0.3.19" +@@ -228,31 +279,36 @@ version = "0.2.10" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + ++[[package]] ++name = "proc-macro-hack" ++version = "0.5.19" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" ++ + [[package]] + name = "proc-macro2" +-version = "1.0.24" ++version = "1.0.27" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" ++checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" + dependencies = [ + "unicode-xid", + ] + + [[package]] + name = "quote" +-version = "1.0.8" ++version = "1.0.9" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" ++checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" + dependencies = [ + "proc-macro2", + ] + + [[package]] + name = "rand" +-version = "0.7.3" ++version = "0.8.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" ++checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" + dependencies = [ +- "getrandom", + "libc", + "rand_chacha", + "rand_core", +@@ -261,9 +317,9 @@ dependencies = [ + + [[package]] + name = "rand_chacha" +-version = "0.2.2" ++version = "0.3.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" ++checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" + dependencies = [ + "ppv-lite86", + "rand_core", +@@ -271,27 +327,30 @@ dependencies = [ + + [[package]] + name = "rand_core" +-version = "0.5.1" ++version = "0.6.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" ++checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" + dependencies = [ + "getrandom", + ] + + [[package]] + name = "rand_hc" +-version = "0.2.0" ++version = "0.3.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" ++checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" + dependencies = [ + "rand_core", + ] + + [[package]] + name = "redox_syscall" +-version = "0.1.57" ++version = "0.2.8" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" ++checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" ++dependencies = [ ++ "bitflags", ++] + + [[package]] + name = "remove_dir_all" +@@ -314,18 +373,18 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + + [[package]] + name = "serde" +-version = "1.0.118" ++version = "1.0.126" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" ++checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" + dependencies = [ + "serde_derive", + ] + + [[package]] + name = "serde_derive" +-version = "1.0.118" ++version = "1.0.126" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" ++checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" + dependencies = [ + "proc-macro2", + "quote", +@@ -334,9 +393,9 @@ dependencies = [ + + [[package]] + name = "serde_json" +-version = "1.0.61" ++version = "1.0.64" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" ++checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" + dependencies = [ + "itoa", + "ryu", +@@ -350,6 +409,16 @@ dependencies = [ + "fernet", + ] + ++[[package]] ++name = "slapi_r_plugin" ++version = "0.1.0" ++dependencies = [ ++ "lazy_static", ++ "libc", ++ "paste", ++ "uuid", ++] ++ + [[package]] + name = "strsim" + version = "0.8.0" +@@ -358,22 +427,34 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + + [[package]] + name = "syn" +-version = "1.0.58" ++version = "1.0.72" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-xid", ++] ++ ++[[package]] ++name = "synstructure" ++version = "0.12.4" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" ++checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" + dependencies = [ + "proc-macro2", + "quote", ++ "syn", + "unicode-xid", + ] + + [[package]] + name = "tempfile" +-version = "3.1.0" ++version = "3.2.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" ++checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" + dependencies = [ +- "cfg-if 0.1.10", ++ "cfg-if", + "libc", + "rand", + "redox_syscall", +@@ -407,15 +488,24 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + + [[package]] + name = "unicode-xid" +-version = "0.2.1" ++version = "0.2.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" ++checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" ++ ++[[package]] ++name = "uuid" ++version = "0.8.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" ++dependencies = [ ++ "getrandom", ++] + + [[package]] + name = "vcpkg" +-version = "0.2.11" ++version = "0.2.12" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" ++checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d" + + [[package]] + name = "vec_map" +@@ -425,9 +515,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + + [[package]] + name = "wasi" +-version = "0.9.0+wasi-snapshot-preview1" ++version = "0.10.2+wasi-snapshot-preview1" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" ++checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + + [[package]] + name = "winapi" +@@ -450,3 +540,24 @@ name = "winapi-x86_64-pc-windows-gnu" + version = "0.4.0" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" ++ ++[[package]] ++name = "zeroize" ++version = "1.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" ++dependencies = [ ++ "zeroize_derive", ++] ++ ++[[package]] ++name = "zeroize_derive" ++version = "1.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++ "synstructure", ++] +diff --git a/src/Cargo.toml b/src/Cargo.toml +index f6dac010f..1ad2b21b0 100644 +--- a/src/Cargo.toml ++++ b/src/Cargo.toml +@@ -1,10 +1,13 @@ + + [workspace] + members = [ +- "librslapd", +- "librnsslapd", +- "libsds", +- "slapd", ++ "librslapd", ++ "librnsslapd", ++ "libsds", ++ "slapd", ++ "slapi_r_plugin", ++ "plugins/entryuuid", ++ "plugins/entryuuid_syntax", + ] + + [profile.release] +diff --git a/src/README.md b/src/README.md +new file mode 100644 +index 000000000..e69de29bb +diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py +index 52aac0f21..c184c8d4f 100644 +--- a/src/lib389/lib389/_constants.py ++++ b/src/lib389/lib389/_constants.py +@@ -150,6 +150,7 @@ DN_IMPORT_TASK = "cn=import,%s" % DN_TASKS + DN_BACKUP_TASK = "cn=backup,%s" % DN_TASKS + DN_RESTORE_TASK = "cn=restore,%s" % DN_TASKS + DN_MBO_TASK = "cn=memberOf task,%s" % DN_TASKS ++DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS + DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS + DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS + DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index aab07c028..bcd7b383f 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -765,7 +765,7 @@ class Backend(DSLdapObject): + enc_attr.delete() + break + +- def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=False, only_core=False, ++ def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=None, only_core=False, + include_suffixes=None, exclude_suffixes=None): + """Do an import of the suffix""" + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 530fb367a..ac0fe1a8c 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -34,6 +34,7 @@ from lib389.instance.options import General2Base, Slapd2Base, Backend2Base + from lib389.paths import Paths + from lib389.saslmap import SaslMappings + from lib389.instance.remove import remove_ds_instance ++from lib389.index import Indexes + from lib389.utils import ( + assert_c, + is_a_dn, +@@ -928,6 +929,19 @@ class SetupDs(object): + if slapd['self_sign_cert']: + ds_instance.config.set('nsslapd-security', 'on') + ++ # Before we create any backends, create any extra default indexes that may be ++ # dynamicly provisioned, rather than from template-dse.ldif. Looking at you ++ # entryUUID (requires rust enabled). ++ # ++ # Indexes defaults to default_index_dn ++ indexes = Indexes(ds_instance) ++ if ds_instance.ds_paths.rust_enabled: ++ indexes.create(properties={ ++ 'cn': 'entryUUID', ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': ['eq', 'pres'], ++ }) ++ + # Create the backends as listed + # Load example data if needed. + for backend in backends: +diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py +index 16899f6d3..2d88e60bd 100644 +--- a/src/lib389/lib389/plugins.py ++++ b/src/lib389/lib389/plugins.py +@@ -2244,3 +2244,33 @@ class ContentSyncPlugin(Plugin): + + def __init__(self, instance, dn="cn=Content Synchronization,cn=plugins,cn=config"): + super(ContentSyncPlugin, self).__init__(instance, dn) ++ ++ ++class EntryUUIDPlugin(Plugin): ++ """The EntryUUID plugin configuration ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ :param dn: Entry DN ++ :type dn: str ++ """ ++ def __init__(self, instance, dn="cn=entryuuid,cn=plugins,cn=config"): ++ super(EntryUUIDPlugin, self).__init__(instance, dn) ++ ++ def fixup(self, basedn, _filter=None): ++ """Create an entryuuid fixup task ++ ++ :param basedn: Basedn to fix up ++ :type basedn: str ++ :param _filter: a filter for entries to fix up ++ :type _filter: str ++ ++ :returns: an instance of Task(DSLdapObject) ++ """ ++ ++ task = tasks.EntryUUIDFixupTask(self._instance) ++ task_properties = {'basedn': basedn} ++ if _filter is not None: ++ task_properties['filter'] = _filter ++ task.create(properties=task_properties) ++ ++ return task +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index b19e7918d..590c6ee79 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -203,6 +203,20 @@ class USNTombstoneCleanupTask(Task): + return super(USNTombstoneCleanupTask, self)._validate(rdn, properties, basedn) + + ++class EntryUUIDFixupTask(Task): ++ """A single instance of memberOf task entry ++ ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ """ ++ ++ def __init__(self, instance, dn=None): ++ self.cn = 'entryuuid_fixup_' + Task._get_task_date() ++ dn = "cn=" + self.cn + "," + DN_EUUID_TASK ++ super(EntryUUIDFixupTask, self).__init__(instance, dn) ++ self._must_attributes.extend(['basedn']) ++ ++ + class SchemaReloadTask(Task): + """A single instance of schema reload task entry + +diff --git a/src/librnsslapd/build.rs b/src/librnsslapd/build.rs +index 9b953b246..13f6d2e03 100644 +--- a/src/librnsslapd/build.rs ++++ b/src/librnsslapd/build.rs +@@ -3,13 +3,14 @@ extern crate cbindgen; + use std::env; + + fn main() { +- let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); +- let out_dir = env::var("SLAPD_HEADER_DIR").unwrap(); +- +- cbindgen::Builder::new() +- .with_language(cbindgen::Language::C) +- .with_crate(crate_dir) +- .generate() +- .expect("Unable to generate bindings") +- .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir)); ++ if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") { ++ if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") { ++ cbindgen::Builder::new() ++ .with_language(cbindgen::Language::C) ++ .with_crate(crate_dir) ++ .generate() ++ .expect("Unable to generate bindings") ++ .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir)); ++ } ++ } + } +diff --git a/src/librnsslapd/src/lib.rs b/src/librnsslapd/src/lib.rs +index c5fd2bbaf..dffe4ce1c 100644 +--- a/src/librnsslapd/src/lib.rs ++++ b/src/librnsslapd/src/lib.rs +@@ -4,9 +4,9 @@ + // Remember this is just a c-bindgen stub, all logic should come from slapd! + + extern crate libc; +-use slapd; + use libc::c_char; +-use std::ffi::{CString, CStr}; ++use slapd; ++use std::ffi::{CStr, CString}; + + #[no_mangle] + pub extern "C" fn do_nothing_again_rust() -> usize { +@@ -29,9 +29,7 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha + // We have to move string memory ownership by copying so the system + // allocator has it. + let raw = tok.into_raw(); +- let dup_tok = unsafe { +- libc::strdup(raw) +- }; ++ let dup_tok = unsafe { libc::strdup(raw) }; + unsafe { + CString::from_raw(raw); + }; +@@ -45,7 +43,12 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha + } + + #[no_mangle] +-pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, raw_key: *const c_char, ttl: u64) -> bool { ++pub extern "C" fn fernet_verify_token( ++ dn: *const c_char, ++ token: *const c_char, ++ raw_key: *const c_char, ++ ttl: u64, ++) -> bool { + if dn.is_null() || raw_key.is_null() || token.is_null() { + return false; + } +@@ -67,4 +70,3 @@ pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, r + Err(_) => false, + } + } +- +diff --git a/src/librslapd/Cargo.toml b/src/librslapd/Cargo.toml +index 1dd715ed2..08309c224 100644 +--- a/src/librslapd/Cargo.toml ++++ b/src/librslapd/Cargo.toml +@@ -12,10 +12,6 @@ path = "src/lib.rs" + name = "rslapd" + crate-type = ["staticlib", "lib"] + +-# [profile.release] +-# panic = "abort" +-# lto = true +- + [dependencies] + slapd = { path = "../slapd" } + libc = "0.2" +diff --git a/src/librslapd/build.rs b/src/librslapd/build.rs +index 4d4c1ce42..84aff156b 100644 +--- a/src/librslapd/build.rs ++++ b/src/librslapd/build.rs +@@ -3,13 +3,14 @@ extern crate cbindgen; + use std::env; + + fn main() { +- let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); +- let out_dir = env::var("SLAPD_HEADER_DIR").unwrap(); +- +- cbindgen::Builder::new() +- .with_language(cbindgen::Language::C) +- .with_crate(crate_dir) +- .generate() +- .expect("Unable to generate bindings") +- .write_to_file(format!("{}/rust-slapi-private.h", out_dir)); ++ if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") { ++ if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") { ++ cbindgen::Builder::new() ++ .with_language(cbindgen::Language::C) ++ .with_crate(crate_dir) ++ .generate() ++ .expect("Unable to generate bindings") ++ .write_to_file(format!("{}/rust-slapi-private.h", out_dir)); ++ } ++ } + } +diff --git a/src/librslapd/src/lib.rs b/src/librslapd/src/lib.rs +index 9cce193a0..cf283a7ce 100644 +--- a/src/librslapd/src/lib.rs ++++ b/src/librslapd/src/lib.rs +@@ -8,7 +8,7 @@ extern crate libc; + use slapd; + + use libc::c_char; +-use std::ffi::{CString, CStr}; ++use std::ffi::{CStr, CString}; + + #[no_mangle] + pub extern "C" fn do_nothing_rust() -> usize { +@@ -18,9 +18,7 @@ pub extern "C" fn do_nothing_rust() -> usize { + #[no_mangle] + pub extern "C" fn rust_free_string(s: *mut c_char) { + if !s.is_null() { +- let _ = unsafe { +- CString::from_raw(s) +- }; ++ let _ = unsafe { CString::from_raw(s) }; + } + } + +@@ -35,9 +33,7 @@ pub extern "C" fn fernet_generate_new_key() -> *mut c_char { + match res_key { + Ok(key) => { + let raw = key.into_raw(); +- let dup_key = unsafe { +- libc::strdup(raw) +- }; ++ let dup_key = unsafe { libc::strdup(raw) }; + rust_free_string(raw); + dup_key + } +@@ -53,4 +49,3 @@ pub extern "C" fn fernet_validate_key(raw_key: *const c_char) -> bool { + Err(_) => false, + } + } +- +diff --git a/src/libsds/sds/lib.rs b/src/libsds/sds/lib.rs +index aa70c7a8e..9e2973222 100644 +--- a/src/libsds/sds/lib.rs ++++ b/src/libsds/sds/lib.rs +@@ -28,5 +28,3 @@ pub enum sds_result { + /// The list is exhausted, no more elements can be returned. + ListExhausted = 16, + } +- +- +diff --git a/src/libsds/sds/tqueue.rs b/src/libsds/sds/tqueue.rs +index b7042e514..ebe1f4b6c 100644 +--- a/src/libsds/sds/tqueue.rs ++++ b/src/libsds/sds/tqueue.rs +@@ -9,8 +9,8 @@ + #![warn(missing_docs)] + + use super::sds_result; +-use std::sync::Mutex; + use std::collections::LinkedList; ++use std::sync::Mutex; + + // Borrow from libc + #[doc(hidden)] +@@ -75,7 +75,10 @@ impl Drop for TQueue { + /// C compatible wrapper around the TQueue. Given a valid point, a TQueue pointer + /// is allocated on the heap and referenced in retq. free_fn_ptr may be NULL + /// but if it references a function, this will be called during drop of the TQueue. +-pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option) -> sds_result { ++pub extern "C" fn sds_tqueue_init( ++ retq: *mut *mut TQueue, ++ free_fn_ptr: Option, ++) -> sds_result { + // This piece of type signature magic is because in rust types that extern C, + // with option has None resolve to null. What this causes is we can wrap + // our fn ptr with Option in rust, but the C side gives us fn ptr or NULL, and +@@ -93,7 +96,7 @@ pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option sds_result { ++pub extern "C" fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_result { + // Check for null .... + unsafe { (*q).enqueue(elem) }; + sds_result::Success +@@ -103,29 +106,27 @@ pub extern fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_r + /// Dequeue from the head of the queue. The result will be placed into elem. + /// if elem is NULL no dequeue is attempted. If there are no more items + /// ListExhausted is returned. +-pub extern fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result { ++pub extern "C" fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result { + if elem.is_null() { + return sds_result::NullPointer; + } + match unsafe { (*q).dequeue() } { + Some(e) => { +- unsafe { *elem = e; }; ++ unsafe { ++ *elem = e; ++ }; + sds_result::Success + } +- None => { +- sds_result::ListExhausted +- } ++ None => sds_result::ListExhausted, + } + } + + #[no_mangle] + /// Free the queue and all remaining elements. After this point it is + /// not safe to access the queue. +-pub extern fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result { ++pub extern "C" fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result { + // This will drop the queue and free it's content + // mem::drop(q); + let _q = unsafe { Box::from_raw(q) }; + sds_result::Success + } +- +- +diff --git a/src/plugins/entryuuid/Cargo.toml b/src/plugins/entryuuid/Cargo.toml +new file mode 100644 +index 000000000..c43d7a771 +--- /dev/null ++++ b/src/plugins/entryuuid/Cargo.toml +@@ -0,0 +1,21 @@ ++[package] ++name = "entryuuid" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "entryuuid" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++slapi_r_plugin = { path="../../slapi_r_plugin" } ++uuid = { version = "0.8", features = [ "v4" ] } ++ ++[build-dependencies] ++cc = { version = "1.0", features = ["parallel"] } +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +new file mode 100644 +index 000000000..6b5e8d1bb +--- /dev/null ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -0,0 +1,196 @@ ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++use std::convert::{TryFrom, TryInto}; ++use std::os::raw::c_char; ++use uuid::Uuid; ++ ++#[derive(Debug)] ++struct FixupData { ++ basedn: Sdn, ++ raw_filter: String, ++} ++ ++struct EntryUuid; ++/* ++ * /---- plugin ident ++ * | /---- Struct name. ++ * V V ++ */ ++slapi_r_plugin_hooks!(entryuuid, EntryUuid); ++ ++/* ++ * /---- plugin ident ++ * | /---- cb ident ++ * | | /---- map function ++ * V V V ++ */ ++slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_mapfn); ++ ++fn assign_uuid(e: &mut EntryRef) { ++ let sdn = e.get_sdnref(); ++ ++ // We could consider making these lazy static. ++ let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn"); ++ let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn"); ++ ++ if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) { ++ // We don't need to assign to these suffixes. ++ log_error!( ++ ErrorLevel::Trace, ++ "assign_uuid -> not assigning to {:?} as part of system suffix", ++ sdn.to_dn_string() ++ ); ++ return; ++ } ++ ++ // Generate a new Uuid. ++ let u: Uuid = Uuid::new_v4(); ++ log_error!( ++ ErrorLevel::Trace, ++ "assign_uuid -> assigning {:?} to dn {}", ++ u, ++ sdn.to_dn_string() ++ ); ++ ++ let uuid_value = Value::from(&u); ++ ++ // Add it to the entry ++ e.add_value("entryUUID", &uuid_value); ++} ++ ++impl SlapiPlugin3 for EntryUuid { ++ // Indicate we have pre add ++ fn has_betxn_pre_add() -> bool { ++ true ++ } ++ ++ fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "betxn_pre_add"); ++ ++ let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; ++ assign_uuid(&mut e); ++ ++ Ok(()) ++ } ++ ++ fn has_task_handler() -> Option<&'static str> { ++ Some("entryuuid task") ++ } ++ ++ type TaskData = FixupData; ++ ++ fn task_validate(e: &EntryRef) -> Result { ++ // Does the entry have what we need? ++ let basedn: Sdn = match e.get_attr("basedn") { ++ Some(values) => values ++ .first() ++ .ok_or_else(|| { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_validate basedn error -> empty value array?" ++ ); ++ LDAPError::Operation ++ })? ++ .as_ref() ++ .try_into() ++ .map_err(|e| { ++ log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e); ++ LDAPError::Operation ++ })?, ++ None => return Err(LDAPError::ObjectClassViolation), ++ }; ++ ++ let raw_filter: String = match e.get_attr("filter") { ++ Some(values) => values ++ .first() ++ .ok_or_else(|| { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_validate filter error -> empty value array?" ++ ); ++ LDAPError::Operation ++ })? ++ .as_ref() ++ .try_into() ++ .map_err(|e| { ++ log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e); ++ LDAPError::Operation ++ })?, ++ None => { ++ // Give a default filter. ++ "(objectClass=*)".to_string() ++ } ++ }; ++ ++ // Error if the first filter is empty? ++ ++ // Now, to make things faster, we wrap the filter in a exclude term. ++ let raw_filter = format!("(&{}(!(entryuuid=*)))", raw_filter); ++ ++ Ok(FixupData { basedn, raw_filter }) ++ } ++ ++ fn task_be_dn_hint(data: &Self::TaskData) -> Option { ++ Some(data.basedn.clone()) ++ } ++ ++ fn task_handler(_task: &Task, data: Self::TaskData) -> Result { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_handler -> start thread with -> {:?}", ++ data ++ ); ++ ++ let search = Search::new_map_entry( ++ &(*data.basedn), ++ SearchScope::Subtree, ++ &data.raw_filter, ++ plugin_id(), ++ &(), ++ entryuuid_fixup_cb, ++ ) ++ .map_err(|e| { ++ log_error!( ++ ErrorLevel::Error, ++ "task_handler -> Unable to construct search -> {:?}", ++ e ++ ); ++ e ++ })?; ++ ++ match search.execute() { ++ Ok(_) => { ++ log_error!(ErrorLevel::Info, "task_handler -> fixup complete, success!"); ++ Ok(data) ++ } ++ Err(e) => { ++ // log, and return ++ log_error!( ++ ErrorLevel::Error, ++ "task_handler -> fixup complete, failed -> {:?}", ++ e ++ ); ++ Err(PluginError::GenericFailure) ++ } ++ } ++ } ++ ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin start"); ++ Ok(()) ++ } ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin close"); ++ Ok(()) ++ } ++} ++ ++pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> { ++ assign_uuid(&mut e); ++ Ok(()) ++} ++ ++#[cfg(test)] ++mod tests {} +diff --git a/src/plugins/entryuuid_syntax/Cargo.toml b/src/plugins/entryuuid_syntax/Cargo.toml +new file mode 100644 +index 000000000..f7d3d64c9 +--- /dev/null ++++ b/src/plugins/entryuuid_syntax/Cargo.toml +@@ -0,0 +1,21 @@ ++[package] ++name = "entryuuid_syntax" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "entryuuid_syntax" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++slapi_r_plugin = { path="../../slapi_r_plugin" } ++uuid = { version = "0.8", features = [ "v4" ] } ++ ++[build-dependencies] ++cc = { version = "1.0", features = ["parallel"] } +diff --git a/src/plugins/entryuuid_syntax/src/lib.rs b/src/plugins/entryuuid_syntax/src/lib.rs +new file mode 100644 +index 000000000..0a4b89f16 +--- /dev/null ++++ b/src/plugins/entryuuid_syntax/src/lib.rs +@@ -0,0 +1,145 @@ ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++use std::cmp::Ordering; ++use std::convert::TryInto; ++use uuid::Uuid; ++ ++struct EntryUuidSyntax; ++ ++// https://tools.ietf.org/html/rfc4530 ++ ++slapi_r_syntax_plugin_hooks!(entryuuid_syntax, EntryUuidSyntax); ++ ++impl SlapiSyntaxPlugin1 for EntryUuidSyntax { ++ fn attr_oid() -> &'static str { ++ "1.3.6.1.1.16.1" ++ } ++ ++ fn attr_compat_oids() -> Vec<&'static str> { ++ Vec::new() ++ } ++ ++ fn attr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.1", "UUID"] ++ } ++ ++ fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError> { ++ let r: Result = bval.try_into(); ++ r.map(|_| ()) ++ } ++ ++ fn eq_mr_oid() -> &'static str { ++ "1.3.6.1.1.16.2" ++ } ++ ++ fn eq_mr_name() -> &'static str { ++ "UUIDMatch" ++ } ++ ++ fn eq_mr_desc() -> &'static str { ++ "UUIDMatch matching rule." ++ } ++ ++ fn eq_mr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.2", "uuidMatch", "UUIDMatch"] ++ } ++ ++ fn filter_ava_eq( ++ _pb: &mut PblockRef, ++ bval_filter: &BerValRef, ++ vals: &ValueArrayRef, ++ ) -> Result { ++ let u = match bval_filter.try_into() { ++ Ok(u) => u, ++ Err(_e) => return Ok(false), ++ }; ++ ++ let r = vals.iter().fold(false, |acc, va| { ++ if acc { ++ acc ++ } else { ++ // is u in va? ++ log_error!(ErrorLevel::Trace, "filter_ava_eq debug -> {:?}", va); ++ let res: Result = (&*va).try_into(); ++ match res { ++ Ok(vu) => vu == u, ++ Err(_) => acc, ++ } ++ } ++ }); ++ log_error!(ErrorLevel::Trace, "filter_ava_eq result -> {:?}", r); ++ Ok(r) ++ } ++ ++ fn eq_mr_filter_values2keys( ++ _pb: &mut PblockRef, ++ vals: &ValueArrayRef, ++ ) -> Result { ++ vals.iter() ++ .map(|va| { ++ let u: Uuid = (&*va).try_into()?; ++ Ok(Value::from(&u)) ++ }) ++ .collect() ++ } ++} ++ ++impl SlapiSubMr for EntryUuidSyntax {} ++ ++impl SlapiOrdMr for EntryUuidSyntax { ++ fn ord_mr_oid() -> Option<&'static str> { ++ Some("1.3.6.1.1.16.3") ++ } ++ ++ fn ord_mr_name() -> &'static str { ++ "UUIDOrderingMatch" ++ } ++ ++ fn ord_mr_desc() -> &'static str { ++ "UUIDMatch matching rule." ++ } ++ ++ fn ord_mr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.3", "uuidOrderingMatch", "UUIDOrderingMatch"] ++ } ++ ++ fn filter_ava_ord( ++ _pb: &mut PblockRef, ++ bval_filter: &BerValRef, ++ vals: &ValueArrayRef, ++ ) -> Result, PluginError> { ++ let u: Uuid = match bval_filter.try_into() { ++ Ok(u) => u, ++ Err(_e) => return Ok(None), ++ }; ++ ++ let r = vals.iter().fold(None, |acc, va| { ++ if acc.is_some() { ++ acc ++ } else { ++ // is u in va? ++ log_error!(ErrorLevel::Trace, "filter_ava_ord debug -> {:?}", va); ++ let res: Result = (&*va).try_into(); ++ match res { ++ Ok(vu) => { ++ // 1.partial_cmp(2) => ordering::less ++ vu.partial_cmp(&u) ++ } ++ Err(_) => acc, ++ } ++ } ++ }); ++ log_error!(ErrorLevel::Trace, "filter_ava_ord result -> {:?}", r); ++ Ok(r) ++ } ++ ++ fn filter_compare(a: &BerValRef, b: &BerValRef) -> Ordering { ++ let ua: Uuid = a.try_into().expect("An invalid value a was given!"); ++ let ub: Uuid = b.try_into().expect("An invalid value b was given!"); ++ ua.cmp(&ub) ++ } ++} ++ ++#[cfg(test)] ++mod tests {} +diff --git a/src/slapd/src/error.rs b/src/slapd/src/error.rs +index 06ddb27b4..6f4d782ee 100644 +--- a/src/slapd/src/error.rs ++++ b/src/slapd/src/error.rs +@@ -1,8 +1,6 @@ +- + pub enum SlapdError { + // This occurs when a string contains an inner null byte + // that cstring can't handle. + CStringInvalidError, + FernetInvalidKey, + } +- +diff --git a/src/slapd/src/fernet.rs b/src/slapd/src/fernet.rs +index fcbd873f8..1a3251fd9 100644 +--- a/src/slapd/src/fernet.rs ++++ b/src/slapd/src/fernet.rs +@@ -1,39 +1,30 @@ + // Routines for managing fernet encryption + +-use std::ffi::{CString, CStr}; +-use fernet::Fernet; + use crate::error::SlapdError; ++use fernet::Fernet; ++use std::ffi::{CStr, CString}; + + pub fn generate_new_key() -> Result { + let k = Fernet::generate_key(); +- CString::new(k) +- .map_err(|_| { +- SlapdError::CStringInvalidError +- }) ++ CString::new(k).map_err(|_| SlapdError::CStringInvalidError) + } + + pub fn new(c_str_key: &CStr) -> Result { +- let str_key = c_str_key.to_str() ++ let str_key = c_str_key ++ .to_str() + .map_err(|_| SlapdError::CStringInvalidError)?; +- Fernet::new(str_key) +- .ok_or(SlapdError::FernetInvalidKey) ++ Fernet::new(str_key).ok_or(SlapdError::FernetInvalidKey) + } + + pub fn encrypt(fernet: &Fernet, dn: &CStr) -> Result { + let tok = fernet.encrypt(dn.to_bytes()); +- CString::new(tok) +- .map_err(|_| { +- SlapdError::CStringInvalidError +- }) ++ CString::new(tok).map_err(|_| SlapdError::CStringInvalidError) + } + + pub fn decrypt(fernet: &Fernet, tok: &CStr, ttl: u64) -> Result { +- let s = tok.to_str() +- .map_err(|_| SlapdError::CStringInvalidError)?; +- let r: Vec = fernet.decrypt_with_ttl(s, ttl) ++ let s = tok.to_str().map_err(|_| SlapdError::CStringInvalidError)?; ++ let r: Vec = fernet ++ .decrypt_with_ttl(s, ttl) + .map_err(|_| SlapdError::FernetInvalidKey)?; +- CString::new(r) +- .map_err(|_| SlapdError::CStringInvalidError) ++ CString::new(r).map_err(|_| SlapdError::CStringInvalidError) + } +- +- +diff --git a/src/slapd/src/lib.rs b/src/slapd/src/lib.rs +index 5b1f20368..79f1600c2 100644 +--- a/src/slapd/src/lib.rs ++++ b/src/slapd/src/lib.rs +@@ -1,5 +1,2 @@ +- + pub mod error; + pub mod fernet; +- +- +diff --git a/src/slapi_r_plugin/Cargo.toml b/src/slapi_r_plugin/Cargo.toml +new file mode 100644 +index 000000000..c7958671a +--- /dev/null ++++ b/src/slapi_r_plugin/Cargo.toml +@@ -0,0 +1,19 @@ ++[package] ++name = "slapi_r_plugin" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++build = "build.rs" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "slapi_r_plugin" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++lazy_static = "1.4" ++uuid = { version = "0.8", features = [ "v4" ] } +diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md +new file mode 100644 +index 000000000..af9743ec9 +--- /dev/null ++++ b/src/slapi_r_plugin/README.md +@@ -0,0 +1,216 @@ ++ ++# Slapi R(ust) Plugin Bindings ++ ++If you are here, you are probably interested in the Rust bindings that allow plugins to be written ++in Rust for the 389 Directory Server project. If you are, you should use `cargo doc --workspace --no-deps` ++in `src`, as this contains the material you want for implementing safe plugins. ++ ++This readme is intended for developers of the bindings that enable those plugins to work. ++ ++As such it likely requires that you have an understanding both of C and ++the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html) ++ ++> **WARNING** This place is not a place of honor ... no highly esteemed deed is commemorated here ++> ... nothing valued is here. What is here is dangerous and repulsive to us. This message is a ++> warning about danger. ++ ++This document will not detail the specifics of unsafe or the invariants you must adhere to for rust ++to work with C. ++ ++If you still want to see more about the plugin bindings, go on ... ++ ++## The Challenge ++ ++Rust is a memory safe language - that means you may not dereference pointers or alter or interact ++with uninitialised memory. There are whole classes of problems that this resolves, but it means ++that Rust is opiniated about how it interacts with memory. ++ ++C is an unsafe language - there are undefined behaviours all through out the specification, memory ++can be interacted with without bounds which leads to many kinds of issues ranging from crashes, ++silent data corruption, to code execution and explotation. ++ ++While it would be nice to rewrite everything from C to Rust, this is a large task - instead we need ++a way to allow Rust and C to interact. ++ ++## The Goal ++ ++To be able to define, a pure Rust, 100% safe (in rust terms) plugin for 389 Directory Server that ++can perform useful tasks. ++ ++## The 389 Directory Server Plugin API ++ ++The 389-ds plugin system works by reading an ldap entry from cn=config, that directs to a shared ++library. That shared library path is dlopened and an init symbol read and activated. At that ++point the plugin is able to call-back into 389-ds to provide registration of function handlers for ++various tasks that the plugin may wish to perform at defined points in a operations execution. ++ ++During the execution of a plugin callback, the context of the environment is passed through a ++parameter block (pblock). This pblock has a set of apis for accessing it's content, which may ++or may not be defined based on the execution state of the server. ++ ++Common plugin tasks involve the transformation of entries during write operation paths to provide ++extra attributes to the entry or generation of other entries. Values in entries are represented by ++internal structures that may or may not have sorting of content. ++ ++Already at this point it can be seen there is a lot of surface area to access. For clarity in ++our trivial example here we have required: ++ ++* Pblock ++* Entry ++* ValueSet ++* Value ++* Sdn ++* Result Codes ++ ++We need to be able to interact with all of these - and more - to make useful plugins. ++ ++## Structure of the Rust Plugin bindings. ++ ++As a result, there are a number of items we must be able to implement: ++ ++* Creation of the plugin function callback points ++* Transformation of C pointer types into Rust structures that can be interacted with. ++* Ability to have Rust interact with structures to achieve side effects in the C server ++* Mapping of errors that C can understand ++* Make all of it safe. ++ ++In order to design this, it's useful to see what a plugin from Rust should look like - by designing ++what the plugin should look like, we make the bindings that are preferable and ergonomic to rust ++rather than compromising on quality and developer experience. ++ ++Here is a minimal example of a plugin - it may not compile or be complete, it serves as an ++example. ++ ++``` ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++ ++struct NewPlugin; ++ ++slapi_r_plugin_hooks!(plugin_name, NewPlugin); ++ ++impl SlapiPlugin3 for NewPlugin { ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin start"); ++ Ok(()) ++ } ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin close"); ++ Ok(()) ++ } ++ ++ fn has_betxn_pre_add() -> bool { ++ true ++ } ++ ++ fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { ++ let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; ++ let sdn = e.get_sdnref(); ++ ++ log_error!(ErrorLevel::Trace, "betxn_pre_add -> {:?}", sdn); ++ Ok(()) ++ } ++} ++``` ++ ++Important details - there is no unsafe, we use rust native error handling and functions, there ++is no indication of memory management, we are defined by a trait, error logging uses native ++formatting. There are probably other details too - I'll leave it as an exercise for the reader ++to play Where's Wally and find them all. ++ ++With the end goal in mind, we can begin to look at the construction of the plugin system, and ++the design choices that were made. ++ ++## The Plugin Trait ++ ++A significant choice was the use of a trait to define the possible plugin function operations ++for rust implementors. This allows the compiler to guarantee that a plugin *will* have all ++associated functions. ++ ++> Traits are synonomous with java interfaces, defining methods you "promise" to implement, unlike ++> object orientation with a class hierarchy. ++ ++Now, you may notice that not all members of the trait are implemented. This is due to a feature ++of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide ++template versions of these functions. If you "overwrite" them, your implementation is used. Unlike ++OO, you may not inherit or call the default function. ++ ++If a default is not provided you *must* implement that function to be considered valid. Today (20200422) ++this only applies to `start` and `close`. ++ ++The default implementations all return "false" to the presence of callbacks, and if they are used, ++they will always return an error. ++ ++## Interface generation ++ ++While it is nice to have this Rust interface for plugins, C is unable to call it (Rust uses a different ++stack calling syntax to C, as well as symbol mangaling). To expose these, we must provide `extern C` ++functions, where any function that requires a static symbol must be marked as no_mangle. ++ ++Rather than ask all plugin authors to do this, we can use the rust macro system to generate these ++interfaces at compile time. This is the reason for this line: ++ ++``` ++slapi_r_plugin_hooks!(plugin_name, NewPlugin); ++``` ++ ++This macro is defined in src/macros.rs, and is "the bridge" from C to Rust. Given a plugin name ++and a struct of the trait SlapiPlugin3, this macro is able to generate all needed C compatible ++functions. Based on the calls to `has_`, the generated functions are registered to the pblock ++that is provided. ++ ++When a call back triggers, the function landing point is called. This then wraps all the pointer ++types from C into Rust structs, and then dispatches to the struct instance. ++ ++When the struct function returns, the result is unpacked and turned into C compatible result codes - ++in some cases, the result codes are sanitised due to quirks in the C ds api - `[<$mod_ident _plugin_mr_filter_ava>]` ++is an excellent example of this, where Rust returns are `true`/`false`, which would normally ++be FFI safe to convert to 1/0 respectively, but 389-ds expects the inverse in this case, where ++0 is true and all other values are false. To present a sane api to rust, the macro layer does this ++(mind bending) transformation for us. ++ ++## C Ptr Wrapper types ++ ++This is likely the major, and important detail of the plugin api. By wrapping these C ptrs with ++Rust types, we can create types that perform as rust expects, and adheres to the invariants required, ++while providing safe - and useful interfaces to users. ++ ++It's important to understand how Rust manages memory both on the stack and the heap - Please see ++[the Rust Book](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html) for more. ++ ++As a result, this means that we must express in code, assertions about the proper ownership of memory ++and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible ++for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or ++*hand waving* magical failures that are eXtReMeLy FuN to debug. ++ ++### Reference Types ++ ++There are a number of types, such as `SdnRef`, which have a suffix of `*Ref`. These types represent ++values whos content is owned by the C server - that is, it is the responsibility of 389-ds to free ++the content of the Pointer once it has been used. A majority of values that are provided to the ++function callback points fall into this class. ++ ++### Owned Types ++ ++These types contain a pointer from the C server, but it is the responsibility of the Rust library ++to indicate when that pointer and it's content should be disposed of. This is generally handled ++by the `drop` trait, which is executed ... well, when an item is dropped. ++ ++### Dispatch from the wrapper to C ++ ++When a rust function against a wrapper is called, the type internally accesses it Ref type and ++uses the ptr to dispatch into the C server. Any required invariants are upheld, and results are ++mapped as required to match what rust callers expect. ++ ++As a result, this involves horrendous amounts of unsafe, and a detailed analysis of both the DS C ++api, what it expects, and the Rust nomicon to ensure you maintain all the invariants. ++ ++## Conclusion ++ ++Providing a bridge between C and Rust is challenging - but achievable - the result is plugins that ++are clean, safe, efficent. ++ ++ ++ +diff --git a/src/slapi_r_plugin/build.rs b/src/slapi_r_plugin/build.rs +new file mode 100644 +index 000000000..29bbd52d4 +--- /dev/null ++++ b/src/slapi_r_plugin/build.rs +@@ -0,0 +1,8 @@ ++use std::env; ++ ++fn main() { ++ if let Ok(lib_dir) = env::var("SLAPD_DYLIB_DIR") { ++ println!("cargo:rustc-link-lib=dylib=slapd"); ++ println!("cargo:rustc-link-search=native={}", lib_dir); ++ } ++} +diff --git a/src/slapi_r_plugin/src/backend.rs b/src/slapi_r_plugin/src/backend.rs +new file mode 100644 +index 000000000..f308295aa +--- /dev/null ++++ b/src/slapi_r_plugin/src/backend.rs +@@ -0,0 +1,71 @@ ++use crate::dn::SdnRef; ++use crate::pblock::Pblock; ++// use std::ops::Deref; ++ ++extern "C" { ++ fn slapi_back_transaction_begin(pb: *const libc::c_void) -> i32; ++ fn slapi_back_transaction_commit(pb: *const libc::c_void); ++ fn slapi_back_transaction_abort(pb: *const libc::c_void); ++ fn slapi_be_select_exact(sdn: *const libc::c_void) -> *const libc::c_void; ++} ++ ++pub struct BackendRef { ++ raw_be: *const libc::c_void, ++} ++ ++impl BackendRef { ++ pub fn new(dn: &SdnRef) -> Result { ++ let raw_be = unsafe { slapi_be_select_exact(dn.as_ptr()) }; ++ if raw_be.is_null() { ++ Err(()) ++ } else { ++ Ok(BackendRef { raw_be }) ++ } ++ } ++ ++ pub(crate) fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_be ++ } ++ ++ pub fn begin_txn(self) -> Result { ++ let mut pb = Pblock::new(); ++ if pb.set_op_backend(&self) != 0 { ++ return Err(()); ++ } ++ let rc = unsafe { slapi_back_transaction_begin(pb.as_ptr()) }; ++ if rc != 0 { ++ Err(()) ++ } else { ++ Ok(BackendRefTxn { ++ pb, ++ be: self, ++ committed: false, ++ }) ++ } ++ } ++} ++ ++pub struct BackendRefTxn { ++ pb: Pblock, ++ be: BackendRef, ++ committed: bool, ++} ++ ++impl BackendRefTxn { ++ pub fn commit(mut self) { ++ self.committed = true; ++ unsafe { ++ slapi_back_transaction_commit(self.pb.as_ptr()); ++ } ++ } ++} ++ ++impl Drop for BackendRefTxn { ++ fn drop(&mut self) { ++ if self.committed == false { ++ unsafe { ++ slapi_back_transaction_abort(self.pb.as_ptr()); ++ } ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/ber.rs b/src/slapi_r_plugin/src/ber.rs +new file mode 100644 +index 000000000..a501fd642 +--- /dev/null ++++ b/src/slapi_r_plugin/src/ber.rs +@@ -0,0 +1,90 @@ ++use crate::log::{log_error, ErrorLevel}; ++use libc; ++use std::ffi::CString; ++// use std::ptr; ++use std::slice; ++ ++use std::convert::TryFrom; ++use uuid::Uuid; ++ ++use crate::error::PluginError; ++ ++#[repr(C)] ++pub(crate) struct ol_berval { ++ pub len: usize, ++ pub data: *const u8, ++} ++ ++#[derive(Debug)] ++pub struct BerValRef { ++ pub(crate) raw_berval: *const ol_berval, ++} ++ ++impl BerValRef { ++ pub fn new(raw_berval: *const libc::c_void) -> Self { ++ // so we retype this ++ let raw_berval = raw_berval as *const ol_berval; ++ BerValRef { raw_berval } ++ } ++ ++ pub(crate) fn into_cstring(&self) -> Option { ++ // Cstring does not need a trailing null, so if we have one, ignore it. ++ let l: usize = unsafe { (*self.raw_berval).len }; ++ let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l) }; ++ CString::new(d_slice) ++ .or_else(|e| { ++ // Try it again, but with one byte less to trim a potential trailing null that ++ // could have been allocated, and ensure it has at least 1 byte of good data ++ // remaining. ++ if l > 1 { ++ let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l - 1) }; ++ CString::new(d_slice) ++ } else { ++ Err(e) ++ } ++ }) ++ .map_err(|_| { ++ log_error!( ++ ErrorLevel::Trace, ++ "invalid ber parse attempt, may contain a null byte? -> {:?}", ++ self ++ ); ++ () ++ }) ++ .ok() ++ } ++ ++ pub fn into_string(&self) -> Option { ++ // Convert a Some to a rust string. ++ self.into_cstring().and_then(|v| { ++ v.into_string() ++ .map_err(|_| { ++ log_error!( ++ ErrorLevel::Trace, ++ "failed to convert cstring to string -> {:?}", ++ self ++ ); ++ () ++ }) ++ .ok() ++ }) ++ } ++} ++ ++impl TryFrom<&BerValRef> for Uuid { ++ type Error = PluginError; ++ ++ fn try_from(value: &BerValRef) -> Result { ++ let val_string = value.into_string().ok_or(PluginError::BervalString)?; ++ ++ Uuid::parse_str(val_string.as_str()) ++ .map(|r| { ++ log_error!(ErrorLevel::Trace, "valid uuid -> {:?}", r); ++ r ++ }) ++ .map_err(|_e| { ++ log_error!(ErrorLevel::Plugin, "Invalid uuid"); ++ PluginError::InvalidSyntax ++ }) ++ } ++} +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +new file mode 100644 +index 000000000..cf76ccbdb +--- /dev/null ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -0,0 +1,203 @@ ++use crate::error::RPluginError; ++use std::convert::TryFrom; ++use std::os::raw::c_char; ++ ++pub const LDAP_SUCCESS: i32 = 0; ++pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50; ++ ++#[repr(i32)] ++/// The set of possible function handles we can register via the pblock. These ++/// values correspond to slapi-plugin.h. ++pub enum PluginFnType { ++ /// SLAPI_PLUGIN_DESTROY_FN ++ Destroy = 11, ++ /// SLAPI_PLUGIN_CLOSE_FN ++ Close = 210, ++ /// SLAPI_PLUGIN_START_FN ++ Start = 212, ++ /// SLAPI_PLUGIN_PRE_BIND_FN ++ PreBind = 401, ++ /// SLAPI_PLUGIN_PRE_UNBIND_FN ++ PreUnbind = 402, ++ /// SLAPI_PLUGIN_PRE_SEARCH_FN ++ PreSearch = 403, ++ /// SLAPI_PLUGIN_PRE_COMPARE_FN ++ PreCompare = 404, ++ /// SLAPI_PLUGIN_PRE_MODIFY_FN ++ PreModify = 405, ++ /// SLAPI_PLUGIN_PRE_MODRDN_FN ++ PreModRDN = 406, ++ /// SLAPI_PLUGIN_PRE_ADD_FN ++ PreAdd = 407, ++ /// SLAPI_PLUGIN_PRE_DELETE_FN ++ PreDelete = 408, ++ /// SLAPI_PLUGIN_PRE_ABANDON_FN ++ PreAbandon = 409, ++ /// SLAPI_PLUGIN_PRE_ENTRY_FN ++ PreEntry = 410, ++ /// SLAPI_PLUGIN_PRE_REFERRAL_FN ++ PreReferal = 411, ++ /// SLAPI_PLUGIN_PRE_RESULT_FN ++ PreResult = 412, ++ /// SLAPI_PLUGIN_PRE_EXTOP_FN ++ PreExtop = 413, ++ /// SLAPI_PLUGIN_BE_PRE_ADD_FN ++ BeTxnPreAdd = 460, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN ++ BeTxnPreModify = 461, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN ++ BeTxnPreModRDN = 462, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN ++ BeTxnPreDelete = 463, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_TOMBSTONE_FN ++ BeTxnPreDeleteTombstone = 464, ++ /// SLAPI_PLUGIN_POST_SEARCH_FN ++ PostSearch = 503, ++ /// SLAPI_PLUGIN_BE_POST_ADD_FN ++ BeTxnPostAdd = 560, ++ /// SLAPI_PLUGIN_BE_POST_MODIFY_FN ++ BeTxnPostModify = 561, ++ /// SLAPI_PLUGIN_BE_POST_MODRDN_FN ++ BeTxnPostModRDN = 562, ++ /// SLAPI_PLUGIN_BE_POST_DELETE_FN ++ BeTxnPostDelete = 563, ++ ++ /// SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ MRFilterCreate = 600, ++ /// SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ MRIndexerCreate = 601, ++ /// SLAPI_PLUGIN_MR_FILTER_AVA ++ MRFilterAva = 618, ++ /// SLAPI_PLUGIN_MR_FILTER_SUB ++ MRFilterSub = 619, ++ /// SLAPI_PLUGIN_MR_VALUES2KEYS ++ MRValuesToKeys = 620, ++ /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ MRAssertionToKeysAva = 621, ++ /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ MRAssertionToKeysSub = 622, ++ /// SLAPI_PLUGIN_MR_COMPARE ++ MRCompare = 625, ++ /// SLAPI_PLUGIN_MR_NORMALIZE ++ MRNormalize = 626, ++ ++ /// SLAPI_PLUGIN_SYNTAX_FILTER_AVA ++ SyntaxFilterAva = 700, ++ /// SLAPI_PLUGIN_SYNTAX_FILTER_SUB ++ SyntaxFilterSub = 701, ++ /// SLAPI_PLUGIN_SYNTAX_VALUES2KEYS ++ SyntaxValuesToKeys = 702, ++ /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_AVA ++ SyntaxAssertion2KeysAva = 703, ++ /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_SUB ++ SyntaxAssertion2KeysSub = 704, ++ /// SLAPI_PLUGIN_SYNTAX_FLAGS ++ SyntaxFlags = 707, ++ /// SLAPI_PLUGIN_SYNTAX_COMPARE ++ SyntaxCompare = 708, ++ /// SLAPI_PLUGIN_SYNTAX_VALIDATE ++ SyntaxValidate = 710, ++ /// SLAPI_PLUGIN_SYNTAX_NORMALIZE ++ SyntaxNormalize = 711, ++} ++ ++static SV01: [u8; 3] = [b'0', b'1', b'\0']; ++static SV02: [u8; 3] = [b'0', b'2', b'\0']; ++static SV03: [u8; 3] = [b'0', b'3', b'\0']; ++ ++/// Corresponding plugin versions ++pub enum PluginVersion { ++ /// SLAPI_PLUGIN_VERSION_01 ++ V01, ++ /// SLAPI_PLUGIN_VERSION_02 ++ V02, ++ /// SLAPI_PLUGIN_VERSION_03 ++ V03, ++} ++ ++impl PluginVersion { ++ pub fn to_char_ptr(&self) -> *const c_char { ++ match self { ++ PluginVersion::V01 => &SV01 as *const _ as *const c_char, ++ PluginVersion::V02 => &SV02 as *const _ as *const c_char, ++ PluginVersion::V03 => &SV03 as *const _ as *const c_char, ++ } ++ } ++} ++ ++static SMATCHINGRULE: [u8; 13] = [ ++ b'm', b'a', b't', b'c', b'h', b'i', b'n', b'g', b'r', b'u', b'l', b'e', b'\0', ++]; ++ ++pub enum PluginType { ++ MatchingRule, ++} ++ ++impl PluginType { ++ pub fn to_char_ptr(&self) -> *const c_char { ++ match self { ++ PluginType::MatchingRule => &SMATCHINGRULE as *const _ as *const c_char, ++ } ++ } ++} ++ ++#[repr(i32)] ++/// data types that we can get or retrieve from the pblock. This is only ++/// used internally. ++pub(crate) enum PblockType { ++ /// SLAPI_PLUGIN_PRIVATE ++ _PrivateData = 4, ++ /// SLAPI_PLUGIN_VERSION ++ Version = 8, ++ /// SLAPI_PLUGIN_DESCRIPTION ++ _Description = 12, ++ /// SLAPI_PLUGIN_IDENTITY ++ Identity = 13, ++ /// SLAPI_PLUGIN_INTOP_RESULT ++ OpResult = 15, ++ /// SLAPI_ADD_ENTRY ++ AddEntry = 60, ++ /// SLAPI_BACKEND ++ Backend = 130, ++ /// SLAPI_PLUGIN_MR_NAMES ++ MRNames = 624, ++ /// SLAPI_PLUGIN_SYNTAX_NAMES ++ SyntaxNames = 705, ++ /// SLAPI_PLUGIN_SYNTAX_OID ++ SyntaxOid = 706, ++} ++ ++/// See ./ldap/include/ldaprot.h ++#[derive(PartialEq)] ++pub enum FilterType { ++ And = 0xa0, ++ Or = 0xa1, ++ Not = 0xa2, ++ Equality = 0xa3, ++ Substring = 0xa4, ++ Ge = 0xa5, ++ Le = 0xa6, ++ Present = 0x87, ++ Approx = 0xa8, ++ Extended = 0xa9, ++} ++ ++impl TryFrom for FilterType { ++ type Error = RPluginError; ++ ++ fn try_from(value: i32) -> Result { ++ match value { ++ 0xa0 => Ok(FilterType::And), ++ 0xa1 => Ok(FilterType::Or), ++ 0xa2 => Ok(FilterType::Not), ++ 0xa3 => Ok(FilterType::Equality), ++ 0xa4 => Ok(FilterType::Substring), ++ 0xa5 => Ok(FilterType::Ge), ++ 0xa6 => Ok(FilterType::Le), ++ 0x87 => Ok(FilterType::Present), ++ 0xa8 => Ok(FilterType::Approx), ++ 0xa9 => Ok(FilterType::Extended), ++ _ => Err(RPluginError::FilterType), ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/dn.rs b/src/slapi_r_plugin/src/dn.rs +new file mode 100644 +index 000000000..5f8a65743 +--- /dev/null ++++ b/src/slapi_r_plugin/src/dn.rs +@@ -0,0 +1,108 @@ ++use std::convert::TryFrom; ++use std::ffi::{CStr, CString}; ++use std::ops::Deref; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_sdn_get_dn(sdn: *const libc::c_void) -> *const c_char; ++ fn slapi_sdn_new_dn_byval(dn: *const c_char) -> *const libc::c_void; ++ fn slapi_sdn_issuffix(sdn: *const libc::c_void, suffix_sdn: *const libc::c_void) -> i32; ++ fn slapi_sdn_free(sdn: *const *const libc::c_void); ++ fn slapi_sdn_dup(sdn: *const libc::c_void) -> *const libc::c_void; ++} ++ ++#[derive(Debug)] ++pub struct SdnRef { ++ raw_sdn: *const libc::c_void, ++} ++ ++#[derive(Debug)] ++pub struct NdnRef { ++ raw_ndn: *const c_char, ++} ++ ++#[derive(Debug)] ++pub struct Sdn { ++ value: SdnRef, ++} ++ ++unsafe impl Send for Sdn {} ++ ++impl From<&CStr> for Sdn { ++ fn from(value: &CStr) -> Self { ++ Sdn { ++ value: SdnRef { ++ raw_sdn: unsafe { slapi_sdn_new_dn_byval(value.as_ptr()) }, ++ }, ++ } ++ } ++} ++ ++impl TryFrom<&str> for Sdn { ++ type Error = (); ++ ++ fn try_from(value: &str) -> Result { ++ let cstr = CString::new(value).map_err(|_| ())?; ++ Ok(Self::from(cstr.as_c_str())) ++ } ++} ++ ++impl Clone for Sdn { ++ fn clone(&self) -> Self { ++ let raw_sdn = unsafe { slapi_sdn_dup(self.value.raw_sdn) }; ++ Sdn { ++ value: SdnRef { raw_sdn }, ++ } ++ } ++} ++ ++impl Drop for Sdn { ++ fn drop(&mut self) { ++ unsafe { slapi_sdn_free(&self.value.raw_sdn as *const *const libc::c_void) } ++ } ++} ++ ++impl Deref for Sdn { ++ type Target = SdnRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl SdnRef { ++ pub fn new(raw_sdn: *const libc::c_void) -> Self { ++ SdnRef { raw_sdn } ++ } ++ ++ /// This is unsafe, as you need to ensure that the SdnRef associated lives at ++ /// least as long as the NdnRef, else this may cause a use-after-free. ++ pub unsafe fn as_ndnref(&self) -> NdnRef { ++ let raw_ndn = slapi_sdn_get_dn(self.raw_sdn); ++ NdnRef { raw_ndn } ++ } ++ ++ pub fn to_dn_string(&self) -> String { ++ let dn_raw = unsafe { slapi_sdn_get_dn(self.raw_sdn) }; ++ let dn_cstr = unsafe { CStr::from_ptr(dn_raw) }; ++ dn_cstr.to_string_lossy().to_string() ++ } ++ ++ pub(crate) fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_sdn ++ } ++ ++ pub fn is_below_suffix(&self, other: &SdnRef) -> bool { ++ if unsafe { slapi_sdn_issuffix(self.raw_sdn, other.raw_sdn) } == 0 { ++ false ++ } else { ++ true ++ } ++ } ++} ++ ++impl NdnRef { ++ pub(crate) fn as_ptr(&self) -> *const c_char { ++ self.raw_ndn ++ } ++} +diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs +new file mode 100644 +index 000000000..034efe692 +--- /dev/null ++++ b/src/slapi_r_plugin/src/entry.rs +@@ -0,0 +1,92 @@ ++use crate::dn::SdnRef; ++use crate::value::{slapi_value, ValueArrayRef, ValueRef}; ++use std::ffi::CString; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_entry_get_sdn(e: *const libc::c_void) -> *const libc::c_void; ++ fn slapi_entry_add_value( ++ e: *const libc::c_void, ++ a: *const c_char, ++ v: *const slapi_value, ++ ) -> i32; ++ fn slapi_entry_attr_get_valuearray( ++ e: *const libc::c_void, ++ a: *const c_char, ++ ) -> *const *const slapi_value; ++} ++ ++pub struct EntryRef { ++ raw_e: *const libc::c_void, ++} ++ ++/* ++pub struct Entry { ++ value: EntryRef, ++} ++ ++impl Drop for Entry { ++ fn drop(&mut self) { ++ () ++ } ++} ++ ++impl Deref for Entry { ++ type Target = EntryRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl Entry { ++ // Forget about this value, and get a pointer back suitable for providing to directory ++ // server to take ownership. ++ pub unsafe fn forget(self) -> *mut libc::c_void { ++ unimplemented!(); ++ } ++} ++*/ ++ ++impl EntryRef { ++ pub fn new(raw_e: *const libc::c_void) -> Self { ++ EntryRef { raw_e } ++ } ++ ++ // get the sdn ++ pub fn get_sdnref(&self) -> SdnRef { ++ let sdn_ptr = unsafe { slapi_entry_get_sdn(self.raw_e) }; ++ SdnRef::new(sdn_ptr) ++ } ++ ++ pub fn get_attr(&self, name: &str) -> Option { ++ let cname = CString::new(name).expect("invalid attr name"); ++ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) }; ++ ++ if va.is_null() { ++ None ++ } else { ++ Some(ValueArrayRef::new(va as *const libc::c_void)) ++ } ++ } ++ ++ pub fn add_value(&mut self, a: &str, v: &ValueRef) { ++ // turn the attr to a c string. ++ // TODO FIX ++ let attr_name = CString::new(a).expect("Invalid attribute name"); ++ // Get the raw ptr. ++ let raw_value_ref = unsafe { v.as_ptr() }; ++ // We ignore the return because it always returns 0. ++ let _ = unsafe { ++ // By default, this clones. ++ slapi_entry_add_value(self.raw_e, attr_name.as_ptr(), raw_value_ref) ++ }; ++ } ++ ++ /* ++ pub fn replace_value(&mut self, a: &str, v: &ValueRef) { ++ // slapi_entry_attr_replace(e, SLAPI_ATTR_ENTRYUSN, new_bvals); ++ unimplemented!(); ++ } ++ */ ++} +diff --git a/src/slapi_r_plugin/src/error.rs b/src/slapi_r_plugin/src/error.rs +new file mode 100644 +index 000000000..91c81cd26 +--- /dev/null ++++ b/src/slapi_r_plugin/src/error.rs +@@ -0,0 +1,61 @@ ++// use std::convert::TryFrom; ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum RPluginError { ++ Unknown = 500, ++ Unimplemented = 501, ++ FilterType = 502, ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum PluginError { ++ GenericFailure = -1, ++ Unknown = 1000, ++ Unimplemented = 1001, ++ Pblock = 1002, ++ BervalString = 1003, ++ InvalidSyntax = 1004, ++ InvalidFilter = 1005, ++ TxnFailure = 1006, ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum LDAPError { ++ Success = 0, ++ Operation = 1, ++ ObjectClassViolation = 65, ++ Other = 80, ++ Unknown = 999, ++} ++ ++impl From for LDAPError { ++ fn from(value: i32) -> Self { ++ match value { ++ 0 => LDAPError::Success, ++ 1 => LDAPError::Operation, ++ 65 => LDAPError::ObjectClassViolation, ++ 80 => LDAPError::Other, ++ _ => LDAPError::Unknown, ++ } ++ } ++} ++ ++// if we make debug impl, we can use this. ++// errmsg = ldap_err2string(result); ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum DseCallbackStatus { ++ DoNotApply = 0, ++ Ok = 1, ++ Error = -1, ++} ++ ++#[derive(Debug)] ++pub enum LoggingError { ++ Unknown, ++ CString(String), ++} +diff --git a/src/slapi_r_plugin/src/init.c b/src/slapi_r_plugin/src/init.c +new file mode 100644 +index 000000000..86d1235b8 +--- /dev/null ++++ b/src/slapi_r_plugin/src/init.c +@@ -0,0 +1,8 @@ ++ ++#include ++ ++int32_t ++do_nothing_really_well_abcdef() { ++ return 0; ++} ++ +diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs +new file mode 100644 +index 000000000..d7fc22e52 +--- /dev/null ++++ b/src/slapi_r_plugin/src/lib.rs +@@ -0,0 +1,36 @@ ++// extern crate lazy_static; ++ ++#[macro_use] ++pub mod macros; ++pub mod backend; ++pub mod ber; ++mod constants; ++pub mod dn; ++pub mod entry; ++pub mod error; ++pub mod log; ++pub mod pblock; ++pub mod plugin; ++pub mod search; ++pub mod syntax_plugin; ++pub mod task; ++pub mod value; ++ ++pub mod prelude { ++ pub use crate::backend::{BackendRef, BackendRefTxn}; ++ pub use crate::ber::BerValRef; ++ pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS}; ++ pub use crate::dn::{Sdn, SdnRef}; ++ pub use crate::entry::EntryRef; ++ pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError}; ++ pub use crate::log::{log_error, ErrorLevel}; ++ pub use crate::pblock::{Pblock, PblockRef}; ++ pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3}; ++ pub use crate::search::{Search, SearchScope}; ++ pub use crate::syntax_plugin::{ ++ matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr, ++ SlapiSubMr, SlapiSyntaxPlugin1, ++ }; ++ pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef}; ++ pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef}; ++} +diff --git a/src/slapi_r_plugin/src/log.rs b/src/slapi_r_plugin/src/log.rs +new file mode 100644 +index 000000000..f686ecd1a +--- /dev/null ++++ b/src/slapi_r_plugin/src/log.rs +@@ -0,0 +1,87 @@ ++use std::ffi::CString; ++use std::os::raw::c_char; ++ ++use crate::constants; ++use crate::error::LoggingError; ++ ++extern "C" { ++ fn slapi_log_error(level: i32, system: *const c_char, message: *const c_char) -> i32; ++} ++ ++pub fn log_error( ++ level: ErrorLevel, ++ subsystem: String, ++ message: String, ++) -> Result<(), LoggingError> { ++ let c_subsystem = CString::new(subsystem) ++ .map_err(|e| LoggingError::CString(format!("failed to convert subsystem -> {:?}", e)))?; ++ let c_message = CString::new(message) ++ .map_err(|e| LoggingError::CString(format!("failed to convert message -> {:?}", e)))?; ++ ++ match unsafe { slapi_log_error(level as i32, c_subsystem.as_ptr(), c_message.as_ptr()) } { ++ constants::LDAP_SUCCESS => Ok(()), ++ _ => Err(LoggingError::Unknown), ++ } ++} ++ ++#[repr(i32)] ++#[derive(Debug)] ++/// This is a safe rust representation of the values from slapi-plugin.h ++/// such as SLAPI_LOG_FATAL, SLAPI_LOG_TRACE, SLAPI_LOG_ ... These vaulues ++/// must matche their counter parts in slapi-plugin.h ++pub enum ErrorLevel { ++ /// Always log messages at this level. Soon to go away, see EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG ++ Fatal = 0, ++ /// Log detailed messages. ++ Trace = 1, ++ /// Log packet tracing. ++ Packets = 2, ++ /// Log argument tracing. ++ Args = 3, ++ /// Log connection tracking. ++ Conns = 4, ++ /// Log BER parsing. ++ Ber = 5, ++ /// Log filter processing. ++ Filter = 6, ++ /// Log configuration processing. ++ Config = 7, ++ /// Log access controls ++ Acl = 8, ++ /// Log .... ??? ++ Shell = 9, ++ /// Log .... ??? ++ Parse = 10, ++ /// Log .... ??? ++ House = 11, ++ /// Log detailed replication information. ++ Repl = 12, ++ /// Log cache management. ++ Cache = 13, ++ /// Log detailed plugin operations. ++ Plugin = 14, ++ /// Log .... ??? ++ Timing = 15, ++ /// Log backend infomation. ++ BackLDBM = 16, ++ /// Log ACL processing. ++ AclSummary = 17, ++ /// Log nuncstans processing. ++ NuncStansDONOTUSE = 18, ++ /// Emergency messages. Server is bursting into flame. ++ Emerg = 19, ++ /// Important alerts, server may explode soon. ++ Alert = 20, ++ /// Critical messages, but the server isn't going to explode. Admin should intervene. ++ Crit = 21, ++ /// Error has occured, but we can keep going. Could indicate misconfiguration. ++ Error = 22, ++ /// Warning about an issue that isn't very important. Good to resolve though. ++ Warning = 23, ++ /// Inform the admin of something that they should know about, IE server is running now. ++ Notice = 24, ++ /// Informational messages that are nice to know. ++ Info = 25, ++ /// Debugging information from the server. ++ Debug = 26, ++} +diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs +new file mode 100644 +index 000000000..030449632 +--- /dev/null ++++ b/src/slapi_r_plugin/src/macros.rs +@@ -0,0 +1,835 @@ ++#[macro_export] ++macro_rules! log_error { ++ ($level:expr, $($arg:tt)*) => ({ ++ use std::fmt; ++ match log_error( ++ $level, ++ format!("{}:{}", file!(), line!()), ++ format!("{}\n", fmt::format(format_args!($($arg)*))) ++ ) { ++ Ok(_) => {}, ++ Err(e) => { ++ eprintln!("A logging error occured {}, {} -> {:?}", file!(), line!(), e); ++ } ++ }; ++ }) ++} ++ ++#[macro_export] ++macro_rules! slapi_r_plugin_hooks { ++ ($mod_ident:ident, $hooks_ident:ident) => ( ++ paste::item! { ++ use libc; ++ ++ static mut PLUGINID: *const libc::c_void = std::ptr::null(); ++ ++ pub(crate) fn plugin_id() -> PluginIdRef { ++ PluginIdRef { ++ raw_pid: unsafe { PLUGINID } ++ } ++ } ++ ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, "it's alive!\n"); ++ ++ match pb.set_plugin_version(PluginVersion::V03) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Setup the plugin id. ++ unsafe { ++ PLUGINID = pb.get_plugin_identity(); ++ } ++ ++ if $hooks_ident::has_betxn_pre_modify() { ++ match pb.register_betxn_pre_modify_fn([<$mod_ident _plugin_betxn_pre_modify>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ if $hooks_ident::has_betxn_pre_add() { ++ match pb.register_betxn_pre_add_fn([<$mod_ident _plugin_betxn_pre_add>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ // set the start fn ++ match pb.register_start_fn([<$mod_ident _plugin_start>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // set the close fn ++ match pb.register_close_fn([<$mod_ident _plugin_close>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_start>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ if let Some(task_ident) = $hooks_ident::has_task_handler() { ++ match task_register_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>], &mut pb) { ++ 0 => {}, ++ e => return e, ++ }; ++ }; ++ ++ match $hooks_ident::start(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_close>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ if let Some(task_ident) = $hooks_ident::has_task_handler() { ++ match task_unregister_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ }; ++ ++ match $hooks_ident::close(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_betxn_pre_modify>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ match $hooks_ident::betxn_pre_modify(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_betxn_pre_add>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ match $hooks_ident::betxn_pre_add(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_task_handler>]( ++ raw_pb: *const libc::c_void, ++ raw_e_before: *const libc::c_void, ++ _raw_e_after: *const libc::c_void, ++ raw_returncode: *mut i32, ++ _raw_returntext: *mut c_char, ++ raw_arg: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ let e_before = EntryRef::new(raw_e_before); ++ // let e_after = EntryRef::new(raw_e_after); ++ ++ let task_data = match $hooks_ident::task_validate( ++ &e_before ++ ) { ++ Ok(data) => data, ++ Err(retcode) => { ++ unsafe { *raw_returncode = retcode as i32 }; ++ return DseCallbackStatus::Error as i32 ++ } ++ }; ++ ++ let mut task = Task::new(&e_before, raw_arg); ++ task.register_destructor_fn([<$mod_ident _plugin_task_destructor>]); ++ ++ // Setup the task thread and then run it. Remember, because Rust is ++ // smarter about memory, the move statement here moves the task wrapper and ++ // task_data to the thread, so they drop on thread close. No need for a ++ // destructor beyond blocking on the thread to complete. ++ std::thread::spawn(move || { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread => begin")); ++ // Indicate the task is begun ++ task.begin(); ++ // Start a txn ++ let be: Option = match $hooks_ident::task_be_dn_hint(&task_data) ++ .map(|be_dn| { ++ BackendRef::new(&be_dn) ++ }) ++ .transpose() { ++ Ok(v) => v, ++ Err(_) => { ++ log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> selected dn does not exist")); ++ task.error(PluginError::TxnFailure as i32); ++ return; ++ } ++ }; ++ let be_txn: Option = match be { ++ Some(b) => { ++ match b.begin_txn() { ++ Ok(txn) => Some(txn), ++ Err(_) => { ++ log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> unable to begin txn")); ++ task.error(PluginError::TxnFailure as i32); ++ return; ++ } ++ } ++ } ++ None => None, ++ }; ++ ++ // Abort or commit the txn here. ++ match $hooks_ident::task_handler(&mut task, task_data) { ++ Ok(_data) => { ++ match be_txn { ++ Some(be_txn) => be_txn.commit(), ++ None => {} ++ }; ++ // These will set the status, and guarantee the drop ++ task.success(); ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "{}_plugin_task_thread => task error -> {:?}", stringify!($mod_ident), e); ++ // These will set the status, and guarantee the drop ++ task.error(e as i32); ++ // On drop, be_txn implicitly aborts. ++ } ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread <= complete")); ++ }); ++ ++ // Indicate that the thread started just fine. ++ unsafe { *raw_returncode = LDAP_SUCCESS }; ++ DseCallbackStatus::Ok as i32 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_task_destructor>]( ++ raw_task: *const libc::c_void, ++ ) { ++ // Simply block until the task refcount drops to 0. ++ let task = TaskRef::new(raw_task); ++ task.block(); ++ } ++ ++ } // end paste ++ ) ++} // end macro ++ ++#[macro_export] ++macro_rules! slapi_r_syntax_plugin_hooks { ++ ( ++ $mod_ident:ident, ++ $hooks_ident:ident ++ ) => ( ++ paste::item! { ++ use libc; ++ use std::convert::TryFrom; ++ ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks => begin"); ++ // Setup our plugin ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Setup the names/oids that this plugin provides syntaxes for. ++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) }; ++ match pb.register_syntax_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) }; ++ match pb.register_syntax_oid(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ match pb.register_syntax_validate_fn([<$mod_ident _plugin_syntax_validate>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Now setup the MR's ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::eq_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_eq_mr_init"), ++ [<$mod_ident _plugin_eq_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ if $hooks_ident::sub_mr_oid().is_some() { ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::sub_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_ord_mr_init"), ++ [<$mod_ident _plugin_ord_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ if $hooks_ident::ord_mr_oid().is_some() { ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::ord_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_ord_mr_init"), ++ [<$mod_ident _plugin_ord_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks <= success"); ++ ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_syntax_validate>]( ++ raw_berval: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate => begin")); ++ ++ let bval = BerValRef::new(raw_berval); ++ ++ match $hooks_ident::syntax_validate(&bval) { ++ Ok(()) => { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate <= success")); ++ LDAP_SUCCESS ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Warning, ++ "{}_plugin_syntax_validate error -> {:?}", stringify!($mod_ident), e ++ ); ++ e as i32 ++ } ++ } ++ } ++ ++ // All the MR types share this. ++ pub extern "C" fn [<$mod_ident _plugin_mr_filter_ava>]( ++ raw_pb: *const libc::c_void, ++ raw_bvfilter: *const libc::c_void, ++ raw_bvals: *const libc::c_void, ++ i_ftype: i32, ++ _retval: *mut libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava => begin")); ++ let mut pb = PblockRef::new(raw_pb); ++ let bvfilter = BerValRef::new(raw_bvfilter); ++ let bvals = ValueArrayRef::new(raw_bvals); ++ let ftype = match FilterType::try_from(i_ftype) { ++ Ok(f) => f, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "{}_plugin_ord_mr_filter_ava Error -> {:?}", ++ stringify!($mod_ident), e); ++ return e as i32 ++ } ++ }; ++ ++ let r: Result = match ftype { ++ FilterType::And | FilterType::Or | FilterType::Not => { ++ Err(PluginError::InvalidFilter) ++ } ++ FilterType::Equality => { ++ $hooks_ident::filter_ava_eq(&mut pb, &bvfilter, &bvals) ++ } ++ FilterType::Substring => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Ge => { ++ $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals) ++ .map(|o_ord| { ++ match o_ord { ++ Some(Ordering::Greater) | Some(Ordering::Equal) => true, ++ Some(Ordering::Less) | None => false, ++ } ++ }) ++ } ++ FilterType::Le => { ++ $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals) ++ .map(|o_ord| { ++ match o_ord { ++ Some(Ordering::Less) | Some(Ordering::Equal) => true, ++ Some(Ordering::Greater) | None => false, ++ } ++ }) ++ } ++ FilterType::Present => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Approx => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Extended => { ++ Err(PluginError::Unimplemented) ++ } ++ }; ++ ++ match r { ++ Ok(b) => { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava <= success")); ++ // rust bool into i32 will become 0 false, 1 true. However, ds expects 0 true and 1 false for ++ // for the filter_ava match. So we flip the bool, and send it back. ++ (!b) as i32 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Warning, ++ "{}_plugin_mr_filter_ava error -> {:?}", ++ stringify!($mod_ident), e ++ ); ++ e as i32 ++ } ++ } ++ } ++ ++ ++ // EQ MR plugin hooks ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_init>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init => begin")); ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) }; ++ // SLAPI_PLUGIN_MR_NAMES ++ match pb.register_mr_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // description ++ // SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ match pb.register_mr_filter_create_fn([<$mod_ident _plugin_eq_mr_filter_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_eq_mr_indexer_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_AVA ++ match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_SUB ++ match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_eq_mr_filter_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_VALUES2KEYS ++ match pb.register_mr_values2keys_fn([<$mod_ident _plugin_eq_mr_filter_values2keys>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_COMPARE ++ match pb.register_mr_compare_fn([<$mod_ident _plugin_eq_mr_filter_compare>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_NORMALIZE ++ ++ // Finaly, register the MR ++ match unsafe { matchingrule_register($hooks_ident::eq_mr_oid(), $hooks_ident::eq_mr_name(), $hooks_ident::eq_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ raw_vals: *const libc::c_void, ++ raw_ivals: *mut libc::c_void, ++ i_ftype: i32, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys => begin")); ++ let mut pb = PblockRef::new(raw_pb); ++ let vals = ValueArrayRef::new(raw_vals); ++ let ftype = match FilterType::try_from(i_ftype) { ++ Ok(f) => f, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> {:?}", ++ stringify!($mod_ident), ++ e); ++ return e as i32 ++ } ++ }; ++ ++ if (ftype != FilterType::Equality && ftype != FilterType::Approx) { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> Invalid Filter type", ++ stringify!($mod_ident), ++ ); ++ return PluginError::InvalidFilter as i32 ++ } ++ ++ let va = match $hooks_ident::eq_mr_filter_values2keys(&mut pb, &vals) { ++ Ok(va) => va, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> {:?}", ++ stringify!($mod_ident), ++ e); ++ return e as i32 ++ } ++ }; ++ ++ // Now, deconstruct the va, get the pointer, and put it into the ivals. ++ unsafe { ++ let ivals_ptr: *mut *const libc::c_void = raw_ivals as *mut _; ++ (*ivals_ptr) = va.take_ownership() as *const libc::c_void; ++ } ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // This is probably another char pointer. ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_compare>]( ++ raw_va: *const libc::c_void, ++ raw_vb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare <= success")); ++ 0 ++ } ++ ++ // SUB MR plugin hooks ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // Probably a char array ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_compare>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare <= success")); ++ 0 ++ } ++ ++ // ORD MR plugin hooks ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_init>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init => begin")); ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) }; ++ // SLAPI_PLUGIN_MR_NAMES ++ match pb.register_mr_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // description ++ // SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ match pb.register_mr_filter_create_fn([<$mod_ident _plugin_ord_mr_filter_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_ord_mr_indexer_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_AVA ++ match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_SUB ++ match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_ord_mr_filter_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_VALUES2KEYS ++ /* ++ match pb.register_mr_values2keys_fn([<$mod_ident _plugin_ord_mr_filter_values2keys>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ */ ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_COMPARE ++ match pb.register_mr_compare_fn([<$mod_ident _plugin_ord_mr_filter_compare>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_NORMALIZE ++ ++ // Finaly, register the MR ++ match unsafe { matchingrule_register($hooks_ident::ord_mr_oid().unwrap(), $hooks_ident::ord_mr_name(), $hooks_ident::ord_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // probably char pointers ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_compare>]( ++ raw_va: *const libc::c_void, ++ raw_vb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare => begin")); ++ let va = BerValRef::new(raw_va); ++ let vb = BerValRef::new(raw_vb); ++ let rc = match $hooks_ident::filter_compare(&va, &vb) { ++ Ordering::Less => -1, ++ Ordering::Equal => 0, ++ Ordering::Greater => 1, ++ }; ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare <= success")); ++ rc ++ } ++ ++ } // end paste ++ ) ++} // end macro ++ ++#[macro_export] ++macro_rules! slapi_r_search_callback_mapfn { ++ ( ++ $mod_ident:ident, ++ $cb_target_ident:ident, ++ $cb_mod_ident:ident ++ ) => { ++ paste::item! { ++ #[no_mangle] ++ pub extern "C" fn [<$cb_target_ident>]( ++ raw_e: *const libc::c_void, ++ raw_data: *const libc::c_void, ++ ) -> i32 { ++ let e = EntryRef::new(raw_e); ++ let data_ptr = raw_data as *const _; ++ let data = unsafe { &(*data_ptr) }; ++ match $cb_mod_ident(e, data) { ++ Ok(_) => LDAPError::Success as i32, ++ Err(e) => e as i32, ++ } ++ } ++ } // end paste ++ }; ++} // end macro +diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs +new file mode 100644 +index 000000000..b69ce1680 +--- /dev/null ++++ b/src/slapi_r_plugin/src/pblock.rs +@@ -0,0 +1,275 @@ ++use libc; ++use std::ops::{Deref, DerefMut}; ++use std::os::raw::c_char; ++use std::ptr; ++ ++use crate::backend::BackendRef; ++use crate::constants::{PblockType, PluginFnType, PluginVersion}; ++use crate::entry::EntryRef; ++pub use crate::log::{log_error, ErrorLevel}; ++ ++extern "C" { ++ fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; ++ fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; ++ fn slapi_pblock_new() -> *const libc::c_void; ++} ++ ++pub struct Pblock { ++ value: PblockRef, ++} ++ ++impl Pblock { ++ pub fn new() -> Pblock { ++ let raw_pb = unsafe { slapi_pblock_new() }; ++ Pblock { ++ value: PblockRef { raw_pb }, ++ } ++ } ++} ++ ++impl Deref for Pblock { ++ type Target = PblockRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl DerefMut for Pblock { ++ fn deref_mut(&mut self) -> &mut Self::Target { ++ &mut self.value ++ } ++} ++ ++pub struct PblockRef { ++ raw_pb: *const libc::c_void, ++} ++ ++impl PblockRef { ++ pub fn new(raw_pb: *const libc::c_void) -> Self { ++ PblockRef { raw_pb } ++ } ++ ++ pub unsafe fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_pb ++ } ++ ++ fn set_pb_char_arr_ptr(&mut self, req_type: PblockType, ptr: *const *const c_char) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) } ++ } ++ ++ fn set_pb_char_ptr(&mut self, req_type: PblockType, ptr: *const c_char) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) } ++ } ++ ++ fn set_pb_fn_ptr( ++ &mut self, ++ fn_type: PluginFnType, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, fn_type as i32, value_ptr) } ++ } ++ ++ fn get_value_ptr(&mut self, req_type: PblockType) -> Result<*const libc::c_void, ()> { ++ let mut value: *mut libc::c_void = ptr::null::() as *mut libc::c_void; ++ let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void; ++ match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } { ++ 0 => Ok(value), ++ e => { ++ log_error!(ErrorLevel::Error, "enable to get from pblock -> {:?}", e); ++ Err(()) ++ } ++ } ++ } ++ ++ fn get_value_i32(&mut self, req_type: PblockType) -> Result { ++ let mut value: i32 = 0; ++ let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void; ++ match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } { ++ 0 => Ok(value), ++ e => { ++ log_error!(ErrorLevel::Error, "enable to get from pblock -> {:?}", e); ++ Err(()) ++ } ++ } ++ } ++ ++ pub fn register_start_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::Start, ptr) ++ } ++ ++ pub fn register_close_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::Close, ptr) ++ } ++ ++ pub fn register_betxn_pre_add_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::BeTxnPreAdd, ptr) ++ } ++ ++ pub fn register_betxn_pre_modify_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::BeTxnPreModify, ptr) ++ } ++ ++ pub fn register_syntax_filter_ava_fn( ++ &mut self, ++ ptr: extern "C" fn( ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ i32, ++ *mut core::ffi::c_void, ++ ) -> i32, ++ ) -> i32 { ++ // We can't use self.set_pb_fn_ptr here as the fn type sig is different. ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::SyntaxFilterAva as i32, value_ptr) } ++ } ++ ++ pub fn register_syntax_values2keys_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxValuesToKeys, ptr) ++ } ++ ++ pub fn register_syntax_assertion2keys_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxAssertion2KeysAva, ptr) ++ } ++ ++ pub fn register_syntax_flags_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxFlags, ptr) ++ } ++ ++ pub fn register_syntax_oid(&mut self, ptr: *const c_char) -> i32 { ++ self.set_pb_char_ptr(PblockType::SyntaxOid, ptr) ++ } ++ ++ pub fn register_syntax_compare_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxCompare, ptr) ++ } ++ ++ pub fn register_syntax_validate_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxValidate, ptr) ++ } ++ ++ pub fn register_syntax_names(&mut self, arr_ptr: *const *const c_char) -> i32 { ++ self.set_pb_char_arr_ptr(PblockType::SyntaxNames, arr_ptr) ++ } ++ ++ pub fn register_mr_filter_create_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRFilterCreate, ptr) ++ } ++ ++ pub fn register_mr_indexer_create_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRIndexerCreate, ptr) ++ } ++ ++ pub fn register_mr_filter_ava_fn( ++ &mut self, ++ ptr: extern "C" fn( ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ i32, ++ *mut core::ffi::c_void, ++ ) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRFilterAva as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_filter_sub_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRFilterSub, ptr) ++ } ++ ++ pub fn register_mr_values2keys_fn( ++ &mut self, ++ ptr: extern "C" fn( ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ *mut core::ffi::c_void, ++ i32, ++ ) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRValuesToKeys as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_assertion2keys_ava_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysAva, ptr) ++ } ++ ++ pub fn register_mr_assertion2keys_sub_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysSub, ptr) ++ } ++ ++ pub fn register_mr_compare_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRCompare as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_names(&mut self, arr_ptr: *const *const c_char) -> i32 { ++ self.set_pb_char_arr_ptr(PblockType::MRNames, arr_ptr) ++ } ++ ++ pub fn get_op_add_entryref(&mut self) -> Result { ++ self.get_value_ptr(PblockType::AddEntry) ++ .map(|ptr| EntryRef::new(ptr)) ++ } ++ ++ pub fn set_plugin_version(&mut self, vers: PluginVersion) -> i32 { ++ self.set_pb_char_ptr(PblockType::Version, vers.to_char_ptr()) ++ } ++ ++ pub fn set_op_backend(&mut self, be: &BackendRef) -> i32 { ++ unsafe { slapi_pblock_set(self.raw_pb, PblockType::Backend as i32, be.as_ptr()) } ++ } ++ ++ pub fn get_plugin_identity(&mut self) -> *const libc::c_void { ++ self.get_value_ptr(PblockType::Identity) ++ .unwrap_or(std::ptr::null()) ++ } ++ ++ pub fn get_op_result(&mut self) -> i32 { ++ self.get_value_i32(PblockType::OpResult).unwrap_or(-1) ++ } ++} +diff --git a/src/slapi_r_plugin/src/plugin.rs b/src/slapi_r_plugin/src/plugin.rs +new file mode 100644 +index 000000000..bf47779bc +--- /dev/null ++++ b/src/slapi_r_plugin/src/plugin.rs +@@ -0,0 +1,117 @@ ++use crate::constants::{PluginType, PLUGIN_DEFAULT_PRECEDENCE}; ++use crate::dn::Sdn; ++use crate::entry::EntryRef; ++use crate::error::LDAPError; ++use crate::error::PluginError; ++use crate::pblock::PblockRef; ++use crate::task::Task; ++use libc; ++use std::ffi::CString; ++use std::os::raw::c_char; ++use std::ptr; ++ ++extern "C" { ++ fn slapi_register_plugin_ext( ++ plugintype: *const c_char, ++ enabled: i32, ++ initsymbol: *const c_char, ++ initfunc: *const libc::c_void, ++ name: *const c_char, ++ argv: *const *const c_char, ++ group_identity: *const libc::c_void, ++ precedence: i32, ++ ) -> i32; ++} ++ ++pub struct PluginIdRef { ++ pub raw_pid: *const libc::c_void, ++} ++ ++pub fn register_plugin_ext( ++ ptype: PluginType, ++ plugname: &str, ++ initfnname: &str, ++ initfn: extern "C" fn(*const libc::c_void) -> i32, ++) -> i32 { ++ let c_plugname = match CString::new(plugname) { ++ Ok(c) => c, ++ Err(_) => return 1, ++ }; ++ let c_initfnname = match CString::new(initfnname) { ++ Ok(c) => c, ++ Err(_) => return 1, ++ }; ++ let argv = [c_plugname.as_ptr(), ptr::null()]; ++ let value_ptr: *const libc::c_void = initfn as *const libc::c_void; ++ ++ unsafe { ++ slapi_register_plugin_ext( ++ ptype.to_char_ptr(), ++ 1, ++ c_initfnname.as_ptr(), ++ value_ptr, ++ c_plugname.as_ptr(), ++ &argv as *const *const c_char, ++ ptr::null(), ++ PLUGIN_DEFAULT_PRECEDENCE, ++ ) ++ } ++} ++ ++pub trait SlapiPlugin3 { ++ // We require a newer rust for default associated types. ++ // type TaskData = (); ++ type TaskData; ++ ++ fn has_pre_modify() -> bool { ++ false ++ } ++ ++ fn has_post_modify() -> bool { ++ false ++ } ++ ++ fn has_pre_add() -> bool { ++ false ++ } ++ ++ fn has_post_add() -> bool { ++ false ++ } ++ ++ fn has_betxn_pre_modify() -> bool { ++ false ++ } ++ ++ fn has_betxn_pre_add() -> bool { ++ false ++ } ++ ++ fn has_task_handler() -> Option<&'static str> { ++ None ++ } ++ ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError>; ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError>; ++ ++ fn betxn_pre_modify(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ Err(PluginError::Unimplemented) ++ } ++ ++ fn betxn_pre_add(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ Err(PluginError::Unimplemented) ++ } ++ ++ fn task_validate(_e: &EntryRef) -> Result { ++ Err(LDAPError::Other) ++ } ++ ++ fn task_be_dn_hint(_data: &Self::TaskData) -> Option { ++ None ++ } ++ ++ fn task_handler(_task: &Task, _data: Self::TaskData) -> Result { ++ Err(PluginError::Unimplemented) ++ } ++} +diff --git a/src/slapi_r_plugin/src/search.rs b/src/slapi_r_plugin/src/search.rs +new file mode 100644 +index 000000000..e0e2a1fd7 +--- /dev/null ++++ b/src/slapi_r_plugin/src/search.rs +@@ -0,0 +1,127 @@ ++use crate::dn::SdnRef; ++use crate::error::{LDAPError, PluginError}; ++use crate::pblock::Pblock; ++use crate::plugin::PluginIdRef; ++use std::ffi::CString; ++use std::ops::Deref; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_search_internal_set_pb_ext( ++ pb: *const libc::c_void, ++ base: *const libc::c_void, ++ scope: i32, ++ filter: *const c_char, ++ attrs: *const *const c_char, ++ attrsonly: i32, ++ controls: *const *const libc::c_void, ++ uniqueid: *const c_char, ++ plugin_ident: *const libc::c_void, ++ op_flags: i32, ++ ); ++ fn slapi_search_internal_callback_pb( ++ pb: *const libc::c_void, ++ cb_data: *const libc::c_void, ++ cb_result_ptr: *const libc::c_void, ++ cb_entry_ptr: *const libc::c_void, ++ cb_referral_ptr: *const libc::c_void, ++ ) -> i32; ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum SearchScope { ++ Base = 0, ++ Onelevel = 1, ++ Subtree = 2, ++} ++ ++enum SearchType { ++ InternalMapEntry( ++ extern "C" fn(*const core::ffi::c_void, *const core::ffi::c_void) -> i32, ++ *const libc::c_void, ++ ), ++ // InternalMapResult ++ // InternalMapReferral ++} ++ ++pub struct Search { ++ pb: Pblock, ++ // This is so that the char * to the pb lives long enough as ds won't clone it. ++ filter: Option, ++ stype: SearchType, ++} ++ ++pub struct SearchResult { ++ pb: Pblock, ++} ++ ++impl Search { ++ pub fn new_map_entry( ++ basedn: &SdnRef, ++ scope: SearchScope, ++ filter: &str, ++ plugin_id: PluginIdRef, ++ cbdata: &T, ++ mapfn: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32, ++ ) -> Result ++ where ++ T: Send, ++ { ++ // Configure a search based on the requested type. ++ let pb = Pblock::new(); ++ let raw_filter = CString::new(filter).map_err(|_| PluginError::InvalidFilter)?; ++ ++ unsafe { ++ slapi_search_internal_set_pb_ext( ++ pb.deref().as_ptr(), ++ basedn.as_ptr(), ++ scope as i32, ++ raw_filter.as_ptr(), ++ std::ptr::null(), ++ 0, ++ std::ptr::null(), ++ std::ptr::null(), ++ plugin_id.raw_pid, ++ 0, ++ ) ++ }; ++ ++ Ok(Search { ++ pb, ++ filter: Some(raw_filter), ++ stype: SearchType::InternalMapEntry(mapfn, cbdata as *const _ as *const libc::c_void), ++ }) ++ } ++ ++ // Consume self, do the search ++ pub fn execute(self) -> Result { ++ // Deconstruct self ++ let Search { ++ mut pb, ++ filter: _filter, ++ stype, ++ } = self; ++ ++ // run the search based on the type. ++ match stype { ++ SearchType::InternalMapEntry(mapfn, cbdata) => unsafe { ++ slapi_search_internal_callback_pb( ++ pb.deref().as_ptr(), ++ cbdata, ++ std::ptr::null(), ++ mapfn as *const libc::c_void, ++ std::ptr::null(), ++ ); ++ }, ++ }; ++ ++ // now check the result, and map to what we need. ++ let result = pb.get_op_result(); ++ ++ match result { ++ 0 => Ok(SearchResult { pb }), ++ _e => Err(LDAPError::from(result)), ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs +new file mode 100644 +index 000000000..e7d5c01bd +--- /dev/null ++++ b/src/slapi_r_plugin/src/syntax_plugin.rs +@@ -0,0 +1,169 @@ ++use crate::ber::BerValRef; ++// use crate::constants::FilterType; ++use crate::error::PluginError; ++use crate::pblock::PblockRef; ++use crate::value::{ValueArray, ValueArrayRef}; ++use std::cmp::Ordering; ++use std::ffi::CString; ++use std::iter::once; ++use std::os::raw::c_char; ++use std::ptr; ++ ++// need a call to slapi_register_plugin_ext ++ ++extern "C" { ++ fn slapi_matchingrule_register(mr: *const slapi_matchingRuleEntry) -> i32; ++} ++ ++#[repr(C)] ++struct slapi_matchingRuleEntry { ++ mr_oid: *const c_char, ++ _mr_oidalias: *const c_char, ++ mr_name: *const c_char, ++ mr_desc: *const c_char, ++ mr_syntax: *const c_char, ++ _mr_obsolete: i32, // unused ++ mr_compat_syntax: *const *const c_char, ++} ++ ++pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char { ++ let n = CString::new(name) ++ .expect("An invalid string has been hardcoded!") ++ .into_boxed_c_str(); ++ let n_ptr = n.as_ptr(); ++ // Now we intentionally leak the name here, and the pointer will remain valid. ++ Box::leak(n); ++ n_ptr ++} ++ ++pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char { ++ let n_arr: Vec = names ++ .iter() ++ .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!")) ++ .collect(); ++ let n_arr = n_arr.into_boxed_slice(); ++ let n_ptr_arr: Vec<*const c_char> = n_arr ++ .iter() ++ .map(|v| v.as_ptr()) ++ .chain(once(ptr::null())) ++ .collect(); ++ let n_ptr_arr = n_ptr_arr.into_boxed_slice(); ++ ++ // Now we intentionally leak these names here, ++ let _r_n_arr = Box::leak(n_arr); ++ let r_n_ptr_arr = Box::leak(n_ptr_arr); ++ ++ let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char; ++ name_ptr ++} ++ ++// oid - the oid of the matching rule ++// name - the name of the mr ++// desc - description ++// syntax - the syntax of the attribute we apply to ++// compat_syntax - extended syntaxes f other attributes we may apply to. ++pub unsafe fn matchingrule_register( ++ oid: &str, ++ name: &str, ++ desc: &str, ++ syntax: &str, ++ compat_syntax: &[&str], ++) -> i32 { ++ let oid_ptr = name_to_leaking_char(oid); ++ let name_ptr = name_to_leaking_char(name); ++ let desc_ptr = name_to_leaking_char(desc); ++ let syntax_ptr = name_to_leaking_char(syntax); ++ let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax); ++ ++ let new_mr = slapi_matchingRuleEntry { ++ mr_oid: oid_ptr, ++ _mr_oidalias: ptr::null(), ++ mr_name: name_ptr, ++ mr_desc: desc_ptr, ++ mr_syntax: syntax_ptr, ++ _mr_obsolete: 0, ++ mr_compat_syntax: compat_syntax_ptr, ++ }; ++ ++ let new_mr_ptr = &new_mr as *const _; ++ slapi_matchingrule_register(new_mr_ptr) ++} ++ ++pub trait SlapiSyntaxPlugin1 { ++ fn attr_oid() -> &'static str; ++ ++ fn attr_compat_oids() -> Vec<&'static str>; ++ ++ fn attr_supported_names() -> Vec<&'static str>; ++ ++ fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError>; ++ ++ fn eq_mr_oid() -> &'static str; ++ ++ fn eq_mr_name() -> &'static str; ++ ++ fn eq_mr_desc() -> &'static str; ++ ++ fn eq_mr_supported_names() -> Vec<&'static str>; ++ ++ fn filter_ava_eq( ++ _pb: &mut PblockRef, ++ _bval_filter: &BerValRef, ++ _vals: &ValueArrayRef, ++ ) -> Result { ++ Ok(false) ++ } ++ ++ fn eq_mr_filter_values2keys( ++ _pb: &mut PblockRef, ++ _vals: &ValueArrayRef, ++ ) -> Result; ++} ++ ++pub trait SlapiOrdMr: SlapiSyntaxPlugin1 { ++ fn ord_mr_oid() -> Option<&'static str> { ++ None ++ } ++ ++ fn ord_mr_name() -> &'static str { ++ panic!("Unimplemented ord_mr_name for SlapiOrdMr") ++ } ++ ++ fn ord_mr_desc() -> &'static str { ++ panic!("Unimplemented ord_mr_desc for SlapiOrdMr") ++ } ++ ++ fn ord_mr_supported_names() -> Vec<&'static str> { ++ panic!("Unimplemented ord_mr_supported_names for SlapiOrdMr") ++ } ++ ++ fn filter_ava_ord( ++ _pb: &mut PblockRef, ++ _bval_filter: &BerValRef, ++ _vals: &ValueArrayRef, ++ ) -> Result, PluginError> { ++ Ok(None) ++ } ++ ++ fn filter_compare(_a: &BerValRef, _b: &BerValRef) -> Ordering { ++ panic!("Unimplemented filter_compare") ++ } ++} ++ ++pub trait SlapiSubMr: SlapiSyntaxPlugin1 { ++ fn sub_mr_oid() -> Option<&'static str> { ++ None ++ } ++ ++ fn sub_mr_name() -> &'static str { ++ panic!("Unimplemented sub_mr_name for SlapiSubMr") ++ } ++ ++ fn sub_mr_desc() -> &'static str { ++ panic!("Unimplemented sub_mr_desc for SlapiSubMr") ++ } ++ ++ fn sub_mr_supported_names() -> Vec<&'static str> { ++ panic!("Unimplemented sub_mr_supported_names for SlapiSubMr") ++ } ++} +diff --git a/src/slapi_r_plugin/src/task.rs b/src/slapi_r_plugin/src/task.rs +new file mode 100644 +index 000000000..251ae4d82 +--- /dev/null ++++ b/src/slapi_r_plugin/src/task.rs +@@ -0,0 +1,148 @@ ++use crate::constants::LDAP_SUCCESS; ++use crate::entry::EntryRef; ++use crate::pblock::PblockRef; ++use std::ffi::CString; ++use std::os::raw::c_char; ++use std::thread; ++use std::time::Duration; ++ ++extern "C" { ++ fn slapi_plugin_new_task(ndn: *const c_char, arg: *const libc::c_void) -> *const libc::c_void; ++ fn slapi_task_dec_refcount(task: *const libc::c_void); ++ fn slapi_task_inc_refcount(task: *const libc::c_void); ++ fn slapi_task_get_refcount(task: *const libc::c_void) -> i32; ++ fn slapi_task_begin(task: *const libc::c_void, rc: i32); ++ fn slapi_task_finish(task: *const libc::c_void, rc: i32); ++ ++ fn slapi_plugin_task_register_handler( ++ ident: *const c_char, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++ pb: *const libc::c_void, ++ ) -> i32; ++ fn slapi_plugin_task_unregister_handler( ++ ident: *const c_char, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++ ) -> i32; ++ fn slapi_task_set_destructor_fn( ++ task: *const libc::c_void, ++ cb: extern "C" fn(*const libc::c_void), ++ ); ++} ++ ++pub struct TaskRef { ++ raw_task: *const libc::c_void, ++} ++ ++pub struct Task { ++ value: TaskRef, ++} ++ ++// Because raw pointers are not send, but we need to send the task to a thread ++// as part of the task thread spawn, we need to convince the compiler this ++// action is okay. It's probably not because C is terrible, BUT provided the ++// server and framework only touch the ref count, we are okay. ++unsafe impl Send for Task {} ++ ++pub fn task_register_handler_fn( ++ ident: &'static str, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++ pb: &mut PblockRef, ++) -> i32 { ++ let cstr = CString::new(ident).expect("Invalid ident provided"); ++ unsafe { slapi_plugin_task_register_handler(cstr.as_ptr(), cb, pb.as_ptr()) } ++} ++ ++pub fn task_unregister_handler_fn( ++ ident: &'static str, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++) -> i32 { ++ let cstr = CString::new(ident).expect("Invalid ident provided"); ++ unsafe { slapi_plugin_task_unregister_handler(cstr.as_ptr(), cb) } ++} ++ ++impl Task { ++ pub fn new(e: &EntryRef, arg: *const libc::c_void) -> Self { ++ let sdn = e.get_sdnref(); ++ let ndn = unsafe { sdn.as_ndnref() }; ++ let raw_task = unsafe { slapi_plugin_new_task(ndn.as_ptr(), arg) }; ++ unsafe { slapi_task_inc_refcount(raw_task) }; ++ Task { ++ value: TaskRef { raw_task }, ++ } ++ } ++ ++ pub fn begin(&self) { ++ // Indicate we begin ++ unsafe { slapi_task_begin(self.value.raw_task, 1) } ++ } ++ ++ pub fn register_destructor_fn(&mut self, cb: extern "C" fn(*const libc::c_void)) { ++ unsafe { ++ slapi_task_set_destructor_fn(self.value.raw_task, cb); ++ } ++ } ++ ++ pub fn success(self) { ++ unsafe { ++ slapi_task_finish(self.value.raw_task, LDAP_SUCCESS); ++ } ++ } ++ ++ pub fn error(self, rc: i32) { ++ unsafe { slapi_task_finish(self.value.raw_task, rc) }; ++ } ++} ++ ++impl Drop for Task { ++ fn drop(&mut self) { ++ unsafe { ++ slapi_task_dec_refcount(self.value.raw_task); ++ } ++ } ++} ++ ++impl TaskRef { ++ pub fn new(raw_task: *const libc::c_void) -> Self { ++ TaskRef { raw_task } ++ } ++ ++ pub fn block(&self) { ++ // wait for the refcount to go to 0. ++ let d = Duration::from_millis(250); ++ loop { ++ if unsafe { slapi_task_get_refcount(self.raw_task) } > 0 { ++ thread::sleep(d); ++ } else { ++ return; ++ } ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs +new file mode 100644 +index 000000000..5a40dd279 +--- /dev/null ++++ b/src/slapi_r_plugin/src/value.rs +@@ -0,0 +1,235 @@ ++use crate::ber::{ol_berval, BerValRef}; ++use crate::dn::Sdn; ++use std::convert::{From, TryFrom}; ++use std::ffi::CString; ++use std::iter::once; ++use std::iter::FromIterator; ++use std::mem; ++use std::ops::Deref; ++use std::ptr; ++use uuid::Uuid; ++ ++extern "C" { ++ fn slapi_value_new() -> *mut slapi_value; ++ fn slapi_value_free(v: *mut *const libc::c_void); ++} ++ ++#[repr(C)] ++/// From ./ldap/servers/slapd/slap.h ++pub struct slapi_value { ++ bv: ol_berval, ++ v_csnset: *const libc::c_void, ++ v_flags: u32, ++} ++ ++pub struct ValueArrayRefIter<'a> { ++ idx: isize, ++ va_ref: &'a ValueArrayRef, ++} ++ ++impl<'a> Iterator for ValueArrayRefIter<'a> { ++ type Item = ValueRef; ++ ++ #[inline] ++ fn next(&mut self) -> Option { ++ // So long as va_ref.raw_slapi_val + offset != NULL, continue. ++ // this is so wildly unsafe, but you know, that's just daily life of C anyway ... ++ unsafe { ++ let n_ptr: *const slapi_value = *(self.va_ref.raw_slapi_val.offset(self.idx)); ++ if n_ptr.is_null() { ++ None ++ } else { ++ // Advance the iter. ++ self.idx = self.idx + 1; ++ let raw_berval: *const ol_berval = &(*n_ptr).bv as *const _; ++ Some(ValueRef { ++ raw_slapi_val: n_ptr, ++ bvr: BerValRef { raw_berval }, ++ }) ++ } ++ } ++ } ++} ++ ++pub struct ValueArrayRef { ++ raw_slapi_val: *const *const slapi_value, ++} ++ ++impl ValueArrayRef { ++ pub fn new(raw_slapi_val: *const libc::c_void) -> Self { ++ let raw_slapi_val = raw_slapi_val as *const _ as *const *const slapi_value; ++ ValueArrayRef { raw_slapi_val } ++ } ++ ++ pub fn iter(&self) -> ValueArrayRefIter { ++ ValueArrayRefIter { ++ idx: 0, ++ va_ref: &self, ++ } ++ } ++ ++ pub fn first(&self) -> Option { ++ self.iter().next() ++ } ++} ++ ++pub struct ValueArray { ++ data: Vec<*mut slapi_value>, ++ vrf: ValueArrayRef, ++} ++ ++impl Deref for ValueArray { ++ type Target = ValueArrayRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.vrf ++ } ++} ++ ++impl ValueArray { ++ /// Take ownership of this value array, returning the pointer to the inner memory ++ /// and forgetting about it for ourself. This prevents the drop handler from freeing ++ /// the slapi_value, ie we are giving this to the 389-ds framework to manage from now. ++ pub unsafe fn take_ownership(mut self) -> *const *const slapi_value { ++ let mut vs = Vec::new(); ++ mem::swap(&mut self.data, &mut vs); ++ let bs = vs.into_boxed_slice(); ++ Box::leak(bs) as *const _ as *const *const slapi_value ++ } ++} ++ ++impl FromIterator for ValueArray { ++ fn from_iter>(iter: I) -> Self { ++ let data: Vec<*mut slapi_value> = iter ++ .into_iter() ++ .map(|v| unsafe { v.take_ownership() }) ++ .chain(once(ptr::null_mut() as *mut slapi_value)) ++ .collect(); ++ let vrf = ValueArrayRef { ++ raw_slapi_val: data.as_ptr() as *const *const slapi_value, ++ }; ++ ValueArray { data, vrf } ++ } ++} ++ ++impl Drop for ValueArray { ++ fn drop(&mut self) { ++ self.data.drain(0..).for_each(|mut v| unsafe { ++ slapi_value_free(&mut v as *mut _ as *mut *const libc::c_void); ++ }) ++ } ++} ++ ++#[derive(Debug)] ++pub struct ValueRef { ++ raw_slapi_val: *const slapi_value, ++ bvr: BerValRef, ++} ++ ++impl ValueRef { ++ pub(crate) unsafe fn as_ptr(&self) -> *const slapi_value { ++ // This is unsafe as the *const may outlive the value ref. ++ self.raw_slapi_val ++ } ++} ++ ++pub struct Value { ++ value: ValueRef, ++} ++ ++impl Value { ++ pub unsafe fn take_ownership(mut self) -> *mut slapi_value { ++ let mut n_ptr = ptr::null(); ++ mem::swap(&mut self.value.raw_slapi_val, &mut n_ptr); ++ n_ptr as *mut slapi_value ++ // Now drop will run and not care. ++ } ++} ++ ++impl Drop for Value { ++ fn drop(&mut self) { ++ if self.value.raw_slapi_val != ptr::null() { ++ // free it ++ unsafe { ++ slapi_value_free( ++ &mut self.value.raw_slapi_val as *mut _ as *mut *const libc::c_void, ++ ); ++ } ++ } ++ } ++} ++ ++impl Deref for Value { ++ type Target = ValueRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl From<&Uuid> for Value { ++ fn from(u: &Uuid) -> Self { ++ // turn the uuid to a str ++ let u_str = u.to_hyphenated().to_string(); ++ let len = u_str.len(); ++ let cstr = CString::new(u_str) ++ .expect("Invalid uuid, should never occur!") ++ .into_boxed_c_str(); ++ let s_ptr = cstr.as_ptr(); ++ Box::leak(cstr); ++ ++ let mut v = unsafe { slapi_value_new() }; ++ unsafe { ++ (*v).bv.len = len; ++ (*v).bv.data = s_ptr as *const u8; ++ } ++ ++ Value { ++ value: ValueRef::new(v as *const libc::c_void), ++ } ++ } ++} ++ ++impl ValueRef { ++ pub fn new(raw_slapi_val: *const libc::c_void) -> Self { ++ let raw_slapi_val = raw_slapi_val as *const _ as *const slapi_value; ++ let raw_berval: *const ol_berval = unsafe { &(*raw_slapi_val).bv as *const _ }; ++ ValueRef { ++ raw_slapi_val, ++ bvr: BerValRef { raw_berval }, ++ } ++ } ++} ++ ++impl TryFrom<&ValueRef> for String { ++ type Error = (); ++ ++ fn try_from(value: &ValueRef) -> Result { ++ value.bvr.into_string().ok_or(()) ++ } ++} ++ ++impl TryFrom<&ValueRef> for Sdn { ++ type Error = (); ++ ++ fn try_from(value: &ValueRef) -> Result { ++ // We need to do a middle step of moving through a cstring as ++ // bervals may not always have a trailing NULL, and sdn expects one. ++ let cdn = value.bvr.into_cstring().ok_or(())?; ++ Ok(cdn.as_c_str().into()) ++ } ++} ++ ++impl AsRef for ValueRef { ++ fn as_ref(&self) -> &ValueRef { ++ &self ++ } ++} ++ ++impl Deref for ValueRef { ++ type Target = BerValRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.bvr ++ } ++} +-- +2.26.3 + diff --git a/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch b/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch deleted file mode 100644 index 411958e..0000000 --- a/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch +++ /dev/null @@ -1,513 +0,0 @@ -From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Thu, 12 Nov 2020 18:50:04 +0100 -Subject: [PATCH 3/3] do not add referrals for masters with different data - generation #2054 (#4427) - -Bug description: -The problem is that some operation mandatory in the usual cases are -also performed when replication cannot take place because the -database set are differents (i.e: RUV generation ids are different) - -One of the issue is that the csn generator state is updated when -starting a replication session (it is a problem when trying to -reset the time skew, as freshly reinstalled replicas get infected -by the old ones) - -A second issue is that the RUV got updated when ending a replication session -(which may add replica that does not share the same data set, -then update operations on consumer retun referrals towards wrong masters - -Fix description: -The fix checks the RUVs generation id before updating the csn generator -and before updating the RUV. - -Reviewed by: mreynolds - firstyear - vashirov - -Platforms tested: F32 ---- - .../suites/replication/regression_test.py | 290 ++++++++++++++++++ - ldap/servers/plugins/replication/repl5.h | 1 + - .../plugins/replication/repl5_inc_protocol.c | 20 +- - .../plugins/replication/repl5_replica.c | 39 ++- - src/lib389/lib389/dseldif.py | 37 +++ - 5 files changed, 368 insertions(+), 19 deletions(-) - -diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py -index 14b9d6a44..a72af6b30 100644 ---- a/dirsrvtests/tests/suites/replication/regression_test.py -+++ b/dirsrvtests/tests/suites/replication/regression_test.py -@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts - from lib389.pwpolicy import PwPolicyManager - from lib389.utils import * - from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2 -+from lib389.topologies import topology_m2c2 as topo_m2c2 - from lib389._constants import * - from lib389.idm.organizationalunit import OrganizationalUnits - from lib389.idm.user import UserAccount -@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager - from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager - from lib389.agreement import Agreements - from lib389 import pid_from_file -+from lib389.dseldif import * - - - pytestmark = pytest.mark.tier1 -@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2): - verify_keepalive_entries(topo_m2, True); - - -+def get_agreement(agmts, consumer): -+ # Get agreement towards consumer among the agremment list -+ for agmt in agmts.list(): -+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and -+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): -+ return agmt -+ return None; -+ -+ -+def test_ruv_url_not_added_if_different_uuid(topo_m2c2): -+ """Check that RUV url is not updated if RUV generation uuid are different -+ -+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 -+ :setup: Two masters + two consumers replication setup -+ :steps: -+ 1. Generate ldif without replication data -+ 2. Init both masters from that ldif -+ (to clear the ruvs and generates different generation uuid) -+ 3. Perform on line init from master1 to consumer1 -+ and from master2 to consumer2 -+ 4. Perform update on both masters -+ 5. Check that c1 RUV does not contains URL towards m2 -+ 6. Check that c2 RUV does contains URL towards m2 -+ 7. Perform on line init from master1 to master2 -+ 8. Perform update on master2 -+ 9. Check that c1 RUV does contains URL towards m2 -+ :expectedresults: -+ 1. No error while generating ldif -+ 2. No error while importing the ldif file -+ 3. No error and Initialization done. -+ 4. No error -+ 5. master2 replicaid should not be in the consumer1 RUV -+ 6. master2 replicaid should be in the consumer2 RUV -+ 7. No error and Initialization done. -+ 8. No error -+ 9. master2 replicaid should be in the consumer1 RUV -+ -+ """ -+ -+ # Variables initialization -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ -+ m1 = topo_m2c2.ms["master1"] -+ m2 = topo_m2c2.ms["master2"] -+ c1 = topo_m2c2.cs["consumer1"] -+ c2 = topo_m2c2.cs["consumer2"] -+ -+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) -+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) -+ -+ replicid_m2 = replica_m2.get_rid() -+ -+ agmts_m1 = Agreements(m1, replica_m1.dn) -+ agmts_m2 = Agreements(m2, replica_m2.dn) -+ -+ m1_m2 = get_agreement(agmts_m1, m2) -+ m1_c1 = get_agreement(agmts_m1, c1) -+ m1_c2 = get_agreement(agmts_m1, c2) -+ m2_m1 = get_agreement(agmts_m2, m1) -+ m2_c1 = get_agreement(agmts_m2, c1) -+ m2_c2 = get_agreement(agmts_m2, c2) -+ -+ # Step 1: Generate ldif without replication data -+ m1.stop() -+ m2.stop() -+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=ldif_file, encrypt=False) -+ # Remove replication metadata that are still in the ldif -+ # _remove_replication_data(ldif_file) -+ -+ # Step 2: Init both masters from that ldif -+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m1.start() -+ m2.start() -+ -+ # Step 3: Perform on line init from master1 to consumer1 -+ # and from master2 to consumer2 -+ m1_c1.begin_reinit() -+ m2_c2.begin_reinit() -+ (done, error) = m1_c1.wait_reinit() -+ assert done is True -+ assert error is False -+ (done, error) = m2_c2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 4: Perform update on both masters -+ repl.test_replication(m1, c1) -+ repl.test_replication(m2, c2) -+ -+ # Step 5: Check that c1 RUV does not contains URL towards m2 -+ ruv = replica_c1.get_ruv() -+ log.debug(f"c1 RUV: {ruv}") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") -+ #Note: this assertion fails if issue 2054 is not fixed. -+ assert False -+ -+ # Step 6: Check that c2 RUV does contains URL towards m2 -+ ruv = replica_c2.get_ruv() -+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ assert False -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ -+ -+ # Step 7: Perform on line init from master1 to master2 -+ m1_m2.begin_reinit() -+ (done, error) = m1_m2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 8: Perform update on master2 -+ repl.test_replication(m2, c1) -+ -+ # Step 9: Check that c1 RUV does contains URL towards m2 -+ ruv = replica_c1.get_ruv() -+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ assert False -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ -+ -+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): -+ """Check that csngen remote offset is not updated if RUV generation uuid are different -+ -+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 -+ :setup: Two masters + two consumers replication setup -+ :steps: -+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew -+ 2. Generate ldif without replication data -+ 3. Increase time skew on master2 -+ 4. Init both masters from that ldif -+ (to clear the ruvs and generates different generation uuid) -+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2 -+ 6. Perform update on both masters -+ 7: Check that c1 has no time skew -+ 8: Check that c2 has time skew -+ 9. Init master2 from master1 -+ 10. Perform update on master2 -+ 11. Check that c1 has time skew -+ :expectedresults: -+ 1. No error -+ 2. No error while generating ldif -+ 3. No error -+ 4. No error while importing the ldif file -+ 5. No error and Initialization done. -+ 6. No error -+ 7. c1 time skew should be lesser than threshold -+ 8. c2 time skew should be higher than threshold -+ 9. No error and Initialization done. -+ 10. No error -+ 11. c1 time skew should be higher than threshold -+ -+ """ -+ -+ # Variables initialization -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ -+ m1 = topo_m2c2.ms["master1"] -+ m2 = topo_m2c2.ms["master2"] -+ c1 = topo_m2c2.cs["consumer1"] -+ c2 = topo_m2c2.cs["consumer2"] -+ -+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) -+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) -+ -+ replicid_m2 = replica_m2.get_rid() -+ -+ agmts_m1 = Agreements(m1, replica_m1.dn) -+ agmts_m2 = Agreements(m2, replica_m2.dn) -+ -+ m1_m2 = get_agreement(agmts_m1, m2) -+ m1_c1 = get_agreement(agmts_m1, c1) -+ m1_c2 = get_agreement(agmts_m1, c2) -+ m2_m1 = get_agreement(agmts_m2, m1) -+ m2_c1 = get_agreement(agmts_m2, c1) -+ m2_c2 = get_agreement(agmts_m2, c2) -+ -+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew -+ m1_m2.pause() -+ m2_m1.pause() -+ -+ # Step 2: Generate ldif without replication data -+ m1.stop() -+ m2.stop() -+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=ldif_file, encrypt=False) -+ # Remove replication metadata that are still in the ldif -+ # _remove_replication_data(ldif_file) -+ -+ # Step 3: Increase time skew on master2 -+ timeSkew=6*3600 -+ # We can modify master2 time skew -+ # But the time skew on the consumer may be smaller -+ # depending on when the cnsgen generation time is updated -+ # and when first csn get replicated. -+ # Since we use timeSkew has threshold value to detect -+ # whether there are time skew or not, -+ # lets add a significative margin (longer than the test duration) -+ # to avoid any risk of erroneous failure -+ timeSkewMargin = 300 -+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) -+ -+ # Step 4: Init both masters from that ldif -+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m1.start() -+ m2.start() -+ -+ # Step 5: Perform on line init from master1 to consumer1 -+ # and from master2 to consumer2 -+ m1_c1.begin_reinit() -+ m2_c2.begin_reinit() -+ (done, error) = m1_c1.wait_reinit() -+ assert done is True -+ assert error is False -+ (done, error) = m2_c2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 6: Perform update on both masters -+ repl.test_replication(m1, c1) -+ repl.test_replication(m2, c2) -+ -+ # Step 7: Check that c1 has no time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c1.stop() -+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] -+ c1_timeSkew = int(c1_nsState['time_skew']) -+ log.debug(f"c1 time skew: {c1_timeSkew}") -+ if (c1_timeSkew >= timeSkew): -+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") -+ assert False -+ c1.start() -+ -+ # Step 8: Check that c2 has time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c2.stop() -+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] -+ c2_timeSkew = int(c2_nsState['time_skew']) -+ log.debug(f"c2 time skew: {c2_timeSkew}") -+ if (c2_timeSkew < timeSkew): -+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}") -+ assert False -+ c2.start() -+ -+ # Step 9: Perform on line init from master1 to master2 -+ m1_c1.pause() -+ m1_m2.resume() -+ m1_m2.begin_reinit() -+ (done, error) = m1_m2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 10: Perform update on master2 -+ repl.test_replication(m2, c1) -+ -+ # Step 11: Check that c1 has time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c1.stop() -+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] -+ c1_timeSkew = int(c1_nsState['time_skew']) -+ log.debug(f"c1 time skew: {c1_timeSkew}") -+ if (c1_timeSkew < timeSkew): -+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}") -+ assert False -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index b35f724c2..f1c596a3f 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -708,6 +708,7 @@ void replica_dump(Replica *r); - void replica_set_enabled(Replica *r, PRBool enable); - Replica *replica_get_replica_from_dn(const Slapi_DN *dn); - Replica *replica_get_replica_from_root(const char *repl_root); -+int replica_check_generation(Replica *r, const RUV *remote_ruv); - int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl); - Replica *replica_get_replica_for_op(Slapi_PBlock *pb); - /* the functions below manipulate replica hash */ -diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c -index 29b1fb073..af5e5897c 100644 ---- a/ldap/servers/plugins/replication/repl5_inc_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c -@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv) - } else if (NULL == remote_ruv) { - return_value = EXAMINE_RUV_PRISTINE_REPLICA; - } else { -- char *local_gen = NULL; -- char *remote_gen = ruv_get_replica_generation(remote_ruv); -- Object *local_ruv_obj; -- RUV *local_ruv; -- - PR_ASSERT(NULL != prp->replica); -- local_ruv_obj = replica_get_ruv(prp->replica); -- if (NULL != local_ruv_obj) { -- local_ruv = (RUV *)object_get_data(local_ruv_obj); -- PR_ASSERT(local_ruv); -- local_gen = ruv_get_replica_generation(local_ruv); -- object_release(local_ruv_obj); -- } -- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) { -- return_value = EXAMINE_RUV_GENERATION_MISMATCH; -- } else { -+ if (replica_check_generation(prp->replica, remote_ruv)) { - return_value = EXAMINE_RUV_OK; -+ } else { -+ return_value = EXAMINE_RUV_GENERATION_MISMATCH; - } -- slapi_ch_free((void **)&remote_gen); -- slapi_ch_free((void **)&local_gen); - } - return return_value; - } -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index f0ea0f8ef..7e56d6557 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv) - replica_unlock(r->repl_lock); - } - -+/* -+ * Check if replica generation is the same than the remote ruv one -+ */ -+int -+replica_check_generation(Replica *r, const RUV *remote_ruv) -+{ -+ int return_value; -+ char *local_gen = NULL; -+ char *remote_gen = ruv_get_replica_generation(remote_ruv); -+ Object *local_ruv_obj; -+ RUV *local_ruv; -+ -+ PR_ASSERT(NULL != r); -+ local_ruv_obj = replica_get_ruv(r); -+ if (NULL != local_ruv_obj) { -+ local_ruv = (RUV *)object_get_data(local_ruv_obj); -+ PR_ASSERT(local_ruv); -+ local_gen = ruv_get_replica_generation(local_ruv); -+ object_release(local_ruv_obj); -+ } -+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) { -+ return_value = PR_FALSE; -+ } else { -+ return_value = PR_TRUE; -+ } -+ slapi_ch_free_string(&remote_gen); -+ slapi_ch_free_string(&local_gen); -+ return return_value; -+} -+ - /* - * Update one particular CSN in an RUV. This is meant to be called - * whenever (a) the server has processed a client operation and -@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn) - - PR_ASSERT(r && ruv); - -+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */ -+ { -+ return 0; -+ } -+ - rc = ruv_get_max_csn(ruv, &csn); - if (rc != RUV_SUCCESS) { - return -1; -@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv) - replica_lock(r->repl_lock); - - local_ruv = (RUV *)object_get_data(r->repl_ruv); -- -- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) { -+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL || -+ !replica_check_generation(r, supplier_ruv)) { - replica_unlock(r->repl_lock); - return; - } -diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py -index 10baba4d7..6850c9a8a 100644 ---- a/src/lib389/lib389/dseldif.py -+++ b/src/lib389/lib389/dseldif.py -@@ -317,6 +317,43 @@ class DSEldif(DSLint): - - return states - -+ def _increaseTimeSkew(self, suffix, timeSkew): -+ # Increase csngen state local_offset by timeSkew -+ # Warning: instance must be stopped before calling this function -+ assert (timeSkew >= 0) -+ nsState = self.readNsState(suffix)[0] -+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}') -+ oldNsState = self.get(nsState['dn'], 'nsState', True) -+ self._instance.log.debug(f'oldNsState is {oldNsState}') -+ -+ # Lets reencode the new nsState -+ from lib389.utils import print_nice_time -+ if pack('h', 1) == pack('=h',1): -+ end = '>' -+ else: -+ raise ValueError("Unknown endian, unable to proceed") -+ -+ thelen = len(oldNsState) -+ if thelen <= 20: -+ pad = 2 # padding for short H values -+ timefmt = 'I' # timevals are unsigned 32-bit int -+ else: -+ pad = 6 # padding for short H values -+ timefmt = 'Q' # timevals are unsigned 64-bit int -+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad) -+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']), -+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew, -+ int(nsState['remote_offset']), int(nsState['seq_num']))) -+ newNsState = newNsState.decode('utf-8') -+ self._instance.log.debug(f'newNsState is {newNsState}') -+ # Lets replace the value. -+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState') -+ attr_i = next(iter(attr_data)) -+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}" -+ self._update() -+ - - class FSChecks(DSLint): - """This is for the healthcheck feature, check commonly used system config files the --- -2.26.2 - diff --git a/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch b/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch new file mode 100644 index 0000000..8416726 --- /dev/null +++ b/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch @@ -0,0 +1,373 @@ +From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Wed, 23 Sep 2020 09:19:34 +1000 +Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly + (#4328) + +Bug Description: due to an oversight in how fixup tasks +worked, the entryuuid fixup task did not work correctly and +would not persist over restarts. + +Fix Description: Correctly implement entryuuid fixup. + +fixes: #4326 + +Author: William Brown + +Review by: mreynolds (thanks!) +--- + .../tests/suites/entryuuid/basic_test.py | 24 +++- + src/plugins/entryuuid/src/lib.rs | 43 ++++++- + src/slapi_r_plugin/src/constants.rs | 5 + + src/slapi_r_plugin/src/entry.rs | 8 ++ + src/slapi_r_plugin/src/lib.rs | 2 + + src/slapi_r_plugin/src/macros.rs | 2 +- + src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++ + src/slapi_r_plugin/src/pblock.rs | 7 ++ + src/slapi_r_plugin/src/value.rs | 4 + + 9 files changed, 206 insertions(+), 7 deletions(-) + create mode 100644 src/slapi_r_plugin/src/modify.rs + +diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py +index beb73701d..4d8a40909 100644 +--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py ++++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py +@@ -12,6 +12,7 @@ import time + import shutil + from lib389.idm.user import nsUserAccounts, UserAccounts + from lib389.idm.account import Accounts ++from lib389.idm.domain import Domain + from lib389.topologies import topology_st as topology + from lib389.backend import Backends + from lib389.paths import Paths +@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology): + 3. Enable the entryuuid plugin + 4. Run the fixup + 5. Assert the entryuuid now exists ++ 6. Restart and check they persist + + :expectedresults: + 1. Success +@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology): + 3. Success + 4. Success + 5. Suddenly EntryUUID! ++ 6. Still has EntryUUID! + """ + # 1. Disable the plugin + plug = EntryUUIDPlugin(topology.standalone) +@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology): + assert(task.is_complete() and task.get_exit_code() == 0) + topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) + +- # 5. Assert the uuid. +- euuid = account.get_attr_val_utf8('entryUUID') +- assert(euuid is not None) ++ # 5.1 Assert the uuid on the user. ++ euuid_user = account.get_attr_val_utf8('entryUUID') ++ assert(euuid_user is not None) ++ ++ # 5.2 Assert it on the domain entry. ++ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX) ++ euuid_domain = domain.get_attr_val_utf8('entryUUID') ++ assert(euuid_domain is not None) ++ ++ # Assert it persists after a restart. ++ topology.standalone.restart() ++ # 6.1 Assert the uuid on the use. ++ euuid_user_2 = account.get_attr_val_utf8('entryUUID') ++ assert(euuid_user_2 == euuid_user) ++ ++ # 6.2 Assert it on the domain entry. ++ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID') ++ assert(euuid_domain_2 == euuid_domain) + +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +index 6b5e8d1bb..92977db05 100644 +--- a/src/plugins/entryuuid/src/lib.rs ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid { + } + } + +-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> { +- assign_uuid(&mut e); +- Ok(()) ++pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> { ++ /* Supply a modification to the entry. */ ++ let sdn = e.get_sdnref(); ++ ++ /* Sanity check that entryuuid doesn't already exist */ ++ if e.contains_attr("entryUUID") { ++ log_error!( ++ ErrorLevel::Trace, ++ "skipping fixup for -> {}", ++ sdn.to_dn_string() ++ ); ++ return Ok(()); ++ } ++ ++ // Setup the modifications ++ let mut mods = SlapiMods::new(); ++ ++ let u: Uuid = Uuid::new_v4(); ++ let uuid_value = Value::from(&u); ++ let values: ValueArray = std::iter::once(uuid_value).collect(); ++ mods.append(ModType::Replace, "entryUUID", values); ++ ++ /* */ ++ let lmod = Modify::new(&sdn, mods, plugin_id())?; ++ ++ match lmod.execute() { ++ Ok(_) => { ++ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string()); ++ Ok(()) ++ } ++ Err(e) => { ++ log_error!( ++ ErrorLevel::Error, ++ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}", ++ sdn.to_dn_string(), ++ e ++ ); ++ Err(PluginError::GenericFailure) ++ } ++ } + } + + #[cfg(test)] +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +index cf76ccbdb..34845c2f4 100644 +--- a/src/slapi_r_plugin/src/constants.rs ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -5,6 +5,11 @@ use std::os::raw::c_char; + pub const LDAP_SUCCESS: i32 = 0; + pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50; + ++#[repr(i32)] ++pub enum OpFlags { ++ ByassReferrals = 0x0040_0000, ++} ++ + #[repr(i32)] + /// The set of possible function handles we can register via the pblock. These + /// values correspond to slapi-plugin.h. +diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs +index 034efe692..22ae45189 100644 +--- a/src/slapi_r_plugin/src/entry.rs ++++ b/src/slapi_r_plugin/src/entry.rs +@@ -70,6 +70,14 @@ impl EntryRef { + } + } + ++ pub fn contains_attr(&self, name: &str) -> bool { ++ let cname = CString::new(name).expect("invalid attr name"); ++ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) }; ++ ++ // If it's null, it's not present, so flip the logic. ++ !va.is_null() ++ } ++ + pub fn add_value(&mut self, a: &str, v: &ValueRef) { + // turn the attr to a c string. + // TODO FIX +diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs +index d7fc22e52..076907bae 100644 +--- a/src/slapi_r_plugin/src/lib.rs ++++ b/src/slapi_r_plugin/src/lib.rs +@@ -9,6 +9,7 @@ pub mod dn; + pub mod entry; + pub mod error; + pub mod log; ++pub mod modify; + pub mod pblock; + pub mod plugin; + pub mod search; +@@ -24,6 +25,7 @@ pub mod prelude { + pub use crate::entry::EntryRef; + pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError}; + pub use crate::log::{log_error, ErrorLevel}; ++ pub use crate::modify::{ModType, Modify, SlapiMods}; + pub use crate::pblock::{Pblock, PblockRef}; + pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3}; + pub use crate::search::{Search, SearchScope}; +diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs +index 030449632..bc8dfa60f 100644 +--- a/src/slapi_r_plugin/src/macros.rs ++++ b/src/slapi_r_plugin/src/macros.rs +@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn { + let e = EntryRef::new(raw_e); + let data_ptr = raw_data as *const _; + let data = unsafe { &(*data_ptr) }; +- match $cb_mod_ident(e, data) { ++ match $cb_mod_ident(&e, data) { + Ok(_) => LDAPError::Success as i32, + Err(e) => e as i32, + } +diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs +new file mode 100644 +index 000000000..30864377a +--- /dev/null ++++ b/src/slapi_r_plugin/src/modify.rs +@@ -0,0 +1,118 @@ ++use crate::constants::OpFlags; ++use crate::dn::SdnRef; ++use crate::error::{LDAPError, PluginError}; ++use crate::pblock::Pblock; ++use crate::plugin::PluginIdRef; ++use crate::value::{slapi_value, ValueArray}; ++ ++use std::ffi::CString; ++use std::ops::{Deref, DerefMut}; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_modify_internal_set_pb_ext( ++ pb: *const libc::c_void, ++ dn: *const libc::c_void, ++ mods: *const *const libc::c_void, ++ controls: *const *const libc::c_void, ++ uniqueid: *const c_char, ++ plugin_ident: *const libc::c_void, ++ op_flags: i32, ++ ); ++ fn slapi_modify_internal_pb(pb: *const libc::c_void); ++ fn slapi_mods_free(smods: *const *const libc::c_void); ++ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void; ++ fn slapi_mods_new() -> *const libc::c_void; ++ fn slapi_mods_add_mod_values( ++ smods: *const libc::c_void, ++ mtype: i32, ++ attrtype: *const c_char, ++ value: *const *const slapi_value, ++ ); ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum ModType { ++ Add = 0, ++ Delete = 1, ++ Replace = 2, ++} ++ ++pub struct SlapiMods { ++ inner: *const libc::c_void, ++ vas: Vec, ++} ++ ++impl Drop for SlapiMods { ++ fn drop(&mut self) { ++ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) } ++ } ++} ++ ++impl SlapiMods { ++ pub fn new() -> Self { ++ SlapiMods { ++ inner: unsafe { slapi_mods_new() }, ++ vas: Vec::new(), ++ } ++ } ++ ++ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) { ++ // We can get the value array pointer here to push to the inner ++ // because the internal pointers won't change even when we push them ++ // to the list to preserve their lifetime. ++ let vas = values.as_ptr(); ++ // We take ownership of this to ensure it lives as least as long as our ++ // slapimods structure. ++ self.vas.push(values); ++ // now we can insert these to the modes. ++ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype"); ++ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) }; ++ } ++} ++ ++pub struct Modify { ++ pb: Pblock, ++ mods: SlapiMods, ++} ++ ++pub struct ModifyResult { ++ pb: Pblock, ++} ++ ++impl Modify { ++ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result { ++ let pb = Pblock::new(); ++ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) }; ++ // OP_FLAG_ACTION_LOG_ACCESS ++ ++ unsafe { ++ slapi_modify_internal_set_pb_ext( ++ pb.deref().as_ptr(), ++ dn.as_ptr(), ++ lmods, ++ std::ptr::null(), ++ std::ptr::null(), ++ plugin_id.raw_pid, ++ OpFlags::ByassReferrals as i32, ++ ) ++ }; ++ ++ Ok(Modify { pb, mods }) ++ } ++ ++ pub fn execute(self) -> Result { ++ let Modify { ++ mut pb, ++ mods: _mods, ++ } = self; ++ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) }; ++ let result = pb.get_op_result(); ++ ++ match result { ++ 0 => Ok(ModifyResult { pb }), ++ _e => Err(LDAPError::from(result)), ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs +index b69ce1680..0f83914f3 100644 +--- a/src/slapi_r_plugin/src/pblock.rs ++++ b/src/slapi_r_plugin/src/pblock.rs +@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel}; + extern "C" { + fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; + fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; ++ fn slapi_pblock_destroy(pb: *const libc::c_void); + fn slapi_pblock_new() -> *const libc::c_void; + } + +@@ -41,6 +42,12 @@ impl DerefMut for Pblock { + } + } + ++impl Drop for Pblock { ++ fn drop(&mut self) { ++ unsafe { slapi_pblock_destroy(self.value.raw_pb) } ++ } ++} ++ + pub struct PblockRef { + raw_pb: *const libc::c_void, + } +diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs +index 5a40dd279..46246837a 100644 +--- a/src/slapi_r_plugin/src/value.rs ++++ b/src/slapi_r_plugin/src/value.rs +@@ -96,6 +96,10 @@ impl ValueArray { + let bs = vs.into_boxed_slice(); + Box::leak(bs) as *const _ as *const *const slapi_value + } ++ ++ pub fn as_ptr(&self) -> *const *const slapi_value { ++ self.data.as_ptr() as *const *const slapi_value ++ } + } + + impl FromIterator for ValueArray { +-- +2.26.3 + diff --git a/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch b/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch deleted file mode 100644 index 5622a1a..0000000 --- a/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch +++ /dev/null @@ -1,179 +0,0 @@ -From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 14 May 2020 14:31:47 +1000 -Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif - -Bug Description: This resolves a potential conflict between 60nis.ldif -in freeipa and others with 2307compat, by removing the conflicting -definitions from 2307bis that were included. - -Fix Description: By not including these in 2307compat, this means that -sites that rely on the values provided by 2307bis may ALSO need -60nis.ldif to be present. However, these nis values seem like they are -likely very rare in reality, and this also will avoid potential -issues with freeipa. It also is the least disruptive as we don't need -to change an already defined file, and we don't have values where the name -to oid relationship changes. - -Fixes: #50933 -https://pagure.io/389-ds-base/issue/50933 - -Author: William Brown - -Review by: tbordaz (Thanks!) ---- - ldap/schema/10rfc2307compat.ldif | 66 -------------------------------- - ldap/schema/60autofs.ldif | 39 ++++++++++++------- - 2 files changed, 26 insertions(+), 79 deletions(-) - -diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif -index 8810231ac..78c588d08 100644 ---- a/ldap/schema/10rfc2307compat.ldif -+++ b/ldap/schema/10rfc2307compat.ldif -@@ -176,50 +176,6 @@ attributeTypes: ( - SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 - SINGLE-VALUE - ) --attributeTypes: ( -- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' -- DESC 'NIS public key' -- EQUALITY octetStringMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' -- DESC 'NIS secret key' -- EQUALITY octetStringMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.30 NAME 'nisDomain' -- DESC 'NIS domain' -- EQUALITY caseIgnoreIA5Match -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.31 NAME 'automountMapName' -- DESC 'automount Map Name' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.32 NAME 'automountKey' -- DESC 'Automount Key value' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.33 NAME 'automountInformation' -- DESC 'Automount information' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) - # end of attribute types - beginning of objectclasses - objectClasses: ( - 1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY -@@ -324,28 +280,6 @@ objectClasses: ( - seeAlso $ serialNumber' - MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) - ) --objectClasses: ( -- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY -- DESC 'An object with a public and secret key' -- MUST ( cn $ nisPublicKey $ nisSecretKey ) -- MAY ( uidNumber $ description ) -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY -- DESC 'Associates a NIS domain with a naming context' -- MUST nisDomain -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL -- MUST ( automountMapName ) -- MAY description -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL -- DESC 'Automount information' -- MUST ( automountKey $ automountInformation ) -- MAY description -- ) - ## namedObject is needed for groups without members - objectClasses: ( - 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL -diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif -index 084e9ec30..de3922aa2 100644 ---- a/ldap/schema/60autofs.ldif -+++ b/ldap/schema/60autofs.ldif -@@ -6,7 +6,23 @@ dn: cn=schema - ################################################################################ - # - attributeTypes: ( -- 1.3.6.1.1.1.1.33 -+ 1.3.6.1.1.1.1.31 NAME 'automountMapName' -+ DESC 'automount Map Name' -+ EQUALITY caseExactIA5Match -+ SUBSTR caseExactIA5SubstringsMatch -+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -+ SINGLE-VALUE -+ ) -+attributeTypes: ( -+ 1.3.6.1.1.1.1.32 NAME 'automountKey' -+ DESC 'Automount Key value' -+ EQUALITY caseExactIA5Match -+ SUBSTR caseExactIA5SubstringsMatch -+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -+ SINGLE-VALUE -+ ) -+attributeTypes: ( -+ 1.3.6.1.1.1.1.33 - NAME 'automountInformation' - DESC 'Information used by the autofs automounter' - EQUALITY caseExactIA5Match -@@ -18,25 +34,22 @@ attributeTypes: ( - ################################################################################ - # - objectClasses: ( -- 1.3.6.1.1.1.2.17 -- NAME 'automount' -- DESC 'An entry in an automounter map' -+ 1.3.6.1.1.1.2.16 -+ NAME 'automountMap' -+ DESC 'An group of related automount objects' - SUP top - STRUCTURAL -- MUST ( cn $ automountInformation ) -- MAY ( description ) -+ MAY ( ou $ automountMapName $ description ) - X-ORIGIN 'draft-howard-rfc2307bis' - ) --# --################################################################################ --# - objectClasses: ( -- 1.3.6.1.1.1.2.16 -- NAME 'automountMap' -- DESC 'An group of related automount objects' -+ 1.3.6.1.1.1.2.17 -+ NAME 'automount' -+ DESC 'An entry in an automounter map' - SUP top - STRUCTURAL -- MUST ( ou ) -+ MUST ( automountInformation ) -+ MAY ( cn $ description $ automountKey ) - X-ORIGIN 'draft-howard-rfc2307bis' - ) - # --- -2.26.2 - diff --git a/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch b/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch new file mode 100644 index 0000000..91de38c --- /dev/null +++ b/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch @@ -0,0 +1,192 @@ +From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Thu, 17 Dec 2020 08:22:23 +1000 +Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work + (#4503) + +Bug Description: EntryUUID can be duplicated in replication, +due to a missing check in assign_uuid + +Fix Description: Add a test case to determine how this occurs, +and add the correct check for existing entryUUID. + +fixes: https://github.com/389ds/389-ds-base/issues/4498 + +Author: William Brown + +Review by: @mreynolds389 +--- + .../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++ + rpm.mk | 2 +- + src/plugins/entryuuid/src/lib.rs | 20 ++++- + src/slapi_r_plugin/src/constants.rs | 2 + + src/slapi_r_plugin/src/pblock.rs | 7 ++ + 5 files changed, 106 insertions(+), 2 deletions(-) + create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py + +diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py +new file mode 100644 +index 000000000..a2ebc8ff7 +--- /dev/null ++++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py +@@ -0,0 +1,77 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import ldap ++import pytest ++import logging ++from lib389.topologies import topology_m2 as topo_m2 ++from lib389.idm.user import nsUserAccounts ++from lib389.paths import Paths ++from lib389.utils import ds_is_older ++from lib389._constants import * ++from lib389.replica import ReplicationManager ++ ++default_paths = Paths() ++ ++pytestmark = pytest.mark.tier1 ++ ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++ ++def test_entryuuid_with_replication(topo_m2): ++ """ Check that entryuuid works with replication ++ ++ :id: a5f15bf9-7f63-473a-840c-b9037b787024 ++ ++ :setup: two node mmr ++ ++ :steps: ++ 1. Create an entry on one server ++ 2. Wait for replication ++ 3. Assert it is on the second ++ ++ :expectedresults: ++ 1. Success ++ 1. Success ++ 1. Success ++ """ ++ ++ server_a = topo_m2.ms["supplier1"] ++ server_b = topo_m2.ms["supplier2"] ++ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) ++ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ ++ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000) ++ euuid_a = account_a.get_attr_vals_utf8('entryUUID') ++ print("🧩 %s" % euuid_a) ++ assert(euuid_a is not None) ++ assert(len(euuid_a) == 1) ++ ++ repl.wait_for_replication(server_a, server_b) ++ ++ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000") ++ euuid_b = account_b.get_attr_vals_utf8('entryUUID') ++ print("🧩 %s" % euuid_b) ++ ++ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,)) ++ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,)) ++ ++ assert(euuid_b is not None) ++ assert(len(euuid_b) == 1) ++ assert(euuid_b == euuid_a) ++ ++ account_b.set("description", "update") ++ repl.wait_for_replication(server_b, server_a) ++ ++ euuid_c = account_a.get_attr_vals_utf8('entryUUID') ++ print("🧩 %s" % euuid_c) ++ assert(euuid_c is not None) ++ assert(len(euuid_c) == 1) ++ assert(euuid_c == euuid_a) ++ +diff --git a/rpm.mk b/rpm.mk +index 02f5bba37..d1cdff7df 100644 +--- a/rpm.mk ++++ b/rpm.mk +@@ -25,7 +25,7 @@ TSAN_ON = 0 + # Undefined Behaviour Sanitizer + UBSAN_ON = 0 + +-RUST_ON = 0 ++RUST_ON = 1 + + # PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows. + PERL_ON = 1 +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +index 92977db05..0197c5e83 100644 +--- a/src/plugins/entryuuid/src/lib.rs ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma + fn assign_uuid(e: &mut EntryRef) { + let sdn = e.get_sdnref(); + ++ // 🚧 safety barrier 🚧 ++ if e.contains_attr("entryUUID") { ++ log_error!( ++ ErrorLevel::Trace, ++ "assign_uuid -> entryUUID exists, skipping dn {}", ++ sdn.to_dn_string() ++ ); ++ return; ++ } ++ + // We could consider making these lazy static. + let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn"); + let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn"); +@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid { + } + + fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { +- log_error!(ErrorLevel::Trace, "betxn_pre_add"); ++ if pb.get_is_replicated_operation() { ++ log_error!( ++ ErrorLevel::Trace, ++ "betxn_pre_add -> replicated operation, will not change" ++ ); ++ return Ok(()); ++ } ++ ++ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start"); + + let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; + assign_uuid(&mut e); +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +index 34845c2f4..aa0691acc 100644 +--- a/src/slapi_r_plugin/src/constants.rs ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -164,6 +164,8 @@ pub(crate) enum PblockType { + AddEntry = 60, + /// SLAPI_BACKEND + Backend = 130, ++ /// SLAPI_IS_REPLICATED_OPERATION ++ IsReplicationOperation = 142, + /// SLAPI_PLUGIN_MR_NAMES + MRNames = 624, + /// SLAPI_PLUGIN_SYNTAX_NAMES +diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs +index 0f83914f3..718ff2ca7 100644 +--- a/src/slapi_r_plugin/src/pblock.rs ++++ b/src/slapi_r_plugin/src/pblock.rs +@@ -279,4 +279,11 @@ impl PblockRef { + pub fn get_op_result(&mut self) -> i32 { + self.get_value_i32(PblockType::OpResult).unwrap_or(-1) + } ++ ++ pub fn get_is_replicated_operation(&mut self) -> bool { ++ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0); ++ // Because rust returns the result of the last evaluation, we can ++ // just return if not equal 0. ++ i != 0 ++ } + } +-- +2.26.3 + diff --git a/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch b/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch deleted file mode 100644 index 82fdf9d..0000000 --- a/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 12 Aug 2020 12:46:42 -0400 -Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and - 10rfc2307compat - -Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to - match the standard OID, but this breaks replication with - older versions of DS. - -Fix Description: Continue to use the old(invalid?) oid for nisMap so that - replication does not break in a mixed version environment. - -Fixes: https://pagure.io/389-ds-base/issue/50933 - -Reviewed by: firstyear & tbordaz(Thanks!!) ---- - ldap/schema/10rfc2307compat.ldif | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif -index 78c588d08..8ba72e1e3 100644 ---- a/ldap/schema/10rfc2307compat.ldif -+++ b/ldap/schema/10rfc2307compat.ldif -@@ -253,7 +253,7 @@ objectClasses: ( - MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) - ) - objectClasses: ( -- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL -+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL - DESC 'A generic abstraction of a NIS map' - MUST nisMapName - MAY description --- -2.26.2 - diff --git a/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch b/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch new file mode 100644 index 0000000..0affdf6 --- /dev/null +++ b/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch @@ -0,0 +1,626 @@ +From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 7 Dec 2020 11:00:45 -0500 +Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in + closed environment + +Description: Add Makefile flags and update rpm.mk that allow updating + and downloading all the cargo/rust dependencies. This is + needed for nightly tests and upstream/downstream releases. + +Fixes: https://github.com/389ds/389-ds-base/issues/4421 + +Reviewed by: firstyear(Thanks!) +--- + rpm.mk | 3 +- + rpm/389-ds-base.spec.in | 2 +- + src/Cargo.lock | 563 ---------------------------------------- + 3 files changed, 3 insertions(+), 565 deletions(-) + delete mode 100644 src/Cargo.lock + +diff --git a/rpm.mk b/rpm.mk +index d1cdff7df..ef810c63c 100644 +--- a/rpm.mk ++++ b/rpm.mk +@@ -44,6 +44,7 @@ update-cargo-dependencies: + cargo update --manifest-path=./src/Cargo.toml + + download-cargo-dependencies: ++ cargo update --manifest-path=./src/Cargo.toml + cargo vendor --manifest-path=./src/Cargo.toml + cargo fetch --manifest-path=./src/Cargo.toml + tar -czf vendor.tar.gz vendor +@@ -114,7 +115,7 @@ rpmbuildprep: + cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \ + fi + +-srpms: rpmroot srpmdistdir tarballs rpmbuildprep ++srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep + rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec + cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/ + rm -rf $(RPMBUILD) +diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in +index b9f85489b..d80de8422 100644 +--- a/rpm/389-ds-base.spec.in ++++ b/rpm/389-ds-base.spec.in +@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug" + %endif + + %if %{use_rust} +-RUST_FLAGS="--enable-rust" ++RUST_FLAGS="--enable-rust --enable-rust-offline" + %endif + + %if %{use_legacy} +diff --git a/src/Cargo.lock b/src/Cargo.lock +deleted file mode 100644 +index 33d7b8f23..000000000 +--- a/src/Cargo.lock ++++ /dev/null +@@ -1,563 +0,0 @@ +-# This file is automatically @generated by Cargo. +-# It is not intended for manual editing. +-[[package]] +-name = "ansi_term" +-version = "0.11.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +-dependencies = [ +- "winapi", +-] +- +-[[package]] +-name = "atty" +-version = "0.2.14" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +-dependencies = [ +- "hermit-abi", +- "libc", +- "winapi", +-] +- +-[[package]] +-name = "autocfg" +-version = "1.0.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +- +-[[package]] +-name = "base64" +-version = "0.13.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +- +-[[package]] +-name = "bitflags" +-version = "1.2.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +- +-[[package]] +-name = "byteorder" +-version = "1.4.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +- +-[[package]] +-name = "cbindgen" +-version = "0.9.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd" +-dependencies = [ +- "clap", +- "log", +- "proc-macro2", +- "quote", +- "serde", +- "serde_json", +- "syn", +- "tempfile", +- "toml", +-] +- +-[[package]] +-name = "cc" +-version = "1.0.67" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +-dependencies = [ +- "jobserver", +-] +- +-[[package]] +-name = "cfg-if" +-version = "1.0.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +- +-[[package]] +-name = "clap" +-version = "2.33.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +-dependencies = [ +- "ansi_term", +- "atty", +- "bitflags", +- "strsim", +- "textwrap", +- "unicode-width", +- "vec_map", +-] +- +-[[package]] +-name = "entryuuid" +-version = "0.1.0" +-dependencies = [ +- "cc", +- "libc", +- "paste", +- "slapi_r_plugin", +- "uuid", +-] +- +-[[package]] +-name = "entryuuid_syntax" +-version = "0.1.0" +-dependencies = [ +- "cc", +- "libc", +- "paste", +- "slapi_r_plugin", +- "uuid", +-] +- +-[[package]] +-name = "fernet" +-version = "0.1.4" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" +-dependencies = [ +- "base64", +- "byteorder", +- "getrandom", +- "openssl", +- "zeroize", +-] +- +-[[package]] +-name = "foreign-types" +-version = "0.3.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +-dependencies = [ +- "foreign-types-shared", +-] +- +-[[package]] +-name = "foreign-types-shared" +-version = "0.1.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +- +-[[package]] +-name = "getrandom" +-version = "0.2.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +-dependencies = [ +- "cfg-if", +- "libc", +- "wasi", +-] +- +-[[package]] +-name = "hermit-abi" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +-dependencies = [ +- "libc", +-] +- +-[[package]] +-name = "itoa" +-version = "0.4.7" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +- +-[[package]] +-name = "jobserver" +-version = "0.1.22" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +-dependencies = [ +- "libc", +-] +- +-[[package]] +-name = "lazy_static" +-version = "1.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +- +-[[package]] +-name = "libc" +-version = "0.2.94" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" +- +-[[package]] +-name = "librnsslapd" +-version = "0.1.0" +-dependencies = [ +- "cbindgen", +- "libc", +- "slapd", +-] +- +-[[package]] +-name = "librslapd" +-version = "0.1.0" +-dependencies = [ +- "cbindgen", +- "libc", +- "slapd", +-] +- +-[[package]] +-name = "log" +-version = "0.4.14" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +-dependencies = [ +- "cfg-if", +-] +- +-[[package]] +-name = "once_cell" +-version = "1.7.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +- +-[[package]] +-name = "openssl" +-version = "0.10.34" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" +-dependencies = [ +- "bitflags", +- "cfg-if", +- "foreign-types", +- "libc", +- "once_cell", +- "openssl-sys", +-] +- +-[[package]] +-name = "openssl-sys" +-version = "0.9.63" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" +-dependencies = [ +- "autocfg", +- "cc", +- "libc", +- "pkg-config", +- "vcpkg", +-] +- +-[[package]] +-name = "paste" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +-dependencies = [ +- "paste-impl", +- "proc-macro-hack", +-] +- +-[[package]] +-name = "paste-impl" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +-dependencies = [ +- "proc-macro-hack", +-] +- +-[[package]] +-name = "pkg-config" +-version = "0.3.19" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +- +-[[package]] +-name = "ppv-lite86" +-version = "0.2.10" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +- +-[[package]] +-name = "proc-macro-hack" +-version = "0.5.19" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +- +-[[package]] +-name = "proc-macro2" +-version = "1.0.27" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +-dependencies = [ +- "unicode-xid", +-] +- +-[[package]] +-name = "quote" +-version = "1.0.9" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +-dependencies = [ +- "proc-macro2", +-] +- +-[[package]] +-name = "rand" +-version = "0.8.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +-dependencies = [ +- "libc", +- "rand_chacha", +- "rand_core", +- "rand_hc", +-] +- +-[[package]] +-name = "rand_chacha" +-version = "0.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +-dependencies = [ +- "ppv-lite86", +- "rand_core", +-] +- +-[[package]] +-name = "rand_core" +-version = "0.6.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +-dependencies = [ +- "getrandom", +-] +- +-[[package]] +-name = "rand_hc" +-version = "0.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +-dependencies = [ +- "rand_core", +-] +- +-[[package]] +-name = "redox_syscall" +-version = "0.2.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +-dependencies = [ +- "bitflags", +-] +- +-[[package]] +-name = "remove_dir_all" +-version = "0.5.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +-dependencies = [ +- "winapi", +-] +- +-[[package]] +-name = "rsds" +-version = "0.1.0" +- +-[[package]] +-name = "ryu" +-version = "1.0.5" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +- +-[[package]] +-name = "serde" +-version = "1.0.126" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +-dependencies = [ +- "serde_derive", +-] +- +-[[package]] +-name = "serde_derive" +-version = "1.0.126" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +-] +- +-[[package]] +-name = "serde_json" +-version = "1.0.64" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +-dependencies = [ +- "itoa", +- "ryu", +- "serde", +-] +- +-[[package]] +-name = "slapd" +-version = "0.1.0" +-dependencies = [ +- "fernet", +-] +- +-[[package]] +-name = "slapi_r_plugin" +-version = "0.1.0" +-dependencies = [ +- "lazy_static", +- "libc", +- "paste", +- "uuid", +-] +- +-[[package]] +-name = "strsim" +-version = "0.8.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +- +-[[package]] +-name = "syn" +-version = "1.0.72" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +-dependencies = [ +- "proc-macro2", +- "quote", +- "unicode-xid", +-] +- +-[[package]] +-name = "synstructure" +-version = "0.12.4" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +- "unicode-xid", +-] +- +-[[package]] +-name = "tempfile" +-version = "3.2.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +-dependencies = [ +- "cfg-if", +- "libc", +- "rand", +- "redox_syscall", +- "remove_dir_all", +- "winapi", +-] +- +-[[package]] +-name = "textwrap" +-version = "0.11.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +-dependencies = [ +- "unicode-width", +-] +- +-[[package]] +-name = "toml" +-version = "0.5.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +-dependencies = [ +- "serde", +-] +- +-[[package]] +-name = "unicode-width" +-version = "0.1.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +- +-[[package]] +-name = "unicode-xid" +-version = "0.2.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +- +-[[package]] +-name = "uuid" +-version = "0.8.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +-dependencies = [ +- "getrandom", +-] +- +-[[package]] +-name = "vcpkg" +-version = "0.2.12" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d" +- +-[[package]] +-name = "vec_map" +-version = "0.8.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +- +-[[package]] +-name = "wasi" +-version = "0.10.2+wasi-snapshot-preview1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +- +-[[package]] +-name = "winapi" +-version = "0.3.9" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +-dependencies = [ +- "winapi-i686-pc-windows-gnu", +- "winapi-x86_64-pc-windows-gnu", +-] +- +-[[package]] +-name = "winapi-i686-pc-windows-gnu" +-version = "0.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +- +-[[package]] +-name = "winapi-x86_64-pc-windows-gnu" +-version = "0.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +- +-[[package]] +-name = "zeroize" +-version = "1.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +-dependencies = [ +- "zeroize_derive", +-] +- +-[[package]] +-name = "zeroize_derive" +-version = "1.1.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +- "synstructure", +-] +-- +2.26.3 + diff --git a/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch b/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch deleted file mode 100644 index 4269446..0000000 --- a/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 4 Jun 2020 11:51:53 +1000 -Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable - -Bug Description: We previously did delayed allocation -of mutexs, which @tbordaz noted can lead to high usage -of the pthread mutex init routines. This was done under -the conntable lock, as well as cleaning the connection - -Fix Description: rather than delayed allocation, we -initialise everything at start up instead, which means -that while startup may have a delay, at run time we have -a smaller and lighter connection allocation routine, -that is able to release the CT lock sooner. - -https://pagure.io/389-ds-base/issue/51131 - -Author: William Brown - -Review by: ??? ---- - ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++--------------- - 1 file changed, 47 insertions(+), 39 deletions(-) - -diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c -index b23dc3435..feb9c0d75 100644 ---- a/ldap/servers/slapd/conntable.c -+++ b/ldap/servers/slapd/conntable.c -@@ -138,10 +138,21 @@ connection_table_new(int table_size) - ct->conn_next_offset = 1; - ct->conn_free_offset = 1; - -+ pthread_mutexattr_t monitor_attr = {0}; -+ pthread_mutexattr_init(&monitor_attr); -+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); -+ - /* We rely on the fact that we called calloc, which zeros the block, so we don't - * init any structure element unless a zero value is troublesome later - */ - for (i = 0; i < table_size; i++) { -+ /* -+ * Technically this is a no-op due to calloc, but we should always be -+ * careful with things like this .... -+ */ -+ ct->c[i].c_state = CONN_STATE_FREE; -+ /* Start the conn setup. */ -+ - LBER_SOCKET invalid_socket; - /* DBDB---move this out of here once everything works */ - ct->c[i].c_sb = ber_sockbuf_alloc(); -@@ -161,11 +172,20 @@ connection_table_new(int table_size) - ct->c[i].c_prev = NULL; - ct->c[i].c_ci = i; - ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX; -- /* -- * Technically this is a no-op due to calloc, but we should always be -- * careful with things like this .... -- */ -- ct->c[i].c_state = CONN_STATE_FREE; -+ -+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); -+ exit(1); -+ } -+ -+ ct->c[i].c_pdumutex = PR_NewLock(); -+ if (ct->c[i].c_pdumutex == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ -+ /* Ready to rock, mark as such. */ -+ ct->c[i].c_state = CONN_STATE_INIT; - /* Prepare the connection into the freelist. */ - ct->c_freelist[i] = &(ct->c[i]); - } -@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd) - /* Never use slot 0 */ - ct->conn_next_offset += 1; - } -- /* Now prep the slot for usage. */ -- PR_ASSERT(c->c_next == NULL); -- PR_ASSERT(c->c_prev == NULL); -- PR_ASSERT(c->c_extension == NULL); -- -- if (c->c_state == CONN_STATE_FREE) { -- -- c->c_state = CONN_STATE_INIT; -- -- pthread_mutexattr_t monitor_attr = {0}; -- pthread_mutexattr_init(&monitor_attr); -- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); -- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); -- exit(1); -- } -- -- c->c_pdumutex = PR_NewLock(); -- if (c->c_pdumutex == NULL) { -- c->c_pdumutex = NULL; -- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); -- exit(1); -- } -- } -- /* Let's make sure there's no cruft left on there from the last time this connection was used. */ -- /* Note: no need to lock c->c_mutex because this function is only -- * called by one thread (the slapd_daemon thread), and if we got this -- * far then `c' is not being used by any operation threads, etc. -- */ -- connection_cleanup(c); -- c->c_ct = ct; /* pointer to connection table that owns this connection */ -+ PR_Unlock(ct->table_mutex); - } else { -- /* couldn't find a Connection */ -+ /* couldn't find a Connection, table must be full */ - slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n"); -+ PR_Unlock(ct->table_mutex); -+ return NULL; - } - -- /* We could move this to before the c alloc as there is no point to remain here. */ -- PR_Unlock(ct->table_mutex); -+ /* Now prep the slot for usage. */ -+ PR_ASSERT(c != NULL); -+ PR_ASSERT(c->c_next == NULL); -+ PR_ASSERT(c->c_prev == NULL); -+ PR_ASSERT(c->c_extension == NULL); -+ PR_ASSERT(c->c_state == CONN_STATE_INIT); -+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */ -+ -+ /* -+ * Note: no need to lock c->c_mutex because this function is only -+ * called by one thread (the slapd_daemon thread), and if we got this -+ * far then `c' is not being used by any operation threads, etc. The -+ * memory ordering will be provided by the work queue sending c to a -+ * thread. -+ */ -+ connection_cleanup(c); -+ /* pointer to connection table that owns this connection */ -+ c->c_ct = ct; - - return c; - } --- -2.26.2 - diff --git a/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch b/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch deleted file mode 100644 index 41f9315..0000000 --- a/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch +++ /dev/null @@ -1,66 +0,0 @@ -From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 25 Nov 2020 18:07:34 +0100 -Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue - internal searches with filter containing unescaped chars (#4439) - -Bug description: - Previous fix is buggy because slapi_filter_escape_filter_value returns - a escaped filter component not an escaped assertion value. - -Fix description: - use the escaped filter component - -relates: https://github.com/389ds/389-ds-base/issues/4297 - -Reviewed by: William Brown - -Platforms tested: F31 ---- - ldap/servers/plugins/replication/urp.c | 16 ++++++++-------- - 1 file changed, 8 insertions(+), 8 deletions(-) - -diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c -index f41dbc72d..ed340c9d8 100644 ---- a/ldap/servers/plugins/replication/urp.c -+++ b/ldap/servers/plugins/replication/urp.c -@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry, - Slapi_Entry **entries = NULL; - Slapi_PBlock *newpb; - char *basedn = slapi_entry_get_ndn(entry); -- char *escaped_basedn; -+ char *escaped_filter; - const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry)); -- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn); -+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn); - -- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); -- slapi_ch_free((void **)&escaped_basedn); -+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); -+ slapi_ch_free((void **)&escaped_filter); - newpb = slapi_pblock_new(); - slapi_search_internal_set_pb(newpb, - slapi_sdn_get_dn(suffix), /* Base DN */ -@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr - Slapi_Entry **entries = NULL; - Slapi_PBlock *newpb; - const char *basedn = slapi_sdn_get_dn(parentdn); -- char *escaped_basedn; -- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); -+ char *escaped_filter; -+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); - - char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn"); - CSN *conflict_csn = csn_new_by_string(conflict_csnstr); - CSN *tombstone_csn = NULL; - -- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); -- slapi_ch_free((void **)&escaped_basedn); -+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); -+ slapi_ch_free((void **)&escaped_filter); - newpb = slapi_pblock_new(); - char *parent_dn = slapi_dn_parent (basedn); - slapi_search_internal_set_pb(newpb, --- -2.26.2 - diff --git a/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch b/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch new file mode 100644 index 0000000..f5edc9d --- /dev/null +++ b/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch @@ -0,0 +1,412 @@ +From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Fri, 26 Jun 2020 10:27:56 +1000 +Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking + +Bug Description: Previously pblock.c assumed that all plugin +names were static c strings. Rust can't create static C +strings, so these were intentionally leaked. + +Fix Description: Rather than leak these, we do a dup/free +through the slapiplugin struct instead, meaning we can use +ephemeral, and properly managed strings in rust. This does not +affect any other existing code which will still handle the +static strings correctly. + +https://pagure.io/389-ds-base/issue/51175 + +Author: William Brown + +Review by: mreynolds, tbordaz (Thanks!) +--- + Makefile.am | 1 + + configure.ac | 2 +- + ldap/servers/slapd/pagedresults.c | 6 +-- + ldap/servers/slapd/pblock.c | 9 ++-- + ldap/servers/slapd/plugin.c | 7 +++ + ldap/servers/slapd/pw_verify.c | 1 + + ldap/servers/slapd/tools/pwenc.c | 2 +- + src/slapi_r_plugin/README.md | 6 +-- + src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++ + src/slapi_r_plugin/src/lib.rs | 8 ++-- + src/slapi_r_plugin/src/macros.rs | 17 +++++--- + src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------ + 12 files changed, 85 insertions(+), 63 deletions(-) + create mode 100644 src/slapi_r_plugin/src/charray.rs + +diff --git a/Makefile.am b/Makefile.am +index 627953850..36434cf17 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + libslapi_r_plugin_SOURCES = \ + src/slapi_r_plugin/src/backend.rs \ + src/slapi_r_plugin/src/ber.rs \ ++ src/slapi_r_plugin/src/charray.rs \ + src/slapi_r_plugin/src/constants.rs \ + src/slapi_r_plugin/src/dn.rs \ + src/slapi_r_plugin/src/entry.rs \ +diff --git a/configure.ac b/configure.ac +index b3cf77d08..61bf35e4a 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then + debug_defs="-DDEBUG -DMCC_DEBUG" + debug_cflags="-g3 -O0 -rdynamic" + debug_cxxflags="-g3 -O0 -rdynamic" +- debug_rust_defs="-C debuginfo=2" ++ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace" + cargo_defs="" + rust_target_dir="debug" + else +diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c +index d8b8798b6..e3444e944 100644 +--- a/ldap/servers/slapd/pagedresults.c ++++ b/ldap/servers/slapd/pagedresults.c +@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock) + int i; + PagedResults *prp = NULL; + +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */ + + if (NULL == conn) { +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */ + return 0; + } + +@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock) + if (needlock) { + pthread_mutex_unlock(&(conn->c_mutex)); + } +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */ + return rc; + } + +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index 1ad9d0399..f7d1f8885 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { + return (-1); + } +- pblock->pb_plugin->plg_syntax_names = (char **)value; ++ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL); ++ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value); + break; + case SLAPI_PLUGIN_SYNTAX_OID: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { + return (-1); + } +- pblock->pb_plugin->plg_syntax_oid = (char *)value; ++ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL); ++ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value); + break; + case SLAPI_PLUGIN_SYNTAX_FLAGS: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { +@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) { + return (-1); + } +- pblock->pb_plugin->plg_mr_names = (char **)value; ++ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL); ++ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value); + break; + case SLAPI_PLUGIN_MR_COMPARE: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) { +diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c +index 282b98738..e6b48de60 100644 +--- a/ldap/servers/slapd/plugin.c ++++ b/ldap/servers/slapd/plugin.c +@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin) + if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) { + slapi_ch_free_string(&plugin->plg_pwdstorageschemename); + } ++ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) { ++ slapi_ch_free_string(&plugin->plg_syntax_oid); ++ slapi_ch_array_free(plugin->plg_syntax_names); ++ } ++ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) { ++ slapi_ch_array_free(plugin->plg_mr_names); ++ } + release_componentid(plugin->plg_identity); + slapi_counter_destroy(&plugin->plg_op_counter); + if (!plugin->plg_group) { +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index 4f0944b73..4ff1fa2fd 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) { + if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) { + rc = SLAPI_BIND_SUCCESS; + } ++ slapi_ch_free_string(&key); + #endif + return rc; + } +diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c +index 1629c06cd..d89225e34 100644 +--- a/ldap/servers/slapd/tools/pwenc.c ++++ b/ldap/servers/slapd/tools/pwenc.c +@@ -34,7 +34,7 @@ + + int ldap_syslog; + int ldap_syslog_level; +-int slapd_ldap_debug = LDAP_DEBUG_ANY; ++/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */ + int detached; + FILE *error_logfp; + FILE *access_logfp; +diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md +index af9743ec9..1c9bcbf17 100644 +--- a/src/slapi_r_plugin/README.md ++++ b/src/slapi_r_plugin/README.md +@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html) + > warning about danger. + + This document will not detail the specifics of unsafe or the invariants you must adhere to for rust +-to work with C. ++to work with C. Failure to uphold these invariants will lead to less than optimal consequences. + + If you still want to see more about the plugin bindings, go on ... + +@@ -135,7 +135,7 @@ associated functions. + Now, you may notice that not all members of the trait are implemented. This is due to a feature + of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide + template versions of these functions. If you "overwrite" them, your implementation is used. Unlike +-OO, you may not inherit or call the default function. ++OO, you may not inherit or call the default function. + + If a default is not provided you *must* implement that function to be considered valid. Today (20200422) + this only applies to `start` and `close`. +@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h + As a result, this means that we must express in code, assertions about the proper ownership of memory + and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible + for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or +-*hand waving* magical failures that are eXtReMeLy FuN to debug. ++*hand waving* magical failures that are `eXtReMeLy FuN` to debug. + + ### Reference Types + +diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs +new file mode 100644 +index 000000000..d2e44693c +--- /dev/null ++++ b/src/slapi_r_plugin/src/charray.rs +@@ -0,0 +1,32 @@ ++use std::ffi::CString; ++use std::iter::once; ++use std::os::raw::c_char; ++use std::ptr; ++ ++pub struct Charray { ++ pin: Vec, ++ charray: Vec<*const c_char>, ++} ++ ++impl Charray { ++ pub fn new(input: &[&str]) -> Result { ++ let pin: Result, ()> = input ++ .iter() ++ .map(|s| CString::new(*s).map_err(|_e| ())) ++ .collect(); ++ ++ let pin = pin?; ++ ++ let charray: Vec<_> = pin ++ .iter() ++ .map(|s| s.as_ptr()) ++ .chain(once(ptr::null())) ++ .collect(); ++ ++ Ok(Charray { pin, charray }) ++ } ++ ++ pub fn as_ptr(&self) -> *const *const c_char { ++ self.charray.as_ptr() ++ } ++} +diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs +index 076907bae..be28cac95 100644 +--- a/src/slapi_r_plugin/src/lib.rs ++++ b/src/slapi_r_plugin/src/lib.rs +@@ -1,9 +1,11 @@ +-// extern crate lazy_static; ++#[macro_use] ++extern crate lazy_static; + + #[macro_use] + pub mod macros; + pub mod backend; + pub mod ber; ++pub mod charray; + mod constants; + pub mod dn; + pub mod entry; +@@ -20,6 +22,7 @@ pub mod value; + pub mod prelude { + pub use crate::backend::{BackendRef, BackendRefTxn}; + pub use crate::ber::BerValRef; ++ pub use crate::charray::Charray; + pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS}; + pub use crate::dn::{Sdn, SdnRef}; + pub use crate::entry::EntryRef; +@@ -30,8 +33,7 @@ pub mod prelude { + pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3}; + pub use crate::search::{Search, SearchScope}; + pub use crate::syntax_plugin::{ +- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr, +- SlapiSubMr, SlapiSyntaxPlugin1, ++ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1, + }; + pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef}; + pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef}; +diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs +index bc8dfa60f..97fc5d7ef 100644 +--- a/src/slapi_r_plugin/src/macros.rs ++++ b/src/slapi_r_plugin/src/macros.rs +@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks { + paste::item! { + use libc; + use std::convert::TryFrom; ++ use std::ffi::CString; + + #[no_mangle] + pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { +@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks { + }; + + // Setup the names/oids that this plugin provides syntaxes for. +- +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) }; +- match pb.register_syntax_names(name_ptr) { ++ // DS will clone these, so they can be ephemeral to this function. ++ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names"); ++ match pb.register_syntax_names(name_vec.as_ptr()) { + 0 => {}, + e => return e, + }; + +- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) }; +- match pb.register_syntax_oid(name_ptr) { ++ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid"); ++ match pb.register_syntax_oid(attr_oid.as_ptr()) { + 0 => {}, + e => return e, + }; +@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks { + e => return e, + }; + +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) }; ++ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names"); ++ let name_ptr = name_vec.as_ptr(); + // SLAPI_PLUGIN_MR_NAMES + match pb.register_mr_names(name_ptr) { + 0 => {}, +@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks { + e => return e, + }; + +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) }; ++ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names"); ++ let name_ptr = name_vec.as_ptr(); + // SLAPI_PLUGIN_MR_NAMES + match pb.register_mr_names(name_ptr) { + 0 => {}, +diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs +index e7d5c01bd..86f84bdd8 100644 +--- a/src/slapi_r_plugin/src/syntax_plugin.rs ++++ b/src/slapi_r_plugin/src/syntax_plugin.rs +@@ -1,11 +1,11 @@ + use crate::ber::BerValRef; + // use crate::constants::FilterType; ++use crate::charray::Charray; + use crate::error::PluginError; + use crate::pblock::PblockRef; + use crate::value::{ValueArray, ValueArrayRef}; + use std::cmp::Ordering; + use std::ffi::CString; +-use std::iter::once; + use std::os::raw::c_char; + use std::ptr; + +@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry { + mr_compat_syntax: *const *const c_char, + } + +-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char { +- let n = CString::new(name) +- .expect("An invalid string has been hardcoded!") +- .into_boxed_c_str(); +- let n_ptr = n.as_ptr(); +- // Now we intentionally leak the name here, and the pointer will remain valid. +- Box::leak(n); +- n_ptr +-} +- +-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char { +- let n_arr: Vec = names +- .iter() +- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!")) +- .collect(); +- let n_arr = n_arr.into_boxed_slice(); +- let n_ptr_arr: Vec<*const c_char> = n_arr +- .iter() +- .map(|v| v.as_ptr()) +- .chain(once(ptr::null())) +- .collect(); +- let n_ptr_arr = n_ptr_arr.into_boxed_slice(); +- +- // Now we intentionally leak these names here, +- let _r_n_arr = Box::leak(n_arr); +- let r_n_ptr_arr = Box::leak(n_ptr_arr); +- +- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char; +- name_ptr +-} +- + // oid - the oid of the matching rule + // name - the name of the mr + // desc - description +@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register( + syntax: &str, + compat_syntax: &[&str], + ) -> i32 { +- let oid_ptr = name_to_leaking_char(oid); +- let name_ptr = name_to_leaking_char(name); +- let desc_ptr = name_to_leaking_char(desc); +- let syntax_ptr = name_to_leaking_char(syntax); +- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax); ++ // Make everything CStrings that live long enough. ++ ++ let oid_cs = CString::new(oid).expect("invalid oid"); ++ let name_cs = CString::new(name).expect("invalid name"); ++ let desc_cs = CString::new(desc).expect("invalid desc"); ++ let syntax_cs = CString::new(syntax).expect("invalid syntax"); ++ ++ // We have to do this so the cstrings live long enough. ++ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax"); + + let new_mr = slapi_matchingRuleEntry { +- mr_oid: oid_ptr, ++ mr_oid: oid_cs.as_ptr(), + _mr_oidalias: ptr::null(), +- mr_name: name_ptr, +- mr_desc: desc_ptr, +- mr_syntax: syntax_ptr, ++ mr_name: name_cs.as_ptr(), ++ mr_desc: desc_cs.as_ptr(), ++ mr_syntax: syntax_cs.as_ptr(), + _mr_obsolete: 0, +- mr_compat_syntax: compat_syntax_ptr, ++ mr_compat_syntax: compat_syntax_ca.as_ptr(), + }; + + let new_mr_ptr = &new_mr as *const _; +-- +2.26.3 + diff --git a/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch b/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch deleted file mode 100644 index 9bca531..0000000 --- a/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch +++ /dev/null @@ -1,502 +0,0 @@ -From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 20 Nov 2020 17:47:18 -0500 -Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file - -Description: Add options to create, modify, delete, and display - the .dsrc CLI tool shortcut file. - -Relates: https://github.com/389ds/389-ds-base/issues/3657 - -Reviewed by: firstyear(Thanks!) ---- - dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++ - src/lib389/cli/dsctl | 2 + - src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++ - 3 files changed, 450 insertions(+) - create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py - create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py - -diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py -new file mode 100644 -index 000000000..1b27700ec ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py -@@ -0,0 +1,136 @@ -+import logging -+import pytest -+import os -+from os.path import expanduser -+from lib389.cli_base import FakeArgs -+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc -+from lib389._constants import DEFAULT_SUFFIX, DN_DM -+from lib389.topologies import topology_st as topo -+ -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def setup(topo, request): -+ """Preserve any existing .dsrc file""" -+ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ backup_file = dsrc_file + ".original" -+ if os.path.exists(dsrc_file): -+ os.rename(dsrc_file, backup_file) -+ -+ def fin(): -+ if os.path.exists(backup_file): -+ os.rename(backup_file, dsrc_file) -+ -+ request.addfinalizer(fin) -+ -+ -+def test_dsrc(topo, setup): -+ """Test "dsctl dsrc" command -+ -+ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb -+ :setup: Standalone Instance -+ :steps: -+ 1. Test creation works -+ 2. Test creating duplicate section -+ 3. Test adding an additional inst config works -+ 4. Test removing an instance works -+ 5. Test modify works -+ 6. Test delete works -+ 7. Test display fails when no file is present -+ -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. Success -+ """ -+ -+ inst = topo.standalone -+ serverid = inst.serverid -+ second_inst_name = "Second" -+ second_inst_basedn = "o=second" -+ different_suffix = "o=different" -+ -+ # Setup our args -+ args = FakeArgs() -+ args.basedn = DEFAULT_SUFFIX -+ args.binddn = DN_DM -+ args.json = None -+ args.uri = None -+ args.saslmech = None -+ args.tls_cacertdir = None -+ args.tls_cert = None -+ args.tls_key = None -+ args.tls_reqcert = None -+ args.starttls = None -+ args.cancel_starttls = None -+ args.pwdfile = None -+ args.do_it = True -+ -+ # Create a dsrc configuration entry -+ create_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains("basedn = " + args.basedn) -+ assert topo.logcap.contains("binddn = " + args.binddn) -+ assert topo.logcap.contains("[" + serverid + "]") -+ topo.logcap.flush() -+ -+ # Attempt to add duplicate instance section -+ with pytest.raises(ValueError): -+ create_dsrc(inst, log, args) -+ -+ # Test adding a second instance works correctly -+ inst.serverid = second_inst_name -+ args.basedn = second_inst_basedn -+ create_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains("basedn = " + args.basedn) -+ assert topo.logcap.contains("[" + second_inst_name + "]") -+ topo.logcap.flush() -+ -+ # Delete second instance -+ delete_dsrc(inst, log, args) -+ inst.serverid = serverid # Restore original instance name -+ display_dsrc(inst, topo.logcap.log, args) -+ assert not topo.logcap.contains("[" + second_inst_name + "]") -+ assert not topo.logcap.contains("basedn = " + args.basedn) -+ # Make sure first instance config is still present -+ assert topo.logcap.contains("[" + serverid + "]") -+ assert topo.logcap.contains("binddn = " + args.binddn) -+ topo.logcap.flush() -+ -+ # Modify the config -+ args.basedn = different_suffix -+ modify_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains(different_suffix) -+ topo.logcap.flush() -+ -+ # Remove an arg from the config -+ args.basedn = "" -+ modify_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert not topo.logcap.contains(different_suffix) -+ topo.logcap.flush() -+ -+ # Remove the last entry, which should delete the file -+ delete_dsrc(inst, log, args) -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ assert not os.path.exists(dsrc_file) -+ -+ # Make sure display fails -+ with pytest.raises(ValueError): -+ display_dsrc(inst, log, args) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl -index fe9bc10e9..69f069297 100755 ---- a/src/lib389/cli/dsctl -+++ b/src/lib389/cli/dsctl -@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls - from lib389.cli_ctl import health as cli_health - from lib389.cli_ctl import nsstate as cli_nsstate - from lib389.cli_ctl import dbgen as cli_dbgen -+from lib389.cli_ctl import dsrc as cli_dsrc - from lib389.cli_ctl.instance import instance_remove_all - from lib389.cli_base import ( - disconnect_instance, -@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers) - cli_health.create_parser(subparsers) - cli_nsstate.create_parser(subparsers) - cli_dbgen.create_parser(subparsers) -+cli_dsrc.create_parser(subparsers) - - argcomplete.autocomplete(parser) - -diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py -new file mode 100644 -index 000000000..e49c7f819 ---- /dev/null -+++ b/src/lib389/lib389/cli_ctl/dsrc.py -@@ -0,0 +1,312 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+ -+import json -+from os.path import expanduser -+from os import path, remove -+from ldapurl import isLDAPUrl -+from ldap.dn import is_dn -+import configparser -+ -+ -+def create_dsrc(inst, log, args): -+ """Create the .dsrc file -+ -+ [instance] -+ uri = ldaps://hostname:port -+ basedn = dc=example,dc=com -+ binddn = uid=user,.... -+ saslmech = [EXTERNAL|PLAIN] -+ tls_cacertdir = /path/to/cacertdir -+ tls_cert = /path/to/user.crt -+ tls_key = /path/to/user.key -+ tls_reqcert = [never, hard, allow] -+ starttls = [true, false] -+ pwdfile = /path/to/file -+ """ -+ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ -+ # Verify this section does not already exist -+ instances = config.sections() -+ if inst.serverid in instances: -+ raise ValueError("There is already a configuration section for this instance!") -+ -+ # Process and validate the args -+ config[inst.serverid] = {} -+ -+ if args.uri is not None: -+ if not isLDAPUrl(args.uri): -+ raise ValueError("The uri is not a valid LDAP URL!") -+ if args.uri.startswith("ldapi"): -+ # We must use EXTERNAL saslmech for LDAPI -+ args.saslmech = "EXTERNAL" -+ config[inst.serverid]['uri'] = args.uri -+ if args.basedn is not None: -+ if not is_dn(args.basedn): -+ raise ValueError("The basedn is not a valid DN!") -+ config[inst.serverid]['basedn'] = args.basedn -+ if args.binddn is not None: -+ if not is_dn(args.binddn): -+ raise ValueError("The binddn is not a valid DN!") -+ config[inst.serverid]['binddn'] = args.binddn -+ if args.saslmech is not None: -+ if args.saslmech not in ['EXTERNAL', 'PLAIN']: -+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") -+ config[inst.serverid]['saslmech'] = args.saslmech -+ if args.tls_cacertdir is not None: -+ if not path.exists(args.tls_cacertdir): -+ raise ValueError('--tls-cacertdir directory does not exist!') -+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir -+ if args.tls_cert is not None: -+ if not path.exists(args.tls_cert): -+ raise ValueError('--tls-cert does not point to an existing file!') -+ config[inst.serverid]['tls_cert'] = args.tls_cert -+ if args.tls_key is not None: -+ if not path.exists(args.tls_key): -+ raise ValueError('--tls-key does not point to an existing file!') -+ config[inst.serverid]['tls_key'] = args.tls_key -+ if args.tls_reqcert is not None: -+ if args.tls_reqcert not in ['never', 'hard', 'allow']: -+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') -+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert -+ if args.starttls: -+ config[inst.serverid]['starttls'] = 'true' -+ if args.pwdfile is not None: -+ if not path.exists(args.pwdfile): -+ raise ValueError('--pwdfile does not exist!') -+ config[inst.serverid]['pwdfile'] = args.pwdfile -+ -+ if len(config[inst.serverid]) == 0: -+ # No args set -+ raise ValueError("You must set at least one argument for the new dsrc file!") -+ -+ # Print a preview of the config -+ log.info(f'Updating "{dsrc_file}" with:\n') -+ log.info(f' [{inst.serverid}]') -+ for k, v in config[inst.serverid].items(): -+ log.info(f' {k} = {v}') -+ -+ # Perform confirmation? -+ if not args.do_it: -+ while 1: -+ val = input(f'\nUpdate "{dsrc_file}" ? [yes]: ').rstrip().lower() -+ if val == '' or val == 'y' or val == 'yes': -+ break -+ if val == 'n' or val == 'no': -+ return -+ -+ # Now write the file -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ -+ -+def modify_dsrc(inst, log, args): -+ """Modify the instance config -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ -+ if path.exists(dsrc_file): -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ -+ # Verify we have a section to modify -+ instances = config.sections() -+ if inst.serverid not in instances: -+ raise ValueError("There is no configuration section for this instance to modify!") -+ -+ # Process and validate the args -+ if args.uri is not None: -+ if not isLDAPUrl(args.uri): -+ raise ValueError("The uri is not a valid LDAP URL!") -+ if args.uri.startswith("ldapi"): -+ # We must use EXTERNAL saslmech for LDAPI -+ args.saslmech = "EXTERNAL" -+ if args.uri == '': -+ del config[inst.serverid]['uri'] -+ else: -+ config[inst.serverid]['uri'] = args.uri -+ if args.basedn is not None: -+ if not is_dn(args.basedn): -+ raise ValueError("The basedn is not a valid DN!") -+ if args.basedn == '': -+ del config[inst.serverid]['basedn'] -+ else: -+ config[inst.serverid]['basedn'] = args.basedn -+ if args.binddn is not None: -+ if not is_dn(args.binddn): -+ raise ValueError("The binddn is not a valid DN!") -+ if args.binddn == '': -+ del config[inst.serverid]['binddn'] -+ else: -+ config[inst.serverid]['binddn'] = args.binddn -+ if args.saslmech is not None: -+ if args.saslmech not in ['EXTERNAL', 'PLAIN']: -+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") -+ if args.saslmech == '': -+ del config[inst.serverid]['saslmech'] -+ else: -+ config[inst.serverid]['saslmech'] = args.saslmech -+ if args.tls_cacertdir is not None: -+ if not path.exists(args.tls_cacertdir): -+ raise ValueError('--tls-cacertdir directory does not exist!') -+ if args.tls_cacertdir == '': -+ del config[inst.serverid]['tls_cacertdir'] -+ else: -+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir -+ if args.tls_cert is not None: -+ if not path.exists(args.tls_cert): -+ raise ValueError('--tls-cert does not point to an existing file!') -+ if args.tls_cert == '': -+ del config[inst.serverid]['tls_cert'] -+ else: -+ config[inst.serverid]['tls_cert'] = args.tls_cert -+ if args.tls_key is not None: -+ if not path.exists(args.tls_key): -+ raise ValueError('--tls-key does not point to an existing file!') -+ if args.tls_key == '': -+ del config[inst.serverid]['tls_key'] -+ else: -+ config[inst.serverid]['tls_key'] = args.tls_key -+ if args.tls_reqcert is not None: -+ if args.tls_reqcert not in ['never', 'hard', 'allow']: -+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') -+ if args.tls_reqcert == '': -+ del config[inst.serverid]['tls_reqcert'] -+ else: -+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert -+ if args.starttls: -+ config[inst.serverid]['starttls'] = 'true' -+ if args.cancel_starttls: -+ config[inst.serverid]['starttls'] = 'false' -+ if args.pwdfile is not None: -+ if not path.exists(args.pwdfile): -+ raise ValueError('--pwdfile does not exist!') -+ if args.pwdfile == '': -+ del config[inst.serverid]['pwdfile'] -+ else: -+ config[inst.serverid]['pwdfile'] = args.pwdfile -+ -+ # Okay now rewrite the file -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ else: -+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') -+ -+ -+def delete_dsrc(inst, log, args): -+ """Delete the .dsrc file -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ if path.exists(dsrc_file): -+ if not args.do_it: -+ # Get confirmation -+ while 1: -+ val = input(f'\nAre you sure you want to remove this instances configuration ? [no]: ').rstrip().lower() -+ if val == 'y' or val == 'yes': -+ break -+ if val == '' or val == 'n' or val == 'no': -+ return -+ -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ instances = config.sections() -+ if inst.serverid not in instances: -+ raise ValueError("The is no configuration for this instance") -+ -+ # Update the config object -+ del config[inst.serverid] -+ -+ if len(config.sections()) == 0: -+ # The file would be empty so just delete it -+ try: -+ remove(dsrc_file) -+ log.info(f'Successfully removed: {dsrc_file}') -+ return -+ except OSError as e: -+ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}') -+ else: -+ # write the updated config -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ else: -+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ -+def display_dsrc(inst, log, args): -+ """Display the contents of the ~/.dsrc file -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ -+ if not path.exists(dsrc_file): -+ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!') -+ -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ instances = config.sections() -+ -+ for inst_section in instances: -+ if args.json: -+ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4)) -+ else: -+ log.info(f'[{inst_section}]') -+ for k, v in config[inst_section].items(): -+ log.info(f'{k} = {v}') -+ log.info("") -+ -+ -+def create_parser(subparsers): -+ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file") -+ subcommands = dsrc_parser.add_subparsers(help="action") -+ -+ # Create .dsrc file -+ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file') -+ dsrc_create_parser.set_defaults(func=create_dsrc) -+ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.") -+ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.") -+ dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used or authentication.") -+ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.") -+ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.") -+ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.") -+ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.") -+ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'") -+ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") -+ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.") -+ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.") -+ -+ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file') -+ dsrc_modify_parser.set_defaults(func=modify_dsrc) -+ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.") -+ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.") -+ dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used or authentication.") -+ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.") -+ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.") -+ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.") -+ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.") -+ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'") -+ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") -+ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.") -+ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.") -+ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.") -+ -+ # Delete the instance from the .dsrc file -+ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.') -+ dsrc_delete_parser.set_defaults(func=delete_dsrc) -+ dsrc_delete_parser.add_argument('--do-it', action='store_true', -+ help="Delete this instance's configuration from the .dsrc file.") -+ -+ # Display .dsrc file -+ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.') -+ dsrc_display_parser.set_defaults(func=display_dsrc) --- -2.26.2 - diff --git a/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch b/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch new file mode 100644 index 0000000..ce8b124 --- /dev/null +++ b/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch @@ -0,0 +1,37 @@ +From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 21 May 2021 13:09:12 -0400 +Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin + +Description: Enable the dormant interval feature in DNA plugin + +relates: https://github.com/389ds/389-ds-base/issues/4773 + +Review by: mreynolds (one line commit rule) +--- + ldap/servers/plugins/dna/dna.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index bf6b74a99..928a3f54a 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) + /* Set the default interval to 1 */ + entry->interval = 1; + +-#ifdef DNA_ENABLE_INTERVAL + value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL); + if (value) { + entry->interval = strtoull(value, 0, 0); +@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval); +-#endif + + value = slapi_entry_attr_get_charptr(e, DNA_GENERATE); + if (value) { +-- +2.26.3 + diff --git a/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch b/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch deleted file mode 100644 index 1a0df22..0000000 --- a/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch +++ /dev/null @@ -1,902 +0,0 @@ -From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Thu, 19 Nov 2020 10:21:10 +0100 -Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails - with unsupported operand (#4444) - -Bug description: -Got TypeError exception when usign: - dsctl -v slapd-localhost ldifgen users --suffix - dc=example,dc=com --parent ou=people,dc=example,dc=com - --number 100000 --generic --start-idx=50 -The reason is that by default python parser provides - value for numeric options: - as an integer if specified by "--option value" or - as a string if specified by "--option=value" - -Fix description: -convert the numeric parameters to integer when using it. - options impacted are: - - in users subcommand: --number , --start-idx - - in mod-load subcommand: --num-users, --add-users, - --del-users, --modrdn-users, --mod-users - -FYI: An alternative solution would have been to indicate the -parser that these values are an integer. But two reasons - leaded me to implement the first solution: - - first solution fix the problem for all users while the - second one fixes only dsctl command. - - first solution is easier to test: - I just added a new test file generated by a script - that duplicated existing ldifgen test, renamed the - test cases and replaced the numeric arguments by - strings. - Second solution would need to redesign the test framework - to be able to test the parser. - -relates: https://github.com/389ds/389-ds-base/issues/4440 - -Reviewed by: - -Platforms tested: F32 - -(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892) ---- - .../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++ - src/lib389/lib389/cli_ctl/dbgen.py | 10 +- - src/lib389/lib389/dbgen.py | 3 + - 3 files changed, 814 insertions(+), 5 deletions(-) - create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py - -diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py -new file mode 100644 -index 000000000..80ff63417 ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py -@@ -0,0 +1,806 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import time -+ -+""" -+ This file contains tests similar to dbgen_test.py -+ except that paramaters that are number are expressed as string -+ (to mimic the parameters parser default behavior which returns an -+ int when parsing "option value" and a string when parsing "option=value" -+ This file has been generated by usign: -+sed ' -+9r z1 -+s/ test_/ test_usan/ -+/args.*= [0-9]/s,[0-9]*$,"&", -+/:id:/s/.$/1/ -+' dbgen_test.py > dbgen_test_usan.py -+ ( with z1 file containing this comment ) -+""" -+ -+ -+ -+import subprocess -+import pytest -+ -+from lib389.cli_ctl.dbgen import * -+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates -+from lib389.idm.account import Accounts -+from lib389.idm.group import Groups -+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles -+from lib389.tasks import * -+from lib389.utils import * -+from lib389.topologies import topology_st -+from lib389.cli_base import FakeArgs -+ -+pytestmark = pytest.mark.tier0 -+ -+LOG_FILE = '/tmp/dbgen.log' -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def set_log_file_and_ldif(topology_st, request): -+ global ldif_file -+ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' -+ -+ fh = logging.FileHandler(LOG_FILE) -+ fh.setLevel(logging.DEBUG) -+ log.addHandler(fh) -+ -+ def fin(): -+ log.info('Delete files') -+ os.remove(LOG_FILE) -+ os.remove(ldif_file) -+ -+ request.addfinalizer(fin) -+ -+ -+def run_offline_import(instance, ldif_file): -+ log.info('Stopping the server and running offline import...') -+ instance.stop() -+ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, -+ import_file=ldif_file) -+ instance.start() -+ -+ -+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): -+ LDAP_MOD = '/usr/bin/ldapmodify' -+ log.info('Add entries from ldif file with ldapmodify') -+ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, -+ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file]) -+ if output_to_check is not None: -+ assert output_to_check in ensure_str(result) -+ -+ -+def check_value_in_log_and_reset(content_list): -+ with open(LOG_FILE, 'r+') as f: -+ file_content = f.read() -+ log.info('Check if content is present in output') -+ for item in content_list: -+ assert item in file_content -+ -+ log.info('Reset log file for next test') -+ f.truncate(0) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create ldif with users -+ -+ :id: 426b5b94-9923-454d-a736-7e71ca985e91 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with users -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.suffix = DEFAULT_SUFFIX -+ args.parent = 'ou=people,dc=example,dc=com' -+ args.number = "1000" -+ args.rdn_cn = False -+ args.generic = True -+ args.start_idx = "50" -+ args.localize = False -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'suffix={}'.format(args.suffix), -+ 'parent={}'.format(args.parent), -+ 'number={}'.format(args.number), -+ 'rdn-cn={}'.format(args.rdn_cn), -+ 'generic={}'.format(args.generic), -+ 'start-idx={}'.format(args.start_idx), -+ 'localize={}'.format(args.localize), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create users ldif') -+ dbgen_create_users(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ run_offline_import(standalone, ldif_file) -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create ldif with group -+ -+ :id: 97207413-9a93-4065-a5ec-63aa93801a31 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with group -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'myGroup' -+ args.parent = 'ou=groups,dc=example,dc=com' -+ args.suffix = DEFAULT_SUFFIX -+ args.number = "1" -+ args.num_members = "1000" -+ args.create_members = True -+ args.member_attr = 'uniquemember' -+ args.member_parent = 'ou=people,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'number={}'.format(args.number), -+ 'suffix={}'.format(args.suffix), -+ 'num-members={}'.format(args.num_members), -+ 'create-members={}'.format(args.create_members), -+ 'member-parent={}'.format(args.member_parent), -+ 'member-attr={}'.format(args.member_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create group ldif') -+ dbgen_create_groups(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ log.info('Check that group is imported') -+ groups = Groups(standalone, DEFAULT_SUFFIX) -+ assert groups.exists(args.NAME + '-1') -+ new_group = groups.get(args.NAME + '-1') -+ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with classic COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'classic' -+ args.NAME = 'My_Postal_Def' -+ args.parent = 'ou=cos definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = 'businessCategory' -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-specifier={}'.format(args.cos_specifier), -+ 'cos-template={}'.format(args.cos_template), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosClassicDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosTemplateDN', args.cos_template) -+ assert new_cos.present('cosSpecifier', args.cos_specifier) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with pointer COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'pointer' -+ args.NAME = 'My_Postal_Def_pointer' -+ args.parent = 'ou=cos pointer definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = None -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-template={}'.format(args.cos_template), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosPointerDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosTemplateDN', args.cos_template) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: ab4b799e-e801-432a-a61d-badad2628201 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with indirect COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'indirect' -+ args.NAME = 'My_Postal_Def_indirect' -+ args.parent = 'ou=cos indirect definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = 'businessCategory' -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-specifier={}'.format(args.cos_specifier), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosIndirectDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS template -+ -+ :id: 544017c7-4a82-4e7d-a047-00b68a28e071 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with COS template -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'My_Template' -+ args.parent = 'ou=cos templates,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_priority = "1" -+ args.cos_attr_val = 'postalcode:12345' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-priority={}'.format(args.cos_priority), -+ 'cos-attr-val={}'.format(args.cos_attr_val), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS template ldif') -+ dbgen_create_cos_tmp(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS template is imported') -+ cos_temp = CosTemplates(standalone, args.parent) -+ assert cos_temp.exists(args.NAME) -+ new_cos = cos_temp.get(args.NAME) -+ assert new_cos.present('cosPriority', str(args.cos_priority)) -+ assert new_cos.present('postalcode', '12345') -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a managed role -+ -+ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with managed role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ -+ args.NAME = 'My_Managed_Role' -+ args.parent = 'ou=managed roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'managed' -+ args.filter = None -+ args.role_dn = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create managed role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that managed role is imported') -+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a filtered role -+ -+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with filtered role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ -+ args.NAME = 'My_Filtered_Role' -+ args.parent = 'ou=filtered roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'filtered' -+ args.filter = '"objectclass=posixAccount"' -+ args.role_dn = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'filter={}'.format(args.filter), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create filtered role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that filtered role is imported') -+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ new_role = roles.get(args.NAME) -+ assert new_role.present('nsRoleFilter', args.filter) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a nested role -+ -+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with nested role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'My_Nested_Role' -+ args.parent = 'ou=nested roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'nested' -+ args.filter = None -+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'role-dn={}'.format(args.role_dn), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create nested role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that nested role is imported') -+ roles = NestedRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ new_role = roles.get(args.NAME) -+ assert new_role.present('nsRoleDN', args.role_dn[0]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif -+ -+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate modification ldif -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.parent = DEFAULT_SUFFIX -+ args.create_users = True -+ args.delete_users = True -+ args.create_parent = False -+ args.num_users = "1000" -+ args.add_users = "100" -+ args.del_users = "999" -+ args.modrdn_users = "100" -+ args.mod_users = "10" -+ args.mod_attrs = ['cn', 'uid', 'sn'] -+ args.randomize = False -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'create-users={}'.format(args.create_users), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'delete-users={}'.format(args.delete_users), -+ 'num-users={}'.format(args.num_users), -+ 'add-users={}'.format(args.add_users), -+ 'del-users={}'.format(args.del_users), -+ 'modrdn-users={}'.format(args.modrdn_users), -+ 'mod-users={}'.format(args.mod_users), -+ 'mod-attrs={}'.format(args.mod_attrs), -+ 'randomize={}'.format(args.randomize), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create modification ldif') -+ dbgen_create_mods(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file) -+ -+ log.info('Check that some accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create nested ldif -+ -+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate nested ldif -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.suffix = DEFAULT_SUFFIX -+ args.node_limit = "100" -+ args.num_users = "600" -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'suffix={}'.format(args.suffix), -+ 'node-limit={}'.format(args.node_limit), -+ 'num-users={}'.format(args.num_users), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create nested ldif') -+ dbgen_create_nested(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ count_ou = len(accounts.filter('(ou=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file) -+ -+ standalone.restart() -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ assert len(accounts.filter('(ou=*)')) > count_ou -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py -index 7bc3892ba..058342fb1 100644 ---- a/src/lib389/lib389/cli_ctl/dbgen.py -+++ b/src/lib389/lib389/cli_ctl/dbgen.py -@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args): - props = { - "createUsers": args.create_users, - "deleteUsers": args.delete_users, -- "numUsers": args.num_users, -+ "numUsers": int(args.num_users), - "parent": args.parent, - "createParent": args.create_parent, -- "addUsers": args.add_users, -- "delUsers": args.del_users, -- "modrdnUsers": args.modrdn_users, -- "modUsers": args.mod_users, -+ "addUsers": int(args.add_users), -+ "delUsers": int(args.del_users), -+ "modrdnUsers": int(args.modrdn_users), -+ "modUsers": int(args.mod_users), - "random": args.randomize, - "modAttrs": args.mod_attrs - } -diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py -index 6273781a2..10fb200f7 100644 ---- a/src/lib389/lib389/dbgen.py -+++ b/src/lib389/lib389/dbgen.py -@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name=" - """ - Generate an LDIF of randomly named entries - """ -+ # Lets insure that integer parameters are not string -+ number=int(number) -+ startIdx=int(startIdx) - familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames') - givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames') - familynames = [] --- -2.26.2 - diff --git a/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch b/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch new file mode 100644 index 0000000..b4d22df --- /dev/null +++ b/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch @@ -0,0 +1,926 @@ +From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Thu, 20 May 2021 14:24:25 +0200 +Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762) + +Description: DB lock gets exhausted because of unindexed internal searches +(under a transaction). Indexing those searches is the way to prevent exhaustion. +If db lock get exhausted during a txn, it leads to db panic and the later recovery +can possibly fail. That leads to a full reinit of the instance where the db locks +got exhausted. + +Add three attributes to global BDB config: "nsslapd-db-locks-monitoring-enabled", + "nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause". +By default, nsslapd-db-locks-monitoring-enabled is turned on, nsslapd-db-locks-monitoring-threshold is set to 90% and nsslapd-db-locks-monitoring-threshold is 500ms. + +When current locks are close to the maximum locks value of 90% - returning +the next candidate will fail until the maximum of locks won't be +increased or current locks are released. +The monitoring thread runs with the configurable interval of 500ms. + +Add the setting to UI and CLI tools. + +Fixes: https://github.com/389ds/389-ds-base/issues/4623 + +Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!) +--- + .../suites/monitor/db_locks_monitor_test.py | 251 ++++++++++++++++++ + ldap/servers/slapd/back-ldbm/back-ldbm.h | 13 +- + .../slapd/back-ldbm/db-bdb/bdb_config.c | 99 +++++++ + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 85 ++++++ + ldap/servers/slapd/back-ldbm/init.c | 3 + + ldap/servers/slapd/back-ldbm/ldbm_config.c | 3 + + ldap/servers/slapd/back-ldbm/ldbm_config.h | 3 + + ldap/servers/slapd/back-ldbm/ldbm_search.c | 13 + + ldap/servers/slapd/libglobs.c | 4 +- + src/cockpit/389-console/src/css/ds.css | 4 + + src/cockpit/389-console/src/database.jsx | 7 + + src/cockpit/389-console/src/index.html | 2 +- + .../src/lib/database/databaseConfig.jsx | 88 +++++- + src/lib389/lib389/backend.py | 3 + + src/lib389/lib389/cli_conf/backend.py | 10 + + 15 files changed, 576 insertions(+), 12 deletions(-) + create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py + +diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py +new file mode 100644 +index 000000000..7f9938f30 +--- /dev/null ++++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py +@@ -0,0 +1,251 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2021 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import logging ++import pytest ++import datetime ++import subprocess ++from multiprocessing import Process, Queue ++from lib389 import pid_from_file ++from lib389.utils import ldap, os ++from lib389._constants import DEFAULT_SUFFIX, ReplicaRole ++from lib389.cli_base import LogCapture ++from lib389.idm.user import UserAccounts ++from lib389.idm.organizationalunit import OrganizationalUnits ++from lib389.tasks import AccessLog ++from lib389.backend import Backends ++from lib389.ldclt import Ldclt ++from lib389.dbgen import dbgen_users ++from lib389.tasks import ImportTask ++from lib389.index import Indexes ++from lib389.plugins import AttributeUniquenessPlugin ++from lib389.config import BDB_LDBMConfig ++from lib389.monitor import MonitorLDBM ++from lib389.topologies import create_topology, _remove_ssca_db ++ ++pytestmark = pytest.mark.tier2 ++db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False), ++ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. " ++ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.") ++ ++DEBUGGING = os.getenv('DEBUGGING', default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++ ++def _kill_ns_slapd(inst): ++ pid = str(pid_from_file(inst.ds_paths.pid_file)) ++ cmd = ['kill', '-9', pid] ++ subprocess.Popen(cmd, stdout=subprocess.PIPE) ++ ++ ++@pytest.fixture(scope="function") ++def topology_st_fn(request): ++ """Create DS standalone instance for each test case""" ++ ++ topology = create_topology({ReplicaRole.STANDALONE: 1}) ++ ++ def fin(): ++ # Kill the hanging process at the end of test to prevent failures in the following tests ++ if DEBUGGING: ++ [_kill_ns_slapd(inst) for inst in topology] ++ else: ++ [_kill_ns_slapd(inst) for inst in topology] ++ assert _remove_ssca_db(topology) ++ [inst.stop() for inst in topology if inst.exists()] ++ [inst.delete() for inst in topology if inst.exists()] ++ request.addfinalizer(fin) ++ ++ topology.logcap = LogCapture() ++ return topology ++ ++ ++@pytest.fixture(scope="function") ++def setup_attruniq_index_be_import(topology_st_fn): ++ """Enable Attribute Uniqueness, disable indexes and ++ import 120000 entries to the default backend ++ """ ++ inst = topology_st_fn.standalone ++ ++ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') ++ inst.config.set('nsslapd-plugin-logging', 'on') ++ inst.restart() ++ ++ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config") ++ attruniq.create(properties={'cn': 'attruniq'}) ++ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: ++ attruniq.add_unique_attribute(cn) ++ attruniq.add_unique_subtree(DEFAULT_SUFFIX) ++ attruniq.enable_all_subtrees() ++ attruniq.enable() ++ ++ indexes = Indexes(inst) ++ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: ++ indexes.ensure_state(properties={ ++ 'cn': cn, ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': 'none'}) ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "130000") ++ inst.restart() ++ ++ ldif_dir = inst.get_ldif_dir() ++ import_ldif = ldif_dir + '/perf_import.ldif' ++ ++ # Valid online import ++ import_task = ImportTask(inst) ++ dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew") ++ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) ++ import_task.wait() ++ assert import_task.is_complete() ++ ++ ++def create_user_wrapper(q, users): ++ try: ++ users.create_test_user() ++ except Exception as ex: ++ q.put(ex) ++ ++ ++def spawn_worker_thread(function, users, log, timeout, info): ++ log.info(f"Starting the thread - {info}") ++ q = Queue() ++ p = Process(target=function, args=(q,users,)) ++ p.start() ++ ++ log.info(f"Waiting for {timeout} seconds for the thread to finish") ++ p.join(timeout) ++ ++ if p.is_alive(): ++ log.info("Killing the thread as it's still running") ++ p.terminate() ++ p.join() ++ raise RuntimeError(f"Function call was aborted: {info}") ++ result = q.get() ++ if isinstance(result, Exception): ++ raise result ++ else: ++ return result ++ ++ ++@db_locks_monitoring_ack ++@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")]) ++def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold): ++ """Test that when all of the locks are exhausted the instance still working ++ and database is not corrupted ++ ++ :id: 299108cc-04d8-4ddc-b58e-99157fccd643 ++ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled ++ :steps: 1. Set nsslapd-db-locks to 11000 ++ 2. Check that we stop acquiring new locks when the threshold is reached ++ 3. Check that we can regulate a pause interval for DB locks monitoring thread ++ 4. Make sure the feature works for different backends on the same suffix ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ inst = topology_st_fn.standalone ++ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com' ++ ++ backends = Backends(inst) ++ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX, ++ 'name': ADDITIONAL_SUFFIX[-3:]}) ++ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ++ ous.create(properties={'ou': 'newpeople'}) ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "11000") ++ ++ # Restart server ++ inst.restart() ++ ++ for lock_enabled in ["on", "off"]: ++ for lock_pause in ["100", "500", "1000"]: ++ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled) ++ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold) ++ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) ++ inst.restart() ++ ++ if lock_enabled == "off": ++ raised_exception = (RuntimeError, ldap.SERVER_DOWN) ++ else: ++ raised_exception = ldap.OPERATIONS_ERROR ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ with pytest.raises(raised_exception): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'.") ++ # Restart because we already run out of locks and the next unindexed searches will fail eventually ++ if lock_enabled == "off": ++ _kill_ns_slapd(inst) ++ inst.restart() ++ ++ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None) ++ with pytest.raises(raised_exception): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'.") ++ # In case feature is disabled - restart for the clean up ++ if lock_enabled == "off": ++ _kill_ns_slapd(inst) ++ inst.restart() ++ ++ ++@db_locks_monitoring_ack ++def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import): ++ """Test that DB lock pause setting increases the wait interval value for the monitoring thread ++ ++ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6 ++ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled ++ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%) ++ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds) ++ 3. Make sure that the pause is successfully increased a few times in a row ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ """ ++ ++ inst = topology_st_fn.standalone ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "20000") ++ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause") ++ assert lock_pause == 500 ++ lock_pause = "10000" ++ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) ++ ++ # Restart server ++ inst.restart() ++ ++ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled") ++ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold") ++ assert lock_enabled == "on" ++ assert lock_threshold == 90 ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ start = datetime.datetime.now() ++ with pytest.raises(ldap.OPERATIONS_ERROR): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'") ++ end = datetime.datetime.now() ++ time_delta = end - start ++ if time_delta.seconds < 9: ++ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. " ++ f"Finished the execution in {time_delta.seconds} seconds") ++ # In case something has failed - restart for the clean up ++ inst.restart() +diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h +index 571b0a58b..afb831c32 100644 +--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h +@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t; + #define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */ + #define DEFAULT_DBCACHE_SIZE 33554432 + #define DEFAULT_DBCACHE_SIZE_STR "33554432" ++#define DEFAULT_DBLOCK_PAUSE 500 ++#define DEFAULT_DBLOCK_PAUSE_STR "500" + #define DEFAULT_MODE 0600 + #define DEFAULT_ALLIDSTHRESHOLD 4000 + #define DEFAULT_IDL_TUNE 1 +@@ -575,12 +577,21 @@ struct ldbminfo + char *li_backend_implement; /* low layer backend implementation */ + int li_noparentcheck; /* check if parent exists on add */ + +- /* the next 3 fields are for the params that don't get changed until ++ /* db lock monitoring */ ++ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */ ++ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */ ++ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */ ++ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/ ++ uint32_t li_dblock_threshold_reached; ++ ++ /* the next 4 fields are for the params that don't get changed until + * the server is restarted (used by the admin console) + */ + char *li_new_directory; + uint64_t li_new_dbcachesize; + int li_new_dblock; ++ int32_t li_new_dblock_monitoring; ++ uint64_t li_new_dblock_threshold; + + int li_new_dbncache; + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +index 738b841aa..167644943 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap + return retval; + } + ++static void * ++bdb_config_db_lock_monitoring_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((intptr_t)(li->li_new_dblock_monitoring)); ++} ++ ++static int ++bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ int val = (int32_t)((intptr_t)value); ++ ++ if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ li->li_new_dblock_monitoring = val; ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set", ++ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n"); ++ } else { ++ li->li_new_dblock_monitoring = val; ++ li->li_dblock_monitoring = val; ++ } ++ } ++ ++ return retval; ++} ++ ++static void * ++bdb_config_db_lock_pause_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED))); ++} ++ ++static int ++bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ u_int32_t val = (u_int32_t)((uintptr_t)value); ++ ++ if (val == 0) { ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set", ++ "%s was set to '0'. The default value will be used (%s)", ++ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR); ++ val = DEFAULT_DBLOCK_PAUSE; ++ } ++ ++ if (apply) { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED); ++ } ++ return retval; ++} ++ ++static void * ++bdb_config_db_lock_threshold_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((uintptr_t)(li->li_new_dblock_threshold)); ++} ++ ++static int ++bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ u_int32_t val = (u_int32_t)((uintptr_t)value); ++ ++ if (val < 70 || val > 95) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", ++ CONFIG_DB_LOCKS_THRESHOLD, val); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set", ++ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", ++ CONFIG_DB_LOCKS_THRESHOLD, val); ++ retval = LDAP_OPERATIONS_ERROR; ++ return retval; ++ } ++ ++ if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ li->li_new_dblock_threshold = val; ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set", ++ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n"); ++ } else { ++ li->li_new_dblock_threshold = val; ++ li->li_dblock_threshold = val; ++ } ++ } ++ return retval; ++} ++ + static void * + bdb_config_dbcachesize_get(void *arg) + { +@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = { + {CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0}, + {CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {NULL, 0, NULL, NULL, NULL, 0}}; + + void +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 6cccad8e6..2f25f67a2 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -35,6 +35,8 @@ + (env)->txn_checkpoint((env), (kbyte), (min), (flags)) + #define MEMP_STAT(env, gsp, fsp, flags, malloc) \ + (env)->memp_stat((env), (gsp), (fsp), (flags)) ++#define LOCK_STAT(env, statp, flags, malloc) \ ++ (env)->lock_stat((env), (statp), (flags)) + #define MEMP_TRICKLE(env, pct, nwrotep) \ + (env)->memp_trickle((env), (pct), (nwrotep)) + #define LOG_ARCHIVE(env, listp, flags, malloc) \ +@@ -66,6 +68,7 @@ + #define NEWDIR_MODE 0755 + #define DB_REGION_PREFIX "__db." + ++static int locks_monitoring_threadmain(void *param); + static int perf_threadmain(void *param); + static int checkpoint_threadmain(void *param); + static int trickle_threadmain(void *param); +@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li); + static int bdb_start_trickle_thread(struct ldbminfo *li); + static int bdb_start_perf_thread(struct ldbminfo *li); + static int bdb_start_txn_test_thread(struct ldbminfo *li); ++static int bdb_start_locks_monitoring_thread(struct ldbminfo *li); + static int trans_batch_count = 0; + static int trans_batch_limit = 0; + static int trans_batch_txn_min_sleep = 50; /* ms */ +@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode) + return return_value; + } + ++ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) { ++ return return_value; ++ } ++ + /* We need to free the memory to avoid a leak + * Also, we have to evaluate if the performance counter + * should be preserved or not for database restore. +@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li) + return return_value; + } + ++ + /* Performance thread */ + static int + perf_threadmain(void *param) +@@ -2910,6 +2919,82 @@ perf_threadmain(void *param) + return 0; + } + ++ ++/* ++ * create a thread for locks_monitoring_threadmain ++ */ ++static int ++bdb_start_locks_monitoring_thread(struct ldbminfo *li) ++{ ++ int return_value = 0; ++ if (li->li_dblock_monitoring) { ++ if (NULL == PR_CreateThread(PR_USER_THREAD, ++ (VFP)(void *)locks_monitoring_threadmain, li, ++ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE)) { ++ PRErrorCode prerr = PR_GetError(); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread", ++ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", ++ prerr, slapd_pr_strerror(prerr)); ++ return_value = -1; ++ } ++ } ++ return return_value; ++} ++ ++ ++/* DB Locks Monitoring thread */ ++static int ++locks_monitoring_threadmain(void *param) ++{ ++ int ret = 0; ++ uint64_t current_locks = 0; ++ uint64_t max_locks = 0; ++ uint32_t lock_exhaustion = 0; ++ PRIntervalTime interval; ++ struct ldbminfo *li = NULL; ++ ++ PR_ASSERT(NULL != param); ++ li = (struct ldbminfo *)param; ++ ++ dblayer_private *priv = li->li_dblayer_private; ++ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; ++ PR_ASSERT(NULL != priv); ++ ++ INCR_THREAD_COUNT(pEnv); ++ ++ while (!BDB_CONFIG(li)->bdb_stop_threads) { ++ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) { ++ DB_LOCK_STAT *lockstat = NULL; ++ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc); ++ if (0 == ret) { ++ current_locks = lockstat->st_nlocks; ++ max_locks = lockstat->st_maxlocks; ++ if (max_locks){ ++ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0); ++ } else { ++ lock_exhaustion = 0; ++ } ++ if ((li->li_dblock_threshold) && ++ (lock_exhaustion >= li->li_dblock_threshold)) { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED); ++ } else { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED); ++ } ++ } ++ slapi_ch_free((void **)&lockstat); ++ } ++ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)); ++ DS_Sleep(interval); ++ } ++ ++ DECR_THREAD_COUNT(pEnv); ++ slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n"); ++ return 0; ++} ++ ++ + /* + * create a thread for deadlock_threadmain + */ +diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c +index 893776699..4165c8fad 100644 +--- a/ldap/servers/slapd/back-ldbm/init.c ++++ b/ldap/servers/slapd/back-ldbm/init.c +@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb) + /* Initialize the set of instances. */ + li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor); + ++ /* Init lock threshold value */ ++ li->li_dblock_threshold_reached = 0; ++ + /* ask the factory to give us space in the Connection object + * (only bulk import uses this) + */ +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 10cef250f..60884cf33 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] = + CONFIG_SERIAL_LOCK, + CONFIG_USE_LEGACY_ERRORCODE, + CONFIG_DB_DEADLOCK_POLICY, ++ CONFIG_DB_LOCKS_MONITORING, ++ CONFIG_DB_LOCKS_THRESHOLD, ++ CONFIG_DB_LOCKS_PAUSE, + ""}; + + /* Used to add an array of entries, like the one above and +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index 58e64799c..6fa8292eb 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -104,6 +104,9 @@ struct config_info + #define CONFIG_DB_VERBOSE "nsslapd-db-verbose" + #define CONFIG_DB_DEBUG "nsslapd-db-debug" + #define CONFIG_DB_LOCK "nsslapd-db-locks" ++#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled" ++#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold" ++#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause" + #define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions" + #define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem" + #define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem" +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c +index 1a7b510d4..6e22debde 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c +@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension) + slapi_pblock_get(pb, SLAPI_CONNECTION, &conn); + slapi_pblock_get(pb, SLAPI_OPERATION, &op); + ++ + if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) { + /* + * Start at the end of the list and work our way forward. Since a single +@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension) + + /* Find the next candidate entry and return it. */ + while (1) { ++ if (li->li_dblock_monitoring && ++ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) { ++ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry", ++ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold " ++ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). " ++ "Please, increase nsslapd-db-locks according to your needs.\n"); ++ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL); ++ delete_search_result_set(pb, &sr); ++ rc = SLAPI_FAIL_GENERAL; ++ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL); ++ goto bail; ++ } + + /* check for abandon */ + if (slapi_op_abandoned(pb) || (NULL == sr)) { +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 388616b36..db7d01bbc 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply) + #if 0 + debugHashTable(attr); + #endif +- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr); +- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr); ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr); ++ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr); + return LDAP_NO_SUCH_ATTRIBUTE; + } + +diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css +index 9248116e7..3cf50b593 100644 +--- a/src/cockpit/389-console/src/css/ds.css ++++ b/src/cockpit/389-console/src/css/ds.css +@@ -639,6 +639,10 @@ option { + padding-right: 0 !important; + } + ++.ds-vertical-scroll-auto { ++ overflow-y: auto !important; ++} ++ + .alert { + max-width: 750px; + } +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index efa3ce6d5..11cae972c 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -157,6 +157,7 @@ export class Database extends React.Component { + const attrs = config.attrs; + let db_cache_auto = false; + let import_cache_auto = false; ++ let dblocksMonitoring = false; + let dbhome = ""; + + if ('nsslapd-db-home-directory' in attrs) { +@@ -168,6 +169,9 @@ export class Database extends React.Component { + if (attrs['nsslapd-import-cache-autosize'] != "0") { + import_cache_auto = true; + } ++ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") { ++ dblocksMonitoring = true; ++ } + + this.setState(() => ( + { +@@ -187,6 +191,9 @@ export class Database extends React.Component { + txnlogdir: attrs['nsslapd-db-logdirectory'], + dbhomedir: dbhome, + dblocks: attrs['nsslapd-db-locks'], ++ dblocksMonitoring: dblocksMonitoring, ++ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'], ++ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'], + chxpoint: attrs['nsslapd-db-checkpoint-interval'], + compactinterval: attrs['nsslapd-db-compactdb-interval'], + importcacheauto: attrs['nsslapd-import-cache-autosize'], +diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html +index 1278844fc..fd0eeb669 100644 +--- a/src/cockpit/389-console/src/index.html ++++ b/src/cockpit/389-console/src/index.html +@@ -12,7 +12,7 @@ + + + +- ++ +
+ + +diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +index f6e662bca..6a71c138d 100644 +--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx ++++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component { + txnlogdir: this.props.data.txnlogdir, + dbhomedir: this.props.data.dbhomedir, + dblocks: this.props.data.dblocks, ++ dblocksMonitoring: this.props.data.dblocksMonitoring, ++ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold, ++ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + chxpoint: this.props.data.chxpoint, + compactinterval: this.props.data.compactinterval, + importcachesize: this.props.data.importcachesize, +@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component { + _txnlogdir: this.props.data.txnlogdir, + _dbhomedir: this.props.data.dbhomedir, + _dblocks: this.props.data.dblocks, ++ _dblocksMonitoring: this.props.data.dblocksMonitoring, ++ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold, ++ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + _chxpoint: this.props.data.chxpoint, + _compactinterval: this.props.data.compactinterval, + _importcachesize: this.props.data.importcachesize, +@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component { + _import_cache_auto: this.props.data.import_cache_auto, + }; + this.handleChange = this.handleChange.bind(this); ++ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this); + this.select_auto_cache = this.select_auto_cache.bind(this); + this.select_auto_import_cache = this.select_auto_import_cache.bind(this); + this.save_db_config = this.save_db_config.bind(this); +@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component { + }, this.handleChange(e)); + } + ++ select_db_locks_monitoring (val, e) { ++ this.setState({ ++ dblocksMonitoring: !this.state.dblocksMonitoring ++ }, this.handleChange(val, e)); ++ } ++ + handleChange(e) { + // Generic + const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value; +@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component { + cmd.push("--locks=" + this.state.dblocks); + requireRestart = true; + } ++ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) { ++ if (this.state.dblocksMonitoring) { ++ cmd.push("--locks-monitoring-enabled=on"); ++ } else { ++ cmd.push("--locks-monitoring-enabled=off"); ++ } ++ requireRestart = true; ++ } ++ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) { ++ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold); ++ requireRestart = true; ++ } ++ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) { ++ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause); ++ } + if (this.state._chxpoint != this.state.chxpoint) { + cmd.push("--checkpoint-interval=" + this.state.chxpoint); + requireRestart = true; +@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component { + let import_cache_form; + let db_auto_checked = false; + let import_auto_checked = false; ++ let dblocksMonitor = ""; ++ ++ if (this.state.dblocksMonitoring) { ++ dblocksMonitor =
++ ++ ++ DB Locks Threshold Percentage ++ ++ ++ ++ ++ ++ ++ ++ DB Locks Pause Milliseconds ++ ++ ++ ++ ++ ++
; ++ } + + if (this.state.db_cache_auto) { + db_cache_form =
+@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component { + + + +- +- +- Database Locks +- +- +- +- +- + + + Database Checkpoint Interval +@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component { + + + ++ ++ ++ Database Locks ++ ++ ++ ++ ++ ++ ++ ++
DB Locks Monitoring
++
++ ++
++ ++ ++ ++ Enable Monitoring ++ ++ ++ ++ ++ ++ {dblocksMonitor} ++ ++ + +
+ +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index bcd7b383f..13bb27842 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject): + 'nsslapd-db-transaction-batch-max-wait', + 'nsslapd-db-logbuf-size', + 'nsslapd-db-locks', ++ 'nsslapd-db-locks-monitoring-enabled', ++ 'nsslapd-db-locks-monitoring-threshold', ++ 'nsslapd-db-locks-monitoring-pause', + 'nsslapd-db-private-import-mem', + 'nsslapd-import-cache-autosize', + 'nsslapd-cache-autosize', +diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py +index 6bfbcb036..722764d10 100644 +--- a/src/lib389/lib389/cli_conf/backend.py ++++ b/src/lib389/lib389/cli_conf/backend.py +@@ -46,6 +46,9 @@ arg_to_attr = { + 'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait', + 'logbufsize': 'nsslapd-db-logbuf-size', + 'locks': 'nsslapd-db-locks', ++ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled', ++ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold', ++ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause', + 'import_cache_autosize': 'nsslapd-import-cache-autosize', + 'cache_autosize': 'nsslapd-cache-autosize', + 'cache_autosize_split': 'nsslapd-cache-autosize-split', +@@ -998,6 +1001,13 @@ def create_parser(subparsers): + 'the batch count (only works when txn-batch-val is set)') + set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size') + set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks') ++ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value ' ++ 'set with "--locks-monitoring-threshold" ("on" by default)') ++ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are ' ++ 'acquired, the server will abort the searches while the number of locks ' ++ 'are not decreased. It helps to avoid DB corruption and long recovery.') ++ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time ' ++ 'that the monitoring thread spends waiting between checks.') + set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import ' + 'cache to be used during the the import process of LDIF files') + set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database ' +-- +2.26.3 + diff --git a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch deleted file mode 100644 index 17de2c9..0000000 --- a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 24 Nov 2020 19:22:49 +0100 -Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve - database RUV - consumer (Unavailable) (#4451) - -Bug Description: - -"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this -appears into the Cockpit web UI too. -The problem is that the bind credentials are not rightly propagated when trying to get -the consumers agreement status. Then supplier credntials are used instead and RUV -is searched anonymously because there is no bind dn in ldapi case. - -Fix Description: - -- Propagates the bind credentials when computing agreement status -- Add a credential cache because now a replica password could get asked several times: - when discovering the topology and - when getting the agreement maxcsn -- No testcase in 1.4.3 branch as the file modfied in master does not exists - -- Add a comment about nonlocal keyword - -Relates: #4449 - -Reviewers: - firstyear - droideck - mreynolds - -Issue 4449: Add a comment about nonlocal keyword - -(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab) ---- - src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++-- - src/lib389/lib389/replica.py | 16 ++++++++++++---- - 2 files changed, 23 insertions(+), 6 deletions(-) - -diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py -index 9dbaa320a..248972cba 100644 ---- a/src/lib389/lib389/cli_conf/replication.py -+++ b/src/lib389/lib389/cli_conf/replication.py -@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args): - - def get_repl_monitor_info(inst, basedn, log, args): - connection_data = dsrc_to_repl_monitor(DSRC_HOME, log) -+ credentials_cache = {} - - # Additional details for the connections to the topology - def get_credentials(host, port): -+ # credentials_cache is nonlocal to refer to the instance -+ # from enclosing function (get_repl_monitor_info)` -+ nonlocal credentials_cache -+ key = f'{host}:{port}' -+ if key in credentials_cache: -+ return credentials_cache[key] - found = False - if args.connections: - connections = args.connections -@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args): - binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip() - bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip() - -- return {"binddn": binddn, -- "bindpw": bindpw} -+ credentials = {"binddn": binddn, -+ "bindpw": bindpw} -+ credentials_cache[key] = credentials -+ return credentials - - repl_monitor = ReplicationMonitor(inst) - report_dict = repl_monitor.generate_report(get_credentials, args.json) -diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py -index c2ad2104d..3d89e61fb 100644 ---- a/src/lib389/lib389/replica.py -+++ b/src/lib389/lib389/replica.py -@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object): - else: - self._log = logging.getLogger(__name__) - -- def _get_replica_status(self, instance, report_data, use_json): -+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None): - """Load all of the status data to report - and add new hostname:port pairs for future processing -+ :type get_credentials: function - """ - - replicas_status = [] -@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object): - for agmt in agmts.list(): - host = agmt.get_attr_val_utf8_l("nsds5replicahost") - port = agmt.get_attr_val_utf8_l("nsds5replicaport") -+ if get_credentials is not None: -+ credentials = get_credentials(host, port) -+ binddn = credentials["binddn"] -+ bindpw = credentials["bindpw"] -+ else: -+ binddn = instance.binddn -+ bindpw = instance.bindpw - protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo') - # Supply protocol here because we need it only for connection - # and agreement status is already preformatted for the user output -@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object): - if consumer not in report_data: - report_data[f"{consumer}:{protocol}"] = None - if use_json: -- agmts_status.append(json.loads(agmt.status(use_json=True))) -+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw))) - else: -- agmts_status.append(agmt.status()) -+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw)) - replicas_status.append({"replica_id": replica_id, - "replica_root": replica_root, - "replica_status": "Available", -@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object): - initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}" - # Do this on an initial instance to get the agreements to other instances - try: -- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json) -+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials) - except ldap.LDAPError as e: - self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}") - report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}] --- -2.26.2 - diff --git a/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch b/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch new file mode 100644 index 0000000..489f4b3 --- /dev/null +++ b/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch @@ -0,0 +1,33 @@ +From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Wed, 26 May 2021 16:07:43 +0200 +Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI + (#4783) + +(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022) +--- + ldap/servers/slapd/connection.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index c7a15e775..e0c1a52d2 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -1771,6 +1771,14 @@ connection_threadmain() + } + } + ++ /* ++ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done ++ * before replication session is properly set). ++ */ ++ if (replication_connection) { ++ operation_set_flag(op, OP_FLAG_REPLICATED); ++ } ++ + /* + * Call the do_ function to process this request. + */ +-- +2.26.3 + diff --git a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch deleted file mode 100644 index 70974ce..0000000 --- a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch +++ /dev/null @@ -1,63 +0,0 @@ -From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Mon, 30 Nov 2020 09:03:33 +0100 -Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong - cookie (#4467) - -Bug description: - This test case was incorrect. - During a refreshPersistent search, a cookie is sent - with the intermediate message that indicates the end of the refresh phase. - Then a second cookie is sent on the updated entry (group10) - I believed this test was successful some time ago but neither python-ldap - nor sync_repl changed (intermediate sent in post refresh). - So the testcase was never successful :( - -Fix description: - The fix is just to take into account the two expected cookies - -relates: https://github.com/389ds/389-ds-base/issues/4243 - -Reviewed by: Mark Reynolds - -Platforms tested: F31 ---- - .../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - -diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -index 79ec374bc..7b35537d5 100644 ---- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request): - sync_repl.start() - time.sleep(5) - -- # Add a test group just to check that sync_repl receives only one update -+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie - group.append(groups.create(properties={'cn': 'group%d' % 10})) - - # create users, that automember/memberof will generate nested updates -@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request): - time.sleep(10) - cookies = sync_repl.get_result() - -- # checking that the cookie list contains only one entry -- assert len(cookies) == 1 -- prev = 0 -+ # checking that the cookie list contains only two entries -+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh -+ # the the one from SyncStateControl related to the only updated entry (group10) -+ assert len(cookies) == 2 -+ prev = -1 - for cookie in cookies: - log.info('Check cookie %s' % cookie) - -- assert int(cookie) > 0 -+ assert int(cookie) >= 0 - assert int(cookie) < 1000 - assert int(cookie) > prev - prev = int(cookie) --- -2.26.2 - diff --git a/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch b/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch new file mode 100644 index 0000000..2121550 --- /dev/null +++ b/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch @@ -0,0 +1,1453 @@ +From c79630de8012a893ed3d1c46b41bc7871a07a3e2 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 26 May 2021 13:32:13 -0400 +Subject: [PATCH 11/12] Issue 4778 - RFE - Allow setting TOD for db compaction + and add task + +Description: Since database compaction can be costly it should be allowed + to set a time to execute it during offpeak hours. Once the + compaction interval has been met, it will wait for the configured + time of day to do the compaction. The default is just before + midnight: 23:59 + + A task was also created that can run compaction on demand, + and can also just target the replication changelog. This could + be used in conjunction with a cronjob for more complex + execution patterns. + +ASAN tested and approved. + +relates: https://github.com/389ds/389-ds-base/issues/4778 + +Reviewed by: spichugi(Thanks!) +--- + .../tests/suites/config/compact_test.py | 81 ++++++ + ldap/schema/01core389.ldif | 3 +- + ldap/servers/plugins/replication/cl5.h | 1 + + ldap/servers/plugins/replication/cl5_api.c | 70 ++++- + ldap/servers/plugins/replication/cl5_api.h | 2 +- + .../servers/plugins/replication/cl5_clcache.c | 3 - + ldap/servers/plugins/replication/cl5_config.c | 102 ++++++- + ldap/servers/plugins/replication/cl5_init.c | 2 +- + .../servers/plugins/replication/repl_shared.h | 2 + + ldap/servers/plugins/retrocl/retrocl.c | 1 - + .../slapd/back-ldbm/db-bdb/bdb_config.c | 79 ++++++ + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 258 ++++++++++++------ + .../slapd/back-ldbm/db-bdb/bdb_layer.h | 4 +- + ldap/servers/slapd/back-ldbm/init.c | 2 + + ldap/servers/slapd/back-ldbm/ldbm_config.h | 1 + + .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + + ldap/servers/slapd/filtercmp.c | 5 +- + ldap/servers/slapd/pblock.c | 17 +- + ldap/servers/slapd/slap.h | 2 + + ldap/servers/slapd/slapi-private.h | 1 + + ldap/servers/slapd/task.c | 102 ++++++- + src/cockpit/389-console/src/database.jsx | 1 + + .../src/lib/database/databaseConfig.jsx | 16 +- + src/lib389/lib389/_constants.py | 1 + + src/lib389/lib389/backend.py | 1 + + src/lib389/lib389/cli_conf/backend.py | 24 +- + src/lib389/lib389/cli_conf/replication.py | 3 + + src/lib389/lib389/tasks.py | 14 +- + 28 files changed, 689 insertions(+), 110 deletions(-) + create mode 100644 dirsrvtests/tests/suites/config/compact_test.py + +diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py +new file mode 100644 +index 000000000..1f1c097e4 +--- /dev/null ++++ b/dirsrvtests/tests/suites/config/compact_test.py +@@ -0,0 +1,81 @@ ++import logging ++import pytest ++import os ++import time ++from lib389.tasks import DBCompactTask ++from lib389.backend import DatabaseConfig ++from lib389.replica import Changelog5 ++from lib389.topologies import topology_m1 as topo ++ ++log = logging.getLogger(__name__) ++ ++ ++def test_compact_db_task(topo): ++ """Specify a test case purpose or name here ++ ++ :id: 1b3222ef-a336-4259-be21-6a52f76e1859 ++ :setup: Standalone Instance ++ :steps: ++ 1. Create task ++ 2. Check task was successful ++ 3. Check errors log to show task was run ++ 3. Create task just for replication ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ inst = topo.ms["supplier1"] ++ ++ task = DBCompactTask(inst) ++ task.create() ++ task.wait() ++ assert task.get_exit_code() == 0 ++ ++ # Check errors log to make sure task actually compacted db ++ assert inst.searchErrorsLog("Compacting databases") ++ inst.deleteErrorLogs(restart=False) ++ ++ ++def test_compaction_interval_and_time(topo): ++ """Specify a test case purpose or name here ++ ++ :id: f361bee9-d7e7-4569-9255-d7b60dd9d92e ++ :setup: Supplier Instance ++ :steps: ++ 1. Configure compact interval and time for database and changelog ++ 2. Check compaction occurs as expected ++ :expectedresults: ++ 1. Success ++ 2. Success ++ """ ++ ++ inst = topo.ms["supplier1"] ++ ++ # Configure DB compaction ++ config = DatabaseConfig(inst) ++ config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')]) ++ ++ # Configure changelog compaction ++ cl5 = Changelog5(inst) ++ cl5.replace_many( ++ ('nsslapd-changelogcompactdb-interval', '2'), ++ ('nsslapd-changelogcompactdb-time', '00:01'), ++ ('nsslapd-changelogtrim-interval', '2') ++ ) ++ inst.deleteErrorLogs() ++ ++ # Check is compaction occurred ++ time.sleep(6) ++ assert inst.searchErrorsLog("Compacting databases") ++ assert inst.searchErrorsLog("compacting replication changelogs") ++ inst.deleteErrorLogs(restart=False) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) ++ +diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif +index 9e9a26c21..0c73e5114 100644 +--- a/ldap/schema/01core389.ldif ++++ b/ldap/schema/01core389.ldif +@@ -285,6 +285,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2310 NAME 'nsds5ReplicaFlowControlWindow + attributeTypes: ( 2.16.840.1.113730.3.1.2311 NAME 'nsds5ReplicaFlowControlPause' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2313 NAME 'nsslapd-changelogtrim-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2314 NAME 'nsslapd-changelogcompactdb-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) ++attributeTypes: ( 2.16.840.1.113730.3.1.2385 NAME 'nsslapd-changelogcompactdb-time' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2315 NAME 'nsDS5ReplicaWaitForAsyncResults' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2316 NAME 'nsslapd-auditfaillog-maxlogsize' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2317 NAME 'nsslapd-auditfaillog-logrotationsync-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +@@ -345,5 +346,5 @@ objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape + objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' ) + objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' ) + objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory Server' ) +-objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' ) ++objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-changelogcompactdb-time $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.337 NAME 'rewriterEntry' DESC '' SUP top MUST ( nsslapd-libPath ) MAY ( cn $ nsslapd-filterrewriter $ nsslapd-returnedAttrRewriter ) X-ORIGIN '389 Directory Server' ) +diff --git a/ldap/servers/plugins/replication/cl5.h b/ldap/servers/plugins/replication/cl5.h +index 2af57e369..99ea1c6a2 100644 +--- a/ldap/servers/plugins/replication/cl5.h ++++ b/ldap/servers/plugins/replication/cl5.h +@@ -29,6 +29,7 @@ typedef struct changelog5Config + char *symmetricKey; + long compactInterval; + long trimInterval; ++ char *compactTime; + } changelog5Config; + + /* initializes changelog*/ +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 403a6a666..75a2f46f5 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -158,6 +158,7 @@ typedef struct cl5trim + time_t maxAge; /* maximum entry age in seconds */ + int maxEntries; /* maximum number of entries across all changelog files */ + int compactInterval; /* interval to compact changelog db */ ++ char *compactTime; /* time to compact changelog db */ + int trimInterval; /* trimming interval */ + PRLock *lock; /* controls access to trimming configuration */ + } CL5Trim; +@@ -184,6 +185,7 @@ typedef struct cl5desc + PRLock *clLock; /* Lock associated to clVar, used to notify threads on close */ + PRCondVar *clCvar; /* Condition Variable used to notify threads on close */ + void *clcrypt_handle; /* for cl encryption */ ++ char *compact_time; /* Time to execute changelog compaction */ + } CL5Desc; + + typedef void (*VFP)(void *); +@@ -1025,7 +1027,7 @@ cl5GetState() + CL5_BAD_STATE if changelog is not open + */ + int +-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval) ++cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval) + { + if (s_cl5Desc.dbState == CL5_STATE_NONE) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +@@ -1061,6 +1063,10 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int t + s_cl5Desc.dbTrim.compactInterval = compactInterval; + } + ++ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) { ++ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime); ++ } ++ + if (trimInterval != CL5_NUM_IGNORE) { + s_cl5Desc.dbTrim.trimInterval = trimInterval; + } +@@ -3077,16 +3083,48 @@ _cl5TrimCleanup(void) + { + if (s_cl5Desc.dbTrim.lock) + PR_DestroyLock(s_cl5Desc.dbTrim.lock); ++ slapi_ch_free_string(&s_cl5Desc.dbTrim.compactTime); + + memset(&s_cl5Desc.dbTrim, 0, sizeof(s_cl5Desc.dbTrim)); + } + ++static time_t ++_cl5_get_tod_expiration(char *expire_time) ++{ ++ time_t start_time, todays_elapsed_time, now = time(NULL); ++ struct tm *tm_struct = localtime(&now); ++ char hour_str[3] = {0}; ++ char min_str[3] = {0}; ++ char *s = expire_time; ++ char *endp = NULL; ++ int32_t hour, min, expiring_time; ++ ++ /* Get today's start time */ ++ todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); ++ start_time = slapi_current_utc_time() - todays_elapsed_time; ++ ++ /* Get the hour and minute and calculate the expiring time. The time was ++ * already validated in bdb_config.c: HH:MM */ ++ hour_str[0] = *s++; ++ hour_str[1] = *s++; ++ s++; /* skip colon */ ++ min_str[0] = *s++; ++ min_str[1] = *s++; ++ hour = strtoll(hour_str, &endp, 10); ++ min = strtoll(min_str, &endp, 10); ++ expiring_time = (hour * 60 * 60) + (min * 60); ++ ++ return start_time + expiring_time; ++} ++ + static int + _cl5TrimMain(void *param __attribute__((unused))) + { + time_t timePrev = slapi_current_utc_time(); + time_t timeCompactPrev = slapi_current_utc_time(); + time_t timeNow; ++ PRBool compacting = PR_FALSE; ++ int32_t compactdb_time = 0; + + PR_AtomicIncrement(&s_cl5Desc.threadCount); + +@@ -3097,11 +3135,26 @@ _cl5TrimMain(void *param __attribute__((unused))) + timePrev = timeNow; + _cl5DoTrimming(); + } ++ ++ if (!compacting) { ++ /* Once we know we want to compact we need to stop refreshing the ++ * TOD expiration. Otherwise if the compact time is close to ++ * midnight we could roll over past midnight during the checkpoint ++ * sleep interval, and we'd never actually compact the databases. ++ * We also need to get this value before the sleep. ++ */ ++ compactdb_time = _cl5_get_tod_expiration(s_cl5Desc.dbTrim.compactTime); ++ } + if ((s_cl5Desc.dbTrim.compactInterval > 0) && +- (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) { +- /* time to trim */ +- timeCompactPrev = timeNow; +- _cl5CompactDBs(); ++ (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) ++ { ++ compacting = PR_TRUE; ++ if (slapi_current_utc_time() > compactdb_time) { ++ /* time to trim */ ++ timeCompactPrev = timeNow; ++ _cl5CompactDBs(); ++ compacting = PR_FALSE; ++ } + } + if (NULL == s_cl5Desc.clLock) { + /* most likely, emergency */ +@@ -3215,6 +3268,10 @@ _cl5CompactDBs(void) + rc, db_strerror(rc)); + goto bail; + } ++ ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, ++ "_cl5CompactDBs - compacting replication changelogs...\n"); + for (fileObj = objset_first_obj(s_cl5Desc.dbFiles); + fileObj; + fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) { +@@ -3235,6 +3292,9 @@ _cl5CompactDBs(void) + "_cl5CompactDBs - %s - %d pages freed\n", + dbFile->replName, c_data.compact_pages_free); + } ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, ++ "_cl5CompactDBs - compacting replication changelogs finished.\n"); + bail: + if (fileObj) { + object_release(fileObj); +diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h +index 302af97a0..4b0949fb3 100644 +--- a/ldap/servers/plugins/replication/cl5_api.h ++++ b/ldap/servers/plugins/replication/cl5_api.h +@@ -236,7 +236,7 @@ int cl5GetState(void); + Return: CL5_SUCCESS if successful; + CL5_BAD_STATE if changelog has not been open + */ +-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval); ++int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval); + + void cl5DestroyIterator(void *iterator); + +diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c +index 90dec4d54..e5a39c9c1 100644 +--- a/ldap/servers/plugins/replication/cl5_clcache.c ++++ b/ldap/servers/plugins/replication/cl5_clcache.c +@@ -452,9 +452,6 @@ static int + clcache_cursor_set(DBC *cursor, CLC_Buffer *buf) + { + int rc; +- uint32_t ulen; +- uint32_t dlen; +- uint32_t size; + + rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET); + if (rc == DB_BUFFER_SMALL) { +diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c +index e0530bed2..b32686788 100644 +--- a/ldap/servers/plugins/replication/cl5_config.c ++++ b/ldap/servers/plugins/replication/cl5_config.c +@@ -131,6 +131,7 @@ changelog5_config_done(changelog5Config *config) + /* slapi_ch_free_string accepts NULL pointer */ + slapi_ch_free_string(&config->maxAge); + slapi_ch_free_string(&config->dir); ++ slapi_ch_free_string(&config->compactTime); + slapi_ch_free_string(&config->symmetricKey); + slapi_ch_free_string(&config->dbconfig.encryptionAlgorithm); + slapi_ch_free_string(&config->dbconfig.symmetricKey); +@@ -211,7 +212,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)), + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +@@ -302,6 +303,7 @@ changelog5_config_modify(Slapi_PBlock *pb, + config.compactInterval = CL5_NUM_IGNORE; + slapi_ch_free_string(&config.maxAge); + config.maxAge = slapi_ch_strdup(CL5_STR_IGNORE); ++ config.compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME); + config.trimInterval = CL5_NUM_IGNORE; + + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); +@@ -375,6 +377,55 @@ changelog5_config_modify(Slapi_PBlock *pb, + *returncode = LDAP_UNWILLING_TO_PERFORM; + goto done; + } ++ } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE) == 0) { ++ if (config_attr_value && config_attr_value[0] != '\0') { ++ char *val = slapi_ch_strdup(config_attr_value); ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ int32_t hour, min; ++ errno = 0; ++ ++ slapi_ch_free_string(&config.compactTime); ++ ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid hour set (%s), must be a two digit number between 00 and 23", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid minute set (%s), must be a two digit number between 00 and 59", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", min_str); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid setting (%s), must have a time format of HH:MM", val); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ config.compactTime = slapi_ch_strdup(config_attr_value); ++ } + } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_TRIM_ATTRIBUTE) == 0) { + if (slapi_is_duration_valid(config_attr_value)) { + config.trimInterval = (long)slapi_parse_duration(config_attr_value); +@@ -419,6 +470,11 @@ changelog5_config_modify(Slapi_PBlock *pb, + if (originalConfig->maxAge) + config.maxAge = slapi_ch_strdup(originalConfig->maxAge); + } ++ if (strcmp(config.compactTime, CL5_STR_IGNORE) == 0) { ++ slapi_ch_free_string(&config.compactTime); ++ if (originalConfig->compactTime) ++ config.compactTime = slapi_ch_strdup(originalConfig->compactTime); ++ } + + /* attempt to change chagelog dir */ + if (config.dir) { +@@ -519,7 +575,7 @@ changelog5_config_modify(Slapi_PBlock *pb, + if (config.maxEntries != CL5_NUM_IGNORE || + config.trimInterval != CL5_NUM_IGNORE || + strcmp(config.maxAge, CL5_STR_IGNORE) != 0) { +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +@@ -689,6 +745,7 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config) + { + const char *arg; + char *max_age = NULL; ++ char *val = NULL; + + memset(config, 0, sizeof(*config)); + config->dir = slapi_entry_attr_get_charptr(entry, CONFIG_CHANGELOG_DIR_ATTRIBUTE); +@@ -711,6 +768,47 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config) + config->compactInterval = CHANGELOGDB_COMPACT_INTERVAL; + } + ++ arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE); ++ if (arg) { ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ int32_t hour, min; ++ errno = 0; ++ ++ val = slapi_ch_strdup((char *)arg); ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ goto set_default; ++ } ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", min_str); ++ goto set_default; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ goto set_default; ++ } ++ config->compactTime = slapi_ch_strdup(arg); ++ } else { ++ set_default: ++ config->compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME); ++ } ++ slapi_ch_free_string(&val); ++ + arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_TRIM_ATTRIBUTE); + if (arg) { + if (slapi_is_duration_valid(arg)) { +diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c +index 112c4ece4..251859714 100644 +--- a/ldap/servers/plugins/replication/cl5_init.c ++++ b/ldap/servers/plugins/replication/cl5_init.c +@@ -57,7 +57,7 @@ changelog5_init() + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, + "changelog5_init: failed to configure changelog trimming\n"); +diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h +index b1ed86934..6708e12f7 100644 +--- a/ldap/servers/plugins/replication/repl_shared.h ++++ b/ldap/servers/plugins/replication/repl_shared.h +@@ -26,11 +26,13 @@ + + #define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */ + #define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */ ++#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */ + + #define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir" + #define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries" + #define CONFIG_CHANGELOG_MAXAGE_ATTRIBUTE "nsslapd-changelogmaxage" + #define CONFIG_CHANGELOG_COMPACTDB_ATTRIBUTE "nsslapd-changelogcompactdb-interval" ++#define CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE "nsslapd-changelogcompactdb-time" + #define CONFIG_CHANGELOG_TRIM_ATTRIBUTE "nsslapd-changelogtrim-interval" + /* Changelog Internal Configuration Parameters -> Changelog Cache related */ + #define CONFIG_CHANGELOG_ENCRYPTION_ALGORITHM "nsslapd-encryptionalgorithm" +diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c +index 2a620301c..f73c81528 100644 +--- a/ldap/servers/plugins/retrocl/retrocl.c ++++ b/ldap/servers/plugins/retrocl/retrocl.c +@@ -400,7 +400,6 @@ retrocl_start(Slapi_PBlock *pb) + + for (size_t i = 0; i < num_vals; i++) { + char *value = values[i]; +- size_t length = strlen(value); + + char *pos = strchr(value, ':'); + if (pos == NULL) { +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +index 167644943..4261c6ce2 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +@@ -678,6 +678,84 @@ bdb_config_db_compactdb_interval_set(void *arg, + return retval; + } + ++static void * ++bdb_config_db_compactdb_time_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ return (void *)slapi_ch_strdup(BDB_CONFIG(li)->bdb_compactdb_time); ++} ++ ++static int ++bdb_config_db_compactdb_time_set(void *arg, ++ void *value, ++ char *errorbuf __attribute__((unused)), ++ int phase __attribute__((unused)), ++ int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ char *val = slapi_ch_strdup((char *)value); ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ char *default_time = "23:59"; ++ int32_t hour, min; ++ int retval = LDAP_SUCCESS; ++ errno = 0; ++ ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid hour set (%s), must be a two digit number between 00 and 23", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid minute set (%s), must be a two digit number between 00 and 59", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", min_str); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid setting (%s), must have a time format of HH:MM", val); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ ++done: ++ if (apply) { ++ slapi_ch_free((void **)&(BDB_CONFIG(li)->bdb_compactdb_time)); ++ if (retval) { ++ /* Something went wrong, use the default */ ++ BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup(default_time); ++ } else { ++ BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup((char *)value); ++ } ++ } ++ slapi_ch_free_string(&val); ++ ++ return retval; ++} ++ + static void * + bdb_config_db_page_size_get(void *arg) + { +@@ -1473,6 +1551,7 @@ static config_info bdb_config_param[] = { + {CONFIG_DB_TRANSACTION_WAIT, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_transaction_wait_get, &bdb_config_db_transaction_wait_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_CHECKPOINT_INTERVAL, CONFIG_TYPE_INT, "60", &bdb_config_db_checkpoint_interval_get, &bdb_config_db_checkpoint_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_COMPACTDB_INTERVAL, CONFIG_TYPE_INT, "2592000" /*30days*/, &bdb_config_db_compactdb_interval_get, &bdb_config_db_compactdb_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_COMPACTDB_TIME, CONFIG_TYPE_STRING, "23:59", &bdb_config_db_compactdb_time_get, &bdb_config_db_compactdb_time_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH, CONFIG_TYPE_INT, "0", &bdb_get_batch_transactions, &bdb_set_batch_transactions, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_min_sleep, &bdb_set_batch_txn_min_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_max_sleep, &bdb_set_batch_txn_max_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 2f25f67a2..ec1976d38 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -2126,6 +2126,7 @@ bdb_post_close(struct ldbminfo *li, int dbmode) + */ + slapi_ch_free_string(&conf->bdb_dbhome_directory); + slapi_ch_free_string(&conf->bdb_home_directory); ++ slapi_ch_free_string(&conf->bdb_compactdb_time); + } + + return return_value; +@@ -3644,6 +3645,39 @@ log_flush_threadmain(void *param) + return 0; + } + ++/* ++ * This refreshes the TOD expiration. So live changes to the configuration ++ * will take effect immediately. ++ */ ++static time_t ++bdb_get_tod_expiration(char *expire_time) ++{ ++ time_t start_time, todays_elapsed_time, now = time(NULL); ++ struct tm *tm_struct = localtime(&now); ++ char hour_str[3] = {0}; ++ char min_str[3] = {0}; ++ char *s = expire_time; ++ char *endp = NULL; ++ int32_t hour, min, expiring_time; ++ ++ /* Get today's start time */ ++ todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); ++ start_time = slapi_current_utc_time() - todays_elapsed_time; ++ ++ /* Get the hour and minute and calculate the expiring time. The time was ++ * already validated in bdb_config.c: HH:MM */ ++ hour_str[0] = *s++; ++ hour_str[1] = *s++; ++ s++; /* skip colon */ ++ min_str[0] = *s++; ++ min_str[1] = *s++; ++ hour = strtoll(hour_str, &endp, 10); ++ min = strtoll(min_str, &endp, 10); ++ expiring_time = (hour * 60 * 60) + (min * 60); ++ ++ return start_time + expiring_time; ++} ++ + /* + * create a thread for checkpoint_threadmain + */ +@@ -3685,7 +3719,9 @@ checkpoint_threadmain(void *param) + time_t checkpoint_interval_update = 0; + time_t compactdb_interval = 0; + time_t checkpoint_interval = 0; +- back_txn txn; ++ int32_t compactdb_time = 0; ++ PRBool compacting = PR_FALSE; ++ + + PR_ASSERT(NULL != param); + li = (struct ldbminfo *)param; +@@ -3724,22 +3760,35 @@ checkpoint_threadmain(void *param) + slapi_timespec_expire_at(checkpoint_interval, &checkpoint_expire); + + while (!BDB_CONFIG(li)->bdb_stop_threads) { +- /* sleep for a while */ +- /* why aren't we sleeping exactly the right amount of time ? */ +- /* answer---because the interval might be changed after the server +- * starts up */ ++ PR_Lock(li->li_config_mutex); ++ checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; ++ compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; ++ if (!compacting) { ++ /* Once we know we want to compact we need to stop refreshing the ++ * TOD expiration. Otherwise if the compact time is close to ++ * midnight we could roll over past midnight during the checkpoint ++ * sleep interval, and we'd never actually compact the databases. ++ * We also need to get this value before the sleep. ++ */ ++ compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); ++ } ++ PR_Unlock(li->li_config_mutex); ++ ++ if (compactdb_interval_update != compactdb_interval) { ++ /* Compact interval was changed, so reset the timer */ ++ slapi_timespec_expire_at(compactdb_interval_update, &compactdb_expire); ++ } + ++ /* Sleep for a while ... ++ * Why aren't we sleeping exactly the right amount of time ? ++ * Answer---because the interval might be changed after the server ++ * starts up */ + DS_Sleep(interval); + + if (0 == BDB_CONFIG(li)->bdb_enable_transactions) { + continue; + } + +- PR_Lock(li->li_config_mutex); +- checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; +- compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; +- PR_Unlock(li->li_config_mutex); +- + /* If the checkpoint has been updated OR we have expired */ + if (checkpoint_interval != checkpoint_interval_update || + slapi_timespec_expire_check(&checkpoint_expire) == TIMER_EXPIRED) { +@@ -3807,94 +3856,37 @@ checkpoint_threadmain(void *param) + + /* + * Remember that if compactdb_interval is 0, timer_expired can +- * never occur unless the value in compctdb_interval changes. ++ * never occur unless the value in compactdb_interval changes. + * +- * this could have been a bug infact, where compactdb_interval ++ * this could have been a bug in fact, where compactdb_interval + * was 0, if you change while running it would never take effect .... + */ +- if (compactdb_interval_update != compactdb_interval || +- slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { +- int rc = 0; +- Object *inst_obj; +- ldbm_instance *inst; +- DB *db = NULL; +- DB_COMPACT c_data = {0}; +- +- for (inst_obj = objset_first_obj(li->li_instance_set); +- inst_obj; +- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { +- inst = (ldbm_instance *)object_get_data(inst_obj); +- rc = dblayer_get_id2entry(inst->inst_be, &db); +- if (!db || rc) { +- continue; +- } +- slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", "Compacting DB start: %s\n", +- inst->inst_name); +- +- /* +- * It's possible for this to heap us after free because when we access db +- * *just* as the server shut's down, we don't know it. So we should probably +- * do something like wrapping access to the db var in a rwlock, and have "read" +- * to access, and take writes to change the state. This would prevent the issue. +- */ +- DBTYPE type; +- rc = db->get_type(db, &type); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", +- "compactdb: failed to determine db type for %s: db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- continue; +- } ++ if (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { ++ compacting = PR_TRUE; ++ if (slapi_current_utc_time() < compactdb_time) { ++ /* We have passed the interval, but we need to wait for a ++ * particular TOD to pass before compacting */ ++ continue; ++ } + +- rc = dblayer_txn_begin(inst->inst_be, NULL, &txn); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: transaction begin failed: %d\n", rc); +- break; +- } +- /* +- * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf +- * "DB_FREELIST_ONLY +- * Do no page compaction, only returning pages to the filesystem that are already free and at the end +- * of the file. This flag must be set if the database is a Hash access method database." +- * +- */ ++ /* Time to compact the DB's */ ++ dblayer_force_checkpoint(li); ++ bdb_compact(li); ++ dblayer_force_checkpoint(li); + +- uint32_t compact_flags = DB_FREE_SPACE; +- if (type == DB_HASH) { +- compact_flags |= DB_FREELIST_ONLY; +- } +- rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/, +- &c_data, compact_flags, NULL /*end*/); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", +- "compactdb: failed to compact %s; db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to abort txn (%s) db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- break; +- } +- } else { +- slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", +- "compactdb: compact %s - %d pages freed\n", +- inst->inst_name, c_data.compact_pages_free); +- if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to commit txn (%s) db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- break; +- } +- } +- } ++ /* Now reset the timer and compacting flag */ + compactdb_interval = compactdb_interval_update; + slapi_timespec_expire_at(compactdb_interval, &compactdb_expire); ++ compacting = PR_FALSE; + } + } +- slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Check point before leaving\n"); ++ slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Check point before leaving\n"); + rval = dblayer_force_checkpoint(li); ++ + error_return: + + DECR_THREAD_COUNT(pEnv); +- slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n"); ++ slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n"); + return rval; + } + +@@ -6209,3 +6201,99 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info) + + return rc; + } ++ ++int32_t ++ldbm_back_compact(Slapi_Backend *be) ++{ ++ struct ldbminfo *li = NULL; ++ int32_t rc = -1; ++ ++ li = (struct ldbminfo *)be->be_database->plg_private; ++ dblayer_force_checkpoint(li); ++ rc = bdb_compact(li); ++ dblayer_force_checkpoint(li); ++ return rc; ++} ++ ++ ++int32_t ++bdb_compact(struct ldbminfo *li) ++{ ++ Object *inst_obj; ++ ldbm_instance *inst; ++ DB *db = NULL; ++ back_txn txn = {0}; ++ int rc = 0; ++ DB_COMPACT c_data = {0}; ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", ++ "Compacting databases ...\n"); ++ for (inst_obj = objset_first_obj(li->li_instance_set); ++ inst_obj; ++ inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) ++ { ++ inst = (ldbm_instance *)object_get_data(inst_obj); ++ rc = dblayer_get_id2entry(inst->inst_be, &db); ++ if (!db || rc) { ++ continue; ++ } ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n", ++ inst->inst_name); ++ ++ /* ++ * It's possible for this to heap us after free because when we access db ++ * *just* as the server shut's down, we don't know it. So we should probably ++ * do something like wrapping access to the db var in a rwlock, and have "read" ++ * to access, and take writes to change the state. This would prevent the issue. ++ */ ++ DBTYPE type; ++ rc = db->get_type(db, &type); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", ++ "compactdb: failed to determine db type for %s: db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ continue; ++ } ++ ++ rc = dblayer_txn_begin(inst->inst_be, NULL, &txn); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: transaction begin failed: %d\n", rc); ++ break; ++ } ++ /* ++ * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf ++ * "DB_FREELIST_ONLY ++ * Do no page compaction, only returning pages to the filesystem that are already free and at the end ++ * of the file. This flag must be set if the database is a Hash access method database." ++ * ++ */ ++ uint32_t compact_flags = DB_FREE_SPACE; ++ if (type == DB_HASH) { ++ compact_flags |= DB_FREELIST_ONLY; ++ } ++ rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/, ++ &c_data, compact_flags, NULL /*end*/); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", ++ "compactdb: failed to compact %s; db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to abort txn (%s) db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ break; ++ } ++ } else { ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", ++ "compactdb: compact %s - %d pages freed\n", ++ inst->inst_name, c_data.compact_pages_free); ++ if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to commit txn (%s) db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ break; ++ } ++ } ++ } ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting databases finished.\n"); ++ ++ return rc; ++} +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +index 6bb04d21a..e3a49dbac 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +@@ -79,7 +79,8 @@ typedef struct bdb_config + int bdb_previous_lock_config; /* Max lock count when we last shut down-- + * used to determine if we delete the mpool */ + u_int32_t bdb_deadlock_policy; /* i.e. the atype to DB_ENV->lock_detect in deadlock_threadmain */ +- int bdb_compactdb_interval; /* interval to execute compact id2entry dbs */ ++ int32_t bdb_compactdb_interval; /* interval to execute compact id2entry dbs */ ++ char *bdb_compactdb_time; /* time of day to execute compact id2entry dbs */ + } bdb_config; + + int bdb_init(struct ldbminfo *li, config_info *config_array); +@@ -96,6 +97,7 @@ int bdb_db_size(Slapi_PBlock *pb); + int bdb_upgradedb(Slapi_PBlock *pb); + int bdb_upgradednformat(Slapi_PBlock *pb); + int bdb_upgradeddformat(Slapi_PBlock *pb); ++int32_t bdb_compact(struct ldbminfo *li); + int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task); + int bdb_cleanup(struct ldbminfo *li); + int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock); +diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c +index 4165c8fad..42c9bd00a 100644 +--- a/ldap/servers/slapd/back-ldbm/init.c ++++ b/ldap/servers/slapd/back-ldbm/init.c +@@ -180,6 +180,8 @@ ldbm_back_init(Slapi_PBlock *pb) + (void *)ldbm_back_set_info); + rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_CTRL_INFO_FN, + (void *)ldbm_back_ctrl_info); ++ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_COMPACT_FN, ++ (void *)ldbm_back_compact); + + if (rc != 0) { + slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "Failed %d\n", rc); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index 6fa8292eb..48446193e 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -84,6 +84,7 @@ struct config_info + #define CONFIG_DB_TRANSACTION_WAIT "nsslapd-db-transaction-wait" + #define CONFIG_DB_CHECKPOINT_INTERVAL "nsslapd-db-checkpoint-interval" + #define CONFIG_DB_COMPACTDB_INTERVAL "nsslapd-db-compactdb-interval" ++#define CONFIG_DB_COMPACTDB_TIME "nsslapd-db-compactdb-time" + #define CONFIG_DB_TRANSACTION_BATCH "nsslapd-db-transaction-batch-val" + #define CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP "nsslapd-db-transaction-batch-min-wait" + #define CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP "nsslapd-db-transaction-batch-max-wait" +diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +index 5d618a89c..30c9003bf 100644 +--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +@@ -478,6 +478,7 @@ void ldbm_back_search_results_release(void **search_results); + int ldbm_back_init(Slapi_PBlock *pb); + void ldbm_back_prev_search_results(Slapi_PBlock *pb); + int ldbm_back_isinitialized(void); ++int32_t ldbm_back_compact(Slapi_Backend *be); + + /* + * vlv.c +diff --git a/ldap/servers/slapd/filtercmp.c b/ldap/servers/slapd/filtercmp.c +index f7e3ed4d5..c886267bd 100644 +--- a/ldap/servers/slapd/filtercmp.c ++++ b/ldap/servers/slapd/filtercmp.c +@@ -344,7 +344,6 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2) + struct berval *inval1[2], *inval2[2], **outval1, **outval2; + int ret; + Slapi_Attr sattr; +- int cmplen; + + slapi_log_err(SLAPI_LOG_TRACE, "slapi_filter_compare", "=>\n"); + +@@ -379,11 +378,11 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2) + if (key1 && key2) { + struct berval bvkey1 = { + slapi_value_get_length(key1[0]), +- slapi_value_get_string(key1[0]) ++ (char *)slapi_value_get_string(key1[0]) + }; + struct berval bvkey2 = { + slapi_value_get_length(key2[0]), +- slapi_value_get_string(key2[0]) ++ (char *)slapi_value_get_string(key2[0]) + }; + ret = slapi_berval_cmp(&bvkey1, &bvkey2); + } +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index f7d1f8885..fcac53839 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -925,6 +925,12 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) + } + (*(IFP *)value) = pblock->pb_plugin->plg_db2ldif; + break; ++ case SLAPI_PLUGIN_DB_COMPACT_FN: ++ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { ++ return (-1); ++ } ++ (*(IFP *)value) = pblock->pb_plugin->plg_dbcompact; ++ break; + case SLAPI_PLUGIN_DB_DB2INDEX_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { + return (-1); +@@ -2925,7 +2931,12 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + } + pblock->pb_backend->be_noacl = *((int *)value); + break; +- ++ case SLAPI_PLUGIN_DB_COMPACT_FN: ++ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { ++ return (-1); ++ } ++ pblock->pb_plugin->plg_dbcompact = (IFP)value; ++ break; + + /* extendedop plugin functions */ + case SLAPI_PLUGIN_EXT_OP_FN: +@@ -4137,8 +4148,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + break; + + case SLAPI_URP_TOMBSTONE_CONFLICT_DN: +- pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value; +- break; ++ pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value; ++ break; + + case SLAPI_URP_TOMBSTONE_UNIQUEID: + _pblock_assert_pb_intop(pblock); +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 3126a65f3..c48516157 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -1041,6 +1041,7 @@ struct slapdplugin + IFP plg_un_db_ldif2db; /* ldif 2 database */ + IFP plg_un_db_db2ldif; /* database 2 ldif */ + IFP plg_un_db_db2index; /* database 2 index */ ++ IFP plg_un_db_dbcompact; /* compact database */ + IFP plg_un_db_archive2db; /* ldif 2 database */ + IFP plg_un_db_db2archive; /* database 2 ldif */ + IFP plg_un_db_upgradedb; /* convert old idl to new */ +@@ -1082,6 +1083,7 @@ struct slapdplugin + #define plg_result plg_un.plg_un_db.plg_un_db_result + #define plg_ldif2db plg_un.plg_un_db.plg_un_db_ldif2db + #define plg_db2ldif plg_un.plg_un_db.plg_un_db_db2ldif ++#define plg_dbcompact plg_un.plg_un_db.plg_un_db_dbcompact + #define plg_db2index plg_un.plg_un_db.plg_un_db_db2index + #define plg_archive2db plg_un.plg_un_db.plg_un_db_archive2db + #define plg_db2archive plg_un.plg_un_db.plg_un_db_db2archive +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index b956ebe63..570765e47 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -928,6 +928,7 @@ int proxyauth_get_dn(Slapi_PBlock *pb, char **proxydnp, char **errtextp); + #define SLAPI_PLUGIN_DB_GET_INFO_FN 290 + #define SLAPI_PLUGIN_DB_SET_INFO_FN 291 + #define SLAPI_PLUGIN_DB_CTRL_INFO_FN 292 ++#define SLAPI_PLUGIN_DB_COMPACT_FN 294 + + /**** End of database plugin interface. **************************************/ + +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 93d31b806..4c7262ab3 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2021 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -2928,6 +2928,105 @@ des2aes_task_destructor(Slapi_Task *task) + "des2aes_task_destructor <--\n"); + } + ++struct task_compact_data ++{ ++ char *suffix; ++ Slapi_Task *task; ++}; ++ ++static void ++compact_db_task_destructor(Slapi_Task *task) ++{ ++ slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task", ++ "compact_db_task_destructor -->\n"); ++ if (task) { ++ struct task_compact_data *mydata = (struct task_compact_data *)slapi_task_get_data(task); ++ while (slapi_task_get_refcount(task) > 0) { ++ /* Yield to wait for the task to finish */ ++ DS_Sleep(PR_MillisecondsToInterval(100)); ++ } ++ if (mydata) { ++ slapi_ch_free((void **)&mydata); ++ } ++ } ++ slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task", ++ "compact_db_task_destructor <--\n"); ++} ++ ++static void ++task_compact_thread(void *arg) ++{ ++ struct task_compact_data *task_data = arg; ++ Slapi_Task *task = task_data->task; ++ Slapi_Backend *be = NULL; ++ char *cookie = NULL; ++ int32_t rc = -1; ++ ++ slapi_task_inc_refcount(task); ++ slapi_task_begin(task, 1); ++ ++ be = slapi_get_first_backend(&cookie); ++ while (be) { ++ if (be->be_private == 0) { ++ /* Found a non-private backend, start compacting */ ++ rc = (be->be_database->plg_dbcompact)(be); ++ break; ++ } ++ be = (backend *)slapi_get_next_backend(cookie); ++ } ++ slapi_ch_free_string(&cookie); ++ ++ slapi_task_finish(task, rc); ++ slapi_task_dec_refcount(task); ++} ++ ++/* ++ * compact the BDB database ++ * ++ * dn: cn=compact_it,cn=compact db,cn=tasks,cn=config ++ * objectclass: top ++ * objectclass: extensibleObject ++ * cn: compact_it ++ */ ++static int ++task_compact_db_add(Slapi_PBlock *pb, ++ Slapi_Entry *e, ++ Slapi_Entry *eAfter __attribute__((unused)), ++ int *returncode, ++ char *returntext, ++ void *arg __attribute__((unused))) ++{ ++ Slapi_Task *task = slapi_new_task(slapi_entry_get_ndn(e)); ++ struct task_compact_data *task_data = NULL; ++ PRThread *thread = NULL; ++ ++ slapi_task_log_notice(task, "Beginning database compaction task...\n"); ++ ++ /* Register our destructor for cleaning up our private data */ ++ slapi_task_set_destructor_fn(task, compact_db_task_destructor); ++ ++ task_data = (struct task_compact_data *)slapi_ch_calloc(1, sizeof(struct task_compact_data)); ++ task_data->task = task; ++ slapi_task_set_data(task, task_data); ++ ++ /* Start the compaction as a separate thread */ ++ thread = PR_CreateThread(PR_USER_THREAD, task_compact_thread, ++ (void *)task_data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE); ++ if (thread == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "task_compact_db_add", "Unable to create db compact thread!\n"); ++ *returncode = LDAP_OPERATIONS_ERROR; ++ slapi_ch_free((void **)&task_data); ++ } ++ ++ if (*returncode != LDAP_SUCCESS) { ++ slapi_task_finish(task, *returncode); ++ return SLAPI_DSE_CALLBACK_ERROR; ++ } ++ ++ return SLAPI_DSE_CALLBACK_OK; ++} ++ + /* cleanup old tasks that may still be in the DSE from a previous session + * (this can happen if the server crashes [no matter how unlikely we like + * to think that is].) +@@ -3010,6 +3109,7 @@ task_init(void) + slapi_task_register_handler("sysconfig reload", task_sysconfig_reload_add); + slapi_task_register_handler("fixup tombstones", task_fixup_tombstones_add); + slapi_task_register_handler("des2aes", task_des2aes); ++ slapi_task_register_handler("compact db", task_compact_db_add); + } + + /* called when the server is shutting down -- abort all existing tasks */ +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index 11cae972c..b73dc8460 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -196,6 +196,7 @@ export class Database extends React.Component { + dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'], + chxpoint: attrs['nsslapd-db-checkpoint-interval'], + compactinterval: attrs['nsslapd-db-compactdb-interval'], ++ compacttime: attrs['nsslapd-db-compactdb-time'], + importcacheauto: attrs['nsslapd-import-cache-autosize'], + importcachesize: attrs['nsslapd-import-cachesize'], + }, +diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +index 6a71c138d..1fa9f2cc2 100644 +--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx ++++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +@@ -36,6 +36,7 @@ export class GlobalDatabaseConfig extends React.Component { + dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + chxpoint: this.props.data.chxpoint, + compactinterval: this.props.data.compactinterval, ++ compacttime: this.props.data.compacttime, + importcachesize: this.props.data.importcachesize, + importcacheauto: this.props.data.importcacheauto, + // These variables store the original value (used for saving config) +@@ -55,6 +56,7 @@ export class GlobalDatabaseConfig extends React.Component { + _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + _chxpoint: this.props.data.chxpoint, + _compactinterval: this.props.data.compactinterval, ++ _compacttime: this.props.data.compacttime, + _importcachesize: this.props.data.importcachesize, + _importcacheauto: this.props.data.importcacheauto, + _db_cache_auto: this.props.data.db_cache_auto, +@@ -186,6 +188,10 @@ export class GlobalDatabaseConfig extends React.Component { + cmd.push("--compactdb-interval=" + this.state.compactinterval); + requireRestart = true; + } ++ if (this.state._compacttime != this.state.compacttime) { ++ cmd.push("--compactdb-time=" + this.state.compacttime); ++ requireRestart = true; ++ } + if (this.state.import_cache_auto) { + // Auto cache is selected + if (this.state._import_cache_auto != this.state.import_cache_auto) { +@@ -485,7 +491,15 @@ export class GlobalDatabaseConfig extends React.Component { + Database Compact Interval + + +- ++ ++ ++ ++ ++ ++ Database Compact Time ++ ++ ++ + + + +diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py +index c184c8d4f..d6161cebb 100644 +--- a/src/lib389/lib389/_constants.py ++++ b/src/lib389/lib389/_constants.py +@@ -154,6 +154,7 @@ DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS + DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS + DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS + DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS ++DN_COMPACTDB_TASK = "cn=compact db,%s" % DN_TASKS + + # Script Constants + LDIF2DB = 'ldif2db' +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index 13bb27842..ad78a6ffe 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -1005,6 +1005,7 @@ class DatabaseConfig(DSLdapObject): + 'nsslapd-db-transaction-wait', + 'nsslapd-db-checkpoint-interval', + 'nsslapd-db-compactdb-interval', ++ 'nsslapd-db-compactdb-time', + 'nsslapd-db-page-size', + 'nsslapd-db-transaction-batch-val', + 'nsslapd-db-transaction-batch-min-wait', +diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py +index 722764d10..7b2f32c23 100644 +--- a/src/lib389/lib389/cli_conf/backend.py ++++ b/src/lib389/lib389/cli_conf/backend.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2020 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # Copyright (C) 2019 William Brown + # All rights reserved. + # +@@ -19,6 +19,7 @@ from lib389.chaining import (ChainingLinks) + from lib389.monitor import MonitorLDBM + from lib389.replica import Replicas + from lib389.utils import ensure_str, is_a_dn, is_dn_parent ++from lib389.tasks import DBCompactTask + from lib389._constants import * + from lib389.cli_base import ( + _format_status, +@@ -41,6 +42,7 @@ arg_to_attr = { + 'txn_wait': 'nsslapd-db-transaction-wait', + 'checkpoint_interval': 'nsslapd-db-checkpoint-interval', + 'compactdb_interval': 'nsslapd-db-compactdb-interval', ++ 'compactdb_time': 'nsslapd-db-compactdb-time', + 'txn_batch_val': 'nsslapd-db-transaction-batch-val', + 'txn_batch_min': 'nsslapd-db-transaction-batch-min-wait', + 'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait', +@@ -789,6 +791,18 @@ def backend_reindex_vlv(inst, basedn, log, args): + log.info("Successfully reindexed VLV indexes") + + ++def backend_compact(inst, basedn, log, args): ++ task = DBCompactTask(inst) ++ task_properties = {} ++ if args.only_changelog: ++ task_properties = {'justChangelog': 'yes'} ++ task.create(properties=task_properties) ++ task.wait() ++ if task.get_exit_code() != 0: ++ raise ValueError("Failed to create Database Compaction Task") ++ log.info("Successfully started Database Compaction Task") ++ ++ + def create_parser(subparsers): + backend_parser = subparsers.add_parser('backend', help="Manage database suffixes and backends") + subcommands = backend_parser.add_subparsers(help="action") +@@ -994,6 +1008,7 @@ def create_parser(subparsers): + set_db_config_parser.add_argument('--checkpoint-interval', help='Sets the amount of time in seconds after which the Directory Server sends a ' + 'checkpoint entry to the database transaction log') + set_db_config_parser.add_argument('--compactdb-interval', help='Sets the interval in seconds when the database is compacted') ++ set_db_config_parser.add_argument('--compactdb-time', help='Sets the Time Of Day to compact the database after the "compactdb interval" has been reached: Use this format to set the hour and minute: HH:MM') + set_db_config_parser.add_argument('--txn-batch-val', help='Specifies how many transactions will be batched before being committed') + set_db_config_parser.add_argument('--txn-batch-min', help='Controls when transactions should be flushed earliest, independently of ' + 'the batch count (only works when txn-batch-val is set)') +@@ -1121,3 +1136,10 @@ def create_parser(subparsers): + ####################################################### + get_tree_parser = subcommands.add_parser('get-tree', help='Get a representation of the suffix tree') + get_tree_parser.set_defaults(func=backend_get_tree) ++ ++ ####################################################### ++ # Run the db compaction task ++ ####################################################### ++ compact_parser = subcommands.add_parser('compact-db', help='Compact the database and the replication changelog') ++ compact_parser.set_defaults(func=backend_compact) ++ compact_parser.add_argument('--only-changelog', action='store_true', help='Only compact the Replication Change Log') +diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py +index 04886f632..3478a0a1f 100644 +--- a/src/lib389/lib389/cli_conf/replication.py ++++ b/src/lib389/lib389/cli_conf/replication.py +@@ -37,6 +37,7 @@ arg_to_attr = { + 'max_entries': 'nsslapd-changelogmaxentries', + 'max_age': 'nsslapd-changelogmaxage', + 'compact_interval': 'nsslapd-changelogcompactdb-interval', ++ 'compact_time': 'nsslapd-changelogcompactdb-time', + 'trim_interval': 'nsslapd-changelogtrim-interval', + 'encrypt_algo': 'nsslapd-encryptionalgorithm', + 'encrypt_key': 'nssymmetrickey', +@@ -1216,6 +1217,8 @@ def create_parser(subparsers): + repl_set_cl.add_argument('--max-entries', help="The maximum number of entries to get in the replication changelog") + repl_set_cl.add_argument('--max-age', help="The maximum age of a replication changelog entry") + repl_set_cl.add_argument('--compact-interval', help="The replication changelog compaction interval") ++ repl_set_cl.add_argument('--compact-time', help='Sets the Time Of Day to compact the database after the changelog "compact interval" ' ++ 'has been reached: Use this format to set the hour and minute: HH:MM') + repl_set_cl.add_argument('--trim-interval', help="The interval to check if the replication changelog can be trimmed") + + repl_get_cl = repl_subcommands.add_parser('get-changelog', help='Display replication changelog attributes.') +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index 590c6ee79..b64bc6ce5 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -217,6 +217,19 @@ class EntryUUIDFixupTask(Task): + self._must_attributes.extend(['basedn']) + + ++class DBCompactTask(Task): ++ """A single instance of compactdb task entry ++ ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ """ ++ ++ def __init__(self, instance, dn=None): ++ self.cn = 'compact_db_' + Task._get_task_date() ++ dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK ++ super(DBCompactTask, self).__init__(instance, dn) ++ ++ + class SchemaReloadTask(Task): + """A single instance of schema reload task entry + +@@ -227,7 +240,6 @@ class SchemaReloadTask(Task): + def __init__(self, instance, dn=None): + self.cn = 'schema_reload_' + Task._get_task_date() + dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS +- + super(SchemaReloadTask, self).__init__(instance, dn) + + +-- +2.26.3 + diff --git a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch deleted file mode 100644 index 13a64c2..0000000 --- a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch +++ /dev/null @@ -1,254 +0,0 @@ -From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001 -From: Pierre Rogier -Date: Mon, 30 Nov 2020 12:42:17 +0100 -Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449) - in 1.4.3 branch - ---- - .../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++ - 1 file changed, 234 insertions(+) - create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -new file mode 100644 -index 000000000..b03d170c8 ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -0,0 +1,234 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import time -+import subprocess -+import pytest -+ -+from lib389.cli_conf.replication import get_repl_monitor_info -+from lib389.tasks import * -+from lib389.utils import * -+from lib389.topologies import topology_m2 -+from lib389.cli_base import FakeArgs -+from lib389.cli_base.dsrc import dsrc_arg_concat -+from lib389.cli_base import connect_instance -+ -+pytestmark = pytest.mark.tier0 -+ -+LOG_FILE = '/tmp/monitor.log' -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def set_log_file(request): -+ fh = logging.FileHandler(LOG_FILE) -+ fh.setLevel(logging.DEBUG) -+ log.addHandler(fh) -+ -+ def fin(): -+ log.info('Delete files') -+ os.remove(LOG_FILE) -+ -+ config = os.path.expanduser(DSRC_HOME) -+ if os.path.exists(config): -+ os.remove(config) -+ -+ request.addfinalizer(fin) -+ -+ -+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): -+ with open(LOG_FILE, 'r+') as f: -+ file_content = f.read() -+ -+ for item in content_list: -+ log.info('Check that "{}" is present'.format(item)) -+ assert item in file_content -+ -+ if second_list is not None: -+ log.info('Check for "{}"'.format(second_list)) -+ for item in second_list: -+ assert item in file_content -+ -+ if single_value is not None: -+ log.info('Check for "{}"'.format(single_value)) -+ assert single_value in file_content -+ -+ if error_list is not None: -+ log.info('Check that "{}" is not present'.format(error_list)) -+ for item in error_list: -+ assert item not in file_content -+ -+ log.info('Reset log file') -+ f.truncate(0) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1739718 -+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") -+def test_dsconf_replication_monitor(topology_m2, set_log_file): -+ """Test replication monitor that was ported from legacy tools -+ -+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 -+ :setup: 2 MM topology -+ :steps: -+ 1. Create DS instance -+ 2. Run replication monitor with connections option -+ 3. Run replication monitor with aliases option -+ 4. Run replication monitor with --json option -+ 5. Run replication monitor with .dsrc file created -+ 6. Run replication monitor with connections option as if using dsconf CLI -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ """ -+ -+ m1 = topology_m2.ms["master1"] -+ m2 = topology_m2.ms["master2"] -+ -+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -+ -+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) -+ content_list = ['Replica Root: dc=example,dc=com', -+ 'Replica ID: 1', -+ 'Replica Status: Available', -+ 'Max CSN', -+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', -+ 'Replica Enabled: on', -+ 'Update In Progress: FALSE', -+ 'Last Update Start:', -+ 'Last Update End:', -+ 'Number Of Changes Sent:', -+ 'Number Of Changes Skipped: None', -+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', -+ 'Last Init Start:', -+ 'Last Init End:', -+ 'Last Init Status:', -+ 'Reap Active: 0', -+ 'Replication Status: In Synchronization', -+ 'Replication Lag Time:', -+ 'Supplier: ', -+ m2.host + ':' + str(m2.port), -+ 'Replica Root: dc=example,dc=com', -+ 'Replica ID: 2', -+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] -+ -+ error_list = ['consumer (Unavailable)', -+ 'Failed to retrieve database RUV entry from consumer'] -+ -+ json_list = ['type', -+ 'list', -+ 'items', -+ 'name', -+ m1.host + ':' + str(m1.port), -+ 'data', -+ '"replica_id": "1"', -+ '"replica_root": "dc=example,dc=com"', -+ '"replica_status": "Available"', -+ 'maxcsn', -+ 'agmts_status', -+ 'agmt-name', -+ '002', -+ 'replica', -+ m2.host + ':' + str(m2.port), -+ 'replica-enabled', -+ 'update-in-progress', -+ 'last-update-start', -+ 'last-update-end', -+ 'number-changes-sent', -+ 'number-changes-skipped', -+ 'last-update-status', -+ 'Error (0) Replica acquired successfully: Incremental update succeeded', -+ 'last-init-start', -+ 'last-init-end', -+ 'last-init-status', -+ 'reap-active', -+ 'replication-status', -+ 'In Synchronization', -+ 'replication-lag-time', -+ '"replica_id": "2"', -+ '001', -+ m1.host + ':' + str(m1.port)] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + m2.host + ':' + str(m2.port) -+ -+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, -+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] -+ -+ aliases = ['M1=' + m1.host + ':' + str(m1.port), -+ 'M2=' + m2.host + ':' + str(m2.port)] -+ -+ args = FakeArgs() -+ args.connections = connections -+ args.aliases = None -+ args.json = False -+ -+ log.info('Run replication monitor with connections option') -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) -+ -+ log.info('Run replication monitor with aliases option') -+ args.aliases = aliases -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, alias_content) -+ -+ log.info('Run replication monitor with --json option') -+ args.aliases = None -+ args.json = True -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(json_list) -+ -+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f: -+ f.write(dsrc_content) -+ -+ args.connections = None -+ args.aliases = None -+ args.json = False -+ -+ log.info('Run replication monitor when .dsrc file is present with content') -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, alias_content) -+ os.remove(os.path.expanduser(DSRC_HOME)) -+ -+ log.info('Run replication monitor with connections option as if using dsconf CLI') -+ # Perform same test than steps 2 test but without using directly the topology instance. -+ # but with an instance similar to those than dsconf cli generates: -+ # step 2 args -+ args.connections = connections -+ args.aliases = None -+ args.json = False -+ # args needed to generate an instance with dsrc_arg_concat -+ args.instance = 'master1' -+ args.basedn = None -+ args.binddn = None -+ args.bindpw = None -+ args.pwdfile = None -+ args.prompt = False -+ args.starttls = False -+ dsrc_inst = dsrc_arg_concat(args, None) -+ inst = connect_instance(dsrc_inst, True, args) -+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) --- -2.26.2 - diff --git a/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch b/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch new file mode 100644 index 0000000..94618f6 --- /dev/null +++ b/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch @@ -0,0 +1,155 @@ +From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Sat, 29 May 2021 13:19:53 -0400 +Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in + 1.4.3 + +Description: In 1.4.3 the replication changelog is a separate database, + so it needs a separate "nsds5task" compaction task (COMPACT_CL5) + +relates: https://github.com/389ds/389-ds-base/issues/4778 + +ASAN tested and approved + +Reviewed by: mreynolds +--- + ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++---------- + ldap/servers/plugins/replication/cl5_api.h | 1 + + .../replication/repl5_replica_config.c | 9 +++++++- + 3 files changed, 19 insertions(+), 12 deletions(-) + +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 75a2f46f5..4c5077b48 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -266,7 +266,6 @@ static int _cl5TrimInit(void); + static void _cl5TrimCleanup(void); + static int _cl5TrimMain(void *param); + static void _cl5DoTrimming(void); +-static void _cl5CompactDBs(void); + static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid); + static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key); + static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key); +@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused))) + if (slapi_current_utc_time() > compactdb_time) { + /* time to trim */ + timeCompactPrev = timeNow; +- _cl5CompactDBs(); ++ cl5CompactDBs(); + compacting = PR_FALSE; + } + } +@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data) + } + + /* clear free page files to reduce changelog */ +-static void +-_cl5CompactDBs(void) ++void ++cl5CompactDBs(void) + { + int rc; + Object *fileObj = NULL; +@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void) + rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n", + rc, db_strerror(rc)); + goto bail; + } + + + slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, +- "_cl5CompactDBs - compacting replication changelogs...\n"); ++ "cl5CompactDBs - compacting replication changelogs...\n"); + for (fileObj = objset_first_obj(s_cl5Desc.dbFiles); + fileObj; + fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) { +@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void) + &c_data, DB_FREE_SPACE, NULL /*end*/); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n", ++ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n", + dbFile->replName, rc, db_strerror(rc)); + goto bail; + } + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, +- "_cl5CompactDBs - %s - %d pages freed\n", ++ "cl5CompactDBs - %s - %d pages freed\n", + dbFile->replName, c_data.compact_pages_free); + } + + slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, +- "_cl5CompactDBs - compacting replication changelogs finished.\n"); ++ "cl5CompactDBs - compacting replication changelogs finished.\n"); + bail: + if (fileObj) { + object_release(fileObj); +@@ -3303,14 +3302,14 @@ bail: + rc = TXN_ABORT(txnid); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n", + rc, db_strerror(rc)); + } + } else { + rc = TXN_COMMIT(txnid); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n", + rc, db_strerror(rc)); + } + } +diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h +index 4b0949fb3..11db771f2 100644 +--- a/ldap/servers/plugins/replication/cl5_api.h ++++ b/ldap/servers/plugins/replication/cl5_api.h +@@ -405,5 +405,6 @@ int cl5DeleteRUV(void); + void cl5CleanRUV(ReplicaId rid); + void cl5NotifyCleanup(int rid); + void trigger_cl_purging(cleanruv_purge_data *purge_data); ++void cl5CompactDBs(void); + + #endif +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index a969ef82f..e708a1ccb 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -29,6 +29,8 @@ + #define CLEANRUVLEN 8 + #define CLEANALLRUV "CLEANALLRUV" + #define CLEANALLRUVLEN 11 ++#define COMPACT_CL5 "COMPACT_CL5" ++#define COMPACT_CL5_LEN 11 + #define REPLICA_RDN "cn=replica" + + #define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ +@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext + static int + replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods) + { +- + if (strcasecmp(task_name, CL2LDIF_TASK) == 0) { + if (apply_mods) { + return replica_execute_cl2ldif_task(r, returntext); +@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap + return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext); + } else + return LDAP_SUCCESS; ++ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) { ++ /* compact the replication changelogs */ ++ if (apply_mods) { ++ cl5CompactDBs(); ++ } ++ return LDAP_SUCCESS; + } else { + PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name); + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, +-- +2.26.3 + diff --git a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch deleted file mode 100644 index 74aa5aa..0000000 --- a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 26 Nov 2020 09:08:13 +1000 -Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy - -Bug Description: Due to some changes in dsrc for tlsreqcert -and how def open was structured in lib389, the system ldap.conf -policy was ignored. - -Fix Description: Default to using the system ldap.conf policy -if undefined in lib389 or the tls_reqcert param in dsrc. - -fixes: #4460 - -Author: William Brown - -Review by: ??? ---- - src/lib389/lib389/__init__.py | 11 +++++++---- - src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++------- - 2 files changed, 16 insertions(+), 11 deletions(-) - -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 99ea9cc6a..4e6a1905a 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object): - # Now, we are still an allocated ds object so we can be re-installed - self.state = DIRSRV_STATE_ALLOCATED - -- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD, -+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None, - usercert=None, userkey=None): - ''' - It opens a ldap bound connection to dirsrv so that online -@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object): - try: - # Note this sets LDAP.OPT not SELF. Because once self has opened - # it can NOT change opts on reused (ie restart) -- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) -- self.log.debug("Using certificate policy %s", reqcert) -- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert) -+ if reqcert is not None: -+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) -+ self.log.debug("Using lib389 certificate policy %s", reqcert) -+ else: -+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy") -+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)) - except ldap.LDAPError as e: - self.log.fatal('TLS negotiation failed: %s', e) - raise e -diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py -index fec18a5f9..9b09ea568 100644 ---- a/src/lib389/lib389/cli_base/dsrc.py -+++ b/src/lib389/lib389/cli_base/dsrc.py -@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst): - 'tls_cacertdir': None, - 'tls_cert': None, - 'tls_key': None, -- 'tls_reqcert': ldap.OPT_X_TLS_HARD, -+ 'tls_reqcert': None, - 'starttls': args.starttls, - 'prompt': False, - 'pwdfile': None, -@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log): - dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None) - dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None) - if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']: -- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) -+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) - - dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None) - # At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause -@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log): - - dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None) - dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None) -- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard') -- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']: -- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, -- path)) -+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None) - if dsrc_inst['tls_reqcert'] == 'never': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER - elif dsrc_inst['tls_reqcert'] == 'allow': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW -- else: -+ elif dsrc_inst['tls_reqcert'] == 'hard': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD -+ elif dsrc_inst['tls_reqcert'] is None: -+ # Use system value -+ pass -+ else: -+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path)) - dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False) - dsrc_inst['pwdfile'] = None - dsrc_inst['prompt'] = False --- -2.26.2 - diff --git a/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch b/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch new file mode 100644 index 0000000..db28cfa --- /dev/null +++ b/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch @@ -0,0 +1,52 @@ +From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 10 Jun 2021 15:03:27 +0200 +Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt + c_isreplication_session connection flags (#4799) + +Bug description: + The fix for ticket #3764 was broken with a missing break in a + switch. The consequence is that while setting the client IP + address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the + connection is erroneously set as replication connection. + This can lead to crash or failure of testcase + test_access_from_certain_network_only_ip. + This bug was quite hidden until the fix for #4764 is + showing it more frequently + +Fix description: + Add the missing break + +relates: https://github.com/389ds/389-ds-base/issues/4797 + +Reviewed by: Mark Reynolds + +Platforms tested: F33 +--- + ldap/servers/slapd/pblock.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index fcac53839..a64986aeb 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); + break; +- case SLAPI_CONN_CLIENTNETADDR_ACLIP: ++ case SLAPI_CONN_CLIENTNETADDR_ACLIP: + if (pblock->pb_conn == NULL) { + break; + } +@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip); + pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value; + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); ++ break; + case SLAPI_CONN_IS_REPLICATION_SESSION: + if (pblock->pb_conn == NULL) { + slapi_log_err(SLAPI_LOG_ERR, +-- +2.31.1 + diff --git a/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch b/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch new file mode 100644 index 0000000..eb16fcb --- /dev/null +++ b/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch @@ -0,0 +1,79 @@ +From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Wed, 27 Jan 2021 09:56:38 +0000 +Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572) + +Bug Description: As multiple suffixes per backend were no longer used, this +functionality has been replaced with a single suffix per backend. Legacy +code remains that adds multiple suffixes to the dse internal backend, +resulting in memory allocations that are lost. + +Also a minor typo is corrected in backend.c + +Fix Description: Calls to be_addsuffix on the DSE backend are removed +as they are never used. + +Fixes: https://github.com/389ds/389-ds-base/issues/4396 + +Reviewed by: mreynolds389, Firstyear, droideck (Thank you) +--- + ldap/servers/slapd/backend.c | 2 +- + ldap/servers/slapd/fedse.c | 12 +++--------- + 2 files changed, 4 insertions(+), 10 deletions(-) + +diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c +index bc52b4643..5707504a9 100644 +--- a/ldap/servers/slapd/backend.c ++++ b/ldap/servers/slapd/backend.c +@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in + } + be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", + name, type); +- if (NULL == be->be_configdn) { ++ if (NULL == be->be_monitordn) { + slapi_log_err(SLAPI_LOG_ERR, + "be_init", "Failed create instance monitor dn for " + "plugin %s, instance %s\n", +diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 0d645f909..7b820b540 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)), + } + + /* +- * Called from config.c to install the internal backends ++ * Called from main.c to install the internal backends + */ + int + setup_internal_backends(char *configdir) +@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir) + Slapi_DN counters; + Slapi_DN snmp; + Slapi_DN root; +- Slapi_Backend *be; + Slapi_DN encryption; + Slapi_DN saslmapping; + Slapi_DN plugins; +@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir) + dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL); + dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL); + +- be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); +- be_addsuffix(be, &root); +- be_addsuffix(be, &monitor); +- be_addsuffix(be, &config); ++ be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); + + /* +- * Now that the be's are in place, we can +- * setup the mapping tree. ++ * Now that the be's are in place, we can setup the mapping tree. + */ +- + if (mapping_tree_init()) { + slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n"); + exit(1); +-- +2.31.1 + diff --git a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch deleted file mode 100644 index 16637bb..0000000 --- a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 12 Nov 2020 13:04:21 +1000 -Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes - sigsegv in chaining - -Bug Description: When a paged search through chaining backend is -received with a false criticality (such as SSSD), chaining backend -will sigsegv due to a null context. - -Fix Description: When a NULL ctx is recieved to be freed, this is -as paged results have finished being sent, so we check the NULL -ctx and move on. - -fixes: #4428 - -Author: William Brown - -Review by: @droideck, @mreynolds389 ---- - ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++ - ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++ - 2 files changed, 10 insertions(+) - -diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c -index 69d23a6b5..d47cbc8e4 100644 ---- a/ldap/servers/plugins/chainingdb/cb_search.c -+++ b/ldap/servers/plugins/chainingdb/cb_search.c -@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr) - - slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, - "chaining_back_search_results_release\n"); -+ if (ctx == NULL) { -+ /* The paged search is already complete, just return */ -+ /* Could we have a ctx state flag instead? */ -+ return; -+ } -+ - if (ctx->readahead != ctx->tobefreed) { - slapi_entry_free(ctx->readahead); - } -diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c -index dfd5dd92c..d52fd25a6 100644 ---- a/ldap/servers/plugins/chainingdb/cb_utils.c -+++ b/ldap/servers/plugins/chainingdb/cb_utils.c -@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c - return LDAP_SUCCESS; - } - -+#ifdef DEBUG -+static int debug_on = 1; -+#else - static int debug_on = 0; -+#endif - - int - cb_debug_on() --- -2.26.2 - diff --git a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch deleted file mode 100644 index de8c8a8..0000000 --- a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Mon, 7 Dec 2020 00:41:27 +0100 -Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate - of setsocketopt (#4437) - -Bug description: - When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered - until buffer is full or tcp_cork is set. This reduce network traffic when - the application writes partial pdu. - DS write complete pdu (results/entries/..) so it gives low benefit for DS. - In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send - immediately results/entries at each operation. This is an overhead of syscalls. - -Fix description: - Disable nagle by default - -relates: https://github.com/389ds/389-ds-base/issues/4315 - -Reviewed by: @mreynolds389, @Firstyear - -Platforms tested: F33 ---- - ldap/servers/slapd/libglobs.c | 9 ++++----- - 1 file changed, 4 insertions(+), 5 deletions(-) - -diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c -index 7d5374c90..f8cf162e6 100644 ---- a/ldap/servers/slapd/libglobs.c -+++ b/ldap/servers/slapd/libglobs.c -@@ -1635,12 +1635,11 @@ FrontendConfig_init(void) - #endif /* USE_SYSCONF */ - - init_accesscontrol = cfg->accesscontrol = LDAP_ON; --#if defined(LINUX) -- /* On Linux, by default, we use TCP_CORK so we must enable nagle */ -- init_nagle = cfg->nagle = LDAP_ON; --#else -+ -+ /* nagle triggers set/unset TCP_CORK setsockopt per operation -+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork -+ */ - init_nagle = cfg->nagle = LDAP_OFF; --#endif - init_security = cfg->security = LDAP_OFF; - init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON; - cfg->tls_check_crl = TLS_CHECK_NONE; --- -2.26.2 - diff --git a/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch b/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch new file mode 100644 index 0000000..9e5231d --- /dev/null +++ b/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch @@ -0,0 +1,66 @@ +From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Fri, 2 Apr 2021 14:05:41 +0200 +Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement + (#4712) + +Bug description: + #4396 fixes a memory leak but did not set 'cn=config' as + DSE backend. + It had no signicant impact unless with sidgen IPA plugin + +Fix description: + revert the portion of the #4364 patch that set be_suffix + in be_addsuffix, free the suffix before setting it + +relates: https://github.com/389ds/389-ds-base/issues/4700 + +Reviewed by: Pierre Rogier (thanks !) + +Platforms tested: F33 +--- + ldap/servers/slapd/backend.c | 3 ++- + ldap/servers/slapd/fedse.c | 6 +++++- + 2 files changed, 7 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c +index 5707504a9..5db706841 100644 +--- a/ldap/servers/slapd/backend.c ++++ b/ldap/servers/slapd/backend.c +@@ -173,7 +173,8 @@ void + be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix) + { + if (be->be_state != BE_STATE_DELETED) { +- be->be_suffix = slapi_sdn_dup(suffix);; ++ slapi_sdn_free(&be->be_suffix); ++ be->be_suffix = slapi_sdn_dup(suffix); + } + } + +diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 7b820b540..44159c991 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir) + Slapi_DN counters; + Slapi_DN snmp; + Slapi_DN root; ++ Slapi_Backend *be; + Slapi_DN encryption; + Slapi_DN saslmapping; + Slapi_DN plugins; +@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir) + dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL); + dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL); + +- be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); ++ be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); ++ be_addsuffix(be, &root); ++ be_addsuffix(be, &monitor); ++ be_addsuffix(be, &config); + + /* + * Now that the be's are in place, we can setup the mapping tree. +-- +2.31.1 + diff --git a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch deleted file mode 100644 index a2cb4bd..0000000 --- a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001 -From: Firstyear -Date: Fri, 4 Dec 2020 10:14:33 +1000 -Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in - SSCA (#4472) - -Bug Description: During SSCA creation, the server cert did not have -the machine name, which meant that the cert would not work without -reqcert = never. - -Fix Description: Add the machine name as an alt name during SSCA -creation. It is not guaranteed this value is correct, but it -is better than nothing. - -relates: https://github.com/389ds/389-ds-base/issues/4460 - -Author: William Brown - -Review by: mreynolds389, droideck ---- - src/lib389/lib389/instance/setup.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py -index 7d42ba292..e46f2d1e5 100644 ---- a/src/lib389/lib389/instance/setup.py -+++ b/src/lib389/lib389/instance/setup.py -@@ -887,7 +887,7 @@ class SetupDs(object): - tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir)) - tlsdb_inst.import_rsa_crt(ca) - -- csr = tlsdb.create_rsa_key_and_csr() -+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']]) - (ca, crt) = ssca.rsa_ca_sign_csr(csr) - tlsdb.import_rsa_crt(ca, crt) - if general['selinux']: --- -2.26.2 - diff --git a/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch b/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch new file mode 100644 index 0000000..2371384 --- /dev/null +++ b/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch @@ -0,0 +1,88 @@ +From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Mon, 17 May 2021 16:10:22 +0200 +Subject: [PATCH] Issue 4725 - Fix compiler warnings + +--- + ldap/servers/slapd/proto-slap.h | 2 +- + ldap/servers/slapd/pw.c | 9 ++++----- + ldap/servers/slapd/pw_retry.c | 2 -- + 3 files changed, 5 insertions(+), 8 deletions(-) + +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 6ff178127..2768d5a1d 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e); + * pw_retry.c + */ + int update_pw_retry(Slapi_PBlock *pb); +-int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count); ++int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count); + void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods); + void pw_set_componentID(struct slapi_componentid *cid); + struct slapi_componentid *pw_get_componentID(void); +diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c +index d98422513..2a167c8f1 100644 +--- a/ldap/servers/slapd/pw.c ++++ b/ldap/servers/slapd/pw.c +@@ -2622,7 +2622,6 @@ int + slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) { + passwdPolicy *pwpolicy = NULL; + char *dn = NULL; +- int tpr_maxuse; + char *value; + time_t cur_time; + char *cur_time_str = NULL; +@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen + return 0; + } + +- if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) { ++ if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) { + /* the password was not reset by an admin while a TRP pwp was set, just returned */ + return 0; + } +@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen + /* Check entry TPR max use */ + if (pwpolicy->pw_tpr_maxuse >= 0) { + uint use_count; +- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount"); ++ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount"); + if (value) { + /* max Use is enforced */ + use_count = strtoull(value, 0, 0); +@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen + + /* Check entry TPR expiration at a specific time */ + if (pwpolicy->pw_tpr_delay_expire_at >= 0) { +- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt"); ++ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt"); + if (value) { + /* max Use is enforced */ + if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) { +@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen + + /* Check entry TPR valid after a specific time */ + if (pwpolicy->pw_tpr_delay_valid_from >= 0) { +- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom"); ++ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom"); + if (value) { + /* validity after a specific time is enforced */ + if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) { +diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c +index 5d13eb636..af54aa19d 100644 +--- a/ldap/servers/slapd/pw_retry.c ++++ b/ldap/servers/slapd/pw_retry.c +@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time) + int + set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count) + { +- char *timestr; +- time_t unlock_time; + char retry_cnt[16] = {0}; /* 1-65535 */ + const char *dn = NULL; + Slapi_DN *sdn = NULL; +-- +2.31.1 + diff --git a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch deleted file mode 100644 index 067d06e..0000000 --- a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 9 Dec 2020 09:52:08 -0500 -Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix - -Description: heap-use-after-free in slapi_be_getsuffix after disk - monitoring runs. This feature is freeing a list of - backends which it does not need to do. - -Fixes: https://github.com/389ds/389-ds-base/issues/4483 - -Reviewed by: firstyear & tbordaz(Thanks!!) ---- - ldap/servers/slapd/daemon.c | 13 +------------ - 1 file changed, 1 insertion(+), 12 deletions(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 49199e4df..691f77570 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - now = start; - while ((now - start) < grace_period) { - if (g_get_shutdown()) { -- be_index = 0; -- if (be_list[be_index] != NULL) { -- while ((be = be_list[be_index++])) { -- slapi_be_free(&be); -- } -- } - slapi_ch_array_free(dirs); - dirs = NULL; - return; -@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - } -- be_index = 0; -- if (be_list[be_index] != NULL) { -- while ((be = be_list[be_index++])) { -- slapi_be_free(&be); -- } -- } -+ - slapi_ch_array_free(dirs); - dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ - g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); --- -2.26.2 - diff --git a/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch b/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch new file mode 100644 index 0000000..6785c04 --- /dev/null +++ b/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch @@ -0,0 +1,202 @@ +From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Mon, 28 Jun 2021 16:40:15 +0200 +Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup + +Bug description: + This bug exist only in 1.4.3 branch + In 1.4.3, CL open as a separated database so + compaction mechanism is started along a CL + mechanism (CL trimming). + The problem is that the configuration of the CL + compaction is done after the compaction mechanism + (is started). Depending on thread scheduling it + crashes + +Fix description: + Make sure configuration of compaction thread is + taken into account (cl5ConfigSetCompaction) before + the compaction thread starts (cl5open) + +relates: https://github.com/389ds/389-ds-base/issues/4814 + +Reviewed by: Mark Reynolds, Simon Pichugin (thanks !) + +Platforms tested: 8.5 +--- + ldap/servers/plugins/replication/cl5_api.c | 24 ++++++++++++------- + ldap/servers/plugins/replication/cl5_api.h | 10 +++++++- + ldap/servers/plugins/replication/cl5_config.c | 8 +++++-- + ldap/servers/plugins/replication/cl5_init.c | 4 +++- + ldap/servers/plugins/replication/cl5_test.c | 2 +- + .../servers/plugins/replication/repl_shared.h | 2 +- + 6 files changed, 35 insertions(+), 15 deletions(-) + +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 4c5077b48..954b6b9e3 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -1016,6 +1016,20 @@ cl5GetState() + return s_cl5Desc.dbState; + } + ++void ++cl5ConfigSetCompaction(int compactInterval, char *compactTime) ++{ ++ ++ if (compactInterval != CL5_NUM_IGNORE) { ++ s_cl5Desc.dbTrim.compactInterval = compactInterval; ++ } ++ ++ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) { ++ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime); ++ } ++ ++} ++ + /* Name: cl5ConfigTrimming + Description: sets changelog trimming parameters; changelog must be open. + Parameters: maxEntries - maximum number of entries in the chnagelog (in all files); +@@ -1026,7 +1040,7 @@ cl5GetState() + CL5_BAD_STATE if changelog is not open + */ + int +-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval) ++cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval) + { + if (s_cl5Desc.dbState == CL5_STATE_NONE) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char + s_cl5Desc.dbTrim.maxEntries = maxEntries; + } + +- if (compactInterval != CL5_NUM_IGNORE) { +- s_cl5Desc.dbTrim.compactInterval = compactInterval; +- } +- +- if (strcmp(compactTime, CL5_STR_IGNORE) != 0) { +- s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime); +- } +- + if (trimInterval != CL5_NUM_IGNORE) { + s_cl5Desc.dbTrim.trimInterval = trimInterval; + } +diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h +index 11db771f2..6aa48aec4 100644 +--- a/ldap/servers/plugins/replication/cl5_api.h ++++ b/ldap/servers/plugins/replication/cl5_api.h +@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas); + + int cl5GetState(void); + ++/* Name: cl5ConfigSetCompaction ++ * Description: sets the database compaction parameters ++ * Parameters: compactInterval - Interval for compaction default is 30days ++ * compactTime - Compact time default is 23:59 ++ * Return: void ++ */ ++void cl5ConfigSetCompaction(int compactInterval, char *compactTime); ++ + /* Name: cl5ConfigTrimming + Description: sets changelog trimming parameters + Parameters: maxEntries - maximum number of entries in the log; +@@ -236,7 +244,7 @@ int cl5GetState(void); + Return: CL5_SUCCESS if successful; + CL5_BAD_STATE if changelog has not been open + */ +-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval); ++int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval); + + void cl5DestroyIterator(void *iterator); + +diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c +index b32686788..a43534c9b 100644 +--- a/ldap/servers/plugins/replication/cl5_config.c ++++ b/ldap/servers/plugins/replication/cl5_config.c +@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)), + + goto done; + } ++ /* Set compaction parameters */ ++ cl5ConfigSetCompaction(config.compactInterval, config.compactTime); + + /* start the changelog */ + rc = cl5Open(config.dir, &config.dbconfig); +@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)), + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb, + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, + "changelog5_config_modify - Deleted the changelog at %s\n", currentDir); + } ++ /* Set compaction parameters */ ++ cl5ConfigSetCompaction(config.compactInterval, config.compactTime); + + rc = cl5Open(config.dir, &config.dbconfig); + if (rc != CL5_SUCCESS) { +@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb, + if (config.maxEntries != CL5_NUM_IGNORE || + config.trimInterval != CL5_NUM_IGNORE || + strcmp(config.maxAge, CL5_STR_IGNORE) != 0) { +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c +index 251859714..567e0274c 100644 +--- a/ldap/servers/plugins/replication/cl5_init.c ++++ b/ldap/servers/plugins/replication/cl5_init.c +@@ -45,6 +45,8 @@ changelog5_init() + rc = 0; /* OK */ + goto done; + } ++ /* Set compaction parameters */ ++ cl5ConfigSetCompaction(config.compactInterval, config.compactTime); + + /* start changelog */ + rc = cl5Open(config.dir, &config.dbconfig); +@@ -57,7 +59,7 @@ changelog5_init() + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval); + if (rc != CL5_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, + "changelog5_init: failed to configure changelog trimming\n"); +diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c +index d6656653c..efb8c543a 100644 +--- a/ldap/servers/plugins/replication/cl5_test.c ++++ b/ldap/servers/plugins/replication/cl5_test.c +@@ -281,7 +281,7 @@ testTrimming() + rc = populateChangelog(300, NULL); + + if (rc == 0) +- rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL); ++ rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL); + + interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */ + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h +index 6708e12f7..b59b2bd27 100644 +--- a/ldap/servers/plugins/replication/repl_shared.h ++++ b/ldap/servers/plugins/replication/repl_shared.h +@@ -26,7 +26,7 @@ + + #define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */ + #define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */ +-#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */ ++#define CHANGELOGDB_COMPACT_TIME "23:59" /* around midnight */ + + #define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir" + #define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries" +-- +2.31.1 + diff --git a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch deleted file mode 100644 index 9acd229..0000000 --- a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 16 Dec 2020 16:30:28 +0100 -Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491) - -Bug description: - If the bind entry does not exist, the bind result info - reports that 'No such entry'. It should not give any - information if the target entry exists or not - -Fix description: - Does not return any additional information during a bind - -relates: https://github.com/389ds/389-ds-base/issues/4480 - -Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) - -Platforms tested: F31 ---- - dirsrvtests/tests/suites/basic/basic_test.py | 1 - - ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +- - ldap/servers/slapd/result.c | 2 +- - 3 files changed, 2 insertions(+), 3 deletions(-) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 120207321..1ae82dcdd 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): - assert not dscreate_long_instance.exists() - - -- - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c -index 3fe86d567..10cef250f 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c -@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)), - if (attrs) { - for (size_t i = 0; attrs[i]; i++) { - if (ldbm_config_moved_attr(attrs[i])) { -- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); -+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); - break; - } - } -diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c -index 9daf3b151..ab0d79454 100644 ---- a/ldap/servers/slapd/result.c -+++ b/ldap/servers/slapd/result.c -@@ -355,7 +355,7 @@ send_ldap_result_ext( - if (text) { - pbtext = text; - } else { -- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext); -+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext); - } - - if (operation == NULL) { --- -2.26.2 - diff --git a/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch b/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch new file mode 100644 index 0000000..5ab86af --- /dev/null +++ b/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch @@ -0,0 +1,51 @@ +From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 7 Jun 2021 11:23:35 +0200 +Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with + local password policy (#4790) + +Bug description: + When allocating a password policy structure (new_passwdPolicy) + it is initialized with the local policy definition or + the global one. If it exists a local policy entry, the TPR + attributes (passwordTPRMaxUse, passwordTPRDelayValidFrom and + passwordTPRDelayExpireAt) are not taken into account. + +Fix description: + Take into account TPR attributes to initialize the policy + +relates: https://github.com/389ds/389-ds-base/issues/4789 + +Reviewed by: Simon Pichugin, William Brown + +Platforms tested: F34 +--- + ldap/servers/slapd/pw.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c +index 2a167c8f1..7680df41d 100644 +--- a/ldap/servers/slapd/pw.c ++++ b/ldap/servers/slapd/pw.c +@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn) + if ((sval = attr_get_present_values(attr))) { + pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval); + } ++ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) { ++ if ((sval = attr_get_present_values(attr))) { ++ pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval); ++ } ++ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) { ++ if ((sval = attr_get_present_values(attr))) { ++ pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval); ++ } ++ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) { ++ if ((sval = attr_get_present_values(attr))) { ++ pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval); ++ } + } + } /* end of for() loop */ + if (pw_entry) { +-- +2.31.1 + diff --git a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch deleted file mode 100644 index 6de8b9e..0000000 --- a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Wed, 16 Dec 2020 16:21:35 +0100 -Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor - (#4505) - -(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2) ---- - .../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------ - 1 file changed, 36 insertions(+), 14 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index b03d170c8..eb18d2da2 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,6 +9,7 @@ - import time - import subprocess - import pytest -+import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - -+def get_hostnames_from_log(port1, port2): -+ # Get the supplier host names as displayed in replication monitor output -+ with open(LOG_FILE, 'r') as logfile: -+ logtext = logfile.read() -+ # search for Supplier :hostname:port -+ # and use \D to insure there is no more number is after -+ # the matched port (i.e that 10 is not matching 101) -+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m1 = 'localhost.localdomain' -+ if (match is not None): -+ host_m1 = match.group(2) -+ # Same for master 2 -+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m2 = 'localhost.localdomain' -+ if (match is not None): -+ host_m2 = match.group(2) -+ return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - m1 = topology_m2.ms["master1"] - m2 = topology_m2.ms["master2"] - -- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -- - connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) - content_list = ['Replica Root: dc=example,dc=com', - 'Replica ID: 1', -@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + m2.host + ':' + str(m2.port) -- - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -- aliases = ['M1=' + m1.host + ':' + str(m1.port), -- 'M2=' + m2.host + ':' + str(m2.port)] -- - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -+ # Prepare the data for next tests -+ aliases = ['M1=' + host_m1 + ':' + str(m1.port), -+ 'M2=' + host_m2 + ':' + str(m2.port)] -+ -+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + host_m2 + ':' + str(m2.port) -+ - log.info('Run replication monitor with aliases option') - args.aliases = aliases - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch b/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch new file mode 100644 index 0000000..f9e4266 --- /dev/null +++ b/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch @@ -0,0 +1,350 @@ +From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 17 Jun 2021 16:22:09 +0200 +Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules + attributes (#4793) + +Bug description: + Since #4725, password policy support temporary password rules. + CLI (dsconf) does not support this RFE and only direct ldap + operation can configure global/local password policy + +Fix description: + Update dsconf to support this new RFE. + To run successfully the testcase it relies on #4788 + +relates: #4788 + +Reviewed by: Simon Pichugin (thanks !!) + +Platforms tested: F34 +--- + .../password/pwdPolicy_attribute_test.py | 172 ++++++++++++++++-- + src/lib389/lib389/cli_conf/pwpolicy.py | 5 +- + src/lib389/lib389/pwpolicy.py | 5 +- + 3 files changed, 165 insertions(+), 17 deletions(-) + +diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py +index aee3a91ad..085d0a373 100644 +--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py ++++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py +@@ -34,7 +34,7 @@ log = logging.getLogger(__name__) + + + @pytest.fixture(scope="module") +-def create_user(topology_st, request): ++def test_user(topology_st, request): + """User for binding operation""" + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + log.info('Adding test user {}') +@@ -56,10 +56,11 @@ def create_user(topology_st, request): + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + request.addfinalizer(fin) ++ return user + + + @pytest.fixture(scope="module") +-def password_policy(topology_st, create_user): ++def password_policy(topology_st, test_user): + """Set up password policy for subtree and user""" + + pwp = PwPolicyManager(topology_st.standalone) +@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user): + pwp.create_user_policy(TEST_USER_DN, policy_props) + + @pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") +-def test_pwd_reset(topology_st, create_user): ++def test_pwd_reset(topology_st, test_user): + """Test new password policy attribute "pwdReset" + + :id: 03db357b-4800-411e-a36e-28a534293004 +@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user): + [('on', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'on', False), ('on', 'on', False)]) +-def test_change_pwd(topology_st, create_user, password_policy, ++def test_change_pwd(topology_st, test_user, password_policy, + subtree_pwchange, user_pwchange, exception): + """Verify that 'passwordChange' attr works as expected + User should have a priority over a subtree. +@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy, + user.reset_password(TEST_USER_PWD) + + +-def test_pwd_min_age(topology_st, create_user, password_policy): ++def test_pwd_min_age(topology_st, test_user, password_policy): + """If we set passwordMinAge to some value, for example to 10, then it + should not allow the user to change the password within 10 seconds after + his previous change. +@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy): + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.reset_password(TEST_USER_PWD) + +-def test_global_tpr_maxuse_1(topology_st, create_user, request): ++def test_global_tpr_maxuse_1(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that after passwordTPRMaxUse failures to bind + additional bind with valid password are failing with CONSTRAINT_VIOLATION +@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_maxuse_2(topology_st, create_user, request): ++def test_global_tpr_maxuse_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that after less than passwordTPRMaxUse failures to bind + additional bind with valid password are successfull +@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_maxuse_3(topology_st, create_user, request): ++def test_global_tpr_maxuse_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that after less than passwordTPRMaxUse failures to bind + A bind with valid password is successfull but passwordMustChange +@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_maxuse_4(topology_st, create_user, request): ++def test_global_tpr_maxuse_4(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that a TPR attribute passwordTPRMaxUse + can be updated by DM but not the by user itself +@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayValidFrom_1(topology_st, create_user, request): ++def test_local_tpr_maxuse_5(topology_st, test_user, request): ++ """Test TPR local policy overpass global one: passwordTPRMaxUse ++ Test that after passwordTPRMaxUse failures to bind ++ additional bind with valid password are failing with CONSTRAINT_VIOLATION ++ ++ :id: c3919707-d804-445a-8754-8385b1072c42 ++ :customerscenario: False ++ :setup: Standalone instance ++ :steps: ++ 1. Global password policy Enable passwordMustChange ++ 2. Global password policy Set passwordTPRMaxUse=5 ++ 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test ++ 4. Local password policy Enable passwordMustChange ++ 5. Local password policy Set passwordTPRMaxUse=10 (higher than global) ++ 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS ++ 7. Check that passwordTPRUseCount got to the limit (5) ++ 8. Bind with a wrong password (CONSTRAINT_VIOLATION) ++ and check passwordTPRUseCount overpass the limit by 1 (11) ++ 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION ++ and check passwordTPRUseCount increases ++ 10. Reset password policy configuration and remove local password from user ++ :expected results: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ 10. Success ++ """ ++ ++ global_tpr_maxuse = 5 ++ # Set global password policy config, passwordMaxFailure being higher than ++ # passwordTPRMaxUse so that TPR is enforced first ++ topology_st.standalone.config.replace('passwordMustChange', 'on') ++ topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20)) ++ topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse)) ++ time.sleep(.5) ++ ++ local_tpr_maxuse = global_tpr_maxuse + 5 ++ # Reset user's password with a local password policy ++ # that has passwordTPRMaxUse higher than global ++ #our_user = UserAccount(topology_st.standalone, TEST_USER_DN) ++ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), ++ 'slapd-standalone1', ++ 'localpwp', ++ 'adduser', ++ test_user.dn]) ++ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), ++ 'slapd-standalone1', ++ 'localpwp', ++ 'set', ++ '--pwptprmaxuse', ++ str(local_tpr_maxuse), ++ '--pwdmustchange', ++ 'on', ++ test_user.dn]) ++ test_user.replace('userpassword', PASSWORD) ++ time.sleep(.5) ++ ++ # look up to passwordTPRMaxUse with failing ++ # bind to check that the limits of TPR are enforced ++ for i in range(local_tpr_maxuse): ++ # Bind as user with a wrong password ++ with pytest.raises(ldap.INVALID_CREDENTIALS): ++ test_user.rebind('wrong password') ++ time.sleep(.5) ++ ++ # Check that pwdReset is TRUE ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE' ++ ++ # Check that pwdTPRReset is TRUE ++ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' ++ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) ++ log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) ++ ++ ++ # Now the #failures reached passwordTPRMaxUse ++ # Check that pwdReset is TRUE ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ # Check that pwdTPRReset is TRUE ++ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' ++ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse) ++ log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse)) ++ ++ # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ test_user.rebind("wrong password") ++ time.sleep(.5) ++ ++ # Check that pwdReset is TRUE ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ # Check that pwdTPRReset is TRUE ++ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' ++ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1) ++ log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i)) ++ ++ # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION ++ # and passwordTPRRetryCount remains unchanged ++ # account is now similar to locked ++ for i in range(10): ++ # Bind as user with valid password ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ test_user.rebind(PASSWORD) ++ time.sleep(.5) ++ ++ # Check that pwdReset is TRUE ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ # Check that pwdTPRReset is TRUE ++ # pwdTPRUseCount keeps increasing ++ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' ++ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2) ++ log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2)) ++ ++ ++ def fin(): ++ topology_st.standalone.restart() ++ # Reset password policy config ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ topology_st.standalone.config.replace('passwordMustChange', 'off') ++ ++ # Remove local password policy from that entry ++ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), ++ 'slapd-standalone1', ++ 'localpwp', ++ 'remove', ++ test_user.dn]) ++ ++ # Reset user's password ++ test_user.replace('userpassword', TEST_USER_PWD) ++ ++ ++ request.addfinalizer(fin) ++ ++def test_global_tpr_delayValidFrom_1(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR password is not valid before reset time + + passwordTPRDelayValidFrom +@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayValidFrom_2(topology_st, create_user, request): ++def test_global_tpr_delayValidFrom_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR password is valid after reset time + + passwordTPRDelayValidFrom +@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayValidFrom_3(topology_st, create_user, request): ++def test_global_tpr_delayValidFrom_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR attribute passwordTPRDelayValidFrom + can be updated by DM but not the by user itself +@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayExpireAt_1(topology_st, create_user, request): ++def test_global_tpr_delayExpireAt_1(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR password is not valid after reset time + + passwordTPRDelayExpireAt +@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayExpireAt_2(topology_st, create_user, request): ++def test_global_tpr_delayExpireAt_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR password is valid before reset time + + passwordTPRDelayExpireAt +@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request): + + request.addfinalizer(fin) + +-def test_global_tpr_delayExpireAt_3(topology_st, create_user, request): ++def test_global_tpr_delayExpireAt_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR attribute passwordTPRDelayExpireAt + can be updated by DM but not the by user itself +diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py +index 2838afcb8..26af6e7ec 100644 +--- a/src/lib389/lib389/cli_conf/pwpolicy.py ++++ b/src/lib389/lib389/cli_conf/pwpolicy.py +@@ -255,6 +255,9 @@ def create_parser(subparsers): + set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy") + set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking") + set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files") ++ set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication") ++ set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires") ++ set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before using a reset password to authenticated") + # delete local password policy + del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy') + del_parser.set_defaults(func=del_local_policy) +@@ -291,4 +294,4 @@ def create_parser(subparsers): + ############################################# + set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN') + add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN') +- add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN') +\ No newline at end of file ++ add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN') +diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py +index 8653cb195..d2427933b 100644 +--- a/src/lib389/lib389/pwpolicy.py ++++ b/src/lib389/lib389/pwpolicy.py +@@ -65,7 +65,10 @@ class PwPolicyManager(object): + 'pwddictcheck': 'passworddictcheck', + 'pwddictpath': 'passworddictpath', + 'pwdallowhash': 'nsslapd-allow-hashed-passwords', +- 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global' ++ 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global', ++ 'pwptprmaxuse': 'passwordTPRMaxUse', ++ 'pwptprdelayexpireat': 'passwordTPRDelayExpireAt', ++ 'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom' + } + + def is_subtree_policy(self, dn): +-- +2.31.1 + diff --git a/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch deleted file mode 100644 index 6906b5c..0000000 --- a/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +++ /dev/null @@ -1,374 +0,0 @@ -From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Mon, 30 Nov 2020 15:28:05 +0000 -Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped - entries - -Bug Description: During an ldif2db import entries that do not -conform to various constraints will be skipped and not imported. -On completition of an import with skipped entries, the server -returns a success exit code and logs the skipped entry detail to -the error logs. The success exit code could lead the user to -believe that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the -import will continue and a warning will be returned to the user. - -CLI tools for offline import updated to handle warning code. - -Test added to generate an incorrect ldif entry and perform an -import. - -Fixes: #4418 - -Reviewed by: Firstyear, droideck (Thanks) - -(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0) ---- - .../tests/suites/import/import_test.py | 54 ++++++++++++++++++- - .../slapd/back-ldbm/db-bdb/bdb_import.c | 22 ++++++-- - ldap/servers/slapd/main.c | 8 +++ - ldap/servers/slapd/pblock.c | 24 +++++++++ - ldap/servers/slapd/pblock_v3.h | 1 + - ldap/servers/slapd/slapi-private.h | 14 +++++ - src/lib389/lib389/__init__.py | 18 +++---- - src/lib389/lib389/_constants.py | 7 +++ - src/lib389/lib389/cli_ctl/dbtasks.py | 8 ++- - 9 files changed, 140 insertions(+), 16 deletions(-) - -diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py -index 3803ecf43..b47db96ed 100644 ---- a/dirsrvtests/tests/suites/import/import_test.py -+++ b/dirsrvtests/tests/suites/import/import_test.py -@@ -15,7 +15,7 @@ import pytest - import time - import glob - from lib389.topologies import topology_st as topo --from lib389._constants import DEFAULT_SUFFIX -+from lib389._constants import DEFAULT_SUFFIX, TaskWarning - from lib389.dbgen import dbgen_users - from lib389.tasks import ImportTask - from lib389.index import Indexes -@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo): - return import_ldif1 - - -+def _create_syntax_err_ldif(topo): -+ """ -+ Create an incorrect ldif entry that violates syntax check -+ """ -+ ldif_dir = topo.standalone.get_ldif_dir() -+ line1 = """dn: dc=example,dc=com -+objectClass: top -+objectClass: domain -+dc: example -+dn: ou=groups,dc=example,dc=com -+objectClass: top -+objectClass: organizationalUnit -+ou: groups -+dn: uid=JHunt,ou=groups,dc=example,dc=com -+objectClass: top -+objectClass: person -+objectClass: organizationalPerson -+objectClass: inetOrgPerson -+objectclass: inetUser -+cn: James Hunt -+sn: Hunt -+uid: JHunt -+givenName: -+""" -+ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out: -+ out.write(f'{line1}') -+ os.chmod(out.name, 0o777) -+ out.close() -+ import_ldif1 = ldif_dir + '/syntax_err.ldif' -+ return import_ldif1 -+ -+ - def test_import_with_index(topo, _import_clean): - """ - Add an index, then import via cn=tasks -@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl - topo.standalone.start() - - -+def test_ldif2db_syntax_check(topo): -+ """ldif2db should return a warning when a skipped entry has occured. -+ :id: 85e75670-42c5-4062-9edc-7f117c97a06f -+ :setup: -+ 1. Standalone Instance -+ 2. Ldif entry that violates syntax check rule (empty givenname) -+ :steps: -+ 1. Create an ldif file which violates the syntax checking rule -+ 2. Stop the server and import ldif file with ldif2db -+ :expected results: -+ 1. ldif2db import returns a warning to signify skipped entries -+ """ -+ import_ldif1 = _create_syntax_err_ldif(topo) -+ # Import the offending LDIF data - offline -+ topo.standalone.stop() -+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) -+ assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY -+ topo.standalone.start() -+ -+ - def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): - """Report during startup if nsslapd-cachememsize is too small - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -index e7da0517f..1e4830e99 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -@@ -2563,7 +2563,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 1; -+ ret |= WARN_UPGARDE_DN_FORMAT_ALL; - } else if (NEED_DN_NORM == ret) { - import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", - "%s complete. %s needs upgradednformat.", -@@ -2572,7 +2572,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 2; -+ ret |= WARN_UPGRADE_DN_FORMAT; - } else if (NEED_DN_NORM_SP == ret) { - import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", - "%s complete. %s needs upgradednformat spaces.", -@@ -2581,7 +2581,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 3; -+ ret |= WARN_UPGRADE_DN_FORMAT_SPACE; - } else { - ret = -1; - if (job->task != NULL) { -@@ -2600,6 +2600,11 @@ error: - import_all_done(job, ret); - } - -+ /* set task warning if there are no errors */ -+ if((!ret) && (job->skipped)) { -+ ret |= WARN_SKIPPED_IMPORT_ENTRY; -+ } -+ - /* This instance isn't busy anymore */ - instance_set_not_busy(job->inst); - -@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb) - int total_files, i; - int up_flags = 0; - PRThread *thread = NULL; -+ int ret = 0; - - slapi_pblock_get(pb, SLAPI_BACKEND, &be); - if (be == NULL) { -@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb) - } - - /* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */ -- return import_main_offline((void *)job); -+ ret = import_main_offline((void *)job); -+ -+ /* no error just warning, reset ret */ -+ if(ret &= WARN_SKIPPED_IMPORT_ENTRY) { -+ slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY); -+ ret = 0; -+ } -+ -+ return ret; - } - - struct _import_merge_thang -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index 694375b22..104f6826c 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg) - plugin->plg_name); - return_value = -1; - } -+ -+ /* check for task warnings */ -+ if(!return_value) { -+ if((return_value = slapi_pblock_get_task_warning(pb))) { -+ slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value); -+ } -+ } -+ - slapi_pblock_destroy(pb); - charray_free(instances); - charray_free(mcfg->cmd_line_instance_names); -diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c -index 454ea9cc3..1ad9d0399 100644 ---- a/ldap/servers/slapd/pblock.c -+++ b/ldap/servers/slapd/pblock.c -@@ -28,12 +28,14 @@ - #define SLAPI_LDIF_DUMP_REPLICA 2003 - #define SLAPI_PWDPOLICY 2004 - #define SLAPI_PW_ENTRY 2005 -+#define SLAPI_TASK_WARNING 2006 - - /* Used for checking assertions about pblocks in some cases. */ - #define SLAPI_HINT 9999 - - static PRLock *pblock_analytics_lock = NULL; - -+ - static PLHashNumber - hash_int_func(const void *key) - { -@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica) - pb->pb_task->ldif_dump_replica = dump_replica; - } - -+int32_t -+slapi_pblock_get_task_warning(Slapi_PBlock *pb) -+{ -+#ifdef PBLOCK_ANALYTICS -+ pblock_analytics_record(pb, SLAPI_TASK_WARNING); -+#endif -+ if (pb->pb_task != NULL) { -+ return pb->pb_task->task_warning; -+ } -+ return 0; -+} -+ -+void -+slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning) -+{ -+#ifdef PBLOCK_ANALYTICS -+ pblock_analytics_record(pb, SLAPI_TASK_WARNING); -+#endif -+ _pblock_assert_pb_task(pb); -+ pb->pb_task->task_warning = warning; -+} -+ - void * - slapi_pblock_get_vattr_context(Slapi_PBlock *pb) - { -diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h -index 90498c0b0..b35d78565 100644 ---- a/ldap/servers/slapd/pblock_v3.h -+++ b/ldap/servers/slapd/pblock_v3.h -@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task - int ldif2db_noattrindexes; - int ldif_printkey; - int task_flags; -+ int32_t task_warning; - int import_state; - - int server_running; /* indicate that server is running */ -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index c98c1947c..31cb33472 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); - void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); - void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); -+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); -+ -+ -+int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); -+ - #ifdef __cplusplus - } - #endif -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 4e6a1905a..5b36a79e1 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object): - # server is stopped) - # - def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt, -- import_file): -+ import_file, import_cl): - """ - @param bename - The backend name of the database to import - @param suffixes - List/tuple of suffixes to import -@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object): - try: - result = subprocess.check_output(cmd, encoding='utf-8') - except subprocess.CalledProcessError as e: -- self.log.debug("Command: %s failed with the return code %s and the error %s", -- format_cmd_list(cmd), e.returncode, e.output) -- return False -- -- self.log.debug("ldif2db output: BEGIN") -- for line in result.split("\n"): -- self.log.debug(line) -- self.log.debug("ldif2db output: END") -+ if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: -+ self.log.debug("Command: %s skipped import entry warning %s", -+ format_cmd_list(cmd), e.returncode) -+ return e.returncode -+ else: -+ self.log.debug("Command: %s failed with the return code %s and the error %s", -+ format_cmd_list(cmd), e.returncode, e.output) -+ return False - - return True - -diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py -index e28c602a3..38ba04565 100644 ---- a/src/lib389/lib389/_constants.py -+++ b/src/lib389/lib389/_constants.py -@@ -162,6 +162,13 @@ DB2BAK = 'db2bak' - DB2INDEX = 'db2index' - DBSCAN = 'dbscan' - -+# Task warnings -+class TaskWarning(IntEnum): -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0) -+ WARN_UPGRADE_DN_FORMAT = (1 << 1) -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2) -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+ - RDN_REPLICA = "cn=replica" - - RETROCL_SUFFIX = "cn=changelog" -diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py -index 590a1ea0e..02830239c 100644 ---- a/src/lib389/lib389/cli_ctl/dbtasks.py -+++ b/src/lib389/lib389/cli_ctl/dbtasks.py -@@ -7,6 +7,7 @@ - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - -+from lib389._constants import TaskWarning - - def dbtasks_db2index(inst, log, args): - if not inst.db2index(bename=args.backend): -@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args): - - - def dbtasks_ldif2db(inst, log, args): -- if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, -- suffixes=None, excludeSuffixes=None): -+ ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, -+ suffixes=None, excludeSuffixes=None, import_cl=False) -+ if not ret: - log.fatal("ldif2db failed") - return False -+ elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: -+ log.warn("ldif2db successful with skipped entries") - else: - log.info("ldif2db successful") - --- -2.26.2 - diff --git a/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch b/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch new file mode 100644 index 0000000..193d44b --- /dev/null +++ b/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch @@ -0,0 +1,179 @@ +From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 7 Jun 2021 12:58:42 -0400 +Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is + manually edited + +Bug Description: If the referint log is manually edited with a string + that is not a DN the server will crash when processing + the log. + +Fix Description: Check for NULL pointers when strtoking the file line. + +relates: https://github.com/389ds/389-ds-base/issues/4447 + +Reviewed by: firstyear(Thanks!) +--- + .../tests/suites/plugins/referint_test.py | 72 +++++++++++++++---- + ldap/servers/plugins/referint/referint.c | 7 ++ + src/lib389/lib389/plugins.py | 15 ++++ + 3 files changed, 80 insertions(+), 14 deletions(-) + +diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py +index 02b985767..fda602545 100644 +--- a/dirsrvtests/tests/suites/plugins/referint_test.py ++++ b/dirsrvtests/tests/suites/plugins/referint_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2016 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -12,13 +12,11 @@ Created on Dec 12, 2019 + @author: tbordaz + ''' + import logging +-import subprocess + import pytest + from lib389 import Entry +-from lib389.utils import * +-from lib389.plugins import * +-from lib389._constants import * +-from lib389.idm.user import UserAccounts, UserAccount ++from lib389.plugins import ReferentialIntegrityPlugin ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.idm.user import UserAccounts + from lib389.idm.group import Groups + from lib389.topologies import topology_st as topo + +@@ -29,21 +27,27 @@ log = logging.getLogger(__name__) + ESCAPED_RDN_BASE = "foo\\,oo" + def _user_get_dn(no): + uid = '%s%d' % (ESCAPED_RDN_BASE, no) +- dn = 'uid=%s,%s' % (uid, SUFFIX) ++ dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX) + return (uid, dn) + + def add_escaped_user(server, no): + (uid, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) +- server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], +- 'uid': [uid], +- 'sn' : [uid], +- 'cn' : [uid]}))) ++ users = UserAccounts(server, DEFAULT_SUFFIX, None) ++ user_properties = { ++ 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'], ++ 'uid': uid, ++ 'cn' : uid, ++ 'sn' : uid, ++ 'uidNumber' : '1000', ++ 'gidNumber' : '2000', ++ 'homeDirectory' : '/home/testuser', ++ } ++ users.create(properties=user_properties) + return dn + +-@pytest.mark.ds50020 + def test_referential_false_failure(topo): +- """On MODRDN referential integrity can erronously fail ++ """On MODRDN referential integrity can erroneously fail + + :id: f77aeb80-c4c4-471b-8c1b-4733b714778b + :setup: Standalone Instance +@@ -100,6 +104,46 @@ def test_referential_false_failure(topo): + inst.restart() + + # Here if the bug is fixed, referential is able to update the member value +- inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0) ++ user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False) + + ++def test_invalid_referint_log(topo): ++ """If there is an invalid log line in the referint log, make sure the server ++ does not crash at startup ++ ++ :id: 34807b5a-ab17-4281-ae48-4e3513e19145 ++ :setup: Standalone Instance ++ :steps: ++ 1. Set the referint log delay ++ 2. Create invalid log ++ 3. Start the server (no crash) ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ """ ++ ++ inst = topo.standalone ++ ++ # Set delay - required for log parsing at server startup ++ plugin = ReferentialIntegrityPlugin(inst) ++ plugin.enable() ++ plugin.set_update_delay('2') ++ logfile = plugin.get_log_file() ++ inst.restart() ++ ++ # Create invalid log ++ inst.stop() ++ with open(logfile, 'w') as log_fh: ++ log_fh.write("CRASH\n") ++ ++ # Start the instance ++ inst.start() ++ assert inst.status() ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c +index fd5356d72..28240c1f6 100644 +--- a/ldap/servers/plugins/referint/referint.c ++++ b/ldap/servers/plugins/referint/referint.c +@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused))) + sdn = slapi_sdn_new_normdn_byref(ptoken); + ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter); + ++ if (ptoken == NULL) { ++ /* Invalid line in referint log, skip it */ ++ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM, ++ "Skipping invalid referint log line: (%s)\n", thisline); ++ slapi_sdn_free(&sdn); ++ continue; ++ } + if (!strcasecmp(ptoken, "NULL")) { + tmprdn = NULL; + } else { +diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py +index 2d88e60bd..b07e80022 100644 +--- a/src/lib389/lib389/plugins.py ++++ b/src/lib389/lib389/plugins.py +@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin): + + self.set('referint-update-delay', str(value)) + ++ def get_log_file(self): ++ """Get referint log file""" ++ ++ return self.get_attr_val_utf8('referint-logfile') ++ ++ def get_log_file_formatted(self): ++ """Get referint log file""" ++ ++ return self.display_attr('referint-logfile') ++ ++ def set_log_file(self, value): ++ """Set referint log file""" ++ ++ self.set('referint-logfile', value) ++ + def get_membership_attr(self, formatted=False): + """Get referint-membership-attr attribute""" + +-- +2.31.1 + diff --git a/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch deleted file mode 100644 index 6e77682..0000000 --- a/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Mon, 30 Nov 2020 15:28:05 +0000 -Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped - entries - -Bug Description: During an ldif2db import entries that do not -conform to various constraints will be skipped and not imported. -On completition of an import with skipped entries, the server -returns a success exit code and logs the skipped entry detail to -the error logs. The success exit code could lead the user to -believe that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the -import will continue and a warning will be returned to the user. - -CLI tools for offline import updated to handle warning code. - -Test added to generate an incorrect ldif entry and perform an -import. - -Fixes: #4418 - -Reviewed by: Firstyear, droideck (Thanks) ---- - ldap/servers/slapd/slapi-private.h | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index 31cb33472..e0092d571 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{ - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); -+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - - int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); - --- -2.26.2 - diff --git a/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch b/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch new file mode 100644 index 0000000..4810288 --- /dev/null +++ b/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch @@ -0,0 +1,114 @@ +From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Thu, 3 Jun 2021 15:16:22 +0000 +Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE + +Description: The RetroCL exclude attribute RFE is dependent on functionality of the + EntryUUID bug fix, that didn't make into the latest build. This breaks the + RetroCL exclude attr feature so we need to provide a workaround. + +Fixes: https://github.com/389ds/389-ds-base/issues/4791 + +Relates: https://github.com/389ds/389-ds-base/pull/4723 + +Relates: https://github.com/389ds/389-ds-base/issues/4224 + +Reviewed by: tbordaz, droideck (Thank you) +--- + .../tests/suites/retrocl/basic_test.py | 6 ++-- + .../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++-- + 2 files changed, 36 insertions(+), 5 deletions(-) + +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +index 112c73cb9..f3bc50f29 100644 +--- a/dirsrvtests/tests/suites/retrocl/basic_test.py ++++ b/dirsrvtests/tests/suites/retrocl/basic_test.py +@@ -17,7 +17,7 @@ from lib389.utils import * + from lib389.tasks import * + from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance + from lib389.cli_base.dsrc import dsrc_arg_concat +-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add ++from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr + from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts + + pytestmark = pytest.mark.tier1 +@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st): + args.bindpw = None + args.prompt = False + args.exclude_attrs = ATTR_HOMEPHONE +- args.func = retrochangelog_add ++ args.func = retrochangelog_add_attr + dsrc_inst = dsrc_arg_concat(args, None) + inst = connect_instance(dsrc_inst, False, args) + result = args.func(inst, None, log, args) +@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st): + args.bindpw = None + args.prompt = False + args.exclude_attrs = ATTR_CARLICENSE +- args.func = retrochangelog_add ++ args.func = retrochangelog_add_attr + dsrc_inst = dsrc_arg_concat(args, None) + inst = connect_instance(dsrc_inst, False, args) + result = args.func(inst, None, log, args) +diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py +index 9940c6532..160fbb82d 100644 +--- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py ++++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py +@@ -6,8 +6,13 @@ + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + ++# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344 ++import ldap ++ + from lib389.plugins import RetroChangelogPlugin +-from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit ++# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 ++# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr ++from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs + + arg_to_attr = { + 'is_replicated': 'isReplicated', +@@ -18,12 +23,38 @@ arg_to_attr = { + 'exclude_attrs': 'nsslapd-exclude-attrs' + } + +- + def retrochangelog_edit(inst, basedn, log, args): + log = log.getChild('retrochangelog_edit') + plugin = RetroChangelogPlugin(inst) + generic_object_edit(plugin, log, args, arg_to_attr) + ++# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 ++def retrochangelog_add_attr(inst, basedn, log, args): ++ log = log.getChild('retrochangelog_add_attr') ++ plugin = RetroChangelogPlugin(inst) ++ generic_object_add_attr(plugin, log, args, arg_to_attr) ++ ++# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 ++def generic_object_add_attr(dsldap_object, log, args, arg_to_attr): ++ """Add an attribute to the entry. This differs to 'edit' as edit uses replace, ++ and this allows multivalues to be added. ++ ++ dsldap_object should be a single instance of DSLdapObject with a set dn ++ """ ++ log = log.getChild('generic_object_add_attr') ++ # Gather the attributes ++ attrs = _args_to_attrs(args, arg_to_attr) ++ ++ modlist = [] ++ for attr, value in attrs.items(): ++ if not isinstance(value, list): ++ value = [value] ++ modlist.append((ldap.MOD_ADD, attr, value)) ++ if len(modlist) > 0: ++ dsldap_object.apply_mods(modlist) ++ log.info("Successfully changed the %s", dsldap_object.dn) ++ else: ++ raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn) + + def _add_parser_args(parser): + parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper, +-- +2.31.1 + diff --git a/SOURCES/0022-Fix-cherry-pick-erorr.patch b/SOURCES/0022-Fix-cherry-pick-erorr.patch deleted file mode 100644 index a078160..0000000 --- a/SOURCES/0022-Fix-cherry-pick-erorr.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 7 Jan 2021 10:27:43 -0500 -Subject: [PATCH 3/6] Fix cherry-pick erorr - ---- - ldap/servers/slapd/slapi-private.h | 11 ----------- - 1 file changed, 11 deletions(-) - -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index e0092d571..d5abe8ac1 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{ - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - --/* task warnings */ --typedef enum task_warning_t{ -- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -- WARN_UPGRADE_DN_FORMAT = (1 << 1), -- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) --} task_warning; -- --int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); --void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); -- - int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); - - #ifdef __cplusplus --- -2.26.2 - diff --git a/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch b/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch new file mode 100644 index 0000000..82d6945 --- /dev/null +++ b/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch @@ -0,0 +1,642 @@ +From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 11 Mar 2021 10:12:46 -0500 +Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck + +Description: remove master from ds-replcheck and replace it with supplier + +relates: https://github.com/389ds/389-ds-base/issues/4656 + +Reviewed by: mreynolds + +e with '#' will be ignored, and an empty message aborts the commit. +--- + ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++-------------- + 1 file changed, 101 insertions(+), 101 deletions(-) + +diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck +index 169496e8f..f411f357a 100755 +--- a/ldap/admin/src/scripts/ds-replcheck ++++ b/ldap/admin/src/scripts/ds-replcheck +@@ -1,7 +1,7 @@ + #!/usr/bin/python3 + + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2020 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -63,7 +63,7 @@ def remove_entry(rentries, dn): + def get_ruv_time(ruv, rid): + """Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn + :param ruv - A lsit of RUV elements +- :param rid - The rid of the master to extractthe maxcsn time from ++ :param rid - The rid of the supplier to extract the maxcsn time from + :return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if + the rid was not found + """ +@@ -213,22 +213,22 @@ def get_ruv_state(opts): + :param opts - all the script options + :return - A text description of the replicaton state + """ +- mtime = get_ruv_time(opts['master_ruv'], opts['rid']) ++ mtime = get_ruv_time(opts['supplier_ruv'], opts['rid']) + rtime = get_ruv_time(opts['replica_ruv'], opts['rid']) + if mtime == -1: +- repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid']) ++ repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid']) + elif rtime == -1: + repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid']) + elif mtime == 0: +- repl_state = "Replication State: Master has not seen any updates" ++ repl_state = "Replication State: Supplier has not seen any updates" + elif rtime == 0: +- repl_state = "Replication State: Replica has not seen any changes from the Master" ++ repl_state = "Replication State: Replica has not seen any changes from the Supplier" + elif mtime > rtime: +- repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime) ++ repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime) + elif mtime < rtime: +- repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime) ++ repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime) + else: +- repl_state = "Replication State: Master and Replica are in perfect synchronization" ++ repl_state = "Replication State: Supplier and Replica are in perfect synchronization" + + return repl_state + +@@ -238,11 +238,11 @@ def get_ruv_report(opts): + :param opts - all the script options + :return - A text blob to display in the report + """ +- opts['master_ruv'].sort() ++ opts['supplier_ruv'].sort() + opts['replica_ruv'].sort() + +- report = "Master RUV:\n" +- for element in opts['master_ruv']: ++ report = "Supplier RUV:\n" ++ for element in opts['supplier_ruv']: + report += " %s\n" % (element) + report += "\nReplica RUV:\n" + for element in opts['replica_ruv']: +@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts): + + def cmp_entry(mentry, rentry, opts): + """Compare the two entries, and return a "diff map" +- :param mentry - A Master entry ++ :param mentry - A Supplier entry + :param rentry - A Replica entry + :param opts - A Dict of the scripts options + :return - A Dict of the differences in the entry, or None +@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts): + mlist = list(mentry.data.keys()) + + # +- # Check master ++ # Check Supplier + # + for mattr in mlist: + if mattr in opts['ignore']: +@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts): + if not found: + diff['missing'].append("") + found = True +- diff['missing'].append(" - Master's State Info: %s" % (val)) ++ diff['missing'].append(" - Supplier's State Info: %s" % (val)) + diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val)))) + else: + # No state info, just move on +@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts): + if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts): + diff['diff'].append(" - Attribute '%s' is different:" % mattr) + if 'nscpentrywsi' in mentry.data: +- # Process Master ++ # Process Supplier + found = False + for val in mentry.data['nscpentrywsi']: + if val.lower().startswith(mattr + ';'): + if not found: +- diff['diff'].append(" Master:") ++ diff['diff'].append(" Supplier:") + diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip())) + diff['diff'].append(" - State Info: %s" % (val)) + diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val)))) + found = True + if not found: +- diff['diff'].append(" Master: ") ++ diff['diff'].append(" Supplier: ") + for val in mentry.data[mattr]: + # This is an "origin" value which means it's never been + # updated since replication was set up. So its the +@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts): + diff['diff'].append("") + else: + # no state info, report what we got +- diff['diff'].append(" Master: ") ++ diff['diff'].append(" Supplier: ") + for val in mentry.data[mattr]: + diff['diff'].append(" - %s: %s" % (mattr, val)) + diff['diff'].append(" Replica: ") +@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts): + continue + + if rattr not in mlist: +- # Master is missing the attribute ++ # Supplier is missing the attribute + if report_conflict(rentry, rattr, opts): +- diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr)) ++ diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr)) + diff_count += 1 + if 'nscpentrywsi' in rentry.data: + found = False +@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None): + try: + MLDIF = open(opts['mldif'], "r") + except Exception as e: +- print('Failed to open Master LDIF: ' + str(e)) ++ print('Failed to open Supplier LDIF: ' + str(e)) + return + + try: +@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None): + # Verify LDIF Files + try: + if opts['verbose']: +- print("Validating Master ldif file ({})...".format(opts['mldif'])) ++ print("Validating Supplier ldif file ({})...".format(opts['mldif'])) + LDIFRecordList(MLDIF).parse() + except ValueError: +- print('Master LDIF file in invalid, aborting...') ++ print('Supplier LDIF file in invalid, aborting...') + MLDIF.close() + RLDIF.close() + return +@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None): + # Get all the dn's, and entry counts + if opts['verbose']: + print ("Gathering all the DN's...") +- master_dns = get_dns(MLDIF, opts['mldif'], opts) ++ supplier_dns = get_dns(MLDIF, opts['mldif'], opts) + replica_dns = get_dns(RLDIF, opts['rldif'], opts) +- if master_dns is None or replica_dns is None: ++ if supplier_dns is None or replica_dns is None: + print("Aborting scan...") + MLDIF.close() + RLDIF.close() + sys.exit(1) +- m_count = len(master_dns) ++ m_count = len(supplier_dns) + r_count = len(replica_dns) + + # Get DB RUV + if opts['verbose']: + print ("Gathering the database RUV's...") +- opts['master_ruv'] = get_ldif_ruv(MLDIF, opts) ++ opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts) + opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts) + +- """ Compare the master entries with the replica's. Take our list of dn's from +- the master ldif and get that entry( dn) from the master and replica ldif. In ++ """ Compare the Supplier entries with the replica's. Take our list of dn's from ++ the Supplier ldif and get that entry( dn) from the Supplier and replica ldif. In + this phase we keep keep track of conflict/tombstone counts, and we check for + missing entries and entry differences. We only need to do the entry diff + checking in this phase - we do not need to do it when process the replica dn's + because if the entry exists in both LDIF's then we already checked or diffs +- while processing the master dn's. ++ while processing the Supplier dn's. + """ + if opts['verbose']: +- print ("Comparing Master to Replica...") ++ print ("Comparing Supplier to Replica...") + missing = False +- for dn in master_dns: ++ for dn in supplier_dns: + mresult = ldif_search(MLDIF, dn) + if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']: + # Try from the beginning +@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None): + rresult['conflict'] is not None or rresult['tombstone']): + """ We can safely remove this DN from the replica dn list as it + does not need to be checked again. This also speeds things up +- when doing the replica vs master phase. ++ when doing the replica vs Supplier phase. + """ + replica_dns.remove(dn) + +@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None): + missing_report += (' Entries missing on Replica:\n') + missing = True + if mresult['entry'] and 'createtimestamp' in mresult['entry'].data: +- missing_report += (' - %s (Created on Master at: %s)\n' % ++ missing_report += (' - %s (Created on Supplier at: %s)\n' % + (dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0]))) + else: + missing_report += (' - %s\n' % dn) +@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None): + remaining conflict & tombstone entries as well. + """ + if opts['verbose']: +- print ("Comparing Replica to Master...") ++ print ("Comparing Replica to Supplier...") + MLDIF.seek(0) + RLDIF.seek(0) + missing = False +@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None): + if mresult['entry'] is None and mresult['glue'] is None: + MLDIF.seek(rresult['idx']) # Set the LDIF cursor/index to the last good line + if not missing: +- missing_report += (' Entries missing on Master:\n') ++ missing_report += (' Entries missing on Supplier:\n') + missing = True + if rresult['entry'] and 'createtimestamp' in rresult['entry'].data: + missing_report += (' - %s (Created on Replica at: %s)\n' % +@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None): + final_report += get_ruv_report(opts) + final_report += ('Entry Counts\n') + final_report += ('=====================================================\n\n') +- final_report += ('Master: %d\n' % (m_count)) ++ final_report += ('Supplier: %d\n' % (m_count)) + final_report += ('Replica: %d\n\n' % (r_count)) + + final_report += ('\nTombstones\n') + final_report += ('=====================================================\n\n') +- final_report += ('Master: %d\n' % (mtombstones)) ++ final_report += ('Supplier: %d\n' % (mtombstones)) + final_report += ('Replica: %d\n' % (rtombstones)) + + final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts']) +@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None): + final_report += ('\nResult\n') + final_report += ('=====================================================\n\n') + if missing_report == "" and len(diff_report) == 0: +- final_report += ('No replication differences between Master and Replica\n') ++ final_report += ('No replication differences between Supplier and Replica\n') + else: +- final_report += ('There are replication differences between Master and Replica\n') ++ final_report += ('There are replication differences between Supplier and Replica\n') + + if output_file: + output_file.write(final_report) +@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None): + + def check_for_diffs(mentries, mglue, rentries, rglue, report, opts): + """Online mode only - Check for diffs, return the updated report +- :param mentries - Master entries +- :param mglue - Master glue entries ++ :param mentries - Supplier entries ++ :param mglue - Supplier glue entries + :param rentries - Replica entries + :param rglue - Replica glue entries + :param report - A Dict of the entire report +@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname): + # Check suffix is replicated + try: + replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix +- master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter) +- if (len(master_replica) != 1): ++ supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter) ++ if (len(supplier_replica) != 1): + print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix)) + return False + except ldap.LDAPError as e: +@@ -969,7 +969,7 @@ def connect_to_replicas(opts): + muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f")) + else: + muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport']) +- master = SimpleLDAPObject(muri) ++ supplier = SimpleLDAPObject(muri) + + if opts['rprotocol'].lower() == 'ldapi': + ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f")) +@@ -978,23 +978,23 @@ def connect_to_replicas(opts): + replica = SimpleLDAPObject(ruri) + + # Set timeouts +- master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout']) +- master.set_option(ldap.OPT_TIMEOUT, opts['timeout']) ++ supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout']) ++ supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout']) + replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout']) + replica.set_option(ldap.OPT_TIMEOUT, opts['timeout']) + + # Setup Secure Connection + if opts['certdir'] is not None: +- # Setup Master ++ # Setup Supplier + if opts['mprotocol'] != LDAPI: +- master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir']) +- master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD) ++ supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir']) ++ supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD) + if opts['mprotocol'] == LDAP: + # Do StartTLS + try: +- master.start_tls_s() ++ supplier.start_tls_s() + except ldap.LDAPError as e: +- print('TLS negotiation failed on Master: {}'.format(str(e))) ++ print('TLS negotiation failed on Supplier: {}'.format(str(e))) + exit(1) + + # Setup Replica +@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts): + try: + replica.start_tls_s() + except ldap.LDAPError as e: +- print('TLS negotiation failed on Master: {}'.format(str(e))) ++ print('TLS negotiation failed on Supplier: {}'.format(str(e))) + exit(1) + +- # Open connection to master ++ # Open connection to Supplier + try: +- master.simple_bind_s(opts['binddn'], opts['bindpw']) ++ supplier.simple_bind_s(opts['binddn'], opts['bindpw']) + except ldap.SERVER_DOWN as e: + print(f"Cannot connect to {muri} ({str(e)})") + sys.exit(1) + except ldap.LDAPError as e: +- print("Error: Failed to authenticate to Master: ({}). " ++ print("Error: Failed to authenticate to Supplier: ({}). " + "Please check your credentials and LDAP urls are correct.".format(str(e))) + sys.exit(1) + +@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts): + # Validate suffix + if opts['verbose']: + print ("Validating suffix ...") +- if not validate_suffix(master, opts['suffix'], opts['mhost']): ++ if not validate_suffix(supplier, opts['suffix'], opts['mhost']): + sys.exit(1) + + if not validate_suffix(replica,opts['suffix'], opts['rhost']): +@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts): + + # Get the RUVs + if opts['verbose']: +- print ("Gathering Master's RUV...") ++ print ("Gathering Supplier's RUV...") + try: +- master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv']) +- if len(master_ruv) > 0: +- opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv']) ++ supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv']) ++ if len(supplier_ruv) > 0: ++ opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv']) + else: +- print("Error: Master does not have an RUV entry") ++ print("Error: Supplier does not have an RUV entry") + sys.exit(1) + except ldap.LDAPError as e: +- print("Error: Failed to get Master RUV entry: {}".format(str(e))) ++ print("Error: Failed to get Supplier RUV entry: {}".format(str(e))) + sys.exit(1) + + if opts['verbose']: +@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts): + print("Error: Failed to get Replica RUV entry: {}".format(str(e))) + sys.exit(1) + +- # Get the master RID ++ # Get the Supplier RID + if opts['verbose']: +- print("Getting Master's replica ID") ++ print("Getting Supplier's replica ID") + try: + search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix']) +- replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter) ++ replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter) + if len(replica_entry) > 0: + opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0]) + else: +@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts): + print("Error: Failed to get Replica entry: {}".format(str(e))) + sys.exit(1) + +- return (master, replica, opts) ++ return (supplier, replica, opts) + + + def print_online_report(report, opts, output_file): +@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file): + final_report += get_ruv_report(opts) + final_report += ('Entry Counts\n') + final_report += ('=====================================================\n\n') +- final_report += ('Master: %d\n' % (report['m_count'])) ++ final_report += ('Supplier: %d\n' % (report['m_count'])) + final_report += ('Replica: %d\n\n' % (report['r_count'])) + final_report += ('\nTombstones\n') + final_report += ('=====================================================\n\n') +- final_report += ('Master: %d\n' % (report['mtombstones'])) ++ final_report += ('Supplier: %d\n' % (report['mtombstones'])) + final_report += ('Replica: %d\n' % (report['rtombstones'])) + final_report += report['conflict'] + missing = False +@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file): + final_report += (' Entries missing on Replica:\n') + for entry in report['r_missing']: + if 'createtimestamp' in entry.data: +- final_report += (' - %s (Created on Master at: %s)\n' % ++ final_report += (' - %s (Created on Supplier at: %s)\n' % + (entry.dn, convert_timestamp(entry.data['createtimestamp'][0]))) + else: + final_report += (' - %s\n' % (entry.dn)) +@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file): + if m_missing > 0: + if r_missing > 0: + final_report += ('\n') +- final_report += (' Entries missing on Master:\n') ++ final_report += (' Entries missing on Supplier:\n') + for entry in report['m_missing']: + if 'createtimestamp' in entry.data: + final_report += (' - %s (Created on Replica at: %s)\n' % +@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file): + final_report += ('\nResult\n') + final_report += ('=====================================================\n\n') + if not missing and len(report['diff']) == 0: +- final_report += ('No replication differences between Master and Replica\n') ++ final_report += ('No replication differences between Supplier and Replica\n') + else: +- final_report += ('There are replication differences between Master and Replica\n') ++ final_report += ('There are replication differences between Supplier and Replica\n') + + if output_file: + output_file.write(final_report) +@@ -1170,7 +1170,7 @@ def remove_state_info(entry): + + def get_conflict_report(mentries, rentries, verbose): + """Gather the conflict entry dn's for each replica +- :param mentries - Master entries ++ :param mentries - Supplier entries + :param rentries - Replica entries + :param verbose - verbose logging + :return - A text blob to dispaly in the report +@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose): + report = "\n\nConflict Entries\n" + report += "=====================================================\n\n" + if len(m_conflicts) > 0: +- report += ('Master Conflict Entries: %d\n' % (len(m_conflicts))) ++ report += ('Supplier Conflict Entries: %d\n' % (len(m_conflicts))) + if verbose: + for entry in m_conflicts: + report += ('\n - %s\n' % (entry['dn'])) +@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None): + rconflicts = [] + mconflicts = [] + +- # Fire off paged searches on Master and Replica +- master, replica, opts = connect_to_replicas(opts) ++ # Fire off paged searches on Supplier and Replica ++ supplier, replica, opts = connect_to_replicas(opts) + + if opts['verbose']: + print('Start searching and comparing...') +@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None): + controls = [paged_ctrl] + req_pr_ctrl = controls[0] + try: +- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, +- "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))", +- ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'], +- serverctrls=controls) ++ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, ++ "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))", ++ ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'], ++ serverctrls=controls) + except ldap.LDAPError as e: +- print("Error: Failed to get Master entries: %s", str(e)) ++ print("Error: Failed to get Supplier entries: %s", str(e)) + sys.exit(1) + try: + replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, +@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None): + while not m_done or not r_done: + try: + if not m_done: +- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid) ++ m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid) + elif not r_done: + m_rdata = [] + except ldap.LDAPError as e: +- print("Error: Problem getting the results from the master: %s", str(e)) ++ print("Error: Problem getting the results from the Supplier: %s", str(e)) + sys.exit(1) + try: + if not r_done: +@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None): + report, opts) + + if not m_done: +- # Master ++ # Supplier + m_pctrls = [ + c + for c in m_rctrls +@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None): + try: + # Copy cookie from response control to request control + req_pr_ctrl.cookie = m_pctrls[0].cookie +- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, ++ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, + "(|(objectclass=*)(objectclass=ldapsubentry))", + ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls) + except ldap.LDAPError as e: +- print("Error: Problem searching the master: %s", str(e)) ++ print("Error: Problem searching the Supplier: %s", str(e)) + sys.exit(1) + else: + m_done = True # No more pages available +@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None): + print_online_report(report, opts, output_file) + + # unbind +- master.unbind_s() ++ supplier.unbind_s() + replica.unbind_s() + + +@@ -1367,18 +1367,18 @@ def init_online_params(args): + + # Make sure the URLs are different + if args.murl == args.rurl: +- print("Master and Replica LDAP URLs are the same, they must be different") ++ print("Supplier and Replica LDAP URLs are the same, they must be different") + sys.exit(1) + +- # Parse Master url ++ # Parse Supplier url + if not ldapurl.isLDAPUrl(args.murl): +- print("Master LDAP URL is invalid") ++ print("Supplier LDAP URL is invalid") + sys.exit(1) + murl = ldapurl.LDAPUrl(args.murl) + if murl.urlscheme in VALID_PROTOCOLS: + opts['mprotocol'] = murl.urlscheme + else: +- print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' % ++ print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' % + murl.urlscheme) + sys.exit(1) + +@@ -1520,7 +1520,7 @@ def offline_report(args): + print ("LDIF file ({}) is empty".format(ldif_dir)) + sys.exit(1) + if opts['mldif'] == opts['rldif']: +- print("The Master and Replica LDIF files must be different") ++ print("The Supplier and Replica LDIF files must be different") + sys.exit(1) + + OUTPUT_FILE = None +@@ -1547,7 +1547,7 @@ def get_state(args): + """Just do the RUV comparision + """ + opts = init_online_params(args) +- master, replica, opts = connect_to_replicas(opts) ++ supplier, replica, opts = connect_to_replicas(opts) + print(get_ruv_state(opts)) + + +@@ -1569,10 +1569,10 @@ def main(): + # Get state + state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas") + state_parser.set_defaults(func=get_state) +- state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server', +- dest='murl', default=None, required=True) ++ state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server', ++ dest='murl', default=None, required=True) + state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server', +- dest='rurl', required=True, default=None) ++ dest='rurl', required=True, default=None) + state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True) + state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None) + state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None) +@@ -1586,7 +1586,7 @@ def main(): + # Online mode + online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences") + online_parser.set_defaults(func=online_report) +- online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)', ++ online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)', + dest='murl', default=None, required=True) + online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)', + dest='rurl', required=True, default=None) +@@ -1612,12 +1612,12 @@ def main(): + # Offline LDIF mode + offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')") + offline_parser.set_defaults(func=offline_report) +- offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file', ++ offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file', + dest='mldif', default=None, required=True) + offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file', + dest='rldif', default=None, required=True) + offline_parser.add_argument('--rid', dest='rid', default=None, required=True, +- help='The Replica Identifer (rid) for the "Master" server') ++ help='The Replica Identifier (rid) for the "Supplier" server') + offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True) + offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true', + dest='conflicts', default=False) +-- +2.31.1 + diff --git a/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch b/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch deleted file mode 100644 index 81e2612..0000000 --- a/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch +++ /dev/null @@ -1,393 +0,0 @@ -From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Wed, 9 Dec 2020 22:42:59 +0000 -Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db - online import (#4476) - -Bug Description: During an online ldif2db import entries that do not - conform to various constraints will be skipped and - not imported. On completition of an import with skipped - entries, the server responds with a success message - and logs the skipped entry detail to the error logs. - The success messgae could lead the user to believe - that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the import - will continue and a warning message will be displayed. - The schema is extended with a nsTaskWarning attribute - which is used to capture and retrieve any task - warnings. - - CLI tools for online import updated. - - Test added to generate an incorrect ldif entry and perform an - online import. - -Fixes: https://github.com/389ds/389-ds-base/issues/4419 - -Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks) ---- - .../tests/suites/import/import_test.py | 39 +++++++++++++++++-- - ldap/schema/02common.ldif | 3 +- - .../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++ - ldap/servers/slapd/slap.h | 1 + - ldap/servers/slapd/slapi-plugin.h | 11 ++++++ - ldap/servers/slapd/slapi-private.h | 8 ---- - ldap/servers/slapd/task.c | 29 +++++++++++++- - src/lib389/lib389/cli_conf/backend.py | 6 ++- - src/lib389/lib389/tasks.py | 23 +++++++++-- - 9 files changed, 108 insertions(+), 17 deletions(-) - -diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py -index b47db96ed..77c915026 100644 ---- a/dirsrvtests/tests/suites/import/import_test.py -+++ b/dirsrvtests/tests/suites/import/import_test.py -@@ -65,6 +65,9 @@ def _import_clean(request, topo): - import_ldif = ldif_dir + '/basic_import.ldif' - if os.path.exists(import_ldif): - os.remove(import_ldif) -+ syntax_err_ldif = ldif_dir + '/syntax_err.dif' -+ if os.path.exists(syntax_err_ldif): -+ os.remove(syntax_err_ldif) - - request.addfinalizer(finofaci) - -@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo): - - def _create_syntax_err_ldif(topo): - """ -- Create an incorrect ldif entry that violates syntax check -+ Create an ldif file, which contains an entry that violates syntax check - """ - ldif_dir = topo.standalone.get_ldif_dir() - line1 = """dn: dc=example,dc=com - objectClass: top - objectClass: domain - dc: example -+ - dn: ou=groups,dc=example,dc=com - objectClass: top - objectClass: organizationalUnit - ou: groups -+ - dn: uid=JHunt,ou=groups,dc=example,dc=com - objectClass: top - objectClass: person -@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean): - assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True) - - -+def test_online_import_with_warning(topo, _import_clean): -+ """ -+ Import an ldif file with syntax errors, verify skipped entry warning code -+ -+ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 -+ :setup: Standalone Instance -+ :steps: -+ 1. Create standalone Instance -+ 2. Create an ldif file with an entry that violates syntax check (empty givenname) -+ 3. Online import of troublesome ldif file -+ :expected results: -+ 1. Successful import with skipped entry warning -+ """ -+ topo.standalone.restart() -+ -+ import_task = ImportTask(topo.standalone) -+ import_ldif1 = _create_syntax_err_ldif(topo) -+ -+ # Importing the offending ldif file - online -+ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX) -+ -+ # There is just a single entry in this ldif -+ import_task.wait(5) -+ -+ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code -+ assert import_task.present('nstaskwarning') -+ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn() -+ - def test_crash_on_ldif2db(topo, _import_clean): - """ - Delete the cn=monitor entry for an LDBM backend instance. Doing this will -@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl - topo.standalone.start() - - --def test_ldif2db_syntax_check(topo): -+def test_ldif2db_syntax_check(topo, _import_clean): - """ldif2db should return a warning when a skipped entry has occured. - :id: 85e75670-42c5-4062-9edc-7f117c97a06f - :setup: -@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo): - import_ldif1 = _create_syntax_err_ldif(topo) - # Import the offending LDIF data - offline - topo.standalone.stop() -- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) -+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None) - assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY - topo.standalone.start() - -diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif -index c6dc074db..821640d03 100644 ---- a/ldap/schema/02common.ldif -+++ b/ldap/schema/02common.ldif -@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T - attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) -+attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - # - # objectclasses: - # -@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement - objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) --objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' ) -+objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' ) - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -index 310893884..5c7d9c8f7 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -@@ -747,6 +747,11 @@ import_producer(void *param) - } - } - -+ /* capture skipped entry warnings for this task */ -+ if((job) && (job->skipped)) { -+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY); -+ } -+ - slapi_value_free(&(job->usn_value)); - import_free_ldif(&c); - info->state = FINISHED; -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index 53c9161d1..be4d38739 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -1753,6 +1753,7 @@ typedef struct slapi_task - int task_progress; /* number between 0 and task_work */ - int task_work; /* "units" of work to be done */ - int task_flags; /* (see above) */ -+ task_warning task_warn; /* task warning */ - char *task_status; /* transient status info */ - char *task_log; /* appended warnings, etc */ - char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */ -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 96313ef2c..ddb11bc7c 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int - /* task flags (set by the task-control code) */ - #define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */ - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+ - int slapi_task_register_handler(const char *name, dseCallbackFn func); - int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb); - int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func); -@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task); - void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func); - void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func); - void slapi_task_status_changed(Slapi_Task *task); -+void slapi_task_set_warning(Slapi_Task *task, task_warning warn); -+int slapi_task_get_warning(Slapi_Task *task); - void slapi_task_log_status(Slapi_Task *task, char *format, ...) - #ifdef __GNUC__ - __attribute__((format(printf, 2, 3))); -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index d5abe8ac1..b956ebe63 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); - void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); - void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); - --/* task warnings */ --typedef enum task_warning_t{ -- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -- WARN_UPGRADE_DN_FORMAT = (1 << 1), -- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) --} task_warning; -- - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 936c64920..806077a16 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0; - #define TASK_PROGRESS_NAME "nsTaskCurrentItem" - #define TASK_WORK_NAME "nsTaskTotalItems" - #define TASK_DATE_NAME "nsTaskCreated" -+#define TASK_WARNING_NAME "nsTaskWarning" - - #define DEFAULT_TTL "3600" /* seconds */ - #define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */ -@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task) - LDAPMod modlist[20]; - LDAPMod *mod[20]; - int cur = 0, i; -- char s1[20], s2[20], s3[20]; -+ char s1[20], s2[20], s3[20], s4[20]; - - if (shutting_down) { - /* don't care about task status updates anymore */ -@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task) - sprintf(s1, "%d", task->task_exitcode); - sprintf(s2, "%d", task->task_progress); - sprintf(s3, "%d", task->task_work); -+ sprintf(s4, "%d", task->task_warn); - NEXTMOD(TASK_PROGRESS_NAME, s2); - NEXTMOD(TASK_WORK_NAME, s3); - NEXTMOD(TASK_DATE_NAME, task->task_date); -+ NEXTMOD(TASK_WARNING_NAME, s4); - /* only add the exit code when the job is done */ - if ((task->task_state == SLAPI_TASK_FINISHED) || - (task->task_state == SLAPI_TASK_CANCELLED)) { -@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task) - return 0; /* return value not currently used */ - } - -+/* -+ * Return task warning -+ */ -+int -+slapi_task_get_warning(Slapi_Task *task) -+{ -+ if (task) { -+ return task->task_warn; -+ } -+ -+ return 0; /* return value not currently used */ -+} -+ -+/* -+ * Set task warning -+ */ -+void -+slapi_task_set_warning(Slapi_Task *task, task_warning warn) -+{ -+ if (task) { -+ return task->task_warn |= warn; -+ } -+} -+ - int - slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func) - { -diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py -index d7a6e670c..6bfbcb036 100644 ---- a/src/lib389/lib389/cli_conf/backend.py -+++ b/src/lib389/lib389/cli_conf/backend.py -@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args): - exclude_suffixes=args.exclude_suffixes) - task.wait(timeout=None) - result = task.get_exit_code() -+ warning = task.get_task_warn() - - if task.is_complete() and result == 0: -- log.info("The import task has finished successfully") -+ if warning is None or (warning == 0): -+ log.info("The import task has finished successfully") -+ else: -+ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning)) - else: - raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log()))) - -diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py -index dc7bb9206..bf20d1e61 100644 ---- a/src/lib389/lib389/tasks.py -+++ b/src/lib389/lib389/tasks.py -@@ -38,6 +38,7 @@ class Task(DSLdapObject): - self._protected = False - self._exit_code = None - self._task_log = "" -+ self._task_warn = None - - def status(self): - """Return the decoded status of the task -@@ -49,6 +50,7 @@ class Task(DSLdapObject): - - self._exit_code = self.get_attr_val_utf8("nsTaskExitCode") - self._task_log = self.get_attr_val_utf8("nsTaskLog") -+ self._task_warn = self.get_attr_val_utf8("nsTaskWarning") - if not self.exists(): - self._log.debug("complete: task has self cleaned ...") - # The task cleaned it self up. -@@ -77,6 +79,15 @@ class Task(DSLdapObject): - return None - return None - -+ def get_task_warn(self): -+ """Return task's warning code if task is complete, else None.""" -+ if self.is_complete(): -+ try: -+ return int(self._task_warn) -+ except TypeError: -+ return None -+ return None -+ - def wait(self, timeout=120): - """Wait until task is complete.""" - -@@ -390,14 +401,17 @@ class Tasks(object): - running, true if done - if true, second is the exit code - if dowait - is True, this function will block until the task is complete''' - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -- 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning'] - done = False - exitCode = 0 -+ warningCode = 0 - dn = entry.dn - while not done: - entry = self.conn.getEntry(dn, attrlist=attrlist) - self.log.debug("task entry %r", entry) - -+ if entry.nsTaskWarning: -+ warningCode = int(entry.nsTaskWarning) - if entry.nsTaskExitCode: - exitCode = int(entry.nsTaskExitCode) - done = True -@@ -405,7 +419,7 @@ class Tasks(object): - time.sleep(1) - else: - break -- return (done, exitCode) -+ return (done, exitCode, warningCode) - - def importLDIF(self, suffix=None, benamebase=None, input_file=None, - args=None): -@@ -461,8 +475,9 @@ class Tasks(object): - self.conn.add_s(entry) - - exitCode = 0 -+ warningCode = 0 - if args and args.get(TASK_WAIT, False): -- (done, exitCode) = self.conn.tasks.checkTask(entry, True) -+ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True) - - if exitCode: - self.log.error("Error: import task %s for file %s exited with %d", -@@ -470,6 +485,8 @@ class Tasks(object): - else: - self.log.info("Import task %s for file %s completed successfully", - cn, input_file) -+ if warningCode: -+ self.log.info("with warning code %d", warningCode) - self.dn = dn - self.entry = entry - return exitCode --- -2.26.2 - diff --git a/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch b/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch new file mode 100644 index 0000000..3fd6f16 --- /dev/null +++ b/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch @@ -0,0 +1,373 @@ +From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 13 Jul 2021 14:18:03 -0400 +Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro + changelog + +Bug Description: + +When a non-system index is added to a backend it is +disabled until the database is initialized or reindexed. +So in the case of the retro changelog the changenumber index +is alway disabled by default since it is never initialized. +This leads to unexpected unindexed searches of the retro +changelog. + +Fix Description: + +If an index has "nsSystemIndex" set to "true" then enable it +immediately. + +relates: https://github.com/389ds/389-ds-base/issues/4443 + +Reviewed by: spichugi & tbordaz(Thanks!!) +--- + .../tests/suites/retrocl/basic_test.py | 53 ++++++++------- + .../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++ + ldap/servers/plugins/retrocl/retrocl_create.c | 2 +- + .../slapd/back-ldbm/ldbm_index_config.c | 25 +++++-- + src/lib389/lib389/_mapped_object.py | 13 ++++ + 5 files changed, 130 insertions(+), 31 deletions(-) + create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py + +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +index f3bc50f29..84d513829 100644 +--- a/dirsrvtests/tests/suites/retrocl/basic_test.py ++++ b/dirsrvtests/tests/suites/retrocl/basic_test.py +@@ -8,7 +8,6 @@ + + import logging + import ldap +-import time + import pytest + from lib389.topologies import topology_st + from lib389.plugins import RetroChangelogPlugin +@@ -18,7 +17,8 @@ from lib389.tasks import * + from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance + from lib389.cli_base.dsrc import dsrc_arg_concat + from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr +-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts ++from lib389.idm.user import UserAccount, UserAccounts ++from lib389._mapped_object import DSLdapObjects + + pytestmark = pytest.mark.tier1 + +@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st): + + log.info('Adding user1') + try: +- user1 = users.create(properties={ ++ users.create(properties={ + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', +@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st): + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: +- log.error("Failed to add user1") ++ log.error("Failed to add user1: " + str(e)) + + log.info('Verify homePhone and carLicense attrs are in the changelog changestring') + try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) ++ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) ++ log.fatal("Changelog search failed, error: " + str(e)) + assert False + assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() ++ if cllist[0].present('changes'): ++ clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE in clstr + assert ATTR_CARLICENSE in clstr + +@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st): + + log.info('Adding user2') + try: +- user2 = users.create(properties={ ++ users.create(properties={ + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', +@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st): + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: +- log.error("Failed to add user2") ++ log.error("Failed to add user2: " + str(e)) + + log.info('Verify homePhone attr is not in the changelog changestring') + try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) ++ cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})') + assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() ++ if cllist[0].present('changes'): ++ clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE not in clstr + assert ATTR_CARLICENSE in clstr + except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) ++ log.fatal("Changelog search failed, error: " + str(e)) + assert False + + def test_retrocl_exclude_attr_mod(topology_st): +@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st): + 'homeDirectory': '/home/user1', + 'userpassword': USER_PW}) + except ldap.ALREADY_EXISTS: +- pass ++ user1 = UserAccount(st, dn=USER1_DN) + except ldap.LDAPError as e: +- log.error("Failed to add user1") ++ log.error("Failed to add user1: " + str(e)) + + log.info('Verify homePhone and carLicense attrs are in the changelog changestring') + try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) ++ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) ++ log.fatal("Changelog search failed, error: " + str(e)) + assert False + assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() ++ if cllist[0].present('changes'): ++ clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE in clstr + assert ATTR_CARLICENSE in clstr + +@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st): + + log.info('Modify user1 carLicense attribute') + try: +- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) ++ user1.replace(ATTR_CARLICENSE, "123WX321") + except ldap.LDAPError as e: + log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) + assert False + + log.info('Verify carLicense attr is not in the changelog changestring') + try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + assert len(cllist) > 0 + # There will be 2 entries in the changelog for this user, we are only + #interested in the second one, the modify operation. +- if cllist[1].hasAttr('changes'): +- clstr = (cllist[1].getValue('changes')).decode() ++ if cllist[1].present('changes'): ++ clstr = str(cllist[1].get_attr_vals_utf8('changes')) + assert ATTR_CARLICENSE not in clstr + except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) ++ log.fatal("Changelog search failed, error: " + str(e)) + assert False + ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py +new file mode 100644 +index 000000000..b1dfe962c +--- /dev/null ++++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py +@@ -0,0 +1,68 @@ ++import logging ++import pytest ++import os ++from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX ++from lib389.topologies import topology_st as topo ++from lib389.plugins import RetroChangelogPlugin ++from lib389.idm.user import UserAccounts ++from lib389._mapped_object import DSLdapObjects ++log = logging.getLogger(__name__) ++ ++ ++def test_indexing_is_online(topo): ++ """Test that the changenmumber index is online right after enabling the plugin ++ ++ :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f ++ :setup: Standalone Instance ++ :steps: ++ 1. Enable retro cl ++ 2. Perform some updates ++ 3. Search for "(changenumber>=-1)", and it is not partially unindexed ++ 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ # Enable plugin ++ topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') ++ plugin = RetroChangelogPlugin(topo.standalone) ++ plugin.enable() ++ topo.standalone.restart() ++ ++ # Do a bunch of updates ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) ++ user_entry = users.create(properties={ ++ 'sn': '1', ++ 'cn': 'user 1', ++ 'uid': 'user1', ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'givenname': 'user1', ++ 'homePhone': '0861234567', ++ 'carLicense': '131D16674', ++ 'mail': 'user1@whereever.com', ++ 'homeDirectory': '/home' ++ }) ++ for count in range(0, 10): ++ user_entry.replace('mail', f'test{count}@test.com') ++ ++ # Search the retro cl, and check for error messages ++ filter_simple = '(changenumber>=-1)' ++ filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))' ++ retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX) ++ retro_changelog_suffix.filter(filter_simple) ++ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') ++ ++ # Search the retro cl again with compound filter ++ retro_changelog_suffix.filter(filter_compound) ++ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) +diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c +index 571e6899f..5bfde7831 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_create.c ++++ b/ldap/servers/plugins/retrocl/retrocl_create.c +@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir) + val.bv_len = strlen(val.bv_val); + slapi_entry_add_values(e, "cn", vals); + +- val.bv_val = "false"; ++ val.bv_val = "true"; /* enables the index */ + val.bv_len = strlen(val.bv_val); + slapi_entry_add_values(e, "nssystemindex", vals); + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c +index 9722d0ce7..38e7368e1 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c +@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en + #define INDEXTYPE_NONE 1 + + static int +-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf) ++ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf) + { + Slapi_Attr *attr; + const struct berval *attrValue; +@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st + } + } + ++ *is_system_index = PR_FALSE; ++ if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) { ++ slapi_attr_first_value(attr, &sval); ++ attrValue = slapi_value_get_berval(sval); ++ if (strcasecmp(attrValue->bv_val, "true") == 0) { ++ *is_system_index = PR_TRUE; ++ } ++ } ++ + /* ok the entry is good to process, pass it to attr_index_config */ + if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) { + slapi_ch_free_string(index_name); +@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)), + void *arg) + { + ldbm_instance *inst = (ldbm_instance *)arg; ++ PRBool is_system_index = PR_FALSE; + + returntext[0] = '\0'; +- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL); ++ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL); + if (*returncode == LDAP_SUCCESS) { + return SLAPI_DSE_CALLBACK_OK; + } else { +@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused)) + { + ldbm_instance *inst = (ldbm_instance *)arg; + char *index_name = NULL; ++ PRBool is_system_index = PR_FALSE; + + returntext[0] = '\0'; +- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext); ++ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext); + if (*returncode == LDAP_SUCCESS) { + struct attrinfo *ai = NULL; + /* if the index is a "system" index, we assume it's being added by + * by the server, and it's okay for the index to go online immediately. + * if not, we set the index "offline" so it won't actually be used + * until someone runs db2index on it. ++ * If caller wants to add an index that they want to be online ++ * immediately they can also set "nsSystemIndex" to "true" in the ++ * index config entry (e.g. is_system_index). + */ +- if (!ldbm_attribute_always_indexed(index_name)) { ++ if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) { + ainfo_get(inst->inst_be, index_name, &ai); + PR_ASSERT(ai != NULL); + ai->ai_indexmask |= INDEX_OFFLINE; +@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e) + char *index_name = NULL; + int rc = LDAP_SUCCESS; + struct attrinfo *ai = NULL; ++ PRBool is_system_index = PR_FALSE; + + index_name = slapi_entry_attr_get_charptr(e, "cn"); + if (index_name) { + ainfo_get(inst->inst_be, index_name, &ai); + } + if (!ai) { +- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL); ++ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL); + } + if (rc == LDAP_SUCCESS) { + /* Assume the caller knows if it is OK to go online immediately */ +diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py +index b6d778b01..fe610d175 100644 +--- a/src/lib389/lib389/_mapped_object.py ++++ b/src/lib389/lib389/_mapped_object.py +@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint): + + return True + ++ def search(self, scope="subtree", filter='objectclass=*'): ++ search_scope = ldap.SCOPE_SUBTREE ++ if scope == 'base': ++ search_scope = ldap.SCOPE_BASE ++ elif scope == 'one': ++ search_scope = ldap.SCOPE_ONE ++ elif scope == 'subtree': ++ search_scope = ldap.SCOPE_SUBTREE ++ return self._instance.search_ext_s(self._dn, search_scope, filter, ++ serverctrls=self._server_controls, ++ clientctrls=self._client_controls, ++ escapehatch='i am sure') ++ + def display(self, attrlist=['*']): + """Get an entry but represent it as a string LDIF + +-- +2.31.1 + diff --git a/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch deleted file mode 100644 index 8f90863..0000000 --- a/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +++ /dev/null @@ -1,149 +0,0 @@ -From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 16 Dec 2020 16:30:28 +0100 -Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request - (#4491) - -Bug description: - If the bind entry does not exist, the bind result info - reports that 'No such entry'. It should not give any - information if the target entry exists or not - -Fix description: - Does not return any additional information during a bind - -relates: https://github.com/389ds/389-ds-base/issues/4480 - -Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) - -Platforms tested: F31 ---- - dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++ - 1 file changed, 112 insertions(+) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 1ae82dcdd..02b73ee85 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): - assert not dscreate_long_instance.exists() - - -+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value')) -+def dscreate_test_rdn_value(request): -+ template_file = "/tmp/dssetup.inf" -+ template_text = f"""[general] -+config_version = 2 -+# This invalid hostname ... -+full_machine_name = localhost.localdomain -+# Means we absolutely require this. -+strict_host_checking = False -+# In tests, we can be run in containers, NEVER trust -+# that systemd is there, or functional in any capacity -+systemd = False -+ -+[slapd] -+instance_name = test_different_rdn -+root_dn = cn=directory manager -+root_password = someLongPassword_123 -+# We do not have access to high ports in containers, -+# so default to something higher. -+port = 38999 -+secure_port = 63699 -+ -+[backend-userroot] -+create_suffix_entry = True -+suffix = {request.param} -+""" -+ -+ with open(template_file, "w") as template_fd: -+ template_fd.write(template_text) -+ -+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 -+ tmp_env = os.environ -+ if "PYTHONPATH" in tmp_env: -+ del tmp_env["PYTHONPATH"] -+ -+ def fin(): -+ os.remove(template_file) -+ if request.param != "wrong=some_value": -+ try: -+ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it']) -+ except subprocess.CalledProcessError as e: -+ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}") -+ else: -+ log.info("Wrong RDN is passed, instance not created") -+ request.addfinalizer(fin) -+ return template_file, tmp_env, request.param, -+ -+ -+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), -+ reason="This test is only required with new admin cli, and requires root.") -+@pytest.mark.bz1807419 -+@pytest.mark.ds50928 -+def test_dscreate_with_different_rdn(dscreate_test_rdn_value): -+ """Test that dscreate works with different RDN attributes as suffix -+ -+ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef -+ :parametrized: yes -+ :setup: None -+ :steps: -+ 1. Create template file for dscreate with different RDN attributes as suffix -+ 2. Create instance using template file -+ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute -+ :expectedresults: -+ 1. Should succeeds -+ 2. Should succeeds -+ 3. Should fail -+ """ -+ try: -+ subprocess.check_call([ -+ 'dscreate', -+ 'from-file', -+ dscreate_test_rdn_value[0] -+ ], env=dscreate_test_rdn_value[1]) -+ except subprocess.CalledProcessError as e: -+ log.fatal(f"dscreate failed! Error ({e.returncode}) {e.output}") -+ if dscreate_test_rdn_value[2] != "wrong=some_value": -+ assert False -+ else: -+ assert True -+ -+def test_bind_invalid_entry(topology_st): -+ """Test the failing bind does not return information about the entry -+ -+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f -+ -+ :setup: Standalone instance -+ -+ :steps: -+ 1: bind as non existing entry -+ 2: check that bind info does not report 'No such entry' -+ -+ :expectedresults: -+ 1: pass -+ 2: pass -+ """ -+ -+ topology_st.standalone.restart() -+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX -+ try: -+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) -+ except ldap.LDAPError as e: -+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) -+ log.info('exception description: ' + e.args[0]['desc']) -+ if 'info' in e.args[0]: -+ log.info('exception info: ' + e.args[0]['info']) -+ assert e.args[0]['desc'] == 'Invalid credentials' -+ assert 'info' not in e.args[0] -+ pass -+ -+ log.info('test_bind_invalid_entry: PASSED') -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode --- -2.26.2 - diff --git a/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch b/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch new file mode 100644 index 0000000..32c0eb1 --- /dev/null +++ b/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch @@ -0,0 +1,121 @@ +From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Fri, 9 Jul 2021 11:53:35 +1000 +Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow + all passwords (#4819) + +Bug Description: Due to mishanding of short dbpwd hashes, the +crypt_r algorithm was misused and was only comparing salts +in some cases, rather than checking the actual content +of the password. + +Fix Description: Stricter checks on dbpwd lengths to ensure +that content passed to crypt_r has at least 2 salt bytes and +1 hash byte, as well as stricter checks on ct_memcmp to ensure +that compared values are the same length, rather than potentially +allowing overruns/short comparisons. + +fixes: https://github.com/389ds/389-ds-base/issues/4817 + +Author: William Brown + +Review by: @mreynolds389 +--- + .../password/pwd_crypt_asterisk_test.py | 50 +++++++++++++++++++ + ldap/servers/plugins/pwdstorage/crypt_pwd.c | 20 +++++--- + 2 files changed, 64 insertions(+), 6 deletions(-) + create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py + +diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py +new file mode 100644 +index 000000000..d76614db1 +--- /dev/null ++++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py +@@ -0,0 +1,50 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2021 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import ldap ++import pytest ++from lib389.topologies import topology_st ++from lib389.idm.user import UserAccounts ++from lib389._constants import (DEFAULT_SUFFIX, PASSWORD) ++ ++pytestmark = pytest.mark.tier1 ++ ++def test_password_crypt_asterisk_is_rejected(topology_st): ++ """It was reported that {CRYPT}* was allowing all passwords to be ++ valid in the bind process. This checks that we should be rejecting ++ these as they should represent locked accounts. Similar, {CRYPT}! ++ ++ :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3 ++ :setup: Single instance ++ :steps: 1. Set a password hash in with CRYPT and the content * ++ 2. Test a bind ++ 3. Set a password hash in with CRYPT and the content ! ++ 4. Test a bind ++ :expectedresults: ++ 1. Successfully set the values ++ 2. The bind fails ++ 3. Successfully set the values ++ 4. The bind fails ++ """ ++ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') ++ topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') ++ ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) ++ user = users.create_test_user() ++ ++ user.set('userPassword', "{CRYPT}*") ++ ++ # Attempt to bind with incorrect password. ++ with pytest.raises(ldap.INVALID_CREDENTIALS): ++ badconn = user.bind('badpassword') ++ ++ user.set('userPassword', "{CRYPT}!") ++ # Attempt to bind with incorrect password. ++ with pytest.raises(ldap.INVALID_CREDENTIALS): ++ badconn = user.bind('badpassword') ++ +diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c +index 9031b2199..1b37d41ed 100644 +--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c ++++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c +@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */ + int + crypt_pw_cmp(const char *userpwd, const char *dbpwd) + { +- int rc; +- char *cp; ++ int rc = -1; ++ char *cp = NULL; ++ size_t dbpwd_len = strlen(dbpwd); + struct crypt_data data; + data.initialized = 0; + +- /* we use salt (first 2 chars) of encoded password in call to crypt_r() */ +- cp = crypt_r(userpwd, dbpwd, &data); +- if (cp) { +- rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd)); ++ /* ++ * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will ++ * allow any password to bind as we then only compare SALTS. ++ */ ++ if (dbpwd_len >= 3) { ++ /* we use salt (first 2 chars) of encoded password in call to crypt_r() */ ++ cp = crypt_r(userpwd, dbpwd, &data); ++ } ++ /* If these are not the same length, we can not proceed safely with memcmp. */ ++ if (cp && dbpwd_len == strlen(cp)) { ++ rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len); + } else { + rc = -1; + } +-- +2.31.1 + diff --git a/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch b/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch deleted file mode 100644 index 1d3b1a9..0000000 --- a/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 9 Dec 2020 15:21:11 -0500 -Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero - crash - -Bug Description: If a disk mount has zero total space or zero used - space then a division by zero can occur and the - server will crash. - - It has also been observed that sometimes a system - can return the wrong disk entirely, and when that - happens the incorrect disk also has zero available - space which triggers the disk monitioring thread to - immediately shut the server down. - -Fix Description: Check the total and used space for zero and do not - divide, just ignore it. As a preemptive measure - ignore disks from /dev, /proc, /sys (except /dev/shm). - Yes it's a bit hacky, but the true underlying cause - is not known yet. So better to be safe than sorry. - -Relates: https://github.com/389ds/389-ds-base/issues/4414 - -Reviewed by: firstyear(Thanks!) ---- - ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++- - ldap/servers/slapd/monitor.c | 13 +++++-------- - 2 files changed, 26 insertions(+), 9 deletions(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 691f77570..bfd965263 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir) - } - if (s.st_dev == dev_id) { - endmntent(fp); -- return (slapi_ch_strdup(mnt->mnt_dir)); -+ -+ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) || -+ strncmp(mnt->mnt_dir, "/proc", 4) == 0 || -+ strncmp(mnt->mnt_dir, "/sys", 4) == 0) -+ { -+ /* -+ * Ignore "mount directories" starting with /dev (except -+ * /dev/shm), /proc, /sys For some reason these mounts are -+ * occasionally/incorrectly returned. Only seen this at a -+ * customer site once. When it happens it causes disk -+ * monitoring to think the server has 0 disk space left, and -+ * it abruptly/unexpectedly shuts the server down. At this -+ * point it looks like a bug in stat(), setmntent(), or -+ * getmntent(), but there is no way to prove that since there -+ * is no way to reproduce the original issue. For now just -+ * return NULL to be safe. -+ */ -+ return NULL; -+ } else { -+ return (slapi_ch_strdup(mnt->mnt_dir)); -+ } - } - } - endmntent(fp); -diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c -index 562721bed..65f082986 100644 ---- a/ldap/servers/slapd/monitor.c -+++ b/ldap/servers/slapd/monitor.c -@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), - { - int32_t rc = LDAP_SUCCESS; - char **dirs = NULL; -- char buf[BUFSIZ]; - struct berval val; - struct berval *vals[2]; - uint64_t total_space; -@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), - - disk_mon_get_dirs(&dirs); - -- for (uint16_t i = 0; dirs && dirs[i]; i++) { -+ for (size_t i = 0; dirs && dirs[i]; i++) { -+ char buf[BUFSIZ] = {0}; - rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space); -- if (rc) { -- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info", -- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]); -- } else { -+ if (rc == 0 && total_space > 0 && used_space > 0) { - val.bv_len = snprintf(buf, sizeof(buf), -- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", -- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); -+ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", -+ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); - val.bv_val = buf; - attrlist_merge(&e->e_attrs, "dsDisk", vals); - } --- -2.26.2 - diff --git a/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch b/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch new file mode 100644 index 0000000..66643a1 --- /dev/null +++ b/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch @@ -0,0 +1,39 @@ +From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 21 Jul 2021 09:16:30 +0200 +Subject: [PATCH] Issue 4837 - persistent search returns entries even when an + error is returned by content-sync-plugin (#4838) + +Bug description: + When a ldap client sends a sync request control, the server response may contain a sync state control. + If the server fails to create the control the search should fail. + +Fix description: + In case the server fails to create the response control + logs the failure of the pre_search + +relates: https://github.com/389ds/389-ds-base/issues/4837 + +Reviewed by: Simon Pichugin + +Platforms tested: RH8.4 +--- + ldap/servers/plugins/sync/sync_refresh.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c +index 646ff760b..4cbb6a949 100644 +--- a/ldap/servers/plugins/sync/sync_refresh.c ++++ b/ldap/servers/plugins/sync/sync_refresh.c +@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb) + Slapi_Entry *e; + slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e); + LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *)); +- sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL); ++ rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL); + slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl); + } + return (rc); +-- +2.31.1 + diff --git a/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch b/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch new file mode 100644 index 0000000..aa701a0 --- /dev/null +++ b/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch @@ -0,0 +1,49 @@ +From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 26 Jul 2021 15:22:08 -0400 +Subject: [PATCH] Hardcode gost crypt passsword storage scheme + +--- + .../plugins/pwdstorage/gost_yescrypt.c | 22 ------------------- + 1 file changed, 22 deletions(-) + +diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c +index 67b39395e..7b0d1653c 100644 +--- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c ++++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c +@@ -11,7 +11,6 @@ + + #include + +-#ifdef XCRYPT_VERSION_STR + #include + int + gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd) +@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd) + return enc; + } + +-#else +- +-/* +- * We do not have xcrypt, so always fail all checks. +- */ +-int +-gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused))) +-{ +- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME, +- "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n"); +- return 1; +-} +- +-char * +-gost_yescrypt_pw_enc(const char *pwd __attribute__((unused))) +-{ +- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME, +- "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n"); +- return NULL; +-} +-#endif +-- +2.31.1 + diff --git a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch deleted file mode 100644 index fb3211a..0000000 --- a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch +++ /dev/null @@ -1,132 +0,0 @@ -From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 12 Jan 2021 11:06:24 +0100 -Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py - (Needed on RHEL) (#4527) - -(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f) ---- - .../tests/suites/clu/repl_monitor_test.py | 67 +++++++++---------- - 1 file changed, 31 insertions(+), 36 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index eb18d2da2..b2cb840b3 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,7 +9,6 @@ - import time - import subprocess - import pytest --import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2 - from lib389.cli_base import FakeArgs - from lib389.cli_base.dsrc import dsrc_arg_concat - from lib389.cli_base import connect_instance -+from lib389.replica import Replicas -+ - - pytestmark = pytest.mark.tier0 - -@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - --def get_hostnames_from_log(port1, port2): -- # Get the supplier host names as displayed in replication monitor output -- with open(LOG_FILE, 'r') as logfile: -- logtext = logfile.read() -- # search for Supplier :hostname:port -- # and use \D to insure there is no more number is after -- # the matched port (i.e that 10 is not matching 101) -- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -- match=re.search(regexp, logtext) -- host_m1 = 'localhost.localdomain' -- if (match is not None): -- host_m1 = match.group(2) -- # Same for master 2 -- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -- match=re.search(regexp, logtext) -- host_m2 = 'localhost.localdomain' -- if (match is not None): -- host_m2 = match.group(2) -- return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - m1 = topology_m2.ms["master1"] - m2 = topology_m2.ms["master2"] - -+ # Enable ldapi if not already done. -+ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]: -+ if not inst.can_autobind(): -+ # Update ns-slapd instance -+ inst.config.set('nsslapd-ldapilisten', 'on') -+ inst.config.set('nsslapd-ldapiautobind', 'on') -+ inst.restart() -+ # Ensure that updates have been sent both ways. -+ replicas = Replicas(m1) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ replica.test_replication([m2]) -+ replicas = Replicas(m2) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ replica.test_replication([m1]) -+ -+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -+ - connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) - content_list = ['Replica Root: dc=example,dc=com', - 'Replica ID: 1', -@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + m2.host + ':' + str(m2.port) -+ - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -+ aliases = ['M1=' + m1.host + ':' + str(m1.port), -+ 'M2=' + m2.host + ':' + str(m2.port)] -+ - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -- # Prepare the data for next tests -- aliases = ['M1=' + host_m1 + ':' + str(m1.port), -- 'M2=' + host_m2 + ':' + str(m2.port)] -- -- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -- -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + host_m2 + ':' + str(m2.port) -- - log.info('Run replication monitor with aliases option') - args.aliases = aliases - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch deleted file mode 100644 index 44636c8..0000000 --- a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +++ /dev/null @@ -1,51 +0,0 @@ -From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001 -From: Barbora Simonova -Date: Mon, 11 Jan 2021 15:51:24 +0100 -Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high - rate of setsocketopt - -Description: -The config value of nsslapd-nagle is now set to 'off' by default. -Added a test case, that checks the value. - -Relates: https://github.com/389ds/389-ds-base/issues/4315 - -Reviewed by: droideck (Thanks!) ---- - .../tests/suites/config/config_test.py | 20 +++++++++++++++++++ - 1 file changed, 20 insertions(+) - -diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py -index 38d1ed9ac..fda16a530 100644 ---- a/dirsrvtests/tests/suites/config/config_test.py -+++ b/dirsrvtests/tests/suites/config/config_test.py -@@ -41,6 +41,26 @@ def big_file(): - return TEMP_BIG_FILE - - -+@pytest.mark.bz1897248 -+@pytest.mark.ds4315 -+@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") -+def test_nagle_default_value(topo): -+ """Test that nsslapd-nagle attribute is off by default -+ -+ :id: 00361f5d-d638-4d39-8231-66fa52637203 -+ :setup: Standalone instance -+ :steps: -+ 1. Create instance -+ 2. Check the value of nsslapd-nagle -+ :expectedresults: -+ 1. Success -+ 2. The value of nsslapd-nagle should be off -+ """ -+ -+ log.info('Check the value of nsslapd-nagle attribute is off by default') -+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' -+ -+ - def test_maxbersize_repl(topology_m2, big_file): - """maxbersize is ignored in the replicated operations. - --- -2.26.2 - diff --git a/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch b/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch new file mode 100644 index 0000000..138ee66 --- /dev/null +++ b/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch @@ -0,0 +1,39 @@ +From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Sat, 24 Apr 2021 21:37:54 +0100 +Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735) + +Description: Online import of ldif file that contains an entry with + no parent doesnt generate a task warning. + +Fixes: https://github.com/389ds/389-ds-base/issues/4734 + +Author: vashirov@redhat.com (Thanks) + +Reviewed by: mreynolds, jchapma +--- + ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +index 905a84e74..35183ed59 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +@@ -2767,8 +2767,14 @@ import_foreman(void *param) + if (job->flags & FLAG_ABORT) { + goto error; + } ++ ++ /* capture skipped entry warnings for this task */ ++ if((job) && (job->skipped)) { ++ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY); ++ } + } + ++ + slapi_pblock_destroy(pb); + info->state = FINISHED; + return; +-- +2.31.1 + diff --git a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch deleted file mode 100644 index ba8f9d2..0000000 --- a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Tue, 12 Jan 2021 17:45:41 +0100 -Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for - RHEL) - fix merge issue (#4533) - -(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc) ---- - .../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++----- - 1 file changed, 36 insertions(+), 11 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index b2cb840b3..caf6a9099 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,6 +9,7 @@ - import time - import subprocess - import pytest -+import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - -+def get_hostnames_from_log(port1, port2): -+ # Get the supplier host names as displayed in replication monitor output -+ with open(LOG_FILE, 'r') as logfile: -+ logtext = logfile.read() -+ # search for Supplier :hostname:port -+ # and use \D to insure there is no more number is after -+ # the matched port (i.e that 10 is not matching 101) -+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m1 = 'localhost.localdomain' -+ if (match is not None): -+ host_m1 = match.group(2) -+ # Same for master 2 -+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m2 = 'localhost.localdomain' -+ if (match is not None): -+ host_m2 = match.group(2) -+ return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + m2.host + ':' + str(m2.port) -- - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -- aliases = ['M1=' + m1.host + ':' + str(m1.port), -- 'M2=' + m2.host + ':' + str(m2.port)] -- - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -+ # Prepare the data for next tests -+ aliases = ['M1=' + host_m1 + ':' + str(m1.port), -+ 'M2=' + host_m2 + ':' + str(m2.port)] -+ -+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + host_m2 + ':' + str(m2.port) -+ - log.info('Run replication monitor with aliases option') - args.aliases = aliases - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch b/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch new file mode 100644 index 0000000..a9d5958 --- /dev/null +++ b/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch @@ -0,0 +1,37 @@ +From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Thu, 19 Aug 2021 11:06:06 +1000 +Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes + replication issues (#4876) + +Bug Description: Due to older servers missing the syntax +plugin this breaks schema replication and causes cascading +errors. + +Fix Description: This changes the syntax to be a case +insensitive string, while leaving the plugins in place +for other usage. + +fixes: https://github.com/389ds/389-ds-base/issues/4872 + +Author: William Brown + +Review by: @mreynolds389 @progier389 +--- + ldap/schema/03entryuuid.ldif | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif +index cbde981fe..f7a7f40d5 100644 +--- a/ldap/schema/03entryuuid.ldif ++++ b/ldap/schema/03entryuuid.ldif +@@ -13,4 +13,5 @@ dn: cn=schema + # + # attributes + # +-attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) ++# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) ++attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +-- +2.31.1 + diff --git a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch deleted file mode 100644 index 593e2cd..0000000 --- a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Wed, 13 Jan 2021 15:16:08 +0100 -Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529) - -Bug Description: While doing a ldapsearch on "cn=monitor" is -throwing err=32 with -s one. - -Fix Description: 'cn=monitor' is not a real entry so we should not -trying to check if the searched suffix (cm=monitor or its children) -belongs to the searched backend. - -Fixes: #4528 - -Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!) ---- - ldap/servers/slapd/opshared.c | 15 ++++++++++----- - 1 file changed, 10 insertions(+), 5 deletions(-) - -diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c -index c0bc5dcd0..f5ed71144 100644 ---- a/ldap/servers/slapd/opshared.c -+++ b/ldap/servers/slapd/opshared.c -@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - int rc = 0; - int internal_op; - Slapi_DN *basesdn = NULL; -+ Slapi_DN monitorsdn = {0}; - Slapi_DN *sdn = NULL; - Slapi_Operation *operation = NULL; - Slapi_Entry *referral = NULL; -@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } - } else { - /* be_suffix null means that we are searching the default backend -- * -> don't change the search parameters in pblock -- */ -- if (be_suffix != NULL) { -+ * -> don't change the search parameters in pblock -+ * Also, we skip this block for 'cn=monitor' search and its subsearches -+ * as they are done by callbacks from monitor.c */ -+ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor"); -+ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) { - if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) { - /* one level searches - * - depending on the suffix of the backend we might have to -@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } else if (slapi_sdn_issuffix(basesdn, be_suffix)) { - int tmp_scope = LDAP_SCOPE_ONELEVEL; - slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope); -- } else -+ } else { -+ slapi_sdn_done(&monitorsdn); - goto next_be; -+ } - } - - /* subtree searches : -@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } - } - } -- -+ slapi_sdn_done(&monitorsdn); - slapi_pblock_set(pb, SLAPI_BACKEND, be); - slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); - slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL); --- -2.26.2 - diff --git a/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch b/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch new file mode 100644 index 0000000..7b74019 --- /dev/null +++ b/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch @@ -0,0 +1,125 @@ +From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 25 Aug 2021 13:20:56 -0400 +Subject: [PATCH] Remove GOST-YESCRYPT password sotrage scheme + +--- + .../tests/suites/password/pwd_algo_test.py | 1 - + ldap/ldif/template-dse-minimal.ldif.in | 9 --------- + ldap/ldif/template-dse.ldif.in | 9 --------- + ldap/servers/plugins/pwdstorage/pwd_init.c | 18 ------------------ + ldap/servers/slapd/fedse.c | 13 ------------- + 5 files changed, 50 deletions(-) + +diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py +index 66bda420e..88f8e40b7 100644 +--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py ++++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py +@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name): + ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', + 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', + 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT', +- 'GOST_YESCRYPT', + )) + def test_pwd_algo_test(topology_st, algo): + """Assert that all of our password algorithms correctly PASS and FAIL varying +diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in +index 2eccae9b2..1a05f4a67 100644 +--- a/ldap/ldif/template-dse-minimal.ldif.in ++++ b/ldap/ldif/template-dse-minimal.ldif.in +@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials + nsslapd-pluginid: aes-storage-scheme + nsslapd-pluginprecedence: 1 + +-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config +-objectclass: top +-objectclass: nsSlapdPlugin +-cn: GOST_YESCRYPT +-nsslapd-pluginpath: libpwdstorage-plugin +-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init +-nsslapd-plugintype: pwdstoragescheme +-nsslapd-pluginenabled: on +- + dn: cn=Syntax Validation Task,cn=plugins,cn=config + objectclass: top + objectclass: nsSlapdPlugin +diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in +index 7e7480cba..f30531bec 100644 +--- a/ldap/ldif/template-dse.ldif.in ++++ b/ldap/ldif/template-dse.ldif.in +@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials + nsslapd-pluginid: aes-storage-scheme + nsslapd-pluginprecedence: 1 + +-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config +-objectclass: top +-objectclass: nsSlapdPlugin +-cn: GOST_YESCRYPT +-nsslapd-pluginpath: libpwdstorage-plugin +-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init +-nsslapd-plugintype: pwdstoragescheme +-nsslapd-pluginenabled: on +- + dn: cn=Syntax Validation Task,cn=plugins,cn=config + objectclass: top + objectclass: nsSlapdPlugin +diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c +index 606e63404..59cfc4684 100644 +--- a/ldap/servers/plugins/pwdstorage/pwd_init.c ++++ b/ldap/servers/plugins/pwdstorage/pwd_init.c +@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS + + static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"}; + +-static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"}; +- + static char *plugin_name = "NSPwdStoragePlugin"; + + int +@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb) + return rc; + } + +-int +-gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb) +-{ +- int rc; +- +- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n"); +- +- rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01); +- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc); +- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc); +- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp); +- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME); +- +- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc); +- return rc; +-} +diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 44159c991..24b7ed11c 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -203,19 +203,6 @@ static const char *internal_entries[] = + "nsslapd-pluginVersion: none\n" + "nsslapd-pluginVendor: 389 Project\n" + "nsslapd-pluginDescription: CRYPT-SHA512\n", +- +- "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n" +- "objectclass: top\n" +- "objectclass: nsSlapdPlugin\n" +- "cn: GOST_YESCRYPT\n" +- "nsslapd-pluginpath: libpwdstorage-plugin\n" +- "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n" +- "nsslapd-plugintype: pwdstoragescheme\n" +- "nsslapd-pluginenabled: on\n" +- "nsslapd-pluginId: GOST_YESCRYPT\n" +- "nsslapd-pluginVersion: none\n" +- "nsslapd-pluginVendor: 389 Project\n" +- "nsslapd-pluginDescription: GOST_YESCRYPT\n", + }; + + static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]); +-- +2.31.1 + diff --git a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch deleted file mode 100644 index 7133049..0000000 --- a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch +++ /dev/null @@ -1,3866 +0,0 @@ -From 6969181628f2c664d5f82c89c15bbc0a2487e21f Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 19 Nov 2020 15:46:19 -0500 -Subject: [PATCH 1/2] Issue 4384 - Use MONOTONIC clock for all timing events - and conditions - -Bug Description: All of the server's event handling and replication were - based on REALTIME clocks, which can be influenced by the - system changing. This could causes massive delays, and - simply cause unexpected behavior. - -Fix Description: Move all condition variables to use pthread instead of NSPR - functions. Also make sure we use MONOTONIC clocks when we - get the current time when checking for timeouts and other - timed events. - -Relates: https://github.com/389ds/389-ds-base/issues/4384 - -Reviewed by: elkris, firstyear, and tbordaz (Thanks!!!) - -Apply firstyear's sugestions - -Apply Firstyear's other suggestions - -Apply Thierry's suggestions ---- - Makefile.am | 2 +- - .../tests/suites/plugins/entryusn_test.py | 3 + - ldap/servers/plugins/chainingdb/cb_add.c | 2 +- - ldap/servers/plugins/chainingdb/cb_compare.c | 2 +- - .../plugins/chainingdb/cb_conn_stateless.c | 16 +- - ldap/servers/plugins/chainingdb/cb_delete.c | 2 +- - ldap/servers/plugins/chainingdb/cb_instance.c | 3 +- - ldap/servers/plugins/chainingdb/cb_modify.c | 2 +- - ldap/servers/plugins/chainingdb/cb_modrdn.c | 2 +- - ldap/servers/plugins/chainingdb/cb_search.c | 8 +- - ldap/servers/plugins/cos/cos_cache.c | 4 +- - ldap/servers/plugins/dna/dna.c | 2 +- - ldap/servers/plugins/passthru/ptconn.c | 2 +- - ldap/servers/plugins/referint/referint.c | 85 +++++--- - ldap/servers/plugins/replication/repl5.h | 3 +- - .../plugins/replication/repl5_backoff.c | 4 +- - .../plugins/replication/repl5_connection.c | 12 +- - .../plugins/replication/repl5_inc_protocol.c | 91 ++++---- - .../plugins/replication/repl5_mtnode_ext.c | 3 +- - .../plugins/replication/repl5_prot_private.h | 6 +- - .../plugins/replication/repl5_replica.c | 10 +- - .../replication/repl5_replica_config.c | 197 +++++++++++------- - .../plugins/replication/repl5_tot_protocol.c | 71 ++++--- - ldap/servers/plugins/replication/repl_extop.c | 4 +- - .../plugins/replication/windows_connection.c | 2 +- - .../replication/windows_inc_protocol.c | 82 +++++--- - .../replication/windows_tot_protocol.c | 24 ++- - ldap/servers/plugins/retrocl/retrocl_trim.c | 2 +- - ldap/servers/plugins/roles/roles_cache.c | 4 +- - ldap/servers/plugins/sync/sync.h | 4 +- - ldap/servers/plugins/sync/sync_persist.c | 54 +++-- - .../slapd/back-ldbm/db-bdb/bdb_import.c | 49 ++--- - .../back-ldbm/db-bdb/bdb_import_threads.c | 29 +-- - .../back-ldbm/db-bdb/bdb_instance_config.c | 8 +- - .../slapd/back-ldbm/db-bdb/bdb_layer.c | 129 +++++++----- - .../slapd/back-ldbm/db-bdb/bdb_layer.h | 10 +- - ldap/servers/slapd/back-ldbm/import.h | 6 +- - ldap/servers/slapd/connection.c | 88 ++++---- - ldap/servers/slapd/daemon.c | 64 ++++-- - ldap/servers/slapd/eventq.c | 132 ++++++++---- - ldap/servers/slapd/house.c | 58 ++++-- - ldap/servers/slapd/libmakefile | 2 +- - ldap/servers/slapd/psearch.c | 63 +++--- - ldap/servers/slapd/regex.c | 2 +- - ldap/servers/slapd/slapi-plugin.h | 7 + - .../slapd/{slapi2nspr.c => slapi2runtime.c} | 87 +++++--- - ldap/servers/slapd/task.c | 4 +- - ldap/servers/slapd/time.c | 10 +- - 48 files changed, 877 insertions(+), 579 deletions(-) - rename ldap/servers/slapd/{slapi2nspr.c => slapi2runtime.c} (69%) - -diff --git a/Makefile.am b/Makefile.am -index 0e5f04f91..f7bf1c44c 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -1455,7 +1455,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ - ldap/servers/slapd/security_wrappers.c \ - ldap/servers/slapd/slapd_plhash.c \ - ldap/servers/slapd/slapi_counter.c \ -- ldap/servers/slapd/slapi2nspr.c \ -+ ldap/servers/slapd/slapi2runtime.c \ - ldap/servers/slapd/snmp_collator.c \ - ldap/servers/slapd/sort.c \ - ldap/servers/slapd/ssl.c \ -diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py -index ad3d7f209..da0538f74 100644 ---- a/dirsrvtests/tests/suites/plugins/entryusn_test.py -+++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py -@@ -6,9 +6,11 @@ - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - # -+import os - import ldap - import logging - import pytest -+import time - from lib389._constants import DEFAULT_SUFFIX - from lib389.config import Config - from lib389.plugins import USNPlugin, MemberOfPlugin -@@ -211,6 +213,7 @@ def test_entryusn_after_repl_delete(topology_m2): - user_usn = user_1.get_attr_val_int('entryusn') - - user_1.delete() -+ time.sleep(1) # Gives a little time for tombstone creation to complete - - ts = tombstones.get(user_rdn) - ts_usn = ts.get_attr_val_int('entryusn') -diff --git a/ldap/servers/plugins/chainingdb/cb_add.c b/ldap/servers/plugins/chainingdb/cb_add.c -index a9f9c0f87..b7ae7267d 100644 ---- a/ldap/servers/plugins/chainingdb/cb_add.c -+++ b/ldap/servers/plugins/chainingdb/cb_add.c -@@ -130,7 +130,7 @@ chaining_back_add(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* Send LDAP operation to the remote host */ -diff --git a/ldap/servers/plugins/chainingdb/cb_compare.c b/ldap/servers/plugins/chainingdb/cb_compare.c -index 25dfa87b5..8d7fdd06b 100644 ---- a/ldap/servers/plugins/chainingdb/cb_compare.c -+++ b/ldap/servers/plugins/chainingdb/cb_compare.c -@@ -126,7 +126,7 @@ chaining_back_compare(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -index 9beb459ef..a2003221e 100644 ---- a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -+++ b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -@@ -453,7 +453,7 @@ cb_get_connection(cb_conn_pool *pool, - conn->ld = ld; - conn->status = CB_CONNSTATUS_OK; - conn->refcount = 0; /* incremented below */ -- conn->opentime = slapi_current_utc_time(); -+ conn->opentime = slapi_current_rel_time_t(); - conn->ThreadId = PR_MyThreadId(); /* store the thread id */ - conn->next = NULL; - if (secure) { -@@ -488,7 +488,7 @@ cb_get_connection(cb_conn_pool *pool, - } - - if (!secure) -- slapi_wait_condvar(pool->conn.conn_list_cv, NULL); -+ slapi_wait_condvar_pt(pool->conn.conn_list_cv, pool->conn.conn_list_mutex, NULL); - - if (cb_debug_on()) { - slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, -@@ -639,7 +639,7 @@ cb_check_for_stale_connections(cb_conn_pool *pool) - slapi_lock_mutex(pool->conn.conn_list_mutex); - - if (connlifetime > 0) -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - - if (pool->secure) { - myself = PR_ThreadSelf(); -@@ -860,7 +860,7 @@ cb_ping_farm(cb_backend_instance *cb, cb_outgoing_conn *cnx, time_t end_time) - if (cnx && (cnx->status != CB_CONNSTATUS_OK)) /* Known problem */ - return LDAP_SERVER_DOWN; - -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - if (end_time && ((now <= end_time) || (end_time < 0))) - return LDAP_SUCCESS; - -@@ -905,7 +905,7 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) - slapi_unlock_mutex(cb->monitor_availability.cpt_lock); - if (cb->monitor_availability.cpt >= CB_NUM_CONN_BEFORE_UNAVAILABILITY) { - /* we reach the limit of authorized failed connections => we setup the chaining BE state to unavailable */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); - cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); -@@ -938,7 +938,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - time_t now; - if (cb->monitor_availability.farmserver_state == FARMSERVER_UNAVAILABLE) { - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - if (now >= cb->monitor_availability.unavailableTimeLimit) { - cb->monitor_availability.unavailableTimeLimit = now + CB_INFINITE_TIME; /* to be sure only one thread can do the test */ - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); -@@ -951,7 +951,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - "cb_check_availability - ping the farm server and check if it's still unavailable"); - if (cb_ping_farm(cb, NULL, 0) != LDAP_SUCCESS) { /* farm still unavailable... Just change the timelimit */ - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); - cb_send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "FARM SERVER TEMPORARY UNAVAILABLE", 0, NULL); -@@ -961,7 +961,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - } else { - /* farm is back !*/ - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - cb->monitor_availability.unavailableTimeLimit = now; /* the unavailable period is finished */ - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); - /* The farmer server state backs to FARMSERVER_AVAILABLE, but this already done in cb_ping_farm, and also the reset of cpt*/ -diff --git a/ldap/servers/plugins/chainingdb/cb_delete.c b/ldap/servers/plugins/chainingdb/cb_delete.c -index e76fb6b95..94f84b55d 100644 ---- a/ldap/servers/plugins/chainingdb/cb_delete.c -+++ b/ldap/servers/plugins/chainingdb/cb_delete.c -@@ -117,7 +117,7 @@ chaining_back_delete(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c -index cd5abb834..bc1864c1a 100644 ---- a/ldap/servers/plugins/chainingdb/cb_instance.c -+++ b/ldap/servers/plugins/chainingdb/cb_instance.c -@@ -1947,7 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), - * we can't call recursively into the DSE to do more adds, they'll - * silently fail. instead, schedule the adds to happen in 1 second. - */ -- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, time(NULL) + 1); -+ inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, -+ slapi_current_rel_time_t() + 1); - } - - /* Get the list of operational attrs defined in the schema */ -diff --git a/ldap/servers/plugins/chainingdb/cb_modify.c b/ldap/servers/plugins/chainingdb/cb_modify.c -index f81edf4a6..e53da9e40 100644 ---- a/ldap/servers/plugins/chainingdb/cb_modify.c -+++ b/ldap/servers/plugins/chainingdb/cb_modify.c -@@ -125,7 +125,7 @@ chaining_back_modify(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_modrdn.c b/ldap/servers/plugins/chainingdb/cb_modrdn.c -index 95a068be7..d648253c7 100644 ---- a/ldap/servers/plugins/chainingdb/cb_modrdn.c -+++ b/ldap/servers/plugins/chainingdb/cb_modrdn.c -@@ -129,7 +129,7 @@ chaining_back_modrdn(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c -index d47cbc8e4..ffc8f56f8 100644 ---- a/ldap/servers/plugins/chainingdb/cb_search.c -+++ b/ldap/servers/plugins/chainingdb/cb_search.c -@@ -236,7 +236,7 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - rc = ldap_search_ext(ld, target, scope, filter, attrs, attrsonly, -@@ -503,7 +503,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - while (1) { -@@ -579,7 +579,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* The server sent one of the entries found by the search */ -@@ -611,7 +611,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - parse_rc = ldap_parse_reference(ctx->ld, res, &referrals, NULL, 1); -diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c -index eb9bd77f9..d404ff901 100644 ---- a/ldap/servers/plugins/cos/cos_cache.c -+++ b/ldap/servers/plugins/cos/cos_cache.c -@@ -346,7 +346,7 @@ cos_cache_init(void) - if (ret == 0) { - slapi_lock_mutex(start_lock); - while (!started) { -- while (slapi_wait_condvar(start_cond, NULL) == 0) -+ while (slapi_wait_condvar_pt(start_cond, start_lock, NULL) == 0) - ; - } - slapi_unlock_mutex(start_lock); -@@ -401,7 +401,7 @@ cos_cache_wait_on_change(void *arg __attribute__((unused))) - * thread notifies our condvar, and so we will not miss any - * notifications, including the shutdown notification. - */ -- slapi_wait_condvar(something_changed, NULL); -+ slapi_wait_condvar_pt(something_changed, change_lock, NULL); - } else { - /* Something to do...do it below */ - } -diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c -index 16c625bb0..1cb54580b 100644 ---- a/ldap/servers/plugins/dna/dna.c -+++ b/ldap/servers/plugins/dna/dna.c -@@ -907,7 +907,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) - * performing the operation at this point when - * starting up would cause the change to not - * get changelogged. */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); - } else { - dna_update_config_event(0, NULL); -diff --git a/ldap/servers/plugins/passthru/ptconn.c b/ldap/servers/plugins/passthru/ptconn.c -index 49040f651..637d33843 100644 ---- a/ldap/servers/plugins/passthru/ptconn.c -+++ b/ldap/servers/plugins/passthru/ptconn.c -@@ -233,7 +233,7 @@ passthru_get_connection(PassThruServer *srvr, LDAP **ldp) - slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, - "... passthru_get_connection waiting for conn to free up\n"); - #endif -- slapi_wait_condvar(srvr->ptsrvr_connlist_cv, NULL); -+ slapi_wait_condvar_pt(srvr->ptsrvr_connlist_cv, srvr->ptsrvr_connlist_mutex, NULL); - - #ifdef PASSTHRU_VERBOSE_LOGGING - slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, -diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c -index eb4b089fb..fd5356d72 100644 ---- a/ldap/servers/plugins/referint/referint.c -+++ b/ldap/servers/plugins/referint/referint.c -@@ -71,8 +71,9 @@ void referint_get_config(int *delay, char **logfile); - /* global thread control stuff */ - static PRLock *referint_mutex = NULL; - static PRThread *referint_tid = NULL; --static PRLock *keeprunning_mutex = NULL; --static PRCondVar *keeprunning_cv = NULL; -+static pthread_mutex_t keeprunning_mutex; -+static pthread_cond_t keeprunning_cv; -+ - static int keeprunning = 0; - static referint_config *config = NULL; - static Slapi_DN *_ConfigAreaDN = NULL; -@@ -1302,12 +1303,38 @@ referint_postop_start(Slapi_PBlock *pb) - * -1 = integrity off - */ - if (referint_get_delay() > 0) { -+ pthread_condattr_t condAttr; -+ - /* initialize the cv and lock */ - if (!use_txn && (NULL == referint_mutex)) { - referint_mutex = PR_NewLock(); - } -- keeprunning_mutex = PR_NewLock(); -- keeprunning_cv = PR_NewCondVar(keeprunning_mutex); -+ if ((rc = pthread_mutex_init(&keeprunning_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_cond_init(&keeprunning_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - keeprunning = 1; - - referint_tid = PR_CreateThread(PR_USER_THREAD, -@@ -1337,13 +1364,11 @@ int - referint_postop_close(Slapi_PBlock *pb __attribute__((unused))) - { - /* signal the thread to exit */ -- if (NULL != keeprunning_mutex) { -- PR_Lock(keeprunning_mutex); -+ if (referint_get_delay() > 0) { -+ pthread_mutex_lock(&keeprunning_mutex); - keeprunning = 0; -- if (NULL != keeprunning_cv) { -- PR_NotifyCondVar(keeprunning_cv); -- } -- PR_Unlock(keeprunning_mutex); -+ pthread_cond_signal(&keeprunning_cv); -+ pthread_mutex_unlock(&keeprunning_mutex); - } - - slapi_destroy_rwlock(config_rwlock); -@@ -1369,6 +1394,7 @@ referint_thread_func(void *arg __attribute__((unused))) - char *iter = NULL; - Slapi_DN *sdn = NULL; - Slapi_DN *tmpsuperior = NULL; -+ struct timespec current_time = {0}; - int delay; - int no_changes; - -@@ -1383,20 +1409,22 @@ referint_thread_func(void *arg __attribute__((unused))) - no_changes = 1; - while (no_changes) { - -- PR_Lock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); - if (keeprunning == 0) { -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - break; - } -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - - referint_lock(); - if ((prfd = PR_Open(logfilename, PR_RDONLY, REFERINT_DEFAULT_FILE_MODE)) == NULL) { - referint_unlock(); - /* go back to sleep and wait for this file */ -- PR_Lock(keeprunning_mutex); -- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += delay; -+ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); -+ pthread_mutex_unlock(&keeprunning_mutex); - } else { - no_changes = 0; - } -@@ -1407,12 +1435,12 @@ referint_thread_func(void *arg __attribute__((unused))) - * loop before trying to do the changes. The server - * will pick them up on next startup as file still exists - */ -- PR_Lock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); - if (keeprunning == 0) { -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - break; - } -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - - while (GetNextLine(thisline, MAX_LINE, prfd)) { - ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter); -@@ -1459,21 +1487,16 @@ referint_thread_func(void *arg __attribute__((unused))) - referint_unlock(); - - /* wait on condition here */ -- PR_Lock(keeprunning_mutex); -- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += delay; -+ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); -+ pthread_mutex_unlock(&keeprunning_mutex); - } - - /* cleanup resources allocated in start */ -- if (NULL != keeprunning_mutex) { -- PR_DestroyLock(keeprunning_mutex); -- } -- if (NULL != referint_mutex) { -- PR_DestroyLock(referint_mutex); -- } -- if (NULL != keeprunning_cv) { -- PR_DestroyCondVar(keeprunning_cv); -- } -+ pthread_mutex_destroy(&keeprunning_mutex); -+ pthread_cond_destroy(&keeprunning_cv); - slapi_ch_free_string(&logfilename); - } - -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index f1c596a3f..06e747811 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2010 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. - * All rights reserved. - * -@@ -28,6 +28,7 @@ - #include "llist.h" - #include "repl5_ruv.h" - #include "plstr.h" -+#include - - #define START_UPDATE_DELAY 2 /* 2 second */ - #define REPLICA_TYPE_WINDOWS 1 -diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c -index 40848b96d..40ec75dd7 100644 ---- a/ldap/servers/plugins/replication/repl5_backoff.c -+++ b/ldap/servers/plugins/replication/repl5_backoff.c -@@ -110,7 +110,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - bt->next_interval = bt->initial_interval; - } - /* Schedule the callback */ -- bt->last_fire_time = slapi_current_utc_time(); -+ bt->last_fire_time = slapi_current_rel_time_t(); - return_value = bt->last_fire_time + bt->next_interval; - bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, - return_value); -@@ -177,7 +177,7 @@ backoff_expired(Backoff_Timer *bt, int margin) - - PR_ASSERT(NULL != bt); - PR_Lock(bt->lock); -- return_value = (slapi_current_utc_time() >= (bt->last_fire_time + bt->next_interval + margin)); -+ return_value = (slapi_current_rel_time_t() >= (bt->last_fire_time + bt->next_interval + margin)); - PR_Unlock(bt->lock); - return return_value; - } -diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c -index cf57c2156..bc9ca424b 100644 ---- a/ldap/servers/plugins/replication/repl5_connection.c -+++ b/ldap/servers/plugins/replication/repl5_connection.c -@@ -402,7 +402,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda - } - if (block) { - /* Did the connection's timeout expire ? */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn->timeout.tv_sec <= (time_now - start_time)) { - /* We timed out */ - rc = 0; -@@ -676,7 +676,7 @@ conn_is_available(Repl_Connection *conn) - { - time_t poll_timeout_sec = 1; /* Polling for 1sec */ - time_t yield_delay_msec = 100; /* Delay to wait */ -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - time_t time_now; - ConnResult return_value = CONN_OPERATION_SUCCESS; - -@@ -686,7 +686,7 @@ conn_is_available(Repl_Connection *conn) - /* in case of timeout we return CONN_TIMEOUT only - * if the RA.timeout is exceeded - */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn->timeout.tv_sec <= (time_now - start_time)) { - break; - } else { -@@ -1010,7 +1010,7 @@ linger_timeout(time_t event_time __attribute__((unused)), void *arg) - void - conn_start_linger(Repl_Connection *conn) - { -- time_t now; -+ time_t now = slapi_current_rel_time_t(); - - PR_ASSERT(NULL != conn); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -@@ -1022,7 +1022,7 @@ conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - return; - } -- now = slapi_current_utc_time(); -+ - PR_Lock(conn->lock); - if (conn->linger_active) { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -@@ -1989,7 +1989,7 @@ repl5_start_debug_timeout(int *setlevel) - { - Slapi_Eq_Context eqctx = 0; - if (s_debug_timeout && s_debug_level) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, - s_debug_timeout + now); - } -diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c -index af5e5897c..4bb384882 100644 ---- a/ldap/servers/plugins/replication/repl5_inc_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -129,7 +129,7 @@ typedef struct result_data - * don't see any updates for a period equal to this interval, - * we go ahead and start a replication session, just to be safe - */ --#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ -+#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ - - /* - * tests if the protocol has been shutdown and we need to quit -@@ -145,7 +145,7 @@ typedef struct result_data - /* Forward declarations */ - static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); - static void reset_events(Private_Repl_Protocol *prp); --static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); -+static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); - static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent); - static void repl5_inc_backoff_expired(time_t timer_fire_time, void *arg); - static int examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); -@@ -253,7 +253,7 @@ repl5_inc_result_threadmain(void *param) - char *uniqueid = NULL; - char *ldap_error_string = NULL; - time_t time_now = 0; -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - int connection_error = 0; - int operation_code = 0; - int backoff_time = 1; -@@ -275,7 +275,7 @@ repl5_inc_result_threadmain(void *param) - /* We need to a) check that the 'real' timeout hasn't expired and - * b) implement a backoff sleep to avoid spinning */ - /* Did the connection's timeout expire ? */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn_get_timeout(conn) <= (time_now - start_time)) { - /* We timed out */ - conres = CONN_TIMEOUT; -@@ -358,7 +358,7 @@ repl5_inc_result_threadmain(void *param) - /* Should we stop ? */ - PR_Lock(rd->lock); - if (!finished && yield_session && rd->abort != SESSION_ABORTED && rd->abort_time == 0) { -- rd->abort_time = slapi_current_utc_time(); -+ rd->abort_time = slapi_current_rel_time_t(); - rd->abort = SESSION_ABORTED; /* only set the abort time once */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "repl5_inc_result_threadmain - " - "Abort control detected, setting abort time...(%s)\n", -@@ -532,13 +532,11 @@ repl5_inc_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -712,7 +710,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - conn_set_agmt_changed(prp->conn); - } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) { /* change available */ - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || - event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { - /* this events - should not occur - log a warning and go to sleep */ -@@ -720,13 +718,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) - "repl5_inc_run - %s: " - "Event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* wait until window opens or an event occurs */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_inc_run - %s: Waiting for update window to open\n", - agmt_get_long_name(prp->agmt)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -850,7 +848,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - } - next_state = STATE_BACKOFF; - backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - use_busy_backoff_timer = PR_FALSE; - } - break; -@@ -899,13 +897,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) - */ - if (STATE_BACKOFF == next_state) { - /* Step the backoff timer */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - next_fire_time = backoff_step(prp_priv->backoff); - /* And go back to sleep */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_inc_run - %s: Replication session backing off for %ld seconds\n", - agmt_get_long_name(prp->agmt), next_fire_time - now); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* Destroy the backoff timer, since we won't need it anymore */ - backoff_delete(&prp_priv->backoff); -@@ -923,7 +921,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - next_state = STATE_READY_TO_ACQUIRE; - } else { - /* ignore changes and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { - /* this should never happen - log an error and go to sleep */ -@@ -931,7 +929,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - "Event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), event2name(EVENT_WINDOW_OPENED), - state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -1178,7 +1176,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - reset_events(prp); - } - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - break; - - case STATE_STOP_NORMAL_TERMINATION: -@@ -1209,20 +1207,28 @@ repl5_inc_run(Private_Repl_Protocol *prp) - * Go to sleep until awakened. - */ - static void --protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) -+protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - /* we should not go to sleep if there are events available to be processed. - Otherwise, we can miss the event that suppose to wake us up */ -- if (prp->eventbits == 0) -- PR_WaitCondVar(prp->cvar, duration); -- else { -+ if (prp->eventbits == 0) { -+ if (duration > 0) { -+ struct timespec current_time = {0}; -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += duration; -+ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), ¤t_time); -+ } else { -+ pthread_cond_wait(&(prp->cvar), &(prp->lock)); -+ } -+ } else { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", - agmt_get_long_name(prp->agmt), prp->eventbits); - } -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1235,10 +1241,10 @@ static void - event_notify(Private_Repl_Protocol *prp, PRUint32 event) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits |= event; -- PR_NotifyCondVar(prp->cvar); -- PR_Unlock(prp->lock); -+ pthread_cond_signal(&(prp->cvar)); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1250,10 +1256,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) - { - PRUint32 return_value; - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - return_value = (prp->eventbits & event); - prp->eventbits &= ~event; /* Clear event */ -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - return return_value; - } - -@@ -1261,9 +1267,9 @@ static void - reset_events(Private_Repl_Protocol *prp) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits = 0; -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1882,7 +1888,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - /* See if the result thread has hit a problem */ - - if (!finished && rd->abort_time) { -- time_t current_time = slapi_current_utc_time(); -+ time_t current_time = slapi_current_rel_time_t(); - if ((current_time - rd->abort_time) >= release_timeout) { - rd->result = UPDATE_YIELD; - return_value = UPDATE_YIELD; -@@ -2088,7 +2094,9 @@ Private_Repl_Protocol * - Repl_5_Inc_Protocol_new(Repl_Protocol *rp) - { - repl5_inc_private *rip = NULL; -- Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; /* the pthread condition attr */ -+ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ - prp->delete = repl5_inc_delete; - prp->run = repl5_inc_run; - prp->stop = repl5_inc_stop; -@@ -2099,12 +2107,19 @@ Repl_5_Inc_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = repl5_inc_notify_window_closed; - prp->update_now = repl5_inc_update_now; - prp->replica = prot_get_replica(rp); -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 0; - prp->terminate = 0; - prp->eventbits = 0; -diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -index 08a58613b..82e230958 100644 ---- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c -+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -@@ -82,7 +82,8 @@ multimaster_mtnode_construct_replicas() - } - } - /* Wait a few seconds for everything to startup before resuming any replication tasks */ -- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), time(NULL) + 5); -+ slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), -+ slapi_current_rel_time_t() + 5); - } - } - } -diff --git a/ldap/servers/plugins/replication/repl5_prot_private.h b/ldap/servers/plugins/replication/repl5_prot_private.h -index 5b2e1b3ca..0673f1978 100644 ---- a/ldap/servers/plugins/replication/repl5_prot_private.h -+++ b/ldap/servers/plugins/replication/repl5_prot_private.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -32,8 +32,6 @@ typedef struct private_repl_protocol - void (*notify_window_opened)(struct private_repl_protocol *); - void (*notify_window_closed)(struct private_repl_protocol *); - void (*update_now)(struct private_repl_protocol *); -- PRLock *lock; -- PRCondVar *cvar; - int stopped; - int terminate; - PRUint32 eventbits; -@@ -46,6 +44,8 @@ typedef struct private_repl_protocol - int repl50consumer; /* Flag to tell us if this is a 5.0-style consumer we're talking to */ - int repl71consumer; /* Flag to tell us if this is a 7.1-style consumer we're talking to */ - int repl90consumer; /* Flag to tell us if this is a 9.0-style consumer we're talking to */ -+ pthread_mutex_t lock; -+ pthread_cond_t cvar; - } Private_Repl_Protocol; - - extern Private_Repl_Protocol *Repl_5_Inc_Protocol_new(Repl_Protocol *rp); -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index 7e56d6557..c1d376c72 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -232,7 +232,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - In that case the updated would fail but nothing bad would happen. The next - scheduled update would save the state */ - r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - - if (r->tombstone_reap_interval > 0) { - /* -@@ -240,7 +240,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - * This will allow the server to fully start before consuming resources. - */ - r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_utc_time() + r->tombstone_reap_interval, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - } - -@@ -1088,7 +1088,7 @@ replica_is_updatedn(Replica *r, const Slapi_DN *sdn) - if (r->groupdn_list) { - /* check and rebuild groupdns */ - if (r->updatedn_group_check_interval > -1) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - if (now - r->updatedn_group_last_check > r->updatedn_group_check_interval) { - Slapi_ValueSet *updatedn_groups_copy = NULL; - ReplicaUpdateDNList groupdn_list = replica_updatedn_list_new(NULL); -@@ -1512,7 +1512,7 @@ replica_set_enabled(Replica *r, PRBool enable) - if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ - { - r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - } - } else /* disable */ - { -@@ -3637,7 +3637,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - r->tombstone_reap_interval = interval; - if (interval > 0 && r->repl_eqcxt_tr == NULL) { - r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_utc_time() + r->tombstone_reap_interval, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", -diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c -index d64d4bf45..a969ef82f 100644 ---- a/ldap/servers/plugins/replication/repl5_replica_config.c -+++ b/ldap/servers/plugins/replication/repl5_replica_config.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -31,14 +31,17 @@ - #define CLEANALLRUVLEN 11 - #define REPLICA_RDN "cn=replica" - -+#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ -+#define CLEANALLRUV_SLEEP 5 -+ - int slapi_log_urp = SLAPI_LOG_REPL; - static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0}; - static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0}; - static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0}; - static PRLock *rid_lock = NULL; - static PRLock *abort_rid_lock = NULL; --static PRLock *notify_lock = NULL; --static PRCondVar *notify_cvar = NULL; -+static pthread_mutex_t notify_lock; -+static pthread_cond_t notify_cvar; - static PRLock *task_count_lock = NULL; - static int32_t clean_task_count = 0; - static int32_t abort_task_count = 0; -@@ -105,6 +108,9 @@ dont_allow_that(Slapi_PBlock *pb __attribute__((unused)), - int - replica_config_init() - { -+ int rc = 0; -+ pthread_condattr_t condAttr; -+ - s_configLock = PR_NewLock(); - - if (s_configLock == NULL) { -@@ -134,18 +140,31 @@ replica_config_init() - PR_GetError()); - return -1; - } -- if ((notify_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " -- "Failed to create notify lock; NSPR error - %d\n", -- PR_GetError()); -+ if ((rc = pthread_mutex_init(¬ify_lock, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create notify lock: error %d (%s)\n", -+ rc, strerror(rc)); - return -1; - } -- if ((notify_cvar = PR_NewCondVar(notify_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " -- "Failed to create notify cond var; NSPR error - %d\n", -- PR_GetError()); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create notify new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - return -1; - } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ return -1; -+ } -+ if ((rc = pthread_cond_init(¬ify_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ return -1; -+ } -+ pthread_condattr_destroy(&condAttr); - - /* config DSE must be initialized before we get here */ - slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, CONFIG_BASE, LDAP_SCOPE_SUBTREE, -@@ -1674,9 +1693,13 @@ replica_cleanallruv_thread(void *arg) - * to startup timing issues, we need to wait before grabbing the replica obj, as - * the backends might not be online yet. - */ -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(10)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += 10; -+ -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - data->replica = replica_get_replica_from_dn(data->sdn); - if (data->replica == NULL) { - cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Unable to retrieve repl object from dn(%s).", data->sdn); -@@ -1720,15 +1743,18 @@ replica_cleanallruv_thread(void *arg) - ruv_obj = replica_get_ruv(data->replica); - ruv = object_get_data(ruv_obj); - while (data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; - if (csn_get_replicaid(data->maxcsn) == 0 || - ruv_covers_csn_cleanallruv(ruv, data->maxcsn) || - strcasecmp(data->force, "yes") == 0) { - /* We are caught up, now we can clean the ruv's */ - break; - } -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(5)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += CLEANALLRUV_SLEEP; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } - object_release(ruv_obj); - /* -@@ -1796,18 +1822,20 @@ replica_cleanallruv_thread(void *arg) - /* - * need to sleep between passes - */ -- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas have received the " -- "cleanallruv extended op, retrying in %d seconds", -+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, -+ "Not all replicas have received the cleanallruv extended op, retrying in %d seconds", - interval); - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - /* -@@ -1857,18 +1885,19 @@ replica_cleanallruv_thread(void *arg) - * Need to sleep between passes unless we are shutting down - */ - if (!slapi_is_shutting_down()) { -- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replicas have not been cleaned yet, " -- "retrying in %d seconds", -+ struct timespec current_time = {0}; -+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, -+ "Replicas have not been cleaned yet, retrying in %d seconds", - interval); -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } -- -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } /* while */ - -@@ -2081,15 +2110,17 @@ check_replicas_are_done_cleaning(cleanruv_data *data) - "Not all replicas finished cleaning, retrying in %d seconds", - interval); - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_lock(¬ify_lock); - } - -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&filter); -@@ -2190,14 +2221,16 @@ check_replicas_are_done_aborting(cleanruv_data *data) - cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, - "Not all replicas finished aborting, retrying in %d seconds", interval); - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&filter); -@@ -2248,14 +2281,16 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn) - cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, - "Not all replicas caught up, retrying in %d seconds", interval); - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&rid_text); -@@ -2310,14 +2345,16 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task) - interval); - - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - if (is_task_aborted(rid)) { -@@ -3093,16 +3130,18 @@ replica_abort_task_thread(void *arg) - * Need to sleep between passes. unless we are shutting down - */ - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; - cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Retrying in %d seconds", interval); -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); -+ pthread_mutex_unlock(¬ify_lock); - } - -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } /* while */ - -@@ -3536,10 +3575,10 @@ check_and_set_abort_cleanruv_task_count(void) - - PR_Lock(task_count_lock); - if (abort_task_count > CLEANRIDSIZ) { -- rc = -1; -- } else { -- abort_task_count++; -- } -+ rc = -1; -+ } else { -+ abort_task_count++; -+ } - PR_Unlock(task_count_lock); - - return rc; -@@ -3551,11 +3590,9 @@ check_and_set_abort_cleanruv_task_count(void) - void - stop_ruv_cleaning() - { -- if (notify_lock) { -- PR_Lock(notify_lock); -- PR_NotifyCondVar(notify_cvar); -- PR_Unlock(notify_lock); -- } -+ pthread_mutex_lock(¬ify_lock); -+ pthread_cond_signal(¬ify_cvar); -+ pthread_mutex_unlock(¬ify_lock); - } - - /* -diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c -index a25839f21..f67263c3e 100644 ---- a/ldap/servers/plugins/replication/repl5_tot_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -45,7 +45,7 @@ typedef struct callback_data - unsigned long num_entries; - time_t sleep_on_busy; - time_t last_busy; -- PRLock *lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ -+ pthread_mutex_t lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ - PRThread *result_tid; /* The async result thread */ - operation_id_list_item *message_id_list; /* List of IDs for outstanding operations */ - int abort; /* Flag used to tell the sending thread asyncronously that it should abort (because an error came up in a result) */ -@@ -113,7 +113,7 @@ repl5_tot_result_threadmain(void *param) - while (!finished) { - int message_id = 0; - time_t time_now = 0; -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - int backoff_time = 1; - - /* Read the next result */ -@@ -130,7 +130,7 @@ repl5_tot_result_threadmain(void *param) - /* We need to a) check that the 'real' timeout hasn't expired and - * b) implement a backoff sleep to avoid spinning */ - /* Did the connection's timeout expire ? */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn_get_timeout(conn) <= (time_now - start_time)) { - /* We timed out */ - conres = CONN_TIMEOUT; -@@ -142,11 +142,11 @@ repl5_tot_result_threadmain(void *param) - backoff_time <<= 1; - } - /* Should we stop ? */ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - if (cb->stop_result_thread) { - finished = 1; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } else { - /* Something other than a timeout, so we exit the loop */ - break; -@@ -164,21 +164,21 @@ repl5_tot_result_threadmain(void *param) - /* Was the result itself an error ? */ - if (0 != conres) { - /* If so then we need to take steps to abort the update process */ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - cb->abort = 1; - if (conres == CONN_NOT_CONNECTED) { - cb->rc = LDAP_CONNECT_ERROR; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } - /* Should we stop ? */ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - /* if the connection is not connected, then we cannot read any more - results - we are finished */ - if (cb->stop_result_thread || (conres == CONN_NOT_CONNECTED)) { - finished = 1; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } - } - -@@ -209,9 +209,9 @@ repl5_tot_destroy_async_result_thread(callback_data *cb_data) - int retval = 0; - PRThread *tid = cb_data->result_tid; - if (tid) { -- PR_Lock(cb_data->lock); -+ pthread_mutex_lock(&(cb_data->lock)); - cb_data->stop_result_thread = 1; -- PR_Unlock(cb_data->lock); -+ pthread_mutex_unlock(&(cb_data->lock)); - (void)PR_JoinThread(tid); - } - return retval; -@@ -248,7 +248,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) - /* Keep pulling results off the LDAP connection until we catch up to the last message id stored in the rd */ - while (!done) { - /* Lock the structure to force memory barrier */ -- PR_Lock(cb_data->lock); -+ pthread_mutex_lock(&(cb_data->lock)); - /* Are we caught up ? */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_tot_waitfor_async_results - %d %d\n", -@@ -260,7 +260,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) - if (cb_data->abort && LOST_CONN_ERR(cb_data->rc)) { - done = 1; /* no connection == no more results */ - } -- PR_Unlock(cb_data->lock); -+ pthread_mutex_unlock(&(cb_data->lock)); - /* If not then sleep a bit */ - DS_Sleep(PR_SecondsToInterval(1)); - loops++; -@@ -482,9 +482,9 @@ retry: - cb_data.rc = 0; - cb_data.num_entries = 1UL; - cb_data.sleep_on_busy = 0UL; -- cb_data.last_busy = slapi_current_utc_time(); -+ cb_data.last_busy = slapi_current_rel_time_t(); - cb_data.flowcontrol_detection = 0; -- cb_data.lock = PR_NewLock(); -+ pthread_mutex_init(&(cb_data.lock), NULL); - - /* This allows during perform_operation to check the callback data - * especially to do flow contol on delta send msgid / recv msgid -@@ -541,9 +541,9 @@ retry: - cb_data.rc = 0; - cb_data.num_entries = 0UL; - cb_data.sleep_on_busy = 0UL; -- cb_data.last_busy = slapi_current_utc_time(); -+ cb_data.last_busy = slapi_current_rel_time_t(); - cb_data.flowcontrol_detection = 0; -- cb_data.lock = PR_NewLock(); -+ pthread_mutex_init(&(cb_data.lock), NULL); - - /* This allows during perform_operation to check the callback data - * especially to do flow contol on delta send msgid / recv msgid -@@ -633,9 +633,7 @@ done: - type_nsds5ReplicaFlowControlWindow); - } - conn_set_tot_update_cb(prp->conn, NULL); -- if (cb_data.lock) { -- PR_DestroyLock(cb_data.lock); -- } -+ pthread_mutex_destroy(&(cb_data.lock)); - prp->stopped = 1; - } - -@@ -700,7 +698,9 @@ Private_Repl_Protocol * - Repl_5_Tot_Protocol_new(Repl_Protocol *rp) - { - repl5_tot_private *rip = NULL; -- Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; -+ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ - prp->delete = repl5_tot_delete; - prp->run = repl5_tot_run; - prp->stop = repl5_tot_stop; -@@ -710,12 +710,19 @@ Repl_5_Tot_Protocol_new(Repl_Protocol *rp) - prp->notify_window_opened = repl5_tot_noop; - prp->notify_window_closed = repl5_tot_noop; - prp->update_now = repl5_tot_noop; -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 1; - prp->terminate = 0; - prp->eventbits = 0; -@@ -744,13 +751,11 @@ repl5_tot_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -824,9 +829,9 @@ send_entry(Slapi_Entry *e, void *cb_data) - - /* see if the result reader thread encountered - a fatal error */ -- PR_Lock(((callback_data *)cb_data)->lock); -+ pthread_mutex_lock((&((callback_data *)cb_data)->lock)); - rc = ((callback_data *)cb_data)->abort; -- PR_Unlock(((callback_data *)cb_data)->lock); -+ pthread_mutex_unlock((&((callback_data *)cb_data)->lock)); - if (rc) { - conn_disconnect(prp->conn); - ((callback_data *)cb_data)->rc = -1; -@@ -889,7 +894,7 @@ send_entry(Slapi_Entry *e, void *cb_data) - } - - if (rc == CONN_BUSY) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - if ((now - *last_busyp) < (*sleep_on_busyp + 10)) { - *sleep_on_busyp += 5; - } else { -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index af486f730..ef2025dd9 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) - /* now that the changelog is open and started, we can alos cretae the - * keep alive entry without risk that db and cl will not match - */ -- replica_subentry_check(replica_get_root(r), replica_get_rid(r)); -+ replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r)); - } - - /* ONREPL code that dealt with new RUV, etc was moved into the code -@@ -1474,7 +1474,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) - * Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid - */ - -- cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n"); -+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread..."); - data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data)); - if (data == NULL) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate " -diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c -index 011b328bf..ce0662544 100644 ---- a/ldap/servers/plugins/replication/windows_connection.c -+++ b/ldap/servers/plugins/replication/windows_connection.c -@@ -1121,7 +1121,7 @@ windows_conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - return; - } -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - PR_Lock(conn->lock); - if (conn->linger_active) { - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, -diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c -index 1c07534e3..3d548e5ed 100644 ---- a/ldap/servers/plugins/replication/windows_inc_protocol.c -+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -48,7 +48,7 @@ typedef struct windows_inc_private - char *ruv; /* RUV on remote replica (use diff type for this? - ggood */ - Backoff_Timer *backoff; - Repl_Protocol *rp; -- PRLock *lock; -+ pthread_mutex_t *lock; - PRUint32 eventbits; - } windows_inc_private; - -@@ -96,7 +96,7 @@ typedef struct windows_inc_private - * don't see any updates for a period equal to this interval, - * we go ahead and start a replication session, just to be safe - */ --#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ -+#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ - /* - * tests if the protocol has been shutdown and we need to quit - * event_occurred resets the bits in the bit flag, so whoever tests for shutdown -@@ -108,7 +108,7 @@ typedef struct windows_inc_private - /* Forward declarations */ - static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); - static void reset_events(Private_Repl_Protocol *prp); --static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); -+static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); - static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent, int do_send); - static void windows_inc_backoff_expired(time_t timer_fire_time, void *arg); - static int windows_examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); -@@ -143,13 +143,11 @@ windows_inc_delete(Private_Repl_Protocol **prpp) - (*prpp)->stopped = 1; - (*prpp)->stop(*prpp); - } -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -360,7 +358,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) /* change available */ - { - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || - event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { - /* this events - should not occur - log a warning and go to sleep */ -@@ -370,18 +368,18 @@ windows_inc_run(Private_Repl_Protocol *prp) - agmt_get_long_name(prp->agmt), - e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), - state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if (event_occurred(prp, EVENT_RUN_DIRSYNC)) /* periodic_dirsync */ - { - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* wait until window opens or an event occurs */ - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - %s: " - "Waiting for update window to open\n", - agmt_get_long_name(prp->agmt)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -536,7 +534,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - } - next_state = STATE_BACKOFF; - backoff_reset(prp_priv->backoff, windows_inc_backoff_expired, (void *)prp); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - use_busy_backoff_timer = PR_FALSE; - } - break; -@@ -605,7 +603,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - agmt_get_long_name(prp->agmt), - next_fire_time - now); - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* Destroy the backoff timer, since we won't need it anymore */ - backoff_delete(&prp_priv->backoff); -@@ -624,7 +622,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - next_state = STATE_READY_TO_ACQUIRE; - } else { - /* ignore changes and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { - /* this should never happen - log an error and go to sleep */ -@@ -632,7 +630,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - "event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), - event2name(EVENT_WINDOW_OPENED), state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - case STATE_SENDING_UPDATES: -@@ -856,7 +854,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - reset_events(prp); - } - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - break; - - case STATE_STOP_NORMAL_TERMINATION: -@@ -891,21 +889,29 @@ windows_inc_run(Private_Repl_Protocol *prp) - * Go to sleep until awakened. - */ - static void --protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) -+protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> protocol_sleep\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - /* we should not go to sleep if there are events available to be processed. - Otherwise, we can miss the event that suppose to wake us up */ -- if (prp->eventbits == 0) -- PR_WaitCondVar(prp->cvar, duration); -- else { -+ if (prp->eventbits == 0) { -+ if (duration > 0) { -+ struct timespec current_time = {0}; -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += duration; -+ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), ¤t_time); -+ } else { -+ pthread_cond_wait(&(prp->cvar), &(prp->lock)); -+ } -+ } else { - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", - agmt_get_long_name(prp->agmt), prp->eventbits); - } -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= protocol_sleep\n"); - } - -@@ -921,10 +927,10 @@ event_notify(Private_Repl_Protocol *prp, PRUint32 event) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_notify\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits |= event; -- PR_NotifyCondVar(prp->cvar); -- PR_Unlock(prp->lock); -+ pthread_cond_signal(&(prp->cvar)); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_notify\n"); - } - -@@ -941,10 +947,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_occurred\n"); - - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - return_value = (prp->eventbits & event); - prp->eventbits &= ~event; /* Clear event */ -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_occurred\n"); - return return_value; - } -@@ -954,9 +960,9 @@ reset_events(Private_Repl_Protocol *prp) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> reset_events\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits = 0; -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= reset_events\n"); - } - -@@ -1416,6 +1422,7 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) - { - windows_inc_private *rip = NULL; - Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; - - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Inc_Protocol_new\n"); - -@@ -1429,12 +1436,19 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = windows_inc_notify_window_closed; - prp->update_now = windows_inc_update_now; - prp->replica = prot_get_replica(rp); -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); /* no longer needed */ - prp->stopped = 0; - prp->terminate = 0; - prp->eventbits = 0; -diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c -index da244c166..f67e4dbd2 100644 ---- a/ldap/servers/plugins/replication/windows_tot_protocol.c -+++ b/ldap/servers/plugins/replication/windows_tot_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -326,6 +326,7 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) - { - windows_tot_private *rip = NULL; - Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; - - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Tot_Protocol_new\n"); - -@@ -339,12 +340,19 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = windows_tot_noop; - prp->replica = prot_get_replica(rp); - prp->update_now = windows_tot_noop; -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_condattr_init(&cattr) != 0) { - goto loser; - } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { -+ goto loser; -+ } -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { -+ goto loser; -+ } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 1; - prp->terminate = 0; - prp->eventbits = 0; -@@ -373,13 +381,11 @@ windows_tot_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c -index d031dc3f8..a3e16c4e1 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_trim.c -+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c -@@ -241,7 +241,7 @@ trim_changelog(void) - int me, lt; - - -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - - PR_Lock(ts.ts_s_trim_mutex); - me = ts.ts_c_max_age; -diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c -index de99ba233..3d076a4cb 100644 ---- a/ldap/servers/plugins/roles/roles_cache.c -+++ b/ldap/servers/plugins/roles/roles_cache.c -@@ -343,7 +343,7 @@ roles_cache_create_suffix(Slapi_DN *sdn) - - slapi_lock_mutex(new_suffix->create_lock); - if (new_suffix->is_ready != 1) { -- slapi_wait_condvar(new_suffix->suffix_created, NULL); -+ slapi_wait_condvar_pt(new_suffix->suffix_created, new_suffix->create_lock, NULL); - } - slapi_unlock_mutex(new_suffix->create_lock); - -@@ -384,7 +384,7 @@ roles_cache_wait_on_change(void *arg) - test roles_def->keeprunning before - going to sleep. - */ -- slapi_wait_condvar(roles_def->something_changed, NULL); -+ slapi_wait_condvar_pt(roles_def->something_changed, roles_def->change_lock, NULL); - - slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "roles_cache_wait_on_change - notified\n"); - -diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h -index 51d0da6e0..7241fddbf 100644 ---- a/ldap/servers/plugins/sync/sync.h -+++ b/ldap/servers/plugins/sync/sync.h -@@ -201,8 +201,8 @@ typedef struct sync_request_list - { - Slapi_RWLock *sync_req_rwlock; /* R/W lock struct to serialize access */ - SyncRequest *sync_req_head; /* Head of list */ -- PRLock *sync_req_cvarlock; /* Lock for cvar */ -- PRCondVar *sync_req_cvar; /* ps threads sleep on this */ -+ pthread_mutex_t sync_req_cvarlock; /* Lock for cvar */ -+ pthread_cond_t sync_req_cvar; /* ps threads sleep on this */ - int sync_req_max_persist; - int sync_req_cur_persist; - } SyncRequestList; -diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c -index 598c6868d..d13f142b0 100644 ---- a/ldap/servers/plugins/sync/sync_persist.c -+++ b/ldap/servers/plugins/sync/sync_persist.c -@@ -463,19 +463,40 @@ int - sync_persist_initialize(int argc, char **argv) - { - if (!SYNC_IS_INITIALIZED()) { -+ pthread_condattr_t sync_req_condAttr; /* cond var attribute */ -+ int rc = 0; -+ - sync_request_list = (SyncRequestList *)slapi_ch_calloc(1, sizeof(SyncRequestList)); - if ((sync_request_list->sync_req_rwlock = slapi_new_rwlock()) == NULL) { - slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(1).\n"); - return (-1); - } -- if ((sync_request_list->sync_req_cvarlock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(2).\n"); -+ if (pthread_mutex_init(&(sync_request_list->sync_req_cvarlock), NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create lock: error %d (%s)\n", -+ rc, strerror(rc)); -+ return (-1); -+ } -+ if ((rc = pthread_condattr_init(&sync_req_condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - return (-1); - } -- if ((sync_request_list->sync_req_cvar = PR_NewCondVar(sync_request_list->sync_req_cvarlock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize condition variable.\n"); -+ if ((rc = pthread_condattr_setclock(&sync_req_condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); - return (-1); - } -+ if ((rc = pthread_cond_init(&(sync_request_list->sync_req_cvar), &sync_req_condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ return (-1); -+ } -+ pthread_condattr_destroy(&sync_req_condAttr); /* no longer needed */ -+ - sync_request_list->sync_req_head = NULL; - sync_request_list->sync_req_cur_persist = 0; - sync_request_list->sync_req_max_persist = SYNC_MAX_CONCURRENT; -@@ -617,8 +638,8 @@ sync_persist_terminate_all() - } - - slapi_destroy_rwlock(sync_request_list->sync_req_rwlock); -- PR_DestroyLock(sync_request_list->sync_req_cvarlock); -- PR_DestroyCondVar(sync_request_list->sync_req_cvar); -+ pthread_mutex_destroy(&(sync_request_list->sync_req_cvarlock)); -+ pthread_cond_destroy(&(sync_request_list->sync_req_cvar)); - - /* it frees the structures, just in case it remained connected sync_repl client */ - for (req = sync_request_list->sync_req_head; NULL != req; req = next) { -@@ -725,9 +746,9 @@ static void - sync_request_wakeup_all(void) - { - if (SYNC_IS_INITIALIZED()) { -- PR_Lock(sync_request_list->sync_req_cvarlock); -- PR_NotifyAllCondVar(sync_request_list->sync_req_cvar); -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); -+ pthread_cond_broadcast(&(sync_request_list->sync_req_cvar)); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - } - } - -@@ -817,7 +838,7 @@ sync_send_results(void *arg) - goto done; - } - -- PR_Lock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); - - while ((conn_acq_flag == 0) && !req->req_complete && !plugin_closing) { - /* Check for an abandoned operation */ -@@ -833,7 +854,12 @@ sync_send_results(void *arg) - * connection code. Wake up every second to check if thread - * should terminate. - */ -- PR_WaitCondVar(sync_request_list->sync_req_cvar, PR_SecondsToInterval(1)); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += 1; -+ pthread_cond_timedwait(&(sync_request_list->sync_req_cvar), -+ &(sync_request_list->sync_req_cvarlock), -+ ¤t_time); - } else { - /* dequeue the item */ - int attrsonly; -@@ -864,7 +890,7 @@ sync_send_results(void *arg) - * Send the result. Since send_ldap_search_entry can block for - * up to 30 minutes, we relinquish all locks before calling it. - */ -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - - /* - * The entry is in the right scope and matches the filter -@@ -910,13 +936,13 @@ sync_send_results(void *arg) - ldap_controls_free(ectrls); - slapi_ch_array_free(noattrs); - } -- PR_Lock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); - - /* Deallocate our wrapper for this entry */ - sync_node_free(&qnode); - } - } -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - - /* indicate the end of search */ - sync_release_connection(req->req_pblock, conn, op, conn_acq_flag == 0); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -index 1e4830e99..ba783ee59 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -1429,21 +1429,22 @@ import_free_job(ImportJob *job) - * To avoid freeing fifo queue under bulk_import_queue use - * job lock to synchronize - */ -- if (job->wire_lock) -- PR_Lock(job->wire_lock); -+ if (&job->wire_lock) { -+ pthread_mutex_lock(&job->wire_lock); -+ } - - import_fifo_destroy(job); - -- if (job->wire_lock) -- PR_Unlock(job->wire_lock); -+ if (&job->wire_lock) { -+ pthread_mutex_unlock(&job->wire_lock); -+ } - } - -- if (NULL != job->uuid_namespace) -+ if (NULL != job->uuid_namespace) { - slapi_ch_free((void **)&job->uuid_namespace); -- if (job->wire_lock) -- PR_DestroyLock(job->wire_lock); -- if (job->wire_cv) -- PR_DestroyCondVar(job->wire_cv); -+ } -+ pthread_mutex_destroy(&job->wire_lock); -+ pthread_cond_destroy(&job->wire_cv); - slapi_ch_free((void **)&job->task_status); - } - -@@ -1777,7 +1778,7 @@ import_monitor_threads(ImportJob *job, int *status) - goto error_abort; - } - -- last_time = slapi_current_utc_time(); -+ last_time = slapi_current_rel_time_t(); - job->start_time = last_time; - import_clear_progress_history(job); - -@@ -1789,7 +1790,7 @@ import_monitor_threads(ImportJob *job, int *status) - - /* First calculate the time interval since last reported */ - if (0 == (count % display_interval)) { -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - time_interval = time_now - last_time; - last_time = time_now; - /* Now calculate our rate of progress overall for this chunk */ -@@ -2232,7 +2233,7 @@ bdb_import_main(void *arg) - opstr = "Reindexing"; - } - PR_ASSERT(inst != NULL); -- beginning = slapi_current_utc_time(); -+ beginning = slapi_current_rel_time_t(); - - /* Decide which indexes are needed */ - if (job->flags & FLAG_INDEX_ATTRS) { -@@ -2251,9 +2252,9 @@ bdb_import_main(void *arg) - ret = import_fifo_init(job); - if (ret) { - if (!(job->flags & FLAG_USE_FILES)) { -- PR_Lock(job->wire_lock); -- PR_NotifyCondVar(job->wire_cv); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); -+ pthread_cond_signal(&job->wire_cv); -+ pthread_mutex_unlock(&job->wire_lock); - } - goto error; - } -@@ -2315,9 +2316,9 @@ bdb_import_main(void *arg) - } else { - /* release the startup lock and let the entries start queueing up - * in for import */ -- PR_Lock(job->wire_lock); -- PR_NotifyCondVar(job->wire_cv); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); -+ pthread_cond_signal(&job->wire_cv); -+ pthread_mutex_unlock(&job->wire_lock); - } - - /* Run as many passes as we need to complete the job or die honourably in -@@ -2499,7 +2500,7 @@ error: - import_log_notice(job, SLAPI_LOG_WARNING, "bdb_import_main", "Failed to close database"); - } - } -- end = slapi_current_utc_time(); -+ end = slapi_current_rel_time_t(); - if (verbose && (0 == ret)) { - int seconds_to_import = end - beginning; - size_t entries_processed = job->lead_ID - (job->starting_ID - 1); -@@ -3393,7 +3394,7 @@ import_mega_merge(ImportJob *job) - passes, (long unsigned int)job->number_indexers); - } - -- beginning = slapi_current_utc_time(); -+ beginning = slapi_current_rel_time_t(); - /* Iterate over the files */ - for (current_worker = job->worker_list; - (ret == 0) && (current_worker != NULL); -@@ -3405,9 +3406,9 @@ import_mega_merge(ImportJob *job) - time_t file_end = 0; - int key_count = 0; - -- file_beginning = slapi_current_utc_time(); -+ file_beginning = slapi_current_rel_time_t(); - ret = import_merge_one_file(current_worker, passes, &key_count); -- file_end = slapi_current_utc_time(); -+ file_end = slapi_current_rel_time_t(); - if (key_count == 0) { - import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".", - current_worker->index_info->name); -@@ -3426,7 +3427,7 @@ import_mega_merge(ImportJob *job) - } - } - -- end = slapi_current_utc_time(); -+ end = slapi_current_rel_time_t(); - if (0 == ret) { - int seconds_to_merge = end - beginning; - import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.", -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -index 5c7d9c8f7..905a84e74 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -3151,8 +3151,9 @@ bulk_import_start(Slapi_PBlock *pb) - (1024 * 1024); - } - import_subcount_stuff_init(job->mothers); -- job->wire_lock = PR_NewLock(); -- job->wire_cv = PR_NewCondVar(job->wire_lock); -+ -+ pthread_mutex_init(&job->wire_lock, NULL); -+ pthread_cond_init(&job->wire_cv, NULL); - - /* COPIED from ldif2ldbm.c : */ - -@@ -3175,7 +3176,7 @@ bulk_import_start(Slapi_PBlock *pb) - - /* END OF COPIED SECTION */ - -- PR_Lock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); - vlv_init(job->inst); - - /* create thread for import_main, so we can return */ -@@ -3188,7 +3189,7 @@ bulk_import_start(Slapi_PBlock *pb) - slapi_log_err(SLAPI_LOG_ERR, "bulk_import_start", - "Unable to spawn import thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - prerr, slapd_pr_strerror(prerr)); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - ret = -2; - goto fail; - } -@@ -3204,8 +3205,8 @@ bulk_import_start(Slapi_PBlock *pb) - /* (don't want to send the success code back to the LDAP client until - * we're ready for the adds to start rolling in) - */ -- PR_WaitCondVar(job->wire_cv, PR_INTERVAL_NO_TIMEOUT); -- PR_Unlock(job->wire_lock); -+ pthread_cond_wait(&job->wire_cv, &job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - - return 0; - -@@ -3243,13 +3244,13 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - return -1; - } - -- PR_Lock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); - /* Let's do this inside the lock !*/ - id = job->lead_ID + 1; - /* generate uniqueid if necessary */ - if (import_generate_uniqueid(job, entry) != UID_SUCCESS) { - import_abort_all(job, 1); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - -@@ -3258,7 +3259,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - if ((ep == NULL) || (ep->ep_entry == NULL)) { - import_abort_all(job, 1); - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - -@@ -3304,7 +3305,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - if (job->flags & FLAG_ABORT) { - backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -2; - } - -@@ -3342,7 +3343,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - /* entry is released in the frontend on failure*/ - backentry_clear_entry(ep); - backentry_free(&ep); /* release the backend wrapper */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - sepp = PL_strchr(sepp + 1, ','); -@@ -3368,7 +3369,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - (long unsigned int)newesize, (long unsigned int)job->fifo.bsize); - backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - /* Now check if fifo has enough space for the new entry */ -@@ -3394,7 +3395,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - job->trailing_ID = id - job->fifo.size; - } - -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return 0; - } - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -index 0ac3694b6..5d6010f46 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -270,10 +270,8 @@ bdb_instance_cleanup(struct ldbm_instance *inst) - slapi_ch_free_string(&inst_dirp); - } - slapi_destroy_rwlock(inst_env->bdb_env_lock); -- PR_DestroyCondVar(inst_env->bdb_thread_count_cv); -- inst_env->bdb_thread_count_cv = NULL; -- PR_DestroyLock(inst_env->bdb_thread_count_lock); -- inst_env->bdb_thread_count_lock = NULL; -+ pthread_mutex_destroy(&(inst_env->bdb_thread_count_lock)); -+ pthread_cond_destroy(&(inst_env->bdb_thread_count_cv)); - slapi_ch_free((void **)&inst->inst_db); - /* - slapi_destroy_rwlock(((bdb_db_env *)inst->inst_db)->bdb_env_lock); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -index 464f89f4d..6cccad8e6 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -52,16 +52,16 @@ - return. - */ - #define INCR_THREAD_COUNT(pEnv) \ -- PR_Lock(pEnv->bdb_thread_count_lock); \ -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ - ++pEnv->bdb_thread_count; \ -- PR_Unlock(pEnv->bdb_thread_count_lock) -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) - - #define DECR_THREAD_COUNT(pEnv) \ -- PR_Lock(pEnv->bdb_thread_count_lock); \ -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ - if (--pEnv->bdb_thread_count == 0) { \ -- PR_NotifyCondVar(pEnv->bdb_thread_count_cv); \ -+ pthread_cond_broadcast(&pEnv->bdb_thread_count_cv); \ - } \ -- PR_Unlock(pEnv->bdb_thread_count_lock) -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) - - #define NEWDIR_MODE 0755 - #define DB_REGION_PREFIX "__db." -@@ -91,9 +91,12 @@ static int trans_batch_txn_max_sleep = 50; - static PRBool log_flush_thread = PR_FALSE; - static int txn_in_progress_count = 0; - static int *txn_log_flush_pending = NULL; --static PRLock *sync_txn_log_flush = NULL; --static PRCondVar *sync_txn_log_flush_done = NULL; --static PRCondVar *sync_txn_log_do_flush = NULL; -+ -+static pthread_mutex_t sync_txn_log_flush; -+static pthread_cond_t sync_txn_log_flush_done; -+static pthread_cond_t sync_txn_log_do_flush; -+ -+ - static int bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock); - static int bdb_restore_file_check(struct ldbminfo *li); - -@@ -181,12 +184,12 @@ bdb_set_batch_transactions(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_limit = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_limit == FLUSH_REMOTEOFF) { -@@ -217,12 +220,12 @@ bdb_set_batch_txn_min_sleep(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_txn_min_sleep = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_txn_min_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { -@@ -249,12 +252,12 @@ bdb_set_batch_txn_max_sleep(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_txn_max_sleep = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_txn_max_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { -@@ -725,10 +728,9 @@ bdb_free_env(void **arg) - slapi_destroy_rwlock((*env)->bdb_env_lock); - (*env)->bdb_env_lock = NULL; - } -- PR_DestroyCondVar((*env)->bdb_thread_count_cv); -- (*env)->bdb_thread_count_cv = NULL; -- PR_DestroyLock((*env)->bdb_thread_count_lock); -- (*env)->bdb_thread_count_lock = NULL; -+ pthread_mutex_destroy(&((*env)->bdb_thread_count_lock)); -+ pthread_cond_destroy(&((*env)->bdb_thread_count_cv)); -+ - slapi_ch_free((void **)env); - return; - } -@@ -746,11 +748,15 @@ bdb_make_env(bdb_db_env **env, struct ldbminfo *li) - int ret; - Object *inst_obj; - ldbm_instance *inst = NULL; -+ pthread_condattr_t condAttr; - - pEnv = (bdb_db_env *)slapi_ch_calloc(1, sizeof(bdb_db_env)); - -- pEnv->bdb_thread_count_lock = PR_NewLock(); -- pEnv->bdb_thread_count_cv = PR_NewCondVar(pEnv->bdb_thread_count_lock); -+ pthread_mutex_init(&pEnv->bdb_thread_count_lock, NULL); -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(&pEnv->bdb_thread_count_cv, &condAttr); -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - - if ((ret = db_env_create(&pEnv->bdb_DB_ENV, 0)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, -@@ -2013,9 +2019,9 @@ bdb_pre_close(struct ldbminfo *li) - return; - - /* first, see if there are any housekeeping threads running */ -- PR_Lock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); - threadcount = pEnv->bdb_thread_count; -- PR_Unlock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); - - if (threadcount) { - PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100); -@@ -2023,7 +2029,7 @@ bdb_pre_close(struct ldbminfo *li) - /* Print handy-dandy log message */ - slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n", - threadcount); -- PR_Lock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); - /* Tell them to stop - we wait until the last possible moment to invoke - this. If we do this much sooner than this, we could find ourselves - in a situation where the threads see the stop_threads and exit before -@@ -2034,6 +2040,7 @@ bdb_pre_close(struct ldbminfo *li) - conf->bdb_stop_threads = 1; - /* Wait for them to exit */ - while (pEnv->bdb_thread_count > 0) { -+ struct timespec current_time = {0}; - PRIntervalTime before = PR_IntervalNow(); - /* There are 3 ways to wake up from this WaitCondVar: - 1) The last database thread exits and calls NotifyCondVar - thread_count -@@ -2041,7 +2048,9 @@ bdb_pre_close(struct ldbminfo *li) - 2) Timeout - in this case, thread_count will be > 0 - bad - 3) A bad error occurs - bad - will be reported as a timeout - */ -- PR_WaitCondVar(pEnv->bdb_thread_count_cv, cvwaittime); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += DBLAYER_SLEEP_INTERVAL / 10; /* cvwaittime but in seconds */ -+ pthread_cond_timedwait(&pEnv->bdb_thread_count_cv, &pEnv->bdb_thread_count_lock, ¤t_time); - if (pEnv->bdb_thread_count > 0) { - /* still at least 1 thread running - see if this is a timeout */ - if ((PR_IntervalNow() - before) >= cvwaittime) { -@@ -2052,7 +2061,7 @@ bdb_pre_close(struct ldbminfo *li) - /* else just a spurious interrupt */ - } - } -- PR_Unlock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); - if (timedout) { - slapi_log_err(SLAPI_LOG_ERR, - "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n", -@@ -2645,12 +2654,12 @@ bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool - and new parent for any nested transactions created */ - if (use_lock && log_flush_thread) { - int txn_id = new_txn.back_txn_txn->id(new_txn.back_txn_txn); -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_in_progress_count++; - slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_begin_ext", - "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - trans_batch_count, txn_in_progress_count, txn_id); -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - dblayer_push_pvt_txn(&new_txn); - if (txn) { -@@ -2717,11 +2726,11 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - if ((conf->bdb_durable_transactions) && use_lock) { - if (trans_batch_limit > 0 && log_flush_thread) { - /* let log_flush thread do the flushing */ -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_batch_slot = trans_batch_count++; - txn_log_flush_pending[txn_batch_slot] = txn_id; -- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before notify): batchcount: %d, " -- "txn_in_progress: %d, curr_txn: %x\n", -+ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", -+ "(before notify): batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - trans_batch_count, - txn_in_progress_count, txn_id); - /* -@@ -2731,8 +2740,9 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - * - there is no other outstanding txn - */ - if (trans_batch_count > trans_batch_limit || -- trans_batch_count == txn_in_progress_count) { -- PR_NotifyCondVar(sync_txn_log_do_flush); -+ trans_batch_count == txn_in_progress_count) -+ { -+ pthread_cond_signal(&sync_txn_log_do_flush); - } - /* - * We need to wait until the txn has been flushed before continuing -@@ -2740,14 +2750,14 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - * PR_WaitCondvar releases and reaquires the lock - */ - while (txn_log_flush_pending[txn_batch_slot] == txn_id) { -- PR_WaitCondVar(sync_txn_log_flush_done, PR_INTERVAL_NO_TIMEOUT); -+ pthread_cond_wait(&sync_txn_log_flush_done, &sync_txn_log_flush); - } - txn_in_progress_count--; -- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before unlock): batchcount: %d, " -- "txn_in_progress: %d, curr_txn %x\n", -+ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", -+ "(before unlock): batchcount: %d, txn_in_progress: %d, curr_txn %x\n", - trans_batch_count, - txn_in_progress_count, txn_id); -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } else if (trans_batch_limit == FLUSH_REMOTEOFF) { /* user remotely turned batching off */ - LOG_FLUSH(pEnv->bdb_DB_ENV, 0); - } -@@ -2799,9 +2809,9 @@ bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - int txn_id = db_txn->id(db_txn); - bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; - if (use_lock && log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_in_progress_count--; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_abort_ext", - "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - trans_batch_count, txn_in_progress_count, txn_id); -@@ -3420,11 +3430,18 @@ bdb_start_log_flush_thread(struct ldbminfo *li) - int max_threads = config_get_threadnumber(); - - if ((BDB_CONFIG(li)->bdb_durable_transactions) && -- (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) { -+ (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) -+ { - /* initialize the synchronization objects for the log_flush and worker threads */ -- sync_txn_log_flush = PR_NewLock(); -- sync_txn_log_flush_done = PR_NewCondVar(sync_txn_log_flush); -- sync_txn_log_do_flush = PR_NewCondVar(sync_txn_log_flush); -+ pthread_condattr_t condAttr; -+ -+ pthread_mutex_init(&sync_txn_log_flush, NULL); -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(&sync_txn_log_do_flush, &condAttr); -+ pthread_cond_init(&sync_txn_log_flush_done, NULL); -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - txn_log_flush_pending = (int *)slapi_ch_malloc(max_threads * sizeof(int)); - log_flush_thread = PR_TRUE; - if (NULL == PR_CreateThread(PR_USER_THREAD, -@@ -3451,7 +3468,7 @@ bdb_start_log_flush_thread(struct ldbminfo *li) - static int - log_flush_threadmain(void *param) - { -- PRIntervalTime interval_wait, interval_flush, interval_def; -+ PRIntervalTime interval_flush, interval_def; - PRIntervalTime last_flush = 0; - int i; - int do_flush = 0; -@@ -3464,7 +3481,6 @@ log_flush_threadmain(void *param) - INCR_THREAD_COUNT(pEnv); - - interval_flush = PR_MillisecondsToInterval(trans_batch_txn_min_sleep); -- interval_wait = PR_MillisecondsToInterval(trans_batch_txn_max_sleep); - interval_def = PR_MillisecondsToInterval(300); /*used while no txn or txn batching */ - /* LK this is only needed if online change of - * of txn config is supported ??? -@@ -3473,10 +3489,10 @@ log_flush_threadmain(void *param) - if (BDB_CONFIG(li)->bdb_enable_transactions) { - if (trans_batch_limit > 0) { - /* synchronize flushing thread with workers */ -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - if (!log_flush_thread) { - /* batch transactions was disabled while waiting for the lock */ -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - break; - } - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(in loop): batchcount: %d, " -@@ -3502,20 +3518,31 @@ log_flush_threadmain(void *param) - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(before notify): batchcount: %d, " - "txn_in_progress: %d\n", - trans_batch_count, txn_in_progress_count); -- PR_NotifyAllCondVar(sync_txn_log_flush_done); -+ pthread_cond_broadcast(&sync_txn_log_flush_done); - } - /* wait until flushing conditions are met */ - while ((trans_batch_count == 0) || -- (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) { -+ (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) -+ { -+ struct timespec current_time = {0}; -+ /* convert milliseconds to nano seconds */ -+ int32_t nano_sec_sleep = trans_batch_txn_max_sleep * 1000000; - if (BDB_CONFIG(li)->bdb_stop_threads) - break; - if (PR_IntervalNow() - last_flush > interval_flush) { - do_flush = 1; - break; - } -- PR_WaitCondVar(sync_txn_log_do_flush, interval_wait); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ if (current_time.tv_nsec + nano_sec_sleep > 1000000000) { -+ /* nano sec will overflow, just bump the seconds */ -+ current_time.tv_sec++; -+ } else { -+ current_time.tv_nsec += nano_sec_sleep; -+ } -+ pthread_cond_timedwait(&sync_txn_log_do_flush, &sync_txn_log_flush, ¤t_time); - } -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(wakeup): batchcount: %d, " - "txn_in_progress: %d\n", - trans_batch_count, txn_in_progress_count); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -index bf00d2e9a..6bb04d21a 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -18,10 +18,10 @@ typedef struct bdb_db_env - Slapi_RWLock *bdb_env_lock; - int bdb_openflags; - int bdb_priv_flags; -- PRLock *bdb_thread_count_lock; /* lock for thread_count_cv */ -- PRCondVar *bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ -- PRInt32 bdb_thread_count; /* Tells us how many threads are running, -- * used to figure out when they're all stopped */ -+ pthread_mutex_t bdb_thread_count_lock; /* lock for thread_count_cv */ -+ pthread_cond_t bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ -+ PRInt32 bdb_thread_count; /* Tells us how many threads are running, -+ * used to figure out when they're all stopped */ - } bdb_db_env; - - /* structure which holds our stuff */ -diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h -index db77a602b..bfa74ed49 100644 ---- a/ldap/servers/slapd/back-ldbm/import.h -+++ b/ldap/servers/slapd/back-ldbm/import.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -130,8 +130,8 @@ typedef struct - char **exclude_subtrees; /* list of subtrees to NOT import */ - Fifo fifo; /* entry fifo for indexing */ - char *task_status; /* transient state info for the end-user */ -- PRLock *wire_lock; /* lock for serializing wire imports */ -- PRCondVar *wire_cv; /* ... and ordering the startup */ -+ pthread_mutex_t wire_lock; /* lock for serializing wire imports */ -+ pthread_cond_t wire_cv; /* ... and ordering the startup */ - PRThread *main_thread; /* for FRI: import_main() thread id */ - int encrypt; - Slapi_Value *usn_value; /* entryusn for import */ -diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c -index 88b7dc3be..1883fe711 100644 ---- a/ldap/servers/slapd/connection.c -+++ b/ldap/servers/slapd/connection.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -64,8 +64,10 @@ struct Slapi_work_q - - static struct Slapi_work_q *head_work_q = NULL; /* global work queue head */ - static struct Slapi_work_q *tail_work_q = NULL; /* global work queue tail */ --static PRLock *work_q_lock = NULL; /* protects head_conn_q and tail_conn_q */ --static PRCondVar *work_q_cv; /* used by operation threads to wait for work - when there is a conn in the queue waiting to be processed */ -+static pthread_mutex_t work_q_lock; /* protects head_conn_q and tail_conn_q */ -+static pthread_cond_t work_q_cv; /* used by operation threads to wait for work - -+ * when there is a conn in the queue waiting -+ * to be processed */ - static PRInt32 work_q_size; /* size of conn_q */ - static PRInt32 work_q_size_max; /* high water mark of work_q_size */ - #define WORK_Q_EMPTY (work_q_size == 0) -@@ -409,7 +411,7 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib - - /* initialize the remaining connection fields */ - conn->c_ldapversion = LDAP_VERSION3; -- conn->c_starttime = slapi_current_utc_time(); -+ conn->c_starttime = slapi_current_rel_time_t(); - conn->c_idlesince = conn->c_starttime; - conn->c_flags = is_SSL ? CONN_FLAG_SSL : 0; - conn->c_authtype = slapi_ch_strdup(SLAPD_AUTH_NONE); -@@ -424,32 +426,40 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib - void - init_op_threads() - { -- int i; -- PRErrorCode errorCode; -- int max_threads = config_get_threadnumber(); -- /* Initialize the locks and cv */ -+ pthread_condattr_t condAttr; -+ int32_t max_threads = config_get_threadnumber(); -+ int32_t rc; - -- if ((work_q_lock = PR_NewLock()) == NULL) { -- errorCode = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, -- "init_op_threads", "PR_NewLock failed for work_q_lock, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- errorCode, slapd_pr_strerror(errorCode)); -+ /* Initialize the locks and cv */ -+ if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); - exit(-1); - } -- -- if ((work_q_cv = PR_NewCondVar(work_q_lock)) == NULL) { -- errorCode = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_NewCondVar failed for work_q_cv, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- errorCode, slapd_pr_strerror(errorCode)); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(-1); -+ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(-1); -+ } else if ((rc = pthread_cond_init(&work_q_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); - exit(-1); - } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - - work_q_stack = PR_CreateStack("connection_work_q"); -- - op_stack = PR_CreateStack("connection_operation"); - - /* start the operation threads */ -- for (i = 0; i < max_threads; i++) { -+ for (size_t i = 0; i < max_threads; i++) { - PR_SetConcurrency(4); - if (PR_CreateThread(PR_USER_THREAD, - (VFP)(void *)connection_threadmain, NULL, -@@ -457,7 +467,8 @@ init_op_threads() - PR_UNJOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) { - int prerr = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - prerr, slapd_pr_strerror(prerr)); - } else { - g_incr_active_threadcnt(); -@@ -949,16 +960,23 @@ connection_make_new_pb(Slapi_PBlock *pb, Connection *conn) - } - - int --connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) -+connection_wait_for_new_work(Slapi_PBlock *pb, int32_t interval) - { - int ret = CONN_FOUND_WORK_TO_DO; - work_q_item *wqitem = NULL; - struct Slapi_op_stack *op_stack_obj = NULL; - -- PR_Lock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); - - while (!op_shutdown && WORK_Q_EMPTY) { -- PR_WaitCondVar(work_q_cv, interval); -+ if (interval == 0 ) { -+ pthread_cond_wait(&work_q_cv, &work_q_lock); -+ } else { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_cond_timedwait(&work_q_cv, &work_q_lock, ¤t_time); -+ } - } - - if (op_shutdown) { -@@ -975,7 +993,7 @@ connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) - slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op); - } - -- PR_Unlock(work_q_lock); -+ pthread_mutex_unlock(&work_q_lock); - return ret; - } - -@@ -1353,7 +1371,7 @@ connection_check_activity_level(Connection *conn) - /* store current count in the previous count slot */ - conn->c_private->previous_op_count = current_count; - /* update the last checked time */ -- conn->c_private->previous_count_check_time = slapi_current_utc_time(); -+ conn->c_private->previous_count_check_time = slapi_current_rel_time_t(); - pthread_mutex_unlock(&(conn->c_mutex)); - slapi_log_err(SLAPI_LOG_CONNS, "connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n", conn->c_connid, delta_count); - } -@@ -1463,7 +1481,7 @@ connection_threadmain() - { - Slapi_PBlock *pb = slapi_pblock_new(); - /* wait forever for new pb until one is available or shutdown */ -- PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT; /* PR_SecondsToInterval(10); */ -+ int32_t interval = 0; /* used be 10 seconds */ - Connection *conn = NULL; - Operation *op; - ber_tag_t tag = 0; -@@ -1503,7 +1521,7 @@ connection_threadmain() - - switch (ret) { - case CONN_NOWORK: -- PR_ASSERT(interval != PR_INTERVAL_NO_TIMEOUT); /* this should never happen with PR_INTERVAL_NO_TIMEOUT */ -+ PR_ASSERT(interval != 0); /* this should never happen */ - continue; - case CONN_SHUTDOWN: - slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain", -@@ -1610,7 +1628,7 @@ connection_threadmain() - conn->c_opsinitiated, conn->c_refcnt, conn->c_flags); - } - -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - #define DB_PERF_TURBO 1 - #if defined(DB_PERF_TURBO) - /* If it's been a while since we last did it ... */ -@@ -1914,7 +1932,7 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) - new_work_q->op_stack_obj = op_stack_obj; - new_work_q->next_work_item = NULL; - -- PR_Lock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); - if (tail_work_q == NULL) { - tail_work_q = new_work_q; - head_work_q = new_work_q; -@@ -1926,8 +1944,8 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) - if (work_q_size > work_q_size_max) { - work_q_size_max = work_q_size; - } -- PR_NotifyCondVar(work_q_cv); /* notify waiters in connection_wait_for_new_work */ -- PR_Unlock(work_q_lock); -+ pthread_cond_signal(&work_q_cv); /* notify waiters in connection_wait_for_new_work */ -+ pthread_mutex_unlock(&work_q_lock); - } - - /* get_work_q(): will get a work_q_item from the beginning of the work queue, return NULL if -@@ -1975,9 +1993,9 @@ op_thread_cleanup() - op_stack_size, work_q_size_max, work_q_stack_size_max); - - PR_AtomicIncrement(&op_shutdown); -- PR_Lock(work_q_lock); -- PR_NotifyAllCondVar(work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ -- PR_Unlock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); -+ pthread_cond_broadcast(&work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ -+ pthread_mutex_unlock(&work_q_lock); - } - - /* do this after all worker threads have terminated */ -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index bfd965263..0071ed86a 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -81,8 +81,9 @@ static int readsignalpipe = SLAPD_INVALID_SOCKET; - #define FDS_SIGNAL_PIPE 0 - - static PRThread *disk_thread_p = NULL; --static PRCondVar *diskmon_cvar = NULL; --static PRLock *diskmon_mutex = NULL; -+static pthread_cond_t diskmon_cvar; -+static pthread_mutex_t diskmon_mutex; -+ - void disk_monitoring_stop(void); - - typedef struct listener_info -@@ -441,9 +442,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - - while (!g_get_shutdown()) { - if (!first_pass) { -- PR_Lock(diskmon_mutex); -- PR_WaitCondVar(diskmon_cvar, PR_SecondsToInterval(10)); -- PR_Unlock(diskmon_mutex); -+ struct timespec current_time = {0}; -+ -+ pthread_mutex_lock(&diskmon_mutex); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += 10; -+ pthread_cond_timedwait(&diskmon_cvar, &diskmon_mutex, ¤t_time); -+ pthread_mutex_unlock(&diskmon_mutex); - /* - * We need to subtract from disk_space to account for the - * logging we just did, it doesn't hurt if we subtract a -@@ -622,7 +627,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - "Disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). " - "Waiting %d minutes for disk space to be cleaned up before shutting slapd down...\n", - dirstr, threshold, (grace_period / 60)); -- start = slapi_current_utc_time(); -+ start = slapi_current_rel_time_t(); - now = start; - while ((now - start) < grace_period) { - if (g_get_shutdown()) { -@@ -685,7 +690,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - immediate_shutdown = 1; - goto cleanup; - } -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - } - - if (ok_now) { -@@ -1005,21 +1010,34 @@ slapd_daemon(daemon_ports_t *ports) - * and the monitoring thread. - */ - if (config_get_disk_monitoring()) { -- if ((diskmon_mutex = PR_NewLock()) == NULL) { -+ pthread_condattr_t condAttr; -+ int rc = 0; -+ -+ if ((rc = pthread_mutex_init(&diskmon_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -+ } -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -- "Cannot create new lock for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -+ "cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - g_set_shutdown(SLAPI_SHUTDOWN_EXIT); - } -- if (diskmon_mutex) { -- if ((diskmon_cvar = PR_NewCondVar(diskmon_mutex)) == NULL) { -- slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", -- "Cannot create new condition variable for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -- g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -- } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -+ "cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -+ } -+ if ((rc = pthread_cond_init(&diskmon_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -+ "cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); - } -- if (diskmon_mutex && diskmon_cvar) { -+ pthread_condattr_destroy(&condAttr); -+ if (rc == 0) { - disk_thread_p = PR_CreateThread(PR_SYSTEM_THREAD, - (VFP)(void *)disk_monitoring_thread, NULL, - PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, -@@ -1508,7 +1526,7 @@ static void - handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused))) - { - Connection *c; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - #if LDAP_ERROR_LOGGING - if (slapd_ldap_debug & LDAP_DEBUG_CONNS) { -@@ -2884,8 +2902,8 @@ void - disk_monitoring_stop(void) - { - if (disk_thread_p) { -- PR_Lock(diskmon_mutex); -- PR_NotifyCondVar(diskmon_cvar); -- PR_Unlock(diskmon_mutex); -+ pthread_mutex_lock(&diskmon_mutex); -+ pthread_cond_signal(&diskmon_cvar); -+ pthread_mutex_unlock(&diskmon_mutex); - } - } -diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c -index a491acd0a..e1900724f 100644 ---- a/ldap/servers/slapd/eventq.c -+++ b/ldap/servers/slapd/eventq.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -52,8 +52,8 @@ typedef struct _slapi_eq_context - */ - typedef struct _event_queue - { -- PRLock *eq_lock; -- PRCondVar *eq_cv; -+ pthread_mutex_t eq_lock; -+ pthread_cond_t eq_cv; - slapi_eq_context *eq_queue; - } event_queue; - -@@ -74,8 +74,8 @@ static PRThread *eq_loop_tid = NULL; - static int eq_running = 0; - static int eq_stopped = 0; - static int eq_initialized = 0; --PRLock *ss_lock = NULL; --PRCondVar *ss_cv = NULL; -+static pthread_mutex_t ss_lock; -+static pthread_cond_t ss_cv; - PRCallOnceType init_once = {0}; - - /* Forward declarations */ -@@ -170,7 +170,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - - PR_ASSERT(eq_initialized); - if (!eq_stopped) { -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = &(eq->eq_queue); - while (!found && *p != NULL) { - if ((*p)->ec_id == ctx) { -@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - p = &((*p)->ec_next); - } - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "cancellation of event id %p requested: %s\n", -@@ -223,7 +223,7 @@ eq_enqueue(slapi_eq_context *newec) - slapi_eq_context **p; - - PR_ASSERT(NULL != newec); -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - /* Insert in order (sorted by start time) in the list */ - for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { - if ((*p)->ec_when > newec->ec_when) { -@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) - newec->ec_next = NULL; - } - *p = newec; -- PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ -- PR_Unlock(eq->eq_lock); -+ pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - - -@@ -251,12 +251,12 @@ eq_dequeue(time_t now) - { - slapi_eq_context *retptr = NULL; - -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { - retptr = eq->eq_queue; - eq->eq_queue = retptr->ec_next; - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - return retptr; - } - -@@ -271,7 +271,7 @@ static void - eq_call_all(void) - { - slapi_eq_context *p; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - while ((p = eq_dequeue(curtime)) != NULL) { - /* Call the scheduled function */ -@@ -299,34 +299,35 @@ static void - eq_loop(void *arg __attribute__((unused))) - { - while (eq_running) { -- time_t curtime = slapi_current_utc_time(); -- PRIntervalTime timeout; -+ time_t curtime = slapi_current_rel_time_t(); - int until; -- PR_Lock(eq->eq_lock); -+ -+ pthread_mutex_lock(&(eq->eq_lock)); - while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { - if (!eq_running) { -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - goto bye; - } - /* Compute new timeout */ - if (NULL != eq->eq_queue) { -+ struct timespec current_time = slapi_current_rel_time_hr(); - until = eq->eq_queue->ec_when - curtime; -- timeout = PR_SecondsToInterval(until); -+ current_time.tv_sec += until; -+ pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); - } else { -- timeout = PR_INTERVAL_NO_TIMEOUT; -+ pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); - } -- PR_WaitCondVar(eq->eq_cv, timeout); -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - } - /* There is some work to do */ -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - eq_call_all(); - } - bye: - eq_stopped = 1; -- PR_Lock(ss_lock); -- PR_NotifyAllCondVar(ss_cv); -- PR_Unlock(ss_lock); -+ pthread_mutex_lock(&ss_lock); -+ pthread_cond_broadcast(&ss_cv); -+ pthread_mutex_unlock(&ss_lock); - } - - -@@ -336,23 +337,50 @@ bye: - static PRStatus - eq_create(void) - { -- PR_ASSERT(NULL == eq->eq_lock); -- if ((eq->eq_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ pthread_condattr_t condAttr; -+ int rc = 0; -+ -+ /* Init the eventq mutex and cond var */ -+ if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create lock: error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((ss_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -+ -+ /* Init the "ss" mutex and condition var */ -+ if (pthread_mutex_init(&ss_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create ss lock: error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new ss condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - eq->eq_queue = NULL; - eq_initialized = 1; - return PR_SUCCESS; -@@ -411,7 +439,7 @@ eq_stop() - { - slapi_eq_context *p, *q; - -- if (NULL == eq || NULL == eq->eq_lock) { /* never started */ -+ if (NULL == eq) { /* never started */ - eq_stopped = 1; - return; - } -@@ -423,12 +451,24 @@ eq_stop() - * it acknowledges by setting eq_stopped. - */ - while (!eq_stopped) { -- PR_Lock(eq->eq_lock); -- PR_NotifyAllCondVar(eq->eq_cv); -- PR_Unlock(eq->eq_lock); -- PR_Lock(ss_lock); -- PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); -- PR_Unlock(ss_lock); -+ struct timespec current_time = {0}; -+ -+ pthread_mutex_lock(&(eq->eq_lock)); -+ pthread_cond_broadcast(&(eq->eq_cv)); -+ pthread_mutex_unlock(&(eq->eq_lock)); -+ -+ pthread_mutex_lock(&ss_lock); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ if (current_time.tv_nsec + 100000000 > 1000000000) { -+ /* nanoseconds will overflow, adjust the seconds and nanoseconds */ -+ current_time.tv_sec++; -+ /* Add the remainder to nanoseconds */ -+ current_time.tv_nsec = (current_time.tv_nsec + 100000000) - 1000000000; -+ } else { -+ current_time.tv_nsec += 100000000; /* 100 ms */ -+ } -+ pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); -+ pthread_mutex_unlock(&ss_lock); - } - (void)PR_JoinThread(eq_loop_tid); - /* -@@ -438,7 +478,7 @@ eq_stop() - * The downside is that the event queue can't be stopped and restarted - * easily. - */ -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = eq->eq_queue; - while (p != NULL) { - q = p->ec_next; -@@ -449,7 +489,7 @@ eq_stop() - */ - p = q; - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); - } - -@@ -463,17 +503,17 @@ slapi_eq_get_arg(Slapi_Eq_Context ctx) - - PR_ASSERT(eq_initialized); - if (eq && !eq_stopped) { -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = &(eq->eq_queue); - while (p && *p != NULL) { - if ((*p)->ec_id == ctx) { -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - return (*p)->ec_arg; - } else { - p = &((*p)->ec_next); - } - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - return NULL; - } -diff --git a/ldap/servers/slapd/house.c b/ldap/servers/slapd/house.c -index ff139a4a5..ac1d94f26 100644 ---- a/ldap/servers/slapd/house.c -+++ b/ldap/servers/slapd/house.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -23,17 +23,15 @@ - #define SLAPD_HOUSEKEEPING_INTERVAL 30 /* seconds */ - - static PRThread *housekeeping_tid = NULL; --static PRLock *housekeeping_mutex = NULL; --static PRCondVar *housekeeping_cvar = NULL; -+static pthread_mutex_t housekeeping_mutex; -+static pthread_cond_t housekeeping_cvar; - - - static void - housecleaning(void *cur_time __attribute__((unused))) - { -- int interval; -- -- interval = PR_SecondsToInterval(SLAPD_HOUSEKEEPING_INTERVAL); - while (!g_get_shutdown()) { -+ struct timespec current_time = {0}; - /* - * Looks simple, but could potentially take a long time. - */ -@@ -42,9 +40,15 @@ housecleaning(void *cur_time __attribute__((unused))) - if (g_get_shutdown()) { - break; - } -- PR_Lock(housekeeping_mutex); -- PR_WaitCondVar(housekeeping_cvar, interval); -- PR_Unlock(housekeeping_mutex); -+ -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += SLAPD_HOUSEKEEPING_INTERVAL; -+ -+ /* Now we wait... */ -+ pthread_mutex_lock(&housekeeping_mutex); -+ pthread_cond_timedwait(&housekeeping_cvar, &housekeeping_mutex, ¤t_time); -+ pthread_mutex_unlock(&housekeeping_mutex); - } - } - -@@ -52,20 +56,31 @@ PRThread * - housekeeping_start(time_t cur_time, void *arg __attribute__((unused))) - { - static time_t thread_start_time; -+ pthread_condattr_t condAttr; -+ int rc = 0; - - if (housekeeping_tid) { - return housekeeping_tid; - } - -- if ((housekeeping_mutex = PR_NewLock()) == NULL) { -+ if ((rc = pthread_mutex_init(&housekeeping_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_condattr_init(&condAttr)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -- "housekeeping cannot create new lock. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -- } else if ((housekeeping_cvar = PR_NewCondVar(housekeeping_mutex)) == NULL) { -+ "housekeeping cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -- "housekeeping cannot create new condition variable. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -+ "housekeeping cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_cond_init(&housekeeping_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); - } else { -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - thread_start_time = cur_time; - if ((housekeeping_tid = PR_CreateThread(PR_USER_THREAD, - (VFP)housecleaning, (void *)&thread_start_time, -@@ -84,9 +99,16 @@ void - housekeeping_stop() - { - if (housekeeping_tid) { -- PR_Lock(housekeeping_mutex); -- PR_NotifyCondVar(housekeeping_cvar); -- PR_Unlock(housekeeping_mutex); -+ /* Notify the thread */ -+ pthread_mutex_lock(&housekeeping_mutex); -+ pthread_cond_signal(&housekeeping_cvar); -+ pthread_mutex_unlock(&housekeeping_mutex); -+ -+ /* Wait for the thread to finish */ - (void)PR_JoinThread(housekeeping_tid); -+ -+ /* Clean it all up */ -+ pthread_mutex_destroy(&housekeeping_mutex); -+ pthread_cond_destroy(&housekeeping_cvar); - } - } -diff --git a/ldap/servers/slapd/libmakefile b/ldap/servers/slapd/libmakefile -index b3ecabc29..3559c0104 100644 ---- a/ldap/servers/slapd/libmakefile -+++ b/ldap/servers/slapd/libmakefile -@@ -46,7 +46,7 @@ LIBSLAPD_OBJS=plugin_role.o getfilelist.o libglobs.o log.o ch_malloc.o entry.o p - filter.o filtercmp.o filterentry.o operation.o schemaparse.o pw.o \ - backend.o defbackend.o ava.o charray.o regex.o \ - str2filter.o dynalib.o plugin.o plugin_syntax.o plugin_mr.o \ -- slapi2nspr.o rwlock.o control.o plugin_internal_op.o \ -+ slapi2runtime.o rwlock.o control.o plugin_internal_op.o \ - result.o pw_retry.o agtmmap.o referral.o snmp_collator.o util.o \ - dse.o errormap.o computed.o match.o fileio.o \ - generation.o localhost.o ssl.o factory.o auditlog.o \ -diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c -index 6820a5d75..c60e6a8ed 100644 ---- a/ldap/servers/slapd/psearch.c -+++ b/ldap/servers/slapd/psearch.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -59,10 +59,10 @@ typedef struct _psearch - */ - typedef struct _psearch_list - { -- Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ -- PSearch *pl_head; /* Head of list */ -- PRLock *pl_cvarlock; /* Lock for cvar */ -- PRCondVar *pl_cvar; /* ps threads sleep on this */ -+ Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ -+ PSearch *pl_head; /* Head of list */ -+ pthread_mutex_t pl_cvarlock; /* Lock for cvar */ -+ pthread_cond_t pl_cvar; /* ps threads sleep on this */ - } PSearch_List; - - /* -@@ -101,21 +101,26 @@ void - ps_init_psearch_system() - { - if (!PS_IS_INITIALIZED()) { -+ int32_t rc = 0; -+ - psearch_list = (PSearch_List *)slapi_ch_calloc(1, sizeof(PSearch_List)); - if ((psearch_list->pl_rwlock = slapi_new_rwlock()) == NULL) { - slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot initialize lock structure. " - "The server is terminating.\n"); - exit(-1); - } -- if ((psearch_list->pl_cvarlock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new lock. " -- "The server is terminating.\n"); -- exit(-1); -+ -+ if ((rc = pthread_mutex_init(&(psearch_list->pl_cvarlock), NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", -+ "Cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); - } -- if ((psearch_list->pl_cvar = PR_NewCondVar(psearch_list->pl_cvarlock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new condition variable. " -- "The server is terminating.\n"); -- exit(-1); -+ if ((rc = pthread_cond_init(&(psearch_list->pl_cvar), NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); - } - psearch_list->pl_head = NULL; - } -@@ -288,7 +293,7 @@ ps_send_results(void *arg) - pb_conn->c_connid, pb_op ? pb_op->o_opid : -1); - } - -- PR_Lock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); - - while ((conn_acq_flag == 0) && slapi_atomic_load_64(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) { - /* Check for an abandoned operation */ -@@ -300,7 +305,7 @@ ps_send_results(void *arg) - } - if (NULL == ps->ps_eq_head) { - /* Nothing to do */ -- PR_WaitCondVar(psearch_list->pl_cvar, PR_INTERVAL_NO_TIMEOUT); -+ pthread_cond_wait(&(psearch_list->pl_cvar), &(psearch_list->pl_cvarlock)); - } else { - /* dequeue the item */ - int attrsonly; -@@ -330,17 +335,17 @@ ps_send_results(void *arg) - } - - /* -- * Send the result. Since send_ldap_search_entry can block for -- * up to 30 minutes, we relinquish all locks before calling it. -- */ -- PR_Unlock(psearch_list->pl_cvarlock); -+ * Send the result. Since send_ldap_search_entry can block for -+ * up to 30 minutes, we relinquish all locks before calling it. -+ */ -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - - /* -- * The entry is in the right scope and matches the filter -- * but we need to redo the filter test here to check access -- * controls. See the comments at the slapi_filter_test() -- * call in ps_service_persistent_searches(). -- */ -+ * The entry is in the right scope and matches the filter -+ * but we need to redo the filter test here to check access -+ * controls. See the comments at the slapi_filter_test() -+ * call in ps_service_persistent_searches(). -+ */ - slapi_pblock_get(ps->ps_pblock, SLAPI_SEARCH_FILTER, &f); - - /* See if the entry meets the filter and ACL criteria */ -@@ -358,13 +363,13 @@ ps_send_results(void *arg) - } - } - -- PR_Lock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); - - /* Deallocate our wrapper for this entry */ - pe_ch_free(&peq); - } - } -- PR_Unlock(psearch_list->pl_cvarlock); -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - ps_remove(ps); - - /* indicate the end of search */ -@@ -474,9 +479,9 @@ void - ps_wakeup_all() - { - if (PS_IS_INITIALIZED()) { -- PR_Lock(psearch_list->pl_cvarlock); -- PR_NotifyAllCondVar(psearch_list->pl_cvar); -- PR_Unlock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); -+ pthread_cond_broadcast(&(psearch_list->pl_cvar)); -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - } - } - -diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c -index 97249a4c5..a17c354fd 100644 ---- a/ldap/servers/slapd/regex.c -+++ b/ldap/servers/slapd/regex.c -@@ -72,7 +72,7 @@ int - slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) - { - int rc; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { - return LDAP_PARAM_ERROR; -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index f9ac8b46c..55ded5eb8 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6086,6 +6086,7 @@ Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); - void slapi_destroy_condvar(Slapi_CondVar *cvar); - int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); - int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); -+int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); - - /** - * Creates a new read/write lock -@@ -6777,6 +6778,12 @@ struct timespec slapi_current_time_hr(void); - * \return timespec of the current monotonic time. - */ - struct timespec slapi_current_rel_time_hr(void); -+/** -+ * Returns the current system time as a hr clock -+ * -+ * \return time_t of the current monotonic time. -+ */ -+time_t slapi_current_rel_time_t(void); - /** - * Returns the current system time as a hr clock in UTC timezone. - * This clock adjusts with ntp steps, and should NOT be -diff --git a/ldap/servers/slapd/slapi2nspr.c b/ldap/servers/slapd/slapi2runtime.c -similarity index 69% -rename from ldap/servers/slapd/slapi2nspr.c -rename to ldap/servers/slapd/slapi2runtime.c -index 232d1599e..85dc4c9a8 100644 ---- a/ldap/servers/slapd/slapi2nspr.c -+++ b/ldap/servers/slapd/slapi2runtime.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -14,6 +14,8 @@ - /* - * slapi2nspr.c - expose a subset of the NSPR20/21 API to SLAPI plugin writers - * -+ * Also include slapi2pthread functions -+ * - */ - - #include "slap.h" -@@ -44,47 +46,50 @@ - Slapi_Mutex * - slapi_new_mutex(void) - { -- return ((Slapi_Mutex *)PR_NewLock()); -+ pthread_mutex_t *new_mutex = (pthread_mutex_t *)slapi_ch_calloc(1, sizeof(pthread_mutex_t)); -+ pthread_mutex_init(new_mutex, NULL); -+ return ((Slapi_Mutex *)new_mutex); - } - -- - /* - * Function: slapi_destroy_mutex -- * Description: behaves just like PR_DestroyLock(). -+ * Description: behaves just like pthread_mutex_destroy(). - */ - void - slapi_destroy_mutex(Slapi_Mutex *mutex) - { - if (mutex != NULL) { -- PR_DestroyLock((PRLock *)mutex); -+ pthread_mutex_destroy((pthread_mutex_t *)mutex); -+ slapi_ch_free((void **)&mutex); - } - } - - - /* - * Function: slapi_lock_mutex -- * Description: behaves just like PR_Lock(). -+ * Description: behaves just like pthread_mutex_lock(). - */ --void -+inline void __attribute__((always_inline)) - slapi_lock_mutex(Slapi_Mutex *mutex) - { - if (mutex != NULL) { -- PR_Lock((PRLock *)mutex); -+ pthread_mutex_lock((pthread_mutex_t *)mutex); - } - } - - - /* - * Function: slapi_unlock_mutex -- * Description: behaves just like PR_Unlock(). -+ * Description: behaves just like pthread_mutex_unlock(). - * Returns: - * non-zero if mutex was successfully unlocked. - * 0 if mutex is NULL or is not locked by the calling thread. - */ --int -+inline int __attribute__((always_inline)) - slapi_unlock_mutex(Slapi_Mutex *mutex) - { -- if (mutex == NULL || PR_Unlock((PRLock *)mutex) == PR_FAILURE) { -+ PR_ASSERT(mutex != NULL); -+ if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) { - return (0); - } else { - return (1); -@@ -98,13 +103,18 @@ slapi_unlock_mutex(Slapi_Mutex *mutex) - * Returns: pointer to a new condition variable (NULL if one can't be created). - */ - Slapi_CondVar * --slapi_new_condvar(Slapi_Mutex *mutex) -+slapi_new_condvar(Slapi_Mutex *mutex __attribute__((unused))) - { -- if (mutex == NULL) { -- return (NULL); -- } -+ pthread_cond_t *new_cv = (pthread_cond_t *)slapi_ch_calloc(1, sizeof(pthread_cond_t)); -+ pthread_condattr_t condAttr; -+ -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(new_cv, &condAttr); -+ /* Done with the cond attr, it's safe to destroy it */ -+ pthread_condattr_destroy(&condAttr); - -- return ((Slapi_CondVar *)PR_NewCondVar((PRLock *)mutex)); -+ return (Slapi_CondVar *)new_cv; - } - - -@@ -116,7 +126,8 @@ void - slapi_destroy_condvar(Slapi_CondVar *cvar) - { - if (cvar != NULL) { -- PR_DestroyCondVar((PRCondVar *)cvar); -+ pthread_cond_destroy((pthread_cond_t *)cvar); -+ slapi_ch_free((void **)&cvar); - } - } - -@@ -134,23 +145,35 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - int - slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - { -- PRIntervalTime prit; -+ /* deprecated in favor of slapi_wait_condvar_pt() which requires that the -+ * mutex be passed in */ -+ return (0); -+} -+ -+int -+slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout) -+{ -+ int32_t rc = 1; - - if (cvar == NULL) { -- return (0); -+ return 0; - } - - if (timeout == NULL) { -- prit = PR_INTERVAL_NO_TIMEOUT; -+ rc = pthread_cond_wait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex); - } else { -- prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += (timeout->tv_sec + PR_MicrosecondsToInterval(timeout->tv_usec)); -+ rc = pthread_cond_timedwait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex, ¤t_time); - } - -- if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { -- return (0); -+ if (rc != 0) { -+ /* something went wrong */ -+ return 0; - } - -- return (1); -+ return 1; /* success */ - } - - -@@ -166,19 +189,19 @@ slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - int - slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all) - { -- PRStatus prrc; -+ int32_t rc; - - if (cvar == NULL) { -- return (0); -+ return 0; - } - - if (notify_all) { -- prrc = PR_NotifyAllCondVar((PRCondVar *)cvar); -+ rc = pthread_cond_broadcast((pthread_cond_t *)cvar); - } else { -- prrc = PR_NotifyCondVar((PRCondVar *)cvar); -+ rc = pthread_cond_signal((pthread_cond_t *)cvar); - } - -- return (prrc == PR_SUCCESS ? 1 : 0); -+ return (rc == 0 ? 1 : 0); - } - - Slapi_RWLock * -@@ -236,7 +259,7 @@ slapi_destroy_rwlock(Slapi_RWLock *rwlock) - } - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_rdlock(Slapi_RWLock *rwlock) - { - int ret = 0; -@@ -252,7 +275,7 @@ slapi_rwlock_rdlock(Slapi_RWLock *rwlock) - return ret; - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_wrlock(Slapi_RWLock *rwlock) - { - int ret = 0; -@@ -268,7 +291,7 @@ slapi_rwlock_wrlock(Slapi_RWLock *rwlock) - return ret; - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_unlock(Slapi_RWLock *rwlock) - { - int ret = 0; -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 806077a16..26f281cba 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -380,16 +380,14 @@ slapi_task_status_changed(Slapi_Task *task) - Slapi_PBlock *pb = slapi_pblock_new(); - Slapi_Entry *e; - int ttl; -- time_t expire; - - if ((e = get_internal_entry(pb, task->task_dn))) { - ttl = atoi(slapi_fetch_attr(e, "ttl", DEFAULT_TTL)); - if (ttl > (24*3600)) - ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ -- expire = time(NULL) + ttl; - task->task_flags |= SLAPI_TASK_DESTROYING; - /* queue an event to destroy the state info */ -- slapi_eq_once(destroy_task, (void *)task, expire); -+ slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); - } - slapi_free_search_results_internal(pb); - slapi_pblock_destroy(pb); -diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c -index 545538404..0406c3689 100644 ---- a/ldap/servers/slapd/time.c -+++ b/ldap/servers/slapd/time.c -@@ -107,6 +107,14 @@ slapi_current_rel_time_hr(void) - return now; - } - -+time_t -+slapi_current_rel_time_t(void) -+{ -+ struct timespec now = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &now); -+ return now.tv_sec; -+} -+ - struct timespec - slapi_current_utc_time_hr(void) - { -@@ -292,7 +300,7 @@ slapi_timer_result - slapi_timespec_expire_check(struct timespec *expire) - { - /* -- * Check this first, as it makes no timeout virutally free. -+ * Check this first, as it makes no timeout virtually free. - */ - if (expire->tv_sec == 0 && expire->tv_nsec == 0) { - return TIMER_CONTINUE; --- -2.26.2 - diff --git a/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch b/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch new file mode 100644 index 0000000..332394c --- /dev/null +++ b/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch @@ -0,0 +1,44 @@ +From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 25 Aug 2021 16:54:57 -0400 +Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set + to zero + +Bug Description: + +A division by zero crash occurs if the dnaInterval is set to zero + +Fix Description: + +Validate the config value of dnaInterval and adjust it to the +default/safe value of "1" if needed. + +relates: https://github.com/389ds/389-ds-base/issues/4884 + +Reviewed by: tbordaz(Thanks!) +--- + ldap/servers/plugins/dna/dna.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 928a3f54a..c983ebdd0 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) + + value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL); + if (value) { ++ errno = 0; + entry->interval = strtoull(value, 0, 0); ++ if (entry->interval == 0 || errno == ERANGE) { ++ slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM, ++ "dna_parse_config_entry - Invalid value for dnaInterval (%s), " ++ "Using default value of 1\n", value); ++ entry->interval = 1; ++ } + slapi_ch_free_string(&value); + } + +-- +2.31.1 + diff --git a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch deleted file mode 100644 index 66a40e8..0000000 --- a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch +++ /dev/null @@ -1,1748 +0,0 @@ -From 69af412d42acccac660037e1f4026a6a6717634c Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 17 Dec 2020 15:25:42 -0500 -Subject: [PATCH 2/2] Issue 4384 - Separate eventq into REALTIME and MONOTONIC - -Description: The recent changes to the eventq "when" time changed - internally from REALTIME to MONOTONIC, and this broke - the API. Create a new API for MONOTONIC clocks, and - keep the original API intact for REALTIME clocks. - -Relates: https://github.com/389ds/389-ds-base/issues/4384 - -Reviewed by: firstyear(Thanks!) ---- - Makefile.am | 1 + - docs/slapi.doxy.in | 1 - - ldap/servers/plugins/chainingdb/cb_instance.c | 6 +- - ldap/servers/plugins/dna/dna.c | 4 +- - .../plugins/replication/repl5_backoff.c | 12 +- - .../plugins/replication/repl5_connection.c | 10 +- - .../plugins/replication/repl5_mtnode_ext.c | 4 +- - .../plugins/replication/repl5_replica.c | 24 +- - .../plugins/replication/repl5_schedule.c | 4 +- - .../plugins/replication/windows_connection.c | 12 +- - .../replication/windows_inc_protocol.c | 7 +- - ldap/servers/plugins/retrocl/retrocl_trim.c | 10 +- - ldap/servers/slapd/daemon.c | 3 +- - ldap/servers/slapd/eventq-deprecated.c | 483 ++++++++++++++++++ - ldap/servers/slapd/eventq.c | 236 ++++----- - ldap/servers/slapd/main.c | 18 +- - ldap/servers/slapd/proto-slap.h | 6 +- - ldap/servers/slapd/slapi-plugin.h | 62 ++- - ldap/servers/slapd/slapi2runtime.c | 23 +- - ldap/servers/slapd/snmp_collator.c | 7 +- - ldap/servers/slapd/task.c | 2 +- - ldap/servers/slapd/uuid.c | 3 +- - 22 files changed, 750 insertions(+), 188 deletions(-) - create mode 100644 ldap/servers/slapd/eventq-deprecated.c - -diff --git a/Makefile.am b/Makefile.am -index f7bf1c44c..ece1ad41a 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -1408,6 +1408,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ - ldap/servers/slapd/entrywsi.c \ - ldap/servers/slapd/errormap.c \ - ldap/servers/slapd/eventq.c \ -+ ldap/servers/slapd/eventq-deprecated.c \ - ldap/servers/slapd/factory.c \ - ldap/servers/slapd/features.c \ - ldap/servers/slapd/fileio.c \ -diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in -index b1e4810ab..1cafc50ce 100644 ---- a/docs/slapi.doxy.in -+++ b/docs/slapi.doxy.in -@@ -759,7 +759,6 @@ WARN_LOGFILE = - # Note: If this tag is empty the current directory is searched. - - INPUT = src/libsds/include/sds.h \ -- docs/job-safety.md \ - # ldap/servers/slapd/slapi-plugin.h \ - - # This tag can be used to specify the character encoding of the source files -diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c -index bc1864c1a..7fd85deb0 100644 ---- a/ldap/servers/plugins/chainingdb/cb_instance.c -+++ b/ldap/servers/plugins/chainingdb/cb_instance.c -@@ -217,7 +217,7 @@ cb_instance_free(cb_backend_instance *inst) - slapi_rwlock_wrlock(inst->rwl_config_lock); - - if (inst->eq_ctx != NULL) { -- slapi_eq_cancel(inst->eq_ctx); -+ slapi_eq_cancel_rel(inst->eq_ctx); - inst->eq_ctx = NULL; - } - -@@ -1947,8 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), - * we can't call recursively into the DSE to do more adds, they'll - * silently fail. instead, schedule the adds to happen in 1 second. - */ -- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, -- slapi_current_rel_time_t() + 1); -+ inst->eq_ctx = slapi_eq_once_rel(cb_instance_add_monitor_later, (void *)inst, -+ slapi_current_rel_time_t() + 1); - } - - /* Get the list of operational attrs defined in the schema */ -diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c -index 1cb54580b..b46edfcbb 100644 ---- a/ldap/servers/plugins/dna/dna.c -+++ b/ldap/servers/plugins/dna/dna.c -@@ -688,7 +688,7 @@ dna_close(Slapi_PBlock *pb __attribute__((unused))) - slapi_log_err(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, - "--> dna_close\n"); - -- slapi_eq_cancel(eq_ctx); -+ slapi_eq_cancel_rel(eq_ctx); - dna_delete_config(NULL); - slapi_ch_free((void **)&dna_global_config); - slapi_destroy_rwlock(g_dna_cache_lock); -@@ -908,7 +908,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) - * starting up would cause the change to not - * get changelogged. */ - now = slapi_current_rel_time_t(); -- eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); -+ eq_ctx = slapi_eq_once_rel(dna_update_config_event, NULL, now + 30); - } else { - dna_update_config_event(0, NULL); - } -diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c -index 40ec75dd7..8c851beb2 100644 ---- a/ldap/servers/plugins/replication/repl5_backoff.c -+++ b/ldap/servers/plugins/replication/repl5_backoff.c -@@ -99,7 +99,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - bt->callback_arg = callback_data; - /* Cancel any pending events in the event queue */ - if (NULL != bt->pending_event) { -- slapi_eq_cancel(bt->pending_event); -+ slapi_eq_cancel_rel(bt->pending_event); - bt->pending_event = NULL; - } - /* Compute the first fire time */ -@@ -112,8 +112,8 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - /* Schedule the callback */ - bt->last_fire_time = slapi_current_rel_time_t(); - return_value = bt->last_fire_time + bt->next_interval; -- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, -- return_value); -+ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, -+ return_value); - PR_Unlock(bt->lock); - return return_value; - } -@@ -159,8 +159,8 @@ backoff_step(Backoff_Timer *bt) - /* Schedule the callback, if any */ - bt->last_fire_time += previous_interval; - return_value = bt->last_fire_time + bt->next_interval; -- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, -- return_value); -+ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, -+ return_value); - } - PR_Unlock(bt->lock); - return return_value; -@@ -196,7 +196,7 @@ backoff_delete(Backoff_Timer **btp) - PR_Lock(bt->lock); - /* Cancel any pending events in the event queue */ - if (NULL != bt->pending_event) { -- slapi_eq_cancel(bt->pending_event); -+ slapi_eq_cancel_rel(bt->pending_event); - } - PR_Unlock(bt->lock); - PR_DestroyLock(bt->lock); -diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c -index bc9ca424b..2dd74f9e7 100644 ---- a/ldap/servers/plugins/replication/repl5_connection.c -+++ b/ldap/servers/plugins/replication/repl5_connection.c -@@ -272,7 +272,7 @@ conn_delete(Repl_Connection *conn) - PR_ASSERT(NULL != conn); - PR_Lock(conn->lock); - if (conn->linger_active) { -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - /* Event was found and cancelled. Destroy the connection object. */ - destroy_it = PR_TRUE; - } else { -@@ -961,7 +961,7 @@ conn_cancel_linger(Repl_Connection *conn) - "conn_cancel_linger - %s - Canceling linger on the connection\n", - agmt_get_long_name(conn->agmt)); - conn->linger_active = PR_FALSE; -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - conn->refcnt--; - } - conn->linger_event = NULL; -@@ -1030,7 +1030,7 @@ conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - } else { - conn->linger_active = PR_TRUE; -- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); -+ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); - conn->status = STATUS_LINGERING; - } - PR_Unlock(conn->lock); -@@ -1990,7 +1990,7 @@ repl5_start_debug_timeout(int *setlevel) - Slapi_Eq_Context eqctx = 0; - if (s_debug_timeout && s_debug_level) { - time_t now = slapi_current_rel_time_t(); -- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, -+ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, - s_debug_timeout + now); - } - return eqctx; -@@ -2002,7 +2002,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) - char buf[20]; - - if (eqctx && !*setlevel) { -- (void)slapi_eq_cancel(eqctx); -+ (void)slapi_eq_cancel_rel(eqctx); - } - - if (s_debug_timeout && s_debug_level && *setlevel) { -diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -index 82e230958..2967a47f8 100644 ---- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c -+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -@@ -82,8 +82,8 @@ multimaster_mtnode_construct_replicas() - } - } - /* Wait a few seconds for everything to startup before resuming any replication tasks */ -- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), -- slapi_current_rel_time_t() + 5); -+ slapi_eq_once_rel(replica_check_for_tasks, (void *)replica_get_root(r), -+ slapi_current_rel_time_t() + 5); - } - } - } -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index c1d376c72..7102e0606 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -231,17 +231,17 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - /* ONREPL - the state update can occur before the entry is added to the DIT. - In that case the updated would fail but nothing bad would happen. The next - scheduled update would save the state */ -- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - - if (r->tombstone_reap_interval > 0) { - /* - * Reap Tombstone should be started some time after the plugin started. - * This will allow the server to fully start before consuming resources. - */ -- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_rel_time_t() + r->tombstone_reap_interval, -- 1000 * r->tombstone_reap_interval); -+ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, -+ 1000 * r->tombstone_reap_interval); - } - - done: -@@ -303,12 +303,12 @@ replica_destroy(void **arg) - */ - - if (r->repl_eqcxt_rs) { -- slapi_eq_cancel(r->repl_eqcxt_rs); -+ slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; - } - - if (r->repl_eqcxt_tr) { -- slapi_eq_cancel(r->repl_eqcxt_tr); -+ slapi_eq_cancel_rel(r->repl_eqcxt_tr); - r->repl_eqcxt_tr = NULL; - } - -@@ -1511,14 +1511,14 @@ replica_set_enabled(Replica *r, PRBool enable) - if (enable) { - if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ - { -- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - } - } else /* disable */ - { - if (r->repl_eqcxt_rs) /* event is still registerd */ - { -- slapi_eq_cancel(r->repl_eqcxt_rs); -+ slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; - } - } -@@ -3628,7 +3628,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - if (interval > 0 && r->repl_eqcxt_tr && r->tombstone_reap_interval != interval) { - int found; - -- found = slapi_eq_cancel(r->repl_eqcxt_tr); -+ found = slapi_eq_cancel_rel(r->repl_eqcxt_tr); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", - r->tombstone_reap_interval, (found ? "cancelled" : "not found")); -@@ -3636,7 +3636,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - } - r->tombstone_reap_interval = interval; - if (interval > 0 && r->repl_eqcxt_tr == NULL) { -- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -+ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, - slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -diff --git a/ldap/servers/plugins/replication/repl5_schedule.c b/ldap/servers/plugins/replication/repl5_schedule.c -index 9539f4031..ca42df561 100644 ---- a/ldap/servers/plugins/replication/repl5_schedule.c -+++ b/ldap/servers/plugins/replication/repl5_schedule.c -@@ -550,7 +550,7 @@ schedule_window_state_change_event(Schedule *sch) - wakeup_time = PRTime2time_t(tm); - - /* schedule the event */ -- sch->pending_event = slapi_eq_once(window_state_changed, sch, wakeup_time); -+ sch->pending_event = slapi_eq_once_rel(window_state_changed, sch, wakeup_time); - - timestr = get_timestring(&wakeup_time); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: Update window will %s at %s\n", -@@ -593,7 +593,7 @@ static void - unschedule_window_state_change_event(Schedule *sch) - { - if (sch->pending_event) { -- slapi_eq_cancel(sch->pending_event); -+ slapi_eq_cancel_rel(sch->pending_event); - sch->pending_event = NULL; - } - } -diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c -index ce0662544..5eca5fad1 100644 ---- a/ldap/servers/plugins/replication/windows_connection.c -+++ b/ldap/servers/plugins/replication/windows_connection.c -@@ -204,7 +204,7 @@ windows_conn_delete(Repl_Connection *conn) - PR_ASSERT(NULL != conn); - PR_Lock(conn->lock); - if (conn->linger_active) { -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - /* Event was found and cancelled. Destroy the connection object. */ - PR_Unlock(conn->lock); - destroy_it = PR_TRUE; -@@ -1052,7 +1052,7 @@ windows_conn_cancel_linger(Repl_Connection *conn) - "windows_conn_cancel_linger - %s: Cancelling linger on the connection\n", - agmt_get_long_name(conn->agmt)); - conn->linger_active = PR_FALSE; -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - conn->refcnt--; - } - conn->linger_event = NULL; -@@ -1129,7 +1129,7 @@ windows_conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - } else { - conn->linger_active = PR_TRUE; -- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); -+ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); - conn->status = STATUS_LINGERING; - } - PR_Unlock(conn->lock); -@@ -1822,8 +1822,8 @@ repl5_start_debug_timeout(int *setlevel) - - if (s_debug_timeout && s_debug_level) { - time_t now = time(NULL); -- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, -- s_debug_timeout + now); -+ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, -+ s_debug_timeout + now); - } - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= repl5_start_debug_timeout\n"); - return eqctx; -@@ -1837,7 +1837,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> repl5_stop_debug_timeout\n"); - - if (eqctx && !*setlevel) { -- (void)slapi_eq_cancel(eqctx); -+ (void)slapi_eq_cancel_rel(eqctx); - } - - if (s_debug_timeout && s_debug_level && *setlevel) { -diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c -index 3d548e5ed..c07a8180a 100644 ---- a/ldap/servers/plugins/replication/windows_inc_protocol.c -+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c -@@ -132,7 +132,7 @@ windows_inc_delete(Private_Repl_Protocol **prpp) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_inc_delete\n"); - /* First, stop the protocol if it isn't already stopped */ - /* Then, delete all resources used by the protocol */ -- rc = slapi_eq_cancel(dirsync); -+ rc = slapi_eq_cancel_rel(dirsync); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_delete - dirsync: %p, rval: %d\n", dirsync, rc); - /* if backoff is set, delete it (from EQ, as well) */ -@@ -324,12 +324,13 @@ windows_inc_run(Private_Repl_Protocol *prp) - if (interval != current_interval) { - current_interval = interval; - if (dirsync) { -- int rc = slapi_eq_cancel(dirsync); -+ int rc = slapi_eq_cancel_rel(dirsync); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - Cancelled dirsync: %p, rval: %d\n", - dirsync, rc); - } -- dirsync = slapi_eq_repeat(periodic_dirsync, (void *)prp, (time_t)0, interval); -+ dirsync = slapi_eq_repeat_rel(periodic_dirsync, (void *)prp, -+ slapi_current_rel_time_t(), interval); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - New dirsync: %p\n", dirsync); - } -diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c -index a3e16c4e1..12a395210 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_trim.c -+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c -@@ -460,10 +460,10 @@ retrocl_init_trimming(void) - ts.ts_s_initialized = 1; - retrocl_trimming = 1; - -- retrocl_trim_ctx = slapi_eq_repeat(retrocl_housekeeping, -- NULL, (time_t)0, -- /* in milliseconds */ -- trim_interval * 1000); -+ retrocl_trim_ctx = slapi_eq_repeat_rel(retrocl_housekeeping, -+ NULL, (time_t)0, -+ /* in milliseconds */ -+ trim_interval * 1000); - } - - /* -@@ -487,7 +487,7 @@ retrocl_stop_trimming(void) - */ - retrocl_trimming = 0; - if (retrocl_trim_ctx) { -- slapi_eq_cancel(retrocl_trim_ctx); -+ slapi_eq_cancel_rel(retrocl_trim_ctx); - retrocl_trim_ctx = NULL; - } - PR_DestroyLock(ts.ts_s_trim_mutex); -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 0071ed86a..7681e88ea 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -1240,7 +1240,8 @@ slapd_daemon(daemon_ports_t *ports) - slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon", - "slapd shutting down - waiting for backends to close down\n"); - -- eq_stop(); -+ eq_stop(); /* deprecated */ -+ eq_stop_rel(); - if (!in_referral_mode) { - task_shutdown(); - uniqueIDGenCleanup(); -diff --git a/ldap/servers/slapd/eventq-deprecated.c b/ldap/servers/slapd/eventq-deprecated.c -new file mode 100644 -index 000000000..71a7bf8f5 ---- /dev/null -+++ b/ldap/servers/slapd/eventq-deprecated.c -@@ -0,0 +1,483 @@ -+/** BEGIN COPYRIGHT BLOCK -+ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -+ * Copyright (C) 2020 Red Hat, Inc. -+ * All rights reserved. -+ * -+ * License: GPL (version 3 or any later version). -+ * See LICENSE for details. -+ * END COPYRIGHT BLOCK **/ -+ -+#ifdef HAVE_CONFIG_H -+#include -+#endif -+ -+ -+/* ******************************************************** -+eventq-deprecated.c - Event queue/scheduling system. -+ -+There are 3 publicly-accessible entry points: -+ -+slapi_eq_once(): cause an event to happen exactly once -+slapi_eq_repeat(): cause an event to happen repeatedly -+slapi_eq_cancel(): cancel a pending event -+ -+There is also an initialization point which must be -+called by the server to initialize the event queue system: -+eq_start(), and an entry point used to shut down the system: -+eq_stop(). -+ -+These functions are now deprecated in favor of the functions -+in eventq.c which use MONOTONIC clocks instead of REALTIME -+clocks. -+*********************************************************** */ -+ -+#include "slap.h" -+#include "prlock.h" -+#include "prcvar.h" -+#include "prinit.h" -+ -+/* -+ * Private definition of slapi_eq_context. Only this -+ * module (eventq.c) should know about the layout of -+ * this structure. -+ */ -+typedef struct _slapi_eq_context -+{ -+ time_t ec_when; -+ time_t ec_interval; -+ slapi_eq_fn_t ec_fn; -+ void *ec_arg; -+ Slapi_Eq_Context ec_id; -+ struct _slapi_eq_context *ec_next; -+} slapi_eq_context; -+ -+/* -+ * Definition of the event queue. -+ */ -+typedef struct _event_queue -+{ -+ PRLock *eq_lock; -+ PRCondVar *eq_cv; -+ slapi_eq_context *eq_queue; -+} event_queue; -+ -+/* -+ * The event queue itself. -+ */ -+static event_queue eqs = {0}; -+static event_queue *eq = &eqs; -+ -+/* -+ * Thread ID of the main thread loop -+ */ -+static PRThread *eq_loop_tid = NULL; -+ -+/* -+ * Flags used to control startup/shutdown of the event queue -+ */ -+static int eq_running = 0; -+static int eq_stopped = 0; -+static int eq_initialized = 0; -+PRLock *ss_lock = NULL; -+PRCondVar *ss_cv = NULL; -+PRCallOnceType init_once = {0}; -+ -+/* Forward declarations */ -+static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); -+static void eq_enqueue(slapi_eq_context *newec); -+static slapi_eq_context *eq_dequeue(time_t now); -+static PRStatus eq_create(void); -+ -+ -+/* ******************************************************** */ -+ -+ -+/* -+ * slapi_eq_once: cause an event to happen exactly once. -+ * -+ * Arguments: -+ * fn: the function to call -+ * arg: an argument to pass to the called function -+ * when: the time that the function should be called -+ * Returns: -+ * slapi_eq_context - a handle to an opaque object which -+ * the caller can use to refer to this particular scheduled -+ * event. -+ */ -+Slapi_Eq_Context -+slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) -+{ -+ slapi_eq_context *tmp; -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ -+ Slapi_Eq_Context id; -+ -+ tmp = eq_new(fn, arg, when, 0UL); -+ id = tmp->ec_id; -+ -+ eq_enqueue(tmp); -+ -+ /* After this point, may have */ -+ /* been freed, depending on the thread */ -+ /* scheduling. Too bad */ -+ -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "added one-time event id %p at time %ld\n", -+ id, when); -+ return (id); -+ } -+ return NULL; /* JCM - Not sure if this should be 0 or something else. */ -+} -+ -+ -+/* -+ * slapi_eq_repeat: cause an event to happen repeatedly. -+ * -+ * Arguments: -+ * fn: the function to call -+ * arg: an argument to pass to the called function -+ * when: the time that the function should first be called -+ * interval: the amount of time (in milliseconds) between -+ * successive calls to the function -+ * Returns: -+ * slapi_eq_context - a handle to an opaque object which -+ * the caller can use to refer to this particular scheduled -+ */ -+Slapi_Eq_Context -+slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+{ -+ slapi_eq_context *tmp; -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ tmp = eq_new(fn, arg, when, interval); -+ eq_enqueue(tmp); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "added repeating event id %p at time %ld, interval %lu\n", -+ tmp->ec_id, when, interval); -+ return (tmp->ec_id); -+ } -+ return NULL; /* JCM - Not sure if this should be 0 or something else. */ -+} -+ -+ -+/* -+ * slapi_eq_cancel: cancel a pending event. -+ * Arguments: -+ * ctx: the context of the event which should be de-scheduled -+ */ -+int -+slapi_eq_cancel(Slapi_Eq_Context ctx) -+{ -+ slapi_eq_context **p, *tmp = NULL; -+ int found = 0; -+ -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ p = &(eq->eq_queue); -+ while (!found && *p != NULL) { -+ if ((*p)->ec_id == ctx) { -+ tmp = *p; -+ *p = (*p)->ec_next; -+ slapi_ch_free((void **)&tmp); -+ found = 1; -+ } else { -+ p = &((*p)->ec_next); -+ } -+ } -+ PR_Unlock(eq->eq_lock); -+ } -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "cancellation of event id %p requested: %s\n", -+ ctx, found ? "cancellation succeeded" : "event not found"); -+ return found; -+} -+ -+ -+/* -+ * Construct a new ec structure -+ */ -+static slapi_eq_context * -+eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+{ -+ slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); -+ -+ retptr->ec_fn = fn; -+ retptr->ec_arg = arg; -+ /* -+ * retptr->ec_when = when < now ? now : when; -+ * we used to amke this check, but it make no sense: when queued, if when -+ * has expired, we'll be executed anyway. save the cycles, and just set -+ * ec_when. -+ */ -+ retptr->ec_when = when; -+ retptr->ec_interval = interval == 0UL ? 0UL : (interval + 999) / 1000; -+ retptr->ec_id = (Slapi_Eq_Context)retptr; -+ return retptr; -+} -+ -+ -+/* -+ * Add a new event to the event queue. -+ */ -+static void -+eq_enqueue(slapi_eq_context *newec) -+{ -+ slapi_eq_context **p; -+ -+ PR_ASSERT(NULL != newec); -+ PR_Lock(eq->eq_lock); -+ /* Insert in order (sorted by start time) in the list */ -+ for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { -+ if ((*p)->ec_when > newec->ec_when) { -+ break; -+ } -+ } -+ if (NULL != *p) { -+ newec->ec_next = *p; -+ } else { -+ newec->ec_next = NULL; -+ } -+ *p = newec; -+ PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ -+ PR_Unlock(eq->eq_lock); -+} -+ -+ -+/* -+ * If there is an event in the queue scheduled at time -+ * or before, dequeue it and return a pointer -+ * to it. Otherwise, return NULL. -+ */ -+static slapi_eq_context * -+eq_dequeue(time_t now) -+{ -+ slapi_eq_context *retptr = NULL; -+ -+ PR_Lock(eq->eq_lock); -+ if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { -+ retptr = eq->eq_queue; -+ eq->eq_queue = retptr->ec_next; -+ } -+ PR_Unlock(eq->eq_lock); -+ return retptr; -+} -+ -+ -+/* -+ * Call all events which are due to run. -+ * Note that if we've missed a schedule -+ * opportunity, we don't try to catch up -+ * by calling the function repeatedly. -+ */ -+static void -+eq_call_all(void) -+{ -+ slapi_eq_context *p; -+ time_t curtime = slapi_current_utc_time(); -+ -+ while ((p = eq_dequeue(curtime)) != NULL) { -+ /* Call the scheduled function */ -+ p->ec_fn(p->ec_when, p->ec_arg); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "Event id %p called at %ld (scheduled for %ld)\n", -+ p->ec_id, curtime, p->ec_when); -+ if (0UL != p->ec_interval) { -+ /* This is a repeating event. Requeue it. */ -+ do { -+ p->ec_when += p->ec_interval; -+ } while (p->ec_when < curtime); -+ eq_enqueue(p); -+ } else { -+ slapi_ch_free((void **)&p); -+ } -+ } -+} -+ -+ -+/* -+ * The main event queue loop. -+ */ -+static void -+eq_loop(void *arg __attribute__((unused))) -+{ -+ while (eq_running) { -+ time_t curtime = slapi_current_utc_time(); -+ PRIntervalTime timeout; -+ int until; -+ PR_Lock(eq->eq_lock); -+ while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { -+ if (!eq_running) { -+ PR_Unlock(eq->eq_lock); -+ goto bye; -+ } -+ /* Compute new timeout */ -+ if (NULL != eq->eq_queue) { -+ until = eq->eq_queue->ec_when - curtime; -+ timeout = PR_SecondsToInterval(until); -+ } else { -+ timeout = PR_INTERVAL_NO_TIMEOUT; -+ } -+ PR_WaitCondVar(eq->eq_cv, timeout); -+ curtime = slapi_current_utc_time(); -+ } -+ /* There is some work to do */ -+ PR_Unlock(eq->eq_lock); -+ eq_call_all(); -+ } -+bye: -+ eq_stopped = 1; -+ PR_Lock(ss_lock); -+ PR_NotifyAllCondVar(ss_cv); -+ PR_Unlock(ss_lock); -+} -+ -+ -+/* -+ * Allocate and initialize the event queue structures. -+ */ -+static PRStatus -+eq_create(void) -+{ -+ PR_ASSERT(NULL == eq->eq_lock); -+ if ((eq->eq_lock = PR_NewLock()) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ exit(1); -+ } -+ if ((ss_lock = PR_NewLock()) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ exit(1); -+ } -+ eq->eq_queue = NULL; -+ eq_initialized = 1; -+ return PR_SUCCESS; -+} -+ -+ -+/* -+ * eq_start: start the event queue system. -+ * -+ * This should be called exactly once. It will start a -+ * thread which wakes up periodically and schedules events. -+ */ -+void -+eq_start() -+{ -+ PR_ASSERT(eq_initialized); -+ eq_running = 1; -+ if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, -+ NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, -+ SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); -+ exit(1); -+ } -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); -+} -+ -+ -+/* -+ * eq_init: initialize the event queue system. -+ * -+ * This function should be called early in server startup. -+ * Once it has been called, the event queue will queue -+ * events, but will not fire any events. Once all of the -+ * server plugins have been started, the eq_start() -+ * function should be called, and events will then start -+ * to fire. -+ */ -+void -+eq_init() -+{ -+ if (!eq_initialized) { -+ if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); -+ } -+ } -+} -+ -+ -+/* -+ * eq_stop: shut down the event queue system. -+ * Does not return until event queue is fully -+ * shut down. -+ */ -+void -+eq_stop() -+{ -+ slapi_eq_context *p, *q; -+ -+ if (NULL == eq || NULL == eq->eq_lock) { /* never started */ -+ eq_stopped = 1; -+ return; -+ } -+ -+ eq_stopped = 0; -+ eq_running = 0; -+ /* -+ * Signal the eq thread function to stop, and wait until -+ * it acknowledges by setting eq_stopped. -+ */ -+ while (!eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ PR_NotifyAllCondVar(eq->eq_cv); -+ PR_Unlock(eq->eq_lock); -+ PR_Lock(ss_lock); -+ PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); -+ PR_Unlock(ss_lock); -+ } -+ (void)PR_JoinThread(eq_loop_tid); -+ /* -+ * XXXggood we don't free the actual event queue data structures. -+ * This is intentional, to allow enqueueing/cancellation of events -+ * even after event queue services have shut down (these are no-ops). -+ * The downside is that the event queue can't be stopped and restarted -+ * easily. -+ */ -+ PR_Lock(eq->eq_lock); -+ p = eq->eq_queue; -+ while (p != NULL) { -+ q = p->ec_next; -+ slapi_ch_free((void **)&p); -+ /* Some ec_arg could get leaked here in shutdown (e.g., replica_name) -+ * This can be fixed by specifying a flag when the context is queued. -+ * [After 6.2] -+ */ -+ p = q; -+ } -+ PR_Unlock(eq->eq_lock); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); -+} -+ -+/* -+ * return arg (ec_arg) only if the context is in the event queue -+ */ -+void * -+slapi_eq_get_arg(Slapi_Eq_Context ctx) -+{ -+ slapi_eq_context **p; -+ -+ PR_ASSERT(eq_initialized); -+ if (eq && !eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ p = &(eq->eq_queue); -+ while (p && *p != NULL) { -+ if ((*p)->ec_id == ctx) { -+ PR_Unlock(eq->eq_lock); -+ return (*p)->ec_arg; -+ } else { -+ p = &((*p)->ec_next); -+ } -+ } -+ PR_Unlock(eq->eq_lock); -+ } -+ return NULL; -+} -diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c -index e1900724f..4c39e08cf 100644 ---- a/ldap/servers/slapd/eventq.c -+++ b/ldap/servers/slapd/eventq.c -@@ -17,14 +17,14 @@ eventq.c - Event queue/scheduling system. - - There are 3 publicly-accessible entry points: - --slapi_eq_once(): cause an event to happen exactly once --slapi_eq_repeat(): cause an event to happen repeatedly --slapi_eq_cancel(): cancel a pending event -+slapi_eq_once_rel(): cause an event to happen exactly once -+slapi_eq_repeat_rel(): cause an event to happen repeatedly -+slapi_eq_cancel_rel(): cancel a pending event - - There is also an initialization point which must be - called by the server to initialize the event queue system: --eq_start(), and an entry point used to shut down the system: --eq_stop(). -+eq_start_rel(), and an entry point used to shut down the system: -+eq_stop_rel(). - *********************************************************** */ - - #include "slap.h" -@@ -60,36 +60,36 @@ typedef struct _event_queue - /* - * The event queue itself. - */ --static event_queue eqs = {0}; --static event_queue *eq = &eqs; -+static event_queue eqs_rel = {0}; -+static event_queue *eq_rel = &eqs_rel; - - /* - * Thread ID of the main thread loop - */ --static PRThread *eq_loop_tid = NULL; -+static PRThread *eq_loop_rel_tid = NULL; - - /* - * Flags used to control startup/shutdown of the event queue - */ --static int eq_running = 0; --static int eq_stopped = 0; --static int eq_initialized = 0; --static pthread_mutex_t ss_lock; --static pthread_cond_t ss_cv; --PRCallOnceType init_once = {0}; -+static int eq_rel_running = 0; -+static int eq_rel_stopped = 0; -+static int eq_rel_initialized = 0; -+static pthread_mutex_t ss_rel_lock; -+static pthread_cond_t ss_rel_cv; -+PRCallOnceType init_once_rel = {0}; - - /* Forward declarations */ --static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); --static void eq_enqueue(slapi_eq_context *newec); --static slapi_eq_context *eq_dequeue(time_t now); --static PRStatus eq_create(void); -+static slapi_eq_context *eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); -+static void eq_enqueue_rel(slapi_eq_context *newec); -+static slapi_eq_context *eq_dequeue_rel(time_t now); -+static PRStatus eq_create_rel(void); - - - /* ******************************************************** */ - - - /* -- * slapi_eq_once: cause an event to happen exactly once. -+ * slapi_eq_once_rel: cause an event to happen exactly once. - * - * Arguments: - * fn: the function to call -@@ -101,18 +101,18 @@ static PRStatus eq_create(void); - * event. - */ - Slapi_Eq_Context --slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) -+slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when) - { - slapi_eq_context *tmp; -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { - - Slapi_Eq_Context id; - -- tmp = eq_new(fn, arg, when, 0UL); -+ tmp = eq_new_rel(fn, arg, when, 0UL); - id = tmp->ec_id; - -- eq_enqueue(tmp); -+ eq_enqueue_rel(tmp); - - /* After this point, may have */ - /* been freed, depending on the thread */ -@@ -128,7 +128,7 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) - - - /* -- * slapi_eq_repeat: cause an event to happen repeatedly. -+ * slapi_eq_repeat_rel: cause an event to happen repeatedly. - * - * Arguments: - * fn: the function to call -@@ -141,13 +141,13 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) - * the caller can use to refer to this particular scheduled - */ - Slapi_Eq_Context --slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - { - slapi_eq_context *tmp; -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -- tmp = eq_new(fn, arg, when, interval); -- eq_enqueue(tmp); -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { -+ tmp = eq_new_rel(fn, arg, when, interval); -+ eq_enqueue_rel(tmp); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "added repeating event id %p at time %ld, interval %lu\n", - tmp->ec_id, when, interval); -@@ -158,20 +158,20 @@ slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval - - - /* -- * slapi_eq_cancel: cancel a pending event. -+ * slapi_eq_cancel_rel: cancel a pending event. - * Arguments: - * ctx: the context of the event which should be de-scheduled - */ - int --slapi_eq_cancel(Slapi_Eq_Context ctx) -+slapi_eq_cancel_rel(Slapi_Eq_Context ctx) - { - slapi_eq_context **p, *tmp = NULL; - int found = 0; - -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -- pthread_mutex_lock(&(eq->eq_lock)); -- p = &(eq->eq_queue); -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = &(eq_rel->eq_queue); - while (!found && *p != NULL) { - if ((*p)->ec_id == ctx) { - tmp = *p; -@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - p = &((*p)->ec_next); - } - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "cancellation of event id %p requested: %s\n", -@@ -195,7 +195,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - * Construct a new ec structure - */ - static slapi_eq_context * --eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - { - slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); - -@@ -218,14 +218,14 @@ eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - * Add a new event to the event queue. - */ - static void --eq_enqueue(slapi_eq_context *newec) -+eq_enqueue_rel(slapi_eq_context *newec) - { - slapi_eq_context **p; - - PR_ASSERT(NULL != newec); -- pthread_mutex_lock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); - /* Insert in order (sorted by start time) in the list */ -- for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { -+ for (p = &(eq_rel->eq_queue); *p != NULL; p = &((*p)->ec_next)) { - if ((*p)->ec_when > newec->ec_when) { - break; - } -@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) - newec->ec_next = NULL; - } - *p = newec; -- pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_cond_signal(&(eq_rel->eq_cv)); /* wake up scheduler thread */ -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - - -@@ -247,16 +247,16 @@ eq_enqueue(slapi_eq_context *newec) - * to it. Otherwise, return NULL. - */ - static slapi_eq_context * --eq_dequeue(time_t now) -+eq_dequeue_rel(time_t now) - { - slapi_eq_context *retptr = NULL; - -- pthread_mutex_lock(&(eq->eq_lock)); -- if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { -- retptr = eq->eq_queue; -- eq->eq_queue = retptr->ec_next; -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ if (NULL != eq_rel->eq_queue && eq_rel->eq_queue->ec_when <= now) { -+ retptr = eq_rel->eq_queue; -+ eq_rel->eq_queue = retptr->ec_next; - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - return retptr; - } - -@@ -268,12 +268,12 @@ eq_dequeue(time_t now) - * by calling the function repeatedly. - */ - static void --eq_call_all(void) -+eq_call_all_rel(void) - { - slapi_eq_context *p; - time_t curtime = slapi_current_rel_time_t(); - -- while ((p = eq_dequeue(curtime)) != NULL) { -+ while ((p = eq_dequeue_rel(curtime)) != NULL) { - /* Call the scheduled function */ - p->ec_fn(p->ec_when, p->ec_arg); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, -@@ -284,7 +284,7 @@ eq_call_all(void) - do { - p->ec_when += p->ec_interval; - } while (p->ec_when < curtime); -- eq_enqueue(p); -+ eq_enqueue_rel(p); - } else { - slapi_ch_free((void **)&p); - } -@@ -296,38 +296,38 @@ eq_call_all(void) - * The main event queue loop. - */ - static void --eq_loop(void *arg __attribute__((unused))) -+eq_loop_rel(void *arg __attribute__((unused))) - { -- while (eq_running) { -+ while (eq_rel_running) { - time_t curtime = slapi_current_rel_time_t(); - int until; - -- pthread_mutex_lock(&(eq->eq_lock)); -- while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { -- if (!eq_running) { -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ while (!((NULL != eq_rel->eq_queue) && (eq_rel->eq_queue->ec_when <= curtime))) { -+ if (!eq_rel_running) { -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - goto bye; - } - /* Compute new timeout */ -- if (NULL != eq->eq_queue) { -+ if (NULL != eq_rel->eq_queue) { - struct timespec current_time = slapi_current_rel_time_hr(); -- until = eq->eq_queue->ec_when - curtime; -+ until = eq_rel->eq_queue->ec_when - curtime; - current_time.tv_sec += until; -- pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); -+ pthread_cond_timedwait(&eq_rel->eq_cv, &eq_rel->eq_lock, ¤t_time); - } else { -- pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); -+ pthread_cond_wait(&eq_rel->eq_cv, &eq_rel->eq_lock); - } - curtime = slapi_current_rel_time_t(); - } - /* There is some work to do */ -- pthread_mutex_unlock(&(eq->eq_lock)); -- eq_call_all(); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); -+ eq_call_all_rel(); - } - bye: -- eq_stopped = 1; -- pthread_mutex_lock(&ss_lock); -- pthread_cond_broadcast(&ss_cv); -- pthread_mutex_unlock(&ss_lock); -+ eq_rel_stopped = 1; -+ pthread_mutex_lock(&ss_rel_lock); -+ pthread_cond_broadcast(&ss_rel_cv); -+ pthread_mutex_unlock(&ss_rel_lock); - } - - -@@ -335,73 +335,73 @@ bye: - * Allocate and initialize the event queue structures. - */ - static PRStatus --eq_create(void) -+eq_create_rel(void) - { - pthread_condattr_t condAttr; - int rc = 0; - - /* Init the eventq mutex and cond var */ -- if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if (pthread_mutex_init(&eq_rel->eq_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create lock: error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - if ((rc = pthread_condattr_init(&condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new condition attribute variable. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Cannot set condition attr clock. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } -- if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if ((rc = pthread_cond_init(&eq_rel->eq_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new condition variable. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - - /* Init the "ss" mutex and condition var */ -- if (pthread_mutex_init(&ss_lock, NULL) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if (pthread_mutex_init(&ss_rel_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create ss lock: error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } -- if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if ((rc = pthread_cond_init(&ss_rel_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new ss condition variable. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - pthread_condattr_destroy(&condAttr); /* no longer needed */ - -- eq->eq_queue = NULL; -- eq_initialized = 1; -+ eq_rel->eq_queue = NULL; -+ eq_rel_initialized = 1; - return PR_SUCCESS; - } - - - /* -- * eq_start: start the event queue system. -+ * eq_start_rel: start the event queue system. - * - * This should be called exactly once. It will start a - * thread which wakes up periodically and schedules events. - */ - void --eq_start() -+eq_start_rel() - { -- PR_ASSERT(eq_initialized); -- eq_running = 1; -- if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, -+ PR_ASSERT(eq_rel_initialized); -+ eq_rel_running = 1; -+ if ((eq_loop_rel_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop_rel, - NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); -+ slapi_log_err(SLAPI_LOG_ERR, "eq_start_rel", "eq_loop_rel PR_CreateThread failed\n"); - exit(1); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); -@@ -409,55 +409,55 @@ eq_start() - - - /* -- * eq_init: initialize the event queue system. -+ * eq_init_rel: initialize the event queue system. - * - * This function should be called early in server startup. - * Once it has been called, the event queue will queue - * events, but will not fire any events. Once all of the -- * server plugins have been started, the eq_start() -+ * server plugins have been started, the eq_start_rel() - * function should be called, and events will then start - * to fire. - */ - void --eq_init() -+eq_init_rel() - { -- if (!eq_initialized) { -- if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); -+ if (!eq_rel_initialized) { -+ if (PR_SUCCESS != PR_CallOnce(&init_once_rel, eq_create_rel)) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_init_rel", "eq_create_rel failed\n"); - } - } - } - - - /* -- * eq_stop: shut down the event queue system. -+ * eq_stop_rel: shut down the event queue system. - * Does not return until event queue is fully - * shut down. - */ - void --eq_stop() -+eq_stop_rel() - { - slapi_eq_context *p, *q; - -- if (NULL == eq) { /* never started */ -- eq_stopped = 1; -+ if (NULL == eq_rel) { /* never started */ -+ eq_rel_stopped = 1; - return; - } - -- eq_stopped = 0; -- eq_running = 0; -+ eq_rel_stopped = 0; -+ eq_rel_running = 0; - /* - * Signal the eq thread function to stop, and wait until -- * it acknowledges by setting eq_stopped. -+ * it acknowledges by setting eq_rel_stopped. - */ -- while (!eq_stopped) { -+ while (!eq_rel_stopped) { - struct timespec current_time = {0}; - -- pthread_mutex_lock(&(eq->eq_lock)); -- pthread_cond_broadcast(&(eq->eq_cv)); -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ pthread_cond_broadcast(&(eq_rel->eq_cv)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - -- pthread_mutex_lock(&ss_lock); -+ pthread_mutex_lock(&ss_rel_lock); - clock_gettime(CLOCK_MONOTONIC, ¤t_time); - if (current_time.tv_nsec + 100000000 > 1000000000) { - /* nanoseconds will overflow, adjust the seconds and nanoseconds */ -@@ -467,10 +467,10 @@ eq_stop() - } else { - current_time.tv_nsec += 100000000; /* 100 ms */ - } -- pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); -- pthread_mutex_unlock(&ss_lock); -+ pthread_cond_timedwait(&ss_rel_cv, &ss_rel_lock, ¤t_time); -+ pthread_mutex_unlock(&ss_rel_lock); - } -- (void)PR_JoinThread(eq_loop_tid); -+ (void)PR_JoinThread(eq_loop_rel_tid); - /* - * XXXggood we don't free the actual event queue data structures. - * This is intentional, to allow enqueueing/cancellation of events -@@ -478,8 +478,8 @@ eq_stop() - * The downside is that the event queue can't be stopped and restarted - * easily. - */ -- pthread_mutex_lock(&(eq->eq_lock)); -- p = eq->eq_queue; -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = eq_rel->eq_queue; - while (p != NULL) { - q = p->ec_next; - slapi_ch_free((void **)&p); -@@ -489,7 +489,7 @@ eq_stop() - */ - p = q; - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); - } - -@@ -497,23 +497,23 @@ eq_stop() - * return arg (ec_arg) only if the context is in the event queue - */ - void * --slapi_eq_get_arg(Slapi_Eq_Context ctx) -+slapi_eq_get_arg_rel(Slapi_Eq_Context ctx) - { - slapi_eq_context **p; - -- PR_ASSERT(eq_initialized); -- if (eq && !eq_stopped) { -- pthread_mutex_lock(&(eq->eq_lock)); -- p = &(eq->eq_queue); -+ PR_ASSERT(eq_rel_initialized); -+ if (eq_rel && !eq_rel_stopped) { -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = &(eq_rel->eq_queue); - while (p && *p != NULL) { - if ((*p)->ec_id == ctx) { -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - return (*p)->ec_arg; - } else { - p = &((*p)->ec_next); - } - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - return NULL; - } -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index 104f6826c..dbc8cec15 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -979,7 +979,8 @@ main(int argc, char **argv) - fedse_create_startOK(DSE_FILENAME, DSE_STARTOKFILE, - slapdFrontendConfig->configdir); - -- eq_init(); /* must be done before plugins started */ -+ eq_init(); /* DEPRECATED */ -+ eq_init_rel(); /* must be done before plugins started */ - - /* Start the SNMP collator if counters are enabled. */ - if (config_get_slapi_counters()) { -@@ -1035,7 +1036,8 @@ main(int argc, char **argv) - goto cleanup; - } - -- eq_start(); /* must be done after plugins started */ -+ eq_start(); /* must be done after plugins started - DEPRECATED */ -+ eq_start_rel(); /* must be done after plugins started */ - - #ifdef HPUX10 - /* HPUX linker voodoo */ -@@ -2205,10 +2207,13 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) - */ - plugin_get_plugin_dependencies(repl_plg_name, &plugin_list); - -- eq_init(); /* must be done before plugins started */ -+ eq_init(); /* must be done before plugins started - DEPRECATED */ -+ eq_init_rel(); /* must be done before plugins started */ -+ - ps_init_psearch_system(); /* must come before plugin_startall() */ - plugin_startall(argc, argv, plugin_list); -- eq_start(); /* must be done after plugins started */ -+ eq_start(); /* must be done after plugins started - DEPRECATED*/ -+ eq_start_rel(); /* must be done after plugins started */ - charray_free(plugin_list); - } - -@@ -2263,8 +2268,9 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) - charray_free(mcfg->cmd_line_instance_names); - charray_free(mcfg->db2ldif_include); - if (mcfg->db2ldif_dump_replica) { -- eq_stop(); /* event queue should be shutdown before closing -- all plugins (especailly, replication plugin) */ -+ eq_stop(); /* DEPRECATED*/ -+ eq_stop_rel(); /* event queue should be shutdown before closing -+ all plugins (especially, replication plugin) */ - plugin_closeall(1 /* Close Backends */, 1 /* Close Globals */); - } - return (return_value); -diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h -index 3acc24f03..87080dd82 100644 ---- a/ldap/servers/slapd/proto-slap.h -+++ b/ldap/servers/slapd/proto-slap.h -@@ -1322,7 +1322,6 @@ void factory_destroy_extension(int type, void *object, void *parent, void **exte - /* - * auditlog.c - */ -- - void write_audit_log_entry(Slapi_PBlock *pb); - void auditlog_hide_unhashed_pw(void); - void auditlog_expose_unhashed_pw(void); -@@ -1334,10 +1333,15 @@ void auditfaillog_expose_unhashed_pw(void); - /* - * eventq.c - */ -+void eq_init_rel(void); -+void eq_start_rel(void); -+void eq_stop_rel(void); -+/* Deprecated eventq that uses REALTIME clock instead of MONOTONIC */ - void eq_init(void); - void eq_start(void); - void eq_stop(void); - -+ - /* - * uniqueidgen.c - */ -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 55ded5eb8..f76b86e3c 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6084,7 +6084,7 @@ void slapi_lock_mutex(Slapi_Mutex *mutex); - int slapi_unlock_mutex(Slapi_Mutex *mutex); - Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); - void slapi_destroy_condvar(Slapi_CondVar *cvar); --int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); -+int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) __attribute__((deprecated)); - int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); - int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); - -@@ -8059,24 +8059,24 @@ typedef void (*slapi_eq_fn_t)(time_t when, void *arg); - * - * \param fn The function to call when the event is triggered. - * \param arg An argument to pass to the called function. -- * \param when The time that the function should be called. -+ * \param when The time that the function should be called(MONOTONIC clock). - * - * \return slapi_eq_context - */ --Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when); -+Slapi_Eq_Context slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when); - - /** - * Cause an event to happen repeatedly. - * - * \param fn The function to call when the vent is triggered. - * \param arg An argument to pass to the called function. -- * \param when The time that the function should be called. -+ * \param when The time that the function should be called(MONOTONIC clock). - * \param interval The amount of time (in milliseconds) between - * successive calls to the function. - * - * \return slapi_eq_context - */ --Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); -+Slapi_Eq_Context slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); - - /** - * Cause a scheduled event to be canceled. -@@ -8086,7 +8086,7 @@ Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsig - * \return 1 If event was found and canceled. - * \return 0 If event was not found in the queue. - */ --int slapi_eq_cancel(Slapi_Eq_Context ctx); -+int slapi_eq_cancel_rel(Slapi_Eq_Context ctx); - - /** - * Return the event's argument. -@@ -8095,7 +8095,55 @@ int slapi_eq_cancel(Slapi_Eq_Context ctx); - * - * \return A pointer to the event argument. - */ --void *slapi_eq_get_arg(Slapi_Eq_Context ctx); -+void *slapi_eq_get_arg_rel(Slapi_Eq_Context ctx); -+ -+/* -+ * These event queue functions are now DEPRECATED as they REALTIME clocks -+ * instead of the preferred MONOTONIC clocks. -+ */ -+ -+/** -+ * Cause an event to happen exactly once. -+ * -+ * \param fn The function to call when the event is triggered. -+ * \param arg An argument to pass to the called function. -+ * \param when The time that the function should be called(REALTIME clock). -+ * -+ * \return slapi_eq_context -+ */ -+Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) __attribute__((deprecated)); -+ -+/** -+ * Cause an event to happen repeatedly. -+ * -+ * \param fn The function to call when the vent is triggered. -+ * \param arg An argument to pass to the called function. -+ * \param when The time that the function should be called(REALTIME clock). -+ * \param interval The amount of time (in milliseconds) between -+ * successive calls to the function. -+ * -+ * \return slapi_eq_context -+ */ -+Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) __attribute__((deprecated)); -+ -+/** -+ * Cause a scheduled event to be canceled. -+ * -+ * \param ctx The event object to cancel -+ * -+ * \return 1 If event was found and canceled. -+ * \return 0 If event was not found in the queue. -+ */ -+int slapi_eq_cancel(Slapi_Eq_Context ctx) __attribute__((deprecated)); -+ -+/** -+ * Return the event's argument. -+ * -+ * \param ctx The event object -+ * -+ * \return A pointer to the event argument. -+ */ -+void *slapi_eq_get_arg(Slapi_Eq_Context ctx) __attribute__((deprecated)); - - /** - * Construct a full path and name of a plugin. -diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c -index 85dc4c9a8..53927934a 100644 ---- a/ldap/servers/slapd/slapi2runtime.c -+++ b/ldap/servers/slapd/slapi2runtime.c -@@ -133,7 +133,7 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - - - /* -- * Function: slapi_wait_condvar -+ * Function: slapi_wait_condvar (DEPRECATED) - * Description: behaves just like PR_WaitCondVar() except timeout is - * in seconds and microseconds instead of PRIntervalTime units. - * If timeout is NULL, this call blocks indefinitely. -@@ -145,9 +145,26 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - int - slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - { -- /* deprecated in favor of slapi_wait_condvar_pt() which requires that the -+ /* Deprecated in favor of slapi_wait_condvar_pt() which requires that the - * mutex be passed in */ -- return (0); -+ PRIntervalTime prit; -+ -+ if (cvar == NULL) { -+ return (0); -+ } -+ -+ if (timeout == NULL) { -+ prit = PR_INTERVAL_NO_TIMEOUT; -+ } else { -+ prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); -+ } -+ -+ if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { -+ return (0); -+ } -+ -+ return (1); -+ - } - - int -diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c -index 3dd3af657..d760515f4 100644 ---- a/ldap/servers/slapd/snmp_collator.c -+++ b/ldap/servers/slapd/snmp_collator.c -@@ -385,8 +385,9 @@ snmp_collator_start() - snmp_collator_init(); - - /* Arrange to be called back periodically to update the mmap'd stats file. */ -- snmp_eq_ctx = slapi_eq_repeat(snmp_collator_update, NULL, (time_t)0, -- SLAPD_SNMP_UPDATE_INTERVAL); -+ snmp_eq_ctx = slapi_eq_repeat_rel(snmp_collator_update, NULL, -+ slapi_current_rel_time_t(), -+ SLAPD_SNMP_UPDATE_INTERVAL); - return 0; - } - -@@ -411,7 +412,7 @@ snmp_collator_stop() - } - - /* Abort any pending events */ -- slapi_eq_cancel(snmp_eq_ctx); -+ slapi_eq_cancel_rel(snmp_eq_ctx); - snmp_collator_stopped = 1; - - /* acquire the semaphore */ -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 26f281cba..bded287c6 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -387,7 +387,7 @@ slapi_task_status_changed(Slapi_Task *task) - ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ - task->task_flags |= SLAPI_TASK_DESTROYING; - /* queue an event to destroy the state info */ -- slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); -+ slapi_eq_once_rel(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); - } - slapi_free_search_results_internal(pb); - slapi_pblock_destroy(pb); -diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c -index a8bd6ee6c..31384a544 100644 ---- a/ldap/servers/slapd/uuid.c -+++ b/ldap/servers/slapd/uuid.c -@@ -186,7 +186,8 @@ uuid_init(const char *configDir, const Slapi_DN *configDN, PRBool mtGen) - - /* schedule update task for multithreaded generation */ - if (_state.mtGen) -- slapi_eq_repeat(uuid_update_state, NULL, (time_t)0, UPDATE_INTERVAL); -+ slapi_eq_repeat_rel(uuid_update_state, NULL, slapi_current_rel_time_t(), -+ UPDATE_INTERVAL); - - _state.initialized = PR_TRUE; - return UUID_SUCCESS; --- -2.26.2 - diff --git a/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch b/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch deleted file mode 100644 index 1e49598..0000000 --- a/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch +++ /dev/null @@ -1,4208 +0,0 @@ -From 0f309fee0e2b337ee333d9ce80a6c64d6f7161ef Mon Sep 17 00:00:00 2001 -From: Viktor Ashirov -Date: Thu, 12 Nov 2020 17:53:09 +0100 -Subject: [PATCH] Backport tests from master branch, fix failing tests (#4425) - -Relates: #2820 - -Reviewed by: mreynolds (Thanks!) ---- - dirsrvtests/tests/suites/acl/acivattr_test.py | 50 +-- - dirsrvtests/tests/suites/acl/acl_deny_test.py | 10 +- - dirsrvtests/tests/suites/acl/acl_test.py | 26 +- - .../acl/default_aci_allows_self_write.py | 4 +- - dirsrvtests/tests/suites/acl/deladd_test.py | 54 ++-- - .../suites/acl/enhanced_aci_modrnd_test.py | 22 +- - .../suites/acl/globalgroup_part2_test.py | 36 ++- - .../tests/suites/acl/globalgroup_test.py | 16 +- - .../tests/suites/acl/keywords_part2_test.py | 30 +- - dirsrvtests/tests/suites/acl/keywords_test.py | 71 ++--- - dirsrvtests/tests/suites/acl/misc_test.py | 104 +++--- - dirsrvtests/tests/suites/acl/modrdn_test.py | 180 +++++------ - dirsrvtests/tests/suites/acl/roledn_test.py | 4 +- - .../suites/acl/selfdn_permissions_test.py | 23 +- - dirsrvtests/tests/suites/acl/syntax_test.py | 56 ++-- - dirsrvtests/tests/suites/acl/userattr_test.py | 6 +- - .../tests/suites/acl/valueacl_part2_test.py | 107 ++++--- - dirsrvtests/tests/suites/acl/valueacl_test.py | 207 ++++++------ - dirsrvtests/tests/suites/basic/basic_test.py | 23 +- - .../tests/suites/ds_logs/ds_logs_test.py | 301 ++++++++++++++---- - .../filter/rfc3673_all_oper_attrs_test.py | 23 +- - .../suites/mapping_tree/acceptance_test.py | 65 ++++ - .../be_del_and_default_naming_attr_test.py | 17 +- - .../password/pwdPolicy_attribute_test.py | 9 +- - .../suites/replication/changelog_test.py | 6 +- - .../replication/conflict_resolve_test.py | 4 +- - .../tests/suites/replication/rfc2307compat.py | 174 ++++++++++ - dirsrvtests/tests/suites/roles/__init__.py | 3 + - dirsrvtests/tests/suites/roles/basic_test.py | 83 ++--- - .../tests/suites/sasl/regression_test.py | 21 +- - .../tests/suites/syncrepl_plugin/__init__.py | 163 ++++++++++ - .../suites/syncrepl_plugin/basic_test.py | 66 ++-- - .../tests/suites/vlv/regression_test.py | 2 +- - 33 files changed, 1319 insertions(+), 647 deletions(-) - create mode 100644 dirsrvtests/tests/suites/mapping_tree/acceptance_test.py - create mode 100644 dirsrvtests/tests/suites/replication/rfc2307compat.py - create mode 100644 dirsrvtests/tests/suites/roles/__init__.py - create mode 100644 dirsrvtests/tests/suites/syncrepl_plugin/__init__.py - -diff --git a/dirsrvtests/tests/suites/acl/acivattr_test.py b/dirsrvtests/tests/suites/acl/acivattr_test.py -index 35759f36e..d55eea023 100644 ---- a/dirsrvtests/tests/suites/acl/acivattr_test.py -+++ b/dirsrvtests/tests/suites/acl/acivattr_test.py -@@ -174,18 +174,19 @@ LDAPURL_ACI = '(targetattr="*")(version 3.0; acl "url"; allow (all) userdn="ldap - '(ENG_USER, ENG_MANAGER, LDAPURL_ACI)', - ]) - def test_positive(topo, _add_user, aci_of_user, user, entry, aci): -- """ -- :id: ba6d5e9c-786b-11e8-860d-8c16451d917b -- :parametrized: yes -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. ACI role should be followed -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -+ """Positive testing of ACLs -+ -+ :id: ba6d5e9c-786b-11e8-860d-8c16451d917b -+ :parametrized: yes -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. ACI role should be followed -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed - """ - # set aci - Domain(topo.standalone, DNBASE).set("aci", aci) -@@ -225,18 +226,19 @@ def test_positive(topo, _add_user, aci_of_user, user, entry, aci): - - ]) - def test_negative(topo, _add_user, aci_of_user, user, entry, aci): -- """ -- :id: c4c887c2-786b-11e8-a328-8c16451d917b -- :parametrized: yes -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. ACI role should be followed -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -+ """Negative testing of ACLs -+ -+ :id: c4c887c2-786b-11e8-a328-8c16451d917b -+ :parametrized: yes -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. ACI role should be followed -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should not succeed - """ - # set aci - Domain(topo.standalone, DNBASE).set("aci", aci) -diff --git a/dirsrvtests/tests/suites/acl/acl_deny_test.py b/dirsrvtests/tests/suites/acl/acl_deny_test.py -index 8ea6cd27b..96d08e9da 100644 ---- a/dirsrvtests/tests/suites/acl/acl_deny_test.py -+++ b/dirsrvtests/tests/suites/acl/acl_deny_test.py -@@ -1,3 +1,11 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# - import logging - import pytest - import os -@@ -5,7 +13,7 @@ import ldap - import time - from lib389._constants import * - from lib389.topologies import topology_st as topo --from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES -+from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES - from lib389.idm.domain import Domain - - pytestmark = pytest.mark.tier1 -diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py -index 5ca86523c..4c3214650 100644 ---- a/dirsrvtests/tests/suites/acl/acl_test.py -+++ b/dirsrvtests/tests/suites/acl/acl_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2016 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -14,9 +14,8 @@ from lib389.schema import Schema - from lib389.idm.domain import Domain - from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES - from lib389.idm.organizationalrole import OrganizationalRole, OrganizationalRoles -- - from lib389.topologies import topology_m2 --from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD -+from lib389._constants import SUFFIX, DN_DM, DEFAULT_SUFFIX, PASSWORD - - pytestmark = pytest.mark.tier1 - -@@ -243,6 +242,14 @@ def moddn_setup(topology_m2): - 'userpassword': BIND_PW}) - user.create(properties=user_props, basedn=SUFFIX) - -+ # Add anonymous read aci -+ ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"*\")" % (SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = " userdn = \"ldap:///anyone\";)" -+ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(m1, SUFFIX) -+ suffix.add('aci', ACI_BODY) -+ - # DIT for staging - m1.log.info("Add {}".format(STAGING_DN)) - o_roles.create(properties={'cn': STAGING_CN, 'description': "staging DIT"}) -@@ -411,7 +418,8 @@ def test_moddn_staging_prod(topology_m2, moddn_setup, - - - def test_moddn_staging_prod_9(topology_m2, moddn_setup): -- """ -+ """Test with nsslapd-moddn-aci set to off so that MODDN requires an 'add' aci. -+ - :id: 222dd7e8-7ff1-40b8-ad26-6f8e42fbfcd9 - :setup: MMR with two masters, - M1 - staging DIT -@@ -1061,10 +1069,12 @@ def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup): - @pytest.fixture(scope="module") - def rdn_write_setup(topology_m2): - topology_m2.ms["master1"].log.info("\n\n######## Add entry tuser ########\n") -- topology_m2.ms["master1"].add_s(Entry((SRC_ENTRY_DN, { -- 'objectclass': "top person".split(), -- 'sn': SRC_ENTRY_CN, -- 'cn': SRC_ENTRY_CN}))) -+ user = UserAccount(topology_m2.ms["master1"], SRC_ENTRY_DN) -+ user_props = TEST_USER_PROPERTIES.copy() -+ user_props.update({'sn': SRC_ENTRY_CN, -+ 'cn': SRC_ENTRY_CN, -+ 'userpassword': BIND_PW}) -+ user.create(properties=user_props, basedn=SUFFIX) - - - def test_rdn_write_get_ger(topology_m2, rdn_write_setup): -diff --git a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py -index 5700abfba..9c7226b42 100644 ---- a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py -+++ b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py -@@ -21,7 +21,7 @@ pytestmark = pytest.mark.tier1 - USER_PASSWORD = "some test password" - NEW_USER_PASSWORD = "some new password" - --@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") -+@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") - def test_acl_default_allow_self_write_nsuser(topology): - """ - Testing nsusers can self write and self read. This it a sanity test -@@ -80,7 +80,7 @@ def test_acl_default_allow_self_write_nsuser(topology): - self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) - - --@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") -+@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") - def test_acl_default_allow_self_write_user(topology): - """ - Testing users can self write and self read. This it a sanity test -diff --git a/dirsrvtests/tests/suites/acl/deladd_test.py b/dirsrvtests/tests/suites/acl/deladd_test.py -index 45a66be94..afdc772d1 100644 ---- a/dirsrvtests/tests/suites/acl/deladd_test.py -+++ b/dirsrvtests/tests/suites/acl/deladd_test.py -@@ -86,8 +86,8 @@ def _add_user(request, topo): - - def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): - -- """ -- Test allow delete access to groupdn -+ """Test allow delete access to groupdn -+ - :id: 7cf15992-68ad-11e8-85af-54e1ad30572c - :setup: topo.standalone - :steps: -@@ -124,8 +124,8 @@ def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): - - def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): - -- """ -- Test to allow add access to anyone -+ """Test to allow add access to anyone -+ - :id: 5ca31cc4-68e0-11e8-8666-8c16451d917b - :setup: topo.standalone - :steps: -@@ -160,8 +160,8 @@ def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): - - def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): - -- """ -- Test to allow delete access to anyone -+ """Test to allow delete access to anyone -+ - :id: f5447c7e-68e1-11e8-84c4-8c16451d917b - :setup: server - :steps: -@@ -191,8 +191,8 @@ def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): - - def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): - -- """ -- Test to Allow delete access to != userdn -+ """Test to Allow delete access to != userdn -+ - :id: 00637f6e-68e3-11e8-92a3-8c16451d917b - :setup: server - :steps: -@@ -224,8 +224,8 @@ def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): - - def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): - -- """ -- Test to Allow delete access to != groupdn -+ """Test to Allow delete access to != groupdn -+ - :id: f58fc8b0-68e5-11e8-9313-8c16451d917b - :setup: server - :steps: -@@ -263,8 +263,8 @@ def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): - - def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): - -- """ -- Test to Allow add privilege to parent -+ """Test to Allow add privilege to parent -+ - :id: 9f099845-9dbc-412f-bdb9-19a5ea729694 - :setup: server - :steps: -@@ -299,8 +299,8 @@ def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): - - def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): - -- """ -- Test to Allow delete access to parent -+ """Test to Allow delete access to parent -+ - :id: 2dd7f624-68e7-11e8-8591-8c16451d917b - :setup: server - :steps: -@@ -333,10 +333,10 @@ def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): - new_user.delete() - - --def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): -+def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user, request): -+ -+ """Test to Allow delete access to dynamic group - -- """ -- Test to Allow delete access to dynamic group - :id: 14ffa452-68ed-11e8-a60d-8c16451d917b - :setup: server - :steps: -@@ -361,8 +361,8 @@ def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): - - # Set ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -- f'(version 3.0; acl "$tet_thistest"; ' -+ add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' -+ f'(version 3.0; acl "{request.node.name}"; ' - f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') - - # create connection with USER_WITH_ACI_DELADD -@@ -372,10 +372,10 @@ def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): - UserAccount(conn, USER_DELADD).delete() - - --def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user): -+def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user, request): -+ -+ """Test to Allow delete access to dynamic group - -- """ -- Test to Allow delete access to dynamic group - :id: 010a4f20-752a-4173-b763-f520c7a85b82 - :setup: server - :steps: -@@ -401,7 +401,7 @@ def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user) - # Set ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ - add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' -- f'(targetattr=uid)(version 3.0; acl "$tet_thistest"; ' -+ f'(targetattr="uid")(version 3.0; acl "{request.node.name}"; ' - f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') - - # create connection with USER_WITH_ACI_DELADD -@@ -411,10 +411,10 @@ def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user) - UserAccount(conn, USER_DELADD).delete() - - --def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user): -+def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user, request): -+ -+ """Test to Allow delete access to != dynamic group - -- """ -- Test to Allow delete access to != dynamic group - :id: 9ecb139d-bca8-428e-9044-fd89db5a3d14 - :setup: server - :steps: -@@ -439,7 +439,7 @@ def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user) - # Set ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ - add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' -- f'(targetattr=*)(version 3.0; acl "$tet_thistest"; ' -+ f'(targetattr="*")(version 3.0; acl "{request.node.name}"; ' - f'allow (delete) (groupdn != "ldap:///{group.dn}"); )') - - # create connection with USER_WITH_ACI_DELADD -diff --git a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py -index ca9456935..0cecde4b8 100644 ---- a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py -+++ b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2016 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -31,15 +31,13 @@ def env_setup(topology_st): - - log.info("Add a container: %s" % CONTAINER_1) - topology_st.standalone.add_s(Entry((CONTAINER_1, -- {'objectclass': 'top', -- 'objectclass': 'organizationalunit', -+ {'objectclass': ['top','organizationalunit'], - 'ou': CONTAINER_1_OU, - }))) - - log.info("Add a container: %s" % CONTAINER_2) - topology_st.standalone.add_s(Entry((CONTAINER_2, -- {'objectclass': 'top', -- 'objectclass': 'organizationalunit', -+ {'objectclass': ['top', 'organizationalunit'], - 'ou': CONTAINER_2_OU, - }))) - -@@ -75,13 +73,13 @@ def test_enhanced_aci_modrnd(topology_st, env_setup): - :id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9 - :setup: Standalone instance - :steps: -- 1. Create two containers -- 2. Create a user within "ou=test_ou_1,dc=example,dc=com" -- 3. Add an aci with a rule "cn=test_user is allowed all" within these containers -- 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to -- the "ou=test_ou_2,dc=example,dc=com" -- 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) -- 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) -+ 1. Create two containers -+ 2. Create a user within "ou=test_ou_1,dc=example,dc=com" -+ 3. Add an aci with a rule "cn=test_user is allowed all" within these containers -+ 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to -+ the "ou=test_ou_2,dc=example,dc=com" -+ 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) -+ 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) - - :expectedresults: - 1. Two containers should be created -diff --git a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py -index b10fb1b65..7474f61f0 100644 ---- a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py -+++ b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -70,6 +70,14 @@ def test_user(request, topo): - 'userPassword': PW_DM - }) - -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ suffix.add('aci', ANON_ACI) -+ - uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') - for demo1 in ['c1', 'CHILD1_GLOBAL']: - uas.create(properties={ -@@ -112,7 +120,7 @@ def test_undefined_in_group_eval_five(topo, test_user, aci_of_user): - 5. Operation should succeed - """ - -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) - # This aci should NOT allow access - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -@@ -140,7 +148,7 @@ def test_undefined_in_group_eval_six(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) - # test UNDEFINED in group - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -@@ -168,7 +176,7 @@ def test_undefined_in_group_eval_seven(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) - # test UNDEFINED in group - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -@@ -196,7 +204,7 @@ def test_undefined_in_group_eval_eight(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) - # test UNDEFINED in group - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -@@ -224,7 +232,7 @@ def test_undefined_in_group_eval_nine(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) - # test UNDEFINED in group - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -@@ -252,7 +260,7 @@ def test_undefined_in_group_eval_ten(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') - user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) - user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) -@@ -281,7 +289,7 @@ def test_undefined_in_group_eval_eleven(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') - user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) - user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) -@@ -312,7 +320,7 @@ def test_undefined_in_group_eval_twelve(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') - user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) - user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) -@@ -341,7 +349,7 @@ def test_undefined_in_group_eval_fourteen(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') - user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) - user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) - conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) -@@ -372,7 +380,7 @@ def test_undefined_in_group_eval_fifteen(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') - UserAccount(topo.standalone, NESTEDGROUP_OU_GLOBAL).add("description", DEEPUSER_GLOBAL) - # Here do the same tests for userattr with the parent keyword. - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) -@@ -399,7 +407,7 @@ def test_undefined_in_group_eval_sixteen(topo, test_user, aci_of_user): - 5. Operation should succeed - """ - domain = Domain(topo.standalone, DEFAULT_SUFFIX) -- domain.add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') -+ domain.add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') - domain.add("description", DEEPUSER_GLOBAL) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) - # Test with parent keyword with not key -@@ -427,7 +435,7 @@ def test_undefined_in_group_eval_seventeen(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') - user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) - # Test with the parent keyord - user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) -@@ -455,7 +463,7 @@ def test_undefined_in_group_eval_eighteen(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') - user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) - # Test with parent keyword with not key - user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) -diff --git a/dirsrvtests/tests/suites/acl/globalgroup_test.py b/dirsrvtests/tests/suites/acl/globalgroup_test.py -index 58c4392e5..dc51a8170 100644 ---- a/dirsrvtests/tests/suites/acl/globalgroup_test.py -+++ b/dirsrvtests/tests/suites/acl/globalgroup_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -72,6 +72,14 @@ def test_user(request, topo): - 'userPassword': PW_DM - }) - -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ suffix.add('aci', ANON_ACI) -+ - uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') - for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER1_GLOBAL', - 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: -@@ -361,7 +369,7 @@ def test_undefined_in_group_eval_two(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) - # This aci should allow access -@@ -389,7 +397,7 @@ def test_undefined_in_group_eval_three(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) - user = Domain(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) - # test UNDEFINED in group -@@ -417,7 +425,7 @@ def test_undefined_in_group_eval_four(topo, test_user, aci_of_user): - 4. Operation should succeed - 5. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) - conn = UserAccount(topo.standalone, DEEPUSER1_GLOBAL).bind(PW_DM) - # test UNDEFINED in group - user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) -diff --git a/dirsrvtests/tests/suites/acl/keywords_part2_test.py b/dirsrvtests/tests/suites/acl/keywords_part2_test.py -index c2aa9ac53..642e65bad 100644 ---- a/dirsrvtests/tests/suites/acl/keywords_part2_test.py -+++ b/dirsrvtests/tests/suites/acl/keywords_part2_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -68,7 +68,7 @@ def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): - - # Add ACI - domain = Domain(topo.standalone, DEFAULT_SUFFIX) -- domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' -+ domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci "IP aci"; ' - f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') - - # create a new connection for the test -@@ -76,12 +76,13 @@ def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): - # Perform Operation - org = OrganizationalUnit(conn, IP_OU_KEY) - org.replace("seeAlso", "cn=1") -+ - # remove the aci -- domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci ' -+ domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci ' - f'"IP aci"; allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ' - f'ip = "{ip_ip}" ;)') - # Now add aci with new ip -- domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' -+ domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")(version 3.0; aci "IP aci"; ' - f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "100.1.1.1" ;)') - - # After changing the ip user cant access data -@@ -106,10 +107,11 @@ def test_connectin_from_an_unauthorized_network(topo, add_user, aci_of_user): - """ - # Find the ip from ds logs , as we need to know the exact ip used by ds to run the instances. - ip_ip = topo.standalone.ds_access_log.match('.* connection from ')[0].split()[-1] -+ - # Add ACI - domain = Domain(topo.standalone, DEFAULT_SUFFIX) - domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "IP aci"; ' -+ f'(targetattr="*")(version 3.0; aci "IP aci"; ' - f'allow(all) userdn = "ldap:///{NETSCAPEIP_KEY}" ' - f'and ip != "{ip_ip}" ;)') - -@@ -122,7 +124,7 @@ def test_connectin_from_an_unauthorized_network(topo, add_user, aci_of_user): - # Remove the ACI - domain.ensure_removed('aci', domain.get_attr_vals('aci')[-1]) - # Add new ACI -- domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)' -+ domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")' - f'(version 3.0; aci "IP aci"; allow(all) ' - f'userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') - -@@ -148,7 +150,7 @@ def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user): - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target ="ldap:///{IP_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "IP aci"; allow(all) ' -+ f'(targetattr="*")(version 3.0; aci "IP aci"; allow(all) ' - f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)') - - # Create a new connection for this test. -@@ -177,7 +179,7 @@ def test_user_can_access_the_data_at_any_time(topo, add_user, aci_of_user): - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' - f'allow(all) userdn ="ldap:///{FULLWORKER_KEY}" and ' - f'(timeofday >= "0000" and timeofday <= "2359") ;)') - -@@ -206,7 +208,7 @@ def test_user_can_access_the_data_only_in_the_morning(topo, add_user, aci_of_use - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' - f'allow(all) userdn = "ldap:///{DAYWORKER_KEY}" ' - f'and timeofday < "1200" ;)') - -@@ -239,7 +241,7 @@ def test_user_can_access_the_data_only_in_the_afternoon(topo, add_user, aci_of_u - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' - f'allow(all) userdn = "ldap:///{NIGHTWORKER_KEY}" ' - f'and timeofday > \'1200\' ;)') - -@@ -275,7 +277,7 @@ def test_timeofday_keyword(topo, add_user, aci_of_user): - # Add ACI - domain = Domain(topo.standalone, DEFAULT_SUFFIX) - domain.add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' - f'allow(all) userdn = "ldap:///{NOWORKER_KEY}" ' - f'and timeofday = \'{now_1}\' ;)') - -@@ -312,7 +314,7 @@ def test_dayofweek_keyword_test_everyday_can_access(topo, add_user, aci_of_user) - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' - f'allow(all) userdn = "ldap:///{EVERYDAY_KEY}" and ' - f'dayofweek = "Sun, Mon, Tue, Wed, Thu, Fri, Sat" ;)') - -@@ -342,7 +344,7 @@ def test_dayofweek_keyword_today_can_access(topo, add_user, aci_of_user): - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' - f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' - f'and dayofweek = \'{today_1}\' ;)') - -@@ -371,7 +373,7 @@ def test_user_cannot_access_the_data_at_all(topo, add_user, aci_of_user): - # Add ACI - Domain(topo.standalone, - DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' -+ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' - f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' - f'and dayofweek = "$NEW_DATE" ;)') - -diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py -index 138e3ede1..0174152e3 100644 ---- a/dirsrvtests/tests/suites/acl/keywords_test.py -+++ b/dirsrvtests/tests/suites/acl/keywords_test.py -@@ -39,11 +39,11 @@ NONE_2_KEY = "uid=NONE_2_KEY,{}".format(AUTHMETHOD_OU_KEY) - - - NONE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ -- f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ -+ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ - f'allow(all) userdn = "ldap:///{NONE_1_KEY}" and authmethod = "none" ;)' - - SIMPLE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ -- f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ -+ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ - f'allow(all) userdn = "ldap:///{SIMPLE_1_KEY}" and authmethod = "simple" ;)' - - -@@ -55,8 +55,7 @@ def _add_aci(topo, name): - - - def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_of_user): -- """ -- User binds with a password and can access the data as per the ACI. -+ """User binds with a password and can access the data as per the ACI. - - :id: f6c4b6f0-7ac4-11e8-a517-8c16451d917b - :setup: Standalone Server -@@ -78,8 +77,7 @@ def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_ - - - def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_user, aci_of_user): -- """ -- User binds with a BAD password and cannot access the data . -+ """User binds with a BAD password and cannot access the data . - - :id: 0397744e-7ac5-11e8-bfb1-8c16451d917b - :setup: Standalone Server -@@ -98,8 +96,7 @@ def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_use - - - def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): -- """ -- Anonymous user cannot access the data -+ """Anonymous user cannot access the data - - :id: 0821a55c-7ac5-11e8-b214-8c16451d917b - :setup: Standalone Server -@@ -124,8 +121,7 @@ def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): - - - def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user): -- """ -- User has a password. He is authenticated but has no rigth on the data. -+ """User has a password. He is authenticated but has no rigth on the data. - - :id: 11be7ebe-7ac5-11e8-b754-8c16451d917b - :setup: Standalone Server -@@ -150,10 +146,9 @@ def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user) - - - def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user): -- """ -- The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. -+ """The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. - -- :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b -+ :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b - :setup: Standalone Server - :steps: - 1. Add test entry -@@ -175,8 +170,7 @@ def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user) - - def test_users_binds_with_a_password_and_can_access_the_data( - topo, add_user, aci_of_user): -- """ -- User binds with a password and can access the data as per the ACI. -+ """User binds with a password and can access the data as per the ACI. - - :id: 1bd01cb4-7ac5-11e8-a2f1-8c16451d917b - :setup: Standalone Server -@@ -199,8 +193,7 @@ def test_users_binds_with_a_password_and_can_access_the_data( - - - def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_user, aci_of_user): -- """ -- User binds without any password and cannot access the data -+ """User binds without any password and cannot access the data - - :id: 205777fa-7ac5-11e8-ba2f-8c16451d917b - :setup: Standalone Server -@@ -227,8 +220,7 @@ def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_us - def test_user_can_access_the_data_when_connecting_from_any_machine( - topo, add_user, aci_of_user - ): -- """ -- User can access the data when connecting from any machine as per the ACI. -+ """User can access the data when connecting from any machine as per the ACI. - - :id: 28cbc008-7ac5-11e8-934e-8c16451d917b - :setup: Standalone Server -@@ -244,7 +236,7 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX)\ - .add("aci", f'(target ="ldap:///{DNS_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' -+ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{FULLDNS_KEY}" and dns = "*" ;)') - - # Create a new connection for this test. -@@ -256,8 +248,8 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( - def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( - topo, add_user, aci_of_user - ): -- """ -- User can access the data when connecting from internal ICNC network only as per the ACI. -+ """User can access the data when connecting from internal ICNC network only as per the ACI. -+ - :id: 2cac2136-7ac5-11e8-8328-8c16451d917b - :setup: Standalone Server - :steps: -@@ -273,9 +265,9 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ - add("aci", [f'(target = "ldap:///{DNS_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "DNS aci"; ' -+ f'(targetattr="*")(version 3.0; aci "DNS aci"; ' - f'allow(all) userdn = "ldap:///{SUNDNS_KEY}" and dns = "*redhat.com" ;)', -- f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' -+ f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' - f'(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{SUNDNS_KEY}" and dns = "{dns_name}" ;)']) - -@@ -288,8 +280,7 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( - def test_user_can_access_the_data_when_connecting_from_some_network_only( - topo, add_user, aci_of_user - ): -- """ -- User can access the data when connecting from some network only as per the ACI. -+ """User can access the data when connecting from some network only as per the ACI. - - :id: 3098512a-7ac5-11e8-af85-8c16451d917b - :setup: Standalone Server -@@ -306,7 +297,7 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX)\ - .add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' -+ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' - f'and dns = "{dns_name}" ;)') - -@@ -317,8 +308,7 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( - - - def test_from_an_unauthorized_network(topo, add_user, aci_of_user): -- """ -- User cannot access the data when connecting from an unauthorized network as per the ACI. -+ """User cannot access the data when connecting from an unauthorized network as per the ACI. - - :id: 34cf9726-7ac5-11e8-bc12-8c16451d917b - :setup: Standalone Server -@@ -334,7 +324,7 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ - add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' -+ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{NETSCAPEDNS_KEY}" and dns != "red.iplanet.com" ;)') - - # Create a new connection for this test. -@@ -345,8 +335,7 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): - - def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( - topo, add_user, aci_of_user): -- """ -- User cannot access the data when connecting from an unauthorized network as per the ACI. -+ """User cannot access the data when connecting from an unauthorized network as per the ACI. - - :id: 396bdd44-7ac5-11e8-8014-8c16451d917b - :setup: Standalone Server -@@ -362,7 +351,7 @@ def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_networ - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ - add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' -- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' -+ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' - f'and dnsalias != "www.redhat.com" ;)') - -@@ -373,8 +362,8 @@ def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_networ - - - def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user, aci_of_user): -- """ -- User cannot access the data if not from a certain domain as per the ACI. -+ """User cannot access the data if not from a certain domain as per the ACI. -+ - :id: 3d658972-7ac5-11e8-930f-8c16451d917b - :setup: Standalone Server - :steps: -@@ -388,7 +377,7 @@ def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user - """ - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' -+ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' - f'(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{NODNS_KEY}" ' - f'and dns = "RAP.rock.SALSA.house.COM" ;)') -@@ -402,8 +391,7 @@ def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user - - - def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): -- """ -- Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. -+ """Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. - - :id: 41b467be-7ac5-11e8-89a3-8c16451d917b - :setup: Standalone Server -@@ -418,7 +406,7 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): - """ - # Add ACI - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' -+ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' - f'(version 3.0; aci "DNS aci"; allow(all) ' - f'userdn = "ldap:///{NODNS_KEY}" and ' - f'dnsalias = "RAP.rock.SALSA.house.COM" ;)') -@@ -434,8 +422,7 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): - @pytest.mark.bz1710848 - @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) - def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr): -- """ -- User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses -+ """User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses - - :id: 461e761e-7ac5-11e8-9ae4-8c16451d917b - :parametrized: yes -@@ -451,7 +438,7 @@ def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, - """ - # Add ACI that contains both IPv4 and IPv6 - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr=*) ' -+ add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr="*") ' - f'(version 3.0; aci "IP aci"; allow(all) ' - f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)') - -diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py -index 8f122b7a7..5f0e3eb72 100644 ---- a/dirsrvtests/tests/suites/acl/misc_test.py -+++ b/dirsrvtests/tests/suites/acl/misc_test.py -@@ -1,6 +1,6 @@ - """ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 RED Hat, Inc. -+# Copyright (C) 2020 RED Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -8,6 +8,7 @@ - # --- END COPYRIGHT BLOCK ---- - """ - -+import ldap - import os - import pytest - -@@ -21,8 +22,6 @@ from lib389.topologies import topology_st as topo - from lib389.idm.domain import Domain - from lib389.plugins import ACLPlugin - --import ldap -- - pytestmark = pytest.mark.tier1 - - PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) -@@ -37,7 +36,19 @@ def aci_of_user(request, topo): - :param request: - :param topo: - """ -- aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') -+ -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ try: -+ suffix.add('aci', ANON_ACI) -+ except ldap.TYPE_OR_VALUE_EXISTS: -+ pass -+ -+ aci_list = suffix.get_attr_vals('aci') - - def finofaci(): - """ -@@ -78,8 +89,8 @@ def clean(request, topo): - - - def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): -- """ -- Misc Test 2 accept aci in addition to acl -+ """Misc Test 2 accept aci in addition to acl -+ - :id: 8e9408fa-7db8-11e8-adaa-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -96,7 +107,7 @@ def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): - for i in [('mail', 'anujborah@okok.com'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: - user.set(i[0], i[1]) - -- aci_target = "(targetattr=givenname)" -+ aci_target = '(targetattr="givenname")' - aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') - aci_subject = 'userdn="ldap:///anyone";)' - Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) -@@ -115,9 +126,9 @@ def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): - - @pytest.mark.bz334451 - def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): -- """ -- bug 334451 : more then 40 acl will crash slapd -+ """bug 334451 : more then 40 acl will crash slapd - superseded by Bug 772778 - acl cache overflown problem with > 200 acis -+ - :id: 93a44c60-7db8-11e8-9439-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -132,7 +143,7 @@ def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): - uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') - user = uas.create_test_user() - -- aci_target = '(target ="ldap:///{}")(targetattr !="userPassword")'.format(CONTAINER_1_DELADD) -+ aci_target = '(target ="ldap:///{}")(targetattr!="userPassword")'.format(CONTAINER_1_DELADD) - # more_then_40_acl_will not crash_slapd - for i in range(40): - aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) -@@ -147,9 +158,9 @@ def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): - - @pytest.mark.bz345643 - def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): -- """ -- bug 345643 -+ """bug 345643 - Misc Test 4 search access should not include read access -+ - :id: 98ab173e-7db8-11e8-a309-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -163,7 +174,7 @@ def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): - """ - assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') - Domain(topo.standalone, DEFAULT_SUFFIX)\ -- .add("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr !="userPassword")' -+ .replace("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr != "userPassword")' - '(version 3.0;acl "anonymous access";allow (search)' - '(userdn = "ldap:///anyone");)', - f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' -@@ -176,13 +187,13 @@ def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): - conn = Anonymous(topo.standalone).bind() - # search_access_should_not_include_read_access - suffix = Domain(conn, DEFAULT_SUFFIX) -- with pytest.raises(AssertionError): -+ with pytest.raises(Exception): - assert suffix.present('aci') - - - def test_only_allow_some_targetattr(topo, clean, aci_of_user): -- """ -- Misc Test 5 only allow some targetattr (1/2) -+ """Misc Test 5 only allow some targetattr (1/2) -+ - :id: 9d27f048-7db8-11e8-a71c-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -211,17 +222,17 @@ def test_only_allow_some_targetattr(topo, clean, aci_of_user): - # aci will allow only mail targetattr - assert len(accounts.filter('(mail=*)')) == 2 - # aci will allow only mail targetattr -- assert not accounts.filter('(cn=*)') -+ assert not accounts.filter('(cn=*)', scope=1) - # with root no , blockage -- assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) == 2 -+ assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)', scope=1)) == 2 - - for i in uas.list(): - i.delete() - - --def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): -- """ -- Misc Test 6 only allow some targetattr (2/2)" -+def test_only_allow_some_targetattr_two(topo, clean, aci_of_user, request): -+ """Misc Test 6 only allow some targetattr (2/2)" -+ - :id: a188239c-7db8-11e8-903e-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -244,15 +255,15 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): - - Domain(topo.standalone, DEFAULT_SUFFIX).\ - replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' -- '(targetfilter="cn=Anuj") (version 3.0; acl "$tet_thistest"; ' -+ '(targetfilter="cn=Anuj") (version 3.0; acl "{}"; ' - 'allow (compare,read,search) ' -- '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) -+ '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name)) - - conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) - # aci will allow only mail targetattr but only for cn=Anuj - account = Accounts(conn, DEFAULT_SUFFIX) -- assert len(account.filter('(mail=*)')) == 5 -- assert not account.filter('(cn=*)') -+ assert len(account.filter('(mail=*)', scope=1)) == 5 -+ assert not account.filter('(cn=*)', scope=1) - - for i in account.filter('(mail=*)'): - assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' -@@ -261,8 +272,8 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): - conn = Anonymous(topo.standalone).bind() - # aci will allow only mail targetattr but only for cn=Anuj - account = Accounts(conn, DEFAULT_SUFFIX) -- assert len(account.filter('(mail=*)')) == 5 -- assert not account.filter('(cn=*)') -+ assert len(account.filter('(mail=*)', scope=1)) == 5 -+ assert not account.filter('(cn=*)', scope=1) - - for i in account.filter('(mail=*)'): - assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' -@@ -274,11 +285,10 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): - i.delete() - - -- - @pytest.mark.bz326000 - def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): -- """ -- Non-regression test for BUG 326000: MemberURL needs to be normalized -+ """Non-regression test for BUG 326000: MemberURL needs to be normalized -+ - :id: a5d172e6-7db8-11e8-aca7-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -291,7 +301,7 @@ def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): - 3. Operation should succeed - """ - ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) -- ou_ou.set('aci', '(targetattr= *)' -+ ou_ou.set('aci', '(targetattr="*")' - '(version 3.0; acl "tester"; allow(all) ' - 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) - -@@ -323,8 +333,8 @@ def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): - - @pytest.mark.bz624370 - def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): -- """ -- Misc 10, check that greater than 200 ACLs can be created. Bug 624370 -+ """Misc 10, check that greater than 200 ACLs can be created. Bug 624370 -+ - :id: ac020252-7db8-11e8-8652-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -355,8 +365,8 @@ def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): - - @pytest.mark.bz624453 - def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): -- """ -- Make sure the server bahaves properly with very long attribute names. Bug 624453. -+ """Make sure the server bahaves properly with very long attribute names. Bug 624453. -+ - :id: b0d31942-7db8-11e8-a833-8c16451d917b - :setup: Standalone Instance - :steps: -@@ -378,24 +388,23 @@ def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci - - - def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): -- """ -- Do bind as 201 distinct users -- Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config -- Restart the server -- Do bind as 201 distinct users -+ """Test bind as 201 distinct users -+ - :id: c0060532-7db8-11e8-a124-8c16451d917b - :setup: Standalone Instance - :steps: -- 1. Add test entry -- 2. Add ACI -- 3. User should follow ACI role -+ 1. Add test entries -+ 2. Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config -+ 3. Restart the server -+ 4. Do bind as 201 distinct users - :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -+ 1. Entries should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ 4. Operation should succeed - """ - uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) -- for i in range(50): -+ for i in range(201): - user = uas.create_test_user(uid=i, gid=i) - user.set('userPassword', PW_DM) - -@@ -408,7 +417,6 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): - for i in range(len(uas.list())): - uas.list()[i].bind(PW_DM) - -- - if __name__ == "__main__": - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s -v %s" % CURRENT_FILE) -diff --git a/dirsrvtests/tests/suites/acl/modrdn_test.py b/dirsrvtests/tests/suites/acl/modrdn_test.py -index f67f3e508..c4ae8eea5 100644 ---- a/dirsrvtests/tests/suites/acl/modrdn_test.py -+++ b/dirsrvtests/tests/suites/acl/modrdn_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -87,9 +87,9 @@ def _add_user(request, topo): - request.addfinalizer(fin) - - --def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): -- """ -- Modrdn Test 1 Allow write privilege to anyone -+def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user, request): -+ """Modrdn Test 1 Allow write privilege to anyone -+ - :id: 4406f12e-7932-11e8-9dea-8c16451d917b - :setup: server - :steps: -@@ -102,8 +102,8 @@ def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", -- '(target ="ldap:///{}")(targetattr=*)(version 3.0;acl "$tet_thistest";allow ' -- '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) -+ '(target ="ldap:///{}")(targetattr="*")(version 3.0;acl "{}";allow ' -+ '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) - conn = Anonymous(topo.standalone).bind() - # Allow write privilege to anyone - useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) -@@ -115,22 +115,22 @@ def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): - - - def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -+ """Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL -+ -+ :id: 4c0f8c00-7932-11e8-8398-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. User should follow ACI role -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed - """ -- Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL -- :id: 4c0f8c00-7932-11e8-8398-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. User should follow ACI role -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, DYNAMIC_MODRDN)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, request.node.name, DYNAMIC_MODRDN)) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) - # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL - useraccount = UserAccount(conn, USER_DELADD) -@@ -141,22 +141,22 @@ def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_u - assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn - - --def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user): -- """ -- Test for write access to naming atributes (1) -- Test that check for add writes to the new naming attr -- :id: 532fc630-7932-11e8-8924-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. User should follow ACI role -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -+def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user, request): -+ """Test for write access to naming atributes -+ Test that check for add writes to the new naming attr -+ -+ :id: 532fc630-7932-11e8-8924-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. User should follow ACI role -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) - #Test for write access to naming atributes - useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) -@@ -164,23 +164,23 @@ def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user): - useraccount.rename("uid=Jeffbo Vedder") - - --def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user): -- """ -- Test for write access to naming atributes (2) -- :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. User should follow ACI role -- 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- 4. Operation should not succeed -+def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user, request): -+ """Test for write access to naming atributes (2) -+ -+ :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. User should follow ACI role -+ 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ 4. Operation should not succeed - """ -- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) -+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) - properties = { - 'uid': 'Sam Carter1', - 'cn': 'Sam Carter1', -@@ -202,22 +202,22 @@ def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user): - - @pytest.mark.bz950351 - def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): -- """ -- Testing bug #950351: RHDS denies MODRDN access if ACI list contains any DENY rule -- Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour -- as you cannot rename the entry anymore -- :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Adding a new ou ou=People to $BASEDN -- 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN -- 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- 4. Operation should succeed -+ """RHDS denies MODRDN access if ACI list contains any DENY rule -+ Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour -+ as you cannot rename the entry anymore -+ -+ :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Adding a new ou ou=People to $BASEDN -+ 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN -+ 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ 4. Operation should succeed - """ - properties = { - 'uid': 'NEWENTRY9_MODRDN', -@@ -245,28 +245,28 @@ def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): - - - def test_renaming_target_entry(topo, _add_user, aci_of_user): -- """ -- Test for renaming target entry -- :id: 6be1d33a-7932-11e8-9115-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Create a test user entry -- 3.Create a new ou entry with an aci -- 4. Make sure uid=$MYUID has the access -- 5. Rename ou=OU0 to ou=OU1 -- 6. Create another ou=OU2 -- 7. Move ou=OU1 under ou=OU2 -- 8. Make sure uid=$MYUID still has the access -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- 4. Operation should succeed -- 5. Operation should succeed -- 6. Operation should succeed -- 7. Operation should succeed -- 8. Operation should succeed -+ """Test for renaming target entry -+ -+ :id: 6be1d33a-7932-11e8-9115-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Create a test user entry -+ 3. Create a new ou entry with an aci -+ 4. Make sure uid=$MYUID has the access -+ 5. Rename ou=OU0 to ou=OU1 -+ 6. Create another ou=OU2 -+ 7. Move ou=OU1 under ou=OU2 -+ 8. Make sure uid=$MYUID still has the access -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ 4. Operation should succeed -+ 5. Operation should succeed -+ 6. Operation should succeed -+ 7. Operation should succeed -+ 8. Operation should succeed - """ - properties = { - 'uid': 'TRAC340_MODRDN', -@@ -281,7 +281,7 @@ def test_renaming_target_entry(topo, _add_user, aci_of_user): - user.set("userPassword", "password") - ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX)) - ou.create(properties={'ou': 'OU0'}) -- ou.set('aci', '(targetattr=*)(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) -+ ou.set('aci', '(targetattr="*")(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) - conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM) - assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0') - # Test for renaming target entry -diff --git a/dirsrvtests/tests/suites/acl/roledn_test.py b/dirsrvtests/tests/suites/acl/roledn_test.py -index 227ebd95f..6ccd652cf 100644 ---- a/dirsrvtests/tests/suites/acl/roledn_test.py -+++ b/dirsrvtests/tests/suites/acl/roledn_test.py -@@ -78,10 +78,10 @@ def _add_user(request, topo): - f'(target="ldap:///{OR_RULE_ACCESS}")(targetattr="*")' - f'(version 3.0; aci "or role aci"; allow(all) ' - f'roledn = "ldap:///{ROLE1} || ldap:///{ROLE21}";)', -- f'(target="ldap:///{ALL_ACCESS}")(targetattr=*)' -+ f'(target="ldap:///{ALL_ACCESS}")(targetattr="*")' - f'(version 3.0; aci "anyone role aci"; allow(all) ' - f'roledn = "ldap:///anyone";)', -- f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr=*)' -+ f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr="*")' - f'(version 3.0; aci "not role aci"; allow(all)' - f'roledn != "ldap:///{ROLE1} || ldap:///{ROLE21}";)']) - -diff --git a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py -index af7501338..dd506a786 100644 ---- a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py -+++ b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2016 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -90,8 +90,8 @@ def test_selfdn_permission_add(topology_st, allow_user_init): - - :id: e837a9ef-be92-48da-ad8b-ebf42b0fede1 - :setup: Standalone instance, add a entry which is used to bind, -- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -- remove aci's to start with a clean slate, and add dummy entries -+ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -+ remove aci's to start with a clean slate, and add dummy entries - :steps: - 1. Check we can not ADD an entry without the proper SELFDN aci - 2. Check with the proper ACI we can not ADD with 'member' attribute -@@ -191,8 +191,8 @@ def test_selfdn_permission_search(topology_st, allow_user_init): - - :id: 06d51ef9-c675-4583-99b2-4852dbda190e - :setup: Standalone instance, add a entry which is used to bind, -- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -- remove aci's to start with a clean slate, and add dummy entries -+ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -+ remove aci's to start with a clean slate, and add dummy entries - :steps: - 1. Check we can not search an entry without the proper SELFDN aci - 2. Add proper ACI -@@ -217,7 +217,7 @@ def test_selfdn_permission_search(topology_st, allow_user_init): - topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX -- ACI_TARGETATTR = "(targetattr = *)" -+ ACI_TARGETATTR = '(targetattr="*")' - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" -@@ -241,8 +241,8 @@ def test_selfdn_permission_modify(topology_st, allow_user_init): - - :id: 97a58844-095f-44b0-9029-dd29a7d83d68 - :setup: Standalone instance, add a entry which is used to bind, -- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -- remove aci's to start with a clean slate, and add dummy entries -+ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -+ remove aci's to start with a clean slate, and add dummy entries - :steps: - 1. Check we can not modify an entry without the proper SELFDN aci - 2. Add proper ACI -@@ -272,7 +272,7 @@ def test_selfdn_permission_modify(topology_st, allow_user_init): - topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX -- ACI_TARGETATTR = "(targetattr = *)" -+ ACI_TARGETATTR = '(targetattr="*")' - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" -@@ -300,8 +300,8 @@ def test_selfdn_permission_delete(topology_st, allow_user_init): - - :id: 0ec4c0ec-e7b0-4ef1-8373-ab25aae34516 - :setup: Standalone instance, add a entry which is used to bind, -- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -- remove aci's to start with a clean slate, and add dummy entries -+ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', -+ remove aci's to start with a clean slate, and add dummy entries - :steps: - 1. Check we can not delete an entry without the proper SELFDN aci - 2. Add proper ACI -@@ -309,6 +309,7 @@ def test_selfdn_permission_delete(topology_st, allow_user_init): - :expectedresults: - 1. Operation should be successful - 2. Operation should be successful -+ 3. Operation should be successful - """ - topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") - -diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py -index c143ff7c9..b8f27480a 100644 ---- a/dirsrvtests/tests/suites/acl/syntax_test.py -+++ b/dirsrvtests/tests/suites/acl/syntax_test.py -@@ -1,12 +1,10 @@ --""" - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). - # See LICENSE for details. - # --- END COPYRIGHT BLOCK ---- --""" - - import os - import pytest -@@ -74,66 +72,66 @@ INVALID = [('test_targattrfilters_1', - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_targattrfilters_19', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), - ('test_targattrfilters_21', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), - ('test_targattrfilters_22', - f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_targattrfilters_23', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), - ('test_Missing_acl_mispel', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_Missing_acl_string', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_Wrong_version_string', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_Missing_version_string', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_Authenticate_statement', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' - f'(targetattr != "uid")' -- f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute (all)' -+ f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute (all)' - f'userdn="ldap:///anyone";)'), - ('test_Multiple_targets', - f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' -- f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_Target_set_to_self', -- f'(target = ldap:///self)(targetattr=*)' -+ f'(target = ldap:///self)(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_target_set_with_ldap_instead_of_ldap', -- f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_target_set_with_more_than_three', -- f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_target_set_with_less_than_three', -- f'(target = ldap://{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap://{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_bind_rule_set_with_less_than_three', -- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), - ('test_Use_semicolon_instead_of_comma_in_permission', -- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny ' - f'(read; search; compare; write)userdn="ldap:///anyone";)'), - ('test_Use_double_equal_instead_of_equal_in_the_target', -- f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), - ('test_use_double_equal_instead_of_equal_in_user_and_group_access', - f'(target = ldap:///{DEFAULT_SUFFIX})' -@@ -143,21 +141,21 @@ INVALID = [('test_targattrfilters_1', - f'(target = ldap:///{DEFAULT_SUFFIX})' - f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), - ('test_extra_parentheses_case_1', -- f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' -+ f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), - ('test_extra_parentheses_case_2', -- f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' - f'userdn == "ldap:///anyone";)'), - ('test_extra_parentheses_case_3', -- f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' -+ f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute ' - f'(all)userdn = "ldap:///anyone";)))'), - ('test_no_semicolon_at_the_end_of_the_aci', -- f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' -+ f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), - ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', -- f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' -+ f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), - ('test_bad_filter', - f'(target = ldap:///{DEFAULT_SUFFIX}) ' -@@ -173,14 +171,14 @@ INVALID = [('test_targattrfilters_1', - - FAILED = [('test_targattrfilters_18', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), - ('test_targattrfilters_20', - f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' -- f'(targetattr=*)' -+ f'(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), - ('test_bind_rule_set_with_more_than_three', -- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' -+ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' - f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' - f'userdn="ldap:////////anyone";)'), - ('test_Use_double_equal_instead_of_equal_in_the_targetattr', -@@ -253,7 +251,7 @@ def test_target_set_above_the_entry_test(topo): - domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) - with pytest.raises(ldap.INVALID_SYNTAX): - domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' -- f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute ' -+ f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute ' - f'(all)userdn="ldap:///anyone";)') - - -diff --git a/dirsrvtests/tests/suites/acl/userattr_test.py b/dirsrvtests/tests/suites/acl/userattr_test.py -index 542d7afc9..3a13d32dc 100644 ---- a/dirsrvtests/tests/suites/acl/userattr_test.py -+++ b/dirsrvtests/tests/suites/acl/userattr_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -55,7 +55,7 @@ def _add_user(topo): - """ - This function will create user for the test and in the end entries will be deleted . - """ -- role_aci_body = '(targetattr=*)(version 3.0; aci "role aci"; allow(all)' -+ role_aci_body = '(targetattr="*")(version 3.0; aci "role aci"; allow(all)' - # Creating OUs - ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) - ou_accounting = ous.create(properties={'ou': 'Accounting'}) -@@ -77,7 +77,7 @@ def _add_user(topo): - 'description': LEVEL_1, - 'businessCategory': LEVEL_0}) - -- inheritance_aci_body = '(targetattr=*)(version 3.0; aci "Inheritance aci"; allow(all) ' -+ inheritance_aci_body = '(targetattr="*")(version 3.0; aci "Inheritance aci"; allow(all) ' - ou_inheritance.set('aci', [f'{inheritance_aci_body} ' - f'userattr = "parent[0].businessCategory#USERDN";)', - f'{inheritance_aci_body} ' -diff --git a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py -index 5f5b1c64e..763c0b5a2 100644 ---- a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py -+++ b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py -@@ -28,6 +28,17 @@ HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) - - @pytest.fixture(scope="function") - def aci_of_user(request, topo): -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ try: -+ suffix.add('aci', ANON_ACI) -+ except ldap.TYPE_OR_VALUE_EXISTS: -+ pass -+ - aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') - - def finofaci(): -@@ -107,10 +118,10 @@ def _add_user(request, topo): - request.addfinalizer(fin) - - --def test_we_can_search_as_expected(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) -+def test_we_can_search_as_expected(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) - Test that we can search as expected -+ - :id: e845dbba-7aa9-11e8-8988-8c16451d917b - :setup: server - :steps: -@@ -124,8 +135,8 @@ def test_we_can_search_as_expected(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ - '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ -- '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ -- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) -+ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ -+ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = Anonymous(topo.standalone).bind() - # aci will allow secretary , mail , objectclass -@@ -135,11 +146,11 @@ def test_we_can_search_as_expected(topo, _add_user, aci_of_user): - assert user.get_attr_vals('objectclass') - - --def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the -+def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the - value of the attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Test search will work with targattrfilters present." -+ Test search will work with targattrfilters present. -+ - :id: f8c1ea88-7aa9-11e8-a55c-8c16451d917b - :setup: server - :steps: -@@ -153,8 +164,8 @@ def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ - '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ -- '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ -- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) -+ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ -+ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - # aci will not allow 'title', 'topdog' - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -163,11 +174,11 @@ def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): - user.add('title', 'topdog') - - --def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the -+def test_modify_with_multiple_filters(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the - value of the attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Allowed by multiple." -+ Allowed by multiple filters -+ - :id: fd9d223e-7aa9-11e8-a83b-8c16451d917b - :setup: server - :steps: -@@ -181,9 +192,9 @@ def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ - '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ -- '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ -+ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ - '(userdn = "ldap:///anyone") ;)'.format( -- DEFAULT_SUFFIX, DEFAULT_SUFFIX -+ DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name - ) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -195,11 +206,11 @@ def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): - assert user.get_attr_val('secretary') - - --def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_denied_by_multiple_filters(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Denied by multiple filters." -+ Denied by multiple filters -+ - :id: 034c6c62-7aaa-11e8-8634-8c16451d917b - :setup: server - :steps: -@@ -213,8 +224,8 @@ def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ - '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ -- '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ -- '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX) -+ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ -+ '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) - # aci will allow title some attribute only -@@ -228,11 +239,11 @@ def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): - user.add("secretary", "cn=Grenoble,dc=example,dc=com") - - --def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_allowed_add_one_attribute(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Allowed add one attribute (in presence of multiple filters)" -+ Allowed add one attribute (in presence of multiple filters) -+ - :id: 086c7f0c-7aaa-11e8-b69f-8c16451d917b - :setup: server - :steps: -@@ -245,9 +256,9 @@ def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:(secretary=cn=Meylan, {}), ' \ -- 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "$tet_thistest"; ' \ -+ 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "{}"; ' \ - 'allow (write) (userdn = "ldap:///{}") ;)'.format( -- DEFAULT_SUFFIX, DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) -+ DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) - user = UserAccount(conn, USER_DELADD) -@@ -258,12 +269,12 @@ def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): - - - def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Test not allowed add an entry" -+ Test not allowed add an entry -+ - :id: 0d0effee-7aaa-11e8-b673-8c16451d917b - :setup: server - :steps: -@@ -277,8 +288,8 @@ def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( - """ - ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)) ' \ - '&& secretary:(secretary=cn=Meylan, {}), del=title:(|(title=engineer)(title=cool dude)' \ -- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (add) userdn = "ldap:///{}";)'.format( -- DEFAULT_SUFFIX, DEFAULT_SUFFIX) -+ '(title=scum))")(version 3.0; aci "{}"; allow (add) userdn = "ldap:///{}";)'.format( -+ DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - properties = { - 'uid': 'FRED', -@@ -298,11 +309,11 @@ def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( - user.add("objectclass", "person") - - --def test_on_modrdn(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_on_modrdn(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that valuacls kick in for modrdn operation. -+ - :id: 12985dde-7aaa-11e8-abde-8c16451d917b - :setup: server - :steps: -@@ -315,8 +326,8 @@ def test_on_modrdn(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(target="ldap:///cn=*,ou=Accounting,{}")(targattrfilters = "add=cn:(|(cn=engineer)), ' \ -- 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "$tet_thistest"; ' \ -- 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) -+ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "{}"; ' \ -+ 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) - # modrdn_s is not allowed with ou=OU1 -@@ -325,11 +336,11 @@ def test_on_modrdn(topo, _add_user, aci_of_user): - useraccount.rename("ou=OU1") - - --def test_on_modrdn_allow(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the attributes being -+def test_on_modrdn_allow(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the attributes being - added (or deleted)) -- "Valueacl Test $tet_thistest Test modrdn still works (2)" -+ Test modrdn still works (2) -+ - :id: 17720562-7aaa-11e8-82ee-8c16451d917b - :setup: server - :steps: -@@ -342,8 +353,8 @@ def test_on_modrdn_allow(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(target="ldap:///{}")(targattrfilters = "add=cn:((cn=engineer)), del=cn:((cn=jonny))")' \ -- '(version 3.0; aci "$tet_thistest"; allow (write) ' \ -- 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) -+ '(version 3.0; aci "{}"; allow (write) ' \ -+ 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - properties = { - 'uid': 'jonny', -@@ -364,12 +375,12 @@ def test_on_modrdn_allow(topo, _add_user, aci_of_user): - - @pytest.mark.bz979515 - def test_targattrfilters_keyword(topo): -- """ -- Testing the targattrfilters keyword that allows access control based on the value -+ """Testing the targattrfilters keyword that allows access control based on the value - of the attributes being added (or deleted)) - "Bug #979515 - ACLs inoperative in some search scenarios [rhel-6.5]" - "Bug #979516 is a clone for DS8.2 on RHEL5.9" - "Bug #979514 is a clone for RHEL6.4 zStream errata" -+ - :id: 23f9e9d0-7aaa-11e8-b16b-8c16451d917b - :setup: server - :steps: -diff --git a/dirsrvtests/tests/suites/acl/valueacl_test.py b/dirsrvtests/tests/suites/acl/valueacl_test.py -index 54bc13452..3bbbdcabb 100644 ---- a/dirsrvtests/tests/suites/acl/valueacl_test.py -+++ b/dirsrvtests/tests/suites/acl/valueacl_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -28,6 +28,17 @@ HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) - - @pytest.fixture(scope="function") - def aci_of_user(request, topo): -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ try: -+ suffix.add('aci', ANON_ACI) -+ except ldap.TYPE_OR_VALUE_EXISTS: -+ pass -+ - aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') - - def finofaci(): -@@ -167,10 +178,10 @@ class _AddFREDWithRoot: - def test_delete_an_attribute_value_we_are_not_allowed_to_delete( - topo, _add_user, aci_of_user - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value -+ """Testing the targattrfilters keyword that allows access control based on the value - of the attributes being added (or deleted)) - Test that we can MODIFY:add an attribute value we are allowed to add -+ - :id: 7c41baa6-7aa9-11e8-9bdc-8c16451d917b - :setup: server - :steps: -@@ -192,12 +203,12 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_delete( - - - def test_donot_allow_write_access_to_title_if_value_is_not_architect( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we cannot MODIFY:add an attribute value we are not allowed to add -+ - :id: 822c607e-7aa9-11e8-b2e7-8c16451d917b - :setup: server - :steps: -@@ -210,7 +221,7 @@ def test_donot_allow_write_access_to_title_if_value_is_not_architect( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - # aci will allow to add title architect - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -221,12 +232,12 @@ def test_donot_allow_write_access_to_title_if_value_is_not_architect( - - - def test_delete_an_attribute_value_we_are_allowed_to_delete( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) -- Test that we can MODIFY:delete an attribute value we are allowed to delete, -+ Test that we can MODIFY:delete an attribute value we are allowed to delete -+ - :id: 86f36b34-7aa9-11e8-ab16-8c16451d917b - :setup: server - :steps: -@@ -239,7 +250,7 @@ def test_delete_an_attribute_value_we_are_allowed_to_delete( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - # aci will allow to delete title architect -@@ -249,12 +260,12 @@ def test_delete_an_attribute_value_we_are_allowed_to_delete( - - - def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) -- Test that we cannot MODIFY:delete an attribute value we are allowed to delete, -+ Test that we cannot MODIFY:delete an attribute value we are allowed to delete -+ - :id: 8c9f3a90-7aa9-11e8-bf2e-8c16451d917b - :setup: server - :steps: -@@ -267,7 +278,7 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "engineer").add() - # acl will not allow to delete title engineer -@@ -276,11 +287,11 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( - _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() - - --def test_allow_modify_replace(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_allow_modify_replace(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we can MODIFY:replace an attribute if we have correct add/delete rights. -+ - :id: 9148a234-7aa9-11e8-a1f1-8c16451d917b - :setup: server - :steps: -@@ -293,8 +304,8 @@ def test_allow_modify_replace(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ -- '(title=idiot))")(version 3.0; acl "$tet_thistest"; ' \ -- 'allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(title=idiot))")(version 3.0; acl "{}"; ' \ -+ 'allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "idiot").add() -@@ -305,11 +316,11 @@ def test_allow_modify_replace(topo, _add_user, aci_of_user): - _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() - - --def test_allow_modify_delete(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_allow_modify_delete(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) -- "Valueacl Test $tet_thistest Don't Allow modify:replace because of lack of delete rights" -+ Don't Allow modify:replace because of lack of delete rights -+ - :id: 962842d2-7aa9-11e8-b39e-8c16451d917b - :setup: server - :steps: -@@ -322,8 +333,8 @@ def test_allow_modify_delete(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) ' \ -- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(version 3.0; acl "{}"; allow (write) ' \ -+ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "idiot").add() -@@ -335,11 +346,11 @@ def test_allow_modify_delete(topo, _add_user, aci_of_user): - _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() - - --def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we cannot MODIFY:replace an attribute if we lack -+ - :id: 9b1e6afa-7aa9-11e8-ac5b-8c16451d917b - :setup: server - :steps: -@@ -352,8 +363,8 @@ def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) ' \ -- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(version 3.0; acl "{}"; allow (write) ' \ -+ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "idiot").add() -@@ -365,13 +376,13 @@ def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): - - - def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -- attributes being added (or deleted)) -+ """Testing the targattrfilters keyword that allows access control based on the value of the -+ attributes being added (or deleted)) - Test that we can use MODIFY:delete to entirely remove an attribute if we have del rights - to all attr values negative case tested next. -+ - :id: a0c9e0c4-7aa9-11e8-8880-8c16451d917b - :setup: server - :steps: -@@ -384,8 +395,8 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ -- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write)' \ -- ' (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(title=idiot))")(version 3.0; acl "{}"; allow (write)' \ -+ ' (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "idiot").add() -@@ -395,13 +406,13 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( - - - def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we can use MODIFY:delete to entirely remove an attribute if we have not del - rights to all attr values -+ - :id: a6862eaa-7aa9-11e8-8bf9-8c16451d917b - :setup: server - :steps: -@@ -414,8 +425,8 @@ def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ -- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ -- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ -+ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "sailor").add() -@@ -426,12 +437,12 @@ def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( - - - def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we can use MODIFY:replace to entirely remove an attribute if we have del rights to all attr values -+ - :id: ab04c7e8-7aa9-11e8-84db-8c16451d917b - :setup: server - :steps: -@@ -444,8 +455,8 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ -- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ -- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) -+ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ -+ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "architect").add() - _AddTitleWithRoot(topo, "idiot").add() -@@ -455,12 +466,12 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( - - - def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) -- Test we cannot DELETE an entry with attribute values we are not allowed delete, -+ Test we cannot DELETE an entry with attribute values we are not allowed delete -+ - :id: b525d94c-7aa9-11e8-8539-8c16451d917b - :setup: server - :steps: -@@ -474,7 +485,7 @@ def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete - """ - ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ - 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ -- 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddFREDWithRoot(topo, "engineer", "cool dude", "ANuj").create() - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -484,12 +495,12 @@ def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete - - - def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test we can DELETE an entry with attribute values we are allowed delete -+ - :id: ba138e54-7aa9-11e8-8037-8c16451d917b - :setup: server - :steps: -@@ -503,7 +514,7 @@ def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add - """ - ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ - 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ -- 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddFREDWithRoot(topo, "engineer", "cool dude", "scum").create() - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -511,12 +522,12 @@ def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add - UserAccount(conn, FRED).delete() - - --def test_allow_title(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_allow_title(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that if attr appears in targetattr and in targattrfilters then targattrfilters - applies--ie. targattrfilters is a refinement of targattrfilters. -+ - :id: beadf328-7aa9-11e8-bb08-8c16451d917b - :setup: server - :steps: -@@ -530,8 +541,8 @@ def test_allow_title(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(targetattr="title")(targattrfilters = "add=title:(|(title=engineer)' \ - '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ -- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write) ' \ -- 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ '(title=scum))")(version 3.0; aci "{}"; allow (write) ' \ -+ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "engineer").add() - _AddTitleWithRoot(topo, "cool dude").add() -@@ -541,11 +552,11 @@ def test_allow_title(topo, _add_user, aci_of_user): - _ModTitleArchitectJeffVedder(topo, "topdog", conn).add() - - --def test_allow_to_modify(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+def test_allow_to_modify(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that I can have secretary in targetattr and title in targattrfilters. -+ - :id: c32e4704-7aa9-11e8-951d-8c16451d917b - :setup: server - :steps: -@@ -559,8 +570,8 @@ def test_allow_to_modify(topo, _add_user, aci_of_user): - """ - ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ - '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ -- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write)' \ -- ' userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ '(title=scum))")(version 3.0; aci "{}"; allow (write)' \ -+ ' userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "engineer").add() - _AddTitleWithRoot(topo, "cool dude").add() -@@ -571,11 +582,11 @@ def test_allow_to_modify(topo, _add_user, aci_of_user): - assert user.get_attr_val('secretary') - - --def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) - Selfwrite does not confer "write" on a targattrfilters atribute. -+ - :id: c7b9ec2e-7aa9-11e8-ba4a-8c16451d917b - :setup: server - :steps: -@@ -589,7 +600,7 @@ def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _ad - """ - ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ - 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ -- 'aci "$tet_thistest"; allow (selfwrite) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ 'aci "{}"; allow (selfwrite) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - # aci will not allow to add selfwrite_does_not_confer_write_on_a_targattrfilters_atribute - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -598,12 +609,12 @@ def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _ad - - - def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) - Selfwrite continues to give rights to attr in targetattr list. -+ - :id: cd287680-7aa9-11e8-a8e2-8c16451d917b - :setup: server - :steps: -@@ -617,8 +628,8 @@ def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( - """ - ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ - '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ -- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (selfwrite) ' \ -- 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) -+ '(title=scum))")(version 3.0; aci "{}"; allow (selfwrite) ' \ -+ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - # selfwrite_continues_to_give_rights_to_attr_in_targetattr_list - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -627,12 +638,12 @@ def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( - - - def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we can MODIFY:add an attribute value we are allowed to add with ldap:///anyone -+ - :id: d1e1d7ac-7aa9-11e8-b968-8c16451d917b - :setup: server - :steps: -@@ -645,7 +656,7 @@ def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( - 3. Operation should succeed - """ - ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ -- '(version 3.0; acl "$tet_thistest"; allow (write) userdn = "ldap:///anyone";)' -+ '(version 3.0; acl "{}"; allow (write) userdn = "ldap:///anyone";)'.format(request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - _AddTitleWithRoot(topo, "engineer").add() - # aci will allow to add title architect -@@ -653,12 +664,12 @@ def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( - _ModTitleArchitectJeffVedder(topo, "architect", conn).add() - - --def test_hierarchy(topo, _add_user, aci_of_user): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+def test_hierarchy(topo, _add_user, aci_of_user, request): -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) - Test that with two targattrfilters in the hierarchy that the general one applies. -- This is the correct behaviour, even if it's a bit -+ This is the correct behaviour, even if it's a bit confusing -+ - :id: d7ae354a-7aa9-11e8-8b0d-8c16451d917b - :setup: server - :steps: -@@ -670,10 +681,10 @@ def test_hierarchy(topo, _add_user, aci_of_user): - 2. Operation should succeed - 3. Operation should succeed - """ -- ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ -- 'allow (write) (userdn = "ldap:///anyone") ;)' -+ ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ -+ 'allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) - ACI_BODY1 = '(targattrfilters = "add=title:(title=architect)")(version 3.0; ' \ -- 'acl "$tet_thistest"; allow (write) (userdn = "ldap:///anyone") ;)' -+ 'acl "{}"; allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY1) - _AddTitleWithRoot(topo, "engineer").add() -@@ -686,12 +697,12 @@ def test_hierarchy(topo, _add_user, aci_of_user): - - - def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of the -+ """Testing the targattrfilters keyword that allows access control based on the value of the - attributes being added (or deleted)) - Test that we can have targattrfilters and search permissions and that ldapmodify works as expected. -+ - :id: ddae7a22-7aa9-11e8-ad6b-8c16451d917b - :setup: server - :steps: -@@ -704,8 +715,8 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp - 3. Operation should succeed - """ - ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = "add=title:' \ -- '(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ -- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)' -+ '(title=arch*)")(version 3.0; acl "{}"; ' \ -+ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - # aci will allow to add title architect - conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) -@@ -713,12 +724,12 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp - - - def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two( -- topo, _add_user, aci_of_user -+ topo, _add_user, aci_of_user, request - ): -- """ -- Testing the targattrfilters keyword that allows access control based on the value of -+ """Testing the targattrfilters keyword that allows access control based on the value of - the attributes being added (or deleted)) - Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. -+ - :id: e25d116e-7aa9-11e8-81d8-8c16451d917b - :setup: server - :steps: -@@ -731,8 +742,8 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp - 3. Operation should succeed - """ - ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ -- '"add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; allow ' \ -- '(write,read,search,compare) (userdn = "ldap:///anyone") ;)' -+ '"add=title:(title=arch*)")(version 3.0; acl "{}"; allow ' \ -+ '(write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) - Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) - conn = Anonymous(topo.standalone).bind() - user = UserAccount(conn, USER_DELADD) -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 02b73ee85..97908c31c 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -7,10 +7,6 @@ - # --- END COPYRIGHT BLOCK --- - # - --""" -- :Requirement: Basic Directory Server Operations --""" -- - from subprocess import check_output, PIPE, run - from lib389 import DirSrv - from lib389.idm.user import UserAccounts -@@ -255,11 +251,11 @@ def test_basic_import_export(topology_st, import_example_ldif): - """ - - log.info('Running test_basic_import_export...') -- - # - # Test online/offline LDIF imports - # - topology_st.standalone.start() -+ # topology_st.standalone.config.set('nsslapd-errorlog-level', '1') - - # Generate a test ldif (50k entries) - log.info("Generating LDIF...") -@@ -267,6 +263,7 @@ def test_basic_import_export(topology_st, import_example_ldif): - import_ldif = ldif_dir + '/basic_import.ldif' - dbgen_users(topology_st.standalone, 50000, import_ldif, DEFAULT_SUFFIX) - -+ - # Online - log.info("Importing LDIF online...") - import_task = ImportTask(topology_st.standalone) -@@ -937,7 +934,7 @@ def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): - :id: c7831e04-f458-4e23-83c7-b6f66109f639 - :parametrized: yes - :setup: Standalone instance and we are using rootdse_attr fixture which --adds nsslapd-return-default-opattr attr with value of one operation attribute. -+ adds nsslapd-return-default-opattr attr with value of one operation attribute. - - :steps: - 1. Make an ldapsearch for rootdse attribute -@@ -1003,7 +1000,7 @@ def test_basic_anonymous_search(topology_st, create_users): - @pytest.mark.bz915801 - def test_search_original_type(topology_st, create_users): - """Test ldapsearch returning original attributes -- using nsslapd-search-return-original-type-switch -+ using nsslapd-search-return-original-type-switch - - :id: d7831d04-f558-4e50-93c7-b6f77109f640 - :setup: Standalone instance -@@ -1095,7 +1092,7 @@ def test_critical_msg_on_empty_range_idl(topology_st): - :setup: Standalone instance - :steps: - 1. Create an index for internationalISDNNumber. (attribute chosen because it is -- unlikely that previous tests used it) -+ unlikely that previous tests used it) - 2. telephoneNumber being indexed by default create 20 users without telephoneNumber - 3. add a telephoneNumber value and delete it to trigger an empty index database - 4. Do a search that triggers a range lookup on empty telephoneNumber -@@ -1105,7 +1102,7 @@ def test_critical_msg_on_empty_range_idl(topology_st): - 2. This should pass - 3. This should pass - 4. This should pass on normal build but could abort a debug build -- 4. This should pass -+ 5. This should pass - """ - indexedAttr = 'internationalISDNNumber' - -@@ -1206,7 +1203,7 @@ def test_ldbm_modification_audit_log(topology_st): - assert conn.searchAuditLog('%s: %s' % (attr, VALUE)) - - --@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.0.0'), -+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), - reason="This test is only required if perl is enabled, and requires root.") - def test_dscreate(request): - """Test that dscreate works, we need this for now until setup-ds.pl is -@@ -1356,7 +1353,7 @@ sample_entries = yes - return inst - - --@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), -+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), - reason="This test is only required with new admin cli, and requires root.") - @pytest.mark.bz1748016 - @pytest.mark.ds50581 -@@ -1367,7 +1364,7 @@ def test_dscreate_ldapi(dscreate_long_instance): - :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f - :setup: None - :steps: -- 1. create an instance with a long serverId name, that open a ldapi connection -+ 1. Ccreate an instance with a long serverId name, that open a ldapi connection - 2. Connect with ldapi, that hit 50581 and crash the instance - :expectedresults: - 1. Should succeeds -@@ -1378,7 +1375,7 @@ def test_dscreate_ldapi(dscreate_long_instance): - log.info(root_dse.get_supported_ctrls()) - - --@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), -+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), - reason="This test is only required with new admin cli, and requires root.") - @pytest.mark.bz1715406 - @pytest.mark.ds50923 -diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py -index 94686f5f2..d67bcb13e 100644 ---- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py -+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py -@@ -1,25 +1,26 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2015 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - # -+from decimal import * - import os - import logging - import pytest --import subprocess - from lib389._mapped_object import DSLdapObject - from lib389.topologies import topology_st - from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions - from lib389.idm.user import UserAccounts - from lib389.idm.group import Groups - from lib389.idm.organizationalunit import OrganizationalUnits --from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, DN_CONFIG, HOST_STANDALONE, PORT_STANDALONE, DN_DM, PASSWORD --from lib389.utils import ds_is_older -+from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL -+from lib389.utils import ds_is_older, ds_is_newer - import ldap - import glob -+import re - - pytestmark = pytest.mark.tier1 - -@@ -30,7 +31,6 @@ PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled' - PLUGIN_LOGGING = 'nsslapd-plugin-logging' - USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX - -- - def add_users(topology_st, users_num): - users = UserAccounts(topology_st, DEFAULT_SUFFIX) - log.info('Adding %d users' % users_num) -@@ -161,6 +161,20 @@ def clean_access_logs(topology_st, request): - - return clean_access_logs - -+@pytest.fixture(scope="function") -+def remove_users(topology_st, request): -+ def _remove_users(): -+ topo = topology_st.standalone -+ users = UserAccounts(topo, DEFAULT_SUFFIX) -+ entries = users.list() -+ assert len(entries) > 0 -+ -+ log.info("Removing all added users") -+ for entry in entries: -+ delete_obj(entry) -+ -+ request.addfinalizer(_remove_users) -+ - - def set_audit_log_config_values(topology_st, request, enabled, logsize): - topo = topology_st.standalone -@@ -181,6 +195,17 @@ def set_audit_log_config_values(topology_st, request, enabled, logsize): - def set_audit_log_config_values_to_rotate(topology_st, request): - set_audit_log_config_values(topology_st, request, 'on', '1') - -+@pytest.fixture(scope="function") -+def disable_access_log_buffering(topology_st, request): -+ log.info('Disable access log buffering') -+ topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') -+ def fin(): -+ log.info('Enable access log buffering') -+ topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'on') -+ -+ request.addfinalizer(fin) -+ -+ return disable_access_log_buffering - - @pytest.mark.bz1273549 - def test_check_default(topology_st): -@@ -226,11 +251,11 @@ def test_plugin_set_invalid(topology_st): - - log.info('test_plugin_set_invalid - Expect to fail with junk value') - with pytest.raises(ldap.OPERATIONS_ERROR): -- result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') -+ topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') - - - @pytest.mark.bz1273549 --def test_log_plugin_on(topology_st): -+def test_log_plugin_on(topology_st, remove_users): - """Check access logs for millisecond, when - nsslapd-logging-hr-timestamps-enabled=ON - -@@ -266,7 +291,7 @@ def test_log_plugin_on(topology_st): - - - @pytest.mark.bz1273549 --def test_log_plugin_off(topology_st): -+def test_log_plugin_off(topology_st, remove_users): - """Milliseconds should be absent from access logs when - nsslapd-logging-hr-timestamps-enabled=OFF - -@@ -303,6 +328,7 @@ def test_log_plugin_off(topology_st): - topology_st.standalone.deleteAccessLogs() - - # Now generate some fresh logs -+ add_users(topology_st.standalone, 10) - search_users(topology_st.standalone) - - log.info('Restart the server to flush the logs') -@@ -317,8 +343,9 @@ def test_log_plugin_off(topology_st): - @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") - @pytest.mark.bz1358706 - @pytest.mark.ds49029 --def test_internal_log_server_level_0(topology_st, clean_access_logs): -+def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering): - """Tests server-initiated internal operations -+ - :id: 798d06fe-92e8-4648-af66-21349c20638e - :setup: Standalone instance - :steps: -@@ -362,22 +389,23 @@ def test_internal_log_server_level_0(topology_st, clean_access_logs): - @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") - @pytest.mark.bz1358706 - @pytest.mark.ds49029 --def test_internal_log_server_level_4(topology_st, clean_access_logs): -+def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_access_log_buffering): - """Tests server-initiated internal operations -+ - :id: a3500e47-d941-4575-b399-e3f4b49bc4b6 - :setup: Standalone instance - :steps: - 1. Set nsslapd-plugin-logging to on - 2. Configure access log level to only 4 - 3. Check the access logs, it should contain info about MOD operation of cn=config and other -- internal operations should have the conn field set to Internal -- and all values inside parenthesis set to 0. -+ internal operations should have the conn field set to Internal -+ and all values inside parenthesis set to 0. - :expectedresults: - 1. Operation should be successful - 2. Operation should be successful - 3. Access log should contain correct internal log formats with cn=config modification: -- "(Internal) op=2(1)(1)" -- "conn=Internal(0)" -+ "(Internal) op=2(1)(1)" -+ "conn=Internal(0)" - """ - - topo = topology_st.standalone -@@ -398,8 +426,8 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs): - log.info("Check if access log contains internal MOD operation in correct format") - # (Internal) op=2(2)(1) SRCH base="cn=config - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') -- # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') -+ # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries= -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=.*') - - log.info("Check if the other internal operations have the correct format") - # conn=Internal(0) op=0 -@@ -411,8 +439,9 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs): - @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") - @pytest.mark.bz1358706 - @pytest.mark.ds49029 --def test_internal_log_level_260(topology_st, add_user_log_level_260): -+def test_internal_log_level_260(topology_st, add_user_log_level_260, disable_access_log_buffering): - """Tests client initiated operations when automember plugin is enabled -+ - :id: e68a303e-c037-42b2-a5a0-fbea27c338a9 - :setup: Standalone instance with internal operation - logging on and nsslapd-plugin-logging to on -@@ -465,9 +494,10 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): - # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" - assert topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' - 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') -- # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -- 'ou=branch1,dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -+ 'ou=branch1,dc=example,dc=com".*') - # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') - # op=12 RESULT err=0 tag=109 -@@ -476,9 +506,10 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): - log.info("Check the access logs for DEL operation of the user") - # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" - assert topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') -- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -- 'dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -+ 'dc=example,dc=com".*') - # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') - # op=15 RESULT err=0 tag=107 -@@ -492,8 +523,9 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): - @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") - @pytest.mark.bz1358706 - @pytest.mark.ds49029 --def test_internal_log_level_131076(topology_st, add_user_log_level_131076): -+def test_internal_log_level_131076(topology_st, add_user_log_level_131076, disable_access_log_buffering): - """Tests client-initiated operations while referential integrity plugin is enabled -+ - :id: 44836ac9-dabd-4a8c-abd5-ecd7c2509739 - :setup: Standalone instance - Configure access log level to - 131072 + 4 -@@ -547,9 +579,10 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): - # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" - assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' - 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') -- # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -- 'ou=branch1,dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -+ 'ou=branch1,dc=example,dc=com".*') - # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') - # op=12 RESULT err=0 tag=109 -@@ -558,9 +591,10 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): - log.info("Check the access logs for DEL operation of the user") - # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" - assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') -- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -- 'dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -+ 'dc=example,dc=com".*') - # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') - # op=15 RESULT err=0 tag=107 -@@ -574,8 +608,9 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): - @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") - @pytest.mark.bz1358706 - @pytest.mark.ds49029 --def test_internal_log_level_516(topology_st, add_user_log_level_516): -+def test_internal_log_level_516(topology_st, add_user_log_level_516, disable_access_log_buffering): - """Tests client initiated operations when referential integrity plugin is enabled -+ - :id: bee1d681-763d-4fa5-aca2-569cf93f8b71 - :setup: Standalone instance - Configure access log level to - 512+4 -@@ -624,34 +659,34 @@ def test_internal_log_level_516(topology_st, add_user_log_level_516): - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') - # (Internal) op=10(1)(1) RESULT err=0 tag=48 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') -- # op=10 RESULT err=0 tag=105 -- assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') - - log.info("Check the access logs for MOD operation of the user") - # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' - # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" - assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' - 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') -- # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -- 'ou=branch1,dc=example,dc=com".*') -- # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' -- 'ou=branch1,dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' -+ 'ou=branch1,dc=example,dc=com".*') -+ # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' -+ 'ou=branch1,dc=example,dc=com".*') - # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') -- # op=12 RESULT err=0 tag=109 -- assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') -+ # op=12 RESULT err=0 tag=48 -+ assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=48.*') - - log.info("Check the access logs for DEL operation of the user") - # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" - assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') -- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -- 'dc=example,dc=com".*') -- # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" -- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' -- 'dc=example,dc=com".*') -+ if ds_is_older(('1.4.3.9', '1.4.4.3')): -+ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' -+ 'dc=example,dc=com".*') -+ # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" -+ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' -+ 'dc=example,dc=com".*') - # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 - assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') - # op=15 RESULT err=0 tag=107 -@@ -698,14 +733,13 @@ def test_access_log_truncated_search_message(topology_st, clean_access_logs): - assert not topo.ds_access_log.match(r'.*cn500.*') - - -- -+@pytest.mark.skipif(ds_is_newer("1.4.3"), reason="rsearch was removed") - @pytest.mark.xfail(ds_is_older('1.4.2.0'), reason="May fail because of bug 1732053") - @pytest.mark.bz1732053 - @pytest.mark.ds50510 - def test_etime_at_border_of_second(topology_st, clean_access_logs): - topo = topology_st.standalone - -- - prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') - - cmd = [prog] -@@ -741,11 +775,167 @@ def test_etime_at_border_of_second(topology_st, clean_access_logs): - assert not invalid_etime - - -+@pytest.mark.skipif(ds_is_older('1.3.10.1', '1.4.1'), reason="Fail because of bug 1749236") -+@pytest.mark.bz1749236 -+def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): -+ """Test that the etime reported in the access log has a correct order of magnitude -+ -+ :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6 -+ :setup: Standalone instance -+ :steps: -+ 1. Unset log buffering for the access log -+ 2. Delete potential existing access logs -+ 3. Add users -+ 4. Search users -+ 5. Restart the server to flush the logs -+ 6. Parse the access log looking for the SRCH operation log -+ 7. From the SRCH string get the start time and op number of the operation -+ 8. From the op num find the associated RESULT string in the access log -+ 9. From the RESULT string get the end time and the etime for the operation -+ 10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime -+ :expectedresults: -+ 1. access log buffering is off -+ 2. Previously existing access logs are deleted -+ 3. Users are successfully added -+ 4. Search operation is successful -+ 5. Server is restarted and logs are flushed -+ 6. SRCH operation log string is catched -+ 7. start time and op number are collected -+ 8. RESULT string is catched from the access log -+ 9. end time and etime are collected -+ 10. ratio between calculated elapsed time and logged etime is less or equal to 1 -+ """ -+ -+ DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) -+ -+ log.info('add_users') -+ add_users(topology_st.standalone, 30) -+ -+ log.info ('search users') -+ search_users(topology_st.standalone) -+ -+ log.info('parse the access logs to get the SRCH string') -+ # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com -+ search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] -+ assert len(search_str) > 0 -+ -+ # the search_str returned looks like : -+ # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" -+ -+ log.info('get the operation start time from the SRCH string') -+ # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above -+ start_time = (search_str.split()[0]).split(':')[3] -+ -+ log.info('get the OP number from the SRCH string') -+ # Here we are getting the op number, 'op=93' in the above example -+ op_num = search_str.split()[3] -+ -+ log.info('get the RESULT string matching the SRCH OP number') -+ # Here we are looking at the RESULT string for the above search op, 'op=93' in this example -+ result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] -+ assert len(result_str) > 0 -+ -+ # The result_str returned looks like : -+ # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017 -+ # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077 -+ -+ log.info('get the operation end time from the RESULT string') -+ # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example -+ end_time = (result_str.split()[0]).split(':')[3] -+ -+ log.info('get the logged etime for the operation from the RESULT string') -+ # Here we are getting the etime value, '0.005723017' in the example above -+ if ds_is_older('1.4.3.8'): -+ etime = result_str.split()[8].split('=')[1][:-3] -+ else: -+ etime = result_str.split()[10].split('=')[1][:-3] -+ -+ log.info('Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1') -+ etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime) -+ assert etime_ratio <= 1 -+ -+ -+@pytest.mark.skipif(ds_is_older('1.4.3.8'), reason="Fail because of bug 1850275") -+@pytest.mark.bz1850275 -+def test_optime_and_wtime_keywords(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): -+ """Test that the new optime and wtime keywords are present in the access log and have correct values -+ -+ :id: dfb4a49d-1cfc-400e-ba43-c107f58d62cf -+ :setup: Standalone instance -+ :steps: -+ 1. Unset log buffering for the access log -+ 2. Delete potential existing access logs -+ 3. Add users -+ 4. Search users -+ 5. Parse the access log looking for the SRCH operation log -+ 6. From the SRCH string get the op number of the operation -+ 7. From the op num find the associated RESULT string in the access log -+ 8. Search for the wtime optime keywords in the RESULT string -+ 9. From the RESULT string get the wtime, optime and etime values for the operation -+ 10. Check that optime + wtime is approximatively etime -+ :expectedresults: -+ 1. access log buffering is off -+ 2. Previously existing access logs are deleted -+ 3. Users are successfully added -+ 4. Search operation is successful -+ 5. SRCH operation log string is catched -+ 6. op number is collected -+ 7. RESULT string is catched from the access log -+ 8. wtime and optime keywords are collected -+ 9. wtime, optime and etime values are collected -+ 10. (optime + wtime) =~ etime -+ """ -+ -+ log.info('add_users') -+ add_users(topology_st.standalone, 30) -+ -+ log.info ('search users') -+ search_users(topology_st.standalone) -+ -+ log.info('parse the access logs to get the SRCH string') -+ # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com -+ search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] -+ assert len(search_str) > 0 -+ -+ # the search_str returned looks like : -+ # [22/Oct/2020:09:47:11.951316798 -0400] conn=1 op=96 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" -+ -+ log.info('get the OP number from the SRCH string') -+ # Here we are getting the op number, 'op=96' in the above example -+ op_num = search_str.split()[3] -+ -+ log.info('get the RESULT string matching the SRCH op number') -+ # Here we are looking at the RESULT string for the above search op, 'op=96' in this example -+ result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] -+ assert len(result_str) > 0 -+ -+ # The result_str returned looks like : -+ # [22/Oct/2020:09:47:11.963276018 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000180294 optime=0.011966632 etime=0.012141311 -+ log.info('Search for the wtime keyword in the RESULT string') -+ assert re.search('wtime', result_str) -+ -+ log.info('get the wtime value from the RESULT string') -+ wtime_value = result_str.split()[8].split('=')[1][:-3] -+ -+ log.info('Search for the optime keyword in the RESULT string') -+ assert re.search('optime', result_str) -+ -+ log.info('get the optime value from the RESULT string') -+ optime_value = result_str.split()[9].split('=')[1][:-3] -+ -+ log.info('get the etime value from the RESULT string') -+ etime_value = result_str.split()[10].split('=')[1][:-3] -+ -+ log.info('Check that (wtime + optime) is approximately equal to etime i.e. their ratio is 1') -+ etime_ratio = (Decimal(wtime_value) + Decimal(optime_value)) // Decimal(etime_value) -+ assert etime_ratio == 1 -+ -+ - @pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461") - @pytest.mark.bz1662461 - @pytest.mark.ds50428 - @pytest.mark.ds49969 --def test_log_base_dn_when_invalid_attr_request(topology_st): -+def test_log_base_dn_when_invalid_attr_request(topology_st, disable_access_log_buffering): - """Test that DS correctly logs the base dn when a search with invalid attribute request is performed - - :id: 859de962-c261-4ffb-8705-97bceab1ba2c -@@ -753,7 +943,7 @@ def test_log_base_dn_when_invalid_attr_request(topology_st): - :steps: - 1. Disable the accesslog-logbuffering config parameter - 2. Delete the previous access log -- 3. Perform a base search on the DEFAULT_SUFFIX, using invalid "" "" attribute request -+ 3. Perform a base search on the DEFAULT_SUFFIX, using ten empty attribute requests - 4. Check the access log file for 'invalid attribute request' - 5. Check the access log file for 'SRCH base="\(null\)"' - 6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"' -@@ -768,17 +958,14 @@ def test_log_base_dn_when_invalid_attr_request(topology_st): - - entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) - -- log.info('Set accesslog logbuffering to off to get the log in real time') -- topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') -- - log.info('delete the previous access logs to get a fresh new one') - topology_st.standalone.deleteAccessLogs() - - log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request") -- log.info("A Protocol error exception should be raised, see https://pagure.io/389-ds-base/issue/49969") -- # A ldap.PROTOCOL_ERROR exception is expected -+ log.info("A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028") -+ # A ldap.PROTOCOL_ERROR exception is expected after 10 empty values - with pytest.raises(ldap.PROTOCOL_ERROR): -- assert entry.get_attrs_vals_utf8(['', '']) -+ assert entry.get_attrs_vals_utf8(['', '', '', '', '', '', '', '', '', '', '']) - - # Search for appropriate messages in the access log - log.info('Check the access logs for correct messages') -diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -index db2be9f67..c882bea5f 100644 ---- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -@@ -11,6 +11,7 @@ from lib389.tasks import * - from lib389.utils import * - from lib389.topologies import topology_st - from lib389.idm.user import UserAccounts -+from lib389.idm.domain import Domain - - from lib389._constants import DN_DM, DEFAULT_SUFFIX, DN_CONFIG, PASSWORD - -@@ -26,15 +27,15 @@ TEST_USER_PWD = 'all_attrs_test' - TEST_PARAMS = [(DN_ROOT, False, [ - 'aci', 'createTimestamp', 'creatorsName', - 'modifiersName', 'modifyTimestamp', 'namingContexts', -- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry', -+ 'nsBackendSuffix', 'subschemaSubentry', - 'supportedControl', 'supportedExtension', - 'supportedFeatures', 'supportedLDAPVersion', - 'supportedSASLMechanisms', 'vendorName', 'vendorVersion' --]), -+ ]), - (DN_ROOT, True, [ - 'createTimestamp', 'creatorsName', - 'modifiersName', 'modifyTimestamp', 'namingContexts', -- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry', -+ 'nsBackendSuffix', 'subschemaSubentry', - 'supportedControl', 'supportedExtension', - 'supportedFeatures', 'supportedLDAPVersion', - 'supportedSASLMechanisms', 'vendorName', 'vendorVersion' -@@ -80,6 +81,18 @@ def create_user(topology_st): - 'homeDirectory': '/home/test' - }) - -+ # Add anonymous access aci -+ ACI_TARGET = "(targetattr != \"userpassword || aci\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" -+ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" -+ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ suffix = Domain(topology_st.standalone, DEFAULT_SUFFIX) -+ try: -+ suffix.add('aci', ANON_ACI) -+ except ldap.TYPE_OR_VALUE_EXISTS: -+ pass -+ -+ - @pytest.fixture(scope="module") - def user_aci(topology_st): - """Don't allow modifiersName attribute for the test user -@@ -156,7 +169,9 @@ def test_search_basic(topology_st, create_user, user_aci, add_attr, - entries = topology_st.standalone.search_s(search_suffix, ldap.SCOPE_BASE, - '(objectclass=*)', - search_filter) -- found_attrs = entries[0].data.keys() -+ found_attrs = set(entries[0].data.keys()) -+ if search_suffix == DN_ROOT and "nsUniqueId" in found_attrs: -+ found_attrs.remove("nsUniqueId") - - if add_attr == '*': - assert set(expected_attrs) - set(found_attrs) == set() -diff --git a/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py b/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py -new file mode 100644 -index 000000000..387c313ad ---- /dev/null -+++ b/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py -@@ -0,0 +1,65 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import ldap -+import logging -+import pytest -+import os -+from lib389._constants import * -+from lib389.topologies import topology_st as topo -+from lib389.mappingTree import MappingTrees -+ -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+if DEBUGGING: -+ logging.getLogger(__name__).setLevel(logging.DEBUG) -+else: -+ logging.getLogger(__name__).setLevel(logging.INFO) -+log = logging.getLogger(__name__) -+ -+ -+def test_invalid_mt(topo): -+ """Test that you can not add a new suffix/mapping tree -+ that does not already have the backend entry created. -+ -+ :id: caabd407-f541-4695-b13f-8f92af1112a0 -+ :setup: Standalone Instance -+ :steps: -+ 1. Create a new suffix that specifies an existing backend which has a -+ different suffix. -+ 2. Create a suffix that has no backend entry at all. -+ :expectedresults: -+ 1. Should fail with UNWILLING_TO_PERFORM -+ 1. Should fail with UNWILLING_TO_PERFORM -+ """ -+ -+ bad_suffix = 'dc=does,dc=not,dc=exist' -+ mts = MappingTrees(topo.standalone) -+ -+ properties = { -+ 'cn': bad_suffix, -+ 'nsslapd-state': 'backend', -+ 'nsslapd-backend': 'userroot', -+ } -+ with pytest.raises(ldap.UNWILLING_TO_PERFORM): -+ mts.create(properties=properties) -+ -+ properties = { -+ 'cn': bad_suffix, -+ 'nsslapd-state': 'backend', -+ 'nsslapd-backend': 'notCreatedRoot', -+ } -+ with pytest.raises(ldap.UNWILLING_TO_PERFORM): -+ mts.create(properties=properties) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py -index 34a2de2ad..c25d89cb0 100644 ---- a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py -+++ b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py -@@ -6,6 +6,8 @@ from lib389.topologies import topology_m1 as topo - from lib389.backend import Backends - from lib389.encrypted_attributes import EncryptedAttrs - -+pytestmark = pytest.mark.tier1 -+ - DEBUGGING = os.getenv("DEBUGGING", default=False) - if DEBUGGING: - logging.getLogger(__name__).setLevel(logging.DEBUG) -@@ -26,13 +28,13 @@ def test_be_delete(topo): - :steps: - 1. Create second backend/suffix - 2. Add an encrypted attribute to the default suffix -- 2. Delete default suffix -- 3. Check the nsslapd-defaultnamingcontext is updated -- 4. Delete the last backend -- 5. Check the namingcontext has not changed -- 6. Add new backend -- 7. Set default naming context -- 8. Verify the naming context is correct -+ 3. Delete default suffix -+ 4. Check the nsslapd-defaultnamingcontext is updated -+ 5. Delete the last backend -+ 6. Check the namingcontext has not changed -+ 7. Add new backend -+ 8. Set default naming context -+ 9. Verify the naming context is correct - :expectedresults: - 1. Success - 2. Success -@@ -42,6 +44,7 @@ def test_be_delete(topo): - 6. Success - 7. Success - 8. Success -+ 9. Success - """ - - inst = topo.ms["master1"] -diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py -index b37eff70f..882faf513 100644 ---- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py -+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py -@@ -99,6 +99,7 @@ def test_pwd_reset(topology_st, create_user): - # Reset user's password - our_user = UserAccount(topology_st.standalone, TEST_USER_DN) - our_user.replace('userpassword', PASSWORD) -+ time.sleep(.5) - - # Check that pwdReset is TRUE - assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' -@@ -106,6 +107,7 @@ def test_pwd_reset(topology_st, create_user): - # Bind as user and change its own password - our_user.rebind(PASSWORD) - our_user.replace('userpassword', PASSWORD) -+ time.sleep(.5) - - # Check that pwdReset is FALSE - topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) -@@ -114,6 +116,9 @@ def test_pwd_reset(topology_st, create_user): - # Reset password policy config - topology_st.standalone.config.replace('passwordMustChange', 'off') - -+ # Reset user's password -+ our_user.replace('userpassword', TEST_USER_PWD) -+ - - @pytest.mark.parametrize('subtree_pwchange,user_pwchange,exception', - [('on', 'off', ldap.UNWILLING_TO_PERFORM), -@@ -171,7 +176,7 @@ def test_change_pwd(topology_st, create_user, password_policy, - user.reset_password('new_pass') - except ldap.LDAPError as e: - log.error('Failed to change userpassword for {}: error {}'.format( -- TEST_USER_DN, e.message['info'])) -+ TEST_USER_DN, e.args[0['info']])) - raise e - finally: - log.info('Bind as DM') -@@ -245,7 +250,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy): - user.reset_password(TEST_USER_PWD) - except ldap.LDAPError as e: - log.error('Failed to change userpassword for {}: error {}'.format( -- TEST_USER_DN, e.message['info'])) -+ TEST_USER_DN, e.args[0]['info'])) - raise e - finally: - log.info('Bind as DM') -diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py -index e395f0e7c..66599286f 100644 ---- a/dirsrvtests/tests/suites/replication/changelog_test.py -+++ b/dirsrvtests/tests/suites/replication/changelog_test.py -@@ -367,7 +367,7 @@ def test_dsconf_dump_changelog_files_removed(topo): - # primary condition before executing the core goal of this case : management of generated files. - - log.info("Use dsconf dump-changelog with invalid parameters") -- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'dump-changelog'] -+ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'dump-changelog'] - log.info('Command used : %s' % cmdline) - proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) - msg = proc.communicate() -@@ -377,7 +377,7 @@ def test_dsconf_dump_changelog_files_removed(topo): - # Now the core goal of the test case - # Using dsconf replication changelog without -l option - log.info('Use dsconf replication changelog without -l option: no generated ldif files should be present in %s ' % changelog_dir) -- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog'] -+ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog'] - log.info('Command used : %s' % cmdline) - proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) - proc.communicate() -@@ -396,7 +396,7 @@ def test_dsconf_dump_changelog_files_removed(topo): - - # Using dsconf replication changelog without -l option - log.info('Use dsconf replication changelog with -l option: generated ldif files should be kept in %s ' % changelog_dir) -- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog', '-l'] -+ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog', '-l'] - log.info('Command used : %s' % cmdline) - proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) - proc.communicate() -diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -index 48d0067db..ea3eacc48 100644 ---- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2018 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -117,7 +117,7 @@ def _test_base(topology): - M1 = topology.ms["master1"] - - conts = nsContainers(M1, SUFFIX) -- base_m2 = conts.create(properties={'cn': 'test_container'}) -+ base_m2 = conts.ensure_state(properties={'cn': 'test_container'}) - - for inst in topology: - inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') -diff --git a/dirsrvtests/tests/suites/replication/rfc2307compat.py b/dirsrvtests/tests/suites/replication/rfc2307compat.py -new file mode 100644 -index 000000000..ec98e9dac ---- /dev/null -+++ b/dirsrvtests/tests/suites/replication/rfc2307compat.py -@@ -0,0 +1,174 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# Copyright (C) 2020 William Brown -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import pytest -+from lib389.replica import Replicas -+from lib389.tasks import * -+from lib389.utils import * -+from lib389.topologies import topology_m2 as topo_m2 -+from . import get_repl_entries -+from lib389.idm.user import UserAccount -+from lib389.replica import ReplicationManager -+from lib389._constants import * -+ -+pytestmark = pytest.mark.tier0 -+ -+TEST_ENTRY_NAME = 'mmrepl_test' -+TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) -+NEW_SUFFIX_NAME = 'test_repl' -+NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) -+NEW_BACKEND = 'repl_base' -+ -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+if DEBUGGING: -+ logging.getLogger(__name__).setLevel(logging.DEBUG) -+else: -+ logging.getLogger(__name__).setLevel(logging.INFO) -+log = logging.getLogger(__name__) -+ -+pytest.mark.skipif(not os.environ.get('UNSAFE_ACK', False), reason="UNSAFE tests may damage system configuration.") -+def test_rfc2307compat(topo_m2): -+ """ Test to verify if 10rfc2307compat.ldif does not prevent replication of schema -+ - Create 2 masters and a test entry -+ - Move 10rfc2307compat.ldif to be private to M1 -+ - Move 10rfc2307.ldif to be private to M2 -+ - Add 'objectCategory' to the schema of M1 -+ - Force a replication session -+ - Check 'objectCategory' on M1 and M2 -+ """ -+ m1 = topo_m2.ms["master1"] -+ m2 = topo_m2.ms["master2"] -+ -+ m1.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.REPLICA)) -+ m2.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.REPLICA)) -+ -+ m1.add_s(Entry(( -+ TEST_ENTRY_DN, { -+ "objectClass": "top", -+ "objectClass": "extensibleObject", -+ 'uid': TEST_ENTRY_NAME, -+ 'cn': TEST_ENTRY_NAME, -+ 'sn': TEST_ENTRY_NAME, -+ } -+ ))) -+ -+ entries = get_repl_entries(topo_m2, TEST_ENTRY_NAME, ["uid"]) -+ assert all(entries), "Entry {} wasn't replicated successfully".format(TEST_ENTRY_DN) -+ -+ # Clean the old locations (if any) -+ m1_temp_schema = os.path.join(m1.get_config_dir(), 'schema') -+ m2_temp_schema = os.path.join(m2.get_config_dir(), 'schema') -+ m1_schema = os.path.join(m1.get_data_dir(), 'dirsrv/schema') -+ m1_opt_schema = os.path.join(m1.get_data_dir(), 'dirsrv/data') -+ m1_temp_backup = os.path.join(m1.get_tmp_dir(), 'schema') -+ -+ # Does the system schema exist? -+ if os.path.islink(m1_schema): -+ # Then we need to put the m1 schema back. -+ os.unlink(m1_schema) -+ shutil.copytree(m1_temp_backup, m1_schema) -+ if not os.path.exists(m1_temp_backup): -+ shutil.copytree(m1_schema, m1_temp_backup) -+ -+ shutil.rmtree(m1_temp_schema, ignore_errors=True) -+ shutil.rmtree(m2_temp_schema, ignore_errors=True) -+ -+ # Build a new copy -+ shutil.copytree(m1_schema, m1_temp_schema) -+ shutil.copytree(m1_schema, m2_temp_schema) -+ # Ensure 99user.ldif exists -+ with open(os.path.join(m1_temp_schema, '99user.ldif'), 'w') as f: -+ f.write('dn: cn=schema') -+ -+ with open(os.path.join(m2_temp_schema, '99user.ldif'), 'w') as f: -+ f.write('dn: cn=schema') -+ -+ # m1 has compat, m2 has legacy. -+ os.unlink(os.path.join(m2_temp_schema, '10rfc2307compat.ldif')) -+ shutil.copy(os.path.join(m1_opt_schema, '10rfc2307.ldif'), m2_temp_schema) -+ -+ # Configure the instances -+ # m1.config.replace('nsslapd-schemadir', m1_temp_schema) -+ # m2.config.replace('nsslapd-schemadir', m2_temp_schema) -+ -+ # Now mark the system schema as empty. -+ shutil.rmtree(m1_schema) -+ os.symlink('/var/lib/empty', m1_schema) -+ -+ print("SETUP COMPLETE -->") -+ -+ # Stop all instances -+ m1.stop() -+ m2.stop() -+ -+ # udpate the schema on M1 to tag a schemacsn -+ m1.start() -+ objectcategory_attr = '( NAME \'objectCategory\' DESC \'test of objectCategory\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' -+ m1.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) -+ -+ # Now start M2 and trigger a replication M1->M2 -+ m2.start() -+ m1.modify_s(TEST_ENTRY_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes('value_m1')])]) -+ -+ # Now check that objectCategory is in both schema -+ time.sleep(10) -+ ents = m1.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) -+ for value in ents[0].getValues('attributetypes'): -+ if ensure_bytes('objectCategory') in value: -+ log.info("M1: " + str(value)) -+ break -+ assert ensure_bytes('objectCategory') in value -+ -+ ents = m2.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) -+ for value in ents[0].getValues('attributetypes'): -+ if ensure_bytes('objectCategory') in value: -+ log.info("M2: " + str(value)) -+ break -+ assert ensure_bytes('objectCategory') in value -+ -+ # Stop m2 -+ m2.stop() -+ -+ # "Update" it's schema, -+ os.unlink(os.path.join(m2_temp_schema, '10rfc2307.ldif')) -+ shutil.copy(os.path.join(m1_temp_backup, '10rfc2307compat.ldif'), m2_temp_schema) -+ -+ # Add some more to m1 -+ objectcategory_attr = '( NAME \'objectCategoryX\' DESC \'test of objectCategoryX\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' -+ m1.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) -+ -+ # Start m2. -+ m2.start() -+ m1.modify_s(TEST_ENTRY_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes('value_m2')])]) -+ -+ time.sleep(10) -+ ents = m1.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) -+ for value in ents[0].getValues('attributetypes'): -+ if ensure_bytes('objectCategoryX') in value: -+ log.info("M1: " + str(value)) -+ break -+ assert ensure_bytes('objectCategoryX') in value -+ -+ ents = m2.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) -+ for value in ents[0].getValues('attributetypes'): -+ if ensure_bytes('objectCategoryX') in value: -+ log.info("M2: " + str(value)) -+ break -+ assert ensure_bytes('objectCategoryX') in value -+ -+ # Success cleanup -+ os.unlink(m1_schema) -+ shutil.copytree(m1_temp_backup, m1_schema) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/dirsrvtests/tests/suites/roles/__init__.py b/dirsrvtests/tests/suites/roles/__init__.py -new file mode 100644 -index 000000000..1981985fb ---- /dev/null -+++ b/dirsrvtests/tests/suites/roles/__init__.py -@@ -0,0 +1,3 @@ -+""" -+ :Requirement: 389-ds-base: Roles -+""" -diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py -index 3f1b7568c..47a531794 100644 ---- a/dirsrvtests/tests/suites/roles/basic_test.py -+++ b/dirsrvtests/tests/suites/roles/basic_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -36,18 +36,19 @@ FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) - - - def test_filterrole(topo): -- ''' -- :id: 8ada4064-786b-11e8-8634-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. Search nsconsole role -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- ''' -+ """Test Filter Role -+ -+ :id: 8ada4064-786b-11e8-8634-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. Search nsconsole role -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ """ - Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) - properties = { - 'ou': 'eng', -@@ -137,18 +138,19 @@ def test_filterrole(topo): - - - def test_managedrole(topo): -- ''' -- :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b -- :setup: server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. Search managed role entries -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -- ''' -+ """Test Managed Role -+ -+ :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b -+ :setup: server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. Search managed role entries -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed -+ """ - # Create Managed role entry - roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) - role = roles.create(properties={"cn": 'ROLE1'}) -@@ -184,8 +186,12 @@ def test_managedrole(topo): - - # Set an aci that will deny ROLE1 manage role - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add('aci', '(targetattr=*)(version 3.0; aci "role aci";' -+ add('aci', '(targetattr="*")(version 3.0; aci "role aci";' - ' deny(all) roledn="ldap:///{}";)'.format(role.dn),) -+ # Add self user modification and anonymous aci -+ ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ suffix.add('aci', ANON_ACI) - - # Crate a connection with cn=Fail which is member of ROLE1 - conn = UserAccount(topo.standalone, "uid=Fail,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) -@@ -232,17 +238,18 @@ def _final(request, topo): - - - def test_nestedrole(topo, _final): -- """ -- :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 -- :setup: Standalone server -- :steps: -- 1. Add test entry -- 2. Add ACI -- 3. Search managed role entries -- :expectedresults: -- 1. Entry should be added -- 2. Operation should succeed -- 3. Operation should succeed -+ """Test Nested Role -+ -+ :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 -+ :setup: Standalone server -+ :steps: -+ 1. Add test entry -+ 2. Add ACI -+ 3. Search managed role entries -+ :expectedresults: -+ 1. Entry should be added -+ 2. Operation should succeed -+ 3. Operation should succeed - """ - # Create Managed role entry - managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) -@@ -271,7 +278,7 @@ def test_nestedrole(topo, _final): - - # Create a ACI with deny access to nested role entry - Domain(topo.standalone, DEFAULT_SUFFIX).\ -- add('aci', f'(targetattr=*)(version 3.0; aci ' -+ add('aci', f'(targetattr="*")(version 3.0; aci ' - f'"role aci"; deny(all) roledn="ldap:///{nested_role.dn}";)') - - # Create connection with 'uid=test_user_1,ou=People,dc=example,dc=com' member of managed_role1 -diff --git a/dirsrvtests/tests/suites/sasl/regression_test.py b/dirsrvtests/tests/suites/sasl/regression_test.py -index 2db76ce98..58ff9a225 100644 ---- a/dirsrvtests/tests/suites/sasl/regression_test.py -+++ b/dirsrvtests/tests/suites/sasl/regression_test.py -@@ -1,15 +1,14 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2016 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - # --import base64 -+ - import os - import pytest --import subprocess - from lib389.tasks import * - from lib389.utils import * - from lib389.topologies import topology_m2 -@@ -48,7 +47,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): - log.info("\n######################### Check PEM files (%s, %s, %s)%s in %s ######################\n" - % (mycacert, myservercert, myserverkey, notexist, confdir)) - global cacert -- cacert = '%s/%s.pem' % (confdir, mycacert) -+ cacert = f"{mycacert}.pem" - if os.path.isfile(cacert): - if notexist == "": - log.info('%s is successfully generated.' % cacert) -@@ -61,7 +60,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): - assert False - else: - log.info('%s is correctly not generated.' % cacert) -- servercert = '%s/%s.pem' % (confdir, myservercert) -+ servercert = f"{myservercert}.pem" - if os.path.isfile(servercert): - if notexist == "": - log.info('%s is successfully generated.' % servercert) -@@ -74,7 +73,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): - assert False - else: - log.info('%s is correctly not generated.' % servercert) -- serverkey = '%s/%s.pem' % (confdir, myserverkey) -+ serverkey = f"{myserverkey}.pem" - if os.path.isfile(serverkey): - if notexist == "": - log.info('%s is successfully generated.' % serverkey) -@@ -91,16 +90,16 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): - - def relocate_pem_files(topology_m2): - log.info("######################### Relocate PEM files on master1 ######################") -- mycacert = 'MyCA' -+ certdir_prefix = "/dev/shm" -+ mycacert = os.path.join(certdir_prefix, "MyCA") - topology_m2.ms["master1"].encryption.set('CACertExtractFile', mycacert) -- myservercert = 'MyServerCert1' -- myserverkey = 'MyServerKey1' -+ myservercert = os.path.join(certdir_prefix, "MyServerCert1") -+ myserverkey = os.path.join(certdir_prefix, "MyServerKey1") - topology_m2.ms["master1"].rsa.apply_mods([(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert), - (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)]) - log.info("##### restart master1") - topology_m2.ms["master1"].restart() -- m1confdir = topology_m2.ms["master1"].confdir -- check_pems(m1confdir, mycacert, myservercert, myserverkey, "") -+ check_pems(certdir_prefix, mycacert, myservercert, myserverkey, "") - - @pytest.mark.ds47536 - def test_openldap_no_nss_crypto(topology_m2): -diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py -new file mode 100644 -index 000000000..699d58f79 ---- /dev/null -+++ b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py -@@ -0,0 +1,163 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 William Brown -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+ -+import logging -+import ldap -+import time -+from ldap.syncrepl import SyncreplConsumer -+import pytest -+from lib389 import DirSrv -+from lib389.idm.user import nsUserAccounts, UserAccounts -+from lib389.topologies import topology_st as topology -+from lib389.paths import Paths -+from lib389.utils import ds_is_older -+from lib389.plugins import RetroChangelogPlugin, ContentSynchronizationPlugin -+from lib389._constants import * -+ -+log = logging.getLogger(__name__) -+ -+class ISyncRepl(DirSrv, SyncreplConsumer): -+ """ -+ This implements a test harness for checking syncrepl, and allowing us to check various actions or -+ behaviours. During a "run" it stores the results in it's instance, so that they can be inspected -+ later to ensure that syncrepl worked as expected. -+ """ -+ def __init__(self, inst, openldap=False): -+ self.inst = inst -+ self.msgid = None -+ -+ self.last_cookie = None -+ self.next_cookie = None -+ self.cookie = None -+ self.openldap = openldap -+ if self.openldap: -+ # In openldap mode, our initial cookie needs to be a rid. -+ self.cookie = "rid=123" -+ self.delete = [] -+ self.present = [] -+ self.entries = {} -+ -+ super().__init__() -+ -+ def result4(self, *args, **kwargs): -+ return self.inst.result4(*args, **kwargs, escapehatch='i am sure') -+ -+ def search_ext(self, *args, **kwargs): -+ return self.inst.search_ext(*args, **kwargs, escapehatch='i am sure') -+ -+ def syncrepl_search(self, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, mode='refreshOnly', cookie=None, **search_args): -+ # Wipe the last result set. -+ self.delete = [] -+ self.present = [] -+ self.entries = {} -+ self.next_cookie = None -+ # Start the sync -+ # If cookie is none, will call "get_cookie" we have. -+ self.msgid = super().syncrepl_search(base, scope, mode, cookie, **search_args) -+ log.debug(f'syncrepl_search -> {self.msgid}') -+ assert self.msgid is not None -+ -+ def syncrepl_complete(self): -+ log.debug(f'syncrepl_complete -> {self.msgid}') -+ assert self.msgid is not None -+ # Loop until the operation is complete. -+ while super().syncrepl_poll(msgid=self.msgid) is True: -+ pass -+ assert self.next_cookie is not None -+ self.last_cookie = self.cookie -+ self.cookie = self.next_cookie -+ -+ def check_cookie(self): -+ assert self.last_cookie != self.cookie -+ -+ def syncrepl_set_cookie(self, cookie): -+ log.debug(f'set_cookie -> {cookie}') -+ if self.openldap: -+ assert self.cookie.startswith("rid=123") -+ self.next_cookie = cookie -+ -+ def syncrepl_get_cookie(self): -+ log.debug('get_cookie -> %s' % self.cookie) -+ if self.openldap: -+ assert self.cookie.startswith("rid=123") -+ return self.cookie -+ -+ def syncrepl_present(self, uuids, refreshDeletes=False): -+ log.debug(f'=====> refdel -> {refreshDeletes} uuids -> {uuids}') -+ if uuids is not None: -+ self.present = self.present + uuids -+ -+ def syncrepl_delete(self, uuids): -+ log.debug(f'delete -> {uuids}') -+ self.delete = uuids -+ -+ def syncrepl_entry(self, dn, attrs, uuid): -+ log.debug(f'entry -> {dn}') -+ self.entries[dn] = (uuid, attrs) -+ -+ def syncrepl_refreshdone(self): -+ log.debug('refreshdone') -+ -+def syncstate_assert(st, sync): -+ # How many entries do we have? -+ r = st.search_ext_s( -+ base=DEFAULT_SUFFIX, -+ scope=ldap.SCOPE_SUBTREE, -+ filterstr='(objectClass=*)', -+ attrsonly=1, -+ escapehatch='i am sure' -+ ) -+ -+ # Initial sync -+ log.debug("*test* initial") -+ sync.syncrepl_search() -+ sync.syncrepl_complete() -+ # check we caught them all -+ assert len(r) == len(sync.entries.keys()) -+ assert len(r) == len(sync.present) -+ assert 0 == len(sync.delete) -+ -+ # Add a new entry -+ -+ account = nsUserAccounts(st, DEFAULT_SUFFIX).create_test_user() -+ # Check -+ log.debug("*test* add") -+ sync.syncrepl_search() -+ sync.syncrepl_complete() -+ sync.check_cookie() -+ assert 1 == len(sync.entries.keys()) -+ assert 1 == len(sync.present) -+ assert 0 == len(sync.delete) -+ -+ # Mod -+ account.replace('description', 'change') -+ # Check -+ log.debug("*test* mod") -+ sync.syncrepl_search() -+ sync.syncrepl_complete() -+ sync.check_cookie() -+ assert 1 == len(sync.entries.keys()) -+ assert 1 == len(sync.present) -+ assert 0 == len(sync.delete) -+ -+ ## Delete -+ account.delete() -+ -+ # Check -+ log.debug("*test* del") -+ sync.syncrepl_search() -+ sync.syncrepl_complete() -+ # In a delete, the cookie isn't updated (?) -+ sync.check_cookie() -+ log.debug(f'{sync.entries.keys()}') -+ log.debug(f'{sync.present}') -+ log.debug(f'{sync.delete}') -+ assert 0 == len(sync.entries.keys()) -+ assert 0 == len(sync.present) -+ assert 1 == len(sync.delete) -+ -diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -index 7b35537d5..64b7425a5 100644 ---- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -@@ -20,7 +20,7 @@ from lib389.idm.group import Groups - from lib389.topologies import topology_st as topology - from lib389.paths import Paths - from lib389.utils import ds_is_older --from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate -+from lib389.plugins import RetroChangelogPlugin, ContentSynchronizationPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate - from lib389._constants import * - - from . import ISyncRepl, syncstate_assert -@@ -54,7 +54,7 @@ def test_syncrepl_basic(topology): - # Set the default targetid - rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') - # Enable sync repl -- csp = ContentSyncPlugin(st) -+ csp = ContentSynchronizationPlugin(st) - csp.enable() - # Restart DS - st.restart() -@@ -176,7 +176,7 @@ def test_sync_repl_mep(topology, request): - plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') - - # Enable sync plugin -- plugin = ContentSyncPlugin(inst) -+ plugin = ContentSynchronizationPlugin(inst) - plugin.enable() - - # Check the plug-in status -@@ -232,6 +232,8 @@ def test_sync_repl_mep(topology, request): - prev = int(cookie) - sync_repl.join() - log.info('test_sync_repl_map: PASS\n') -+ inst.start() -+ - - def test_sync_repl_cookie(topology, request): - """Test sync_repl cookie are progressing is an increasing order -@@ -240,33 +242,33 @@ def test_sync_repl_cookie(topology, request): - :id: d7fbde25-5702-46ac-b38e-169d7a68e97c - :setup: Standalone Instance - :steps: -- 1.: enable retroCL -- 2.: configure retroCL to log nsuniqueid as targetUniqueId -- 3.: enable content_sync plugin -- 4.: enable automember -- 5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem. -- 6.: configure automember to provision those groups with 'member' -- 7.: enable and configure memberof plugin -- 8.: enable plugin log level -- 9.: restart the server -- 10.: create a thread dedicated to run a sync repl client -- 11.: Create (9) users that will generate nested updates (automember/memberof) -- 12.: stop sync repl client and collect the list of cookie.change_no -- 13.: check that cookies.change_no are in increasing order -+ 1. enable retroCL -+ 2. configure retroCL to log nsuniqueid as targetUniqueId -+ 3. enable content_sync plugin -+ 4. enable automember -+ 5. create (2) groups. Few groups can help to reproduce the concurrent updates problem. -+ 6. configure automember to provision those groups with 'member' -+ 7. enable and configure memberof plugin -+ 8. enable plugin log level -+ 9. restart the server -+ 10. create a thread dedicated to run a sync repl client -+ 11. Create (9) users that will generate nested updates (automember/memberof) -+ 12. stop sync repl client and collect the list of cookie.change_no -+ 13. check that cookies.change_no are in increasing order - :expectedresults: -- 1.: succeeds -- 2.: succeeds -- 3.: succeeds -- 4.: succeeds -- 5.: succeeds -- 6.: succeeds -- 7.: succeeds -- 8.: succeeds -- 9.: succeeds -- 10.: succeeds -- 11.: succeeds -- 12.: succeeds -- 13.: succeeds -+ 1. succeeds -+ 2. succeeds -+ 3. succeeds -+ 4. succeeds -+ 5. succeeds -+ 6. succeeds -+ 7. succeeds -+ 8. succeeds -+ 9. succeeds -+ 10. succeeds -+ 11. succeeds -+ 12. succeeds -+ 13. succeeds - """ - inst = topology[0] - -@@ -277,7 +279,7 @@ def test_sync_repl_cookie(topology, request): - plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') - - # Enable sync plugin -- plugin = ContentSyncPlugin(inst) -+ plugin = ContentSynchronizationPlugin(inst) - plugin.enable() - - # Enable automember -@@ -409,7 +411,7 @@ def test_sync_repl_cookie_add_del(topology, request): - plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') - - # Enable sync plugin -- plugin = ContentSyncPlugin(inst) -+ plugin = ContentSynchronizationPlugin(inst) - plugin.enable() - - # Enable automember -@@ -541,7 +543,7 @@ def test_sync_repl_cookie_with_failure(topology, request): - plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') - - # Enable sync plugin -- plugin = ContentSyncPlugin(inst) -+ plugin = ContentSynchronizationPlugin(inst) - plugin.enable() - - # Enable automember -diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py -index 646cd97ba..2e1637a21 100644 ---- a/dirsrvtests/tests/suites/vlv/regression_test.py -+++ b/dirsrvtests/tests/suites/vlv/regression_test.py -@@ -84,8 +84,8 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2): - MappingTrees(M2).list()[0].delete() - Backends(M2).list()[0].delete() - # Recreate the backend and the VLV index on Master 2. -- M2.mappingtree.create(DEFAULT_SUFFIX, "userRoot") - M2.backend.create(DEFAULT_SUFFIX, {BACKEND_NAME: "userRoot"}) -+ M2.mappingtree.create(DEFAULT_SUFFIX, "userRoot") - # Recreating vlvSrchDn and vlvIndexDn on Master 2. - vlv_searches.create( - basedn="cn=userRoot,cn=ldbm database,cn=plugins,cn=config", --- -2.26.2 - diff --git a/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch b/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch deleted file mode 100644 index 362f5f3..0000000 --- a/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch +++ /dev/null @@ -1,782 +0,0 @@ -From 788d7c69a446d1ae324b2c58daaa5d4fd5528748 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 20 Jan 2021 16:42:15 -0500 -Subject: [PATCH 1/3] Issue 5442 - Search results are different between RHDS10 - and RHDS11 - -Bug Description: In 1.4.x we introduced a change that was overly strict about - how a search on a non-existent subtree returned its error code. - It was changed from returning an error 32 to an error 0 with - zero entries returned. - -Fix Description: When finding the entry and processing acl's make sure to - gather the aci's that match the resource even if the resource - does not exist. This requires some extra checks when processing - the target attribute. - -relates: https://github.com/389ds/389-ds-base/issues/4542 - -Reviewed by: firstyear, elkris, and tbordaz (Thanks!) - -Apply Thierry's changes - -round 2 - -Apply more suggestions from Thierry ---- - dirsrvtests/tests/suites/acl/misc_test.py | 108 +++++++- - ldap/servers/plugins/acl/acl.c | 296 ++++++++++------------ - ldap/servers/slapd/back-ldbm/findentry.c | 6 +- - src/lib389/lib389/_mapped_object.py | 4 +- - 4 files changed, 239 insertions(+), 175 deletions(-) - -diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py -index 5f0e3eb72..c640e60ad 100644 ---- a/dirsrvtests/tests/suites/acl/misc_test.py -+++ b/dirsrvtests/tests/suites/acl/misc_test.py -@@ -12,7 +12,7 @@ import ldap - import os - import pytest - --from lib389._constants import DEFAULT_SUFFIX, PW_DM -+from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM - from lib389.idm.user import UserAccount, UserAccounts - from lib389._mapped_object import DSLdapObject - from lib389.idm.account import Accounts, Anonymous -@@ -408,14 +408,112 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): - user = uas.create_test_user(uid=i, gid=i) - user.set('userPassword', PW_DM) - -- for i in range(len(uas.list())): -- uas.list()[i].bind(PW_DM) -+ users = uas.list() -+ for user in users: -+ user.bind(PW_DM) - - ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') - topo.standalone.restart() - -- for i in range(len(uas.list())): -- uas.list()[i].bind(PW_DM) -+ users = uas.list() -+ for user in users: -+ user.bind(PW_DM) -+ -+ -+def test_info_disclosure(request, topo): -+ """Test that a search returns 32 when base entry does not exist -+ -+ :id: f6dec4c2-65a3-41e4-a4c0-146196863333 -+ :setup: Standalone Instance -+ :steps: -+ 1. Add aci -+ 2. Add test user -+ 3. Bind as user and search for non-existent entry -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Error 32 is returned -+ """ -+ -+ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) -+ ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)" -+ ACI_SUBJECT = "(userdn=\"ldap:///all\");)" -+ ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT -+ -+ # Get current ACi's so we can restore them when we are done -+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) -+ preserved_acis = suffix.get_attr_vals_utf8('aci') -+ -+ def finofaci(): -+ domain = Domain(topo.standalone, DEFAULT_SUFFIX) -+ try: -+ domain.remove_all('aci') -+ domain.replace_values('aci', preserved_acis) -+ except: -+ pass -+ request.addfinalizer(finofaci) -+ -+ # Remove aci's -+ suffix.remove_all('aci') -+ -+ # Add test user -+ USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX -+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) -+ users.create(properties={ -+ 'uid': 'test', -+ 'cn': 'test', -+ 'sn': 'test', -+ 'uidNumber': '1000', -+ 'gidNumber': '2000', -+ 'homeDirectory': '/home/test', -+ 'userPassword': PW_DM -+ }) -+ -+ # bind as user -+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) -+ -+ # Search fo existing base DN -+ test = Domain(conn, DEFAULT_SUFFIX) -+ try: -+ test.get_attr_vals_utf8_l('dc') -+ assert False -+ except IndexError: -+ pass -+ -+ # Search for a non existent bases -+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) -+ try: -+ subtree.get_attr_vals_utf8_l('objectclass') -+ except IndexError: -+ pass -+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) -+ try: -+ subtree.get_attr_vals_utf8_l('objectclass') -+ except IndexError: -+ pass -+ # Try ONE level search instead of BASE -+ try: -+ Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL) -+ except IndexError: -+ pass -+ -+ # add aci -+ suffix.add('aci', ACI) -+ -+ # Search for a non existent entry which should raise an exception -+ with pytest.raises(ldap.NO_SUCH_OBJECT): -+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) -+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) -+ subtree.get_attr_vals_utf8_l('objectclass') -+ with pytest.raises(ldap.NO_SUCH_OBJECT): -+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) -+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) -+ subtree.get_attr_vals_utf8_l('objectclass') -+ with pytest.raises(ldap.NO_SUCH_OBJECT): -+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) -+ DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX -+ Accounts(conn, DN).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True) -+ - - if __name__ == "__main__": - CURRENT_FILE = os.path.realpath(__file__) -diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c -index 41a909a18..4e811f73a 100644 ---- a/ldap/servers/plugins/acl/acl.c -+++ b/ldap/servers/plugins/acl/acl.c -@@ -2111,10 +2111,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - aci_right = aci->aci_access; - res_right = aclpb->aclpb_access; - if (!(aci_right & res_right)) { -- /* If we are looking for read/search and the acl has read/search -- ** then go further because if targets match we may keep that -- ** acl in the entry cache list. -- */ -+ /* -+ * If we are looking for read/search and the acl has read/search -+ * then go further because if targets match we may keep that -+ * acl in the entry cache list. -+ */ - if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) && - (aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) { - matches = ACL_FALSE; -@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } - } - -- -- /* first Let's see if the entry is under the subtree where the -- ** ACL resides. We can't let somebody affect a target beyond the -- ** scope of where the ACL resides -- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us -- ** but if the target is "o=ace industry, c=us", then we are in trouble. -- ** -- ** If the aci is in the rootdse and the entry is not, then we do not -- ** match--ie. acis in the rootdse do NOT apply below...for the moment. -- ** -- */ -+ /* -+ * First Let's see if the entry is under the subtree where the -+ * ACL resides. We can't let somebody affect a target beyond the -+ * scope of where the ACL resides -+ * Example: ACL is located in "ou=engineering, o=ace industry, c=us -+ * but if the target is "o=ace industry, c=us", then we are in trouble. -+ * -+ * If the aci is in the rootdse and the entry is not, then we do not -+ * match--ie. acis in the rootdse do NOT apply below...for the moment. -+ */ - res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn); - aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn); -- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) { -- -- /* cant' poke around */ -+ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || -+ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) -+ { -+ /* can't poke around */ - matches = ACL_FALSE; - goto acl__resource_match_aci_EXIT; - } - - /* -- ** We have a single ACI which we need to find if it applies to -- ** the resource or not. -- */ -+ * We have a single ACI which we need to find if it applies to the resource or not. -+ */ - if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) { - char *avaType; - struct berval *avaValue; -@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - char *avaType; - struct berval *avaValue; - char logbuf[1024]; -- -- /* We are evaluating the moddn permission. -- * The aci contains target_to and target_from -- * -- * target_to filter must be checked against the resource ndn that was stored in -- * aclpb->aclpb_curr_entry_sdn -- * -- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn -- * (sdn was stored in the pblock) -- */ -+ /* -+ * We are evaluating the moddn permission. -+ * The aci contains target_to and target_from -+ * -+ * target_to filter must be checked against the resource ndn that was stored in -+ * aclpb->aclpb_curr_entry_sdn -+ * -+ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn -+ * (sdn was stored in the pblock) -+ */ - if (aci->target_to) { - f = aci->target_to; - dn_matched = ACL_TRUE; - - /* Now check if the filter is a simple or substring filter */ - if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) { -- /* This is a filter with substring -- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com -- */ -+ /* This is a filter with substring e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com */ - slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n", - slapi_filter_to_string(f, logbuf, sizeof(logbuf))); - if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) { -@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } - } - } else { -- /* This is a filter without substring -- * e.g. ldap:///cn=accounts,dc=example,dc=com -- */ -+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */ - slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n", - slapi_filter_to_string(f, logbuf, sizeof(logbuf))); - slapi_filter_get_ava(f, &avaType, &avaValue); -@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - /* Now check if the filter is a simple or substring filter */ - if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) { - /* This is a filter with substring -- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com -- */ -+ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com -+ */ - slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n", - slapi_filter_to_string(f, logbuf, sizeof(logbuf))); - if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) { -@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - goto acl__resource_match_aci_EXIT; - } - } -- - } else { -- /* This is a filter without substring -- * e.g. ldap:///cn=accounts,dc=example,dc=com -- */ -+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */ - slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n", - slapi_filter_to_string(f, logbuf, sizeof(logbuf))); - if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) { -@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } - - if (aci->aci_type & ACI_TARGET_PATTERN) { -- - f = aci->target; - dn_matched = ACL_TRUE; -- - if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffux */)) != ACL_TRUE) { - dn_matched = ACL_FALSE; - if (rv == ACL_ERR) { -@@ -2296,7 +2287,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - - /* - * Is it a (target="ldap://cn=*,($dn),o=sun.com") kind of thing. -- */ -+ */ - if (aci->aci_type & ACI_TARGET_MACRO_DN) { - /* - * See if the ($dn) component matches the string and -@@ -2306,8 +2297,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * entry is the same one don't recalculate it-- - * this flag only works for search right now, could - * also optimise for mods by making it work for mods. -- */ -- -+ */ - if ((aclpb->aclpb_res_type & ACLPB_NEW_ENTRY) == 0) { - /* - * Here same entry so just look up the matched value, -@@ -2356,8 +2346,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * If there is already an entry for this aci in this - * aclpb then remove it--it's an old value for a - * different entry. -- */ -- -+ */ - acl_ht_add_and_freeOld(aclpb->aclpb_macro_ht, - (PLHashNumber)aci->aci_index, - matched_val); -@@ -2381,30 +2370,27 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } - - /* -- ** Here, if there's a targetfilter field, see if it matches. -- ** -- ** The commented out code below was an erroneous attempt to skip -- ** this test. It is wrong because: 1. you need to store -- ** whether the last test matched or not (you cannot just assume it did) -- ** and 2. It may not be the same aci, so the previous matched -- ** value is a function of the aci. -- ** May be interesting to build such a cache...but no evidence for -- ** for that right now. See Bug 383424. -- ** -- ** -- ** && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) || -- ** (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) -- */ -+ * Here, if there's a targetfilter field, see if it matches. -+ * -+ * The commented out code below was an erroneous attempt to skip -+ * this test. It is wrong because: 1. you need to store -+ * whether the last test matched or not (you cannot just assume it did) -+ * and 2. It may not be the same aci, so the previous matched -+ * value is a function of the aci. -+ * May be interesting to build such a cache...but no evidence for -+ * for that right now. See Bug 383424. -+ * -+ * -+ * && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) || -+ * (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) -+ */ - if (aci->aci_type & ACI_TARGET_FILTER) { - int filter_matched = ACL_TRUE; -- - /* - * Check for macros. - * For targetfilter we need to fake the lasinfo structure--it's - * created "naturally" for subjects but not targets. -- */ -- -- -+ */ - if (aci->aci_type & ACI_TARGET_FILTER_MACRO_DN) { - - lasInfo *lasinfo = NULL; -@@ -2419,11 +2405,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - ACL_EVAL_TARGET_FILTER); - slapi_ch_free((void **)&lasinfo); - } else { -- -- - if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry, - aci->targetFilter, -- 0 /*don't do acess chk*/) != 0) { -+ 0 /*don't do access check*/) != 0) { - filter_matched = ACL_FALSE; - } - } -@@ -2450,7 +2434,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * Check to see if we need to evaluate any targetattrfilters. - * They look as follows: - * (targetattrfilters="add=sn:(sn=rob) && gn:(gn!=byrne), -- * del=sn:(sn=rob) && gn:(gn=byrne)") -+ * del=sn:(sn=rob) && gn:(gn=byrne)") - * - * For ADD/DELETE: - * If theres's a targetattrfilter then each add/del filter -@@ -2458,29 +2442,25 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * by each value of the attribute in the entry. - * - * For MODIFY: -- * If there's a targetattrfilter then the add/del filter -+ * If there's a targetattrfilter then the add/del filter - * must be satisfied by the attribute to be added/deleted. - * (MODIFY acl is evaluated one value at a time). - * - * -- */ -- -+ */ - if (((aclpb->aclpb_access & SLAPI_ACL_ADD) && - (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) || - ((aclpb->aclpb_access & SLAPI_ACL_DELETE) && -- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) { -- -+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) -+ { - Targetattrfilter **attrFilterArray = NULL; -- - Targetattrfilter *attrFilter = NULL; -- - Slapi_Attr *attr_ptr = NULL; - Slapi_Value *sval; - const struct berval *attrVal; - int k; - int done; - -- - if ((aclpb->aclpb_access & SLAPI_ACL_ADD) && - (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) { - -@@ -2497,28 +2477,20 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - - while (attrFilterArray && attrFilterArray[num_attrs] && attr_matched) { - attrFilter = attrFilterArray[num_attrs]; -- - /* -- * If this filter applies to an attribute in the entry, -- * apply it to the entry. -- * Otherwise just ignore it. -- * -- */ -- -- if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, -- attrFilter->attr_str, -- &attr_ptr) == 0) { -- -+ * If this filter applies to an attribute in the entry, -+ * apply it to the entry. -+ * Otherwise just ignore it. -+ * -+ */ -+ if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, attrFilter->attr_str, &attr_ptr) == 0) { - /* -- * This is an applicable filter. -- * The filter is to be appplied to the entry being added -- * or deleted. -- * The filter needs to be satisfied by _each_ occurence -- * of the attribute in the entry--otherwise you -- * could satisfy the filter and then put loads of other -- * values in on the back of it. -- */ -- -+ * This is an applicable filter. -+ * The filter is to be applied to the entry being added or deleted. -+ * The filter needs to be satisfied by _each_ occurrence of the -+ * attribute in the entry--otherwise you could satisfy the filter -+ * and then put loads of other values in on the back of it. -+ */ - sval = NULL; - attrVal = NULL; - k = slapi_attr_first_value(attr_ptr, &sval); -@@ -2528,12 +2500,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - - if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry, - attrFilter->attr_str, -- (struct berval *)attrVal) == LDAP_SUCCESS) { -- -+ (struct berval *)attrVal) == LDAP_SUCCESS) -+ { - attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry, - attrFilter->filter, -- 1 /* Do filter sense evaluation below */ -- ); -+ 1 /* Do filter sense evaluation below */); - done = !attr_matched; - slapi_entry_free(aclpb->aclpb_filter_test_entry); - } -@@ -2542,19 +2513,19 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } /* while */ - - /* -- * Here, we applied an applicable filter to the entry. -- * So if attr_matched is ACL_TRUE then every value -- * of the attribute in the entry satisfied the filter. -- * Otherwise, attr_matched is ACL_FALSE and not every -- * value satisfied the filter, so we will teminate the -- * scan of the filter list. -- */ -+ * Here, we applied an applicable filter to the entry. -+ * So if attr_matched is ACL_TRUE then every value -+ * of the attribute in the entry satisfied the filter. -+ * Otherwise, attr_matched is ACL_FALSE and not every -+ * value satisfied the filter, so we will terminate the -+ * scan of the filter list. -+ */ - } - - num_attrs++; - } /* while */ - --/* -+ /* - * Here, we've applied all the applicable filters to the entry. - * Each one must have been satisfied by all the values of the attribute. - * The result of this is stored in attr_matched. -@@ -2585,7 +2556,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } else if (((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) && - (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) || - ((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_DEL) && -- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) { -+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) -+ { - /* - * Here, it's a modify add/del and we have attr filters. - * So, we need to scan the add/del filter list to find the filter -@@ -2629,11 +2601,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * Otherwise, ignore the targetattrfilters. - */ - if (found) { -- - if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry, - aclpb->aclpb_curr_attrEval->attrEval_name, -- aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) { -- -+ aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) -+ { - attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry, - attrFilter->filter, - 1 /* Do filter sense evaluation below */ -@@ -2651,20 +2622,21 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * Here this attribute appeared and was matched in a - * targetattrfilters list, so record this fact so we do - * not have to scan the targetattr list for the attribute. -- */ -+ */ - - attr_matched_in_targetattrfilters = 1; - } - } /* targetvaluefilters */ - - -- /* There are 3 cases by which acis are selected. -- ** 1) By scanning the whole list and picking based on the resource. -- ** 2) By picking a subset of the list which will be used for the whole -- ** acl evaluation. -- ** 3) A finer granularity, i.e, a selected list of acls which will be -- ** used for only that entry's evaluation. -- */ -+ /* -+ * There are 3 cases by which acis are selected. -+ * 1) By scanning the whole list and picking based on the resource. -+ * 2) By picking a subset of the list which will be used for the whole -+ * acl evaluation. -+ * 3) A finer granularity, i.e, a selected list of acls which will be -+ * used for only that entry's evaluation. -+ */ - if (!(skip_attrEval) && (aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_ENTRY_LIST) && - (res_right & SLAPI_ACL_SEARCH) && - ((aci->aci_access & SLAPI_ACL_READ) || (aci->aci_access & SLAPI_ACL_SEARCH))) { -@@ -2680,7 +2652,6 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } - } - -- - /* If we are suppose to skip attr eval, then let's skip it */ - if ((aclpb->aclpb_access & SLAPI_ACL_SEARCH) && (!skip_attrEval) && - (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) { -@@ -2697,9 +2668,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - goto acl__resource_match_aci_EXIT; - } - -- /* We need to check again because we don't want to select this handle -- ** if the right doesn't match for now. -- */ -+ /* -+ * We need to check again because we don't want to select this handle -+ * if the right doesn't match for now. -+ */ - if (!(aci_right & res_right)) { - matches = ACL_FALSE; - goto acl__resource_match_aci_EXIT; -@@ -2718,20 +2690,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - * rbyrneXXX if we had a proper permission for modrdn eg SLAPI_ACL_MODRDN - * then we would not need this crappy way of telling it was a MODRDN - * request ie. SLAPI_ACL_WRITE && !(c_attrEval). -- */ -- -+ */ - c_attrEval = aclpb->aclpb_curr_attrEval; - - /* - * If we've already matched on targattrfilter then do not - * bother to look at the attrlist. -- */ -- -+ */ - if (!attr_matched_in_targetattrfilters) { -- - /* match target attr */ -- if ((c_attrEval) && -- (aci->aci_type & ACI_TARGET_ATTR)) { -+ if ((c_attrEval) && (aci->aci_type & ACI_TARGET_ATTR)) { - /* there is a target ATTR */ - Targetattr **attrArray = aci->targetAttr; - Targetattr *attr = NULL; -@@ -2773,46 +2741,43 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - matches = (attr_matched ? ACL_TRUE : ACL_FALSE); - } - -- - aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED; - /* figure out how it matched, i.e star matched */ -- if (matches && star_matched && num_attrs == 1 && -- !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) -+ if (matches && star_matched && num_attrs == 1 && !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) { - aclpb->aclpb_state |= ACLPB_ATTR_STAR_MATCHED; -- else { -+ } else { - /* we are here means that there is a specific -- ** attr in the rule for this resource. -- ** We need to avoid this case -- ** Rule 1: (targetattr = "uid") -- ** Rule 2: (targetattr = "*") -- ** we cannot use STAR optimization -- */ -+ * attr in the rule for this resource. -+ * We need to avoid this case -+ * Rule 1: (targetattr = "uid") -+ * Rule 2: (targetattr = "*") -+ * we cannot use STAR optimization -+ */ - aclpb->aclpb_state |= ACLPB_FOUND_ATTR_RULE; - aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED; - } -- } else if ((c_attrEval) || -- (aci->aci_type & ACI_TARGET_ATTR)) { -+ } else if ((c_attrEval) || (aci->aci_type & ACI_TARGET_ATTR)) { - if ((aci_right & ACL_RIGHTS_TARGETATTR_NOT_NEEDED) && - (aclpb->aclpb_access & ACL_RIGHTS_TARGETATTR_NOT_NEEDED)) { - /* -- ** Targetattr rule doesn't make any sense -- ** in this case. So select this rule -- ** default: matches = ACL_TRUE; -- */ -+ * Targetattr rule doesn't make any sense -+ * in this case. So select this rule -+ * default: matches = ACL_TRUE; -+ */ - ; -- } else if (aci_right & SLAPI_ACL_WRITE && -+ } else if ((aci_right & SLAPI_ACL_WRITE) && - (aci->aci_type & ACI_TARGET_ATTR) && - !(c_attrEval) && - (aci->aci_type & ACI_HAS_ALLOW_RULE)) { - /* We need to handle modrdn operation. Modrdn doesn't -- ** change any attrs but changes the RDN and so (attr=NULL). -- ** Here we found an acl which has a targetattr but -- ** the resource doesn't need one. In that case, we should -- ** consider this acl. -- ** the opposite is true if it is a deny rule, only a deny without -- ** any targetattr should deny modrdn -- ** default: matches = ACL_TRUE; -- */ -+ * change any attrs but changes the RDN and so (attr=NULL). -+ * Here we found an acl which has a targetattr but -+ * the resource doesn't need one. In that case, we should -+ * consider this acl. -+ * the opposite is true if it is a deny rule, only a deny without -+ * any targetattr should deny modrdn -+ * default: matches = ACL_TRUE; -+ */ - ; - } else { - matches = ACL_FALSE; -@@ -2821,16 +2786,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a - } /* !attr_matched_in_targetattrfilters */ - - /* -- ** Here we are testing if we find a entry test rule (which should -- ** be rare). In that case, just remember it. An entry test rule -- ** doesn't have "(targetattr)". -- */ -+ * Here we are testing if we find a entry test rule (which should -+ * be rare). In that case, just remember it. An entry test rule -+ * doesn't have "(targetattr)". -+ */ - if ((aclpb->aclpb_state & ACLPB_EVALUATING_FIRST_ATTR) && - (!(aci->aci_type & ACI_TARGET_ATTR))) { - aclpb->aclpb_state |= ACLPB_FOUND_A_ENTRY_TEST_RULE; - } - --/* -+ /* - * Generic exit point for this routine: - * matches is ACL_TRUE if the aci matches the target of the resource, - * ACL_FALSE othrewise. -@@ -2853,6 +2818,7 @@ acl__resource_match_aci_EXIT: - - return (matches); - } -+ - /* Macro to determine if the cached result is valid or not. */ - #define ACL_CACHED_RESULT_VALID(result) \ - (((result & ACLPB_CACHE_READ_RES_ALLOW) && \ -diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c -index 6e53a0aea..bff751c88 100644 ---- a/ldap/servers/slapd/back-ldbm/findentry.c -+++ b/ldap/servers/slapd/back-ldbm/findentry.c -@@ -93,7 +93,6 @@ find_entry_internal_dn( - size_t tries = 0; - int isroot = 0; - int op_type; -- char *errbuf = NULL; - - /* get the managedsait ldap message control */ - slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait); -@@ -207,8 +206,8 @@ find_entry_internal_dn( - break; - } - if (acl_type > 0) { -- err = plugin_call_acl_plugin(pb, me->ep_entry, NULL, NULL, acl_type, -- ACLPLUGIN_ACCESS_DEFAULT, &errbuf); -+ char *dummy_attr = "1.1"; -+ err = slapi_access_allowed(pb, me->ep_entry, dummy_attr, NULL, acl_type); - } - if (((acl_type > 0) && err) || (op_type == SLAPI_OPERATION_BIND)) { - /* -@@ -237,7 +236,6 @@ find_entry_internal_dn( - CACHE_RETURN(&inst->inst_cache, &me); - } - -- slapi_ch_free_string(&errbuf); - slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal_dn", "<= Not found (%s)\n", - slapi_sdn_get_dn(sdn)); - return (NULL); -diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py -index c60837601..ca6ea6ef8 100644 ---- a/src/lib389/lib389/_mapped_object.py -+++ b/src/lib389/lib389/_mapped_object.py -@@ -1190,7 +1190,7 @@ class DSLdapObjects(DSLogging, DSLints): - # Now actually commit the creation req - return co.ensure_state(rdn, properties, self._basedn) - -- def filter(self, search, scope=None): -+ def filter(self, search, scope=None, strict=False): - # This will yield and & filter for objectClass with as many terms as needed. - if search: - search_filter = _gen_and([self._get_objectclass_filter(), search]) -@@ -1211,5 +1211,7 @@ class DSLdapObjects(DSLogging, DSLints): - insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results] - except ldap.NO_SUCH_OBJECT: - # There are no objects to select from, se we return an empty array -+ if strict: -+ raise ldap.NO_SUCH_OBJECT - insts = [] - return insts --- -2.26.2 - diff --git a/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch b/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch deleted file mode 100644 index 3e12223..0000000 --- a/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch +++ /dev/null @@ -1,452 +0,0 @@ -From 5bca57b52069508a55b36fafe3729b7d1243743b Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 27 Jan 2021 11:58:38 +0100 -Subject: [PATCH 2/3] Issue 4526 - sync_repl: when completing an operation in - the pending list, it can select the wrong operation (#4553) - -Bug description: - When an operation complete, it was retrieved in the pending list with - the address of the Operation structure. In case of POST OP nested operations - the same address can be reused. So when completing an operation there could be - a confusion which operation actually completed. - A second problem is that if an update its DB_DEADLOCK, the BETXN_PREOP can - be called several times. During retry, the operation is already in the pending - list. - -Fix description: - The fix defines a new operation extension (sync_persist_extension_type). - This operation extension contains an index (idx_pl) of the op_pl in the - the pending list. - - And additional safety fix is to dump the pending list in case it becomes large (>10). - The pending list is dumped with SLAPI_LOG_PLUGIN. - - When there is a retry (operation extension exists) the call to sync_update_persist_betxn_pre_op - becomes a NOOP: the operation is not added again in the pending list. - -relates: https://github.com/389ds/389-ds-base/issues/4526 - -Reviewed by: William Brown (Thanks !!) ---- - ldap/servers/plugins/sync/sync.h | 9 ++ - ldap/servers/plugins/sync/sync_init.c | 64 +++++++- - ldap/servers/plugins/sync/sync_persist.c | 194 ++++++++++++++++------- - 3 files changed, 208 insertions(+), 59 deletions(-) - -diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h -index 7241fddbf..2fdf24476 100644 ---- a/ldap/servers/plugins/sync/sync.h -+++ b/ldap/servers/plugins/sync/sync.h -@@ -82,6 +82,12 @@ typedef enum _pl_flags { - OPERATION_PL_IGNORED = 5 - } pl_flags_t; - -+typedef struct op_ext_ident -+{ -+ uint32_t idx_pl; /* To uniquely identify an operation in PL, the operation extension -+ * contains the index of that operation in the pending list -+ */ -+} op_ext_ident_t; - /* Pending list operations. - * it contains a list ('next') of nested operations. The - * order the same order that the server applied the operation -@@ -90,6 +96,7 @@ typedef enum _pl_flags { - typedef struct OPERATION_PL_CTX - { - Operation *op; /* Pending operation, should not be freed as it belongs to the pblock */ -+ uint32_t idx_pl; /* index of the operation in the pending list */ - pl_flags_t flags; /* operation is completed (set to TRUE in POST) */ - Slapi_Entry *entry; /* entry to be store in the enqueued node. 1st arg sync_queue_change */ - Slapi_Entry *eprev; /* pre-entry to be stored in the enqueued node. 2nd arg sync_queue_change */ -@@ -99,6 +106,8 @@ typedef struct OPERATION_PL_CTX - - OPERATION_PL_CTX_T * get_thread_primary_op(void); - void set_thread_primary_op(OPERATION_PL_CTX_T *op); -+const op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb); -+void sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident); - - int sync_register_operation_extension(void); - int sync_unregister_operation_entension(void); -diff --git a/ldap/servers/plugins/sync/sync_init.c b/ldap/servers/plugins/sync/sync_init.c -index 74af14512..9e6a12000 100644 ---- a/ldap/servers/plugins/sync/sync_init.c -+++ b/ldap/servers/plugins/sync/sync_init.c -@@ -16,6 +16,7 @@ static int sync_preop_init(Slapi_PBlock *pb); - static int sync_postop_init(Slapi_PBlock *pb); - static int sync_be_postop_init(Slapi_PBlock *pb); - static int sync_betxn_preop_init(Slapi_PBlock *pb); -+static int sync_persist_register_operation_extension(void); - - static PRUintn thread_primary_op; - -@@ -43,7 +44,8 @@ sync_init(Slapi_PBlock *pb) - slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN, - (void *)sync_close) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, -- (void *)&pdesc) != 0) { -+ (void *)&pdesc) != 0 || -+ sync_persist_register_operation_extension()) { - slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, - "sync_init - Failed to register plugin\n"); - rc = 1; -@@ -242,4 +244,64 @@ set_thread_primary_op(OPERATION_PL_CTX_T *op) - PR_SetThreadPrivate(thread_primary_op, (void *) head); - } - head->next = op; -+} -+ -+/* The following definitions are used for the operation pending list -+ * (used by sync_repl). To retrieve a specific operation in the pending -+ * list, the operation extension contains the index of the operation in -+ * the pending list -+ */ -+static int sync_persist_extension_type; /* initialized in sync_persist_register_operation_extension */ -+static int sync_persist_extension_handle; /* initialized in sync_persist_register_operation_extension */ -+ -+const op_ext_ident_t * -+sync_persist_get_operation_extension(Slapi_PBlock *pb) -+{ -+ Slapi_Operation *op; -+ op_ext_ident_t *ident; -+ -+ slapi_pblock_get(pb, SLAPI_OPERATION, &op); -+ ident = slapi_get_object_extension(sync_persist_extension_type, op, -+ sync_persist_extension_handle); -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_get_operation_extension operation (op=0x%lx) -> %d\n", -+ (ulong) op, ident ? ident->idx_pl : -1); -+ return (const op_ext_ident_t *) ident; -+ -+} -+ -+void -+sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident) -+{ -+ Slapi_Operation *op; -+ -+ slapi_pblock_get(pb, SLAPI_OPERATION, &op); -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_set_operation_extension operation (op=0x%lx) -> %d\n", -+ (ulong) op, op_ident ? op_ident->idx_pl : -1); -+ slapi_set_object_extension(sync_persist_extension_type, op, -+ sync_persist_extension_handle, (void *)op_ident); -+} -+/* operation extension constructor */ -+static void * -+sync_persist_operation_extension_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused))) -+{ -+ /* we only set the extension value explicitly in sync_update_persist_betxn_pre_op */ -+ return NULL; /* we don't set anything in the ctor */ -+} -+ -+/* consumer operation extension destructor */ -+static void -+sync_persist_operation_extension_destructor(void *ext, void *object __attribute__((unused)), void *parent __attribute__((unused))) -+{ -+ op_ext_ident_t *op_ident = (op_ext_ident_t *)ext; -+ slapi_ch_free((void **)&op_ident); -+} -+static int -+sync_persist_register_operation_extension(void) -+{ -+ return slapi_register_object_extension(SYNC_PLUGIN_SUBSYSTEM, -+ SLAPI_EXT_OPERATION, -+ sync_persist_operation_extension_constructor, -+ sync_persist_operation_extension_destructor, -+ &sync_persist_extension_type, -+ &sync_persist_extension_handle); - } -\ No newline at end of file -diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c -index d13f142b0..e93a8fa83 100644 ---- a/ldap/servers/plugins/sync/sync_persist.c -+++ b/ldap/servers/plugins/sync/sync_persist.c -@@ -47,6 +47,9 @@ static int sync_release_connection(Slapi_PBlock *pb, Slapi_Connection *conn, Sla - * per thread pending list of nested operation.. - * being a betxn_preop the pending list has the same order - * that the server received the operation -+ * -+ * In case of DB_RETRY, this callback can be called several times -+ * The detection of the DB_RETRY is done via the operation extension - */ - int - sync_update_persist_betxn_pre_op(Slapi_PBlock *pb) -@@ -54,64 +57,128 @@ sync_update_persist_betxn_pre_op(Slapi_PBlock *pb) - OPERATION_PL_CTX_T *prim_op; - OPERATION_PL_CTX_T *new_op; - Slapi_DN *sdn; -+ uint32_t idx_pl = 0; -+ op_ext_ident_t *op_ident; -+ Operation *op; - - if (!SYNC_IS_INITIALIZED()) { - /* not initialized if sync plugin is not started */ - return 0; - } - -+ prim_op = get_thread_primary_op(); -+ op_ident = sync_persist_get_operation_extension(pb); -+ slapi_pblock_get(pb, SLAPI_OPERATION, &op); -+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); -+ -+ /* Check if we are in a DB retry case */ -+ if (op_ident && prim_op) { -+ OPERATION_PL_CTX_T *current_op; -+ -+ /* This callback is called (with the same operation) because of a DB_RETRY */ -+ -+ /* It already existed (in the operation extension) an index of the operation in the pending list */ -+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next) { -+ if (op_ident->idx_pl == idx_pl) { -+ break; -+ } -+ } -+ -+ /* The retrieved operation in the pending list is at the right -+ * index and state. Just return making this callback a noop -+ */ -+ PR_ASSERT(current_op); -+ PR_ASSERT(current_op->op == op); -+ PR_ASSERT(current_op->flags == OPERATION_PL_PENDING); -+ slapi_log_err(SLAPI_LOG_WARNING, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - DB retried operation targets " -+ "\"%s\" (op=0x%lx idx_pl=%d) => op not changed in PL\n", -+ slapi_sdn_get_dn(sdn), (ulong) op, idx_pl); -+ return 0; -+ } -+ - /* Create a new pending operation node */ - new_op = (OPERATION_PL_CTX_T *)slapi_ch_calloc(1, sizeof(OPERATION_PL_CTX_T)); - new_op->flags = OPERATION_PL_PENDING; -- slapi_pblock_get(pb, SLAPI_OPERATION, &new_op->op); -- slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); -+ new_op->op = op; - -- prim_op = get_thread_primary_op(); - if (prim_op) { - /* It already exists a primary operation, so the current - * operation is a nested one that we need to register at the end - * of the pending nested operations -+ * Also computes the idx_pl that will be the identifier (index) of the operation -+ * in the pending list - */ - OPERATION_PL_CTX_T *current_op; -- for (current_op = prim_op; current_op->next; current_op = current_op->next); -+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next); - current_op->next = new_op; -+ idx_pl++; /* idx_pl is currently the index of the last op -+ * as we are adding a new op we need to increase that index -+ */ - slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - nested operation targets " -- "\"%s\" (0x%lx)\n", -- slapi_sdn_get_dn(sdn), (ulong) new_op->op); -+ "\"%s\" (op=0x%lx idx_pl=%d)\n", -+ slapi_sdn_get_dn(sdn), (ulong) new_op->op, idx_pl); - } else { - /* The current operation is the first/primary one in the txn - * registers it directly in the thread private data (head) - */ - set_thread_primary_op(new_op); -+ idx_pl = 0; /* as primary operation, its index in the pending list is 0 */ - slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - primary operation targets " - "\"%s\" (0x%lx)\n", - slapi_sdn_get_dn(sdn), (ulong) new_op->op); - } -+ -+ /* records, in the operation extension AND in the pending list, the identifier (index) of -+ * this operation into the pending list -+ */ -+ op_ident = (op_ext_ident_t *) slapi_ch_calloc(1, sizeof (op_ext_ident_t)); -+ op_ident->idx_pl = idx_pl; -+ new_op->idx_pl = idx_pl; -+ sync_persist_set_operation_extension(pb, op_ident); - return 0; - } - --/* This operation can not be proceed by sync_repl listener because -- * of internal problem. For example, POST entry does not exist -+/* This operation failed or skipped (e.g. no MODs). -+ * In such case POST entry does not exist - */ - static void --ignore_op_pl(Operation *op) -+ignore_op_pl(Slapi_PBlock *pb) - { - OPERATION_PL_CTX_T *prim_op, *curr_op; -+ op_ext_ident_t *ident; -+ Operation *op; -+ -+ slapi_pblock_get(pb, SLAPI_OPERATION, &op); -+ -+ /* prim_op is set if betxn was called -+ * In case of invalid update (schema violation) the -+ * operation skip betxn and prim_op is not set. -+ * This is the same for ident -+ */ - prim_op = get_thread_primary_op(); -+ ident = sync_persist_get_operation_extension(pb); - -- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { -- if ((curr_op->op == op) && -- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates -- * we can not only rely on 'op' value -- */ -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (0x%lx) from the pending list\n", -- (ulong) op); -- curr_op->flags = OPERATION_PL_IGNORED; -- return; -+ if (ident) { -+ /* The TXN_BEPROP was called, so the operation is -+ * registered in the pending list -+ */ -+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { -+ if (curr_op->idx_pl == ident->idx_pl) { -+ /* The operation extension (ident) refers this operation (currop in the pending list). -+ * This is called during sync_repl postop. At this moment -+ * the operation in the pending list (identified by idx_pl in the operation extension) -+ * should be pending -+ */ -+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING); -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (op=0x%lx, idx_pl=%d) from the pending list\n", -+ (ulong) op, ident->idx_pl); -+ curr_op->flags = OPERATION_PL_IGNORED; -+ return; -+ } - } - } -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl can not retrieve an operation (0x%lx) in pending list\n", -- (ulong) op); -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl failing operation (op=0x%lx, idx_pl=%d) was not in the pending list\n", -+ (ulong) op, ident ? ident->idx_pl : -1); - } - - /* This is a generic function that is called by betxn_post of this plugin. -@@ -126,7 +193,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - { - OPERATION_PL_CTX_T *prim_op = NULL, *curr_op; - Operation *pb_op; -+ op_ext_ident_t *ident; - Slapi_DN *sdn; -+ uint32_t count; /* use for diagnostic of the lenght of the pending list */ - int32_t rc; - - if (!SYNC_IS_INITIALIZED()) { -@@ -138,7 +207,7 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - - if (NULL == e) { - /* Ignore this operation (for example case of failure of the operation) */ -- ignore_op_pl(pb_op); -+ ignore_op_pl(pb); - return; - } - -@@ -161,16 +230,21 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - - - prim_op = get_thread_primary_op(); -+ ident = sync_persist_get_operation_extension(pb); - PR_ASSERT(prim_op); -+ PR_ASSERT(ident); - /* First mark the operation as completed/failed - * the param to be used once the operation will be pushed - * on the listeners queue - */ - for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { -- if ((curr_op->op == pb_op) && -- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates -- * we can not only rely on 'op' value -- */ -+ if (curr_op->idx_pl == ident->idx_pl) { -+ /* The operation extension (ident) refers this operation (currop in the pending list) -+ * This is called during sync_repl postop. At this moment -+ * the operation in the pending list (identified by idx_pl in the operation extension) -+ * should be pending -+ */ -+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING); - if (rc == LDAP_SUCCESS) { - curr_op->flags = OPERATION_PL_SUCCEEDED; - curr_op->entry = e ? slapi_entry_dup(e) : NULL; -@@ -183,46 +257,50 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - } - } - if (!curr_op) { -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation not found on the pendling list\n", label); -+ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation (op=0x%lx, idx_pl=%d) not found on the pendling list\n", -+ label, (ulong) pb_op, ident->idx_pl); - PR_ASSERT(curr_op); - } - --#if DEBUG -- /* dump the pending queue */ -- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { -- char *flags_str; -- char * entry_str; -+ /* for diagnostic of the pending list, dump its content if it is too long */ -+ for (count = 0, curr_op = prim_op; curr_op; count++, curr_op = curr_op->next); -+ if (loglevel_is_set(SLAPI_LOG_PLUGIN) && (count > 10)) { - -- if (curr_op->entry) { -- entry_str = slapi_entry_get_dn(curr_op->entry); -- } else if (curr_op->eprev){ -- entry_str = slapi_entry_get_dn(curr_op->eprev); -- } else { -- entry_str = "unknown"; -- } -- switch (curr_op->flags) { -- case OPERATION_PL_SUCCEEDED: -- flags_str = "succeeded"; -- break; -- case OPERATION_PL_FAILED: -- flags_str = "failed"; -- break; -- case OPERATION_PL_IGNORED: -- flags_str = "ignored"; -- break; -- case OPERATION_PL_PENDING: -- flags_str = "pending"; -- break; -- default: -- flags_str = "unknown"; -- break; -- -+ /* if pending list looks abnormally too long, dump the pending list */ -+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { -+ char *flags_str; -+ char * entry_str; - -- } -- slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n", -+ if (curr_op->entry) { -+ entry_str = slapi_entry_get_dn(curr_op->entry); -+ } else if (curr_op->eprev) { -+ entry_str = slapi_entry_get_dn(curr_op->eprev); -+ } else { -+ entry_str = "unknown"; -+ } -+ switch (curr_op->flags) { -+ case OPERATION_PL_SUCCEEDED: -+ flags_str = "succeeded"; -+ break; -+ case OPERATION_PL_FAILED: -+ flags_str = "failed"; -+ break; -+ case OPERATION_PL_IGNORED: -+ flags_str = "ignored"; -+ break; -+ case OPERATION_PL_PENDING: -+ flags_str = "pending"; -+ break; -+ default: -+ flags_str = "unknown"; -+ break; -+ -+ -+ } -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n", - (ulong) curr_op->op, entry_str, flags_str); -+ } - } --#endif - - /* Second check if it remains a pending operation in the pending list */ - for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { --- -2.26.2 - diff --git a/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch b/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch deleted file mode 100644 index d80f386..0000000 --- a/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch +++ /dev/null @@ -1,145 +0,0 @@ -From e6536aa27bfdc27cad07f6c5cd3312f0f0710c96 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Mon, 1 Feb 2021 09:28:25 +0100 -Subject: [PATCH 3/3] Issue 4581 - A failed re-indexing leaves the database in - broken state (#4582) - -Bug description: - During reindex the numsubordinates attribute is not updated in parent entries. - The consequence is that the internal counter job->numsubordinates==0. - Later when indexing the ancestorid, the server can show the progression of this - indexing with a ratio using job->numsubordinates==0. - Division with 0 -> SIGFPE - -Fix description: - if the numsubordinates is NULL, log a message without a division. - -relates: https://github.com/389ds/389-ds-base/issues/4581 - -Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, Teko Mihinto (thanks !!) - -Platforms tested: F31 ---- - .../slapd/back-ldbm/db-bdb/bdb_import.c | 72 ++++++++++++++----- - 1 file changed, 54 insertions(+), 18 deletions(-) - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -index ba783ee59..7f484934f 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -@@ -468,18 +468,30 @@ bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job) - } - key_count++; - if (!(key_count % PROGRESS_INTERVAL)) { -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -+ "Gathering ancestorid non-leaf IDs: processed %d ancestors...", -+ key_count); -+ } - started_progress_logging = 1; - } - } while (ret == 0 && !(job->flags & FLAG_ABORT)); - - if (started_progress_logging) { - /* finish what we started logging */ -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", -+ "Gathering ancestorid non-leaf IDs: processed %d ancestors", -+ key_count); -+ } - } - import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", - "Finished gathering ancestorid non-leaf IDs."); -@@ -660,9 +672,15 @@ bdb_ancestorid_default_create_index(backend *be, ImportJob *job) - - key_count++; - if (!(key_count % PROGRESS_INTERVAL)) { -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -- "Creating ancestorid index: processed %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -+ "Creating ancestorid index: processed %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -+ "Creating ancestorid index: processed %d ancestors...", -+ key_count); -+ } - started_progress_logging = 1; - } - -@@ -743,9 +761,15 @@ out: - if (ret == 0) { - if (started_progress_logging) { - /* finish what we started logging */ -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -- "Creating ancestorid index: processed %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -+ "Creating ancestorid index: processed %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", -+ "Creating ancestorid index: processed %d ancestors", -+ key_count); -+ } - } - import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", - "Created ancestorid index (old idl)."); -@@ -869,9 +893,15 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job) - - key_count++; - if (!(key_count % PROGRESS_INTERVAL)) { -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -- "Creating ancestorid index: progress %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -+ "Creating ancestorid index: progress %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -+ "Creating ancestorid index: progress %d ancestors...", -+ key_count); -+ } - started_progress_logging = 1; - } - -@@ -932,9 +962,15 @@ out: - if (ret == 0) { - if (started_progress_logging) { - /* finish what we started logging */ -- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -- "Creating ancestorid index: processed %d%% (ID count %d)", -- (key_count * 100 / job->numsubordinates), key_count); -+ if (job->numsubordinates) { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -+ "Creating ancestorid index: processed %d%% (ID count %d)", -+ (key_count * 100 / job->numsubordinates), key_count); -+ } else { -+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", -+ "Creating ancestorid index: processed %d ancestors", -+ key_count); -+ } - } - import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", - "Created ancestorid index (new idl)."); --- -2.26.2 - diff --git a/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch b/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch deleted file mode 100644 index 69c362a..0000000 --- a/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch +++ /dev/null @@ -1,190 +0,0 @@ -From 4839898dbe69d6445f3571beec1bf3f1557d6cc6 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 12 Jan 2021 10:09:23 -0500 -Subject: [PATCH] Issue 4513 - CI Tests - fix test failures - -Description: - - Fixed tests in these suites: basic, entryuuid, filter, lib389, and schema - -relates: https://github.com/389ds/389-ds-base/issues/4513 - -Reviewed by: progier(Thanks!) ---- - dirsrvtests/tests/suites/basic/basic_test.py | 65 ++++++++++--------- - .../filter/rfc3673_all_oper_attrs_test.py | 4 +- - .../suites/lib389/config_compare_test.py | 5 +- - .../suites/lib389/idm/user_compare_i2_test.py | 3 + - .../tests/suites/schema/schema_reload_test.py | 3 + - 5 files changed, 47 insertions(+), 33 deletions(-) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 97908c31c..fc9af46e4 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -1059,6 +1059,41 @@ def test_search_ou(topology_st): - assert len(entries) == 0 - - -+def test_bind_invalid_entry(topology_st): -+ """Test the failing bind does not return information about the entry -+ -+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f -+ -+ :setup: Standalone instance -+ -+ :steps: -+ 1: bind as non existing entry -+ 2: check that bind info does not report 'No such entry' -+ -+ :expectedresults: -+ 1: pass -+ 2: pass -+ """ -+ -+ topology_st.standalone.restart() -+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX -+ try: -+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) -+ except ldap.LDAPError as e: -+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) -+ log.info('exception description: ' + e.args[0]['desc']) -+ if 'info' in e.args[0]: -+ log.info('exception info: ' + e.args[0]['info']) -+ assert e.args[0]['desc'] == 'Invalid credentials' -+ assert 'info' not in e.args[0] -+ pass -+ -+ log.info('test_bind_invalid_entry: PASSED') -+ -+ # reset credentials -+ topology_st.standalone.simple_bind_s(DN_DM, PW_DM) -+ -+ - @pytest.mark.bz1044135 - @pytest.mark.ds47319 - def test_connection_buffer_size(topology_st): -@@ -1477,36 +1512,6 @@ def test_dscreate_with_different_rdn(dscreate_test_rdn_value): - else: - assert True - --def test_bind_invalid_entry(topology_st): -- """Test the failing bind does not return information about the entry -- -- :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f -- -- :setup: Standalone instance -- -- :steps: -- 1: bind as non existing entry -- 2: check that bind info does not report 'No such entry' -- -- :expectedresults: -- 1: pass -- 2: pass -- """ -- -- topology_st.standalone.restart() -- INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX -- try: -- topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) -- except ldap.LDAPError as e: -- log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) -- log.info('exception description: ' + e.args[0]['desc']) -- if 'info' in e.args[0]: -- log.info('exception info: ' + e.args[0]['info']) -- assert e.args[0]['desc'] == 'Invalid credentials' -- assert 'info' not in e.args[0] -- pass -- -- log.info('test_bind_invalid_entry: PASSED') - - - if __name__ == '__main__': -diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -index c882bea5f..0477acda7 100644 ---- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py -@@ -53,11 +53,11 @@ TEST_PARAMS = [(DN_ROOT, False, [ - (TEST_USER_DN, False, [ - 'createTimestamp', 'creatorsName', 'entrydn', - 'entryid', 'modifiersName', 'modifyTimestamp', -- 'nsUniqueId', 'parentid' -+ 'nsUniqueId', 'parentid', 'entryUUID' - ]), - (TEST_USER_DN, True, [ - 'createTimestamp', 'creatorsName', 'entrydn', -- 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid' -+ 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid', 'entryUUID' - ]), - (DN_CONFIG, False, [ - 'numSubordinates', 'passwordHistory' -diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py -index 709bae8cb..84f55acfa 100644 ---- a/dirsrvtests/tests/suites/lib389/config_compare_test.py -+++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py -@@ -22,15 +22,18 @@ def test_config_compare(topology_i2): - st2_config = topology_i2.ins.get('standalone2').config - # 'nsslapd-port' attribute is expected to be same in cn=config comparison, - # but they are different in our testing environment -- # as we are using 2 DS instances running, both running simultaneuosly. -+ # as we are using 2 DS instances running, both running simultaneously. - # Hence explicitly adding 'nsslapd-port' to compare_exclude. - st1_config._compare_exclude.append('nsslapd-port') - st2_config._compare_exclude.append('nsslapd-port') - st1_config._compare_exclude.append('nsslapd-secureport') - st2_config._compare_exclude.append('nsslapd-secureport') -+ st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret') -+ st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret') - - assert Config.compare(st1_config, st2_config) - -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py -index c7540e4ce..ccde0f6b0 100644 ---- a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py -+++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py -@@ -39,6 +39,9 @@ def test_user_compare_i2(topology_i2): - st2_users.create(properties=user_properties) - st2_testuser = st2_users.get('testuser') - -+ st1_testuser._compare_exclude.append('entryuuid') -+ st2_testuser._compare_exclude.append('entryuuid') -+ - assert UserAccount.compare(st1_testuser, st2_testuser) - - -diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py -index 2ece5dda5..e7e7d833d 100644 ---- a/dirsrvtests/tests/suites/schema/schema_reload_test.py -+++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py -@@ -54,6 +54,7 @@ def test_valid_schema(topo): - schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " + - "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + - "sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n") -+ os.chmod(schema_filename, 0o777) - except OSError as e: - log.fatal("Failed to create schema file: " + - "{} Error: {}".format(schema_filename, str(e))) -@@ -106,6 +107,7 @@ def test_invalid_schema(topo): - schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " + - "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + - "sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n") -+ os.chmod(schema_filename, 0o777) - except OSError as e: - log.fatal("Failed to create schema file: " + - "{} Error: {}".format(schema_filename, str(e))) -@@ -122,6 +124,7 @@ def test_invalid_schema(topo): - schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " + - "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + - "cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n") -+ os.chmod(schema_filename, 0o777) - except OSError as e: - log.fatal("Failed to create schema file: " + - "{} Error: {}".format(schema_filename, str(e))) --- -2.26.2 - diff --git a/SOURCES/0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch b/SOURCES/0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch deleted file mode 100644 index 6cd0aeb..0000000 --- a/SOURCES/0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 316aeae09468d6fd3b35422b236751eb1b5c309e Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 9 Feb 2021 14:02:59 -0500 -Subject: [PATCH 1/2] Issue 4609 - CVE - info disclosure when authenticating - -Description: If you bind as a user that does not exist. Error 49 is returned - instead of error 32. As error 32 discloses that the entry does - not exist. When you bind as an entry that does not have userpassword - set then error 48 (inappropriate auth) is returned, but this - discloses that the entry does indeed exist. Instead we should - always return error 49, even if the password is not set in the - entry. This way we do not disclose to an attacker if the Bind - DN exists or not. - -Relates: https://github.com/389ds/389-ds-base/issues/4609 - -Reviewed by: tbordaz(Thanks!) ---- - dirsrvtests/tests/suites/basic/basic_test.py | 39 +++++++++++++++++++- - ldap/servers/slapd/back-ldbm/ldbm_bind.c | 4 +- - ldap/servers/slapd/dse.c | 7 +++- - 3 files changed, 45 insertions(+), 5 deletions(-) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index fc9af46e4..e35f34721 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -9,7 +9,7 @@ - - from subprocess import check_output, PIPE, run - from lib389 import DirSrv --from lib389.idm.user import UserAccounts -+from lib389.idm.user import UserAccount, UserAccounts - import pytest - from lib389.tasks import * - from lib389.utils import * -@@ -1094,6 +1094,43 @@ def test_bind_invalid_entry(topology_st): - topology_st.standalone.simple_bind_s(DN_DM, PW_DM) - - -+def test_bind_entry_missing_passwd(topology_st): -+ """ -+ :id: af209149-8fb8-48cb-93ea-3e82dd7119d2 -+ :setup: Standalone Instance -+ :steps: -+ 1. Bind as database entry that does not have userpassword set -+ 2. Bind as database entry that does not exist -+ 1. Bind as cn=config entry that does not have userpassword set -+ 2. Bind as cn=config entry that does not exist -+ :expectedresults: -+ 1. Fails with error 49 -+ 2. Fails with error 49 -+ 3. Fails with error 49 -+ 4. Fails with error 49 -+ """ -+ user = UserAccount(topology_st.standalone, DEFAULT_SUFFIX) -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ # Bind as the suffix root entry which does not have a userpassword -+ user.bind("some_password") -+ -+ user = UserAccount(topology_st.standalone, "cn=not here," + DEFAULT_SUFFIX) -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ # Bind as the entry which does not exist -+ user.bind("some_password") -+ -+ # Test cn=config since it has its own code path -+ user = UserAccount(topology_st.standalone, "cn=config") -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ # Bind as the config entry which does not have a userpassword -+ user.bind("some_password") -+ -+ user = UserAccount(topology_st.standalone, "cn=does not exist,cn=config") -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ # Bind as an entry under cn=config that does not exist -+ user.bind("some_password") -+ -+ - @pytest.mark.bz1044135 - @pytest.mark.ds47319 - def test_connection_buffer_size(topology_st): -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_bind.c b/ldap/servers/slapd/back-ldbm/ldbm_bind.c -index fa450ecd5..38d115a32 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_bind.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_bind.c -@@ -76,8 +76,8 @@ ldbm_back_bind(Slapi_PBlock *pb) - case LDAP_AUTH_SIMPLE: { - Slapi_Value cv; - if (slapi_entry_attr_find(e->ep_entry, "userpassword", &attr) != 0) { -- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL, -- NULL, 0, NULL); -+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set"); -+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL); - CACHE_RETURN(&inst->inst_cache, &e); - rc = SLAPI_BIND_FAIL; - goto bail; -diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c -index 3c2de75fc..b04fafde6 100644 ---- a/ldap/servers/slapd/dse.c -+++ b/ldap/servers/slapd/dse.c -@@ -1446,7 +1446,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this - - ec = dse_get_entry_copy(pdse, sdn, DSE_USE_LOCK); - if (ec == NULL) { -- slapi_send_ldap_result(pb, LDAP_NO_SUCH_OBJECT, NULL, NULL, 0, NULL); -+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not exist"); -+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL); - return (SLAPI_BIND_FAIL); - } - -@@ -1454,7 +1455,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this - case LDAP_AUTH_SIMPLE: { - Slapi_Value cv; - if (slapi_entry_attr_find(ec, "userpassword", &attr) != 0) { -- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL, NULL, 0, NULL); -+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set"); -+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL); - slapi_entry_free(ec); - return SLAPI_BIND_FAIL; - } -@@ -1462,6 +1464,7 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this - - slapi_value_init_berval(&cv, cred); - if (slapi_pw_find_sv(bvals, &cv) != 0) { -+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Invalid credentials"); - slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL); - slapi_entry_free(ec); - value_done(&cv); --- -2.26.2 - diff --git a/SOURCES/0038-Issue-4649-crash-in-sync_repl-when-a-MODRDN-create-a.patch b/SOURCES/0038-Issue-4649-crash-in-sync_repl-when-a-MODRDN-create-a.patch deleted file mode 100644 index dec42c3..0000000 --- a/SOURCES/0038-Issue-4649-crash-in-sync_repl-when-a-MODRDN-create-a.patch +++ /dev/null @@ -1,58 +0,0 @@ -From b01e30c79b1364ac35c0b2db2ef4a2ff64600a7f Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Tue, 23 Feb 2021 08:58:37 +0100 -Subject: [PATCH 1/2] Issue 4649 - crash in sync_repl when a MODRDN create a - cenotaph (#4652) - -Bug description: - When an operation is flagged OP_FLAG_NOOP, it skips BETXN plugins but calls POST plugins. - For sync_repl, betxn (sync_update_persist_betxn_pre_op) creates an operation extension to be - consumed by the post (sync_update_persist_op). In case of OP_FLAG_NOOP, there is no - operation extension. - -Fix description: - Test that the operation is OP_FLAG_NOOP if the operation extension is missing - -relates: https://github.com/389ds/389-ds-base/issues/4649 - -Reviewed by: William Brown (thanks) - -Platforms tested: F31 ---- - ldap/servers/plugins/sync/sync_persist.c | 14 ++++- - 2 files changed, 75 insertions(+), 2 deletions(-) -diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c -index e93a8fa83..12b23ebac 100644 ---- a/ldap/servers/plugins/sync/sync_persist.c -+++ b/ldap/servers/plugins/sync/sync_persist.c -@@ -206,7 +206,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); - - if (NULL == e) { -- /* Ignore this operation (for example case of failure of the operation) */ -+ /* Ignore this operation (for example case of failure of the operation -+ * or operation resulting in an empty Mods)) -+ */ - ignore_op_pl(pb); - return; - } -@@ -232,7 +234,15 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber - prim_op = get_thread_primary_op(); - ident = sync_persist_get_operation_extension(pb); - PR_ASSERT(prim_op); -- PR_ASSERT(ident); -+ -+ if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) { -+ /* This happens for URP (add cenotaph, fixup rename, tombstone resurrect) -+ * As a NOOP betxn plugins are not called and operation ext is not created -+ */ -+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "Skip noop operation (0x%lx)\n", -+ (ulong) pb_op); -+ return; -+ } - /* First mark the operation as completed/failed - * the param to be used once the operation will be pushed - * on the listeners queue --- -2.26.2 - diff --git a/SOURCES/0039-Issue-4711-SIGSEV-with-sync_repl-4738.patch b/SOURCES/0039-Issue-4711-SIGSEV-with-sync_repl-4738.patch deleted file mode 100644 index c598ba4..0000000 --- a/SOURCES/0039-Issue-4711-SIGSEV-with-sync_repl-4738.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 81e9e6431293cbdde5b037c88e5c644f39d3d14d Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Tue, 27 Apr 2021 09:29:32 +0200 -Subject: [PATCH 1/2] Issue 4711 - SIGSEV with sync_repl (#4738) - -Bug description: - sync_repl sends back entries identified with a unique - identifier that is 'nsuniqueid'. If 'nsuniqueid' is - missing, then it may crash - -Fix description: - Check a nsuniqueid is available else returns OP_ERR - -relates: https://github.com/389ds/389-ds-base/issues/4711 - -Reviewed by: Pierre Rogier, James Chapman, William Brown (Thanks!) - -Platforms tested: F33 ---- - ldap/servers/plugins/sync/sync_util.c | 12 ++++++++++-- - 1 file changed, 10 insertions(+), 2 deletions(-) - -diff --git a/ldap/servers/plugins/sync/sync_util.c b/ldap/servers/plugins/sync/sync_util.c -index e64d519e1..bdba0a6c2 100644 ---- a/ldap/servers/plugins/sync/sync_util.c -+++ b/ldap/servers/plugins/sync/sync_util.c -@@ -127,8 +127,8 @@ sync_create_state_control(Slapi_Entry *e, LDAPControl **ctrlp, int type, Sync_Co - BerElement *ber; - struct berval *bvp; - char *uuid; -- Slapi_Attr *attr; -- Slapi_Value *val; -+ Slapi_Attr *attr = NULL; -+ Slapi_Value *val = NULL; - - if (type == LDAP_SYNC_NONE || ctrlp == NULL || (ber = der_alloc()) == NULL) { - return (LDAP_OPERATIONS_ERROR); -@@ -138,6 +138,14 @@ sync_create_state_control(Slapi_Entry *e, LDAPControl **ctrlp, int type, Sync_Co - - slapi_entry_attr_find(e, SLAPI_ATTR_UNIQUEID, &attr); - slapi_attr_first_value(attr, &val); -+ if ((attr == NULL) || (val == NULL)) { -+ /* It may happen with entries in special backends -+ * such like cn=config, cn=shema, cn=monitor... -+ */ -+ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, -+ "sync_create_state_control - Entries are missing nsuniqueid. Unable to proceed.\n"); -+ return (LDAP_OPERATIONS_ERROR); -+ } - uuid = sync_nsuniqueid2uuid(slapi_value_get_string(val)); - if ((rc = ber_printf(ber, "{eo", type, uuid, 16)) != -1) { - if (cookie) { --- -2.31.1 - diff --git a/SOURCES/0040-Issue-4764-replicated-operation-sometime-checks-ACI-.patch b/SOURCES/0040-Issue-4764-replicated-operation-sometime-checks-ACI-.patch deleted file mode 100644 index 797146c..0000000 --- a/SOURCES/0040-Issue-4764-replicated-operation-sometime-checks-ACI-.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 76d1b4ff8efdff1dbe6139b51da656880d7a8ec6 Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Wed, 26 May 2021 16:07:43 +0200 -Subject: [PATCH 2/2] Issue 4764 - replicated operation sometime checks ACI - (#4783) - ---- - ldap/servers/slapd/connection.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c -index 1883fe711..02c02ffb6 100644 ---- a/ldap/servers/slapd/connection.c -+++ b/ldap/servers/slapd/connection.c -@@ -1764,6 +1764,14 @@ connection_threadmain() - } - } - -+ /* -+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done -+ * before replication session is properly set). -+ */ -+ if (replication_connection) { -+ operation_set_flag(op, OP_FLAG_REPLICATED); -+ } -+ - /* - * Call the do_ function to process this request. - */ --- -2.31.1 - diff --git a/SOURCES/0041-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch b/SOURCES/0041-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch deleted file mode 100644 index 65e20bb..0000000 --- a/SOURCES/0041-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch +++ /dev/null @@ -1,52 +0,0 @@ -From a789f89dbf84dd5f6395198bf5cc4db88453ec4b Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Thu, 10 Jun 2021 15:03:27 +0200 -Subject: [PATCH] Issue 4797 - ACL IP ADDRESS evaluation may corrupt - c_isreplication_session connection flags (#4799) - -Bug description: - The fix for ticket #3764 was broken with a missing break in a - switch. The consequence is that while setting the client IP - address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the - connection is erroneously set as replication connection. - This can lead to crash or failure of testcase - test_access_from_certain_network_only_ip. - This bug was quite hidden until the fix for #4764 is - showing it more frequently - -Fix description: - Add the missing break - -relates: https://github.com/389ds/389-ds-base/issues/4797 - -Reviewed by: Mark Reynolds - -Platforms tested: F33 ---- - ldap/servers/slapd/pblock.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c -index 1ad9d0399..9fd599bcb 100644 ---- a/ldap/servers/slapd/pblock.c -+++ b/ldap/servers/slapd/pblock.c -@@ -2589,7 +2589,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) - pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value); - pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); - break; -- case SLAPI_CONN_CLIENTNETADDR_ACLIP: -+ case SLAPI_CONN_CLIENTNETADDR_ACLIP: - if (pblock->pb_conn == NULL) { - break; - } -@@ -2597,6 +2597,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) - slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip); - pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value; - pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); -+ break; - case SLAPI_CONN_IS_REPLICATION_SESSION: - if (pblock->pb_conn == NULL) { - slapi_log_err(SLAPI_LOG_ERR, --- -2.31.1 - diff --git a/SOURCES/0042-Issue-4492-Changelog-cache-can-upload-updates-from-a.patch b/SOURCES/0042-Issue-4492-Changelog-cache-can-upload-updates-from-a.patch deleted file mode 100644 index 5ba09e3..0000000 --- a/SOURCES/0042-Issue-4492-Changelog-cache-can-upload-updates-from-a.patch +++ /dev/null @@ -1,206 +0,0 @@ -From 16ec195b12688bcbe0d113396eee782175102565 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Mon, 14 Dec 2020 10:41:58 +0100 -Subject: [PATCH] Issue 4492 - Changelog cache can upload updates from a wrong - starting point (CSN) - -Bug description: - When a replication session starts, a starting point is computed - according to supplier/consumer RUVs. - from the starting point the updates are bulk loaded from the CL. - When a bulk set have been fully evaluated the server needs to bulk load another set. - It iterates until there is no more updates to send. - The bug is that during bulk load, it recomputes the CL cursor position - and this computation can be wrong. For example if a new update on - a rarely updated replica (or not known replica) the new position will - be set before the inital starting point - -Fix description: - Fixing the invalid computation is a bit risky (complex code resulting from - years of corner cases handling) and a fix could fail to address others flavor - with the same symptom - The fix is only (sorry for that) safety checking fix that would end a replication session - if the computed cursor position goes before the initial starting point. - In case of large jump behind (24h) the starting point, a warning is logged. - -relates: https://github.com/389ds/389-ds-base/issues/4492 - -Reviewed by: Mark Reynolds, William Brown - -Platforms tested: F31 ---- - ldap/servers/plugins/replication/cl5_api.c | 6 +- - .../servers/plugins/replication/cl5_clcache.c | 60 ++++++++++++++++++- - .../servers/plugins/replication/cl5_clcache.h | 4 +- - 3 files changed, 63 insertions(+), 7 deletions(-) - -diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c -index d7e47495a..403a6a666 100644 ---- a/ldap/servers/plugins/replication/cl5_api.c -+++ b/ldap/servers/plugins/replication/cl5_api.c -@@ -143,6 +143,7 @@ struct cl5replayiterator - ReplicaId consumerRID; /* consumer's RID */ - const RUV *consumerRuv; /* consumer's update vector */ - Object *supplierRuvObj; /* supplier's update vector object */ -+ char starting_csn[CSN_STRSIZE]; - }; - - typedef struct cl5iterator -@@ -1367,7 +1368,7 @@ cl5GetNextOperationToReplay(CL5ReplayIterator *iterator, CL5Entry *entry) - return CL5_BAD_DATA; - } - -- rc = clcache_get_next_change(iterator->clcache, (void **)&key, &keylen, (void **)&data, &datalen, &csn); -+ rc = clcache_get_next_change(iterator->clcache, (void **)&key, &keylen, (void **)&data, &datalen, &csn, iterator->starting_csn); - - if (rc == DB_NOTFOUND) { - /* -@@ -4999,7 +5000,7 @@ _cl5PositionCursorForReplay(ReplicaId consumerRID, const RUV *consumerRuv, Repli - if (rc != 0) - goto done; - -- rc = clcache_load_buffer(clcache, &startCSN, continue_on_missing); -+ rc = clcache_load_buffer(clcache, &startCSN, continue_on_missing, NULL); - - if (rc == 0) { - haveChanges = PR_TRUE; -@@ -5063,6 +5064,7 @@ _cl5PositionCursorForReplay(ReplicaId consumerRID, const RUV *consumerRuv, Repli - (*iterator)->consumerRID = consumerRID; - (*iterator)->consumerRuv = consumerRuv; - (*iterator)->supplierRuvObj = supplierRuvObj; -+ csn_as_string(startCSN, PR_FALSE, (*iterator)->starting_csn); - } else if (rc == CL5_SUCCESS) { - /* we have no changes to send */ - rc = CL5_NOTFOUND; -diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c -index 6b591fb8d..fcbca047a 100644 ---- a/ldap/servers/plugins/replication/cl5_clcache.c -+++ b/ldap/servers/plugins/replication/cl5_clcache.c -@@ -15,6 +15,8 @@ - #include "db.h" /* Berkeley DB */ - #include "cl5.h" /* changelog5Config */ - #include "cl5_clcache.h" -+#include "slap.h" -+#include "proto-slap.h" - - /* newer bdb uses DB_BUFFER_SMALL instead of ENOMEM as the - error return if the given buffer in which to load a -@@ -323,14 +325,21 @@ clcache_return_buffer(CLC_Buffer **buf) - * anchorcsn - passed in for the first load of a replication session; - * flag - DB_SET to load in the key CSN record. - * DB_NEXT to load in the records greater than key CSN. -+ * initial_starting_csn -+ * This is the starting_csn computed at the beginning of -+ * the replication session. It never change during a session -+ * (aka iterator creation). -+ * This is used for safety checking that the next CSN use -+ * for bulk load is not before the initial csn - * return - DB error code instead of cl5 one because of the - * historic reason. - */ - int --clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss) -+clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, char *initial_starting_csn) - { - int rc = 0; - int flag = DB_NEXT; -+ CSN limit_csn = {0}; - - if (anchorCSN) - *anchorCSN = NULL; -@@ -343,6 +352,30 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss) - rc = clcache_adjust_anchorcsn(buf, &flag); - } - -+ /* safety checking, we do not want to (re)start replication before -+ * the inital computed starting point -+ */ -+ if (initial_starting_csn) { -+ csn_init_by_string(&limit_csn, initial_starting_csn); -+ if (csn_compare(&limit_csn, buf->buf_current_csn) > 0) { -+ char curr[CSN_STRSIZE]; -+ int loglevel = SLAPI_LOG_REPL; -+ -+ if (csn_time_difference(&limit_csn, buf->buf_current_csn) > (24 * 60 * 60)) { -+ /* This is a big jump (more than a day) behind the -+ * initial starting csn. Log a warning before ending -+ * the session -+ */ -+ loglevel = SLAPI_LOG_WARNING; -+ } -+ csn_as_string(buf->buf_current_csn, 0, curr); -+ slapi_log_err(loglevel, buf->buf_agmt_name, -+ "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn); -+ /* it just end the session with UPDATE_NO_MORE_UPDATES */ -+ rc = CLC_STATE_DONE; -+ } -+ } -+ - if (rc == 0) { - - buf->buf_state = CLC_STATE_READY; -@@ -365,6 +398,27 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss) - } - /* the use of alternative start csns can be limited, record its usage */ - (*continue_on_miss)--; -+ -+ if (initial_starting_csn) { -+ if (csn_compare(&limit_csn, buf->buf_current_csn) > 0) { -+ char curr[CSN_STRSIZE]; -+ int loglevel = SLAPI_LOG_REPL; -+ -+ if (csn_time_difference(&limit_csn, buf->buf_current_csn) > (24 * 60 * 60)) { -+ /* This is a big jump (more than a day) behind the -+ * initial starting csn. Log a warning before ending -+ * the session -+ */ -+ loglevel = SLAPI_LOG_WARNING; -+ } -+ csn_as_string(buf->buf_current_csn, 0, curr); -+ slapi_log_err(loglevel, buf->buf_agmt_name, -+ "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn); -+ rc = DB_NOTFOUND; -+ -+ return rc; -+ } -+ } - } - /* Reset some flag variables */ - if (rc == 0) { -@@ -492,7 +546,7 @@ retry: - * *data: output - data of the next change, or NULL if no more change - */ - int --clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn) -+clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn, char *initial_starting_csn) - { - int skip = 1; - int rc = 0; -@@ -510,7 +564,7 @@ clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data - * We're done with the current buffer. Now load the next chunk. - */ - if (NULL == *key && CLC_STATE_READY == buf->buf_state) { -- rc = clcache_load_buffer(buf, NULL, NULL); -+ rc = clcache_load_buffer(buf, NULL, NULL, initial_starting_csn); - if (0 == rc && buf->buf_record_ptr) { - DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data, - *key, *keylen, *data, *datalen); -diff --git a/ldap/servers/plugins/replication/cl5_clcache.h b/ldap/servers/plugins/replication/cl5_clcache.h -index 73eb41590..16d53d563 100644 ---- a/ldap/servers/plugins/replication/cl5_clcache.h -+++ b/ldap/servers/plugins/replication/cl5_clcache.h -@@ -23,9 +23,9 @@ typedef struct clc_buffer CLC_Buffer; - int clcache_init(DB_ENV **dbenv); - void clcache_set_config(void); - int clcache_get_buffer(CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv); --int clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss); -+int clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, char *initial_starting_csn); - void clcache_return_buffer(CLC_Buffer **buf); --int clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn); -+int clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn, char *initial_starting_csn); - void clcache_destroy(void); - - #endif --- -2.31.1 - diff --git a/SOURCES/0043-Issue-4644-Large-updates-can-reset-the-CLcache-to-th.patch b/SOURCES/0043-Issue-4644-Large-updates-can-reset-the-CLcache-to-th.patch deleted file mode 100644 index f60a953..0000000 --- a/SOURCES/0043-Issue-4644-Large-updates-can-reset-the-CLcache-to-th.patch +++ /dev/null @@ -1,146 +0,0 @@ -From f05f5f20a468efa82d13a99687ac5d3a5d80a3c9 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Tue, 23 Feb 2021 13:42:31 +0100 -Subject: [PATCH] Issue 4644 - Large updates can reset the CLcache to the - beginning of the changelog (#4647) - -Bug description: - The replication agreements are using bulk load to load updates. - For bulk load it uses a cursor with DB_MULTIPLE_KEY and DB_NEXT. - Before using the cursor, it must be initialized with DB_SET. - - If during the cursor/DB_SET the CSN refers to an update that is larger than - the size of the provided buffer, then the cursor remains not initialized and - c_get returns DB_BUFFER_SMALL. - - The consequence is that the next c_get(DB_MULTIPLE_KEY and DB_NEXT) will return the - first record in the changelog DB. This break CLcache. - -Fix description: - The fix is to harden cursor initialization so that if DB_SET fails - because of DB_BUFFER_SMALL. It reallocates buf_data and retries a DB_SET. - If DB_SET can not be initialized it logs a warning. - - The patch also changes the behaviour of the fix #4492. - #4492 detected a massive (1day) jump prior the starting csn and ended the - replication session. If the jump was systematic, for example - if the CLcache got broken because of a too large updates, then - replication was systematically stopped. - This patch suppress the systematically stop, letting RA doing a big jump. - From #4492 only remains the warning. - -relates: https://github.com/389ds/389-ds-base/issues/4644 - -Reviewed by: Pierre Rogier (Thanks !!!!) - -Platforms tested: F31 ---- - .../servers/plugins/replication/cl5_clcache.c | 68 +++++++++++++++---- - 1 file changed, 53 insertions(+), 15 deletions(-) - -diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c -index fcbca047a..90dec4d54 100644 ---- a/ldap/servers/plugins/replication/cl5_clcache.c -+++ b/ldap/servers/plugins/replication/cl5_clcache.c -@@ -370,9 +370,7 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha - } - csn_as_string(buf->buf_current_csn, 0, curr); - slapi_log_err(loglevel, buf->buf_agmt_name, -- "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn); -- /* it just end the session with UPDATE_NO_MORE_UPDATES */ -- rc = CLC_STATE_DONE; -+ "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s.\n", curr, initial_starting_csn); - } - } - -@@ -413,10 +411,7 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha - } - csn_as_string(buf->buf_current_csn, 0, curr); - slapi_log_err(loglevel, buf->buf_agmt_name, -- "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn); -- rc = DB_NOTFOUND; -- -- return rc; -+ "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s.\n", curr, initial_starting_csn); - } - } - } -@@ -444,6 +439,42 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha - return rc; - } - -+/* Set a cursor to a specific key (buf->buf_key) -+ * In case buf_data is too small to receive the value, DB_SET fails -+ * (DB_BUFFER_SMALL). This let the cursor uninitialized that is -+ * problematic because further cursor DB_NEXT will reset the cursor -+ * to the beginning of the CL. -+ * If buf_data is too small, this function reallocates enough space -+ * -+ * It returns the return code of cursor->c_get -+ */ -+static int -+clcache_cursor_set(DBC *cursor, CLC_Buffer *buf) -+{ -+ int rc; -+ uint32_t ulen; -+ uint32_t dlen; -+ uint32_t size; -+ -+ rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET); -+ if (rc == DB_BUFFER_SMALL) { -+ uint32_t ulen; -+ -+ /* Fortunately, buf->buf_data.size has been set by -+ * c_get() to the actual data size needed. So we can -+ * reallocate the data buffer and try to set again. -+ */ -+ ulen = buf->buf_data.ulen; -+ buf->buf_data.ulen = (buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1) * DEFAULT_CLC_BUFFER_PAGE_SIZE; -+ buf->buf_data.data = slapi_ch_realloc(buf->buf_data.data, buf->buf_data.ulen); -+ slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, -+ "clcache_cursor_set - buf data len reallocated %d -> %d bytes (DB_BUFFER_SMALL)\n", -+ ulen, buf->buf_data.ulen); -+ rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET); -+ } -+ return rc; -+} -+ - static int - clcache_load_buffer_bulk(CLC_Buffer *buf, int flag) - { -@@ -478,17 +509,24 @@ retry: - - if (use_flag == DB_NEXT) { - /* For bulk read, position the cursor before read the next block */ -- rc = cursor->c_get(cursor, -- &buf->buf_key, -- &buf->buf_data, -- DB_SET); -+ rc = clcache_cursor_set(cursor, buf); - } - -- /* -- * Continue if the error is no-mem since we don't need to -- * load in the key record anyway with DB_SET. -- */ - if (0 == rc || DB_BUFFER_SMALL == rc) { -+ /* -+ * It should not have failed with DB_BUFFER_SMALL as we tried -+ * to adjust buf_data in clcache_cursor_set. -+ * But if it failed with DB_BUFFER_SMALL, there is a risk in clcache_cursor_get -+ * that the cursor will be reset to the beginning of the changelog. -+ * Returning an error at this point will stop replication that is -+ * a risk. So just accept the risk of a reset to the beginning of the CL -+ * and log an alarming message. -+ */ -+ if (rc == DB_BUFFER_SMALL) { -+ slapi_log_err(SLAPI_LOG_WARNING, buf->buf_agmt_name, -+ "clcache_load_buffer_bulk - Fail to position on csn=%s from the changelog (too large update ?). Risk of full CL evaluation.\n", -+ (char *)buf->buf_key.data); -+ } - rc = clcache_cursor_get(cursor, buf, use_flag); - } - } --- -2.31.1 - diff --git a/SOURCES/0044-Issue-4563-Failure-on-s390x-Fails-to-split-RDN-o-pki.patch b/SOURCES/0044-Issue-4563-Failure-on-s390x-Fails-to-split-RDN-o-pki.patch deleted file mode 100644 index 752cd19..0000000 --- a/SOURCES/0044-Issue-4563-Failure-on-s390x-Fails-to-split-RDN-o-pki.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 7e042cbc74440b81f46efa73ccb36d80732c7074 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Thu, 28 Jan 2021 10:39:31 +0100 -Subject: [PATCH] Issue 4563 - Failure on s390x: 'Fails to split RDN - "o=pki-tomcat-CA" into components' (#4573) - -Bug description: - SLAPI_OPERATION_TYPE is a stored/read as an int (slapi_pblock_get/set). - This although the storage field is an unsigned long. - Calling slapi_pblock_get with an long (8 btyes) destination creates - a problem on big-endian (s390x). - -Fix description: - Define destination op_type as an int (4 bytes) - -relates: https://github.com/389ds/389-ds-base/issues/4563 - -Reviewed by: Mark Reynolds, William Brown - -Platforms tested: F31 (little endian), Debian (big endian) ---- - ldap/servers/slapd/back-ldbm/ldbm_modify.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -index a507f3c31..49ca01d1d 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -@@ -216,7 +216,7 @@ error: - int32_t - entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret) - { -- unsigned long op_type = SLAPI_OPERATION_NONE; -+ int op_type = SLAPI_OPERATION_NONE; - char *new_rdn = NULL; - char **dns = NULL; - char **rdns = NULL; --- -2.31.1 - diff --git a/SOURCES/0045-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch b/SOURCES/0045-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch deleted file mode 100644 index 602d372..0000000 --- a/SOURCES/0045-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch +++ /dev/null @@ -1,226 +0,0 @@ -From 98caa0c0ddf48db791a26764aa695fa2345584ce Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 13 Jul 2021 14:18:03 -0400 -Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro - changelog - -Bug Description: - -When a non-system index is added to a backend it is -disabled until the database is initialized or reindexed. -So in the case of the retro changelog the changenumber index -is alway disabled by default since it is never initialized. -This leads to unexpected unindexed searches of the retro -changelog. - -Fix Description: - -If an index has "nsSystemIndex" set to "true" then enable it -immediately. - -relates: https://github.com/389ds/389-ds-base/issues/4443 - -Reviewed by: spichugi & tbordaz(Thanks!!) ---- - .../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++ - ldap/servers/plugins/retrocl/retrocl_create.c | 2 +- - .../slapd/back-ldbm/ldbm_index_config.c | 25 +++++-- - src/lib389/lib389/_mapped_object.py | 13 ++++ - 4 files changed, 102 insertions(+), 6 deletions(-) - create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py - -diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py -new file mode 100644 -index 000000000..b1dfe962c ---- /dev/null -+++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py -@@ -0,0 +1,68 @@ -+import logging -+import pytest -+import os -+from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX -+from lib389.topologies import topology_st as topo -+from lib389.plugins import RetroChangelogPlugin -+from lib389.idm.user import UserAccounts -+from lib389._mapped_object import DSLdapObjects -+log = logging.getLogger(__name__) -+ -+ -+def test_indexing_is_online(topo): -+ """Test that the changenmumber index is online right after enabling the plugin -+ -+ :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f -+ :setup: Standalone Instance -+ :steps: -+ 1. Enable retro cl -+ 2. Perform some updates -+ 3. Search for "(changenumber>=-1)", and it is not partially unindexed -+ 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ # Enable plugin -+ topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') -+ plugin = RetroChangelogPlugin(topo.standalone) -+ plugin.enable() -+ topo.standalone.restart() -+ -+ # Do a bunch of updates -+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) -+ user_entry = users.create(properties={ -+ 'sn': '1', -+ 'cn': 'user 1', -+ 'uid': 'user1', -+ 'uidNumber': '11', -+ 'gidNumber': '111', -+ 'givenname': 'user1', -+ 'homePhone': '0861234567', -+ 'carLicense': '131D16674', -+ 'mail': 'user1@whereever.com', -+ 'homeDirectory': '/home' -+ }) -+ for count in range(0, 10): -+ user_entry.replace('mail', f'test{count}@test.com') -+ -+ # Search the retro cl, and check for error messages -+ filter_simple = '(changenumber>=-1)' -+ filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))' -+ retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX) -+ retro_changelog_suffix.filter(filter_simple) -+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') -+ -+ # Search the retro cl again with compound filter -+ retro_changelog_suffix.filter(filter_compound) -+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c -index 571e6899f..5bfde7831 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_create.c -+++ b/ldap/servers/plugins/retrocl/retrocl_create.c -@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir) - val.bv_len = strlen(val.bv_val); - slapi_entry_add_values(e, "cn", vals); - -- val.bv_val = "false"; -+ val.bv_val = "true"; /* enables the index */ - val.bv_len = strlen(val.bv_val); - slapi_entry_add_values(e, "nssystemindex", vals); - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -index 9722d0ce7..38e7368e1 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en - #define INDEXTYPE_NONE 1 - - static int --ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf) -+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf) - { - Slapi_Attr *attr; - const struct berval *attrValue; -@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st - } - } - -+ *is_system_index = PR_FALSE; -+ if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) { -+ slapi_attr_first_value(attr, &sval); -+ attrValue = slapi_value_get_berval(sval); -+ if (strcasecmp(attrValue->bv_val, "true") == 0) { -+ *is_system_index = PR_TRUE; -+ } -+ } -+ - /* ok the entry is good to process, pass it to attr_index_config */ - if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) { - slapi_ch_free_string(index_name); -@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)), - void *arg) - { - ldbm_instance *inst = (ldbm_instance *)arg; -+ PRBool is_system_index = PR_FALSE; - - returntext[0] = '\0'; -- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL); -+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL); - if (*returncode == LDAP_SUCCESS) { - return SLAPI_DSE_CALLBACK_OK; - } else { -@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused)) - { - ldbm_instance *inst = (ldbm_instance *)arg; - char *index_name = NULL; -+ PRBool is_system_index = PR_FALSE; - - returntext[0] = '\0'; -- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext); -+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext); - if (*returncode == LDAP_SUCCESS) { - struct attrinfo *ai = NULL; - /* if the index is a "system" index, we assume it's being added by - * by the server, and it's okay for the index to go online immediately. - * if not, we set the index "offline" so it won't actually be used - * until someone runs db2index on it. -+ * If caller wants to add an index that they want to be online -+ * immediately they can also set "nsSystemIndex" to "true" in the -+ * index config entry (e.g. is_system_index). - */ -- if (!ldbm_attribute_always_indexed(index_name)) { -+ if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) { - ainfo_get(inst->inst_be, index_name, &ai); - PR_ASSERT(ai != NULL); - ai->ai_indexmask |= INDEX_OFFLINE; -@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e) - char *index_name = NULL; - int rc = LDAP_SUCCESS; - struct attrinfo *ai = NULL; -+ PRBool is_system_index = PR_FALSE; - - index_name = slapi_entry_attr_get_charptr(e, "cn"); - if (index_name) { - ainfo_get(inst->inst_be, index_name, &ai); - } - if (!ai) { -- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL); -+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL); - } - if (rc == LDAP_SUCCESS) { - /* Assume the caller knows if it is OK to go online immediately */ -diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py -index ca6ea6ef8..6cdcb0dc7 100644 ---- a/src/lib389/lib389/_mapped_object.py -+++ b/src/lib389/lib389/_mapped_object.py -@@ -147,6 +147,19 @@ class DSLdapObject(DSLogging, DSLint): - - return True - -+ def search(self, scope="subtree", filter='objectclass=*'): -+ search_scope = ldap.SCOPE_SUBTREE -+ if scope == 'base': -+ search_scope = ldap.SCOPE_BASE -+ elif scope == 'one': -+ search_scope = ldap.SCOPE_ONE -+ elif scope == 'subtree': -+ search_scope = ldap.SCOPE_SUBTREE -+ return self._instance.search_ext_s(self._dn, search_scope, filter, -+ serverctrls=self._server_controls, -+ clientctrls=self._client_controls, -+ escapehatch='i am sure') -+ - def display(self, attrlist=['*']): - """Get an entry but represent it as a string LDIF - --- -2.31.1 - diff --git a/SOURCES/0046-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch b/SOURCES/0046-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch deleted file mode 100644 index 68a259d..0000000 --- a/SOURCES/0046-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch +++ /dev/null @@ -1,121 +0,0 @@ -From 1da033b82b428bb5b90c201a59aaab24e0f14ccf Mon Sep 17 00:00:00 2001 -From: Firstyear -Date: Fri, 9 Jul 2021 11:53:35 +1000 -Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow - all passwords (#4819) - -Bug Description: Due to mishanding of short dbpwd hashes, the -crypt_r algorithm was misused and was only comparing salts -in some cases, rather than checking the actual content -of the password. - -Fix Description: Stricter checks on dbpwd lengths to ensure -that content passed to crypt_r has at least 2 salt bytes and -1 hash byte, as well as stricter checks on ct_memcmp to ensure -that compared values are the same length, rather than potentially -allowing overruns/short comparisons. - -fixes: https://github.com/389ds/389-ds-base/issues/4817 - -Author: William Brown - -Review by: @mreynolds389 ---- - .../password/pwd_crypt_asterisk_test.py | 50 +++++++++++++++++++ - ldap/servers/plugins/pwdstorage/crypt_pwd.c | 20 +++++--- - 2 files changed, 64 insertions(+), 6 deletions(-) - create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py - -diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py -new file mode 100644 -index 000000000..d76614db1 ---- /dev/null -+++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py -@@ -0,0 +1,50 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2021 William Brown -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import ldap -+import pytest -+from lib389.topologies import topology_st -+from lib389.idm.user import UserAccounts -+from lib389._constants import (DEFAULT_SUFFIX, PASSWORD) -+ -+pytestmark = pytest.mark.tier1 -+ -+def test_password_crypt_asterisk_is_rejected(topology_st): -+ """It was reported that {CRYPT}* was allowing all passwords to be -+ valid in the bind process. This checks that we should be rejecting -+ these as they should represent locked accounts. Similar, {CRYPT}! -+ -+ :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3 -+ :setup: Single instance -+ :steps: 1. Set a password hash in with CRYPT and the content * -+ 2. Test a bind -+ 3. Set a password hash in with CRYPT and the content ! -+ 4. Test a bind -+ :expectedresults: -+ 1. Successfully set the values -+ 2. The bind fails -+ 3. Successfully set the values -+ 4. The bind fails -+ """ -+ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') -+ topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') -+ -+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) -+ user = users.create_test_user() -+ -+ user.set('userPassword', "{CRYPT}*") -+ -+ # Attempt to bind with incorrect password. -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ badconn = user.bind('badpassword') -+ -+ user.set('userPassword', "{CRYPT}!") -+ # Attempt to bind with incorrect password. -+ with pytest.raises(ldap.INVALID_CREDENTIALS): -+ badconn = user.bind('badpassword') -+ -diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c -index 9031b2199..1b37d41ed 100644 ---- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c -+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c -@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */ - int - crypt_pw_cmp(const char *userpwd, const char *dbpwd) - { -- int rc; -- char *cp; -+ int rc = -1; -+ char *cp = NULL; -+ size_t dbpwd_len = strlen(dbpwd); - struct crypt_data data; - data.initialized = 0; - -- /* we use salt (first 2 chars) of encoded password in call to crypt_r() */ -- cp = crypt_r(userpwd, dbpwd, &data); -- if (cp) { -- rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd)); -+ /* -+ * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will -+ * allow any password to bind as we then only compare SALTS. -+ */ -+ if (dbpwd_len >= 3) { -+ /* we use salt (first 2 chars) of encoded password in call to crypt_r() */ -+ cp = crypt_r(userpwd, dbpwd, &data); -+ } -+ /* If these are not the same length, we can not proceed safely with memcmp. */ -+ if (cp && dbpwd_len == strlen(cp)) { -+ rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len); - } else { - rc = -1; - } --- -2.31.1 - diff --git a/SOURCES/0047-Issue-4837-persistent-search-returns-entries-even-wh.patch b/SOURCES/0047-Issue-4837-persistent-search-returns-entries-even-wh.patch deleted file mode 100644 index 408f0bb..0000000 --- a/SOURCES/0047-Issue-4837-persistent-search-returns-entries-even-wh.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 4919320a395ee13db67a4cc5f7c0b76e781b3b73 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 21 Jul 2021 09:16:30 +0200 -Subject: [PATCH] Issue 4837 - persistent search returns entries even when an - error is returned by content-sync-plugin (#4838) - -Bug description: - When a ldap client sends a sync request control, the server response may contain a sync state control. - If the server fails to create the control the search should fail. - -Fix description: - In case the server fails to create the response control - logs the failure of the pre_search - -relates: https://github.com/389ds/389-ds-base/issues/4837 - -Reviewed by: Simon Pichugin - -Platforms tested: RH8.4 ---- - ldap/servers/plugins/sync/sync_refresh.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c -index 646ff760b..4cbb6a949 100644 ---- a/ldap/servers/plugins/sync/sync_refresh.c -+++ b/ldap/servers/plugins/sync/sync_refresh.c -@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb) - Slapi_Entry *e; - slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e); - LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *)); -- sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL); -+ rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL); - slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl); - } - return (rc); --- -2.31.1 - diff --git a/SOURCES/Cargo.lock b/SOURCES/Cargo.lock new file mode 100644 index 0000000..1127ca0 --- /dev/null +++ b/SOURCES/Cargo.lock @@ -0,0 +1,565 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cbindgen" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd" +dependencies = [ + "clap", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn", + "tempfile", + "toml", +] + +[[package]] +name = "cc" +version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.33.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "entryuuid" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "entryuuid_syntax" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "fernet" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" +dependencies = [ + "base64", + "byteorder", + "getrandom", + "openssl", + "zeroize", +] + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hermit-abi" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "jobserver" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +dependencies = [ + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" + +[[package]] +name = "librnsslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "libc", + "slapd", +] + +[[package]] +name = "librslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "libc", + "slapd", +] + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "once_cell" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" + +[[package]] +name = "openssl" +version = "0.10.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-sys" +version = "0.9.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pkg-config" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro2" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +dependencies = [ + "bitflags", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rsds" +version = "0.1.0" + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "serde" +version = "1.0.126" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.126" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "slapd" +version = "0.1.0" +dependencies = [ + "fernet", +] + +[[package]] +name = "slapi_r_plugin" +version = "0.1.0" +dependencies = [ + "lazy_static", + "libc", + "paste", + "uuid", +] + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "syn" +version = "1.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "vcpkg" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "zeroize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index a98b9a9..bd2daeb 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -16,7 +16,7 @@ ExcludeArch: i686 %global use_Socket6 0 %global use_asan 0 -%global use_rust 0 +%global use_rust 1 %global use_legacy 1 %global bundle_jemalloc 1 %if %{use_asan} @@ -42,10 +42,13 @@ ExcludeArch: i686 # set PIE flag %global _hardened_build 1 +# Filter argparse-manpage from autogenerated package Requires +%global __requires_exclude ^python.*argparse-manpage + Summary: 389 Directory Server (base) Name: 389-ds-base -Version: 1.4.3.16 -Release: %{?relprefix}19%{?prerel}%{?dist} +Version: 1.4.3.23 +Release: %{?relprefix}10%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org Group: System Environment/Daemons @@ -54,6 +57,62 @@ Conflicts: freeipa-server < 4.0.3 Obsoletes: %{name} <= 1.4.0.9 Provides: ldif2ldbm >= 0 +##### Bundled cargo crates list - START ##### +Provides: bundled(crate(ansi_term)) = 0.11.0 +Provides: bundled(crate(atty)) = 0.2.14 +Provides: bundled(crate(autocfg)) = 1.0.1 +Provides: bundled(crate(base64)) = 0.10.1 +Provides: bundled(crate(bitflags)) = 1.2.1 +Provides: bundled(crate(byteorder)) = 1.4.2 +Provides: bundled(crate(cbindgen)) = 0.9.1 +Provides: bundled(crate(cc)) = 1.0.66 +Provides: bundled(crate(cfg-if)) = 0.1.10 +Provides: bundled(crate(cfg-if)) = 1.0.0 +Provides: bundled(crate(clap)) = 2.33.3 +Provides: bundled(crate(fernet)) = 0.1.3 +Provides: bundled(crate(foreign-types)) = 0.3.2 +Provides: bundled(crate(foreign-types-shared)) = 0.1.1 +Provides: bundled(crate(getrandom)) = 0.1.16 +Provides: bundled(crate(hermit-abi)) = 0.1.17 +Provides: bundled(crate(itoa)) = 0.4.7 +Provides: bundled(crate(lazy_static)) = 1.4.0 +Provides: bundled(crate(libc)) = 0.2.82 +Provides: bundled(crate(librnsslapd)) = 0.1.0 +Provides: bundled(crate(librslapd)) = 0.1.0 +Provides: bundled(crate(log)) = 0.4.11 +Provides: bundled(crate(openssl)) = 0.10.32 +Provides: bundled(crate(openssl-sys)) = 0.9.60 +Provides: bundled(crate(pkg-config)) = 0.3.19 +Provides: bundled(crate(ppv-lite86)) = 0.2.10 +Provides: bundled(crate(proc-macro2)) = 1.0.24 +Provides: bundled(crate(quote)) = 1.0.8 +Provides: bundled(crate(rand)) = 0.7.3 +Provides: bundled(crate(rand_chacha)) = 0.2.2 +Provides: bundled(crate(rand_core)) = 0.5.1 +Provides: bundled(crate(rand_hc)) = 0.2.0 +Provides: bundled(crate(redox_syscall)) = 0.1.57 +Provides: bundled(crate(remove_dir_all)) = 0.5.3 +Provides: bundled(crate(rsds)) = 0.1.0 +Provides: bundled(crate(ryu)) = 1.0.5 +Provides: bundled(crate(serde)) = 1.0.118 +Provides: bundled(crate(serde_derive)) = 1.0.118 +Provides: bundled(crate(serde_json)) = 1.0.61 +Provides: bundled(crate(slapd)) = 0.1.0 +Provides: bundled(crate(strsim)) = 0.8.0 +Provides: bundled(crate(syn)) = 1.0.58 +Provides: bundled(crate(tempfile)) = 3.1.0 +Provides: bundled(crate(textwrap)) = 0.11.0 +Provides: bundled(crate(toml)) = 0.5.8 +Provides: bundled(crate(unicode-width)) = 0.1.8 +Provides: bundled(crate(unicode-xid)) = 0.2.1 +Provides: bundled(crate(vcpkg)) = 0.2.11 +Provides: bundled(crate(vec_map)) = 0.8.2 +Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1 +Provides: bundled(crate(winapi)) = 0.3.9 +Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 +##### Bundled cargo crates list - END ##### + BuildRequires: nspr-devel BuildRequires: nss-devel >= 3.34 BuildRequires: perl-generators @@ -174,53 +233,40 @@ Source2: %{name}-devel.README %if %{bundle_jemalloc} Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2 %endif -Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch -Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch -Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch -Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch -Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch -Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch -Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch -Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch -Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch -Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch -Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch -Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch -Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch -Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch -Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch -Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch -Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch -Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch -Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch -Patch20: 0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch -Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch -Patch22: 0022-Fix-cherry-pick-erorr.patch -Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch -Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch -Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch -Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch -Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch -Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch -Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch -Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch -Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch -Patch32: 0032-Backport-tests-from-master-branch-fix-failing-tests-.patch -Patch33: 0033-Issue-5442-Search-results-are-different-between-RHDS.patch -Patch34: 0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch -Patch35: 0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch -Patch36: 0036-Issue-4513-CI-Tests-fix-test-failures.patch -Patch37: 0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch -Patch38: 0038-Issue-4649-crash-in-sync_repl-when-a-MODRDN-create-a.patch -Patch39: 0039-Issue-4711-SIGSEV-with-sync_repl-4738.patch -Patch40: 0040-Issue-4764-replicated-operation-sometime-checks-ACI-.patch -Patch41: 0041-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch -Patch42: 0042-Issue-4492-Changelog-cache-can-upload-updates-from-a.patch -Patch43: 0043-Issue-4644-Large-updates-can-reset-the-CLcache-to-th.patch -Patch44: 0044-Issue-4563-Failure-on-s390x-Fails-to-split-RDN-o-pki.patch -Patch45: 0045-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch -Patch46: 0046-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch -Patch47: 0047-Issue-4837-persistent-search-returns-entries-even-wh.patch +%if %{use_rust} +Source4: vendor-%{version}-2.tar.gz +Source5: Cargo.lock +%endif +Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch +Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch +Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch +Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch +Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch +Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch +Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch +Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch +Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch +Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch +Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch +Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch +Patch13: 0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch +Patch14: 0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch +Patch15: 0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch +Patch16: 0016-Issue-4725-Fix-compiler-warnings.patch +Patch17: 0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch +Patch18: 0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch +Patch19: 0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch +Patch20: 0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch +Patch21: 0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch +Patch22: 0022-Issue-4656-remove-problematic-language-from-ds-replc.patch +Patch23: 0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch +Patch24: 0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch +Patch25: 0025-Issue-4837-persistent-search-returns-entries-even-wh.patch +Patch26: 0026-Hardcode-gost-crypt-passsword-storage-scheme.patch +Patch27: 0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch +Patch28: 0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch +Patch29: 0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch +Patch30: 0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch %description @@ -348,6 +394,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server %prep %autosetup -p1 -v -n %{name}-%{version}%{?prerel} +%if %{use_rust} +tar xvzf %{SOURCE4} +cp %{SOURCE5} src/ +%endif %if %{bundle_jemalloc} %setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3 %endif @@ -365,7 +415,7 @@ ASAN_FLAGS="--enable-asan --enable-debug" %endif %if %{use_rust} -RUST_FLAGS="--enable-rust" +RUST_FLAGS="--enable-rust --enable-rust-offline" %endif %if %{use_legacy} @@ -699,9 +749,6 @@ exit 0 %if %{bundle_jemalloc} %{_libdir}/%{pkgname}/lib/libjemalloc.so.2 %endif -%if %{use_rust} -%{_libdir}/%{pkgname}/librsds.so -%endif %if %{use_legacy} %files legacy-tools @@ -839,184 +886,63 @@ exit 0 %doc README.md %changelog -* Wed Jul 21 2021 Thierry Bordaz - 1.4.3.16-19 -- Bump version to 1.4.3.16-19 -- Resolve: Bug 1984091 - persistent search returns entries even when an error is returned by content-sync-plugin - -* Mon Jul 19 2021 Thierry Bordaz - 1.4.3.16-18 -- Bump version to 1.4.3.16-18 -- Resolve: Bug 1983121 - CRYPT password hash with asterisk allows any bind attempt to succeed - -* Fri Jul 16 2021 Thierry Bordaz - 1.4.3.16-17 -- Bump version to 1.4.3.16-17 -- Resolve: Bug 1983095 - Internal unindexed searches in syncrepl -- Resolve: Bug 1980063 - IPA installation fails on s390x with 389-ds-base-1.4.3.8-4.module+el8.3.0+7193+dfd1e8ad.s390x - -* Wed Jun 16 2021 Thierry Bordaz - 1.4.3.16-16 -- Bump version to 1.4.3.16-16 -- Resolves: Bug 1972738 - Changelog cache can upload updates from a wrong starting point (CSN) -- Resolves: Bug 1972721 - Large updates can reset the CLcache to the beginning of the changelog - -* Fri Jun 11 2021 Thierry Bordaz - 1.4.3.16-15 -- Bump version to 1.4.3.16-15 -- Resolves: Bug 1970791 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule - -* Tue Jun 08 2021 Thierry Bordaz - 1.4.3.16-14 -- Bump version to 1.4.3.16-14 -- Resolves: Bug 1968588 - ACIs are being evaluated against the Replication Manager account in a replication context -- Resolves: Bug 1960720 - sync_repl NULL pointer dereference in sync_create_state_control() - -* Thu Mar 11 2021 Mark Reynolds - 1.4.3.16-13 -- Bump version to 1.4.3.16-13 -- Resolves: Bug 1930188 - crash in sync_repl when a MODRDN create a cenotaph - -* Mon Mar 1 2021 Mark Reynolds - 1.4.3.16-12 -- Bump version to 1.4.3.16-12 -- Resolves: Bug 1929067 - PKI instance creation failed with new 389-ds-base build - -* Mon Feb 15 2021 Mark Reynolds - 1.4.3.16-11 -- Bump version to 1.4.3.16-11 -- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation(remove patch as it breaks DogTag, will add this patch back after DogTag is fixed) - -* Wed Feb 10 2021 Mark Reynolds - 1.4.3.16-10 -- Bump version to 1.4.3.16-10 -- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation(part 2) - -* Tue Feb 2 2021 Mark Reynolds - 1.4.3.16-9 -- Bump version to 1.4.3.16-9 -- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation -- Resolves: Bug 1916677 - A failed re-indexing leaves the database in broken state. -- Resolves: Bug 1912822 - sync_repl: when completing an operation in the pending list, it can select the wrong operation - -* Wed Jan 13 2021 Mark Reynolds - 1.4.3.16-8 -- Bump version to 1.4.3.16-8 -- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one -- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards - -* Thu Jan 7 2021 Mark Reynolds - 1.4.3.16-7 -- Bump version to 1.4.3.16-7 -- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine -- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN -- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries - -* Wed Dec 16 2020 Mark Reynolds - 1.4.3.16-6 -- Bump version to 1.4.3.16-6 -- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0) -- Resolves: Bug 1904991 - Unexpected info returned to ldap request -- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix -- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname. - -* Wed Dec 9 2020 Mark Reynolds - 1.4.3.16-5 -- Bump version to 1.4.3.16-5 -- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV -- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested -- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie - -* Thu Dec 3 2020 Mark Reynolds - 1.4.3.16-4 -- Bump version to 1.4.3.16-4 -- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand -- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf -- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix - -* Wed Nov 25 2020 Mark Reynolds - 1.4.3.16-3 -- Bump version to 1.4.3.16-3 -- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema -- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection -- Resolves: Bug 1898850 - Entries conflict not resolved by replication - -* Thu Nov 19 2020 Mark Reynolds - 1.4.3.16-2 -- Bump version to 1.4.3.16-2 -- Resolves: Bug 1859227 - create keep alive entry after on line init -- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32 -- Resolves: Bug 1859228 - do not add referrals for masters with different data generation - -* Mon Oct 26 2020 Mark Reynolds - 1.4.3.16-1 -- Bump version to 1.4.3.16-1 -- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1859225 - suffix management in backends incorrect - -* Mon Oct 26 2020 Mark Reynolds - 1.4.3.14-1 -- Bump version to 1.4.3.14-1 -- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4 -- Resolves: Bug 1859301 - Misleading message in access log for idle timeout -- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results -- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked' -- Resolves: Bug 1859878 - Managed Entries configuration not being enforced -- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend -- Resolves: Bug 1851967 - if dbhome directory is set online backup fails -- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested -- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie -- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection -- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket -- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode -- Resolves: Bug 1859282 - remove ldbm_back_entry_release -- Resolves: Bug 1859225 - suffix management in backends incorrect -- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions -- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema -- Resolves: Bug 1851975 - Add option to reject internal unindexed searches -- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work -- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time -- Resolves: Bug 1848359 - Add failover credentials to replication agreement -- Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix - -* Wed Aug 5 2020 Mark Reynolds - 1.4.3.8-5 -- Bump version to 1.4.3.8-5 -- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version -- Resolves: Bug 1800529 - Memory leaks in disk monitoring -- Resolves: Bug 1748227 - Instance name length is not enforced -- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package - -* Fri Jun 26 2020 Mark Reynolds - 1.4.3.8-4 -- Bump version to 1.4.3.8-4 -- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif -- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value -- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry -- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-' -- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands -- Resolves: Bug 1843567 - Healthcheck to find notes=F -- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset" -- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time -- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned -- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value -- Resolves: Bug 1800529 - Memory leaks in disk monitoring - -* Fri Jun 5 2020 Mark Reynolds - 1.4.3.8-3 -- Bump version to 1.4.3.8-3 -- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted -- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server -- Resolves: Bug 1843090 - abort when a empty valueset is freed -- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry -- Resolves: Bug 1843157 - Check for clock errors and time skew -- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory -- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used -- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports -- Resolves: Bug 1748227 - Instance name length is not enforced -- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name - -* Mon May 11 2020 Mark Reynolds - 1.4.3.8-2 -- Bump version to 1.4.3.8-2 -- Resolves: Bug 1833350 - Remove cockpit dependancies that are breaking builds - -* Mon May 11 2020 Mark Reynolds - 1.4.3.8-1 -- Bump version to 1.4.3.8-1 -- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3 -- Resolves: Bug 1728943 - [RFE] Advance options in RHDS Disk Monitoring Framework -- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset" -- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace -- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev -- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package -- Resolves: Bug 1798394 - Port dbgen from legacy tools package -- Resolves: Bug 1800529 - Memory leaks in disk monitoring -- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console -- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 2 were given -- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value -- Resolves: Bug 1816857 - Searches on cn=config takes values with spaces and makes multiple attributes out of them -- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr() -- Resolves: Bug 1816862 - Memory leak in indirect COS -- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled -- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA -- Resolves: Bug 1790986 - cenotaph errors on modrdn operations -- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1 -- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init +* Thu Aug 26 2021 Mark Reynolds - 1.4.3.23-10 +- Bump version to 1.4.3.23-10 +- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0 + +* Wed Aug 25 2021 Mark Reynolds - 1.4.3.23-9 +- Bump version to 1.4.3.23-9 +- Resolves: Bug 1947044 - remove unsupported GOST password storage scheme + +* Thu Aug 19 2021 Mark Reynolds - 1.4.3.23-8 +- Bump version to 1.4.3.23-8 +- Resolves: Bug 1947044 - add missing patch for import result code +- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute + +* Mon Jul 26 2021 Mark Reynolds - 1.4.3.23-7 +- Bump version to 1.4.3.23-7 +- Resolves: Bug 1983921 - persistent search returns entries even when an error is returned by content-sync-plugin + +* Fri Jul 16 2021 Mark Reynolds - 1.4.3.23-6 +- Bump version to 1.4.3.23-6 +- Resolves: Bug 1982787 - CRYPT password hash with asterisk allows any bind attempt to succeed + +* Thu Jul 15 2021 Mark Reynolds - 1.4.3.23-5 +- Bump version to 1.4.3.23-5 +- Resolves: Bug 1951020 - Internal unindexed searches in syncrepl +- Resolves: Bug 1978279 - ds-replcheck state output message has 'Master' instead of 'Supplier' + +* Tue Jun 29 2021 Mark Reynolds - 1.4.3.23-4 +- Bump version to 1.4.3.23-4 +- Resolves: Bug 1976906 - Instance crash at restart after changelog configuration +- Resolves: Bug 1480323 - ns-slapd crash at startup - Segmentation fault in strcmpi_fast() when the Referential Integrity log is manually edited +- Resolves: Bug 1967596 - Temporary password - add CLI and fix compiler errors + +* Thu Jun 17 2021 Mark Reynolds - 1.4.3.23-3 +- Bump version to 1.4.3.23-3 +- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute +- Resolves: Bug 1967839 - ACIs are being evaluated against the Replication Manager account in a replication context +- Resolves: Bug 1970259 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule +- Resolves: Bug 1972590 - Large updates can reset the CLcache to the beginning of the changelog +- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor) + +* Sun May 30 2021 Mark Reynolds - 1.4.3.23-2 +- Bump version to 1.4.3.23-2 +- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks ) +- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time +- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids + +* Fri May 14 2021 Mark Reynolds - 1.4.3.23-1 +- Bump version to 1.4.3.23-1 +- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5 +- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes +- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor) +- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN) +- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized +- Resolves: Bug 1924848 - Negative wtime on ldapcompare +- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value +- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex +- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock +- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration +- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control() +