From 91ce386728f718876586cebb020854c4554283b5 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jun 14 2021 19:48:12 +0000 Subject: import 389-ds-base-1.4.3.23-2.module+el8.5.0+11209+cb479c8d --- diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata index 9c5f2b7..46fc901 100644 --- a/.389-ds-base.metadata +++ b/.389-ds-base.metadata @@ -1,2 +1,2 @@ -90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2 +c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2 diff --git a/.gitignore b/.gitignore index 9745926..6114f52 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/389-ds-base-1.4.3.16.tar.bz2 +SOURCES/389-ds-base-1.4.3.23.tar.bz2 SOURCES/jemalloc-5.2.1.tar.bz2 diff --git a/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch b/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch deleted file mode 100644 index 1b08b52..0000000 --- a/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch +++ /dev/null @@ -1,159 +0,0 @@ -From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 11 Nov 2020 08:59:18 -0500 -Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN - -Bug Description: Adding an entry with an escaped leading space leads to many - problems. Mainly id2entry can get corrupted during an - import of such an entry, and the entryrdn index is not - updated correctly - -Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact. - -Relates: https://github.com/389ds/389-ds-base/issues/4383 - -Reviewed by: firstyear, progier, and tbordaz (Thanks!!!) ---- - .../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++- - ldap/servers/slapd/dn.c | 8 +- - 2 files changed, 77 insertions(+), 6 deletions(-) - -diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py -index 543718689..7939a99a7 100644 ---- a/dirsrvtests/tests/suites/syntax/acceptance_test.py -+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -7,13 +7,12 @@ - # --- END COPYRIGHT BLOCK --- - - import ldap --import logging - import pytest - import os - from lib389.schema import Schema - from lib389.config import Config - from lib389.idm.user import UserAccounts --from lib389.idm.group import Groups -+from lib389.idm.group import Group, Groups - from lib389._constants import DEFAULT_SUFFIX - from lib389.topologies import log, topology_st as topo - -@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo): - 4. Success - """ - -- # Create group -+ # Create group - groups = Groups(topo.standalone, DEFAULT_SUFFIX) - group = groups.create(properties={'cn': ' test'}) - -@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo): - groups.list() - - -+@pytest.mark.parametrize("props, rawdn", [ -+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"), -+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")]) -+def test_dn_syntax_spaces_delete(topo, props, rawdn): -+ """Test that an entry with a space as the first character in the DN can be -+ deleted without error. 
We also want to make sure the indexes are properly -+ updated by repeatedly adding and deleting the entry, and that the entry cache -+ is properly maintained. -+ -+ :id: b993f37c-c2b0-4312-992c-a9048ff98965 -+ :parametrized: yes -+ :setup: Standalone Instance -+ :steps: -+ 1. Create a group with a DN that has a space as the first/last -+ character. -+ 2. Delete group -+ 3. Add group -+ 4. Modify group -+ 5. Restart server and modify entry -+ 6. Delete group -+ 7. Add group back -+ 8. Delete group using specific DN -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. Success -+ 8. Success -+ """ -+ -+ # Create group -+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ -+ # Delete group (verifies DN/RDN parsing works and cache is correct) -+ group.delete() -+ -+ # Add group again (verifies entryrdn index was properly updated) -+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ -+ # Modify the group (verifies dn/rdn parsing is correct) -+ group.replace('description', 'escaped space group') -+ -+ # Restart the server. This will pull the entry from the database and -+ # convert it into a cache entry, which is different than how a client -+ # first adds an entry and is put into the cache before being written to -+ # disk. -+ topo.standalone.restart() -+ -+ # Make sure we can modify the entry (verifies cache entry was created -+ # correctly) -+ group.replace('description', 'escaped space group after restart') -+ -+ # Make sure it can still be deleted (verifies cache again). -+ group.delete() -+ -+ # Add it back so we can delete it using a specific DN (sanity test to verify -+ # another DN/RDN parsing variation). 
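-+    # For reference, the two raw DN forms exercised above (taken from the
-+    # pytest.mark.parametrize block of this test):
-+    #
-+    #   cn=\20leadingSpace,ou=Groups,dc=example,dc=com    (leading escaped space)
-+    #   cn=trailingSpace\20,ou=Groups,dc=example,dc=com   (trailing escaped space)
-+    #
-+    # The dn.c hunk below makes slapi_dn_normalize_ext() keep the \20 escape
-+    # intact instead of collapsing it to a literal space, which is what keeps
-+    # id2entry and the entryrdn index consistent across the add/delete/modrdn
-+    # steps performed here.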
-+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties=props.copy()) -+ group = Group(topo.standalone, dn=rawdn) -+ group.delete() -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c -index 2af3f38fc..3980b897f 100644 ---- a/ldap/servers/slapd/dn.c -+++ b/ldap/servers/slapd/dn.c -@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) - s++; - } - } -- } else if (s + 2 < ends && -- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { -+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { - /* esc hexpair ==> real character */ - int n = slapi_hexchar2int(*(s + 1)); - int n2 = slapi_hexchar2int(*(s + 2)); -@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) - if (n == 0) { /* don't change \00 */ - *d++ = *++s; - *d++ = *++s; -+ } else if (n == 32) { /* leave \20 (space) intact */ -+ *d++ = *s; -+ *d++ = *++s; -+ *d++ = *++s; -+ s++; - } else { - *d++ = n; - s += 3; --- -2.26.2 - diff --git a/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch b/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch new file mode 100644 index 0000000..1400b43 --- /dev/null +++ b/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch @@ -0,0 +1,1370 @@ +From 5d730f7e9f1e857bc886556db0229607b8d536d2 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 6 May 2021 18:54:20 +0200 +Subject: [PATCH 01/12] Issue 4747 - Remove unstable/unstatus tests from PRCI + (#4748) + +Bug description: + Some tests (17) in the tests suite (dirsrvtest/tests/suites) + are failing although there is no regression. + It needs (long) investigations to status if failures + are due to a bug in the tests or in DS core. + Until those investigations are completes, test suites + loose a large part of its value to detect regression. + Indeed those failing tests may hide a real regression. 
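+A minimal sketch of the marking applied by the fix described below (this
+assumes the "flaky" pytest plugin, which consumes this mark; the test name
+and body are illustrative only):
+
+    import random
+    import pytest
+
+    # Rerun an unstable test up to 2 times; a single pass counts as success.
+    @pytest.mark.flaky(max_runs=2, min_passes=1)
+    def test_sometimes_unstable():
+        # stands in for a timing-sensitive replication/healthcheck assertion
+        assert random.random() > 0.3
+
+CI can also deselect every marked test outright, as the new workflow in this
+patch does with:  py.test -m "not flaky" ...
+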
+ +Fix description: + Flag failing tests with pytest.mark.flaky(max_runs=2, min_passes=1) + Additional action will be to create upstream 17 ticket to + status on each failing tests + +relates: https://github.com/389ds/389-ds-base/issues/4747 + +Reviewed by: Simon Pichugin, Viktor Ashirov (many thanks for your +reviews and help) + +Platforms tested: F33 +--- + .github/workflows/pytest.yml | 84 +++++ + dirsrvtests/tests/suites/acl/keywords_test.py | 16 +- + .../tests/suites/clu/dsctl_acceptance_test.py | 56 --- + .../tests/suites/clu/repl_monitor_test.py | 2 + + .../dynamic_plugins/dynamic_plugins_test.py | 8 +- + .../suites/fourwaymmr/fourwaymmr_test.py | 3 +- + .../suites/healthcheck/health_config_test.py | 1 + + .../suites/healthcheck/health_sync_test.py | 2 + + .../tests/suites/import/import_test.py | 23 +- + .../tests/suites/indexes/regression_test.py | 63 ++++ + .../paged_results/paged_results_test.py | 3 +- + .../tests/suites/password/regression_test.py | 2 + + .../tests/suites/plugins/accpol_test.py | 20 +- + .../suites/plugins/managed_entry_test.py | 351 ++++++++++++++++++ + .../tests/suites/plugins/memberof_test.py | 3 +- + .../suites/replication/cleanallruv_test.py | 8 +- + .../suites/replication/encryption_cl5_test.py | 8 +- + .../tests/suites/retrocl/basic_test.py | 292 --------------- + 18 files changed, 576 insertions(+), 369 deletions(-) + create mode 100644 .github/workflows/pytest.yml + delete mode 100644 dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py + create mode 100644 dirsrvtests/tests/suites/plugins/managed_entry_test.py + delete mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py + +diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml +new file mode 100644 +index 000000000..015794d96 +--- /dev/null ++++ b/.github/workflows/pytest.yml +@@ -0,0 +1,84 @@ ++name: Test ++ ++on: [push, pull_request] ++ ++jobs: ++ build: ++ name: Build ++ runs-on: ubuntu-20.04 ++ container: ++ image: quay.io/389ds/ci-images:test ++ outputs: ++ matrix: ${{ steps.set-matrix.outputs.matrix }} ++ steps: ++ - name: Checkout ++ uses: actions/checkout@v2 ++ ++ - name: Get a list of all test suites ++ id: set-matrix ++ run: echo "::set-output name=matrix::$(python3 .github/scripts/generate_matrix.py)" ++ ++ - name: Build RPMs ++ run: cd $GITHUB_WORKSPACE && SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms ++ ++ - name: Tar build artifacts ++ run: tar -cvf dist.tar dist/ ++ ++ - name: Upload RPMs ++ uses: actions/upload-artifact@v2 ++ with: ++ name: rpms ++ path: dist.tar ++ ++ test: ++ name: Test ++ runs-on: ubuntu-20.04 ++ needs: build ++ strategy: ++ fail-fast: false ++ matrix: ${{ fromJson(needs.build.outputs.matrix) }} ++ ++ steps: ++ - name: Checkout ++ uses: actions/checkout@v2 ++ ++ - name: Install dependencies ++ run: | ++ sudo apt update -y ++ sudo apt install -y docker.io containerd runc ++ ++ sudo cp .github/daemon.json /etc/docker/daemon.json ++ ++ sudo systemctl unmask docker ++ sudo systemctl start docker ++ ++ - name: Download RPMs ++ uses: actions/download-artifact@master ++ with: ++ name: rpms ++ ++ - name: Extract RPMs ++ run: tar xvf dist.tar ++ ++ - name: Run pytest in a container ++ run: | ++ set -x ++ CID=$(sudo docker run -d -h server.example.com --privileged --rm -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test) ++ sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" ++ sudo docker exec $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml -v 
dirsrvtests/tests/suites/${{ matrix.suite }} ++ ++ - name: Make the results file readable by all ++ if: always() ++ run: ++ sudo chmod -f a+r pytest.xml ++ ++ - name: Sanitize filename ++ run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV ++ ++ - name: Upload pytest test results ++ if: always() ++ uses: actions/upload-artifact@v2 ++ with: ++ name: pytest-${{ env.PYTEST_SUITE }} ++ path: pytest.xml ++ +diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py +index 0174152e3..c5e989f3b 100644 +--- a/dirsrvtests/tests/suites/acl/keywords_test.py ++++ b/dirsrvtests/tests/suites/acl/keywords_test.py +@@ -216,7 +216,8 @@ def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_us + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_any_machine( + topo, add_user, aci_of_user + ): +@@ -245,6 +246,8 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + topo, add_user, aci_of_user + ): +@@ -276,7 +279,8 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_can_access_the_data_when_connecting_from_some_network_only( + topo, add_user, aci_of_user + ): +@@ -306,7 +310,8 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. + +@@ -332,7 +337,8 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( + topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. +@@ -418,6 +424,8 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50378 + @pytest.mark.bz1710848 + @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) +diff --git a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py +deleted file mode 100644 +index a0f89defd..000000000 +--- a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py ++++ /dev/null +@@ -1,56 +0,0 @@ +-# --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2021 Red Hat, Inc. +-# All rights reserved. 
+-# +-# License: GPL (version 3 or any later version). +-# See LICENSE for details. +-# --- END COPYRIGHT BLOCK --- +- +-import logging +-import pytest +-import os +-from lib389._constants import * +-from lib389.topologies import topology_st as topo +- +-log = logging.getLogger(__name__) +- +- +-def test_custom_path(topo): +- """Test that a custom path, backup directory, is correctly used by lib389 +- when the server is stopped. +- +- :id: 8659e209-ee83-477e-8183-1d2f555669ea +- :setup: Standalone Instance +- :steps: +- 1. Get the LDIF directory +- 2. Change the server's backup directory to the LDIF directory +- 3. Stop the server, and perform a backup +- 4. Backup was written to LDIF directory +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- """ +- +- # Get LDIF dir +- ldif_dir = topo.standalone.get_ldif_dir() +- +- # Set backup directory to LDIF directory +- topo.standalone.config.replace('nsslapd-bakdir', ldif_dir) +- +- # Stop the server and take a backup +- topo.standalone.stop() +- topo.standalone.db2bak(None) +- +- # Verify backup was written to LDIF directory +- backups = os.listdir(ldif_dir) +- assert len(backups) +- +- +-if __name__ == '__main__': +- # Run isolated +- # -s for DEBUG mode +- CURRENT_FILE = os.path.realpath(__file__) +- pytest.main(["-s", CURRENT_FILE]) +- +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index 9428edb26..3cf6343c8 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -90,6 +90,8 @@ def get_hostnames_from_log(port1, port2): + host_m2 = match.group(2) + return (host_m1, host_m2) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50545 + @pytest.mark.bz1739718 + @pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") +diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py +index b61daed74..7558cc03d 100644 +--- a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py ++++ b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py +@@ -68,7 +68,8 @@ def check_replicas(topology_m2): + + log.info('Data is consistent across the replicas.\n') + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_acceptance(topology_m2): + """Exercise each plugin and its main features, while + changing the configuration without restarting the server. +@@ -140,7 +141,8 @@ def test_acceptance(topology_m2): + ############################################################################ + check_replicas(topology_m2) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_memory_corruption(topology_m2): + """Check the plugins for memory corruption issues while + dynamic plugins option is enabled +@@ -242,6 +244,8 @@ def test_memory_corruption(topology_m2): + ############################################################################ + check_replicas(topology_m2) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.tier2 + def test_stress(topology_m2): + """Test plugins while under a big load. 
Perform the test 5 times +diff --git a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py +index 5b0754a2e..c5a746ebb 100644 +--- a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py ++++ b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py +@@ -144,7 +144,8 @@ def test_delete_a_few_entries_in_m4(topo_m4, _cleanupentris): + topo_m4.ms["supplier4"], topo_m4.ms["supplier3"], 30 + ) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_replicated_multivalued_entries(topo_m4): + """ + Replicated multivalued entries are ordered the same way on all consumers +diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +index 3d102e859..f470c05c6 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +@@ -337,6 +337,7 @@ def test_healthcheck_low_disk_space(topology_st): + os.remove(file) + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds50791 + @pytest.mark.bz1843567 + @pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") +diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py +index 75bbfd35c..74df1b322 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py +@@ -70,6 +70,8 @@ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searc + @pytest.mark.ds50873 + @pytest.mark.bz1685160 + @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): + """Check if HealthCheck returns DSREPLLE0003 code + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index defe447d5..119b097f1 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -14,6 +14,7 @@ import os + import pytest + import time + import glob ++import logging + from lib389.topologies import topology_st as topo + from lib389._constants import DEFAULT_SUFFIX, TaskWarning + from lib389.dbgen import dbgen_users +@@ -28,6 +29,12 @@ from lib389.idm.account import Accounts + + pytestmark = pytest.mark.tier1 + ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) + + def _generate_ldif(topo, no_no): + """ +@@ -349,7 +356,8 @@ def _toggle_private_import_mem(request, topo): + ('nsslapd-db-private-import-mem', 'off')) + request.addfinalizer(finofaci) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + """With nsslapd-db-private-import-mem: on is faster import. 
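+
+The hunk below raises the import size from 20 to 1000 users: with only 20
+entries both offline imports finish in fractions of a second, so the
+total_time1 < total_time2 assertion can flip on scheduling noise alone. A
+sketch of the underlying measurement pattern (generic Python, not lib389
+code; do_import is a hypothetical stand-in for the offline import call):
+
+    import time
+
+    def timed(label, fn, *args):
+        """Return fn's wall-clock duration; larger workloads make the
+        on/off comparison far less sensitive to timing noise."""
+        start = time.monotonic()
+        fn(*args)
+        elapsed = time.monotonic() - start
+        print(f"{label}: {elapsed:.2f}s")
+        return elapsed
+
+    # e.g.  assert timed("private-mem on", do_import) < timed("off", do_import)
+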
+ +@@ -381,16 +389,19 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + config = LDBMConfig(topo.standalone) + # Measure offline import time duration total_time1 +- total_time1 = _import_offline(topo, 20) ++ total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 +- total_time2 = _import_offline(topo, 20) ++ total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 ++ log.info("total_time1 = %f" % total_time1) ++ log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 ++ + # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), +@@ -398,14 +409,16 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time1 +- total_time1 = _import_offline(topo, 20) ++ total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 +- total_time2 = _import_offline(topo, 20) ++ total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 ++ log.info("toral_time1 = %f" % total_time1) ++ log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 + + +diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py +index 1a71f16e9..ed0c8885f 100644 +--- a/dirsrvtests/tests/suites/indexes/regression_test.py ++++ b/dirsrvtests/tests/suites/indexes/regression_test.py +@@ -19,6 +19,68 @@ from lib389.topologies import topology_st as topo + pytestmark = pytest.mark.tier1 + + ++@pytest.fixture(scope="function") ++def add_a_group_with_users(request, topo): ++ """ ++ Add a group and users, which are members of this group. ++ """ ++ groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn=None) ++ group = groups.create(properties={'cn': 'test_group'}) ++ users_list = [] ++ users_num = 100 ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) ++ for num in range(users_num): ++ USER_NAME = f'test_{num}' ++ user = users.create(properties={ ++ 'uid': USER_NAME, ++ 'sn': USER_NAME, ++ 'cn': USER_NAME, ++ 'uidNumber': f'{num}', ++ 'gidNumber': f'{num}', ++ 'homeDirectory': f'/home/{USER_NAME}' ++ }) ++ users_list.append(user) ++ group.add_member(user.dn) ++ ++ def fin(): ++ """ ++ Removes group and users. 
++ """ ++ # If the server crashed, start it again to do the cleanup ++ if not topo.standalone.status(): ++ topo.standalone.start() ++ for user in users_list: ++ user.delete() ++ group.delete() ++ ++ request.addfinalizer(fin) ++ ++ ++@pytest.fixture(scope="function") ++def set_small_idlistscanlimit(request, topo): ++ """ ++ Set nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer ++ """ ++ db_cfg = DatabaseConfig(topo.standalone) ++ old_idlistscanlimit = db_cfg.get_attr_vals_utf8('nsslapd-idlistscanlimit') ++ db_cfg.set([('nsslapd-idlistscanlimit', '100')]) ++ topo.standalone.restart() ++ ++ def fin(): ++ """ ++ Set nsslapd-idlistscanlimit back to the default value ++ """ ++ # If the server crashed, start it again to do the cleanup ++ if not topo.standalone.status(): ++ topo.standalone.start() ++ db_cfg.set([('nsslapd-idlistscanlimit', old_idlistscanlimit)]) ++ topo.standalone.restart() ++ ++ request.addfinalizer(fin) ++ ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented") + def test_reindex_task_creates_abandoned_index_file(topo): + """ + Recreating an index for the same attribute but changing +@@ -123,3 +185,4 @@ if __name__ == "__main__": + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +index 9fdceb165..0b45b7d96 100644 +--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py ++++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +@@ -506,7 +506,8 @@ def test_search_with_timelimit(topology_st, create_user): + finally: + del_users(users_list) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.parametrize('aci_subject', + ('dns = "{}"'.format(HOSTNAME), + 'ip = "{}"'.format(IP_ADDRESS))) +diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py +index 251834421..8f1facb6d 100644 +--- a/dirsrvtests/tests/suites/password/regression_test.py ++++ b/dirsrvtests/tests/suites/password/regression_test.py +@@ -215,6 +215,8 @@ def test_global_vs_local(topo, passw_policy, create_user, user_pasw): + # reset password + create_user.set('userPassword', PASSWORD) + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + @pytest.mark.ds49789 + def test_unhashed_pw_switch(topo_supplier): + """Check that nsslapd-unhashed-pw-switch works corrently +diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py +index 73e2e54d1..77975c747 100644 +--- a/dirsrvtests/tests/suites/plugins/accpol_test.py ++++ b/dirsrvtests/tests/suites/plugins/accpol_test.py +@@ -520,7 +520,8 @@ def test_glinact_limit(topology_st, accpol_global): + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') + del_users(topology_st, suffix, subtree, userid, nousrs) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnologin_attr(topology_st, accpol_global): + """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present + +@@ -610,7 +611,8 @@ def test_glnologin_attr(topology_st, accpol_global): + account_status(topology_st, suffix, subtree, userid, nousrs, 0, 
"Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnoalt_stattr(topology_st, accpol_global): + """Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1 + +@@ -656,6 +658,8 @@ def test_glnoalt_stattr(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glattr_modtime(topology_st, accpol_global): + """Verify if user account can be inactivated based on modifyTimeStamp attribute + +@@ -705,6 +709,8 @@ def test_glattr_modtime(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnoalt_nologin(topology_st, accpol_global): + """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO + +@@ -763,6 +769,8 @@ def test_glnoalt_nologin(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glinact_nsact(topology_st, accpol_global): + """Verify if user account can be activated using ns-activate.pl script. + +@@ -812,6 +820,8 @@ def test_glinact_nsact(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glinact_acclock(topology_st, accpol_global): + """Verify if user account is activated when account is unlocked by passwordlockoutduration. + +@@ -868,6 +878,8 @@ def test_glinact_acclock(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_glnact_pwexp(topology_st, accpol_global): + """Verify if user account is activated when password is reset after password is expired + +@@ -951,6 +963,8 @@ def test_glnact_pwexp(topology_st, accpol_global): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_locact_inact(topology_st, accpol_local): + """Verify if user account is inactivated when accountInactivityLimit is exceeded. + +@@ -995,6 +1009,8 @@ def test_locact_inact(topology_st, accpol_local): + del_users(topology_st, suffix, subtree, userid, nousrs) + + ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_locinact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when moved from ou=groups to ou=people subtree. + +diff --git a/dirsrvtests/tests/suites/plugins/managed_entry_test.py b/dirsrvtests/tests/suites/plugins/managed_entry_test.py +new file mode 100644 +index 000000000..662044ccd +--- /dev/null ++++ b/dirsrvtests/tests/suites/plugins/managed_entry_test.py +@@ -0,0 +1,351 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import pytest ++import time ++from lib389.topologies import topology_st as topo ++from lib389.idm.user import UserAccount, UserAccounts ++from lib389.idm.account import Account, Accounts ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.idm.group import Groups ++from lib389.config import Config ++from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit ++from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate ++from lib389.idm.nscontainer import nsContainers ++from lib389.idm.domain import Domain ++from lib389.tasks import Entry ++import ldap ++ ++pytestmark = pytest.mark.tier1 ++USER_PASSWORD = 'password' ++ ++ ++@pytest.fixture(scope="module") ++def _create_inital(topo): ++ """ ++ Will create entries for this module ++ """ ++ meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) ++ mep_template1 = meps.create( ++ properties={'cn': 'UPG Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup', ++ 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split( ++ '|')}) ++ conf_mep = MEPConfigs(topo.standalone) ++ conf_mep.create(properties={'cn': 'UPG Definition1', 'originScope': f'cn=Users,{DEFAULT_SUFFIX}', ++ 'originFilter': 'objectclass=posixaccount', ++ 'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}', ++ 'managedTemplate': mep_template1.dn}) ++ container = nsContainers(topo.standalone, DEFAULT_SUFFIX) ++ for cn in ['Users', 'Groups']: ++ container.create(properties={'cn': cn}) ++ ++ ++def test_binddn_tracking(topo, _create_inital): ++ """Test Managed Entries basic functionality ++ ++ :id: ea2ddfd4-aaec-11ea-8416-8c16451d917b ++ :setup: Standalone Instance ++ :steps: ++ 1. Set nsslapd-plugin-binddn-tracking attribute under cn=config ++ 2. Add user ++ 3. Managed Entry Plugin runs against managed entries upon any update without validating ++ 4. verify creation of User Private Group with its time stamp value ++ 5. Modify the SN attribute which is not mapped with managed entry ++ 6. run ModRDN operation and check the User Private group ++ 7. Check the time stamp of UPG should be changed now ++ 8. Check the creatorsname should be user dn and internalCreatorsname should be plugin name ++ 9. Check if a managed group entry was created ++ :expected results: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. 
Success ++ """ ++ config = Config(topo.standalone) ++ # set nsslapd-plugin-binddn-tracking attribute under cn=config ++ config.replace('nsslapd-plugin-binddn-tracking', 'on') ++ # Add user ++ user = UserAccounts(topo.standalone, f'cn=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}' ++ entry = Account(topo.standalone, f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}') ++ # Managed Entry Plugin runs against managed entries upon any update without validating ++ # verify creation of User Private Group with its time stamp value ++ stamp1 = entry.get_attr_val_utf8('modifyTimestamp') ++ user.replace('sn', 'NewSN_modified') ++ stamp2 = entry.get_attr_val_utf8('modifyTimestamp') ++ # Modify the SN attribute which is not mapped with managed entry ++ # Check the time stamp of UPG should not be changed ++ assert stamp1 == stamp2 ++ time.sleep(1) ++ # run ModRDN operation and check the User Private group ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='cn=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}' ++ entry = Account(topo.standalone, f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}') ++ stamp3 = entry.get_attr_val_utf8('modifyTimestamp') ++ # Check the time stamp of UPG should be changed now ++ assert stamp2 != stamp3 ++ time.sleep(1) ++ user.replace('gidNumber', '1') ++ stamp4 = entry.get_attr_val_utf8('modifyTimestamp') ++ assert stamp4 != stamp3 ++ # Check the creatorsname should be user dn and internalCreatorsname should be plugin name ++ assert entry.get_attr_val_utf8('creatorsname') == 'cn=directory manager' ++ assert entry.get_attr_val_utf8('internalCreatorsname') == 'cn=Managed Entries,cn=plugins,cn=config' ++ assert entry.get_attr_val_utf8('modifiersname') == 'cn=directory manager' ++ user.delete() ++ config.replace('nsslapd-plugin-binddn-tracking', 'off') ++ ++ ++class WithObjectClass(Account): ++ def __init__(self, instance, dn=None): ++ super(WithObjectClass, self).__init__(instance, dn) ++ self._rdn_attribute = 'uid' ++ self._create_objectclasses = ['top', 'person', 'inetorgperson'] ++ ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++def test_mentry01(topo, _create_inital): ++ """Test Managed Entries basic functionality ++ ++ :id: 9b87493b-0493-46f9-8364-6099d0e5d806 ++ :setup: Standalone Instance ++ :steps: ++ 1. Check the plug-in status ++ 2. Add Template and definition entry ++ 3. Add our org units ++ 4. Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ 5. Disable the plug-in and check the status ++ 6. Enable the plug-in and check the status the plug-in is disabled and creation of UPG should fail ++ 7. Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ 8. Add users, run ModRDN operation and check the User Private group ++ 9. Add users, run LDAPMODIFY to change the gidNumber and check the User Private group ++ 10. Checking whether creation of User Private group fails for existing group entry ++ 11. Checking whether adding of posixAccount objectClass to existing user creates UPG ++ 12. Running ModRDN operation and checking the user private groups mepManagedBy attribute ++ 13. Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG ++ 14. Change the RDN of template entry, DSA Unwilling to perform error expected ++ 15. 
Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted ++ :expected results: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ 10. Success ++ 11. Success ++ 12. Success ++ 13. Success ++ 14. Fail(Unwilling to perform ) ++ 15. Success ++ """ ++ # Check the plug-in status ++ mana = ManagedEntriesPlugin(topo.standalone) ++ assert mana.status() ++ # Add Template and definition entry ++ org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) ++ org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}') ++ meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) ++ mep_template1 = meps.create(properties={ ++ 'cn': 'UPG Template1', ++ 'mepRDNAttr': 'cn', ++ 'mepStaticAttr': 'objectclass: posixGroup', ++ 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) ++ conf_mep = MEPConfigs(topo.standalone) ++ mep_config = conf_mep.create(properties={ ++ 'cn': 'UPG Definition2', ++ 'originScope': org1.dn, ++ 'originFilter': 'objectclass=posixaccount', ++ 'managedBase': org2.dn, ++ 'managedTemplate': mep_template1.dn}) ++ # Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' ++ # Disable the plug-in and check the status ++ mana.disable() ++ user.delete() ++ topo.standalone.restart() ++ # Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ # Enable the plug-in and check the status ++ mana.enable() ++ user.delete() ++ topo.standalone.restart() ++ # Add users with PosixAccount ObjectClass and verify creation of User Private Group ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' ++ # Add users, run ModRDN operation and check the User Private group ++ # Add users, run LDAPMODIFY to change the gidNumber and check the User Private group ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}' ++ user.replace('gidNumber', '20209') ++ entry = Account(topo.standalone, f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') ++ assert entry.get_attr_val_utf8('gidNumber') == '20209' ++ user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309')) ++ assert entry.get_attr_val_utf8('gidNumber') == '31309' ++ user.delete() ++ # Checking whether creation of User Private group fails for existing group entry ++ grp = Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'}) ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ entry.status() ++ user.delete() ++ # Checking whether adding of posixAccount objectClass to existing user creates UPG ++ # Add Users without posixAccount objectClass ++ users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}') ++ user_properties1 = 
{'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': 'sasa@sasa.com', 'telephoneNumber': '123'} ++ user = users.create(properties=user_properties1) ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ # Add posixAccount objectClass ++ user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']), ++ ('homeDirectory', '/home/ok'), ++ ('uidNumber', '61603'), ('gidNumber', '61603')) ++ assert not user.get_attr_val_utf8('mepManagedEntry') ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') ++ # Add inetuser objectClass ++ user.replace_many( ++ ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', ++ 'organizationalPerson', 'nsMemberOf', 'nsAccount', ++ 'person', 'mepOriginEntry', 'inetuser']), ++ ('memberOf', entry.dn)) ++ assert entry.status() ++ user.delete() ++ user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() ++ entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') ++ # Add groupofNames objectClass ++ user.replace_many( ++ ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', ++ 'organizationalPerson', 'nsMemberOf', 'nsAccount', ++ 'person', 'mepOriginEntry', 'groupofNames']), ++ ('memberOf', user.dn)) ++ assert entry.status() ++ # Running ModRDN operation and checking the user private groups mepManagedBy attribute ++ user.replace('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}') ++ user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}' ++ # Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG ++ user.remove('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}') ++ user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}' ++ # Change the RDN of template entry, DSA Unwilling to perform error expected ++ mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}') ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com') ++ # Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted ++ before = user.get_attr_val_utf8('mepManagedEntry') ++ user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com') ++ assert user.get_attr_val_utf8('mepManagedEntry') != before ++ ++ ++def test_managed_entry_removal(topo): ++ """Check that we can't remove managed entry manually ++ ++ :id: cf9c5be5-97ef-46fc-b199-8346acf4c296 ++ :setup: Standalone Instance ++ :steps: ++ 1. Enable the plugin ++ 2. Restart the instance ++ 3. Add our org units ++ 4. Set up config entry and template entry for the org units ++ 5. Add an entry that meets the MEP scope ++ 6. Check if a managed group entry was created ++ 7. Try to remove the entry while bound as Admin (non-DM) ++ 8. Remove the entry while bound as DM ++ 9. Check that the managing entry can be deleted too ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Should fail ++ 8. Success ++ 9. 
Success ++ """ ++ ++ inst = topo.standalone ++ ++ # Add ACI so we can test that non-DM user can't delete managed entry ++ domain = Domain(inst, DEFAULT_SUFFIX) ++ ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" ++ ACI_TARGETATTR = "(targetattr = *)" ++ ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " ++ ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ++ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT ++ domain.add('aci', ACI_BODY) ++ ++ # stop the plugin, and start it ++ plugin = ManagedEntriesPlugin(inst) ++ plugin.disable() ++ plugin.enable() ++ ++ # Add our org units ++ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ++ ou_people = ous.create(properties={'ou': 'managed_people'}) ++ ou_groups = ous.create(properties={'ou': 'managed_groups'}) ++ ++ mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) ++ mep_template1 = mep_templates.create(properties={ ++ 'cn': 'MEP template', ++ 'mepRDNAttr': 'cn', ++ 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), ++ 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') ++ }) ++ mep_configs = MEPConfigs(inst) ++ mep_configs.create(properties={'cn': 'config', ++ 'originScope': ou_people.dn, ++ 'originFilter': 'objectclass=posixAccount', ++ 'managedBase': ou_groups.dn, ++ 'managedTemplate': mep_template1.dn}) ++ inst.restart() ++ ++ # Add an entry that meets the MEP scope ++ test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) ++ managing_entry = test_users_m1.create_test_user(1001) ++ managing_entry.reset_password(USER_PASSWORD) ++ user_bound_conn = managing_entry.bind(USER_PASSWORD) ++ ++ # Get the managed entry ++ managed_groups = Groups(inst, ou_groups.dn, rdn=None) ++ managed_entry = managed_groups.get(managing_entry.rdn) ++ ++ # Check that the managed entry was created ++ assert managed_entry.exists() ++ ++ # Try to remove the entry while bound as Admin (non-DM) ++ managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) ++ managed_entry_user_conn = managed_groups_user_conn.get(managed_entry.rdn) ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ managed_entry_user_conn.delete() ++ assert managed_entry_user_conn.exists() ++ ++ # Remove the entry while bound as DM ++ managed_entry.delete() ++ assert not managed_entry.exists() ++ ++ # Check that the managing entry can be deleted too ++ managing_entry.delete() ++ assert not managing_entry.exists() ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py +index bc99eef7d..d3b32c856 100644 +--- a/dirsrvtests/tests/suites/plugins/memberof_test.py ++++ b/dirsrvtests/tests/suites/plugins/memberof_test.py +@@ -2655,7 +2655,8 @@ def test_complex_group_scenario_9(topology_st): + verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + +- ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_memberof_auto_add_oc(topology_st): + """Test the auto add objectclass (OC) feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. 
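+
+For the auto-add-objectclass feature referenced in the hunk above, a minimal
+configuration sketch (lib389; "inst" is assumed to be a connected DirSrv
+instance, and memberOfAutoAddOC is the MemberOf plugin setting involved):
+
+    from lib389.plugins import MemberOfPlugin
+
+    memberof = MemberOfPlugin(inst)
+    memberof.enable()
+    # When a target entry has no objectclass that allows the memberOf
+    # attribute, the plugin adds this predefined one instead of failing:
+    memberof.replace('memberOfAutoAddOC', 'nsMemberOf')
+    inst.restart()
+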
+diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py +index 5610e3c19..f0cd99cfc 100644 +--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py ++++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py +@@ -223,7 +223,7 @@ def test_clean(topology_m4, m4rid): + + log.info('test_clean PASSED, restoring supplier 4...') + +- ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_clean_restart(topology_m4, m4rid): + """Check that cleanallruv task works properly after a restart + +@@ -295,6 +295,7 @@ def test_clean_restart(topology_m4, m4rid): + log.info('test_clean_restart PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_clean_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + +@@ -353,6 +354,7 @@ def test_clean_force(topology_m4, m4rid): + log.info('test_clean_force PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort(topology_m4, m4rid): + """Test the abort task basic functionality + +@@ -408,6 +410,7 @@ def test_abort(topology_m4, m4rid): + log.info('test_abort PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort_restart(topology_m4, m4rid): + """Test the abort task can handle a restart, and then resume + +@@ -486,6 +489,7 @@ def test_abort_restart(topology_m4, m4rid): + log.info('test_abort_restart PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_abort_certify(topology_m4, m4rid): + """Test the abort task with a replica-certify-all option + +@@ -555,6 +559,7 @@ def test_abort_certify(topology_m4, m4rid): + log.info('test_abort_certify PASSED, restoring supplier 4...') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_stress_clean(topology_m4, m4rid): + """Put each server(m1 - m4) under a stress, and perform the entire clean process + +@@ -641,6 +646,7 @@ def test_stress_clean(topology_m4, m4rid): + ldbm_config.set('nsslapd-readonly', 'off') + + ++@pytest.mark.flaky(max_runs=2, min_passes=1) + def test_multiple_tasks_with_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + +diff --git a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py +index 7ae7e1b13..b69863f53 100644 +--- a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py ++++ b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py +@@ -73,10 +73,10 @@ def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_enc + assert user_pw_attr in entry, 'Changelog entry does not contain clear text password' + assert count, 'Operation type and DN of the entry not matched in changelog' + +- +-@pytest.mark.parametrize("encryption", ["AES", "3DES"]) +-def test_algorithm_unhashed(topology_with_tls, encryption): +- """Check encryption algowithm AES and 3DES. ++#unstable or unstatus tests, skipped for now ++@pytest.mark.flaky(max_runs=2, min_passes=1) ++def test_algorithm_unhashed(topology_with_tls): ++ """Check encryption algorithm AES + And check unhashed#user#password attribute for encryption. 
+ + :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +deleted file mode 100644 +index 112c73cb9..000000000 +--- a/dirsrvtests/tests/suites/retrocl/basic_test.py ++++ /dev/null +@@ -1,292 +0,0 @@ +-# --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2021 Red Hat, Inc. +-# All rights reserved. +-# +-# License: GPL (version 3 or any later version). +-# See LICENSE for details. +-# --- END COPYRIGHT BLOCK --- +- +-import logging +-import ldap +-import time +-import pytest +-from lib389.topologies import topology_st +-from lib389.plugins import RetroChangelogPlugin +-from lib389._constants import * +-from lib389.utils import * +-from lib389.tasks import * +-from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance +-from lib389.cli_base.dsrc import dsrc_arg_concat +-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add +-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts +- +-pytestmark = pytest.mark.tier1 +- +-USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX +-USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX +-USER_PW = 'password' +-ATTR_HOMEPHONE = 'homePhone' +-ATTR_CARLICENSE = 'carLicense' +- +-log = logging.getLogger(__name__) +- +-def test_retrocl_exclude_attr_add(topology_st): +- """ Test exclude attribute feature of the retrocl plugin for add operation +- +- :id: 3481650f-2070-45ef-9600-2500cfc51559 +- +- :setup: Standalone instance +- +- :steps: +- 1. Enable dynamic plugins +- 2. Confige retro changelog plugin +- 3. Add an entry +- 4. Ensure entry attrs are in the changelog +- 5. Exclude an attr +- 6. Add another entry +- 7. Ensure excluded attr is not in the changelog +- +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- 5. Success +- 6. Success +- 7. 
Success +- """ +- +- st = topology_st.standalone +- +- log.info('Enable dynamic plugins') +- try: +- st.config.set('nsslapd-dynamic-plugins', 'on') +- except ldap.LDAPError as e: +- ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) +- assert False +- +- log.info('Configure retrocl plugin') +- rcl = RetroChangelogPlugin(st) +- rcl.disable() +- rcl.enable() +- rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') +- +- log.info('Restarting instance') +- try: +- st.restart() +- except ldap.LDAPError as e: +- ldap.error('Failed to restart instance ' + e.args[0]['desc']) +- assert False +- +- users = UserAccounts(st, DEFAULT_SUFFIX) +- +- log.info('Adding user1') +- try: +- user1 = users.create(properties={ +- 'sn': '1', +- 'cn': 'user 1', +- 'uid': 'user1', +- 'uidNumber': '11', +- 'gidNumber': '111', +- 'givenname': 'user1', +- 'homePhone': '0861234567', +- 'carLicense': '131D16674', +- 'mail': 'user1@whereever.com', +- 'homeDirectory': '/home/user1', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user1") +- +- log.info('Verify homePhone and carLicense attrs are in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE in clstr +- assert ATTR_CARLICENSE in clstr +- +- log.info('Excluding attribute ' + ATTR_HOMEPHONE) +- args = FakeArgs() +- args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] +- args.instance = 'standalone1' +- args.basedn = None +- args.binddn = None +- args.starttls = False +- args.pwdfile = None +- args.bindpw = None +- args.prompt = False +- args.exclude_attrs = ATTR_HOMEPHONE +- args.func = retrochangelog_add +- dsrc_inst = dsrc_arg_concat(args, None) +- inst = connect_instance(dsrc_inst, False, args) +- result = args.func(inst, None, log, args) +- disconnect_instance(inst) +- assert result is None +- +- log.info("5s delay for retrocl plugin to restart") +- time.sleep(5) +- +- log.info('Adding user2') +- try: +- user2 = users.create(properties={ +- 'sn': '2', +- 'cn': 'user 2', +- 'uid': 'user2', +- 'uidNumber': '22', +- 'gidNumber': '222', +- 'givenname': 'user2', +- 'homePhone': '0879088363', +- 'carLicense': '04WX11038', +- 'mail': 'user2@whereever.com', +- 'homeDirectory': '/home/user2', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user2") +- +- log.info('Verify homePhone attr is not in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE not in clstr +- assert ATTR_CARLICENSE in clstr +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- +-def test_retrocl_exclude_attr_mod(topology_st): +- """ Test exclude attribute feature of the retrocl plugin for mod operation +- +- :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 +- +- :setup: Standalone instance +- +- :steps: +- 1. Enable dynamic plugins +- 2. Confige retro changelog plugin +- 3. Add user1 entry +- 4. 
Ensure entry attrs are in the changelog +- 5. Exclude an attr +- 6. Modify user1 entry +- 7. Ensure excluded attr is not in the changelog +- +- :expectedresults: +- 1. Success +- 2. Success +- 3. Success +- 4. Success +- 5. Success +- 6. Success +- 7. Success +- """ +- +- st = topology_st.standalone +- +- log.info('Enable dynamic plugins') +- try: +- st.config.set('nsslapd-dynamic-plugins', 'on') +- except ldap.LDAPError as e: +- ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) +- assert False +- +- log.info('Configure retrocl plugin') +- rcl = RetroChangelogPlugin(st) +- rcl.disable() +- rcl.enable() +- rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') +- +- log.info('Restarting instance') +- try: +- st.restart() +- except ldap.LDAPError as e: +- ldap.error('Failed to restart instance ' + e.args[0]['desc']) +- assert False +- +- users = UserAccounts(st, DEFAULT_SUFFIX) +- +- log.info('Adding user1') +- try: +- user1 = users.create(properties={ +- 'sn': '1', +- 'cn': 'user 1', +- 'uid': 'user1', +- 'uidNumber': '11', +- 'gidNumber': '111', +- 'givenname': 'user1', +- 'homePhone': '0861234567', +- 'carLicense': '131D16674', +- 'mail': 'user1@whereever.com', +- 'homeDirectory': '/home/user1', +- 'userpassword': USER_PW}) +- except ldap.ALREADY_EXISTS: +- pass +- except ldap.LDAPError as e: +- log.error("Failed to add user1") +- +- log.info('Verify homePhone and carLicense attrs are in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- assert len(cllist) > 0 +- if cllist[0].hasAttr('changes'): +- clstr = (cllist[0].getValue('changes')).decode() +- assert ATTR_HOMEPHONE in clstr +- assert ATTR_CARLICENSE in clstr +- +- log.info('Excluding attribute ' + ATTR_CARLICENSE) +- args = FakeArgs() +- args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] +- args.instance = 'standalone1' +- args.basedn = None +- args.binddn = None +- args.starttls = False +- args.pwdfile = None +- args.bindpw = None +- args.prompt = False +- args.exclude_attrs = ATTR_CARLICENSE +- args.func = retrochangelog_add +- dsrc_inst = dsrc_arg_concat(args, None) +- inst = connect_instance(dsrc_inst, False, args) +- result = args.func(inst, None, log, args) +- disconnect_instance(inst) +- assert result is None +- +- log.info("5s delay for retrocl plugin to restart") +- time.sleep(5) +- +- log.info('Modify user1 carLicense attribute') +- try: +- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) +- except ldap.LDAPError as e: +- log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) +- assert False +- +- log.info('Verify carLicense attr is not in the changelog changestring') +- try: +- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) +- assert len(cllist) > 0 +- # There will be 2 entries in the changelog for this user, we are only +- #interested in the second one, the modify operation. 
+- if cllist[1].hasAttr('changes'): +- clstr = (cllist[1].getValue('changes')).decode() +- assert ATTR_CARLICENSE not in clstr +- except ldap.LDAPError as e: +- log.fatal("Changelog search failed, error: " +str(e)) +- assert False +- +-if __name__ == '__main__': +- # Run isolated +- # -s for DEBUG mode +- CURRENT_FILE = os.path.realpath(__file__) +- pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.3 + diff --git a/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch b/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch new file mode 100644 index 0000000..1b86463 --- /dev/null +++ b/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch @@ -0,0 +1,322 @@ +From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Tue, 27 Apr 2021 17:00:15 +0100 +Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro + changelog (#4723) + +Description: When the retro changelog plugin is enabled it writes the + added/modified values to the "cn-changelog" suffix. In + some cases an entries attribute values can be of a + sensitive nature and should be excluded. This RFE adds + functionality that will allow an admin exclude certain + attributes from the retro changelog DB. + +Relates: https://github.com/389ds/389-ds-base/issues/4701 + +Reviewed by: mreynolds389, droideck (Thanks folks) +--- + .../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++ + 1 file changed, 292 insertions(+) + create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py + +diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py +new file mode 100644 +index 000000000..112c73cb9 +--- /dev/null ++++ b/dirsrvtests/tests/suites/retrocl/basic_test.py +@@ -0,0 +1,292 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2021 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import logging ++import ldap ++import time ++import pytest ++from lib389.topologies import topology_st ++from lib389.plugins import RetroChangelogPlugin ++from lib389._constants import * ++from lib389.utils import * ++from lib389.tasks import * ++from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance ++from lib389.cli_base.dsrc import dsrc_arg_concat ++from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add ++from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts ++ ++pytestmark = pytest.mark.tier1 ++ ++USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX ++USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX ++USER_PW = 'password' ++ATTR_HOMEPHONE = 'homePhone' ++ATTR_CARLICENSE = 'carLicense' ++ ++log = logging.getLogger(__name__) ++ ++def test_retrocl_exclude_attr_add(topology_st): ++ """ Test exclude attribute feature of the retrocl plugin for add operation ++ ++ :id: 3481650f-2070-45ef-9600-2500cfc51559 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Enable dynamic plugins ++ 2. Confige retro changelog plugin ++ 3. Add an entry ++ 4. Ensure entry attrs are in the changelog ++ 5. Exclude an attr ++ 6. Add another entry ++ 7. Ensure excluded attr is not in the changelog ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. 
Success ++ """ ++ ++ st = topology_st.standalone ++ ++ log.info('Enable dynamic plugins') ++ try: ++ st.config.set('nsslapd-dynamic-plugins', 'on') ++ except ldap.LDAPError as e: ++ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) ++ assert False ++ ++ log.info('Configure retrocl plugin') ++ rcl = RetroChangelogPlugin(st) ++ rcl.disable() ++ rcl.enable() ++ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') ++ ++ log.info('Restarting instance') ++ try: ++ st.restart() ++ except ldap.LDAPError as e: ++ ldap.error('Failed to restart instance ' + e.args[0]['desc']) ++ assert False ++ ++ users = UserAccounts(st, DEFAULT_SUFFIX) ++ ++ log.info('Adding user1') ++ try: ++ user1 = users.create(properties={ ++ 'sn': '1', ++ 'cn': 'user 1', ++ 'uid': 'user1', ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'givenname': 'user1', ++ 'homePhone': '0861234567', ++ 'carLicense': '131D16674', ++ 'mail': 'user1@whereever.com', ++ 'homeDirectory': '/home/user1', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user1") ++ ++ log.info('Verify homePhone and carLicense attrs are in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE in clstr ++ assert ATTR_CARLICENSE in clstr ++ ++ log.info('Excluding attribute ' + ATTR_HOMEPHONE) ++ args = FakeArgs() ++ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] ++ args.instance = 'standalone1' ++ args.basedn = None ++ args.binddn = None ++ args.starttls = False ++ args.pwdfile = None ++ args.bindpw = None ++ args.prompt = False ++ args.exclude_attrs = ATTR_HOMEPHONE ++ args.func = retrochangelog_add ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, False, args) ++ result = args.func(inst, None, log, args) ++ disconnect_instance(inst) ++ assert result is None ++ ++ log.info("5s delay for retrocl plugin to restart") ++ time.sleep(5) ++ ++ log.info('Adding user2') ++ try: ++ user2 = users.create(properties={ ++ 'sn': '2', ++ 'cn': 'user 2', ++ 'uid': 'user2', ++ 'uidNumber': '22', ++ 'gidNumber': '222', ++ 'givenname': 'user2', ++ 'homePhone': '0879088363', ++ 'carLicense': '04WX11038', ++ 'mail': 'user2@whereever.com', ++ 'homeDirectory': '/home/user2', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user2") ++ ++ log.info('Verify homePhone attr is not in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE not in clstr ++ assert ATTR_CARLICENSE in clstr ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ ++def test_retrocl_exclude_attr_mod(topology_st): ++ """ Test exclude attribute feature of the retrocl plugin for mod operation ++ ++ :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Enable dynamic plugins ++ 2. Confige retro changelog plugin ++ 3. Add user1 entry ++ 4. 
Ensure entry attrs are in the changelog ++ 5. Exclude an attr ++ 6. Modify user1 entry ++ 7. Ensure excluded attr is not in the changelog ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ """ ++ ++ st = topology_st.standalone ++ ++ log.info('Enable dynamic plugins') ++ try: ++ st.config.set('nsslapd-dynamic-plugins', 'on') ++ except ldap.LDAPError as e: ++ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc']) ++ assert False ++ ++ log.info('Configure retrocl plugin') ++ rcl = RetroChangelogPlugin(st) ++ rcl.disable() ++ rcl.enable() ++ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') ++ ++ log.info('Restarting instance') ++ try: ++ st.restart() ++ except ldap.LDAPError as e: ++ ldap.error('Failed to restart instance ' + e.args[0]['desc']) ++ assert False ++ ++ users = UserAccounts(st, DEFAULT_SUFFIX) ++ ++ log.info('Adding user1') ++ try: ++ user1 = users.create(properties={ ++ 'sn': '1', ++ 'cn': 'user 1', ++ 'uid': 'user1', ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'givenname': 'user1', ++ 'homePhone': '0861234567', ++ 'carLicense': '131D16674', ++ 'mail': 'user1@whereever.com', ++ 'homeDirectory': '/home/user1', ++ 'userpassword': USER_PW}) ++ except ldap.ALREADY_EXISTS: ++ pass ++ except ldap.LDAPError as e: ++ log.error("Failed to add user1") ++ ++ log.info('Verify homePhone and carLicense attrs are in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ assert len(cllist) > 0 ++ if cllist[0].hasAttr('changes'): ++ clstr = (cllist[0].getValue('changes')).decode() ++ assert ATTR_HOMEPHONE in clstr ++ assert ATTR_CARLICENSE in clstr ++ ++ log.info('Excluding attribute ' + ATTR_CARLICENSE) ++ args = FakeArgs() ++ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] ++ args.instance = 'standalone1' ++ args.basedn = None ++ args.binddn = None ++ args.starttls = False ++ args.pwdfile = None ++ args.bindpw = None ++ args.prompt = False ++ args.exclude_attrs = ATTR_CARLICENSE ++ args.func = retrochangelog_add ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, False, args) ++ result = args.func(inst, None, log, args) ++ disconnect_instance(inst) ++ assert result is None ++ ++ log.info("5s delay for retrocl plugin to restart") ++ time.sleep(5) ++ ++ log.info('Modify user1 carLicense attribute') ++ try: ++ st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) ++ except ldap.LDAPError as e: ++ log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) ++ assert False ++ ++ log.info('Verify carLicense attr is not in the changelog changestring') ++ try: ++ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) ++ assert len(cllist) > 0 ++ # There will be 2 entries in the changelog for this user, we are only ++ #interested in the second one, the modify operation. 
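++        # Editor's sketch, not part of the original patch: instead of relying
++        # on result ordering, the modify record could be selected explicitly
++        # by its changeType, along these lines (assuming the same python-ldap
++        # connection and the retro changelog attributes changeType/changes):
++        #
++        #   def changestrings(conn, target_dn, change_type='modify'):
++        #       records = conn.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE,
++        #                               '(&(targetDn=%s)(changeType=%s))' % (target_dn, change_type))
++        #       return [r.getValue('changes').decode()
++        #               for r in records if r.hasAttr('changes')]
++        #
++        # The dsconf-level call earlier (args.exclude_attrs) is what makes the
++        # exclusion persistent; it is stored on the retro changelog plugin's
++        # configuration entry under cn=plugins,cn=config.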
++ if cllist[1].hasAttr('changes'): ++ clstr = (cllist[1].getValue('changes')).decode() ++ assert ATTR_CARLICENSE not in clstr ++ except ldap.LDAPError as e: ++ log.fatal("Changelog search failed, error: " +str(e)) ++ assert False ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.3 + diff --git a/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch b/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch deleted file mode 100644 index e82fdf8..0000000 --- a/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch +++ /dev/null @@ -1,232 +0,0 @@ -From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 3 Nov 2020 12:18:50 +0100 -Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line - initialization - second version (#4399) - -Bug description: -Keep alive entry is not created on target master after on line initialization, -and its RUVelement stays empty until a direct update is issued on that master - -Fix description: -The patch allows a consumer (configured as a master) to create (if it did not -exist before) the consumer's keep alive entry. It creates it at the end of a -replication session at a time we are sure the changelog exists and will not -be reset. It allows a consumer to have RUVelement with csn in the RUV at the -first incoming replication session. - -That is basically lkrispen's proposal with an associated pytest testcase - -Second version changes: - - moved the testcase to suites/replication/regression_test.py - - set up the topology from a 2 master topology then - reinitialized the replicas from an ldif without replication metadata - rather than using the cli. 
- - search for keepalive entries using search_s instead of getEntry
- - add a comment about keep alive entries purpose
-
-last commit:
- - wait until ruvs are in sync before checking keep alive entries
-
-Reviewed by: droideck, Firstyear
-
-Platforms tested: F32
-
-relates: #2058
----
- .../suites/replication/regression_test.py | 130 ++++++++++++++++++
- .../plugins/replication/repl5_replica.c | 14 ++
- ldap/servers/plugins/replication/repl_extop.c | 4 +
- 3 files changed, 148 insertions(+)
-
-diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
-index 844d762b9..14b9d6a44 100644
---- a/dirsrvtests/tests/suites/replication/regression_test.py
-+++ b/dirsrvtests/tests/suites/replication/regression_test.py
-@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
-         for dn, entry in ldif_list:
-             ldif_writer.unparse(dn, entry)
-
-+def _remove_replication_data(ldif_file):
-+    """ Remove the replication data from an ldif file:
-+        db2ldif without -r includes some of the replica data like
-+        - nsUniqueId
-+        - keepalive entries
-+        This function filters the ldif file to remove this data
-+    """
-+
-+    with open(ldif_file) as f:
-+        parser = ldif.LDIFRecordList(f)
-+        parser.parse()
-+
-+    ldif_list = parser.all_records
-+    # Iterate on a copy of the ldif entry list
-+    for dn, entry in ldif_list[:]:
-+        if dn.startswith('cn=repl keep alive'):
-+            ldif_list.remove((dn,entry))
-+        else:
-+            entry.pop('nsUniqueId')
-+    with open(ldif_file, 'w') as f:
-+        ldif_writer = ldif.LDIFWriter(f)
-+        for dn, entry in ldif_list:
-+            ldif_writer.unparse(dn, entry)
-+
-
- @pytest.fixture(scope="module")
- def topo_with_sigkill(request):
-@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
-     assert len(m1entries) == len(m2entries)
-
-
-+def get_keepalive_entries(instance, replica):
-+    # Returns the keep alive entries that exist within the suffix of the server instance
-+    try:
-+        entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
-+                                    "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
-+                                    ['cn', 'nsUniqueId', 'modifierTimestamp'])
-+    except ldap.LDAPError as e:
-+        log.fatal('Failed to retrieve keepalive entries on instance %s: error %s' % (instance, str(e)))
-+        assert False
-+    # No error, so let's log the keepalive entries
-+    if log.isEnabledFor(logging.DEBUG):
-+        for ret in entries:
-+            log.debug("Found keepalive entry:\n" + str(ret))
-+    return entries
-+
-+def verify_keepalive_entries(topo, expected):
-+    # Check that keep alive entries exist (or do not exist) for every master on every master
-+    # Note: The testing method is quite basic: counting that there is one keepalive entry per master.
-+    #       That is ok for simple test cases like test_online_init_should_create_keepalive_entries, but
-+    #       not for the general case, as keepalive entries associated with masters that no longer exist
-+    #       may remain (for example after: db2ldif / demote a master / ldif2db / init other masters)
-+    #       ==> if the function is somehow pushed in lib389, a check better than simply counting the
-+    #       entries should be done.
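-+    # Editor's sketch, not part of the original patch: the stricter check the
-+    # note above calls for could compare RIDs rather than counts. Keepalive
-+    # entries are named "cn=repl keep alive <rid>,<suffix>" (see
-+    # KEEP_ALIVE_DN_FORMAT in repl5_replica.c below), so something like:
-+    #
-+    #   found_rids = {e.getValue('cn').decode().rsplit(' ', 1)[-1]
-+    #                 for e in keepaliveEntries}
-+    #
-+    # compared against the RIDs gathered from Replicas(master).list() would
-+    # also catch stale entries left behind by a demoted master.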
-+    for masterId in topo.ms:
-+        master = topo.ms[masterId]
-+        for replica in Replicas(master).list():
-+            if (replica.get_role() != ReplicaRole.MASTER):
-+                continue
-+            replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
-+            log.debug(f'Checking keepAliveEntries on {replica_info}')
-+            keepaliveEntries = get_keepalive_entries(master, replica)
-+            expectedCount = len(topo.ms) if expected else 0
-+            foundCount = len(keepaliveEntries)
-+            if (foundCount == expectedCount):
-+                log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
-+            else:
-+                log.error(f'{foundCount} keepalive entries were found '
-+                          f'while {expectedCount} were expected on {replica_info}.')
-+                assert False
-+
-+
-+def test_online_init_should_create_keepalive_entries(topo_m2):
-+    """Check that keep alive entries are created when initializing a master from another one
-+
-+    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
-+    :setup: Two masters replication setup
-+    :steps:
-+        1. Generate ldif without replication data
-+        2. Init both masters from that ldif
-+        3. Check that keep alive entries do not exist
-+        4. Perform on-line init of master2 from master1
-+        5. Check that keep alive entries exist
-+    :expectedresults:
-+        1. No error while generating ldif
-+        2. No error while importing the ldif file
-+        3. No keepalive entries should exist on any master
-+        4. No error while initializing master2
-+        5. All keepalive entries should exist on every master
-+
-+    """
-+
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+    m1 = topo_m2.ms["master1"]
-+    m2 = topo_m2.ms["master2"]
-+    # Step 1: Generate ldif without replication data
-+    m1.stop()
-+    m2.stop()
-+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
-+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
-+               excludeSuffixes=None, repl_data=False,
-+               outputfile=ldif_file, encrypt=False)
-+    # Remove replication metadata that is still in the ldif
-+    _remove_replication_data(ldif_file)
-+
-+    # Step 2: Init both masters from that ldif
-+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
-+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
-+    m1.start()
-+    m2.start()
-+
-+    """ Replica state is now as if CLI setup has been done using:
-+        dsconf master1 replication enable --suffix "${SUFFIX}" --role master
-+        dsconf master2 replication enable --suffix "${SUFFIX}" --role master
-+        dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
-+        dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
-+        dsconf master1 repl-agmt create --suffix "${SUFFIX}"
-+        dsconf master2 repl-agmt create --suffix "${SUFFIX}"
-+    """
-+
-+    # Step 3: No keepalive entries should exist on any master
-+    verify_keepalive_entries(topo_m2, False)
-+
-+    # Step 4: Perform on-line init of master2 from master1
-+    agmt = Agreements(m1).list()[0]
-+    agmt.begin_reinit()
-+    (done, error) = agmt.wait_reinit()
-+    assert done is True
-+    assert error is False
-+
-+    # Step 5: All keepalive entries should exist on every master
-+    # Verify the keep alive entry once replication is in sync
-+    # (that is the step that fails when the bug is not fixed)
-+    repl.wait_for_ruv(m2, m1)
-+    verify_keepalive_entries(topo_m2, True)
-+
-+
- if __name__ == '__main__':
-     # Run isolated
-     # -s for DEBUG mode
-diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
-index f01782330..f0ea0f8ef 100644
---- a/ldap/servers/plugins/replication/repl5_replica.c
-+++ b/ldap/servers/plugins/replication/repl5_replica.c
-@@ -373,6 +373,20 @@ replica_destroy(void **arg)
-     slapi_ch_free((void **)arg);
- }
-
-+/******************************************************************************
-+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
-+ ******************************************************************************
-+ * They are subentries of the replicated suffix and there is one per master. *
-+ * These entries exist only to trigger a change that gets replicated over    *
-+ * the topology.                                                             *
-+ * Their main purpose is to generate records in the changelog, and they are  *
-+ * updated from time to time by fractional replication to ensure that at    *
-+ * least one change is replicated by FR after a large number of non-        *
-+ * replicated changes accumulate in the changelog. The benefit is that the  *
-+ * fractional RUV then gets updated, so fewer changes need to be walked in  *
-+ * the changelog when searching for the first change to send.               *
-+ ******************************************************************************/
-+
- #define KEEP_ALIVE_ATTR "keepalivetimestamp"
- #define KEEP_ALIVE_ENTRY "repl keep alive"
- #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
-diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
-index 14c8e0bcc..af486f730 100644
---- a/ldap/servers/plugins/replication/repl_extop.c
-+++ b/ldap/servers/plugins/replication/repl_extop.c
-@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
-          */
-         if (cl5GetState() == CL5_STATE_OPEN) {
-             replica_log_ruv_elements(r);
-+            /* now that the changelog is open and started, we can also create the
-+             * keep alive entry without risk that db and cl will not match
-+             */
-+            replica_subentry_check(replica_get_root(r), replica_get_rid(r));
-         }
-
-         /* ONREPL code that dealt with new RUV, etc was moved into the code
---
-2.26.2
-
diff --git a/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch b/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch
new file mode 100644
index 0000000..67ccf0c
--- /dev/null
+++ b/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch
@@ -0,0 +1,5307 @@
+From eff14f0c884f3d2f541e3be6d9df86087177a76d Mon Sep 17 00:00:00 2001
+From: William Brown
+Date: Mon, 16 Mar 2020 14:59:56 +1000
+Subject: [PATCH 03/12] Ticket 137 - Implement EntryUUID plugin
+
+Bug Description: This implements EntryUUID - a plugin that generates a
+UUID attribute on entries, which can be used by external applications to
+uniquely identify an entry.
+
+Fix Description: This change is quite large as it contains multiple parts:
+
+* Schema for entryuuid.
+  ldap/schema/02common.ldif
+  ldap/schema/03entryuuid.ldif
+* Documentation of the plugin design
+  src/README.md
+* A rust plugin api.
+ src/slapi_r_plugin/Cargo.toml + src/slapi_r_plugin/README.md + src/slapi_r_plugin/build.rs + src/slapi_r_plugin/src/backend.rs + src/slapi_r_plugin/src/ber.rs + src/slapi_r_plugin/src/constants.rs + src/slapi_r_plugin/src/dn.rs + src/slapi_r_plugin/src/entry.rs + src/slapi_r_plugin/src/error.rs + src/slapi_r_plugin/src/init.c + src/slapi_r_plugin/src/lib.rs + src/slapi_r_plugin/src/log.rs + src/slapi_r_plugin/src/macros.rs + src/slapi_r_plugin/src/pblock.rs + src/slapi_r_plugin/src/plugin.rs + src/slapi_r_plugin/src/search.rs + src/slapi_r_plugin/src/syntax_plugin.rs + src/slapi_r_plugin/src/task.rs + src/slapi_r_plugin/src/value.rs +* An entry uuid syntax plugin, that has functional indexing + src/plugins/entryuuid_syntax/Cargo.toml + src/plugins/entryuuid_syntax/src/lib.rs +* A entry uuid plugin that generates entryuuid's and has a fixup task. + src/plugins/entryuuid/Cargo.toml + src/plugins/entryuuid/src/lib.rs +* Supporting changes in the server core to enable and provide apis for the plugins. + ldap/servers/slapd/config.c + ldap/servers/slapd/entry.c + ldap/servers/slapd/fedse.c +* A test suite for for the entryuuid plugin + dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif + dirsrvtests/tests/suites/entryuuid/basic_test.py +* Supporting changes in lib389 + src/lib389/lib389/_constants.py + src/lib389/lib389/backend.py + src/lib389/lib389/instance/setup.py + src/lib389/lib389/plugins.py + src/lib389/lib389/tasks.py +* Changes to support building the plugins + Makefile.am + configure.ac +* Execution of cargo fmt on the tree, causing some clean up of files. + src/Cargo.lock + src/Cargo.toml + src/librnsslapd/build.rs + src/librnsslapd/src/lib.rs + src/librslapd/Cargo.toml + src/librslapd/build.rs + src/librslapd/src/lib.rs + src/libsds/sds/lib.rs + src/libsds/sds/tqueue.rs + src/slapd/src/error.rs + src/slapd/src/fernet.rs + src/slapd/src/lib.rs + +https://pagure.io/389-ds-base/issue/137 + +Author: William Brown + +Review by: mreynolds, lkrispenz (Thanks) +--- + Makefile.am | 96 +- + ...ocalhost-userRoot-2020_03_30_13_14_47.ldif | 233 +++++ + .../tests/suites/entryuuid/basic_test.py | 226 +++++ + ldap/schema/02common.ldif | 1 + + ldap/schema/03entryuuid.ldif | 16 + + ldap/servers/slapd/config.c | 17 + + ldap/servers/slapd/entry.c | 12 + + ldap/servers/slapd/fedse.c | 28 + + src/Cargo.lock | 241 +++-- + src/Cargo.toml | 11 +- + src/README.md | 0 + src/lib389/lib389/_constants.py | 1 + + src/lib389/lib389/backend.py | 2 +- + src/lib389/lib389/instance/setup.py | 14 + + src/lib389/lib389/plugins.py | 30 + + src/lib389/lib389/tasks.py | 14 + + src/librnsslapd/build.rs | 19 +- + src/librnsslapd/src/lib.rs | 16 +- + src/librslapd/Cargo.toml | 4 - + src/librslapd/build.rs | 19 +- + src/librslapd/src/lib.rs | 11 +- + src/libsds/sds/lib.rs | 2 - + src/libsds/sds/tqueue.rs | 23 +- + src/plugins/entryuuid/Cargo.toml | 21 + + src/plugins/entryuuid/src/lib.rs | 196 ++++ + src/plugins/entryuuid_syntax/Cargo.toml | 21 + + src/plugins/entryuuid_syntax/src/lib.rs | 145 +++ + src/slapd/src/error.rs | 2 - + src/slapd/src/fernet.rs | 31 +- + src/slapd/src/lib.rs | 3 - + src/slapi_r_plugin/Cargo.toml | 19 + + src/slapi_r_plugin/README.md | 216 +++++ + src/slapi_r_plugin/build.rs | 8 + + src/slapi_r_plugin/src/backend.rs | 71 ++ + src/slapi_r_plugin/src/ber.rs | 90 ++ + src/slapi_r_plugin/src/constants.rs | 203 +++++ + src/slapi_r_plugin/src/dn.rs | 108 +++ + src/slapi_r_plugin/src/entry.rs | 92 ++ + src/slapi_r_plugin/src/error.rs | 61 ++ + src/slapi_r_plugin/src/init.c | 8 + + 
src/slapi_r_plugin/src/lib.rs | 36 + + src/slapi_r_plugin/src/log.rs | 87 ++ + src/slapi_r_plugin/src/macros.rs | 835 ++++++++++++++++++ + src/slapi_r_plugin/src/pblock.rs | 275 ++++++ + src/slapi_r_plugin/src/plugin.rs | 117 +++ + src/slapi_r_plugin/src/search.rs | 127 +++ + src/slapi_r_plugin/src/syntax_plugin.rs | 169 ++++ + src/slapi_r_plugin/src/task.rs | 148 ++++ + src/slapi_r_plugin/src/value.rs | 235 +++++ + 49 files changed, 4213 insertions(+), 147 deletions(-) + create mode 100644 dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif + create mode 100644 dirsrvtests/tests/suites/entryuuid/basic_test.py + create mode 100644 ldap/schema/03entryuuid.ldif + create mode 100644 src/README.md + create mode 100644 src/plugins/entryuuid/Cargo.toml + create mode 100644 src/plugins/entryuuid/src/lib.rs + create mode 100644 src/plugins/entryuuid_syntax/Cargo.toml + create mode 100644 src/plugins/entryuuid_syntax/src/lib.rs + create mode 100644 src/slapi_r_plugin/Cargo.toml + create mode 100644 src/slapi_r_plugin/README.md + create mode 100644 src/slapi_r_plugin/build.rs + create mode 100644 src/slapi_r_plugin/src/backend.rs + create mode 100644 src/slapi_r_plugin/src/ber.rs + create mode 100644 src/slapi_r_plugin/src/constants.rs + create mode 100644 src/slapi_r_plugin/src/dn.rs + create mode 100644 src/slapi_r_plugin/src/entry.rs + create mode 100644 src/slapi_r_plugin/src/error.rs + create mode 100644 src/slapi_r_plugin/src/init.c + create mode 100644 src/slapi_r_plugin/src/lib.rs + create mode 100644 src/slapi_r_plugin/src/log.rs + create mode 100644 src/slapi_r_plugin/src/macros.rs + create mode 100644 src/slapi_r_plugin/src/pblock.rs + create mode 100644 src/slapi_r_plugin/src/plugin.rs + create mode 100644 src/slapi_r_plugin/src/search.rs + create mode 100644 src/slapi_r_plugin/src/syntax_plugin.rs + create mode 100644 src/slapi_r_plugin/src/task.rs + create mode 100644 src/slapi_r_plugin/src/value.rs + +diff --git a/Makefile.am b/Makefile.am +index 668a095da..627953850 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -38,6 +38,7 @@ if RUST_ENABLE + RUST_ON = 1 + CARGO_FLAGS = @cargo_defs@ + RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ ++# -L@abs_top_builddir@/rs/@rust_target_dir@ + RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil + RUST_DEFINES = -DRUST_ENABLE + if RUST_ENABLE_OFFLINE +@@ -298,7 +299,7 @@ clean-local: + -rm -rf $(abs_top_builddir)/html + -rm -rf $(abs_top_builddir)/man/man3 + if RUST_ENABLE +- CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/libsds/Cargo.toml ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/Cargo.toml + endif + + dberrstrs.h: Makefile +@@ -416,6 +417,11 @@ serverplugin_LTLIBRARIES = libacl-plugin.la \ + $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \ + $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN) + ++if RUST_ENABLE ++serverplugin_LTLIBRARIES += libentryuuid-plugin.la libentryuuid-syntax-plugin.la ++endif ++ ++ + noinst_LIBRARIES = libavl.a + + dist_noinst_HEADERS = \ +@@ -757,6 +763,10 @@ systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \ + $(srcdir)/ldap/schema/60nss-ldap.ldif \ + $(LIBACCTPOLICY_SCHEMA) + ++if RUST_ENABLE ++systemschema_DATA += $(srcdir)/ldap/schema/03entryuuid.ldif ++endif ++ + schema_DATA = $(srcdir)/ldap/schema/99user.ldif + + libexec_SCRIPTS = +@@ -1227,7 +1237,7 @@ libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS) + + if RUST_ENABLE + +-noinst_LTLIBRARIES = 
librsds.la librslapd.la librnsslapd.la ++noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la libentryuuid.la libentryuuid_syntax.la + + ### Why does this exist? + # +@@ -1252,6 +1262,8 @@ librsds_la_EXTRA = src/libsds/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librsds.a: $(librsds_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) + +@@ -1268,6 +1280,7 @@ librslapd_la_EXTRA = src/librslapd/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) +@@ -1288,6 +1301,7 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) +@@ -1295,8 +1309,64 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + # The header needs the lib build first. + rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + ++libslapi_r_plugin_SOURCES = \ ++ src/slapi_r_plugin/src/backend.rs \ ++ src/slapi_r_plugin/src/ber.rs \ ++ src/slapi_r_plugin/src/constants.rs \ ++ src/slapi_r_plugin/src/dn.rs \ ++ src/slapi_r_plugin/src/entry.rs \ ++ src/slapi_r_plugin/src/error.rs \ ++ src/slapi_r_plugin/src/log.rs \ ++ src/slapi_r_plugin/src/macros.rs \ ++ src/slapi_r_plugin/src/pblock.rs \ ++ src/slapi_r_plugin/src/plugin.rs \ ++ src/slapi_r_plugin/src/search.rs \ ++ src/slapi_r_plugin/src/syntax_plugin.rs \ ++ src/slapi_r_plugin/src/task.rs \ ++ src/slapi_r_plugin/src/value.rs \ ++ src/slapi_r_plugin/src/lib.rs ++ ++# Build rust ns-slapd components as a library. 
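++# Editor's note, not part of the original patch: the recipe pattern below is
++# shared by both plugins. cargo emits a static archive under
++# CARGO_TARGET_DIR, and the trailing cp stages it into .libs/ so that the
++# libtool stub library (libentryuuid_plugin_la_LIBADD links -lentryuuid
++# further down) can resolve it at link time.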
++ENTRYUUID_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a ++ ++libentryuuid_la_SOURCES = \ ++ src/plugins/entryuuid/Cargo.toml \ ++ src/plugins/entryuuid/src/lib.rs \ ++ $(libslapi_r_plugin_SOURCES) ++ ++libentryuuid_la_EXTRA = src/plugin/entryuuid/Cargo.lock ++ ++@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a: $(libentryuuid_la_SOURCES) libslapd.la libentryuuid.la ++ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ ++ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid/Cargo.toml \ ++ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) ++ cp $(ENTRYUUID_LIB) @abs_top_builddir@/.libs/libentryuuid.a ++ ++ENTRYUUID_SYNTAX_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a ++ ++libentryuuid_syntax_la_SOURCES = \ ++ src/plugins/entryuuid_syntax/Cargo.toml \ ++ src/plugins/entryuuid_syntax/src/lib.rs \ ++ $(libslapi_r_plugin_SOURCES) ++ ++libentryuuid_syntax_la_EXTRA = src/plugin/entryuuid_syntax/Cargo.lock ++ ++@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a: $(libentryuuid_syntax_la_SOURCES) libslapd.la libentryuuid_syntax.la ++ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ ++ CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ ++ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid_syntax/Cargo.toml \ ++ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) ++ cp $(ENTRYUUID_SYNTAX_LIB) @abs_top_builddir@/.libs/libentryuuid_syntax.a ++ + EXTRA_DIST = $(librsds_la_SOURCES) $(librsds_la_EXTRA) \ + $(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \ ++ $(libentryuuid_la_SOURCES) $(libentryuuid_la_EXTRA) \ ++ $(libentryuuid_syntax_la_SOURCES) $(libentryuuid_syntax_la_EXTRA) \ + $(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA) + + ## Run rust tests +@@ -1306,13 +1376,17 @@ else + check-local: + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ ++ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ ++ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml + endif +@@ -1735,6 +1809,24 @@ libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) + libderef_plugin_la_DEPENDENCIES = libslapd.la + libderef_plugin_la_LDFLAGS = -avoid-version + ++if RUST_ENABLE ++#------------------------ ++# libentryuuid-syntax-plugin ++#----------------------- ++libentryuuid_syntax_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c ++libentryuuid_syntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid_syntax ++libentryuuid_syntax_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_SYNTAX_LIB) ++libentryuuid_syntax_plugin_la_LDFLAGS = -avoid-version ++ ++#------------------------ ++# libentryuuid-plugin ++#----------------------- ++libentryuuid_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c ++libentryuuid_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) 
-lentryuuid ++libentryuuid_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_LIB) ++libentryuuid_plugin_la_LDFLAGS = -avoid-version ++endif ++ + #------------------------ + # libpbe-plugin + #----------------------- +diff --git a/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif +new file mode 100644 +index 000000000..b64090af7 +--- /dev/null ++++ b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif +@@ -0,0 +1,233 @@ ++version: 1 ++ ++# entry-id: 1 ++dn: dc=example,dc=com ++objectClass: top ++objectClass: domain ++dc: example ++description: dc=example,dc=com ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015542Z ++modifyTimestamp: 20200325015542Z ++nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda ++aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas ++ s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search ++ , compare)(userdn="ldap:///anyone");) ++aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa ++ lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa ++ re)(userdn="ldap:///anyone");) ++ ++# entry-id: 2 ++dn: cn=389_ds_system,dc=example,dc=com ++objectClass: top ++objectClass: nscontainer ++objectClass: ldapsubentry ++cn: 389_ds_system ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015542Z ++modifyTimestamp: 20200325015542Z ++nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 3 ++dn: ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: groups ++aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob ++ jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab ++ le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone") ++ ;) ++aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version ++ 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: ++ ///cn=group_modify,ou=permissions,dc=example,dc=com");) ++aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta ++ rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin ++ to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi ++ n,ou=permissions,dc=example,dc=com");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 4 ++dn: ou=people,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: people ++aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa ++ me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || ++ memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar ++ getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user ++ read"; allow (read, search, compare)(userdn="ldap:///anyone");) ++aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") ++ (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// ++ /self");) ++aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= ++ "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena ++ 
ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" ++ );) ++aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns ++ Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, ++ compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= ++ com");) ++aci: (targetattr="uid || description || displayName || loginShell || uidNumber ++ || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam ++ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec ++ tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ ++ e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl ++ e,dc=com");) ++aci: (targetattr="uid || description || displayName || loginShell || uidNumber ++ || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam ++ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec ++ tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a ++ llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, ++ dc=com");) ++aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub ++ licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use ++ r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, ++ ou=permissions,dc=example,dc=com");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 5 ++dn: ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: permissions ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015543Z ++modifyTimestamp: 20200325015543Z ++nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 6 ++dn: ou=services,dc=example,dc=com ++objectClass: top ++objectClass: organizationalunit ++ou: services ++aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | ++ | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; ++ acl "Enable anyone service account read"; allow (read, search, compare)(userd ++ n="ldap:///anyone");) ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325015544Z ++nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 7 ++dn: uid=demo_user,ou=people,dc=example,dc=com ++objectClass: top ++objectClass: nsPerson ++objectClass: nsAccount ++objectClass: nsOrgPerson ++objectClass: posixAccount ++uid: demo_user ++cn: Demo User ++displayName: Demo User ++legalName: Demo User Name ++uidNumber: 99998 ++gidNumber: 99998 ++homeDirectory: /var/empty ++loginShell: /bin/false ++nsAccountLock: true ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325061615Z ++nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda ++entryUUID: 973e1bbf-ba9c-45d4-b01b-ff7371fd9008 ++ ++# entry-id: 8 ++dn: cn=demo_group,ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: posixGroup ++objectClass: nsMemberOf ++cn: demo_group ++gidNumber: 99999 ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015544Z ++modifyTimestamp: 20200325015544Z 
++nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda ++entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 ++ ++# entry-id: 9 ++dn: cn=group_admin,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: group_admin ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 10 ++dn: cn=group_modify,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: group_modify ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 11 ++dn: cn=user_admin,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_admin ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015545Z ++modifyTimestamp: 20200325015545Z ++nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 12 ++dn: cn=user_modify,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_modify ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015546Z ++modifyTimestamp: 20200325015546Z ++nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 13 ++dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_passwd_reset ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015546Z ++modifyTimestamp: 20200325015546Z ++nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda ++ ++# entry-id: 14 ++dn: cn=user_private_read,ou=permissions,dc=example,dc=com ++objectClass: top ++objectClass: groupOfNames ++objectClass: nsMemberOf ++cn: user_private_read ++creatorsName: cn=Directory Manager ++modifiersName: cn=Directory Manager ++createTimestamp: 20200325015547Z ++modifyTimestamp: 20200325015547Z ++nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda ++ +diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py +new file mode 100644 +index 000000000..beb73701d +--- /dev/null ++++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py +@@ -0,0 +1,226 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++ ++import ldap ++import pytest ++import time ++import shutil ++from lib389.idm.user import nsUserAccounts, UserAccounts ++from lib389.idm.account import Accounts ++from lib389.topologies import topology_st as topology ++from lib389.backend import Backends ++from lib389.paths import Paths ++from lib389.utils import ds_is_older ++from lib389._constants import * ++from lib389.plugins import EntryUUIDPlugin ++ ++default_paths = Paths() ++ ++pytestmark = pytest.mark.tier1 ++ ++DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/entryuuid/') ++IMPORT_UUID_A = "973e1bbf-ba9c-45d4-b01b-ff7371fd9008" ++UUID_BETWEEN = "eeeeeeee-0000-0000-0000-000000000000" ++IMPORT_UUID_B = "f6df8fe9-6b30-46aa-aa13-f0bf755371e8" ++UUID_MIN = "00000000-0000-0000-0000-000000000000" ++UUID_MAX = "ffffffff-ffff-ffff-ffff-ffffffffffff" ++ ++def _entryuuid_import_and_search(topology): ++ # 1 ++ ldif_dir = topology.standalone.get_ldif_dir() ++ target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-2020_03_30_13_14_47.ldif') ++ import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-2020_03_30_13_14_47.ldif') ++ shutil.copyfile(import_ldif, target_ldif) ++ ++ be = Backends(topology.standalone).get('userRoot') ++ task = be.import_ldif([target_ldif]) ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ ++ accounts = Accounts(topology.standalone, DEFAULT_SUFFIX) ++ # 2 - positive eq test ++ r2 = accounts.filter("(entryUUID=%s)" % IMPORT_UUID_A) ++ assert(len(r2) == 1) ++ r3 = accounts.filter("(entryuuid=%s)" % IMPORT_UUID_B) ++ assert(len(r3) == 1) ++ # 3 - negative eq test ++ r4 = accounts.filter("(entryuuid=%s)" % UUID_MAX) ++ assert(len(r4) == 0) ++ # 4 - le search ++ r5 = accounts.filter("(entryuuid<=%s)" % UUID_BETWEEN) ++ assert(len(r5) == 1) ++ # 5 - ge search ++ r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN) ++ assert(len(r6) == 1) ++ # 6 - le 0 search ++ r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN) ++ assert(len(r7) == 0) ++ # 7 - ge f search ++ r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX) ++ assert(len(r8) == 0) ++ # 8 - export db ++ task = be.export_ldif() ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ ++ ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_indexed_import_and_search(topology): ++ """ Test that an ldif of entries containing entryUUID's can be indexed and searched ++ correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ++ ordering, so we check these are correct. ++ ++ :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Import the db from the ldif ++ 2. EQ search for an entryuuid (match) ++ 3. EQ search for an entryuuid that does not exist ++ 4. LE search for an entryuuid lower (1 res) ++ 5. GE search for an entryuuid greater (1 res) ++ 6. LE for the 0 uuid (0 res) ++ 7. GE for the f uuid (0 res) ++ 8. export the db to ldif ++ ++ :expectedresults: ++ 1. Success ++ 2. 1 match ++ 3. 0 match ++ 4. 1 match ++ 5. 1 match ++ 6. 0 match ++ 7. 0 match ++ 8. success ++ """ ++ # Assert that the index correctly exists. 
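++    # Editor's note, not part of the original patch: ensure_state below
++    # materialises, roughly, the following index entry (dn layout assumed
++    # from the standard ldbm backend configuration):
++    #
++    #   dn: cn=entryUUID,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config
++    #   objectClass: top
++    #   objectClass: nsIndex
++    #   cn: entryUUID
++    #   nsSystemIndex: false
++    #   nsIndexType: eq
++    #   nsIndexType: pres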
++ be = Backends(topology.standalone).get('userRoot') ++ indexes = be.get_indexes() ++ indexes.ensure_state(properties={ ++ 'cn': 'entryUUID', ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': ['eq', 'pres'], ++ }) ++ _entryuuid_import_and_search(topology) ++ ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_unindexed_import_and_search(topology): ++ """ Test that an ldif of entries containing entryUUID's can be UNindexed searched ++ correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ++ ordering, so we check these are correct. ++ ++ :id: b652b54d-f009-464b-b5bd-299a33f97243 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Import the db from the ldif ++ 2. EQ search for an entryuuid (match) ++ 3. EQ search for an entryuuid that does not exist ++ 4. LE search for an entryuuid lower (1 res) ++ 5. GE search for an entryuuid greater (1 res) ++ 6. LE for the 0 uuid (0 res) ++ 7. GE for the f uuid (0 res) ++ 8. export the db to ldif ++ ++ :expectedresults: ++ 1. Success ++ 2. 1 match ++ 3. 0 match ++ 4. 1 match ++ 5. 1 match ++ 6. 0 match ++ 7. 0 match ++ 8. success ++ """ ++ # Assert that the index does NOT exist for this test. ++ be = Backends(topology.standalone).get('userRoot') ++ indexes = be.get_indexes() ++ try: ++ idx = indexes.get('entryUUID') ++ idx.delete() ++ except ldap.NO_SUCH_OBJECT: ++ # It's already not present, move along, nothing to see here. ++ pass ++ _entryuuid_import_and_search(topology) ++ ++# Test entryUUID generation ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_generation_on_add(topology): ++ """ Test that when an entry is added, the entryuuid is added. ++ ++ :id: a7439b0a-dcee-4cd6-b8ef-771476c0b4f6 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Create a new entry in the db ++ 2. Check it has an entry uuid ++ ++ :expectedresults: ++ 1. Success ++ 2. An entry uuid is present ++ """ ++ # Step one - create a user! ++ account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user() ++ # Step two - does it have an entryuuid? ++ euuid = account.get_attr_val_utf8('entryUUID') ++ print(euuid) ++ assert(euuid is not None) ++ ++# Test fixup task ++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") ++def test_entryuuid_fixup_task(topology): ++ """Test that when an entries without UUID's can have one generated via ++ the fixup process. ++ ++ :id: ad42bba2-ffb2-4c22-a37d-cbe7bcf73d6b ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Disable the entryuuid plugin ++ 2. Create an entry ++ 3. Enable the entryuuid plugin ++ 4. Run the fixup ++ 5. Assert the entryuuid now exists ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Suddenly EntryUUID! ++ """ ++ # 1. Disable the plugin ++ plug = EntryUUIDPlugin(topology.standalone) ++ plug.disable() ++ topology.standalone.restart() ++ ++ # 2. create the account ++ account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user(uid=2000) ++ euuid = account.get_attr_val_utf8('entryUUID') ++ assert(euuid is None) ++ ++ # 3. enable the plugin ++ plug.enable() ++ topology.standalone.restart() ++ ++ # 4. run the fix up ++ # For now set the log level to high! 
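++    # Editor's note, not part of the original patch: ErrorLog.DEFAULT plus
++    # ErrorLog.TRACE enables, roughly, the server's function-tracing error
++    # log level so the fixup task's per-entry work is visible; the level is
++    # dropped back to ErrorLog.DEFAULT once the task has completed below.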
++ topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) ++ task = plug.fixup(DEFAULT_SUFFIX) ++ task.wait() ++ assert(task.is_complete() and task.get_exit_code() == 0) ++ topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) ++ ++ # 5. Assert the uuid. ++ euuid = account.get_attr_val_utf8('entryUUID') ++ assert(euuid is not None) ++ +diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif +index 57e6be3b3..3b0ad0a97 100644 +--- a/ldap/schema/02common.ldif ++++ b/ldap/schema/02common.ldif +@@ -11,6 +11,7 @@ + # + # Core schema, highly recommended but not required to start the Directory Server itself. + # ++# + dn: cn=schema + # + # attributes +diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif +new file mode 100644 +index 000000000..cbde981fe +--- /dev/null ++++ b/ldap/schema/03entryuuid.ldif +@@ -0,0 +1,16 @@ ++# ++# BEGIN COPYRIGHT BLOCK ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# END COPYRIGHT BLOCK ++# ++# Core schema, highly recommended but not required to start the Directory Server itself. ++# ++dn: cn=schema ++# ++# attributes ++# ++attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c +index 7e1618e79..bf5476272 100644 +--- a/ldap/servers/slapd/config.c ++++ b/ldap/servers/slapd/config.c +@@ -35,6 +35,10 @@ extern char *slapd_SSL3ciphers; + extern char *localuser; + char *rel2abspath(char *); + ++/* ++ * WARNING - this can only bootstrap PASSWORD and SYNTAX plugins! ++ * see fedse.c instead! ++ */ + static char *bootstrap_plugins[] = { + "dn: cn=PBKDF2_SHA256,cn=Password Storage Schemes,cn=plugins,cn=config\n" + "objectclass: top\n" +@@ -45,6 +49,19 @@ static char *bootstrap_plugins[] = { + "nsslapd-plugintype: pwdstoragescheme\n" + "nsslapd-pluginenabled: on", + ++ "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid_syntax\n" ++ "nsslapd-pluginpath: libentryuuid-syntax-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n" ++ "nsslapd-plugintype: syntax\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid_syntax\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid_syntax\n", ++ + NULL + }; + +diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c +index 7697e2b88..9ae9523e2 100644 +--- a/ldap/servers/slapd/entry.c ++++ b/ldap/servers/slapd/entry.c +@@ -2882,6 +2882,18 @@ slapi_entry_attr_get_bool(const Slapi_Entry *e, const char *type) + return slapi_entry_attr_get_bool_ext(e, type, PR_FALSE); + } + ++const struct slapi_value ** ++slapi_entry_attr_get_valuearray(const Slapi_Entry *e, const char *attrname) ++{ ++ Slapi_Attr *attr; ++ ++ if (slapi_entry_attr_find(e, attrname, &attr) != 0) { ++ return NULL; ++ } ++ ++ return attr->a_present_values.va; ++} ++ + /* + * Extract a single value from an entry (as a string). You do not need + * to free the returned string value. 
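Editor's note, not part of the original patch: the entry.c hunk above adds
slapi_entry_attr_get_valuearray(), which returns a pointer into the entry's
internal present-values array (attr->a_present_values.va). As with the string
getter described in the surrounding comment, the caller does not own the
result and must not free or modify it.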
+diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 3b076eb17..0d645f909 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -119,6 +119,34 @@ static const char *internal_entries[] = + "cn:SNMP\n" + "nsSNMPEnabled: on\n", + ++#ifdef RUST_ENABLE ++ "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid_syntax\n" ++ "nsslapd-pluginpath: libentryuuid-syntax-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n" ++ "nsslapd-plugintype: syntax\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid_syntax\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid_syntax\n", ++ ++ "dn: cn=entryuuid,cn=plugins,cn=config\n" ++ "objectclass: top\n" ++ "objectclass: nsSlapdPlugin\n" ++ "cn: entryuuid\n" ++ "nsslapd-pluginpath: libentryuuid-plugin\n" ++ "nsslapd-plugininitfunc: entryuuid_plugin_init\n" ++ "nsslapd-plugintype: betxnpreoperation\n" ++ "nsslapd-pluginenabled: on\n" ++ "nsslapd-pluginId: entryuuid\n" ++ "nsslapd-pluginVersion: none\n" ++ "nsslapd-pluginVendor: 389 Project\n" ++ "nsslapd-pluginDescription: entryuuid\n", ++#endif ++ + "dn: cn=Password Storage Schemes,cn=plugins,cn=config\n" + "objectclass: top\n" + "objectclass: nsContainer\n" +diff --git a/src/Cargo.lock b/src/Cargo.lock +index ce3c7ed27..33d7b8f23 100644 +--- a/src/Cargo.lock ++++ b/src/Cargo.lock +@@ -28,12 +28,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + + [[package]] + name = "base64" +-version = "0.10.1" ++version = "0.13.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +-dependencies = [ +- "byteorder", +-] ++checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + + [[package]] + name = "bitflags" +@@ -43,9 +40,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + + [[package]] + name = "byteorder" +-version = "1.4.2" ++version = "1.4.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" ++checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + + [[package]] + name = "cbindgen" +@@ -66,15 +63,12 @@ dependencies = [ + + [[package]] + name = "cc" +-version = "1.0.66" ++version = "1.0.67" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +- +-[[package]] +-name = "cfg-if" +-version = "0.1.10" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" ++checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" ++dependencies = [ ++ "jobserver", ++] + + [[package]] + name = "cfg-if" +@@ -97,16 +91,39 @@ dependencies = [ + "vec_map", + ] + ++[[package]] ++name = "entryuuid" ++version = "0.1.0" ++dependencies = [ ++ "cc", ++ "libc", ++ "paste", ++ "slapi_r_plugin", ++ "uuid", ++] ++ ++[[package]] ++name = "entryuuid_syntax" ++version = "0.1.0" ++dependencies = [ ++ "cc", ++ "libc", ++ "paste", ++ "slapi_r_plugin", ++ "uuid", ++] ++ + [[package]] + name = "fernet" +-version = "0.1.3" ++version = "0.1.4" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = 
"e7ac567fd75ce6bc28b68e63b5beaa3ce34f56bafd1122f64f8647c822e38a8b" ++checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" + dependencies = [ + "base64", + "byteorder", + "getrandom", + "openssl", ++ "zeroize", + ] + + [[package]] +@@ -126,20 +143,20 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + + [[package]] + name = "getrandom" +-version = "0.1.16" ++version = "0.2.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" ++checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" + dependencies = [ +- "cfg-if 1.0.0", ++ "cfg-if", + "libc", + "wasi", + ] + + [[package]] + name = "hermit-abi" +-version = "0.1.17" ++version = "0.1.18" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" ++checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" + dependencies = [ + "libc", + ] +@@ -150,6 +167,15 @@ version = "0.4.7" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + ++[[package]] ++name = "jobserver" ++version = "0.1.22" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" ++dependencies = [ ++ "libc", ++] ++ + [[package]] + name = "lazy_static" + version = "1.4.0" +@@ -158,9 +184,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + + [[package]] + name = "libc" +-version = "0.2.82" ++version = "0.2.94" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" ++checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" + + [[package]] + name = "librnsslapd" +@@ -182,32 +208,38 @@ dependencies = [ + + [[package]] + name = "log" +-version = "0.4.11" ++version = "0.4.14" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" ++checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" + dependencies = [ +- "cfg-if 0.1.10", ++ "cfg-if", + ] + ++[[package]] ++name = "once_cell" ++version = "1.7.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" ++ + [[package]] + name = "openssl" +-version = "0.10.32" ++version = "0.10.34" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" ++checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" + dependencies = [ + "bitflags", +- "cfg-if 1.0.0", ++ "cfg-if", + "foreign-types", +- "lazy_static", + "libc", ++ "once_cell", + "openssl-sys", + ] + + [[package]] + name = "openssl-sys" +-version = "0.9.60" ++version = "0.9.63" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" ++checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" + dependencies = [ + "autocfg", + "cc", +@@ -216,6 +248,25 @@ dependencies = [ + "vcpkg", + ] + ++[[package]] ++name = "paste" ++version = "0.1.18" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" ++dependencies = [ ++ "paste-impl", ++ "proc-macro-hack", ++] ++ ++[[package]] ++name = "paste-impl" ++version = "0.1.18" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" ++dependencies = [ ++ "proc-macro-hack", ++] ++ + [[package]] + name = "pkg-config" + version = "0.3.19" +@@ -228,31 +279,36 @@ version = "0.2.10" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + ++[[package]] ++name = "proc-macro-hack" ++version = "0.5.19" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" ++ + [[package]] + name = "proc-macro2" +-version = "1.0.24" ++version = "1.0.27" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" ++checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" + dependencies = [ + "unicode-xid", + ] + + [[package]] + name = "quote" +-version = "1.0.8" ++version = "1.0.9" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" ++checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" + dependencies = [ + "proc-macro2", + ] + + [[package]] + name = "rand" +-version = "0.7.3" ++version = "0.8.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" ++checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" + dependencies = [ +- "getrandom", + "libc", + "rand_chacha", + "rand_core", +@@ -261,9 +317,9 @@ dependencies = [ + + [[package]] + name = "rand_chacha" +-version = "0.2.2" ++version = "0.3.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" ++checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" + dependencies = [ + "ppv-lite86", + "rand_core", +@@ -271,27 +327,30 @@ dependencies = [ + + [[package]] + name = "rand_core" +-version = "0.5.1" ++version = "0.6.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" ++checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" + dependencies = [ + "getrandom", + ] + + [[package]] + name = "rand_hc" +-version = "0.2.0" ++version = "0.3.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" ++checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" + dependencies = [ + "rand_core", + ] + + [[package]] + name = "redox_syscall" +-version = "0.1.57" ++version = "0.2.8" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" ++checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" ++dependencies = [ ++ "bitflags", ++] + + [[package]] + name = "remove_dir_all" +@@ -314,18 +373,18 @@ checksum = 
"71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + + [[package]] + name = "serde" +-version = "1.0.118" ++version = "1.0.126" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" ++checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" + dependencies = [ + "serde_derive", + ] + + [[package]] + name = "serde_derive" +-version = "1.0.118" ++version = "1.0.126" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" ++checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" + dependencies = [ + "proc-macro2", + "quote", +@@ -334,9 +393,9 @@ dependencies = [ + + [[package]] + name = "serde_json" +-version = "1.0.61" ++version = "1.0.64" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" ++checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" + dependencies = [ + "itoa", + "ryu", +@@ -350,6 +409,16 @@ dependencies = [ + "fernet", + ] + ++[[package]] ++name = "slapi_r_plugin" ++version = "0.1.0" ++dependencies = [ ++ "lazy_static", ++ "libc", ++ "paste", ++ "uuid", ++] ++ + [[package]] + name = "strsim" + version = "0.8.0" +@@ -358,22 +427,34 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + + [[package]] + name = "syn" +-version = "1.0.58" ++version = "1.0.72" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-xid", ++] ++ ++[[package]] ++name = "synstructure" ++version = "0.12.4" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" ++checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" + dependencies = [ + "proc-macro2", + "quote", ++ "syn", + "unicode-xid", + ] + + [[package]] + name = "tempfile" +-version = "3.1.0" ++version = "3.2.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" ++checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" + dependencies = [ +- "cfg-if 0.1.10", ++ "cfg-if", + "libc", + "rand", + "redox_syscall", +@@ -407,15 +488,24 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + + [[package]] + name = "unicode-xid" +-version = "0.2.1" ++version = "0.2.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" ++checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" ++ ++[[package]] ++name = "uuid" ++version = "0.8.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" ++dependencies = [ ++ "getrandom", ++] + + [[package]] + name = "vcpkg" +-version = "0.2.11" ++version = "0.2.12" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" ++checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d" + + [[package]] + name = "vec_map" +@@ -425,9 +515,9 
@@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + + [[package]] + name = "wasi" +-version = "0.9.0+wasi-snapshot-preview1" ++version = "0.10.2+wasi-snapshot-preview1" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" ++checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + + [[package]] + name = "winapi" +@@ -450,3 +540,24 @@ name = "winapi-x86_64-pc-windows-gnu" + version = "0.4.0" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" ++ ++[[package]] ++name = "zeroize" ++version = "1.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" ++dependencies = [ ++ "zeroize_derive", ++] ++ ++[[package]] ++name = "zeroize_derive" ++version = "1.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++ "synstructure", ++] +diff --git a/src/Cargo.toml b/src/Cargo.toml +index f6dac010f..1ad2b21b0 100644 +--- a/src/Cargo.toml ++++ b/src/Cargo.toml +@@ -1,10 +1,13 @@ + + [workspace] + members = [ +- "librslapd", +- "librnsslapd", +- "libsds", +- "slapd", ++ "librslapd", ++ "librnsslapd", ++ "libsds", ++ "slapd", ++ "slapi_r_plugin", ++ "plugins/entryuuid", ++ "plugins/entryuuid_syntax", + ] + + [profile.release] +diff --git a/src/README.md b/src/README.md +new file mode 100644 +index 000000000..e69de29bb +diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py +index 52aac0f21..c184c8d4f 100644 +--- a/src/lib389/lib389/_constants.py ++++ b/src/lib389/lib389/_constants.py +@@ -150,6 +150,7 @@ DN_IMPORT_TASK = "cn=import,%s" % DN_TASKS + DN_BACKUP_TASK = "cn=backup,%s" % DN_TASKS + DN_RESTORE_TASK = "cn=restore,%s" % DN_TASKS + DN_MBO_TASK = "cn=memberOf task,%s" % DN_TASKS ++DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS + DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS + DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS + DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index aab07c028..bcd7b383f 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -765,7 +765,7 @@ class Backend(DSLdapObject): + enc_attr.delete() + break + +- def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=False, only_core=False, ++ def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=None, only_core=False, + include_suffixes=None, exclude_suffixes=None): + """Do an import of the suffix""" + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 530fb367a..ac0fe1a8c 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -34,6 +34,7 @@ from lib389.instance.options import General2Base, Slapd2Base, Backend2Base + from lib389.paths import Paths + from lib389.saslmap import SaslMappings + from lib389.instance.remove import remove_ds_instance ++from lib389.index import Indexes + from lib389.utils import ( + assert_c, + is_a_dn, +@@ -928,6 +929,19 @@ class SetupDs(object): + if slapd['self_sign_cert']: + 
ds_instance.config.set('nsslapd-security', 'on') + ++ # Before we create any backends, create any extra default indexes that may be ++ # dynamically provisioned, rather than from template-dse.ldif. Looking at you ++ # entryUUID (requires rust enabled). ++ # ++ # Indexes defaults to default_index_dn ++ indexes = Indexes(ds_instance) ++ if ds_instance.ds_paths.rust_enabled: ++ indexes.create(properties={ ++ 'cn': 'entryUUID', ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': ['eq', 'pres'], ++ }) ++ + # Create the backends as listed + # Load example data if needed. + for backend in backends: +diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py +index 16899f6d3..2d88e60bd 100644 +--- a/src/lib389/lib389/plugins.py ++++ b/src/lib389/lib389/plugins.py +@@ -2244,3 +2244,33 @@ class ContentSyncPlugin(Plugin): + + def __init__(self, instance, dn="cn=Content Synchronization,cn=plugins,cn=config"): + super(ContentSyncPlugin, self).__init__(instance, dn) ++ ++ ++class EntryUUIDPlugin(Plugin): ++ """The EntryUUID plugin configuration ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ :param dn: Entry DN ++ :type dn: str ++ """ ++ def __init__(self, instance, dn="cn=entryuuid,cn=plugins,cn=config"): ++ super(EntryUUIDPlugin, self).__init__(instance, dn) ++ ++ def fixup(self, basedn, _filter=None): ++ """Create an entryuuid fixup task ++ ++ :param basedn: Basedn to fix up ++ :type basedn: str ++ :param _filter: a filter for entries to fix up ++ :type _filter: str ++ ++ :returns: an instance of Task(DSLdapObject) ++ """ ++ ++ task = tasks.EntryUUIDFixupTask(self._instance) ++ task_properties = {'basedn': basedn} ++ if _filter is not None: ++ task_properties['filter'] = _filter ++ task.create(properties=task_properties) ++ ++ return task +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index b19e7918d..590c6ee79 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -203,6 +203,20 @@ class USNTombstoneCleanupTask(Task): + return super(USNTombstoneCleanupTask, self)._validate(rdn, properties, basedn) + + ++class EntryUUIDFixupTask(Task): ++ """A single instance of an entryuuid fixup task entry ++ ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ """ ++ ++ def __init__(self, instance, dn=None): ++ self.cn = 'entryuuid_fixup_' + Task._get_task_date() ++ dn = "cn=" + self.cn + "," + DN_EUUID_TASK ++ super(EntryUUIDFixupTask, self).__init__(instance, dn) ++ self._must_attributes.extend(['basedn']) ++ ++ + class SchemaReloadTask(Task): + """A single instance of schema reload task entry + +diff --git a/src/librnsslapd/build.rs b/src/librnsslapd/build.rs +index 9b953b246..13f6d2e03 100644 +--- a/src/librnsslapd/build.rs ++++ b/src/librnsslapd/build.rs +@@ -3,13 +3,14 @@ extern crate cbindgen; + use std::env; + + fn main() { +- let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); +- let out_dir = env::var("SLAPD_HEADER_DIR").unwrap(); +- +- cbindgen::Builder::new() +- .with_language(cbindgen::Language::C) +- .with_crate(crate_dir) +- .generate() +- .expect("Unable to generate bindings") +- .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir)); ++ if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") { ++ if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") { ++ cbindgen::Builder::new() ++ .with_language(cbindgen::Language::C) ++ .with_crate(crate_dir) ++ .generate() ++ .expect("Unable to generate bindings") ++ .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir)); ++ } ++ } + } +diff --git
a/src/librnsslapd/src/lib.rs b/src/librnsslapd/src/lib.rs +index c5fd2bbaf..dffe4ce1c 100644 +--- a/src/librnsslapd/src/lib.rs ++++ b/src/librnsslapd/src/lib.rs +@@ -4,9 +4,9 @@ + // Remember this is just a c-bindgen stub, all logic should come from slapd! + + extern crate libc; +-use slapd; + use libc::c_char; +-use std::ffi::{CString, CStr}; ++use slapd; ++use std::ffi::{CStr, CString}; + + #[no_mangle] + pub extern "C" fn do_nothing_again_rust() -> usize { +@@ -29,9 +29,7 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha + // We have to move string memory ownership by copying so the system + // allocator has it. + let raw = tok.into_raw(); +- let dup_tok = unsafe { +- libc::strdup(raw) +- }; ++ let dup_tok = unsafe { libc::strdup(raw) }; + unsafe { + CString::from_raw(raw); + }; +@@ -45,7 +43,12 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha + } + + #[no_mangle] +-pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, raw_key: *const c_char, ttl: u64) -> bool { ++pub extern "C" fn fernet_verify_token( ++ dn: *const c_char, ++ token: *const c_char, ++ raw_key: *const c_char, ++ ttl: u64, ++) -> bool { + if dn.is_null() || raw_key.is_null() || token.is_null() { + return false; + } +@@ -67,4 +70,3 @@ pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, r + Err(_) => false, + } + } +- +diff --git a/src/librslapd/Cargo.toml b/src/librslapd/Cargo.toml +index 1dd715ed2..08309c224 100644 +--- a/src/librslapd/Cargo.toml ++++ b/src/librslapd/Cargo.toml +@@ -12,10 +12,6 @@ path = "src/lib.rs" + name = "rslapd" + crate-type = ["staticlib", "lib"] + +-# [profile.release] +-# panic = "abort" +-# lto = true +- + [dependencies] + slapd = { path = "../slapd" } + libc = "0.2" +diff --git a/src/librslapd/build.rs b/src/librslapd/build.rs +index 4d4c1ce42..84aff156b 100644 +--- a/src/librslapd/build.rs ++++ b/src/librslapd/build.rs +@@ -3,13 +3,14 @@ extern crate cbindgen; + use std::env; + + fn main() { +- let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); +- let out_dir = env::var("SLAPD_HEADER_DIR").unwrap(); +- +- cbindgen::Builder::new() +- .with_language(cbindgen::Language::C) +- .with_crate(crate_dir) +- .generate() +- .expect("Unable to generate bindings") +- .write_to_file(format!("{}/rust-slapi-private.h", out_dir)); ++ if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") { ++ if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") { ++ cbindgen::Builder::new() ++ .with_language(cbindgen::Language::C) ++ .with_crate(crate_dir) ++ .generate() ++ .expect("Unable to generate bindings") ++ .write_to_file(format!("{}/rust-slapi-private.h", out_dir)); ++ } ++ } + } +diff --git a/src/librslapd/src/lib.rs b/src/librslapd/src/lib.rs +index 9cce193a0..cf283a7ce 100644 +--- a/src/librslapd/src/lib.rs ++++ b/src/librslapd/src/lib.rs +@@ -8,7 +8,7 @@ extern crate libc; + use slapd; + + use libc::c_char; +-use std::ffi::{CString, CStr}; ++use std::ffi::{CStr, CString}; + + #[no_mangle] + pub extern "C" fn do_nothing_rust() -> usize { +@@ -18,9 +18,7 @@ pub extern "C" fn do_nothing_rust() -> usize { + #[no_mangle] + pub extern "C" fn rust_free_string(s: *mut c_char) { + if !s.is_null() { +- let _ = unsafe { +- CString::from_raw(s) +- }; ++ let _ = unsafe { CString::from_raw(s) }; + } + } + +@@ -35,9 +33,7 @@ pub extern "C" fn fernet_generate_new_key() -> *mut c_char { + match res_key { + Ok(key) => { + let raw = key.into_raw(); +- let dup_key = unsafe { +- libc::strdup(raw) +- }; ++ let 
dup_key = unsafe { libc::strdup(raw) }; + rust_free_string(raw); + dup_key + } +@@ -53,4 +49,3 @@ pub extern "C" fn fernet_validate_key(raw_key: *const c_char) -> bool { + Err(_) => false, + } + } +- +diff --git a/src/libsds/sds/lib.rs b/src/libsds/sds/lib.rs +index aa70c7a8e..9e2973222 100644 +--- a/src/libsds/sds/lib.rs ++++ b/src/libsds/sds/lib.rs +@@ -28,5 +28,3 @@ pub enum sds_result { + /// The list is exhausted, no more elements can be returned. + ListExhausted = 16, + } +- +- +diff --git a/src/libsds/sds/tqueue.rs b/src/libsds/sds/tqueue.rs +index b7042e514..ebe1f4b6c 100644 +--- a/src/libsds/sds/tqueue.rs ++++ b/src/libsds/sds/tqueue.rs +@@ -9,8 +9,8 @@ + #![warn(missing_docs)] + + use super::sds_result; +-use std::sync::Mutex; + use std::collections::LinkedList; ++use std::sync::Mutex; + + // Borrow from libc + #[doc(hidden)] +@@ -75,7 +75,10 @@ impl Drop for TQueue { + /// C compatible wrapper around the TQueue. Given a valid point, a TQueue pointer + /// is allocated on the heap and referenced in retq. free_fn_ptr may be NULL + /// but if it references a function, this will be called during drop of the TQueue. +-pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option) -> sds_result { ++pub extern "C" fn sds_tqueue_init( ++ retq: *mut *mut TQueue, ++ free_fn_ptr: Option, ++) -> sds_result { + // This piece of type signature magic is because in rust types that extern C, + // with option has None resolve to null. What this causes is we can wrap + // our fn ptr with Option in rust, but the C side gives us fn ptr or NULL, and +@@ -93,7 +96,7 @@ pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option sds_result { ++pub extern "C" fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_result { + // Check for null .... + unsafe { (*q).enqueue(elem) }; + sds_result::Success +@@ -103,29 +106,27 @@ pub extern fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_r + /// Dequeue from the head of the queue. The result will be placed into elem. + /// if elem is NULL no dequeue is attempted. If there are no more items + /// ListExhausted is returned. +-pub extern fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result { ++pub extern "C" fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result { + if elem.is_null() { + return sds_result::NullPointer; + } + match unsafe { (*q).dequeue() } { + Some(e) => { +- unsafe { *elem = e; }; ++ unsafe { ++ *elem = e; ++ }; + sds_result::Success + } +- None => { +- sds_result::ListExhausted +- } ++ None => sds_result::ListExhausted, + } + } + + #[no_mangle] + /// Free the queue and all remaining elements. After this point it is + /// not safe to access the queue. 
+-pub extern fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result { ++pub extern "C" fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result { + // This will drop the queue and free it's content + // mem::drop(q); + let _q = unsafe { Box::from_raw(q) }; + sds_result::Success + } +- +- +diff --git a/src/plugins/entryuuid/Cargo.toml b/src/plugins/entryuuid/Cargo.toml +new file mode 100644 +index 000000000..c43d7a771 +--- /dev/null ++++ b/src/plugins/entryuuid/Cargo.toml +@@ -0,0 +1,21 @@ ++[package] ++name = "entryuuid" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "entryuuid" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++slapi_r_plugin = { path="../../slapi_r_plugin" } ++uuid = { version = "0.8", features = [ "v4" ] } ++ ++[build-dependencies] ++cc = { version = "1.0", features = ["parallel"] } +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +new file mode 100644 +index 000000000..6b5e8d1bb +--- /dev/null ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -0,0 +1,196 @@ ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++use std::convert::{TryFrom, TryInto}; ++use std::os::raw::c_char; ++use uuid::Uuid; ++ ++#[derive(Debug)] ++struct FixupData { ++ basedn: Sdn, ++ raw_filter: String, ++} ++ ++struct EntryUuid; ++/* ++ * /---- plugin ident ++ * | /---- Struct name. ++ * V V ++ */ ++slapi_r_plugin_hooks!(entryuuid, EntryUuid); ++ ++/* ++ * /---- plugin ident ++ * | /---- cb ident ++ * | | /---- map function ++ * V V V ++ */ ++slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_mapfn); ++ ++fn assign_uuid(e: &mut EntryRef) { ++ let sdn = e.get_sdnref(); ++ ++ // We could consider making these lazy static. ++ let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn"); ++ let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn"); ++ ++ if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) { ++ // We don't need to assign to these suffixes. ++ log_error!( ++ ErrorLevel::Trace, ++ "assign_uuid -> not assigning to {:?} as part of system suffix", ++ sdn.to_dn_string() ++ ); ++ return; ++ } ++ ++ // Generate a new Uuid. ++ let u: Uuid = Uuid::new_v4(); ++ log_error!( ++ ErrorLevel::Trace, ++ "assign_uuid -> assigning {:?} to dn {}", ++ u, ++ sdn.to_dn_string() ++ ); ++ ++ let uuid_value = Value::from(&u); ++ ++ // Add it to the entry ++ e.add_value("entryUUID", &uuid_value); ++} ++ ++impl SlapiPlugin3 for EntryUuid { ++ // Indicate we have pre add ++ fn has_betxn_pre_add() -> bool { ++ true ++ } ++ ++ fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "betxn_pre_add"); ++ ++ let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; ++ assign_uuid(&mut e); ++ ++ Ok(()) ++ } ++ ++ fn has_task_handler() -> Option<&'static str> { ++ Some("entryuuid task") ++ } ++ ++ type TaskData = FixupData; ++ ++ fn task_validate(e: &EntryRef) -> Result<Self::TaskData, LDAPError> { ++ // Does the entry have what we need? ++ let basedn: Sdn = match e.get_attr("basedn") { ++ Some(values) => values ++ .first() ++ .ok_or_else(|| { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_validate basedn error -> empty value array?" ++ ); ++ LDAPError::Operation ++ })?
++ .as_ref() ++ .try_into() ++ .map_err(|e| { ++ log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e); ++ LDAPError::Operation ++ })?, ++ None => return Err(LDAPError::ObjectClassViolation), ++ }; ++ ++ let raw_filter: String = match e.get_attr("filter") { ++ Some(values) => values ++ .first() ++ .ok_or_else(|| { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_validate filter error -> empty value array?" ++ ); ++ LDAPError::Operation ++ })? ++ .as_ref() ++ .try_into() ++ .map_err(|e| { ++ log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e); ++ LDAPError::Operation ++ })?, ++ None => { ++ // Give a default filter. ++ "(objectClass=*)".to_string() ++ } ++ }; ++ ++ // Error if the first filter is empty? ++ ++ // Now, to make things faster, we wrap the filter in an exclude term. ++ let raw_filter = format!("(&{}(!(entryuuid=*)))", raw_filter); ++ ++ Ok(FixupData { basedn, raw_filter }) ++ } ++ ++ fn task_be_dn_hint(data: &Self::TaskData) -> Option<Sdn> { ++ Some(data.basedn.clone()) ++ } ++ ++ fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> { ++ log_error!( ++ ErrorLevel::Trace, ++ "task_handler -> start thread with -> {:?}", ++ data ++ ); ++ ++ let search = Search::new_map_entry( ++ &(*data.basedn), ++ SearchScope::Subtree, ++ &data.raw_filter, ++ plugin_id(), ++ &(), ++ entryuuid_fixup_cb, ++ ) ++ .map_err(|e| { ++ log_error!( ++ ErrorLevel::Error, ++ "task_handler -> Unable to construct search -> {:?}", ++ e ++ ); ++ e ++ })?; ++ ++ match search.execute() { ++ Ok(_) => { ++ log_error!(ErrorLevel::Info, "task_handler -> fixup complete, success!"); ++ Ok(data) ++ } ++ Err(e) => { ++ // log, and return ++ log_error!( ++ ErrorLevel::Error, ++ "task_handler -> fixup complete, failed -> {:?}", ++ e ++ ); ++ Err(PluginError::GenericFailure) ++ } ++ } ++ } ++ ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin start"); ++ Ok(()) ++ } ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin close"); ++ Ok(()) ++ } ++} ++ ++pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> { ++ assign_uuid(&mut e); ++ Ok(()) ++} ++ ++#[cfg(test)] ++mod tests {} +diff --git a/src/plugins/entryuuid_syntax/Cargo.toml b/src/plugins/entryuuid_syntax/Cargo.toml +new file mode 100644 +index 000000000..f7d3d64c9 +--- /dev/null ++++ b/src/plugins/entryuuid_syntax/Cargo.toml +@@ -0,0 +1,21 @@ ++[package] ++name = "entryuuid_syntax" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "entryuuid_syntax" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++slapi_r_plugin = { path="../../slapi_r_plugin" } ++uuid = { version = "0.8", features = [ "v4" ] } ++ ++[build-dependencies] ++cc = { version = "1.0", features = ["parallel"] } +diff --git a/src/plugins/entryuuid_syntax/src/lib.rs b/src/plugins/entryuuid_syntax/src/lib.rs +new file mode 100644 +index 000000000..0a4b89f16 +--- /dev/null ++++ b/src/plugins/entryuuid_syntax/src/lib.rs +@@ -0,0 +1,145 @@ ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++use std::cmp::Ordering; ++use std::convert::TryInto; ++use uuid::Uuid; ++ ++struct EntryUuidSyntax; ++ ++// https://tools.ietf.org/html/rfc4530 ++ ++slapi_r_syntax_plugin_hooks!(entryuuid_syntax, EntryUuidSyntax); ++ ++impl
SlapiSyntaxPlugin1 for EntryUuidSyntax { ++ fn attr_oid() -> &'static str { ++ "1.3.6.1.1.16.1" ++ } ++ ++ fn attr_compat_oids() -> Vec<&'static str> { ++ Vec::new() ++ } ++ ++ fn attr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.1", "UUID"] ++ } ++ ++ fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError> { ++ let r: Result<Uuid, PluginError> = bval.try_into(); ++ r.map(|_| ()) ++ } ++ ++ fn eq_mr_oid() -> &'static str { ++ "1.3.6.1.1.16.2" ++ } ++ ++ fn eq_mr_name() -> &'static str { ++ "UUIDMatch" ++ } ++ ++ fn eq_mr_desc() -> &'static str { ++ "UUIDMatch matching rule." ++ } ++ ++ fn eq_mr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.2", "uuidMatch", "UUIDMatch"] ++ } ++ ++ fn filter_ava_eq( ++ _pb: &mut PblockRef, ++ bval_filter: &BerValRef, ++ vals: &ValueArrayRef, ++ ) -> Result<bool, PluginError> { ++ let u = match bval_filter.try_into() { ++ Ok(u) => u, ++ Err(_e) => return Ok(false), ++ }; ++ ++ let r = vals.iter().fold(false, |acc, va| { ++ if acc { ++ acc ++ } else { ++ // is u in va? ++ log_error!(ErrorLevel::Trace, "filter_ava_eq debug -> {:?}", va); ++ let res: Result<Uuid, PluginError> = (&*va).try_into(); ++ match res { ++ Ok(vu) => vu == u, ++ Err(_) => acc, ++ } ++ } ++ }); ++ log_error!(ErrorLevel::Trace, "filter_ava_eq result -> {:?}", r); ++ Ok(r) ++ } ++ ++ fn eq_mr_filter_values2keys( ++ _pb: &mut PblockRef, ++ vals: &ValueArrayRef, ++ ) -> Result<ValueArray, PluginError> { ++ vals.iter() ++ .map(|va| { ++ let u: Uuid = (&*va).try_into()?; ++ Ok(Value::from(&u)) ++ }) ++ .collect() ++ } ++} ++ ++impl SlapiSubMr for EntryUuidSyntax {} ++ ++impl SlapiOrdMr for EntryUuidSyntax { ++ fn ord_mr_oid() -> Option<&'static str> { ++ Some("1.3.6.1.1.16.3") ++ } ++ ++ fn ord_mr_name() -> &'static str { ++ "UUIDOrderingMatch" ++ } ++ ++ fn ord_mr_desc() -> &'static str { ++ "UUIDOrderingMatch matching rule." ++ } ++ ++ fn ord_mr_supported_names() -> Vec<&'static str> { ++ vec!["1.3.6.1.1.16.3", "uuidOrderingMatch", "UUIDOrderingMatch"] ++ } ++ ++ fn filter_ava_ord( ++ _pb: &mut PblockRef, ++ bval_filter: &BerValRef, ++ vals: &ValueArrayRef, ++ ) -> Result<Option<Ordering>, PluginError> { ++ let u: Uuid = match bval_filter.try_into() { ++ Ok(u) => u, ++ Err(_e) => return Ok(None), ++ }; ++ ++ let r = vals.iter().fold(None, |acc, va| { ++ if acc.is_some() { ++ acc ++ } else { ++ // is u in va? ++ log_error!(ErrorLevel::Trace, "filter_ava_ord debug -> {:?}", va); ++ let res: Result<Uuid, PluginError> = (&*va).try_into(); ++ match res { ++ Ok(vu) => { ++ // 1.partial_cmp(2) => ordering::less ++ vu.partial_cmp(&u) ++ } ++ Err(_) => acc, ++ } ++ } ++ }); ++ log_error!(ErrorLevel::Trace, "filter_ava_ord result -> {:?}", r); ++ Ok(r) ++ } ++ ++ fn filter_compare(a: &BerValRef, b: &BerValRef) -> Ordering { ++ let ua: Uuid = a.try_into().expect("An invalid value a was given!"); ++ let ub: Uuid = b.try_into().expect("An invalid value b was given!"); ++ ua.cmp(&ub) ++ } ++} ++ ++#[cfg(test)] ++mod tests {} +diff --git a/src/slapd/src/error.rs b/src/slapd/src/error.rs +index 06ddb27b4..6f4d782ee 100644 +--- a/src/slapd/src/error.rs ++++ b/src/slapd/src/error.rs +@@ -1,8 +1,6 @@ +- + pub enum SlapdError { + // This occurs when a string contains an inner null byte + // that cstring can't handle.
+ CStringInvalidError, + FernetInvalidKey, + } +- +diff --git a/src/slapd/src/fernet.rs b/src/slapd/src/fernet.rs +index fcbd873f8..1a3251fd9 100644 +--- a/src/slapd/src/fernet.rs ++++ b/src/slapd/src/fernet.rs +@@ -1,39 +1,30 @@ + // Routines for managing fernet encryption + +-use std::ffi::{CString, CStr}; +-use fernet::Fernet; + use crate::error::SlapdError; ++use fernet::Fernet; ++use std::ffi::{CStr, CString}; + + pub fn generate_new_key() -> Result<CString, SlapdError> { + let k = Fernet::generate_key(); +- CString::new(k) +- .map_err(|_| { +- SlapdError::CStringInvalidError +- }) ++ CString::new(k).map_err(|_| SlapdError::CStringInvalidError) + } + + pub fn new(c_str_key: &CStr) -> Result<Fernet, SlapdError> { +- let str_key = c_str_key.to_str() ++ let str_key = c_str_key ++ .to_str() + .map_err(|_| SlapdError::CStringInvalidError)?; +- Fernet::new(str_key) +- .ok_or(SlapdError::FernetInvalidKey) ++ Fernet::new(str_key).ok_or(SlapdError::FernetInvalidKey) + } + + pub fn encrypt(fernet: &Fernet, dn: &CStr) -> Result<CString, SlapdError> { + let tok = fernet.encrypt(dn.to_bytes()); +- CString::new(tok) +- .map_err(|_| { +- SlapdError::CStringInvalidError +- }) ++ CString::new(tok).map_err(|_| SlapdError::CStringInvalidError) + } + + pub fn decrypt(fernet: &Fernet, tok: &CStr, ttl: u64) -> Result<CString, SlapdError> { +- let s = tok.to_str() +- .map_err(|_| SlapdError::CStringInvalidError)?; +- let r: Vec<u8> = fernet.decrypt_with_ttl(s, ttl) ++ let s = tok.to_str().map_err(|_| SlapdError::CStringInvalidError)?; ++ let r: Vec<u8> = fernet ++ .decrypt_with_ttl(s, ttl) + .map_err(|_| SlapdError::FernetInvalidKey)?; +- CString::new(r) +- .map_err(|_| SlapdError::CStringInvalidError) ++ CString::new(r).map_err(|_| SlapdError::CStringInvalidError) + } +- +- +diff --git a/src/slapd/src/lib.rs b/src/slapd/src/lib.rs +index 5b1f20368..79f1600c2 100644 +--- a/src/slapd/src/lib.rs ++++ b/src/slapd/src/lib.rs +@@ -1,5 +1,2 @@ +- + pub mod error; + pub mod fernet; +- +- +diff --git a/src/slapi_r_plugin/Cargo.toml b/src/slapi_r_plugin/Cargo.toml +new file mode 100644 +index 000000000..c7958671a +--- /dev/null ++++ b/src/slapi_r_plugin/Cargo.toml +@@ -0,0 +1,19 @@ ++[package] ++name = "slapi_r_plugin" ++version = "0.1.0" ++authors = ["William Brown "] ++edition = "2018" ++build = "build.rs" ++ ++# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ++ ++[lib] ++path = "src/lib.rs" ++name = "slapi_r_plugin" ++crate-type = ["staticlib", "lib"] ++ ++[dependencies] ++libc = "0.2" ++paste = "0.1" ++lazy_static = "1.4" ++uuid = { version = "0.8", features = [ "v4" ] } +diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md +new file mode 100644 +index 000000000..af9743ec9 +--- /dev/null ++++ b/src/slapi_r_plugin/README.md +@@ -0,0 +1,216 @@ ++ ++# Slapi R(ust) Plugin Bindings ++ ++If you are here, you are probably interested in the Rust bindings that allow plugins to be written ++in Rust for the 389 Directory Server project. If you are, you should use `cargo doc --workspace --no-deps` ++in `src`, as this contains the material you want for implementing safe plugins. ++ ++This readme is intended for developers of the bindings that enable those plugins to work. ++ ++As such it likely requires that you have an understanding both of C and ++the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html). ++ ++> **WARNING** This place is not a place of honor ... no highly esteemed deed is commemorated here ++> ... nothing valued is here. What is here is dangerous and repulsive to us. This message is a ++> warning about danger.
++ ++This document will not detail the specifics of unsafe or the invariants you must adhere to for rust ++to work with C. ++ ++If you still want to see more about the plugin bindings, go on ... ++ ++## The Challenge ++ ++Rust is a memory safe language - that means you may not dereference pointers or alter or interact ++with uninitialised memory. There are whole classes of problems that this resolves, but it means ++that Rust is opinionated about how it interacts with memory. ++ ++C is an unsafe language - there are undefined behaviours all throughout the specification, memory ++can be interacted with without bounds, which leads to many kinds of issues ranging from crashes, ++silent data corruption, to code execution and exploitation. ++ ++While it would be nice to rewrite everything from C to Rust, this is a large task - instead we need ++a way to allow Rust and C to interact. ++ ++## The Goal ++ ++To be able to define a pure Rust, 100% safe (in rust terms) plugin for 389 Directory Server that ++can perform useful tasks. ++ ++## The 389 Directory Server Plugin API ++ ++The 389-ds plugin system works by reading an ldap entry from cn=config, that directs to a shared ++library. That shared library path is dlopened and an init symbol read and activated. At that ++point the plugin is able to call back into 389-ds to provide registration of function handlers for ++various tasks that the plugin may wish to perform at defined points in an operation's execution. ++ ++During the execution of a plugin callback, the context of the environment is passed through a ++parameter block (pblock). This pblock has a set of apis for accessing its content, which may ++or may not be defined based on the execution state of the server. ++ ++Common plugin tasks involve the transformation of entries during write operation paths to provide ++extra attributes to the entry or generation of other entries. Values in entries are represented by ++internal structures that may or may not have sorting of content. ++ ++Already at this point it can be seen there is a lot of surface area to access. For clarity in ++our trivial example here we have required: ++ ++* Pblock ++* Entry ++* ValueSet ++* Value ++* Sdn ++* Result Codes ++ ++We need to be able to interact with all of these - and more - to make useful plugins. ++ ++## Structure of the Rust Plugin bindings. ++ ++As a result, there are a number of items we must be able to implement: ++ ++* Creation of the plugin function callback points ++* Transformation of C pointer types into Rust structures that can be interacted with. ++* Ability to have Rust interact with structures to achieve side effects in the C server ++* Mapping of errors that C can understand ++* Make all of it safe. ++ ++In order to design this, it's useful to see what a plugin from Rust should look like - by designing ++what the plugin should look like, we make the bindings that are preferable and ergonomic to rust ++rather than compromising on quality and developer experience. ++ ++Here is a minimal example of a plugin - it may not compile or be complete; it serves as an ++example.
++ ++``` ++#[macro_use] ++extern crate slapi_r_plugin; ++use slapi_r_plugin::prelude::*; ++ ++struct NewPlugin; ++ ++slapi_r_plugin_hooks!(plugin_name, NewPlugin); ++ ++impl SlapiPlugin3 for NewPlugin { ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin start"); ++ Ok(()) ++ } ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ log_error!(ErrorLevel::Trace, "plugin close"); ++ Ok(()) ++ } ++ ++ fn has_betxn_pre_add() -> bool { ++ true ++ } ++ ++ fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { ++ let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; ++ let sdn = e.get_sdnref(); ++ ++ log_error!(ErrorLevel::Trace, "betxn_pre_add -> {:?}", sdn); ++ Ok(()) ++ } ++} ++``` ++ ++Important details - there is no unsafe, we use rust native error handling and functions, there ++is no indication of memory management, we are defined by a trait, error logging uses native ++formatting. There are probably other details too - I'll leave it as an exercise for the reader ++to play Where's Wally and find them all. ++ ++With the end goal in mind, we can begin to look at the construction of the plugin system, and ++the design choices that were made. ++ ++## The Plugin Trait ++ ++A significant choice was the use of a trait to define the possible plugin function operations ++for rust implementors. This allows the compiler to guarantee that a plugin *will* have all ++associated functions. ++ ++> Traits are synonymous with java interfaces, defining methods you "promise" to implement, unlike ++> object orientation with a class hierarchy. ++ ++Now, you may notice that not all members of the trait are implemented. This is due to a feature ++of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide ++template versions of these functions. If you "overwrite" them, your implementation is used. Unlike ++OO, you may not inherit or call the default function. ++ ++If a default is not provided, you *must* implement that function to be considered valid. Today (20200422) ++this only applies to `start` and `close`. ++ ++The default implementations all return "false" to the presence of callbacks, and if they are used, ++they will always return an error. ++ ++## Interface generation ++ ++While it is nice to have this Rust interface for plugins, C is unable to call it (Rust uses a different ++calling convention to C, as well as symbol mangling). To expose these, we must provide `extern C` ++functions, where any function that requires a static symbol must be marked as no_mangle. ++ ++Rather than ask all plugin authors to do this, we can use the rust macro system to generate these ++interfaces at compile time. This is the reason for this line: ++ ++``` ++slapi_r_plugin_hooks!(plugin_name, NewPlugin); ++``` ++ ++This macro is defined in src/macros.rs, and is "the bridge" from C to Rust. Given a plugin name ++and a struct of the trait SlapiPlugin3, this macro is able to generate all needed C compatible ++functions. Based on the calls to `has_`, the generated functions are registered to the pblock ++that is provided. ++ ++When a callback triggers, the function landing point is called. This then wraps all the pointer ++types from C into Rust structs, and then dispatches to the struct instance.
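++For illustration, the landing point generated for the `betxn_pre_add` callback has roughly the
++following shape. This is a hand-written sketch, not the literal macro expansion (see src/macros.rs
++for that) - the shim's symbol name and the `PblockRef::new` constructor are assumed here for
++readability.
++
++```
++#[no_mangle]
++pub extern "C" fn plugin_name_betxn_pre_add(raw_pb: *const libc::c_void) -> i32 {
++    // Wrap the raw C pointer before any safe Rust code can touch it.
++    let mut pb = PblockRef::new(raw_pb);
++    // Dispatch to the trait impl, then map the Rust result back to an
++    // integer result code the C caller understands.
++    match NewPlugin::betxn_pre_add(&mut pb) {
++        Ok(()) => LDAP_SUCCESS,
++        Err(e) => e as i32,
++    }
++}
++```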
++ ++When the struct function returns, the result is unpacked and turned into C compatible result codes - ++in some cases, the result codes are sanitised due to quirks in the C ds api - `[<$mod_ident _plugin_mr_filter_ava>]` ++is an excellent example of this, where Rust returns are `true`/`false`, which would normally ++be FFI safe to convert to 1/0 respectively, but 389-ds expects the inverse in this case, where ++0 is true and all other values are false. To present a sane api to rust, the macro layer does this ++(mind bending) transformation for us. ++ ++## C Ptr Wrapper types ++ ++This is likely the major and most important detail of the plugin api. By wrapping these C ptrs with ++Rust types, we can create types that perform as rust expects, and adhere to the invariants required, ++while providing safe - and useful - interfaces to users. ++ ++It's important to understand how Rust manages memory both on the stack and the heap - Please see ++[the Rust Book](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html) for more. ++ ++As a result, this means that we must express, in code, assertions about the proper ownership of memory ++and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible ++for freeing some value.) Failure to handle this correctly can and will lead to crashes, leaks or ++*hand waving* magical failures that are eXtReMeLy FuN to debug. ++ ++### Reference Types ++ ++There are a number of types, such as `SdnRef`, which have a suffix of `*Ref`. These types represent ++values whose content is owned by the C server - that is, it is the responsibility of 389-ds to free ++the content of the pointer once it has been used. A majority of values that are provided to the ++function callback points fall into this class. ++ ++### Owned Types ++ ++These types contain a pointer from the C server, but it is the responsibility of the Rust library ++to indicate when that pointer and its content should be disposed of. This is generally handled ++by the `drop` trait, which is executed ... well, when an item is dropped. (A condensed example ++contrasting both patterns follows the conclusion below.) ++ ++### Dispatch from the wrapper to C ++ ++When a rust function on a wrapper is called, the type internally accesses its Ref type and ++uses the ptr to dispatch into the C server. Any required invariants are upheld, and results are ++mapped as required to match what rust callers expect. ++ ++As a result, this involves horrendous amounts of unsafe, and a detailed analysis of both the DS C ++api, what it expects, and the Rust nomicon to ensure you maintain all the invariants. ++ ++## Conclusion ++ ++Providing a bridge between C and Rust is challenging - but achievable - the result is plugins that ++are clean, safe, efficient.
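++
++As the concrete example promised above, the `Sdn`/`SdnRef` pair from src/slapi_r_plugin/src/dn.rs
++condenses to the following - only the ownership-relevant parts are kept here.
++
++```
++// Reference type: the C server owns the memory, so there is no Drop impl.
++pub struct SdnRef {
++    raw_sdn: *const libc::c_void,
++}
++
++// Owned type: the Rust library must free the C allocation when the value
++// goes out of scope, which is expressed through the Drop trait.
++pub struct Sdn {
++    value: SdnRef,
++}
++
++impl Drop for Sdn {
++    fn drop(&mut self) {
++        unsafe { slapi_sdn_free(&self.value.raw_sdn as *const *const libc::c_void) }
++    }
++}
++```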
++ ++ ++ +diff --git a/src/slapi_r_plugin/build.rs b/src/slapi_r_plugin/build.rs +new file mode 100644 +index 000000000..29bbd52d4 +--- /dev/null ++++ b/src/slapi_r_plugin/build.rs +@@ -0,0 +1,8 @@ ++use std::env; ++ ++fn main() { ++ if let Ok(lib_dir) = env::var("SLAPD_DYLIB_DIR") { ++ println!("cargo:rustc-link-lib=dylib=slapd"); ++ println!("cargo:rustc-link-search=native={}", lib_dir); ++ } ++} +diff --git a/src/slapi_r_plugin/src/backend.rs b/src/slapi_r_plugin/src/backend.rs +new file mode 100644 +index 000000000..f308295aa +--- /dev/null ++++ b/src/slapi_r_plugin/src/backend.rs +@@ -0,0 +1,71 @@ ++use crate::dn::SdnRef; ++use crate::pblock::Pblock; ++// use std::ops::Deref; ++ ++extern "C" { ++ fn slapi_back_transaction_begin(pb: *const libc::c_void) -> i32; ++ fn slapi_back_transaction_commit(pb: *const libc::c_void); ++ fn slapi_back_transaction_abort(pb: *const libc::c_void); ++ fn slapi_be_select_exact(sdn: *const libc::c_void) -> *const libc::c_void; ++} ++ ++pub struct BackendRef { ++ raw_be: *const libc::c_void, ++} ++ ++impl BackendRef { ++ pub fn new(dn: &SdnRef) -> Result<Self, ()> { ++ let raw_be = unsafe { slapi_be_select_exact(dn.as_ptr()) }; ++ if raw_be.is_null() { ++ Err(()) ++ } else { ++ Ok(BackendRef { raw_be }) ++ } ++ } ++ ++ pub(crate) fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_be ++ } ++ ++ pub fn begin_txn(self) -> Result<BackendRefTxn, ()> { ++ let mut pb = Pblock::new(); ++ if pb.set_op_backend(&self) != 0 { ++ return Err(()); ++ } ++ let rc = unsafe { slapi_back_transaction_begin(pb.as_ptr()) }; ++ if rc != 0 { ++ Err(()) ++ } else { ++ Ok(BackendRefTxn { ++ pb, ++ be: self, ++ committed: false, ++ }) ++ } ++ } ++} ++ ++pub struct BackendRefTxn { ++ pb: Pblock, ++ be: BackendRef, ++ committed: bool, ++} ++ ++impl BackendRefTxn { ++ pub fn commit(mut self) { ++ self.committed = true; ++ unsafe { ++ slapi_back_transaction_commit(self.pb.as_ptr()); ++ } ++ } ++} ++ ++impl Drop for BackendRefTxn { ++ fn drop(&mut self) { ++ if self.committed == false { ++ unsafe { ++ slapi_back_transaction_abort(self.pb.as_ptr()); ++ } ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/ber.rs b/src/slapi_r_plugin/src/ber.rs +new file mode 100644 +index 000000000..a501fd642 +--- /dev/null ++++ b/src/slapi_r_plugin/src/ber.rs +@@ -0,0 +1,90 @@ ++use crate::log::{log_error, ErrorLevel}; ++use libc; ++use std::ffi::CString; ++// use std::ptr; ++use std::slice; ++ ++use std::convert::TryFrom; ++use uuid::Uuid; ++ ++use crate::error::PluginError; ++ ++#[repr(C)] ++pub(crate) struct ol_berval { ++ pub len: usize, ++ pub data: *const u8, ++} ++ ++#[derive(Debug)] ++pub struct BerValRef { ++ pub(crate) raw_berval: *const ol_berval, ++} ++ ++impl BerValRef { ++ pub fn new(raw_berval: *const libc::c_void) -> Self { ++ // so we retype this ++ let raw_berval = raw_berval as *const ol_berval; ++ BerValRef { raw_berval } ++ } ++ ++ pub(crate) fn into_cstring(&self) -> Option<CString> { ++ // Cstring does not need a trailing null, so if we have one, ignore it. ++ let l: usize = unsafe { (*self.raw_berval).len }; ++ let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l) }; ++ CString::new(d_slice) ++ .or_else(|e| { ++ // Try it again, but with one byte less to trim a potential trailing null that ++ // could have been allocated, and ensure it has at least 1 byte of good data ++ // remaining.
++ if l > 1 { ++ let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l - 1) }; ++ CString::new(d_slice) ++ } else { ++ Err(e) ++ } ++ }) ++ .map_err(|_| { ++ log_error!( ++ ErrorLevel::Trace, ++ "invalid ber parse attempt, may contain a null byte? -> {:?}", ++ self ++ ); ++ () ++ }) ++ .ok() ++ } ++ ++ pub fn into_string(&self) -> Option<String> { ++ // Convert a Some to a rust string. ++ self.into_cstring().and_then(|v| { ++ v.into_string() ++ .map_err(|_| { ++ log_error!( ++ ErrorLevel::Trace, ++ "failed to convert cstring to string -> {:?}", ++ self ++ ); ++ () ++ }) ++ .ok() ++ }) ++ } ++} ++ ++impl TryFrom<&BerValRef> for Uuid { ++ type Error = PluginError; ++ ++ fn try_from(value: &BerValRef) -> Result<Self, Self::Error> { ++ let val_string = value.into_string().ok_or(PluginError::BervalString)?; ++ ++ Uuid::parse_str(val_string.as_str()) ++ .map(|r| { ++ log_error!(ErrorLevel::Trace, "valid uuid -> {:?}", r); ++ r ++ }) ++ .map_err(|_e| { ++ log_error!(ErrorLevel::Plugin, "Invalid uuid"); ++ PluginError::InvalidSyntax ++ }) ++ } ++} +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +new file mode 100644 +index 000000000..cf76ccbdb +--- /dev/null ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -0,0 +1,203 @@ ++use crate::error::RPluginError; ++use std::convert::TryFrom; ++use std::os::raw::c_char; ++ ++pub const LDAP_SUCCESS: i32 = 0; ++pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50; ++ ++#[repr(i32)] ++/// The set of possible function handles we can register via the pblock. These ++/// values correspond to slapi-plugin.h. ++pub enum PluginFnType { ++ /// SLAPI_PLUGIN_DESTROY_FN ++ Destroy = 11, ++ /// SLAPI_PLUGIN_CLOSE_FN ++ Close = 210, ++ /// SLAPI_PLUGIN_START_FN ++ Start = 212, ++ /// SLAPI_PLUGIN_PRE_BIND_FN ++ PreBind = 401, ++ /// SLAPI_PLUGIN_PRE_UNBIND_FN ++ PreUnbind = 402, ++ /// SLAPI_PLUGIN_PRE_SEARCH_FN ++ PreSearch = 403, ++ /// SLAPI_PLUGIN_PRE_COMPARE_FN ++ PreCompare = 404, ++ /// SLAPI_PLUGIN_PRE_MODIFY_FN ++ PreModify = 405, ++ /// SLAPI_PLUGIN_PRE_MODRDN_FN ++ PreModRDN = 406, ++ /// SLAPI_PLUGIN_PRE_ADD_FN ++ PreAdd = 407, ++ /// SLAPI_PLUGIN_PRE_DELETE_FN ++ PreDelete = 408, ++ /// SLAPI_PLUGIN_PRE_ABANDON_FN ++ PreAbandon = 409, ++ /// SLAPI_PLUGIN_PRE_ENTRY_FN ++ PreEntry = 410, ++ /// SLAPI_PLUGIN_PRE_REFERRAL_FN ++ PreReferal = 411, ++ /// SLAPI_PLUGIN_PRE_RESULT_FN ++ PreResult = 412, ++ /// SLAPI_PLUGIN_PRE_EXTOP_FN ++ PreExtop = 413, ++ /// SLAPI_PLUGIN_BE_PRE_ADD_FN ++ BeTxnPreAdd = 460, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN ++ BeTxnPreModify = 461, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN ++ BeTxnPreModRDN = 462, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN ++ BeTxnPreDelete = 463, ++ /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_TOMBSTONE_FN ++ BeTxnPreDeleteTombstone = 464, ++ /// SLAPI_PLUGIN_POST_SEARCH_FN ++ PostSearch = 503, ++ /// SLAPI_PLUGIN_BE_POST_ADD_FN ++ BeTxnPostAdd = 560, ++ /// SLAPI_PLUGIN_BE_POST_MODIFY_FN ++ BeTxnPostModify = 561, ++ /// SLAPI_PLUGIN_BE_POST_MODRDN_FN ++ BeTxnPostModRDN = 562, ++ /// SLAPI_PLUGIN_BE_POST_DELETE_FN ++ BeTxnPostDelete = 563, ++ ++ /// SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ MRFilterCreate = 600, ++ /// SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ MRIndexerCreate = 601, ++ /// SLAPI_PLUGIN_MR_FILTER_AVA ++ MRFilterAva = 618, ++ /// SLAPI_PLUGIN_MR_FILTER_SUB ++ MRFilterSub = 619, ++ /// SLAPI_PLUGIN_MR_VALUES2KEYS ++ MRValuesToKeys = 620, ++ /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ MRAssertionToKeysAva = 621, ++ /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ MRAssertionToKeysSub = 622, ++ ///
SLAPI_PLUGIN_MR_COMPARE ++ MRCompare = 625, ++ /// SLAPI_PLUGIN_MR_NORMALIZE ++ MRNormalize = 626, ++ ++ /// SLAPI_PLUGIN_SYNTAX_FILTER_AVA ++ SyntaxFilterAva = 700, ++ /// SLAPI_PLUGIN_SYNTAX_FILTER_SUB ++ SyntaxFilterSub = 701, ++ /// SLAPI_PLUGIN_SYNTAX_VALUES2KEYS ++ SyntaxValuesToKeys = 702, ++ /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_AVA ++ SyntaxAssertion2KeysAva = 703, ++ /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_SUB ++ SyntaxAssertion2KeysSub = 704, ++ /// SLAPI_PLUGIN_SYNTAX_FLAGS ++ SyntaxFlags = 707, ++ /// SLAPI_PLUGIN_SYNTAX_COMPARE ++ SyntaxCompare = 708, ++ /// SLAPI_PLUGIN_SYNTAX_VALIDATE ++ SyntaxValidate = 710, ++ /// SLAPI_PLUGIN_SYNTAX_NORMALIZE ++ SyntaxNormalize = 711, ++} ++ ++static SV01: [u8; 3] = [b'0', b'1', b'\0']; ++static SV02: [u8; 3] = [b'0', b'2', b'\0']; ++static SV03: [u8; 3] = [b'0', b'3', b'\0']; ++ ++/// Corresponding plugin versions ++pub enum PluginVersion { ++ /// SLAPI_PLUGIN_VERSION_01 ++ V01, ++ /// SLAPI_PLUGIN_VERSION_02 ++ V02, ++ /// SLAPI_PLUGIN_VERSION_03 ++ V03, ++} ++ ++impl PluginVersion { ++ pub fn to_char_ptr(&self) -> *const c_char { ++ match self { ++ PluginVersion::V01 => &SV01 as *const _ as *const c_char, ++ PluginVersion::V02 => &SV02 as *const _ as *const c_char, ++ PluginVersion::V03 => &SV03 as *const _ as *const c_char, ++ } ++ } ++} ++ ++static SMATCHINGRULE: [u8; 13] = [ ++ b'm', b'a', b't', b'c', b'h', b'i', b'n', b'g', b'r', b'u', b'l', b'e', b'\0', ++]; ++ ++pub enum PluginType { ++ MatchingRule, ++} ++ ++impl PluginType { ++ pub fn to_char_ptr(&self) -> *const c_char { ++ match self { ++ PluginType::MatchingRule => &SMATCHINGRULE as *const _ as *const c_char, ++ } ++ } ++} ++ ++#[repr(i32)] ++/// data types that we can get or retrieve from the pblock. This is only ++/// used internally. 
++pub(crate) enum PblockType { ++ /// SLAPI_PLUGIN_PRIVATE ++ _PrivateData = 4, ++ /// SLAPI_PLUGIN_VERSION ++ Version = 8, ++ /// SLAPI_PLUGIN_DESCRIPTION ++ _Description = 12, ++ /// SLAPI_PLUGIN_IDENTITY ++ Identity = 13, ++ /// SLAPI_PLUGIN_INTOP_RESULT ++ OpResult = 15, ++ /// SLAPI_ADD_ENTRY ++ AddEntry = 60, ++ /// SLAPI_BACKEND ++ Backend = 130, ++ /// SLAPI_PLUGIN_MR_NAMES ++ MRNames = 624, ++ /// SLAPI_PLUGIN_SYNTAX_NAMES ++ SyntaxNames = 705, ++ /// SLAPI_PLUGIN_SYNTAX_OID ++ SyntaxOid = 706, ++} ++ ++/// See ./ldap/include/ldaprot.h ++#[derive(PartialEq)] ++pub enum FilterType { ++ And = 0xa0, ++ Or = 0xa1, ++ Not = 0xa2, ++ Equality = 0xa3, ++ Substring = 0xa4, ++ Ge = 0xa5, ++ Le = 0xa6, ++ Present = 0x87, ++ Approx = 0xa8, ++ Extended = 0xa9, ++} ++ ++impl TryFrom<i32> for FilterType { ++ type Error = RPluginError; ++ ++ fn try_from(value: i32) -> Result<Self, Self::Error> { ++ match value { ++ 0xa0 => Ok(FilterType::And), ++ 0xa1 => Ok(FilterType::Or), ++ 0xa2 => Ok(FilterType::Not), ++ 0xa3 => Ok(FilterType::Equality), ++ 0xa4 => Ok(FilterType::Substring), ++ 0xa5 => Ok(FilterType::Ge), ++ 0xa6 => Ok(FilterType::Le), ++ 0x87 => Ok(FilterType::Present), ++ 0xa8 => Ok(FilterType::Approx), ++ 0xa9 => Ok(FilterType::Extended), ++ _ => Err(RPluginError::FilterType), ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/dn.rs b/src/slapi_r_plugin/src/dn.rs +new file mode 100644 +index 000000000..5f8a65743 +--- /dev/null ++++ b/src/slapi_r_plugin/src/dn.rs +@@ -0,0 +1,108 @@ ++use std::convert::TryFrom; ++use std::ffi::{CStr, CString}; ++use std::ops::Deref; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_sdn_get_dn(sdn: *const libc::c_void) -> *const c_char; ++ fn slapi_sdn_new_dn_byval(dn: *const c_char) -> *const libc::c_void; ++ fn slapi_sdn_issuffix(sdn: *const libc::c_void, suffix_sdn: *const libc::c_void) -> i32; ++ fn slapi_sdn_free(sdn: *const *const libc::c_void); ++ fn slapi_sdn_dup(sdn: *const libc::c_void) -> *const libc::c_void; ++} ++ ++#[derive(Debug)] ++pub struct SdnRef { ++ raw_sdn: *const libc::c_void, ++} ++ ++#[derive(Debug)] ++pub struct NdnRef { ++ raw_ndn: *const c_char, ++} ++ ++#[derive(Debug)] ++pub struct Sdn { ++ value: SdnRef, ++} ++ ++unsafe impl Send for Sdn {} ++ ++impl From<&CStr> for Sdn { ++ fn from(value: &CStr) -> Self { ++ Sdn { ++ value: SdnRef { ++ raw_sdn: unsafe { slapi_sdn_new_dn_byval(value.as_ptr()) }, ++ }, ++ } ++ } ++} ++ ++impl TryFrom<&str> for Sdn { ++ type Error = (); ++ ++ fn try_from(value: &str) -> Result<Self, Self::Error> { ++ let cstr = CString::new(value).map_err(|_| ())?; ++ Ok(Self::from(cstr.as_c_str())) ++ } ++} ++ ++impl Clone for Sdn { ++ fn clone(&self) -> Self { ++ let raw_sdn = unsafe { slapi_sdn_dup(self.value.raw_sdn) }; ++ Sdn { ++ value: SdnRef { raw_sdn }, ++ } ++ } ++} ++ ++impl Drop for Sdn { ++ fn drop(&mut self) { ++ unsafe { slapi_sdn_free(&self.value.raw_sdn as *const *const libc::c_void) } ++ } ++} ++ ++impl Deref for Sdn { ++ type Target = SdnRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl SdnRef { ++ pub fn new(raw_sdn: *const libc::c_void) -> Self { ++ SdnRef { raw_sdn } ++ } ++ ++ /// This is unsafe, as you need to ensure that the associated SdnRef lives at ++ /// least as long as the NdnRef, else this may cause a use-after-free.
++ pub unsafe fn as_ndnref(&self) -> NdnRef { ++ let raw_ndn = slapi_sdn_get_dn(self.raw_sdn); ++ NdnRef { raw_ndn } ++ } ++ ++ pub fn to_dn_string(&self) -> String { ++ let dn_raw = unsafe { slapi_sdn_get_dn(self.raw_sdn) }; ++ let dn_cstr = unsafe { CStr::from_ptr(dn_raw) }; ++ dn_cstr.to_string_lossy().to_string() ++ } ++ ++ pub(crate) fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_sdn ++ } ++ ++ pub fn is_below_suffix(&self, other: &SdnRef) -> bool { ++ if unsafe { slapi_sdn_issuffix(self.raw_sdn, other.raw_sdn) } == 0 { ++ false ++ } else { ++ true ++ } ++ } ++} ++ ++impl NdnRef { ++ pub(crate) fn as_ptr(&self) -> *const c_char { ++ self.raw_ndn ++ } ++} +diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs +new file mode 100644 +index 000000000..034efe692 +--- /dev/null ++++ b/src/slapi_r_plugin/src/entry.rs +@@ -0,0 +1,92 @@ ++use crate::dn::SdnRef; ++use crate::value::{slapi_value, ValueArrayRef, ValueRef}; ++use std::ffi::CString; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_entry_get_sdn(e: *const libc::c_void) -> *const libc::c_void; ++ fn slapi_entry_add_value( ++ e: *const libc::c_void, ++ a: *const c_char, ++ v: *const slapi_value, ++ ) -> i32; ++ fn slapi_entry_attr_get_valuearray( ++ e: *const libc::c_void, ++ a: *const c_char, ++ ) -> *const *const slapi_value; ++} ++ ++pub struct EntryRef { ++ raw_e: *const libc::c_void, ++} ++ ++/* ++pub struct Entry { ++ value: EntryRef, ++} ++ ++impl Drop for Entry { ++ fn drop(&mut self) { ++ () ++ } ++} ++ ++impl Deref for Entry { ++ type Target = EntryRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl Entry { ++ // Forget about this value, and get a pointer back suitable for providing to directory ++ // server to take ownership. ++ pub unsafe fn forget(self) -> *mut libc::c_void { ++ unimplemented!(); ++ } ++} ++*/ ++ ++impl EntryRef { ++ pub fn new(raw_e: *const libc::c_void) -> Self { ++ EntryRef { raw_e } ++ } ++ ++ // get the sdn ++ pub fn get_sdnref(&self) -> SdnRef { ++ let sdn_ptr = unsafe { slapi_entry_get_sdn(self.raw_e) }; ++ SdnRef::new(sdn_ptr) ++ } ++ ++ pub fn get_attr(&self, name: &str) -> Option { ++ let cname = CString::new(name).expect("invalid attr name"); ++ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) }; ++ ++ if va.is_null() { ++ None ++ } else { ++ Some(ValueArrayRef::new(va as *const libc::c_void)) ++ } ++ } ++ ++ pub fn add_value(&mut self, a: &str, v: &ValueRef) { ++ // turn the attr to a c string. ++ // TODO FIX ++ let attr_name = CString::new(a).expect("Invalid attribute name"); ++ // Get the raw ptr. ++ let raw_value_ref = unsafe { v.as_ptr() }; ++ // We ignore the return because it always returns 0. ++ let _ = unsafe { ++ // By default, this clones. 
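++            // slapi_entry_add_value copies the value into the entry, so the
++            // borrowed ValueRef is still owned by the caller after this call.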
++ slapi_entry_add_value(self.raw_e, attr_name.as_ptr(), raw_value_ref) ++ }; ++ } ++ ++ /* ++ pub fn replace_value(&mut self, a: &str, v: &ValueRef) { ++ // slapi_entry_attr_replace(e, SLAPI_ATTR_ENTRYUSN, new_bvals); ++ unimplemented!(); ++ } ++ */ ++} +diff --git a/src/slapi_r_plugin/src/error.rs b/src/slapi_r_plugin/src/error.rs +new file mode 100644 +index 000000000..91c81cd26 +--- /dev/null ++++ b/src/slapi_r_plugin/src/error.rs +@@ -0,0 +1,61 @@ ++// use std::convert::TryFrom; ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum RPluginError { ++ Unknown = 500, ++ Unimplemented = 501, ++ FilterType = 502, ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum PluginError { ++ GenericFailure = -1, ++ Unknown = 1000, ++ Unimplemented = 1001, ++ Pblock = 1002, ++ BervalString = 1003, ++ InvalidSyntax = 1004, ++ InvalidFilter = 1005, ++ TxnFailure = 1006, ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum LDAPError { ++ Success = 0, ++ Operation = 1, ++ ObjectClassViolation = 65, ++ Other = 80, ++ Unknown = 999, ++} ++ ++impl From for LDAPError { ++ fn from(value: i32) -> Self { ++ match value { ++ 0 => LDAPError::Success, ++ 1 => LDAPError::Operation, ++ 65 => LDAPError::ObjectClassViolation, ++ 80 => LDAPError::Other, ++ _ => LDAPError::Unknown, ++ } ++ } ++} ++ ++// if we make debug impl, we can use this. ++// errmsg = ldap_err2string(result); ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum DseCallbackStatus { ++ DoNotApply = 0, ++ Ok = 1, ++ Error = -1, ++} ++ ++#[derive(Debug)] ++pub enum LoggingError { ++ Unknown, ++ CString(String), ++} +diff --git a/src/slapi_r_plugin/src/init.c b/src/slapi_r_plugin/src/init.c +new file mode 100644 +index 000000000..86d1235b8 +--- /dev/null ++++ b/src/slapi_r_plugin/src/init.c +@@ -0,0 +1,8 @@ ++ ++#include ++ ++int32_t ++do_nothing_really_well_abcdef() { ++ return 0; ++} ++ +diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs +new file mode 100644 +index 000000000..d7fc22e52 +--- /dev/null ++++ b/src/slapi_r_plugin/src/lib.rs +@@ -0,0 +1,36 @@ ++// extern crate lazy_static; ++ ++#[macro_use] ++pub mod macros; ++pub mod backend; ++pub mod ber; ++mod constants; ++pub mod dn; ++pub mod entry; ++pub mod error; ++pub mod log; ++pub mod pblock; ++pub mod plugin; ++pub mod search; ++pub mod syntax_plugin; ++pub mod task; ++pub mod value; ++ ++pub mod prelude { ++ pub use crate::backend::{BackendRef, BackendRefTxn}; ++ pub use crate::ber::BerValRef; ++ pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS}; ++ pub use crate::dn::{Sdn, SdnRef}; ++ pub use crate::entry::EntryRef; ++ pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError}; ++ pub use crate::log::{log_error, ErrorLevel}; ++ pub use crate::pblock::{Pblock, PblockRef}; ++ pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3}; ++ pub use crate::search::{Search, SearchScope}; ++ pub use crate::syntax_plugin::{ ++ matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr, ++ SlapiSubMr, SlapiSyntaxPlugin1, ++ }; ++ pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef}; ++ pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef}; ++} +diff --git a/src/slapi_r_plugin/src/log.rs b/src/slapi_r_plugin/src/log.rs +new file mode 100644 +index 000000000..f686ecd1a +--- /dev/null ++++ b/src/slapi_r_plugin/src/log.rs +@@ -0,0 +1,87 @@ ++use std::ffi::CString; ++use std::os::raw::c_char; ++ ++use crate::constants; ++use 
crate::error::LoggingError;
++
++extern "C" {
++    fn slapi_log_error(level: i32, system: *const c_char, message: *const c_char) -> i32;
++}
++
++pub fn log_error(
++    level: ErrorLevel,
++    subsystem: String,
++    message: String,
++) -> Result<(), LoggingError> {
++    let c_subsystem = CString::new(subsystem)
++        .map_err(|e| LoggingError::CString(format!("failed to convert subsystem -> {:?}", e)))?;
++    let c_message = CString::new(message)
++        .map_err(|e| LoggingError::CString(format!("failed to convert message -> {:?}", e)))?;
++
++    match unsafe { slapi_log_error(level as i32, c_subsystem.as_ptr(), c_message.as_ptr()) } {
++        constants::LDAP_SUCCESS => Ok(()),
++        _ => Err(LoggingError::Unknown),
++    }
++}
++
++#[repr(i32)]
++#[derive(Debug)]
++/// This is a safe rust representation of the values from slapi-plugin.h
++/// such as SLAPI_LOG_FATAL, SLAPI_LOG_TRACE, SLAPI_LOG_ ... These values
++/// must match their counterparts in slapi-plugin.h
++pub enum ErrorLevel {
++    /// Always log messages at this level. Soon to go away, see EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG
++    Fatal = 0,
++    /// Log detailed messages.
++    Trace = 1,
++    /// Log packet tracing.
++    Packets = 2,
++    /// Log argument tracing.
++    Args = 3,
++    /// Log connection tracking.
++    Conns = 4,
++    /// Log BER parsing.
++    Ber = 5,
++    /// Log filter processing.
++    Filter = 6,
++    /// Log configuration processing.
++    Config = 7,
++    /// Log access controls.
++    Acl = 8,
++    /// Log .... ???
++    Shell = 9,
++    /// Log .... ???
++    Parse = 10,
++    /// Log .... ???
++    House = 11,
++    /// Log detailed replication information.
++    Repl = 12,
++    /// Log cache management.
++    Cache = 13,
++    /// Log detailed plugin operations.
++    Plugin = 14,
++    /// Log .... ???
++    Timing = 15,
++    /// Log backend information.
++    BackLDBM = 16,
++    /// Log ACL processing.
++    AclSummary = 17,
++    /// Log nuncstans processing.
++    NuncStansDONOTUSE = 18,
++    /// Emergency messages. Server is bursting into flame.
++    Emerg = 19,
++    /// Important alerts, server may explode soon.
++    Alert = 20,
++    /// Critical messages, but the server isn't going to explode. Admin should intervene.
++    Crit = 21,
++    /// Error has occurred, but we can keep going. Could indicate misconfiguration.
++    Error = 22,
++    /// Warning about an issue that isn't very important. Good to resolve though.
++    Warning = 23,
++    /// Inform the admin of something that they should know about, i.e. server is running now.
++    Notice = 24,
++    /// Informational messages that are nice to know.
++    Info = 25,
++    /// Debugging information from the server.
++    Debug = 26,
++}
+diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
+new file mode 100644
+index 000000000..030449632
+--- /dev/null
++++ b/src/slapi_r_plugin/src/macros.rs
+@@ -0,0 +1,835 @@
++#[macro_export]
++macro_rules! log_error {
++    ($level:expr, $($arg:tt)*) => ({
++        use std::fmt;
++        match log_error(
++            $level,
++            format!("{}:{}", file!(), line!()),
++            format!("{}\n", fmt::format(format_args!($($arg)*)))
++        ) {
++            Ok(_) => {},
++            Err(e) => {
++                eprintln!("A logging error occurred {}, {} -> {:?}", file!(), line!(), e);
++            }
++        };
++    })
++}
++
++#[macro_export]
++macro_rules! slapi_r_plugin_hooks {
++    ($mod_ident:ident, $hooks_ident:ident) => (
++        paste::item!
{ ++ use libc; ++ ++ static mut PLUGINID: *const libc::c_void = std::ptr::null(); ++ ++ pub(crate) fn plugin_id() -> PluginIdRef { ++ PluginIdRef { ++ raw_pid: unsafe { PLUGINID } ++ } ++ } ++ ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, "it's alive!\n"); ++ ++ match pb.set_plugin_version(PluginVersion::V03) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Setup the plugin id. ++ unsafe { ++ PLUGINID = pb.get_plugin_identity(); ++ } ++ ++ if $hooks_ident::has_betxn_pre_modify() { ++ match pb.register_betxn_pre_modify_fn([<$mod_ident _plugin_betxn_pre_modify>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ if $hooks_ident::has_betxn_pre_add() { ++ match pb.register_betxn_pre_add_fn([<$mod_ident _plugin_betxn_pre_add>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ // set the start fn ++ match pb.register_start_fn([<$mod_ident _plugin_start>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // set the close fn ++ match pb.register_close_fn([<$mod_ident _plugin_close>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_start>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ if let Some(task_ident) = $hooks_ident::has_task_handler() { ++ match task_register_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>], &mut pb) { ++ 0 => {}, ++ e => return e, ++ }; ++ }; ++ ++ match $hooks_ident::start(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_close>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ if let Some(task_ident) = $hooks_ident::has_task_handler() { ++ match task_unregister_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ }; ++ ++ match $hooks_ident::close(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_betxn_pre_modify>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ match $hooks_ident::betxn_pre_modify(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_betxn_pre_add>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ match $hooks_ident::betxn_pre_add(&mut pb) { ++ Ok(()) => { ++ 0 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "-> {:?}", e); ++ 1 ++ } ++ } ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_task_handler>]( ++ raw_pb: *const libc::c_void, ++ raw_e_before: *const libc::c_void, ++ _raw_e_after: *const libc::c_void, ++ raw_returncode: *mut i32, ++ _raw_returntext: *mut c_char, ++ raw_arg: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ ++ let e_before = EntryRef::new(raw_e_before); ++ // let e_after = EntryRef::new(raw_e_after); ++ ++ let task_data = match $hooks_ident::task_validate( ++ &e_before ++ ) { ++ Ok(data) => data, ++ Err(retcode) => { ++ unsafe { *raw_returncode = retcode as i32 }; ++ return DseCallbackStatus::Error as i32 ++ } ++ }; ++ ++ let mut task = Task::new(&e_before, raw_arg); ++ task.register_destructor_fn([<$mod_ident _plugin_task_destructor>]); ++ ++ // Setup the task thread and then run it. 
Remember, because Rust is ++ // smarter about memory, the move statement here moves the task wrapper and ++ // task_data to the thread, so they drop on thread close. No need for a ++ // destructor beyond blocking on the thread to complete. ++ std::thread::spawn(move || { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread => begin")); ++ // Indicate the task is begun ++ task.begin(); ++ // Start a txn ++ let be: Option = match $hooks_ident::task_be_dn_hint(&task_data) ++ .map(|be_dn| { ++ BackendRef::new(&be_dn) ++ }) ++ .transpose() { ++ Ok(v) => v, ++ Err(_) => { ++ log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> selected dn does not exist")); ++ task.error(PluginError::TxnFailure as i32); ++ return; ++ } ++ }; ++ let be_txn: Option = match be { ++ Some(b) => { ++ match b.begin_txn() { ++ Ok(txn) => Some(txn), ++ Err(_) => { ++ log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> unable to begin txn")); ++ task.error(PluginError::TxnFailure as i32); ++ return; ++ } ++ } ++ } ++ None => None, ++ }; ++ ++ // Abort or commit the txn here. ++ match $hooks_ident::task_handler(&mut task, task_data) { ++ Ok(_data) => { ++ match be_txn { ++ Some(be_txn) => be_txn.commit(), ++ None => {} ++ }; ++ // These will set the status, and guarantee the drop ++ task.success(); ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "{}_plugin_task_thread => task error -> {:?}", stringify!($mod_ident), e); ++ // These will set the status, and guarantee the drop ++ task.error(e as i32); ++ // On drop, be_txn implicitly aborts. ++ } ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread <= complete")); ++ }); ++ ++ // Indicate that the thread started just fine. ++ unsafe { *raw_returncode = LDAP_SUCCESS }; ++ DseCallbackStatus::Ok as i32 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_task_destructor>]( ++ raw_task: *const libc::c_void, ++ ) { ++ // Simply block until the task refcount drops to 0. ++ let task = TaskRef::new(raw_task); ++ task.block(); ++ } ++ ++ } // end paste ++ ) ++} // end macro ++ ++#[macro_export] ++macro_rules! slapi_r_syntax_plugin_hooks { ++ ( ++ $mod_ident:ident, ++ $hooks_ident:ident ++ ) => ( ++ paste::item! { ++ use libc; ++ use std::convert::TryFrom; ++ ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks => begin"); ++ // Setup our plugin ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Setup the names/oids that this plugin provides syntaxes for. 
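++            // These names come from the plugin's SlapiSyntaxPlugin1 impl; a
++            // hypothetical plugin might supply:
++            //     fn attr_supported_names() -> Vec<&'static str> { vec!["myAttr", "1.2.3.4.5"] }
++            //     fn attr_oid() -> &'static str { "1.2.3.4.5" }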
++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) }; ++ match pb.register_syntax_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) }; ++ match pb.register_syntax_oid(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ match pb.register_syntax_validate_fn([<$mod_ident _plugin_syntax_validate>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // Now setup the MR's ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::eq_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_eq_mr_init"), ++ [<$mod_ident _plugin_eq_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ if $hooks_ident::sub_mr_oid().is_some() { ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::sub_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_ord_mr_init"), ++ [<$mod_ident _plugin_ord_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ if $hooks_ident::ord_mr_oid().is_some() { ++ match register_plugin_ext( ++ PluginType::MatchingRule, ++ $hooks_ident::ord_mr_name(), ++ concat!(stringify!($mod_ident), "_plugin_ord_mr_init"), ++ [<$mod_ident _plugin_ord_mr_init>] ++ ) { ++ 0 => {}, ++ e => return e, ++ }; ++ } ++ ++ log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks <= success"); ++ ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_syntax_validate>]( ++ raw_berval: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate => begin")); ++ ++ let bval = BerValRef::new(raw_berval); ++ ++ match $hooks_ident::syntax_validate(&bval) { ++ Ok(()) => { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate <= success")); ++ LDAP_SUCCESS ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Warning, ++ "{}_plugin_syntax_validate error -> {:?}", stringify!($mod_ident), e ++ ); ++ e as i32 ++ } ++ } ++ } ++ ++ // All the MR types share this. 
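++        // Equality is answered by the plugin's filter_ava_eq, while Ge/Le are
++        // derived from the Ordering returned by filter_ava_ord, so this single
++        // callback serves both the EQ and ORD matching rules.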
++ pub extern "C" fn [<$mod_ident _plugin_mr_filter_ava>]( ++ raw_pb: *const libc::c_void, ++ raw_bvfilter: *const libc::c_void, ++ raw_bvals: *const libc::c_void, ++ i_ftype: i32, ++ _retval: *mut libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava => begin")); ++ let mut pb = PblockRef::new(raw_pb); ++ let bvfilter = BerValRef::new(raw_bvfilter); ++ let bvals = ValueArrayRef::new(raw_bvals); ++ let ftype = match FilterType::try_from(i_ftype) { ++ Ok(f) => f, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, "{}_plugin_ord_mr_filter_ava Error -> {:?}", ++ stringify!($mod_ident), e); ++ return e as i32 ++ } ++ }; ++ ++ let r: Result = match ftype { ++ FilterType::And | FilterType::Or | FilterType::Not => { ++ Err(PluginError::InvalidFilter) ++ } ++ FilterType::Equality => { ++ $hooks_ident::filter_ava_eq(&mut pb, &bvfilter, &bvals) ++ } ++ FilterType::Substring => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Ge => { ++ $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals) ++ .map(|o_ord| { ++ match o_ord { ++ Some(Ordering::Greater) | Some(Ordering::Equal) => true, ++ Some(Ordering::Less) | None => false, ++ } ++ }) ++ } ++ FilterType::Le => { ++ $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals) ++ .map(|o_ord| { ++ match o_ord { ++ Some(Ordering::Less) | Some(Ordering::Equal) => true, ++ Some(Ordering::Greater) | None => false, ++ } ++ }) ++ } ++ FilterType::Present => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Approx => { ++ Err(PluginError::Unimplemented) ++ } ++ FilterType::Extended => { ++ Err(PluginError::Unimplemented) ++ } ++ }; ++ ++ match r { ++ Ok(b) => { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava <= success")); ++ // rust bool into i32 will become 0 false, 1 true. However, ds expects 0 true and 1 false for ++ // for the filter_ava match. So we flip the bool, and send it back. 
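++                    // e.g. Ok(true) (a match) is returned as 0, Ok(false) as 1.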
++ (!b) as i32 ++ } ++ Err(e) => { ++ log_error!(ErrorLevel::Warning, ++ "{}_plugin_mr_filter_ava error -> {:?}", ++ stringify!($mod_ident), e ++ ); ++ e as i32 ++ } ++ } ++ } ++ ++ ++ // EQ MR plugin hooks ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_init>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init => begin")); ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) }; ++ // SLAPI_PLUGIN_MR_NAMES ++ match pb.register_mr_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // description ++ // SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ match pb.register_mr_filter_create_fn([<$mod_ident _plugin_eq_mr_filter_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_eq_mr_indexer_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_AVA ++ match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_SUB ++ match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_eq_mr_filter_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_VALUES2KEYS ++ match pb.register_mr_values2keys_fn([<$mod_ident _plugin_eq_mr_filter_values2keys>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_COMPARE ++ match pb.register_mr_compare_fn([<$mod_ident _plugin_eq_mr_filter_compare>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_NORMALIZE ++ ++ // Finaly, register the MR ++ match unsafe { matchingrule_register($hooks_ident::eq_mr_oid(), $hooks_ident::eq_mr_name(), $hooks_ident::eq_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident 
_plugin_eq_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ raw_vals: *const libc::c_void, ++ raw_ivals: *mut libc::c_void, ++ i_ftype: i32, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys => begin")); ++ let mut pb = PblockRef::new(raw_pb); ++ let vals = ValueArrayRef::new(raw_vals); ++ let ftype = match FilterType::try_from(i_ftype) { ++ Ok(f) => f, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> {:?}", ++ stringify!($mod_ident), ++ e); ++ return e as i32 ++ } ++ }; ++ ++ if (ftype != FilterType::Equality && ftype != FilterType::Approx) { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> Invalid Filter type", ++ stringify!($mod_ident), ++ ); ++ return PluginError::InvalidFilter as i32 ++ } ++ ++ let va = match $hooks_ident::eq_mr_filter_values2keys(&mut pb, &vals) { ++ Ok(va) => va, ++ Err(e) => { ++ log_error!(ErrorLevel::Error, ++ "{}_plugin_eq_mr_filter_values2keys Error -> {:?}", ++ stringify!($mod_ident), ++ e); ++ return e as i32 ++ } ++ }; ++ ++ // Now, deconstruct the va, get the pointer, and put it into the ivals. ++ unsafe { ++ let ivals_ptr: *mut *const libc::c_void = raw_ivals as *mut _; ++ (*ivals_ptr) = va.take_ownership() as *const libc::c_void; ++ } ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // This is probably another char pointer. 
++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_compare>]( ++ raw_va: *const libc::c_void, ++ raw_vb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare <= success")); ++ 0 ++ } ++ ++ // SUB MR plugin hooks ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // Probably a char array ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_compare>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare <= success")); ++ 0 ++ } ++ ++ // ORD MR plugin hooks ++ #[no_mangle] ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_init>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ let mut pb = PblockRef::new(raw_pb); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init => begin")); ++ match pb.set_plugin_version(PluginVersion::V01) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) }; ++ // 
SLAPI_PLUGIN_MR_NAMES ++ match pb.register_mr_names(name_ptr) { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ // description ++ // SLAPI_PLUGIN_MR_FILTER_CREATE_FN ++ match pb.register_mr_filter_create_fn([<$mod_ident _plugin_ord_mr_filter_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN ++ match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_ord_mr_indexer_create>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_AVA ++ match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_FILTER_SUB ++ match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_ord_mr_filter_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_VALUES2KEYS ++ /* ++ match pb.register_mr_values2keys_fn([<$mod_ident _plugin_ord_mr_filter_values2keys>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ */ ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA ++ match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB ++ match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_COMPARE ++ match pb.register_mr_compare_fn([<$mod_ident _plugin_ord_mr_filter_compare>]) { ++ 0 => {}, ++ e => return e, ++ }; ++ // SLAPI_PLUGIN_MR_NORMALIZE ++ ++ // Finaly, register the MR ++ match unsafe { matchingrule_register($hooks_ident::ord_mr_oid().unwrap(), $hooks_ident::ord_mr_name(), $hooks_ident::ord_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } { ++ 0 => {}, ++ e => return e, ++ }; ++ ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_indexer_create>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_values2keys>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_ava => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), 
"_plugin_ord_mr_filter_assertion2keys_ava <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub => begin")); ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub <= success")); ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_names>]( ++ raw_pb: *const libc::c_void, ++ ) -> i32 { ++ // probably char pointers ++ 0 ++ } ++ ++ pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_compare>]( ++ raw_va: *const libc::c_void, ++ raw_vb: *const libc::c_void, ++ ) -> i32 { ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare => begin")); ++ let va = BerValRef::new(raw_va); ++ let vb = BerValRef::new(raw_vb); ++ let rc = match $hooks_ident::filter_compare(&va, &vb) { ++ Ordering::Less => -1, ++ Ordering::Equal => 0, ++ Ordering::Greater => 1, ++ }; ++ log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare <= success")); ++ rc ++ } ++ ++ } // end paste ++ ) ++} // end macro ++ ++#[macro_export] ++macro_rules! slapi_r_search_callback_mapfn { ++ ( ++ $mod_ident:ident, ++ $cb_target_ident:ident, ++ $cb_mod_ident:ident ++ ) => { ++ paste::item! { ++ #[no_mangle] ++ pub extern "C" fn [<$cb_target_ident>]( ++ raw_e: *const libc::c_void, ++ raw_data: *const libc::c_void, ++ ) -> i32 { ++ let e = EntryRef::new(raw_e); ++ let data_ptr = raw_data as *const _; ++ let data = unsafe { &(*data_ptr) }; ++ match $cb_mod_ident(e, data) { ++ Ok(_) => LDAPError::Success as i32, ++ Err(e) => e as i32, ++ } ++ } ++ } // end paste ++ }; ++} // end macro +diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs +new file mode 100644 +index 000000000..b69ce1680 +--- /dev/null ++++ b/src/slapi_r_plugin/src/pblock.rs +@@ -0,0 +1,275 @@ ++use libc; ++use std::ops::{Deref, DerefMut}; ++use std::os::raw::c_char; ++use std::ptr; ++ ++use crate::backend::BackendRef; ++use crate::constants::{PblockType, PluginFnType, PluginVersion}; ++use crate::entry::EntryRef; ++pub use crate::log::{log_error, ErrorLevel}; ++ ++extern "C" { ++ fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; ++ fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32; ++ fn slapi_pblock_new() -> *const libc::c_void; ++} ++ ++pub struct Pblock { ++ value: PblockRef, ++} ++ ++impl Pblock { ++ pub fn new() -> Pblock { ++ let raw_pb = unsafe { slapi_pblock_new() }; ++ Pblock { ++ value: PblockRef { raw_pb }, ++ } ++ } ++} ++ ++impl Deref for Pblock { ++ type Target = PblockRef; ++ ++ fn deref(&self) -> &Self::Target { ++ &self.value ++ } ++} ++ ++impl DerefMut for Pblock { ++ fn deref_mut(&mut self) -> &mut Self::Target { ++ &mut self.value ++ } ++} ++ ++pub struct PblockRef { ++ raw_pb: *const libc::c_void, ++} ++ ++impl PblockRef { ++ pub fn new(raw_pb: *const libc::c_void) -> Self { ++ PblockRef { raw_pb } ++ } ++ ++ pub unsafe fn as_ptr(&self) -> *const libc::c_void { ++ self.raw_pb ++ } ++ ++ fn set_pb_char_arr_ptr(&mut self, req_type: PblockType, ptr: *const *const c_char) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) } ++ } ++ ++ fn set_pb_char_ptr(&mut self, req_type: PblockType, ptr: *const c_char) -> i32 { ++ 
let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
++        unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) }
++    }
++
++    fn set_pb_fn_ptr(
++        &mut self,
++        fn_type: PluginFnType,
++        ptr: extern "C" fn(*const libc::c_void) -> i32,
++    ) -> i32 {
++        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
++        unsafe { slapi_pblock_set(self.raw_pb, fn_type as i32, value_ptr) }
++    }
++
++    fn get_value_ptr(&mut self, req_type: PblockType) -> Result<*const libc::c_void, ()> {
++        let mut value: *mut libc::c_void = ptr::null::<libc::c_void>() as *mut libc::c_void;
++        let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void;
++        match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } {
++            0 => Ok(value),
++            e => {
++                log_error!(ErrorLevel::Error, "unable to get from pblock -> {:?}", e);
++                Err(())
++            }
++        }
++    }
++
++    fn get_value_i32(&mut self, req_type: PblockType) -> Result<i32, ()> {
++        let mut value: i32 = 0;
++        let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void;
++        match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } {
++            0 => Ok(value),
++            e => {
++                log_error!(ErrorLevel::Error, "unable to get from pblock -> {:?}", e);
++                Err(())
++            }
++        }
++    }
++
++    pub fn register_start_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 {
++        self.set_pb_fn_ptr(PluginFnType::Start, ptr)
++    }
++
++    pub fn register_close_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 {
++        self.set_pb_fn_ptr(PluginFnType::Close, ptr)
++    }
++
++    pub fn register_betxn_pre_add_fn(
++        &mut self,
++        ptr: extern "C" fn(*const libc::c_void) -> i32,
++    ) -> i32 {
++        self.set_pb_fn_ptr(PluginFnType::BeTxnPreAdd, ptr)
++    }
++
++    pub fn register_betxn_pre_modify_fn(
++        &mut self,
++        ptr: extern "C" fn(*const libc::c_void) -> i32,
++    ) -> i32 {
++        self.set_pb_fn_ptr(PluginFnType::BeTxnPreModify, ptr)
++    }
++
++    pub fn register_syntax_filter_ava_fn(
++        &mut self,
++        ptr: extern "C" fn(
++            *const core::ffi::c_void,
++            *const core::ffi::c_void,
++            *const core::ffi::c_void,
++            i32,
++            *mut core::ffi::c_void,
++        ) -> i32,
++    ) -> i32 {
++        // We can't use self.set_pb_fn_ptr here as the fn type sig is different.
++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::SyntaxFilterAva as i32, value_ptr) } ++ } ++ ++ pub fn register_syntax_values2keys_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxValuesToKeys, ptr) ++ } ++ ++ pub fn register_syntax_assertion2keys_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxAssertion2KeysAva, ptr) ++ } ++ ++ pub fn register_syntax_flags_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxFlags, ptr) ++ } ++ ++ pub fn register_syntax_oid(&mut self, ptr: *const c_char) -> i32 { ++ self.set_pb_char_ptr(PblockType::SyntaxOid, ptr) ++ } ++ ++ pub fn register_syntax_compare_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxCompare, ptr) ++ } ++ ++ pub fn register_syntax_validate_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::SyntaxValidate, ptr) ++ } ++ ++ pub fn register_syntax_names(&mut self, arr_ptr: *const *const c_char) -> i32 { ++ self.set_pb_char_arr_ptr(PblockType::SyntaxNames, arr_ptr) ++ } ++ ++ pub fn register_mr_filter_create_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRFilterCreate, ptr) ++ } ++ ++ pub fn register_mr_indexer_create_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRIndexerCreate, ptr) ++ } ++ ++ pub fn register_mr_filter_ava_fn( ++ &mut self, ++ ptr: extern "C" fn( ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ i32, ++ *mut core::ffi::c_void, ++ ) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRFilterAva as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_filter_sub_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRFilterSub, ptr) ++ } ++ ++ pub fn register_mr_values2keys_fn( ++ &mut self, ++ ptr: extern "C" fn( ++ *const core::ffi::c_void, ++ *const core::ffi::c_void, ++ *mut core::ffi::c_void, ++ i32, ++ ) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRValuesToKeys as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_assertion2keys_ava_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysAva, ptr) ++ } ++ ++ pub fn register_mr_assertion2keys_sub_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void) -> i32, ++ ) -> i32 { ++ self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysSub, ptr) ++ } ++ ++ pub fn register_mr_compare_fn( ++ &mut self, ++ ptr: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32, ++ ) -> i32 { ++ let value_ptr: *const libc::c_void = ptr as *const libc::c_void; ++ unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRCompare as i32, value_ptr) } ++ } ++ ++ pub fn register_mr_names(&mut self, arr_ptr: *const *const c_char) -> i32 { ++ self.set_pb_char_arr_ptr(PblockType::MRNames, arr_ptr) ++ } ++ ++ pub fn get_op_add_entryref(&mut self) 
-> Result { ++ self.get_value_ptr(PblockType::AddEntry) ++ .map(|ptr| EntryRef::new(ptr)) ++ } ++ ++ pub fn set_plugin_version(&mut self, vers: PluginVersion) -> i32 { ++ self.set_pb_char_ptr(PblockType::Version, vers.to_char_ptr()) ++ } ++ ++ pub fn set_op_backend(&mut self, be: &BackendRef) -> i32 { ++ unsafe { slapi_pblock_set(self.raw_pb, PblockType::Backend as i32, be.as_ptr()) } ++ } ++ ++ pub fn get_plugin_identity(&mut self) -> *const libc::c_void { ++ self.get_value_ptr(PblockType::Identity) ++ .unwrap_or(std::ptr::null()) ++ } ++ ++ pub fn get_op_result(&mut self) -> i32 { ++ self.get_value_i32(PblockType::OpResult).unwrap_or(-1) ++ } ++} +diff --git a/src/slapi_r_plugin/src/plugin.rs b/src/slapi_r_plugin/src/plugin.rs +new file mode 100644 +index 000000000..bf47779bc +--- /dev/null ++++ b/src/slapi_r_plugin/src/plugin.rs +@@ -0,0 +1,117 @@ ++use crate::constants::{PluginType, PLUGIN_DEFAULT_PRECEDENCE}; ++use crate::dn::Sdn; ++use crate::entry::EntryRef; ++use crate::error::LDAPError; ++use crate::error::PluginError; ++use crate::pblock::PblockRef; ++use crate::task::Task; ++use libc; ++use std::ffi::CString; ++use std::os::raw::c_char; ++use std::ptr; ++ ++extern "C" { ++ fn slapi_register_plugin_ext( ++ plugintype: *const c_char, ++ enabled: i32, ++ initsymbol: *const c_char, ++ initfunc: *const libc::c_void, ++ name: *const c_char, ++ argv: *const *const c_char, ++ group_identity: *const libc::c_void, ++ precedence: i32, ++ ) -> i32; ++} ++ ++pub struct PluginIdRef { ++ pub raw_pid: *const libc::c_void, ++} ++ ++pub fn register_plugin_ext( ++ ptype: PluginType, ++ plugname: &str, ++ initfnname: &str, ++ initfn: extern "C" fn(*const libc::c_void) -> i32, ++) -> i32 { ++ let c_plugname = match CString::new(plugname) { ++ Ok(c) => c, ++ Err(_) => return 1, ++ }; ++ let c_initfnname = match CString::new(initfnname) { ++ Ok(c) => c, ++ Err(_) => return 1, ++ }; ++ let argv = [c_plugname.as_ptr(), ptr::null()]; ++ let value_ptr: *const libc::c_void = initfn as *const libc::c_void; ++ ++ unsafe { ++ slapi_register_plugin_ext( ++ ptype.to_char_ptr(), ++ 1, ++ c_initfnname.as_ptr(), ++ value_ptr, ++ c_plugname.as_ptr(), ++ &argv as *const *const c_char, ++ ptr::null(), ++ PLUGIN_DEFAULT_PRECEDENCE, ++ ) ++ } ++} ++ ++pub trait SlapiPlugin3 { ++ // We require a newer rust for default associated types. 
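++    // Until that lands, implementors spell it out; a hypothetical no-op plugin
++    // would be:
++    //     impl SlapiPlugin3 for MyPlugin {
++    //         type TaskData = ();
++    //         fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { Ok(()) }
++    //         fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { Ok(()) }
++    //     }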
++ // type TaskData = (); ++ type TaskData; ++ ++ fn has_pre_modify() -> bool { ++ false ++ } ++ ++ fn has_post_modify() -> bool { ++ false ++ } ++ ++ fn has_pre_add() -> bool { ++ false ++ } ++ ++ fn has_post_add() -> bool { ++ false ++ } ++ ++ fn has_betxn_pre_modify() -> bool { ++ false ++ } ++ ++ fn has_betxn_pre_add() -> bool { ++ false ++ } ++ ++ fn has_task_handler() -> Option<&'static str> { ++ None ++ } ++ ++ fn start(_pb: &mut PblockRef) -> Result<(), PluginError>; ++ ++ fn close(_pb: &mut PblockRef) -> Result<(), PluginError>; ++ ++ fn betxn_pre_modify(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ Err(PluginError::Unimplemented) ++ } ++ ++ fn betxn_pre_add(_pb: &mut PblockRef) -> Result<(), PluginError> { ++ Err(PluginError::Unimplemented) ++ } ++ ++ fn task_validate(_e: &EntryRef) -> Result { ++ Err(LDAPError::Other) ++ } ++ ++ fn task_be_dn_hint(_data: &Self::TaskData) -> Option { ++ None ++ } ++ ++ fn task_handler(_task: &Task, _data: Self::TaskData) -> Result { ++ Err(PluginError::Unimplemented) ++ } ++} +diff --git a/src/slapi_r_plugin/src/search.rs b/src/slapi_r_plugin/src/search.rs +new file mode 100644 +index 000000000..e0e2a1fd7 +--- /dev/null ++++ b/src/slapi_r_plugin/src/search.rs +@@ -0,0 +1,127 @@ ++use crate::dn::SdnRef; ++use crate::error::{LDAPError, PluginError}; ++use crate::pblock::Pblock; ++use crate::plugin::PluginIdRef; ++use std::ffi::CString; ++use std::ops::Deref; ++use std::os::raw::c_char; ++ ++extern "C" { ++ fn slapi_search_internal_set_pb_ext( ++ pb: *const libc::c_void, ++ base: *const libc::c_void, ++ scope: i32, ++ filter: *const c_char, ++ attrs: *const *const c_char, ++ attrsonly: i32, ++ controls: *const *const libc::c_void, ++ uniqueid: *const c_char, ++ plugin_ident: *const libc::c_void, ++ op_flags: i32, ++ ); ++ fn slapi_search_internal_callback_pb( ++ pb: *const libc::c_void, ++ cb_data: *const libc::c_void, ++ cb_result_ptr: *const libc::c_void, ++ cb_entry_ptr: *const libc::c_void, ++ cb_referral_ptr: *const libc::c_void, ++ ) -> i32; ++} ++ ++#[derive(Debug)] ++#[repr(i32)] ++pub enum SearchScope { ++ Base = 0, ++ Onelevel = 1, ++ Subtree = 2, ++} ++ ++enum SearchType { ++ InternalMapEntry( ++ extern "C" fn(*const core::ffi::c_void, *const core::ffi::c_void) -> i32, ++ *const libc::c_void, ++ ), ++ // InternalMapResult ++ // InternalMapReferral ++} ++ ++pub struct Search { ++ pb: Pblock, ++ // This is so that the char * to the pb lives long enough as ds won't clone it. ++ filter: Option, ++ stype: SearchType, ++} ++ ++pub struct SearchResult { ++ pb: Pblock, ++} ++ ++impl Search { ++ pub fn new_map_entry( ++ basedn: &SdnRef, ++ scope: SearchScope, ++ filter: &str, ++ plugin_id: PluginIdRef, ++ cbdata: &T, ++ mapfn: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32, ++ ) -> Result ++ where ++ T: Send, ++ { ++ // Configure a search based on the requested type. 
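++        // A hypothetical caller does roughly:
++        //     let search = Search::new_map_entry(&basedn, SearchScope::Subtree,
++        //         "(objectClass=*)", plugin_id(), &cb_data, my_mapfn)?;
++        //     let result = search.execute()?;
++        // Keeping the filter CString inside the returned Search keeps the pointer
++        // we hand to the server below valid until execute() runs.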
++ let pb = Pblock::new(); ++ let raw_filter = CString::new(filter).map_err(|_| PluginError::InvalidFilter)?; ++ ++ unsafe { ++ slapi_search_internal_set_pb_ext( ++ pb.deref().as_ptr(), ++ basedn.as_ptr(), ++ scope as i32, ++ raw_filter.as_ptr(), ++ std::ptr::null(), ++ 0, ++ std::ptr::null(), ++ std::ptr::null(), ++ plugin_id.raw_pid, ++ 0, ++ ) ++ }; ++ ++ Ok(Search { ++ pb, ++ filter: Some(raw_filter), ++ stype: SearchType::InternalMapEntry(mapfn, cbdata as *const _ as *const libc::c_void), ++ }) ++ } ++ ++ // Consume self, do the search ++ pub fn execute(self) -> Result { ++ // Deconstruct self ++ let Search { ++ mut pb, ++ filter: _filter, ++ stype, ++ } = self; ++ ++ // run the search based on the type. ++ match stype { ++ SearchType::InternalMapEntry(mapfn, cbdata) => unsafe { ++ slapi_search_internal_callback_pb( ++ pb.deref().as_ptr(), ++ cbdata, ++ std::ptr::null(), ++ mapfn as *const libc::c_void, ++ std::ptr::null(), ++ ); ++ }, ++ }; ++ ++ // now check the result, and map to what we need. ++ let result = pb.get_op_result(); ++ ++ match result { ++ 0 => Ok(SearchResult { pb }), ++ _e => Err(LDAPError::from(result)), ++ } ++ } ++} +diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs +new file mode 100644 +index 000000000..e7d5c01bd +--- /dev/null ++++ b/src/slapi_r_plugin/src/syntax_plugin.rs +@@ -0,0 +1,169 @@ ++use crate::ber::BerValRef; ++// use crate::constants::FilterType; ++use crate::error::PluginError; ++use crate::pblock::PblockRef; ++use crate::value::{ValueArray, ValueArrayRef}; ++use std::cmp::Ordering; ++use std::ffi::CString; ++use std::iter::once; ++use std::os::raw::c_char; ++use std::ptr; ++ ++// need a call to slapi_register_plugin_ext ++ ++extern "C" { ++ fn slapi_matchingrule_register(mr: *const slapi_matchingRuleEntry) -> i32; ++} ++ ++#[repr(C)] ++struct slapi_matchingRuleEntry { ++ mr_oid: *const c_char, ++ _mr_oidalias: *const c_char, ++ mr_name: *const c_char, ++ mr_desc: *const c_char, ++ mr_syntax: *const c_char, ++ _mr_obsolete: i32, // unused ++ mr_compat_syntax: *const *const c_char, ++} ++ ++pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char { ++ let n = CString::new(name) ++ .expect("An invalid string has been hardcoded!") ++ .into_boxed_c_str(); ++ let n_ptr = n.as_ptr(); ++ // Now we intentionally leak the name here, and the pointer will remain valid. ++ Box::leak(n); ++ n_ptr ++} ++ ++pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char { ++ let n_arr: Vec = names ++ .iter() ++ .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!")) ++ .collect(); ++ let n_arr = n_arr.into_boxed_slice(); ++ let n_ptr_arr: Vec<*const c_char> = n_arr ++ .iter() ++ .map(|v| v.as_ptr()) ++ .chain(once(ptr::null())) ++ .collect(); ++ let n_ptr_arr = n_ptr_arr.into_boxed_slice(); ++ ++ // Now we intentionally leak these names here, ++ let _r_n_arr = Box::leak(n_arr); ++ let r_n_ptr_arr = Box::leak(n_ptr_arr); ++ ++ let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char; ++ name_ptr ++} ++ ++// oid - the oid of the matching rule ++// name - the name of the mr ++// desc - description ++// syntax - the syntax of the attribute we apply to ++// compat_syntax - extended syntaxes f other attributes we may apply to. 
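++// For illustration only, a registration with well-known LDAP OIDs might look
++// like:
++//     matchingrule_register("2.5.13.2", "caseIgnoreMatch", "case insensitive match",
++//         "1.3.6.1.4.1.1466.115.121.1.15", &["1.3.6.1.4.1.1466.115.121.1.26"]);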
++pub unsafe fn matchingrule_register( ++ oid: &str, ++ name: &str, ++ desc: &str, ++ syntax: &str, ++ compat_syntax: &[&str], ++) -> i32 { ++ let oid_ptr = name_to_leaking_char(oid); ++ let name_ptr = name_to_leaking_char(name); ++ let desc_ptr = name_to_leaking_char(desc); ++ let syntax_ptr = name_to_leaking_char(syntax); ++ let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax); ++ ++ let new_mr = slapi_matchingRuleEntry { ++ mr_oid: oid_ptr, ++ _mr_oidalias: ptr::null(), ++ mr_name: name_ptr, ++ mr_desc: desc_ptr, ++ mr_syntax: syntax_ptr, ++ _mr_obsolete: 0, ++ mr_compat_syntax: compat_syntax_ptr, ++ }; ++ ++ let new_mr_ptr = &new_mr as *const _; ++ slapi_matchingrule_register(new_mr_ptr) ++} ++ ++pub trait SlapiSyntaxPlugin1 { ++ fn attr_oid() -> &'static str; ++ ++ fn attr_compat_oids() -> Vec<&'static str>; ++ ++ fn attr_supported_names() -> Vec<&'static str>; ++ ++ fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError>; ++ ++ fn eq_mr_oid() -> &'static str; ++ ++ fn eq_mr_name() -> &'static str; ++ ++ fn eq_mr_desc() -> &'static str; ++ ++ fn eq_mr_supported_names() -> Vec<&'static str>; ++ ++ fn filter_ava_eq( ++ _pb: &mut PblockRef, ++ _bval_filter: &BerValRef, ++ _vals: &ValueArrayRef, ++ ) -> Result { ++ Ok(false) ++ } ++ ++ fn eq_mr_filter_values2keys( ++ _pb: &mut PblockRef, ++ _vals: &ValueArrayRef, ++ ) -> Result; ++} ++ ++pub trait SlapiOrdMr: SlapiSyntaxPlugin1 { ++ fn ord_mr_oid() -> Option<&'static str> { ++ None ++ } ++ ++ fn ord_mr_name() -> &'static str { ++ panic!("Unimplemented ord_mr_name for SlapiOrdMr") ++ } ++ ++ fn ord_mr_desc() -> &'static str { ++ panic!("Unimplemented ord_mr_desc for SlapiOrdMr") ++ } ++ ++ fn ord_mr_supported_names() -> Vec<&'static str> { ++ panic!("Unimplemented ord_mr_supported_names for SlapiOrdMr") ++ } ++ ++ fn filter_ava_ord( ++ _pb: &mut PblockRef, ++ _bval_filter: &BerValRef, ++ _vals: &ValueArrayRef, ++ ) -> Result, PluginError> { ++ Ok(None) ++ } ++ ++ fn filter_compare(_a: &BerValRef, _b: &BerValRef) -> Ordering { ++ panic!("Unimplemented filter_compare") ++ } ++} ++ ++pub trait SlapiSubMr: SlapiSyntaxPlugin1 { ++ fn sub_mr_oid() -> Option<&'static str> { ++ None ++ } ++ ++ fn sub_mr_name() -> &'static str { ++ panic!("Unimplemented sub_mr_name for SlapiSubMr") ++ } ++ ++ fn sub_mr_desc() -> &'static str { ++ panic!("Unimplemented sub_mr_desc for SlapiSubMr") ++ } ++ ++ fn sub_mr_supported_names() -> Vec<&'static str> { ++ panic!("Unimplemented sub_mr_supported_names for SlapiSubMr") ++ } ++} +diff --git a/src/slapi_r_plugin/src/task.rs b/src/slapi_r_plugin/src/task.rs +new file mode 100644 +index 000000000..251ae4d82 +--- /dev/null ++++ b/src/slapi_r_plugin/src/task.rs +@@ -0,0 +1,148 @@ ++use crate::constants::LDAP_SUCCESS; ++use crate::entry::EntryRef; ++use crate::pblock::PblockRef; ++use std::ffi::CString; ++use std::os::raw::c_char; ++use std::thread; ++use std::time::Duration; ++ ++extern "C" { ++ fn slapi_plugin_new_task(ndn: *const c_char, arg: *const libc::c_void) -> *const libc::c_void; ++ fn slapi_task_dec_refcount(task: *const libc::c_void); ++ fn slapi_task_inc_refcount(task: *const libc::c_void); ++ fn slapi_task_get_refcount(task: *const libc::c_void) -> i32; ++ fn slapi_task_begin(task: *const libc::c_void, rc: i32); ++ fn slapi_task_finish(task: *const libc::c_void, rc: i32); ++ ++ fn slapi_plugin_task_register_handler( ++ ident: *const c_char, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, 
++ ) -> i32, ++ pb: *const libc::c_void, ++ ) -> i32; ++ fn slapi_plugin_task_unregister_handler( ++ ident: *const c_char, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++ ) -> i32; ++ fn slapi_task_set_destructor_fn( ++ task: *const libc::c_void, ++ cb: extern "C" fn(*const libc::c_void), ++ ); ++} ++ ++pub struct TaskRef { ++ raw_task: *const libc::c_void, ++} ++ ++pub struct Task { ++ value: TaskRef, ++} ++ ++// Because raw pointers are not send, but we need to send the task to a thread ++// as part of the task thread spawn, we need to convince the compiler this ++// action is okay. It's probably not because C is terrible, BUT provided the ++// server and framework only touch the ref count, we are okay. ++unsafe impl Send for Task {} ++ ++pub fn task_register_handler_fn( ++ ident: &'static str, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++ pb: &mut PblockRef, ++) -> i32 { ++ let cstr = CString::new(ident).expect("Invalid ident provided"); ++ unsafe { slapi_plugin_task_register_handler(cstr.as_ptr(), cb, pb.as_ptr()) } ++} ++ ++pub fn task_unregister_handler_fn( ++ ident: &'static str, ++ cb: extern "C" fn( ++ *const libc::c_void, ++ *const libc::c_void, ++ *const libc::c_void, ++ *mut i32, ++ *mut c_char, ++ *const libc::c_void, ++ ) -> i32, ++) -> i32 { ++ let cstr = CString::new(ident).expect("Invalid ident provided"); ++ unsafe { slapi_plugin_task_unregister_handler(cstr.as_ptr(), cb) } ++} ++ ++impl Task { ++ pub fn new(e: &EntryRef, arg: *const libc::c_void) -> Self { ++ let sdn = e.get_sdnref(); ++ let ndn = unsafe { sdn.as_ndnref() }; ++ let raw_task = unsafe { slapi_plugin_new_task(ndn.as_ptr(), arg) }; ++ unsafe { slapi_task_inc_refcount(raw_task) }; ++ Task { ++ value: TaskRef { raw_task }, ++ } ++ } ++ ++ pub fn begin(&self) { ++ // Indicate we begin ++ unsafe { slapi_task_begin(self.value.raw_task, 1) } ++ } ++ ++ pub fn register_destructor_fn(&mut self, cb: extern "C" fn(*const libc::c_void)) { ++ unsafe { ++ slapi_task_set_destructor_fn(self.value.raw_task, cb); ++ } ++ } ++ ++ pub fn success(self) { ++ unsafe { ++ slapi_task_finish(self.value.raw_task, LDAP_SUCCESS); ++ } ++ } ++ ++ pub fn error(self, rc: i32) { ++ unsafe { slapi_task_finish(self.value.raw_task, rc) }; ++ } ++} ++ ++impl Drop for Task { ++ fn drop(&mut self) { ++ unsafe { ++ slapi_task_dec_refcount(self.value.raw_task); ++ } ++ } ++} ++ ++impl TaskRef { ++ pub fn new(raw_task: *const libc::c_void) -> Self { ++ TaskRef { raw_task } ++ } ++ ++ pub fn block(&self) { ++ // wait for the refcount to go to 0. 
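++        // Task::drop decrements the refcount; here we just poll every 250ms
++        // until the server has released its remaining references.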
++        let d = Duration::from_millis(250);
++        loop {
++            if unsafe { slapi_task_get_refcount(self.raw_task) } > 0 {
++                thread::sleep(d);
++            } else {
++                return;
++            }
++        }
++    }
++}
+diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
+new file mode 100644
+index 000000000..5a40dd279
+--- /dev/null
++++ b/src/slapi_r_plugin/src/value.rs
+@@ -0,0 +1,235 @@
++use crate::ber::{ol_berval, BerValRef};
++use crate::dn::Sdn;
++use std::convert::{From, TryFrom};
++use std::ffi::CString;
++use std::iter::once;
++use std::iter::FromIterator;
++use std::mem;
++use std::ops::Deref;
++use std::ptr;
++use uuid::Uuid;
++
++extern "C" {
++    fn slapi_value_new() -> *mut slapi_value;
++    fn slapi_value_free(v: *mut *const libc::c_void);
++}
++
++#[repr(C)]
++/// From ./ldap/servers/slapd/slap.h
++pub struct slapi_value {
++    bv: ol_berval,
++    v_csnset: *const libc::c_void,
++    v_flags: u32,
++}
++
++pub struct ValueArrayRefIter<'a> {
++    idx: isize,
++    va_ref: &'a ValueArrayRef,
++}
++
++impl<'a> Iterator for ValueArrayRefIter<'a> {
++    type Item = ValueRef;
++
++    #[inline]
++    fn next(&mut self) -> Option<Self::Item> {
++        // So long as va_ref.raw_slapi_val + offset != NULL, continue.
++        // this is so wildly unsafe, but you know, that's just daily life of C anyway ...
++        unsafe {
++            let n_ptr: *const slapi_value = *(self.va_ref.raw_slapi_val.offset(self.idx));
++            if n_ptr.is_null() {
++                None
++            } else {
++                // Advance the iter.
++                self.idx = self.idx + 1;
++                let raw_berval: *const ol_berval = &(*n_ptr).bv as *const _;
++                Some(ValueRef {
++                    raw_slapi_val: n_ptr,
++                    bvr: BerValRef { raw_berval },
++                })
++            }
++        }
++    }
++}
++
++pub struct ValueArrayRef {
++    raw_slapi_val: *const *const slapi_value,
++}
++
++impl ValueArrayRef {
++    pub fn new(raw_slapi_val: *const libc::c_void) -> Self {
++        let raw_slapi_val = raw_slapi_val as *const _ as *const *const slapi_value;
++        ValueArrayRef { raw_slapi_val }
++    }
++
++    pub fn iter(&self) -> ValueArrayRefIter {
++        ValueArrayRefIter {
++            idx: 0,
++            va_ref: &self,
++        }
++    }
++
++    pub fn first(&self) -> Option<ValueRef> {
++        self.iter().next()
++    }
++}
++
++pub struct ValueArray {
++    data: Vec<*mut slapi_value>,
++    vrf: ValueArrayRef,
++}
++
++impl Deref for ValueArray {
++    type Target = ValueArrayRef;
++
++    fn deref(&self) -> &Self::Target {
++        &self.vrf
++    }
++}
++
++impl ValueArray {
++    /// Take ownership of this value array, returning the pointer to the inner memory
++    /// and forgetting about it for ourself. This prevents the drop handler from freeing
++    /// the slapi_value, ie we are giving this to the 389-ds framework to manage from now.
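++    // (Editor's sketch of a hypothetical call site, using the From<&Uuid> and
++    // FromIterator<Value> impls defined in this file:
++    //
++    //     let va: ValueArray = std::iter::once(Value::from(&u)).collect();
++    //     let raw = unsafe { va.take_ownership() };
++    //
++    // `raw` is the NULL-terminated *const *const slapi_value array, and the
++    // server is now responsible for freeing it.)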
++    pub unsafe fn take_ownership(mut self) -> *const *const slapi_value {
++        let mut vs = Vec::new();
++        mem::swap(&mut self.data, &mut vs);
++        let bs = vs.into_boxed_slice();
++        Box::leak(bs) as *const _ as *const *const slapi_value
++    }
++}
++
++impl FromIterator<Value> for ValueArray {
++    fn from_iter<I: IntoIterator<Item = Value>>(iter: I) -> Self {
++        let data: Vec<*mut slapi_value> = iter
++            .into_iter()
++            .map(|v| unsafe { v.take_ownership() })
++            .chain(once(ptr::null_mut() as *mut slapi_value))
++            .collect();
++        let vrf = ValueArrayRef {
++            raw_slapi_val: data.as_ptr() as *const *const slapi_value,
++        };
++        ValueArray { data, vrf }
++    }
++}
++
++impl Drop for ValueArray {
++    fn drop(&mut self) {
++        self.data.drain(0..).for_each(|mut v| unsafe {
++            slapi_value_free(&mut v as *mut _ as *mut *const libc::c_void);
++        })
++    }
++}
++
++#[derive(Debug)]
++pub struct ValueRef {
++    raw_slapi_val: *const slapi_value,
++    bvr: BerValRef,
++}
++
++impl ValueRef {
++    pub(crate) unsafe fn as_ptr(&self) -> *const slapi_value {
++        // This is unsafe as the *const may outlive the value ref.
++        self.raw_slapi_val
++    }
++}
++
++pub struct Value {
++    value: ValueRef,
++}
++
++impl Value {
++    pub unsafe fn take_ownership(mut self) -> *mut slapi_value {
++        let mut n_ptr = ptr::null();
++        mem::swap(&mut self.value.raw_slapi_val, &mut n_ptr);
++        n_ptr as *mut slapi_value
++        // Now drop will run and not care.
++    }
++}
++
++impl Drop for Value {
++    fn drop(&mut self) {
++        if self.value.raw_slapi_val != ptr::null() {
++            // free it
++            unsafe {
++                slapi_value_free(
++                    &mut self.value.raw_slapi_val as *mut _ as *mut *const libc::c_void,
++                );
++            }
++        }
++    }
++}
++
++impl Deref for Value {
++    type Target = ValueRef;
++
++    fn deref(&self) -> &Self::Target {
++        &self.value
++    }
++}
++
++impl From<&Uuid> for Value {
++    fn from(u: &Uuid) -> Self {
++        // turn the uuid to a str
++        let u_str = u.to_hyphenated().to_string();
++        let len = u_str.len();
++        let cstr = CString::new(u_str)
++            .expect("Invalid uuid, should never occur!")
++            .into_boxed_c_str();
++        let s_ptr = cstr.as_ptr();
++        Box::leak(cstr);
++
++        let mut v = unsafe { slapi_value_new() };
++        unsafe {
++            (*v).bv.len = len;
++            (*v).bv.data = s_ptr as *const u8;
++        }
++
++        Value {
++            value: ValueRef::new(v as *const libc::c_void),
++        }
++    }
++}
++
++impl ValueRef {
++    pub fn new(raw_slapi_val: *const libc::c_void) -> Self {
++        let raw_slapi_val = raw_slapi_val as *const _ as *const slapi_value;
++        let raw_berval: *const ol_berval = unsafe { &(*raw_slapi_val).bv as *const _ };
++        ValueRef {
++            raw_slapi_val,
++            bvr: BerValRef { raw_berval },
++        }
++    }
++}
++
++impl TryFrom<&ValueRef> for String {
++    type Error = ();
++
++    fn try_from(value: &ValueRef) -> Result<Self, Self::Error> {
++        value.bvr.into_string().ok_or(())
++    }
++}
++
++impl TryFrom<&ValueRef> for Sdn {
++    type Error = ();
++
++    fn try_from(value: &ValueRef) -> Result<Self, Self::Error> {
++        // We need to do a middle step of moving through a cstring as
++        // bervals may not always have a trailing NULL, and sdn expects one.
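++        // (Editor's note: the berval carries an explicit length rather than a
++        // terminator, while the C-side Sdn constructor consumes NUL-terminated
++        // strings, so into_cstring() copies the bytes and appends the NUL
++        // before the conversion below.)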
++        let cdn = value.bvr.into_cstring().ok_or(())?;
++        Ok(cdn.as_c_str().into())
++    }
++}
++
++impl AsRef<ValueRef> for ValueRef {
++    fn as_ref(&self) -> &ValueRef {
++        &self
++    }
++}
++
++impl Deref for ValueRef {
++    type Target = BerValRef;
++
++    fn deref(&self) -> &Self::Target {
++        &self.bvr
++    }
++}
+--
+2.26.3
+
diff --git a/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch b/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch
deleted file mode 100644
index 411958e..0000000
--- a/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch
+++ /dev/null
@@ -1,513 +0,0 @@
-From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001
-From: progier389 <72748589+progier389@users.noreply.github.com>
-Date: Thu, 12 Nov 2020 18:50:04 +0100
-Subject: [PATCH 3/3] do not add referrals for masters with different data
- generation #2054 (#4427)
-
-Bug description:
-The problem is that some operation mandatory in the usual cases are
-also performed when replication cannot take place because the
-database set are differents (i.e: RUV generation ids are different)
-
-One of the issue is that the csn generator state is updated when
-starting a replication session (it is a problem when trying to
-reset the time skew, as freshly reinstalled replicas get infected
-by the old ones)
-
-A second issue is that the RUV got updated when ending a replication session
-(which may add replica that does not share the same data set,
-then update operations on consumer retun referrals towards wrong masters
-
-Fix description:
-The fix checks the RUVs generation id before updating the csn generator
-and before updating the RUV.
-
-Reviewed by: mreynolds
- firstyear
- vashirov
-
-Platforms tested: F32
----
- .../suites/replication/regression_test.py    | 290 ++++++++++++++++++
- ldap/servers/plugins/replication/repl5.h     |   1 +
- .../plugins/replication/repl5_inc_protocol.c |  20 +-
- .../plugins/replication/repl5_replica.c      |  39 ++-
- src/lib389/lib389/dseldif.py                 |  37 +++
- 5 files changed, 368 insertions(+), 19 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
-index 14b9d6a44..a72af6b30 100644
---- a/dirsrvtests/tests/suites/replication/regression_test.py
-+++ b/dirsrvtests/tests/suites/replication/regression_test.py
-@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
- from lib389.pwpolicy import PwPolicyManager
- from lib389.utils import *
- from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
-+from lib389.topologies import topology_m2c2 as topo_m2c2
- from lib389._constants import *
- from lib389.idm.organizationalunit import OrganizationalUnits
- from lib389.idm.user import UserAccount
-@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
- from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
- from lib389.agreement import Agreements
- from lib389 import pid_from_file
-+from lib389.dseldif import *
-
-
- pytestmark = pytest.mark.tier1
-@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
-     verify_keepalive_entries(topo_m2, True);
-
-
-+def get_agreement(agmts, consumer):
-+    # Get agreement towards consumer among the agremment list
-+    for agmt in agmts.list():
-+        if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): -+ return agmt -+ return None; -+ -+ -+def test_ruv_url_not_added_if_different_uuid(topo_m2c2): -+ """Check that RUV url is not updated if RUV generation uuid are different -+ -+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 -+ :setup: Two masters + two consumers replication setup -+ :steps: -+ 1. Generate ldif without replication data -+ 2. Init both masters from that ldif -+ (to clear the ruvs and generates different generation uuid) -+ 3. Perform on line init from master1 to consumer1 -+ and from master2 to consumer2 -+ 4. Perform update on both masters -+ 5. Check that c1 RUV does not contains URL towards m2 -+ 6. Check that c2 RUV does contains URL towards m2 -+ 7. Perform on line init from master1 to master2 -+ 8. Perform update on master2 -+ 9. Check that c1 RUV does contains URL towards m2 -+ :expectedresults: -+ 1. No error while generating ldif -+ 2. No error while importing the ldif file -+ 3. No error and Initialization done. -+ 4. No error -+ 5. master2 replicaid should not be in the consumer1 RUV -+ 6. master2 replicaid should be in the consumer2 RUV -+ 7. No error and Initialization done. -+ 8. No error -+ 9. master2 replicaid should be in the consumer1 RUV -+ -+ """ -+ -+ # Variables initialization -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ -+ m1 = topo_m2c2.ms["master1"] -+ m2 = topo_m2c2.ms["master2"] -+ c1 = topo_m2c2.cs["consumer1"] -+ c2 = topo_m2c2.cs["consumer2"] -+ -+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) -+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) -+ -+ replicid_m2 = replica_m2.get_rid() -+ -+ agmts_m1 = Agreements(m1, replica_m1.dn) -+ agmts_m2 = Agreements(m2, replica_m2.dn) -+ -+ m1_m2 = get_agreement(agmts_m1, m2) -+ m1_c1 = get_agreement(agmts_m1, c1) -+ m1_c2 = get_agreement(agmts_m1, c2) -+ m2_m1 = get_agreement(agmts_m2, m1) -+ m2_c1 = get_agreement(agmts_m2, c1) -+ m2_c2 = get_agreement(agmts_m2, c2) -+ -+ # Step 1: Generate ldif without replication data -+ m1.stop() -+ m2.stop() -+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=ldif_file, encrypt=False) -+ # Remove replication metadata that are still in the ldif -+ # _remove_replication_data(ldif_file) -+ -+ # Step 2: Init both masters from that ldif -+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m1.start() -+ m2.start() -+ -+ # Step 3: Perform on line init from master1 to consumer1 -+ # and from master2 to consumer2 -+ m1_c1.begin_reinit() -+ m2_c2.begin_reinit() -+ (done, error) = m1_c1.wait_reinit() -+ assert done is True -+ assert error is False -+ (done, error) = m2_c2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 4: Perform update on both masters -+ repl.test_replication(m1, c1) -+ repl.test_replication(m2, c2) -+ -+ # Step 5: Check that c1 RUV does not contains URL towards m2 -+ ruv = replica_c1.get_ruv() -+ log.debug(f"c1 RUV: {ruv}") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") -+ #Note: this assertion fails if issue 2054 is not fixed. 
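-+        # (Reaching here means c1's RUV carries a URL for m2's replica id
-+        # even though m1 and m2 were initialized with different generation
-+        # ids, i.e. exactly the referral bug this regression test guards
-+        # against, so fail explicitly.)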
-+ assert False -+ -+ # Step 6: Check that c2 RUV does contains URL towards m2 -+ ruv = replica_c2.get_ruv() -+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ assert False -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ -+ -+ # Step 7: Perform on line init from master1 to master2 -+ m1_m2.begin_reinit() -+ (done, error) = m1_m2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 8: Perform update on master2 -+ repl.test_replication(m2, c1) -+ -+ # Step 9: Check that c1 RUV does contains URL towards m2 -+ ruv = replica_c1.get_ruv() -+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") -+ url=ruv._rid_url.get(replica_m2.get_rid()) -+ if (url == None): -+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); -+ assert False -+ else: -+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); -+ -+ -+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): -+ """Check that csngen remote offset is not updated if RUV generation uuid are different -+ -+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 -+ :setup: Two masters + two consumers replication setup -+ :steps: -+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew -+ 2. Generate ldif without replication data -+ 3. Increase time skew on master2 -+ 4. Init both masters from that ldif -+ (to clear the ruvs and generates different generation uuid) -+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2 -+ 6. Perform update on both masters -+ 7: Check that c1 has no time skew -+ 8: Check that c2 has time skew -+ 9. Init master2 from master1 -+ 10. Perform update on master2 -+ 11. Check that c1 has time skew -+ :expectedresults: -+ 1. No error -+ 2. No error while generating ldif -+ 3. No error -+ 4. No error while importing the ldif file -+ 5. No error and Initialization done. -+ 6. No error -+ 7. c1 time skew should be lesser than threshold -+ 8. c2 time skew should be higher than threshold -+ 9. No error and Initialization done. -+ 10. No error -+ 11. 
c1 time skew should be higher than threshold -+ -+ """ -+ -+ # Variables initialization -+ repl = ReplicationManager(DEFAULT_SUFFIX) -+ -+ m1 = topo_m2c2.ms["master1"] -+ m2 = topo_m2c2.ms["master2"] -+ c1 = topo_m2c2.cs["consumer1"] -+ c2 = topo_m2c2.cs["consumer2"] -+ -+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) -+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) -+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) -+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) -+ -+ replicid_m2 = replica_m2.get_rid() -+ -+ agmts_m1 = Agreements(m1, replica_m1.dn) -+ agmts_m2 = Agreements(m2, replica_m2.dn) -+ -+ m1_m2 = get_agreement(agmts_m1, m2) -+ m1_c1 = get_agreement(agmts_m1, c1) -+ m1_c2 = get_agreement(agmts_m1, c2) -+ m2_m1 = get_agreement(agmts_m2, m1) -+ m2_c1 = get_agreement(agmts_m2, c1) -+ m2_c2 = get_agreement(agmts_m2, c2) -+ -+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew -+ m1_m2.pause() -+ m2_m1.pause() -+ -+ # Step 2: Generate ldif without replication data -+ m1.stop() -+ m2.stop() -+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=ldif_file, encrypt=False) -+ # Remove replication metadata that are still in the ldif -+ # _remove_replication_data(ldif_file) -+ -+ # Step 3: Increase time skew on master2 -+ timeSkew=6*3600 -+ # We can modify master2 time skew -+ # But the time skew on the consumer may be smaller -+ # depending on when the cnsgen generation time is updated -+ # and when first csn get replicated. -+ # Since we use timeSkew has threshold value to detect -+ # whether there are time skew or not, -+ # lets add a significative margin (longer than the test duration) -+ # to avoid any risk of erroneous failure -+ timeSkewMargin = 300 -+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) -+ -+ # Step 4: Init both masters from that ldif -+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ m1.start() -+ m2.start() -+ -+ # Step 5: Perform on line init from master1 to consumer1 -+ # and from master2 to consumer2 -+ m1_c1.begin_reinit() -+ m2_c2.begin_reinit() -+ (done, error) = m1_c1.wait_reinit() -+ assert done is True -+ assert error is False -+ (done, error) = m2_c2.wait_reinit() -+ assert done is True -+ assert error is False -+ -+ # Step 6: Perform update on both masters -+ repl.test_replication(m1, c1) -+ repl.test_replication(m2, c2) -+ -+ # Step 7: Check that c1 has no time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c1.stop() -+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] -+ c1_timeSkew = int(c1_nsState['time_skew']) -+ log.debug(f"c1 time skew: {c1_timeSkew}") -+ if (c1_timeSkew >= timeSkew): -+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") -+ assert False -+ c1.start() -+ -+ # Step 8: Check that c2 has time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c2.stop() -+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] -+ c2_timeSkew = int(c2_nsState['time_skew']) -+ log.debug(f"c2 time skew: {c2_timeSkew}") -+ if (c2_timeSkew < timeSkew): -+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}") -+ assert False -+ c2.start() -+ -+ # Step 9: Perform on line init from master1 to master2 -+ m1_c1.pause() -+ m1_m2.resume() -+ m1_m2.begin_reinit() -+ (done, error) = m1_m2.wait_reinit() -+ assert done is True -+ assert error 
is False -+ -+ # Step 10: Perform update on master2 -+ repl.test_replication(m2, c1) -+ -+ # Step 11: Check that c1 has time skew -+ # Stop server to insure that dse.ldif is uptodate -+ c1.stop() -+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] -+ c1_timeSkew = int(c1_nsState['time_skew']) -+ log.debug(f"c1 time skew: {c1_timeSkew}") -+ if (c1_timeSkew < timeSkew): -+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}") -+ assert False -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index b35f724c2..f1c596a3f 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -708,6 +708,7 @@ void replica_dump(Replica *r); - void replica_set_enabled(Replica *r, PRBool enable); - Replica *replica_get_replica_from_dn(const Slapi_DN *dn); - Replica *replica_get_replica_from_root(const char *repl_root); -+int replica_check_generation(Replica *r, const RUV *remote_ruv); - int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl); - Replica *replica_get_replica_for_op(Slapi_PBlock *pb); - /* the functions below manipulate replica hash */ -diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c -index 29b1fb073..af5e5897c 100644 ---- a/ldap/servers/plugins/replication/repl5_inc_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c -@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv) - } else if (NULL == remote_ruv) { - return_value = EXAMINE_RUV_PRISTINE_REPLICA; - } else { -- char *local_gen = NULL; -- char *remote_gen = ruv_get_replica_generation(remote_ruv); -- Object *local_ruv_obj; -- RUV *local_ruv; -- - PR_ASSERT(NULL != prp->replica); -- local_ruv_obj = replica_get_ruv(prp->replica); -- if (NULL != local_ruv_obj) { -- local_ruv = (RUV *)object_get_data(local_ruv_obj); -- PR_ASSERT(local_ruv); -- local_gen = ruv_get_replica_generation(local_ruv); -- object_release(local_ruv_obj); -- } -- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) { -- return_value = EXAMINE_RUV_GENERATION_MISMATCH; -- } else { -+ if (replica_check_generation(prp->replica, remote_ruv)) { - return_value = EXAMINE_RUV_OK; -+ } else { -+ return_value = EXAMINE_RUV_GENERATION_MISMATCH; - } -- slapi_ch_free((void **)&remote_gen); -- slapi_ch_free((void **)&local_gen); - } - return return_value; - } -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index f0ea0f8ef..7e56d6557 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv) - replica_unlock(r->repl_lock); - } - -+/* -+ * Check if replica generation is the same than the remote ruv one -+ */ -+int -+replica_check_generation(Replica *r, const RUV *remote_ruv) -+{ -+ int return_value; -+ char *local_gen = NULL; -+ char *remote_gen = ruv_get_replica_generation(remote_ruv); -+ Object *local_ruv_obj; -+ RUV *local_ruv; -+ -+ PR_ASSERT(NULL != r); -+ local_ruv_obj = replica_get_ruv(r); -+ if (NULL != local_ruv_obj) { -+ local_ruv = (RUV *)object_get_data(local_ruv_obj); -+ PR_ASSERT(local_ruv); -+ local_gen = ruv_get_replica_generation(local_ruv); -+ object_release(local_ruv_obj); -+ } -+ if (NULL == 
remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
-+        return_value = PR_FALSE;
-+    } else {
-+        return_value = PR_TRUE;
-+    }
-+    slapi_ch_free_string(&remote_gen);
-+    slapi_ch_free_string(&local_gen);
-+    return return_value;
-+}
-+
- /*
-  * Update one particular CSN in an RUV. This is meant to be called
-  * whenever (a) the server has processed a client operation and
-@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
-
-     PR_ASSERT(r && ruv);
-
-+    if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
-+    {
-+        return 0;
-+    }
-+
-     rc = ruv_get_max_csn(ruv, &csn);
-     if (rc != RUV_SUCCESS) {
-         return -1;
-@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
-     replica_lock(r->repl_lock);
-
-     local_ruv = (RUV *)object_get_data(r->repl_ruv);
--
--    if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
-+    if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
-+        !replica_check_generation(r, supplier_ruv)) {
-         replica_unlock(r->repl_lock);
-         return;
-     }
-diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
-index 10baba4d7..6850c9a8a 100644
---- a/src/lib389/lib389/dseldif.py
-+++ b/src/lib389/lib389/dseldif.py
-@@ -317,6 +317,43 @@ class DSEldif(DSLint):
-
-         return states
-
-+    def _increaseTimeSkew(self, suffix, timeSkew):
-+        # Increase csngen state local_offset by timeSkew
-+        # Warning: instance must be stopped before calling this function
-+        assert (timeSkew >= 0)
-+        nsState = self.readNsState(suffix)[0]
-+        self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
-+        oldNsState = self.get(nsState['dn'], 'nsState', True)
-+        self._instance.log.debug(f'oldNsState is {oldNsState}')
-+
-+        # Lets reencode the new nsState
-+        from lib389.utils import print_nice_time
-+        if pack('<h', 1) == pack('=h', 1):
-+            end = '<'
-+        elif pack('>h', 1) == pack('=h', 1):
-+            end = '>'
-+        else:
-+            raise ValueError("Unknown endian, unable to proceed")
-+
-+        thelen = len(oldNsState)
-+        if thelen <= 20:
-+            pad = 2  # padding for short H values
-+            timefmt = 'I'  # timevals are unsigned 32-bit int
-+        else:
-+            pad = 6  # padding for short H values
-+            timefmt = 'Q'  # timevals are unsigned 64-bit int
-+        fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
-+        newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
-+            int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
-+            int(nsState['remote_offset']), int(nsState['seq_num'])))
-+        newNsState = newNsState.decode('utf-8')
-+        self._instance.log.debug(f'newNsState is {newNsState}')
-+        # Lets replace the value.
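-+        # (nsState is stored base64-encoded in dse.ldif, hence the '::'
-+        # separator written below; _find_attr returns the offsets of the
-+        # entry and attribute lines inside the cached file contents.)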
-+        (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
-+        attr_i = next(iter(attr_data))
-+        self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
-+        self._update()
-+
-
- class FSChecks(DSLint):
-     """This is for the healthcheck feature, check commonly used system config files the
---
-2.26.2
-
diff --git a/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch b/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
new file mode 100644
index 0000000..8416726
--- /dev/null
+++ b/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
@@ -0,0 +1,373 @@
+From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
+From: Firstyear
+Date: Wed, 23 Sep 2020 09:19:34 +1000
+Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
+ (#4328)
+
+Bug Description: due to an oversight in how fixup tasks
+worked, the entryuuid fixup task did not work correctly and
+would not persist over restarts.
+
+Fix Description: Correctly implement entryuuid fixup.
+
+fixes: #4326
+
+Author: William Brown
+
+Review by: mreynolds (thanks!)
+---
+ .../tests/suites/entryuuid/basic_test.py      |  24 +++-
+ src/plugins/entryuuid/src/lib.rs              |  43 ++++++-
+ src/slapi_r_plugin/src/constants.rs           |   5 +
+ src/slapi_r_plugin/src/entry.rs               |   8 ++
+ src/slapi_r_plugin/src/lib.rs                 |   2 +
+ src/slapi_r_plugin/src/macros.rs              |   2 +-
+ src/slapi_r_plugin/src/modify.rs              | 118 ++++++++++++++++++
+ src/slapi_r_plugin/src/pblock.rs              |   7 ++
+ src/slapi_r_plugin/src/value.rs               |   4 +
+ 9 files changed, 206 insertions(+), 7 deletions(-)
+ create mode 100644 src/slapi_r_plugin/src/modify.rs
+
+diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
+index beb73701d..4d8a40909 100644
+--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
++++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
+@@ -12,6 +12,7 @@ import time
+ import shutil
+ from lib389.idm.user import nsUserAccounts, UserAccounts
+ from lib389.idm.account import Accounts
++from lib389.idm.domain import Domain
+ from lib389.topologies import topology_st as topology
+ from lib389.backend import Backends
+ from lib389.paths import Paths
+@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
+     3. Enable the entryuuid plugin
+     4. Run the fixup
+     5. Assert the entryuuid now exists
++    6. Restart and check they persist
+
+     :expectedresults:
+         1. Success
+@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
+         3. Success
+         4. Success
+         5. Suddenly EntryUUID!
++        6. Still has EntryUUID!
+     """
+     # 1. Disable the plugin
+     plug = EntryUUIDPlugin(topology.standalone)
+@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
+     assert(task.is_complete() and task.get_exit_code() == 0)
+     topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+-    # 5. Assert the uuid.
+-    euuid = account.get_attr_val_utf8('entryUUID')
+-    assert(euuid is not None)
++    # 5.1 Assert the uuid on the user.
++    euuid_user = account.get_attr_val_utf8('entryUUID')
++    assert(euuid_user is not None)
++
++    # 5.2 Assert it on the domain entry.
++    domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
++    euuid_domain = domain.get_attr_val_utf8('entryUUID')
++    assert(euuid_domain is not None)
++
++    # Assert it persists after a restart.
++    topology.standalone.restart()
++    # 6.1 Assert the uuid on the user.
++    euuid_user_2 = account.get_attr_val_utf8('entryUUID')
++    assert(euuid_user_2 == euuid_user)
++
++    # 6.2 Assert it on the domain entry.
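++    # (The restart forces both entries to be reloaded from the database
++    # rather than served from the pre-restart entry cache, so the equality
++    # checks prove the fixup was persisted, not merely cached.)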
++ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID') ++ assert(euuid_domain_2 == euuid_domain) + +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +index 6b5e8d1bb..92977db05 100644 +--- a/src/plugins/entryuuid/src/lib.rs ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid { + } + } + +-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> { +- assign_uuid(&mut e); +- Ok(()) ++pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> { ++ /* Supply a modification to the entry. */ ++ let sdn = e.get_sdnref(); ++ ++ /* Sanity check that entryuuid doesn't already exist */ ++ if e.contains_attr("entryUUID") { ++ log_error!( ++ ErrorLevel::Trace, ++ "skipping fixup for -> {}", ++ sdn.to_dn_string() ++ ); ++ return Ok(()); ++ } ++ ++ // Setup the modifications ++ let mut mods = SlapiMods::new(); ++ ++ let u: Uuid = Uuid::new_v4(); ++ let uuid_value = Value::from(&u); ++ let values: ValueArray = std::iter::once(uuid_value).collect(); ++ mods.append(ModType::Replace, "entryUUID", values); ++ ++ /* */ ++ let lmod = Modify::new(&sdn, mods, plugin_id())?; ++ ++ match lmod.execute() { ++ Ok(_) => { ++ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string()); ++ Ok(()) ++ } ++ Err(e) => { ++ log_error!( ++ ErrorLevel::Error, ++ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}", ++ sdn.to_dn_string(), ++ e ++ ); ++ Err(PluginError::GenericFailure) ++ } ++ } + } + + #[cfg(test)] +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +index cf76ccbdb..34845c2f4 100644 +--- a/src/slapi_r_plugin/src/constants.rs ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -5,6 +5,11 @@ use std::os::raw::c_char; + pub const LDAP_SUCCESS: i32 = 0; + pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50; + ++#[repr(i32)] ++pub enum OpFlags { ++ ByassReferrals = 0x0040_0000, ++} ++ + #[repr(i32)] + /// The set of possible function handles we can register via the pblock. These + /// values correspond to slapi-plugin.h. +diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs +index 034efe692..22ae45189 100644 +--- a/src/slapi_r_plugin/src/entry.rs ++++ b/src/slapi_r_plugin/src/entry.rs +@@ -70,6 +70,14 @@ impl EntryRef { + } + } + ++ pub fn contains_attr(&self, name: &str) -> bool { ++ let cname = CString::new(name).expect("invalid attr name"); ++ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) }; ++ ++ // If it's null, it's not present, so flip the logic. ++ !va.is_null() ++ } ++ + pub fn add_value(&mut self, a: &str, v: &ValueRef) { + // turn the attr to a c string. 
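+        // (Editor's note: the underlying slapi C API takes NUL-terminated
+        // *const c_char attribute names, while a Rust &str is length-counted
+        // and unterminated, hence the CString round trip used in this module.)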
+        // TODO FIX
+diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
+index d7fc22e52..076907bae 100644
+--- a/src/slapi_r_plugin/src/lib.rs
++++ b/src/slapi_r_plugin/src/lib.rs
+@@ -9,6 +9,7 @@ pub mod dn;
+ pub mod entry;
+ pub mod error;
+ pub mod log;
++pub mod modify;
+ pub mod pblock;
+ pub mod plugin;
+ pub mod search;
+@@ -24,6 +25,7 @@ pub mod prelude {
+     pub use crate::entry::EntryRef;
+     pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
+     pub use crate::log::{log_error, ErrorLevel};
++    pub use crate::modify::{ModType, Modify, SlapiMods};
+     pub use crate::pblock::{Pblock, PblockRef};
+     pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
+     pub use crate::search::{Search, SearchScope};
+diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
+index 030449632..bc8dfa60f 100644
+--- a/src/slapi_r_plugin/src/macros.rs
++++ b/src/slapi_r_plugin/src/macros.rs
+@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
+             let e = EntryRef::new(raw_e);
+             let data_ptr = raw_data as *const _;
+             let data = unsafe { &(*data_ptr) };
+-            match $cb_mod_ident(e, data) {
++            match $cb_mod_ident(&e, data) {
+                 Ok(_) => LDAPError::Success as i32,
+                 Err(e) => e as i32,
+             }
+diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
+new file mode 100644
+index 000000000..30864377a
+--- /dev/null
++++ b/src/slapi_r_plugin/src/modify.rs
+@@ -0,0 +1,118 @@
++use crate::constants::OpFlags;
++use crate::dn::SdnRef;
++use crate::error::{LDAPError, PluginError};
++use crate::pblock::Pblock;
++use crate::plugin::PluginIdRef;
++use crate::value::{slapi_value, ValueArray};
++
++use std::ffi::CString;
++use std::ops::{Deref, DerefMut};
++use std::os::raw::c_char;
++
++extern "C" {
++    fn slapi_modify_internal_set_pb_ext(
++        pb: *const libc::c_void,
++        dn: *const libc::c_void,
++        mods: *const *const libc::c_void,
++        controls: *const *const libc::c_void,
++        uniqueid: *const c_char,
++        plugin_ident: *const libc::c_void,
++        op_flags: i32,
++    );
++    fn slapi_modify_internal_pb(pb: *const libc::c_void);
++    fn slapi_mods_free(smods: *const *const libc::c_void);
++    fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
++    fn slapi_mods_new() -> *const libc::c_void;
++    fn slapi_mods_add_mod_values(
++        smods: *const libc::c_void,
++        mtype: i32,
++        attrtype: *const c_char,
++        value: *const *const slapi_value,
++    );
++}
++
++#[derive(Debug)]
++#[repr(i32)]
++pub enum ModType {
++    Add = 0,
++    Delete = 1,
++    Replace = 2,
++}
++
++pub struct SlapiMods {
++    inner: *const libc::c_void,
++    vas: Vec<ValueArray>,
++}
++
++impl Drop for SlapiMods {
++    fn drop(&mut self) {
++        unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
++    }
++}
++
++impl SlapiMods {
++    pub fn new() -> Self {
++        SlapiMods {
++            inner: unsafe { slapi_mods_new() },
++            vas: Vec::new(),
++        }
++    }
++
++    pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
++        // We can get the value array pointer here to push to the inner
++        // because the internal pointers won't change even when we push them
++        // to the list to preserve their lifetime.
++        let vas = values.as_ptr();
++        // We take ownership of this to ensure it lives at least as long as our
++        // slapimods structure.
++        self.vas.push(values);
++        // now we can insert these to the mods.
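++        // (Editor's note: ModType is #[repr(i32)] with Add = 0, Delete = 1
++        // and Replace = 2, matching the LDAP_MOD_ADD / LDAP_MOD_DELETE /
++        // LDAP_MOD_REPLACE constants, so the `as i32` cast below is all the
++        // translation the C API needs.)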
++        let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
++        unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
++    }
++}
++
++pub struct Modify {
++    pb: Pblock,
++    mods: SlapiMods,
++}
++
++pub struct ModifyResult {
++    pb: Pblock,
++}
++
++impl Modify {
++    pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
++        let pb = Pblock::new();
++        let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
++        // OP_FLAG_ACTION_LOG_ACCESS
++
++        unsafe {
++            slapi_modify_internal_set_pb_ext(
++                pb.deref().as_ptr(),
++                dn.as_ptr(),
++                lmods,
++                std::ptr::null(),
++                std::ptr::null(),
++                plugin_id.raw_pid,
++                OpFlags::ByassReferrals as i32,
++            )
++        };
++
++        Ok(Modify { pb, mods })
++    }
++
++    pub fn execute(self) -> Result<ModifyResult, LDAPError> {
++        let Modify {
++            mut pb,
++            mods: _mods,
++        } = self;
++        unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
++        let result = pb.get_op_result();
++
++        match result {
++            0 => Ok(ModifyResult { pb }),
++            _e => Err(LDAPError::from(result)),
++        }
++    }
++}
+diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
+index b69ce1680..0f83914f3 100644
+--- a/src/slapi_r_plugin/src/pblock.rs
++++ b/src/slapi_r_plugin/src/pblock.rs
+@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
+ extern "C" {
+     fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
+     fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
++    fn slapi_pblock_destroy(pb: *const libc::c_void);
+     fn slapi_pblock_new() -> *const libc::c_void;
+ }
+
+@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
+     }
+ }
+
++impl Drop for Pblock {
++    fn drop(&mut self) {
++        unsafe { slapi_pblock_destroy(self.value.raw_pb) }
++    }
++}
++
+ pub struct PblockRef {
+     raw_pb: *const libc::c_void,
+ }
+--
+2.26.3
+
diff --git a/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch b/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch
deleted file mode 100644
index 5622a1a..0000000
--- a/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch
+++ /dev/null
@@ -1,179 +0,0 @@
-From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001
-From: William Brown
-Date: Thu, 14 May 2020 14:31:47 +1000
-Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif
-
-Bug Description: This resolves a potential conflict between 60nis.ldif
-in freeipa and others with 2307compat, by removing the conflicting
-definitions from 2307bis that were included.
-
-Fix Description: By not including these in 2307compat, this means that
-sites that rely on the values provided by 2307bis may ALSO need
-60nis.ldif to be present. However, these nis values seem like they are
-likely very rare in reality, and this also will avoid potential
-issues with freeipa. It also is the least disruptive as we don't need
-to change an already defined file, and we don't have values where the name
-to oid relationship changes.
- -Fixes: #50933 -https://pagure.io/389-ds-base/issue/50933 - -Author: William Brown - -Review by: tbordaz (Thanks!) ---- - ldap/schema/10rfc2307compat.ldif | 66 -------------------------------- - ldap/schema/60autofs.ldif | 39 ++++++++++++------- - 2 files changed, 26 insertions(+), 79 deletions(-) - -diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif -index 8810231ac..78c588d08 100644 ---- a/ldap/schema/10rfc2307compat.ldif -+++ b/ldap/schema/10rfc2307compat.ldif -@@ -176,50 +176,6 @@ attributeTypes: ( - SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 - SINGLE-VALUE - ) --attributeTypes: ( -- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' -- DESC 'NIS public key' -- EQUALITY octetStringMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' -- DESC 'NIS secret key' -- EQUALITY octetStringMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.30 NAME 'nisDomain' -- DESC 'NIS domain' -- EQUALITY caseIgnoreIA5Match -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.31 NAME 'automountMapName' -- DESC 'automount Map Name' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.32 NAME 'automountKey' -- DESC 'Automount Key value' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) --attributeTypes: ( -- 1.3.6.1.1.1.1.33 NAME 'automountInformation' -- DESC 'Automount information' -- EQUALITY caseExactIA5Match -- SUBSTR caseExactIA5SubstringsMatch -- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -- SINGLE-VALUE -- ) - # end of attribute types - beginning of objectclasses - objectClasses: ( - 1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY -@@ -324,28 +280,6 @@ objectClasses: ( - seeAlso $ serialNumber' - MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) - ) --objectClasses: ( -- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY -- DESC 'An object with a public and secret key' -- MUST ( cn $ nisPublicKey $ nisSecretKey ) -- MAY ( uidNumber $ description ) -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY -- DESC 'Associates a NIS domain with a naming context' -- MUST nisDomain -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL -- MUST ( automountMapName ) -- MAY description -- ) --objectClasses: ( -- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL -- DESC 'Automount information' -- MUST ( automountKey $ automountInformation ) -- MAY description -- ) - ## namedObject is needed for groups without members - objectClasses: ( - 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL -diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif -index 084e9ec30..de3922aa2 100644 ---- a/ldap/schema/60autofs.ldif -+++ b/ldap/schema/60autofs.ldif -@@ -6,7 +6,23 @@ dn: cn=schema - ################################################################################ - # - attributeTypes: ( -- 1.3.6.1.1.1.1.33 -+ 1.3.6.1.1.1.1.31 NAME 'automountMapName' -+ DESC 'automount Map Name' -+ EQUALITY caseExactIA5Match -+ SUBSTR caseExactIA5SubstringsMatch -+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -+ SINGLE-VALUE -+ ) -+attributeTypes: ( -+ 1.3.6.1.1.1.1.32 NAME 'automountKey' -+ DESC 'Automount Key value' -+ EQUALITY caseExactIA5Match -+ SUBSTR 
caseExactIA5SubstringsMatch -+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 -+ SINGLE-VALUE -+ ) -+attributeTypes: ( -+ 1.3.6.1.1.1.1.33 - NAME 'automountInformation' - DESC 'Information used by the autofs automounter' - EQUALITY caseExactIA5Match -@@ -18,25 +34,22 @@ attributeTypes: ( - ################################################################################ - # - objectClasses: ( -- 1.3.6.1.1.1.2.17 -- NAME 'automount' -- DESC 'An entry in an automounter map' -+ 1.3.6.1.1.1.2.16 -+ NAME 'automountMap' -+ DESC 'An group of related automount objects' - SUP top - STRUCTURAL -- MUST ( cn $ automountInformation ) -- MAY ( description ) -+ MAY ( ou $ automountMapName $ description ) - X-ORIGIN 'draft-howard-rfc2307bis' - ) --# --################################################################################ --# - objectClasses: ( -- 1.3.6.1.1.1.2.16 -- NAME 'automountMap' -- DESC 'An group of related automount objects' -+ 1.3.6.1.1.1.2.17 -+ NAME 'automount' -+ DESC 'An entry in an automounter map' - SUP top - STRUCTURAL -- MUST ( ou ) -+ MUST ( automountInformation ) -+ MAY ( cn $ description $ automountKey ) - X-ORIGIN 'draft-howard-rfc2307bis' - ) - # --- -2.26.2 - diff --git a/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch b/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch new file mode 100644 index 0000000..91de38c --- /dev/null +++ b/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch @@ -0,0 +1,192 @@ +From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Thu, 17 Dec 2020 08:22:23 +1000 +Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work + (#4503) + +Bug Description: EntryUUID can be duplicated in replication, +due to a missing check in assign_uuid + +Fix Description: Add a test case to determine how this occurs, +and add the correct check for existing entryUUID. + +fixes: https://github.com/389ds/389-ds-base/issues/4498 + +Author: William Brown + +Review by: @mreynolds389 +--- + .../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++ + rpm.mk | 2 +- + src/plugins/entryuuid/src/lib.rs | 20 ++++- + src/slapi_r_plugin/src/constants.rs | 2 + + src/slapi_r_plugin/src/pblock.rs | 7 ++ + 5 files changed, 106 insertions(+), 2 deletions(-) + create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py + +diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py +new file mode 100644 +index 000000000..a2ebc8ff7 +--- /dev/null ++++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py +@@ -0,0 +1,77 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK ---
++
++import ldap
++import pytest
++import logging
++from lib389.topologies import topology_m2 as topo_m2
++from lib389.idm.user import nsUserAccounts
++from lib389.paths import Paths
++from lib389.utils import ds_is_older
++from lib389._constants import *
++from lib389.replica import ReplicationManager
++
++default_paths = Paths()
++
++pytestmark = pytest.mark.tier1
++
++@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
++
++def test_entryuuid_with_replication(topo_m2):
++    """ Check that entryuuid works with replication
++
++    :id: a5f15bf9-7f63-473a-840c-b9037b787024
++
++    :setup: two node mmr
++
++    :steps:
++        1. Create an entry on one server
++        2. Wait for replication
++        3. Assert it is on the second
++
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++    """
++
++    server_a = topo_m2.ms["supplier1"]
++    server_b = topo_m2.ms["supplier2"]
++    server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
++    server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
++
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++
++    account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
++    euuid_a = account_a.get_attr_vals_utf8('entryUUID')
++    print("🧩 %s" % euuid_a)
++    assert(euuid_a is not None)
++    assert(len(euuid_a) == 1)
++
++    repl.wait_for_replication(server_a, server_b)
++
++    account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
++    euuid_b = account_b.get_attr_vals_utf8('entryUUID')
++    print("🧩 %s" % euuid_b)
++
++    server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
++    server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
++
++    assert(euuid_b is not None)
++    assert(len(euuid_b) == 1)
++    assert(euuid_b == euuid_a)
++
++    account_b.set("description", "update")
++    repl.wait_for_replication(server_b, server_a)
++
++    euuid_c = account_a.get_attr_vals_utf8('entryUUID')
++    print("🧩 %s" % euuid_c)
++    assert(euuid_c is not None)
++    assert(len(euuid_c) == 1)
++    assert(euuid_c == euuid_a)
++
+diff --git a/rpm.mk b/rpm.mk
+index 02f5bba37..d1cdff7df 100644
+--- a/rpm.mk
++++ b/rpm.mk
+@@ -25,7 +25,7 @@ TSAN_ON = 0
+ # Undefined Behaviour Sanitizer
+ UBSAN_ON = 0
+
+-RUST_ON = 0
++RUST_ON = 1
+
+ # PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
+ PERL_ON = 1
+diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
+index 92977db05..0197c5e83 100644
+--- a/src/plugins/entryuuid/src/lib.rs
++++ b/src/plugins/entryuuid/src/lib.rs
+@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
+ fn assign_uuid(e: &mut EntryRef) {
+     let sdn = e.get_sdnref();
+
++    // 🚧 safety barrier 🚧
++    if e.contains_attr("entryUUID") {
++        log_error!(
++            ErrorLevel::Trace,
++            "assign_uuid -> entryUUID exists, skipping dn {}",
++            sdn.to_dn_string()
++        );
++        return;
++    }
++
+    // We could consider making these lazy static.
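+    // (Editor's sketch: with the lazy_static crate, already a dependency of
+    // this workspace, that would look roughly like:
+    //
+    //     lazy_static! {
+    //         static ref CONFIG_SDN: Sdn =
+    //             Sdn::try_from("cn=config").expect("Invalid static dn");
+    //     }
+    //
+    // but a static requires Sdn to be Sync, which this raw-pointer wrapper is
+    // not without an explicit unsafe impl, so the per-call construction below
+    // is the simpler choice.)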
+ let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn"); + let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn"); +@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid { + } + + fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { +- log_error!(ErrorLevel::Trace, "betxn_pre_add"); ++ if pb.get_is_replicated_operation() { ++ log_error!( ++ ErrorLevel::Trace, ++ "betxn_pre_add -> replicated operation, will not change" ++ ); ++ return Ok(()); ++ } ++ ++ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start"); + + let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; + assign_uuid(&mut e); +diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs +index 34845c2f4..aa0691acc 100644 +--- a/src/slapi_r_plugin/src/constants.rs ++++ b/src/slapi_r_plugin/src/constants.rs +@@ -164,6 +164,8 @@ pub(crate) enum PblockType { + AddEntry = 60, + /// SLAPI_BACKEND + Backend = 130, ++ /// SLAPI_IS_REPLICATED_OPERATION ++ IsReplicationOperation = 142, + /// SLAPI_PLUGIN_MR_NAMES + MRNames = 624, + /// SLAPI_PLUGIN_SYNTAX_NAMES +diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs +index 0f83914f3..718ff2ca7 100644 +--- a/src/slapi_r_plugin/src/pblock.rs ++++ b/src/slapi_r_plugin/src/pblock.rs +@@ -279,4 +279,11 @@ impl PblockRef { + pub fn get_op_result(&mut self) -> i32 { + self.get_value_i32(PblockType::OpResult).unwrap_or(-1) + } ++ ++ pub fn get_is_replicated_operation(&mut self) -> bool { ++ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0); ++ // Because rust returns the result of the last evaluation, we can ++ // just return if not equal 0. ++ i != 0 ++ } + } +-- +2.26.3 + diff --git a/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch b/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch deleted file mode 100644 index 82fdf9d..0000000 --- a/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 12 Aug 2020 12:46:42 -0400 -Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and - 10rfc2307compat - -Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to - match the standard OID, but this breaks replication with - older versions of DS. - -Fix Description: Continue to use the old(invalid?) oid for nisMap so that - replication does not break in a mixed version environment. - -Fixes: https://pagure.io/389-ds-base/issue/50933 - -Reviewed by: firstyear & tbordaz(Thanks!!) 
---- - ldap/schema/10rfc2307compat.ldif | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif -index 78c588d08..8ba72e1e3 100644 ---- a/ldap/schema/10rfc2307compat.ldif -+++ b/ldap/schema/10rfc2307compat.ldif -@@ -253,7 +253,7 @@ objectClasses: ( - MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) - ) - objectClasses: ( -- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL -+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL - DESC 'A generic abstraction of a NIS map' - MUST nisMapName - MAY description --- -2.26.2 - diff --git a/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch b/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch new file mode 100644 index 0000000..0affdf6 --- /dev/null +++ b/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch @@ -0,0 +1,626 @@ +From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 7 Dec 2020 11:00:45 -0500 +Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in + closed environment + +Description: Add Makefile flags and update rpm.mk that allow updating + and downloading all the cargo/rust dependencies. This is + needed for nightly tests and upstream/downstream releases. + +Fixes: https://github.com/389ds/389-ds-base/issues/4421 + +Reviewed by: firstyear(Thanks!) +--- + rpm.mk | 3 +- + rpm/389-ds-base.spec.in | 2 +- + src/Cargo.lock | 563 ---------------------------------------- + 3 files changed, 3 insertions(+), 565 deletions(-) + delete mode 100644 src/Cargo.lock + +diff --git a/rpm.mk b/rpm.mk +index d1cdff7df..ef810c63c 100644 +--- a/rpm.mk ++++ b/rpm.mk +@@ -44,6 +44,7 @@ update-cargo-dependencies: + cargo update --manifest-path=./src/Cargo.toml + + download-cargo-dependencies: ++ cargo update --manifest-path=./src/Cargo.toml + cargo vendor --manifest-path=./src/Cargo.toml + cargo fetch --manifest-path=./src/Cargo.toml + tar -czf vendor.tar.gz vendor +@@ -114,7 +115,7 @@ rpmbuildprep: + cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \ + fi + +-srpms: rpmroot srpmdistdir tarballs rpmbuildprep ++srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep + rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec + cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/ + rm -rf $(RPMBUILD) +diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in +index b9f85489b..d80de8422 100644 +--- a/rpm/389-ds-base.spec.in ++++ b/rpm/389-ds-base.spec.in +@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug" + %endif + + %if %{use_rust} +-RUST_FLAGS="--enable-rust" ++RUST_FLAGS="--enable-rust --enable-rust-offline" + %endif + + %if %{use_legacy} +diff --git a/src/Cargo.lock b/src/Cargo.lock +deleted file mode 100644 +index 33d7b8f23..000000000 +--- a/src/Cargo.lock ++++ /dev/null +@@ -1,563 +0,0 @@ +-# This file is automatically @generated by Cargo. +-# It is not intended for manual editing. 
+-[[package]] +-name = "ansi_term" +-version = "0.11.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +-dependencies = [ +- "winapi", +-] +- +-[[package]] +-name = "atty" +-version = "0.2.14" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +-dependencies = [ +- "hermit-abi", +- "libc", +- "winapi", +-] +- +-[[package]] +-name = "autocfg" +-version = "1.0.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +- +-[[package]] +-name = "base64" +-version = "0.13.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +- +-[[package]] +-name = "bitflags" +-version = "1.2.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +- +-[[package]] +-name = "byteorder" +-version = "1.4.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +- +-[[package]] +-name = "cbindgen" +-version = "0.9.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd" +-dependencies = [ +- "clap", +- "log", +- "proc-macro2", +- "quote", +- "serde", +- "serde_json", +- "syn", +- "tempfile", +- "toml", +-] +- +-[[package]] +-name = "cc" +-version = "1.0.67" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +-dependencies = [ +- "jobserver", +-] +- +-[[package]] +-name = "cfg-if" +-version = "1.0.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +- +-[[package]] +-name = "clap" +-version = "2.33.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +-dependencies = [ +- "ansi_term", +- "atty", +- "bitflags", +- "strsim", +- "textwrap", +- "unicode-width", +- "vec_map", +-] +- +-[[package]] +-name = "entryuuid" +-version = "0.1.0" +-dependencies = [ +- "cc", +- "libc", +- "paste", +- "slapi_r_plugin", +- "uuid", +-] +- +-[[package]] +-name = "entryuuid_syntax" +-version = "0.1.0" +-dependencies = [ +- "cc", +- "libc", +- "paste", +- "slapi_r_plugin", +- "uuid", +-] +- +-[[package]] +-name = "fernet" +-version = "0.1.4" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" +-dependencies = [ +- "base64", +- "byteorder", +- "getrandom", +- "openssl", +- "zeroize", +-] +- +-[[package]] +-name = "foreign-types" +-version = "0.3.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +-dependencies = [ +- "foreign-types-shared", +-] +- +-[[package]] +-name = "foreign-types-shared" +-version = "0.1.1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +- +-[[package]] +-name = "getrandom" +-version = 
"0.2.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +-dependencies = [ +- "cfg-if", +- "libc", +- "wasi", +-] +- +-[[package]] +-name = "hermit-abi" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +-dependencies = [ +- "libc", +-] +- +-[[package]] +-name = "itoa" +-version = "0.4.7" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +- +-[[package]] +-name = "jobserver" +-version = "0.1.22" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +-dependencies = [ +- "libc", +-] +- +-[[package]] +-name = "lazy_static" +-version = "1.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +- +-[[package]] +-name = "libc" +-version = "0.2.94" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" +- +-[[package]] +-name = "librnsslapd" +-version = "0.1.0" +-dependencies = [ +- "cbindgen", +- "libc", +- "slapd", +-] +- +-[[package]] +-name = "librslapd" +-version = "0.1.0" +-dependencies = [ +- "cbindgen", +- "libc", +- "slapd", +-] +- +-[[package]] +-name = "log" +-version = "0.4.14" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +-dependencies = [ +- "cfg-if", +-] +- +-[[package]] +-name = "once_cell" +-version = "1.7.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +- +-[[package]] +-name = "openssl" +-version = "0.10.34" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" +-dependencies = [ +- "bitflags", +- "cfg-if", +- "foreign-types", +- "libc", +- "once_cell", +- "openssl-sys", +-] +- +-[[package]] +-name = "openssl-sys" +-version = "0.9.63" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" +-dependencies = [ +- "autocfg", +- "cc", +- "libc", +- "pkg-config", +- "vcpkg", +-] +- +-[[package]] +-name = "paste" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +-dependencies = [ +- "paste-impl", +- "proc-macro-hack", +-] +- +-[[package]] +-name = "paste-impl" +-version = "0.1.18" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +-dependencies = [ +- "proc-macro-hack", +-] +- +-[[package]] +-name = "pkg-config" +-version = "0.3.19" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +- +-[[package]] +-name = "ppv-lite86" +-version = "0.2.10" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +- +-[[package]] +-name = 
"proc-macro-hack" +-version = "0.5.19" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +- +-[[package]] +-name = "proc-macro2" +-version = "1.0.27" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +-dependencies = [ +- "unicode-xid", +-] +- +-[[package]] +-name = "quote" +-version = "1.0.9" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +-dependencies = [ +- "proc-macro2", +-] +- +-[[package]] +-name = "rand" +-version = "0.8.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +-dependencies = [ +- "libc", +- "rand_chacha", +- "rand_core", +- "rand_hc", +-] +- +-[[package]] +-name = "rand_chacha" +-version = "0.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +-dependencies = [ +- "ppv-lite86", +- "rand_core", +-] +- +-[[package]] +-name = "rand_core" +-version = "0.6.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +-dependencies = [ +- "getrandom", +-] +- +-[[package]] +-name = "rand_hc" +-version = "0.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +-dependencies = [ +- "rand_core", +-] +- +-[[package]] +-name = "redox_syscall" +-version = "0.2.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +-dependencies = [ +- "bitflags", +-] +- +-[[package]] +-name = "remove_dir_all" +-version = "0.5.3" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +-dependencies = [ +- "winapi", +-] +- +-[[package]] +-name = "rsds" +-version = "0.1.0" +- +-[[package]] +-name = "ryu" +-version = "1.0.5" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +- +-[[package]] +-name = "serde" +-version = "1.0.126" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +-dependencies = [ +- "serde_derive", +-] +- +-[[package]] +-name = "serde_derive" +-version = "1.0.126" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +-] +- +-[[package]] +-name = "serde_json" +-version = "1.0.64" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +-dependencies = [ +- "itoa", +- "ryu", +- "serde", +-] +- +-[[package]] +-name = "slapd" +-version = "0.1.0" +-dependencies = [ +- "fernet", +-] +- +-[[package]] +-name = "slapi_r_plugin" +-version = "0.1.0" +-dependencies = [ +- "lazy_static", +- "libc", +- "paste", +- "uuid", +-] +- +-[[package]] +-name = "strsim" +-version = "0.8.0" +-source = 
"registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +- +-[[package]] +-name = "syn" +-version = "1.0.72" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +-dependencies = [ +- "proc-macro2", +- "quote", +- "unicode-xid", +-] +- +-[[package]] +-name = "synstructure" +-version = "0.12.4" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +- "unicode-xid", +-] +- +-[[package]] +-name = "tempfile" +-version = "3.2.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +-dependencies = [ +- "cfg-if", +- "libc", +- "rand", +- "redox_syscall", +- "remove_dir_all", +- "winapi", +-] +- +-[[package]] +-name = "textwrap" +-version = "0.11.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +-dependencies = [ +- "unicode-width", +-] +- +-[[package]] +-name = "toml" +-version = "0.5.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +-dependencies = [ +- "serde", +-] +- +-[[package]] +-name = "unicode-width" +-version = "0.1.8" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +- +-[[package]] +-name = "unicode-xid" +-version = "0.2.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +- +-[[package]] +-name = "uuid" +-version = "0.8.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +-dependencies = [ +- "getrandom", +-] +- +-[[package]] +-name = "vcpkg" +-version = "0.2.12" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d" +- +-[[package]] +-name = "vec_map" +-version = "0.8.2" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +- +-[[package]] +-name = "wasi" +-version = "0.10.2+wasi-snapshot-preview1" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +- +-[[package]] +-name = "winapi" +-version = "0.3.9" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +-dependencies = [ +- "winapi-i686-pc-windows-gnu", +- "winapi-x86_64-pc-windows-gnu", +-] +- +-[[package]] +-name = "winapi-i686-pc-windows-gnu" +-version = "0.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +- +-[[package]] +-name = "winapi-x86_64-pc-windows-gnu" +-version = "0.4.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +- +-[[package]] +-name = "zeroize" 
+-version = "1.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +-dependencies = [ +- "zeroize_derive", +-] +- +-[[package]] +-name = "zeroize_derive" +-version = "1.1.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +-dependencies = [ +- "proc-macro2", +- "quote", +- "syn", +- "synstructure", +-] +-- +2.26.3 + diff --git a/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch b/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch deleted file mode 100644 index 4269446..0000000 --- a/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 4 Jun 2020 11:51:53 +1000 -Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable - -Bug Description: We previously did delayed allocation -of mutexs, which @tbordaz noted can lead to high usage -of the pthread mutex init routines. This was done under -the conntable lock, as well as cleaning the connection - -Fix Description: rather than delayed allocation, we -initialise everything at start up instead, which means -that while startup may have a delay, at run time we have -a smaller and lighter connection allocation routine, -that is able to release the CT lock sooner. - -https://pagure.io/389-ds-base/issue/51131 - -Author: William Brown - -Review by: ??? ---- - ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++--------------- - 1 file changed, 47 insertions(+), 39 deletions(-) - -diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c -index b23dc3435..feb9c0d75 100644 ---- a/ldap/servers/slapd/conntable.c -+++ b/ldap/servers/slapd/conntable.c -@@ -138,10 +138,21 @@ connection_table_new(int table_size) - ct->conn_next_offset = 1; - ct->conn_free_offset = 1; - -+ pthread_mutexattr_t monitor_attr = {0}; -+ pthread_mutexattr_init(&monitor_attr); -+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); -+ - /* We rely on the fact that we called calloc, which zeros the block, so we don't - * init any structure element unless a zero value is troublesome later - */ - for (i = 0; i < table_size; i++) { -+ /* -+ * Technically this is a no-op due to calloc, but we should always be -+ * careful with things like this .... -+ */ -+ ct->c[i].c_state = CONN_STATE_FREE; -+ /* Start the conn setup. */ -+ - LBER_SOCKET invalid_socket; - /* DBDB---move this out of here once everything works */ - ct->c[i].c_sb = ber_sockbuf_alloc(); -@@ -161,11 +172,20 @@ connection_table_new(int table_size) - ct->c[i].c_prev = NULL; - ct->c[i].c_ci = i; - ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX; -- /* -- * Technically this is a no-op due to calloc, but we should always be -- * careful with things like this .... -- */ -- ct->c[i].c_state = CONN_STATE_FREE; -+ -+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); -+ exit(1); -+ } -+ -+ ct->c[i].c_pdumutex = PR_NewLock(); -+ if (ct->c[i].c_pdumutex == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ -+ /* Ready to rock, mark as such. */ -+ ct->c[i].c_state = CONN_STATE_INIT; - /* Prepare the connection into the freelist. 
*/ - ct->c_freelist[i] = &(ct->c[i]); - } -@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd) - /* Never use slot 0 */ - ct->conn_next_offset += 1; - } -- /* Now prep the slot for usage. */ -- PR_ASSERT(c->c_next == NULL); -- PR_ASSERT(c->c_prev == NULL); -- PR_ASSERT(c->c_extension == NULL); -- -- if (c->c_state == CONN_STATE_FREE) { -- -- c->c_state = CONN_STATE_INIT; -- -- pthread_mutexattr_t monitor_attr = {0}; -- pthread_mutexattr_init(&monitor_attr); -- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); -- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); -- exit(1); -- } -- -- c->c_pdumutex = PR_NewLock(); -- if (c->c_pdumutex == NULL) { -- c->c_pdumutex = NULL; -- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); -- exit(1); -- } -- } -- /* Let's make sure there's no cruft left on there from the last time this connection was used. */ -- /* Note: no need to lock c->c_mutex because this function is only -- * called by one thread (the slapd_daemon thread), and if we got this -- * far then `c' is not being used by any operation threads, etc. -- */ -- connection_cleanup(c); -- c->c_ct = ct; /* pointer to connection table that owns this connection */ -+ PR_Unlock(ct->table_mutex); - } else { -- /* couldn't find a Connection */ -+ /* couldn't find a Connection, table must be full */ - slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n"); -+ PR_Unlock(ct->table_mutex); -+ return NULL; - } - -- /* We could move this to before the c alloc as there is no point to remain here. */ -- PR_Unlock(ct->table_mutex); -+ /* Now prep the slot for usage. */ -+ PR_ASSERT(c != NULL); -+ PR_ASSERT(c->c_next == NULL); -+ PR_ASSERT(c->c_prev == NULL); -+ PR_ASSERT(c->c_extension == NULL); -+ PR_ASSERT(c->c_state == CONN_STATE_INIT); -+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */ -+ -+ /* -+ * Note: no need to lock c->c_mutex because this function is only -+ * called by one thread (the slapd_daemon thread), and if we got this -+ * far then `c' is not being used by any operation threads, etc. The -+ * memory ordering will be provided by the work queue sending c to a -+ * thread. -+ */ -+ connection_cleanup(c); -+ /* pointer to connection table that owns this connection */ -+ c->c_ct = ct; - - return c; - } --- -2.26.2 - diff --git a/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch b/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch deleted file mode 100644 index 41f9315..0000000 --- a/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch +++ /dev/null @@ -1,66 +0,0 @@ -From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 25 Nov 2020 18:07:34 +0100 -Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue - internal searches with filter containing unescaped chars (#4439) - -Bug description: - Previous fix is buggy because slapi_filter_escape_filter_value returns - an escaped filter component not an escaped assertion value.
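To make the distinction concrete, here is a minimal sketch of the corrected call pattern (taken from the urp.c hunks below; basedn stands for the DN value being searched):

    /* slapi_filter_escape_filter_value("nscpentrydn", basedn) returns a whole
     * escaped filter component, e.g. "(nscpentrydn=<escaped dn>)", not a bare
     * assertion value, so it must not be wrapped in "(nscpentrydn=%s)" again. */
    char *escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
    slapi_ch_free((void **)&escaped_filter);

Wrapping the returned component a second time produced a nested, malformed filter, which is why the previous fix failed.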
- -Fix description: - use the escaped filter component - -relates: https://github.com/389ds/389-ds-base/issues/4297 - -Reviewed by: William Brown - -Platforms tested: F31 --- - ldap/servers/plugins/replication/urp.c | 16 ++++++++-------- - 1 file changed, 8 insertions(+), 8 deletions(-) - -diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c -index f41dbc72d..ed340c9d8 100644 ---- a/ldap/servers/plugins/replication/urp.c -+++ b/ldap/servers/plugins/replication/urp.c -@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry, - Slapi_Entry **entries = NULL; - Slapi_PBlock *newpb; - char *basedn = slapi_entry_get_ndn(entry); -- char *escaped_basedn; -+ char *escaped_filter; - const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry)); -- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn); -+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn); - -- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); -- slapi_ch_free((void **)&escaped_basedn); -+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); -+ slapi_ch_free((void **)&escaped_filter); - newpb = slapi_pblock_new(); - slapi_search_internal_set_pb(newpb, - slapi_sdn_get_dn(suffix), /* Base DN */ -@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr - Slapi_Entry **entries = NULL; - Slapi_PBlock *newpb; - const char *basedn = slapi_sdn_get_dn(parentdn); -- char *escaped_basedn; -- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); -+ char *escaped_filter; -+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); - - char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn"); - CSN *conflict_csn = csn_new_by_string(conflict_csnstr); - CSN *tombstone_csn = NULL; - -- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); -- slapi_ch_free((void **)&escaped_basedn); -+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); -+ slapi_ch_free((void **)&escaped_filter); - newpb = slapi_pblock_new(); - char *parent_dn = slapi_dn_parent (basedn); - slapi_search_internal_set_pb(newpb, --- -2.26.2 - diff --git a/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch b/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch new file mode 100644 index 0000000..f5edc9d --- /dev/null +++ b/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch @@ -0,0 +1,412 @@ +From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Fri, 26 Jun 2020 10:27:56 +1000 +Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking + +Bug Description: Previously pblock.c assumed that all plugin +names were static C strings. Rust can't create static C +strings, so these were intentionally leaked. + +Fix Description: Rather than leak these, we do a dup/free +through the slapiplugin struct instead, meaning we can use +ephemeral, properly managed strings in Rust. This does not +affect any other existing code which will still handle the +static strings correctly. + +https://pagure.io/389-ds-base/issue/51175 + +Author: William Brown + +Review by: mreynolds, tbordaz (Thanks!)
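In short, the contract becomes copy-on-set, free-on-teardown. A minimal sketch of both sides (the slapi calls are the ones changed in the hunks below; the caller-side array is illustrative):

    /* Caller side: the array may be ephemeral, because slapi_pblock_set()
     * now duplicates it internally with slapi_ch_array_dup(). */
    char *names[] = { "exampleSyntaxName", NULL }; /* illustrative name */
    slapi_pblock_set(pb, SLAPI_PLUGIN_SYNTAX_NAMES, (void *)names);

    /* Server side, in plugin_free(): the duplicated strings are released. */
    slapi_ch_array_free(plugin->plg_syntax_names);
    slapi_ch_free_string(&plugin->plg_syntax_oid);

Static C strings passed by existing plugins are simply duplicated and freed the same way, which is why no other callers need to change.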
+--- + Makefile.am | 1 + + configure.ac | 2 +- + ldap/servers/slapd/pagedresults.c | 6 +-- + ldap/servers/slapd/pblock.c | 9 ++-- + ldap/servers/slapd/plugin.c | 7 +++ + ldap/servers/slapd/pw_verify.c | 1 + + ldap/servers/slapd/tools/pwenc.c | 2 +- + src/slapi_r_plugin/README.md | 6 +-- + src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++ + src/slapi_r_plugin/src/lib.rs | 8 ++-- + src/slapi_r_plugin/src/macros.rs | 17 +++++--- + src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------ + 12 files changed, 85 insertions(+), 63 deletions(-) + create mode 100644 src/slapi_r_plugin/src/charray.rs + +diff --git a/Makefile.am b/Makefile.am +index 627953850..36434cf17 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + libslapi_r_plugin_SOURCES = \ + src/slapi_r_plugin/src/backend.rs \ + src/slapi_r_plugin/src/ber.rs \ ++ src/slapi_r_plugin/src/charray.rs \ + src/slapi_r_plugin/src/constants.rs \ + src/slapi_r_plugin/src/dn.rs \ + src/slapi_r_plugin/src/entry.rs \ +diff --git a/configure.ac b/configure.ac +index b3cf77d08..61bf35e4a 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then + debug_defs="-DDEBUG -DMCC_DEBUG" + debug_cflags="-g3 -O0 -rdynamic" + debug_cxxflags="-g3 -O0 -rdynamic" +- debug_rust_defs="-C debuginfo=2" ++ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace" + cargo_defs="" + rust_target_dir="debug" + else +diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c +index d8b8798b6..e3444e944 100644 +--- a/ldap/servers/slapd/pagedresults.c ++++ b/ldap/servers/slapd/pagedresults.c +@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock) + int i; + PagedResults *prp = NULL; + +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */ + + if (NULL == conn) { +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */ + return 0; + } + +@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock) + if (needlock) { + pthread_mutex_unlock(&(conn->c_mutex)); + } +- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); ++ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */ + return rc; + } + +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index 1ad9d0399..f7d1f8885 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { + return (-1); + } +- pblock->pb_plugin->plg_syntax_names = (char **)value; ++ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL); ++ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value); + break; + case SLAPI_PLUGIN_SYNTAX_OID: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { + return (-1); + } +- pblock->pb_plugin->plg_syntax_oid = (char *)value; ++ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL); ++ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value); + break; + case SLAPI_PLUGIN_SYNTAX_FLAGS: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) { +@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + if (pblock->pb_plugin->plg_type != 
SLAPI_PLUGIN_MATCHINGRULE) { + return (-1); + } +- pblock->pb_plugin->plg_mr_names = (char **)value; ++ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL); ++ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value); + break; + case SLAPI_PLUGIN_MR_COMPARE: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) { +diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c +index 282b98738..e6b48de60 100644 +--- a/ldap/servers/slapd/plugin.c ++++ b/ldap/servers/slapd/plugin.c +@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin) + if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) { + slapi_ch_free_string(&plugin->plg_pwdstorageschemename); + } ++ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) { ++ slapi_ch_free_string(&plugin->plg_syntax_oid); ++ slapi_ch_array_free(plugin->plg_syntax_names); ++ } ++ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) { ++ slapi_ch_array_free(plugin->plg_mr_names); ++ } + release_componentid(plugin->plg_identity); + slapi_counter_destroy(&plugin->plg_op_counter); + if (!plugin->plg_group) { +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index 4f0944b73..4ff1fa2fd 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) { + if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) { + rc = SLAPI_BIND_SUCCESS; + } ++ slapi_ch_free_string(&key); + #endif + return rc; + } +diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c +index 1629c06cd..d89225e34 100644 +--- a/ldap/servers/slapd/tools/pwenc.c ++++ b/ldap/servers/slapd/tools/pwenc.c +@@ -34,7 +34,7 @@ + + int ldap_syslog; + int ldap_syslog_level; +-int slapd_ldap_debug = LDAP_DEBUG_ANY; ++/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */ + int detached; + FILE *error_logfp; + FILE *access_logfp; +diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md +index af9743ec9..1c9bcbf17 100644 +--- a/src/slapi_r_plugin/README.md ++++ b/src/slapi_r_plugin/README.md +@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html) + > warning about danger. + + This document will not detail the specifics of unsafe or the invariants you must adhere to for rust +-to work with C. ++to work with C. Failure to uphold these invariants will lead to less than optimal consequences. + + If you still want to see more about the plugin bindings, go on ... + +@@ -135,7 +135,7 @@ associated functions. + Now, you may notice that not all members of the trait are implemented. This is due to a feature + of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide + template versions of these functions. If you "overwrite" them, your implementation is used. Unlike +-OO, you may not inherit or call the default function. ++OO, you may not inherit or call the default function. + + If a default is not provided you *must* implement that function to be considered valid. Today (20200422) + this only applies to `start` and `close`. +@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h + As a result, this means that we must express in code, assertions about the proper ownership of memory + and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible + for freeing some value.) 
Failure to handle this correctly, can and will lead to crashes, leaks or +-*hand waving* magical failures that are eXtReMeLy FuN to debug. ++*hand waving* magical failures that are `eXtReMeLy FuN` to debug. + + ### Reference Types + +diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs +new file mode 100644 +index 000000000..d2e44693c +--- /dev/null ++++ b/src/slapi_r_plugin/src/charray.rs +@@ -0,0 +1,32 @@ ++use std::ffi::CString; ++use std::iter::once; ++use std::os::raw::c_char; ++use std::ptr; ++ ++pub struct Charray { ++ pin: Vec, ++ charray: Vec<*const c_char>, ++} ++ ++impl Charray { ++ pub fn new(input: &[&str]) -> Result { ++ let pin: Result, ()> = input ++ .iter() ++ .map(|s| CString::new(*s).map_err(|_e| ())) ++ .collect(); ++ ++ let pin = pin?; ++ ++ let charray: Vec<_> = pin ++ .iter() ++ .map(|s| s.as_ptr()) ++ .chain(once(ptr::null())) ++ .collect(); ++ ++ Ok(Charray { pin, charray }) ++ } ++ ++ pub fn as_ptr(&self) -> *const *const c_char { ++ self.charray.as_ptr() ++ } ++} +diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs +index 076907bae..be28cac95 100644 +--- a/src/slapi_r_plugin/src/lib.rs ++++ b/src/slapi_r_plugin/src/lib.rs +@@ -1,9 +1,11 @@ +-// extern crate lazy_static; ++#[macro_use] ++extern crate lazy_static; + + #[macro_use] + pub mod macros; + pub mod backend; + pub mod ber; ++pub mod charray; + mod constants; + pub mod dn; + pub mod entry; +@@ -20,6 +22,7 @@ pub mod value; + pub mod prelude { + pub use crate::backend::{BackendRef, BackendRefTxn}; + pub use crate::ber::BerValRef; ++ pub use crate::charray::Charray; + pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS}; + pub use crate::dn::{Sdn, SdnRef}; + pub use crate::entry::EntryRef; +@@ -30,8 +33,7 @@ pub mod prelude { + pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3}; + pub use crate::search::{Search, SearchScope}; + pub use crate::syntax_plugin::{ +- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr, +- SlapiSubMr, SlapiSyntaxPlugin1, ++ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1, + }; + pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef}; + pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef}; +diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs +index bc8dfa60f..97fc5d7ef 100644 +--- a/src/slapi_r_plugin/src/macros.rs ++++ b/src/slapi_r_plugin/src/macros.rs +@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks { + paste::item! { + use libc; + use std::convert::TryFrom; ++ use std::ffi::CString; + + #[no_mangle] + pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 { +@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks { + }; + + // Setup the names/oids that this plugin provides syntaxes for. +- +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) }; +- match pb.register_syntax_names(name_ptr) { ++ // DS will clone these, so they can be ephemeral to this function. 
++ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names"); ++ match pb.register_syntax_names(name_vec.as_ptr()) { + 0 => {}, + e => return e, + }; + +- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) }; +- match pb.register_syntax_oid(name_ptr) { ++ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid"); ++ match pb.register_syntax_oid(attr_oid.as_ptr()) { + 0 => {}, + e => return e, + }; +@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks { + e => return e, + }; + +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) }; ++ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names"); ++ let name_ptr = name_vec.as_ptr(); + // SLAPI_PLUGIN_MR_NAMES + match pb.register_mr_names(name_ptr) { + 0 => {}, +@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks { + e => return e, + }; + +- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) }; ++ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names"); ++ let name_ptr = name_vec.as_ptr(); + // SLAPI_PLUGIN_MR_NAMES + match pb.register_mr_names(name_ptr) { + 0 => {}, +diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs +index e7d5c01bd..86f84bdd8 100644 +--- a/src/slapi_r_plugin/src/syntax_plugin.rs ++++ b/src/slapi_r_plugin/src/syntax_plugin.rs +@@ -1,11 +1,11 @@ + use crate::ber::BerValRef; + // use crate::constants::FilterType; ++use crate::charray::Charray; + use crate::error::PluginError; + use crate::pblock::PblockRef; + use crate::value::{ValueArray, ValueArrayRef}; + use std::cmp::Ordering; + use std::ffi::CString; +-use std::iter::once; + use std::os::raw::c_char; + use std::ptr; + +@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry { + mr_compat_syntax: *const *const c_char, + } + +-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char { +- let n = CString::new(name) +- .expect("An invalid string has been hardcoded!") +- .into_boxed_c_str(); +- let n_ptr = n.as_ptr(); +- // Now we intentionally leak the name here, and the pointer will remain valid. +- Box::leak(n); +- n_ptr +-} +- +-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char { +- let n_arr: Vec = names +- .iter() +- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!")) +- .collect(); +- let n_arr = n_arr.into_boxed_slice(); +- let n_ptr_arr: Vec<*const c_char> = n_arr +- .iter() +- .map(|v| v.as_ptr()) +- .chain(once(ptr::null())) +- .collect(); +- let n_ptr_arr = n_ptr_arr.into_boxed_slice(); +- +- // Now we intentionally leak these names here, +- let _r_n_arr = Box::leak(n_arr); +- let r_n_ptr_arr = Box::leak(n_ptr_arr); +- +- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char; +- name_ptr +-} +- + // oid - the oid of the matching rule + // name - the name of the mr + // desc - description +@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register( + syntax: &str, + compat_syntax: &[&str], + ) -> i32 { +- let oid_ptr = name_to_leaking_char(oid); +- let name_ptr = name_to_leaking_char(name); +- let desc_ptr = name_to_leaking_char(desc); +- let syntax_ptr = name_to_leaking_char(syntax); +- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax); ++ // Make everything CStrings that live long enough. 
++ ++ let oid_cs = CString::new(oid).expect("invalid oid"); ++ let name_cs = CString::new(name).expect("invalid name"); ++ let desc_cs = CString::new(desc).expect("invalid desc"); ++ let syntax_cs = CString::new(syntax).expect("invalid syntax"); ++ ++ // We have to do this so the cstrings live long enough. ++ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax"); + + let new_mr = slapi_matchingRuleEntry { +- mr_oid: oid_ptr, ++ mr_oid: oid_cs.as_ptr(), + _mr_oidalias: ptr::null(), +- mr_name: name_ptr, +- mr_desc: desc_ptr, +- mr_syntax: syntax_ptr, ++ mr_name: name_cs.as_ptr(), ++ mr_desc: desc_cs.as_ptr(), ++ mr_syntax: syntax_cs.as_ptr(), + _mr_obsolete: 0, +- mr_compat_syntax: compat_syntax_ptr, ++ mr_compat_syntax: compat_syntax_ca.as_ptr(), + }; + + let new_mr_ptr = &new_mr as *const _; +-- +2.26.3 + diff --git a/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch b/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch deleted file mode 100644 index 9bca531..0000000 --- a/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch +++ /dev/null @@ -1,502 +0,0 @@ -From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 20 Nov 2020 17:47:18 -0500 -Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file - -Description: Add options to create, modify, delete, and display - the .dsrc CLI tool shortcut file. - -Relates: https://github.com/389ds/389-ds-base/issues/3657 - -Reviewed by: firstyear(Thanks!) ---- - dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++ - src/lib389/cli/dsctl | 2 + - src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++ - 3 files changed, 450 insertions(+) - create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py - create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py - -diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py -new file mode 100644 -index 000000000..1b27700ec ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py -@@ -0,0 +1,136 @@ -+import logging -+import pytest -+import os -+from os.path import expanduser -+from lib389.cli_base import FakeArgs -+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc -+from lib389._constants import DEFAULT_SUFFIX, DN_DM -+from lib389.topologies import topology_st as topo -+ -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def setup(topo, request): -+ """Preserve any existing .dsrc file""" -+ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ backup_file = dsrc_file + ".original" -+ if os.path.exists(dsrc_file): -+ os.rename(dsrc_file, backup_file) -+ -+ def fin(): -+ if os.path.exists(backup_file): -+ os.rename(backup_file, dsrc_file) -+ -+ request.addfinalizer(fin) -+ -+ -+def test_dsrc(topo, setup): -+ """Test "dsctl dsrc" command -+ -+ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb -+ :setup: Standalone Instance -+ :steps: -+ 1. Test creation works -+ 2. Test creating duplicate section -+ 3. Test adding an additional inst config works -+ 4. Test removing an instance works -+ 5. Test modify works -+ 6. Test delete works -+ 7. Test display fails when no file is present -+ -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. Success -+ 7. 
Success -+ """ -+ -+ inst = topo.standalone -+ serverid = inst.serverid -+ second_inst_name = "Second" -+ second_inst_basedn = "o=second" -+ different_suffix = "o=different" -+ -+ # Setup our args -+ args = FakeArgs() -+ args.basedn = DEFAULT_SUFFIX -+ args.binddn = DN_DM -+ args.json = None -+ args.uri = None -+ args.saslmech = None -+ args.tls_cacertdir = None -+ args.tls_cert = None -+ args.tls_key = None -+ args.tls_reqcert = None -+ args.starttls = None -+ args.cancel_starttls = None -+ args.pwdfile = None -+ args.do_it = True -+ -+ # Create a dsrc configuration entry -+ create_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains("basedn = " + args.basedn) -+ assert topo.logcap.contains("binddn = " + args.binddn) -+ assert topo.logcap.contains("[" + serverid + "]") -+ topo.logcap.flush() -+ -+ # Attempt to add duplicate instance section -+ with pytest.raises(ValueError): -+ create_dsrc(inst, log, args) -+ -+ # Test adding a second instance works correctly -+ inst.serverid = second_inst_name -+ args.basedn = second_inst_basedn -+ create_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains("basedn = " + args.basedn) -+ assert topo.logcap.contains("[" + second_inst_name + "]") -+ topo.logcap.flush() -+ -+ # Delete second instance -+ delete_dsrc(inst, log, args) -+ inst.serverid = serverid # Restore original instance name -+ display_dsrc(inst, topo.logcap.log, args) -+ assert not topo.logcap.contains("[" + second_inst_name + "]") -+ assert not topo.logcap.contains("basedn = " + args.basedn) -+ # Make sure first instance config is still present -+ assert topo.logcap.contains("[" + serverid + "]") -+ assert topo.logcap.contains("binddn = " + args.binddn) -+ topo.logcap.flush() -+ -+ # Modify the config -+ args.basedn = different_suffix -+ modify_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert topo.logcap.contains(different_suffix) -+ topo.logcap.flush() -+ -+ # Remove an arg from the config -+ args.basedn = "" -+ modify_dsrc(inst, log, args) -+ display_dsrc(inst, topo.logcap.log, args) -+ assert not topo.logcap.contains(different_suffix) -+ topo.logcap.flush() -+ -+ # Remove the last entry, which should delete the file -+ delete_dsrc(inst, log, args) -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ assert not os.path.exists(dsrc_file) -+ -+ # Make sure display fails -+ with pytest.raises(ValueError): -+ display_dsrc(inst, log, args) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl -index fe9bc10e9..69f069297 100755 ---- a/src/lib389/cli/dsctl -+++ b/src/lib389/cli/dsctl -@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls - from lib389.cli_ctl import health as cli_health - from lib389.cli_ctl import nsstate as cli_nsstate - from lib389.cli_ctl import dbgen as cli_dbgen -+from lib389.cli_ctl import dsrc as cli_dsrc - from lib389.cli_ctl.instance import instance_remove_all - from lib389.cli_base import ( - disconnect_instance, -@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers) - cli_health.create_parser(subparsers) - cli_nsstate.create_parser(subparsers) - cli_dbgen.create_parser(subparsers) -+cli_dsrc.create_parser(subparsers) - - argcomplete.autocomplete(parser) - -diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py -new file mode 100644 -index 000000000..e49c7f819 ---- 
/dev/null -+++ b/src/lib389/lib389/cli_ctl/dsrc.py -@@ -0,0 +1,312 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+ -+import json -+from os.path import expanduser -+from os import path, remove -+from ldapurl import isLDAPUrl -+from ldap.dn import is_dn -+import configparser -+ -+ -+def create_dsrc(inst, log, args): -+ """Create the .dsrc file -+ -+ [instance] -+ uri = ldaps://hostname:port -+ basedn = dc=example,dc=com -+ binddn = uid=user,.... -+ saslmech = [EXTERNAL|PLAIN] -+ tls_cacertdir = /path/to/cacertdir -+ tls_cert = /path/to/user.crt -+ tls_key = /path/to/user.key -+ tls_reqcert = [never, hard, allow] -+ starttls = [true, false] -+ pwdfile = /path/to/file -+ """ -+ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ -+ # Verify this section does not already exist -+ instances = config.sections() -+ if inst.serverid in instances: -+ raise ValueError("There is already a configuration section for this instance!") -+ -+ # Process and validate the args -+ config[inst.serverid] = {} -+ -+ if args.uri is not None: -+ if not isLDAPUrl(args.uri): -+ raise ValueError("The uri is not a valid LDAP URL!") -+ if args.uri.startswith("ldapi"): -+ # We must use EXTERNAL saslmech for LDAPI -+ args.saslmech = "EXTERNAL" -+ config[inst.serverid]['uri'] = args.uri -+ if args.basedn is not None: -+ if not is_dn(args.basedn): -+ raise ValueError("The basedn is not a valid DN!") -+ config[inst.serverid]['basedn'] = args.basedn -+ if args.binddn is not None: -+ if not is_dn(args.binddn): -+ raise ValueError("The binddn is not a valid DN!") -+ config[inst.serverid]['binddn'] = args.binddn -+ if args.saslmech is not None: -+ if args.saslmech not in ['EXTERNAL', 'PLAIN']: -+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") -+ config[inst.serverid]['saslmech'] = args.saslmech -+ if args.tls_cacertdir is not None: -+ if not path.exists(args.tls_cacertdir): -+ raise ValueError('--tls-cacertdir directory does not exist!') -+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir -+ if args.tls_cert is not None: -+ if not path.exists(args.tls_cert): -+ raise ValueError('--tls-cert does not point to an existing file!') -+ config[inst.serverid]['tls_cert'] = args.tls_cert -+ if args.tls_key is not None: -+ if not path.exists(args.tls_key): -+ raise ValueError('--tls-key does not point to an existing file!') -+ config[inst.serverid]['tls_key'] = args.tls_key -+ if args.tls_reqcert is not None: -+ if args.tls_reqcert not in ['never', 'hard', 'allow']: -+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') -+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert -+ if args.starttls: -+ config[inst.serverid]['starttls'] = 'true' -+ if args.pwdfile is not None: -+ if not path.exists(args.pwdfile): -+ raise ValueError('--pwdfile does not exist!') -+ config[inst.serverid]['pwdfile'] = args.pwdfile -+ -+ if len(config[inst.serverid]) == 0: -+ # No args set -+ raise ValueError("You must set at least one argument for the new dsrc file!") -+ -+ # Print a preview of the config -+ log.info(f'Updating "{dsrc_file}" with:\n') -+ log.info(f' [{inst.serverid}]') -+ for k, v in config[inst.serverid].items(): -+ log.info(f' {k} = {v}') -+ -+ # Perform confirmation? -+ if not args.do_it: -+ while 1: -+ val = input(f'\nUpdate "{dsrc_file}" ? 
[yes]: ').rstrip().lower() -+ if val == '' or val == 'y' or val == 'yes': -+ break -+ if val == 'n' or val == 'no': -+ return -+ -+ # Now write the file -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ -+ -+def modify_dsrc(inst, log, args): -+ """Modify the instance config -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ -+ if path.exists(dsrc_file): -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ -+ # Verify we have a section to modify -+ instances = config.sections() -+ if inst.serverid not in instances: -+ raise ValueError("There is no configuration section for this instance to modify!") -+ -+ # Process and validate the args -+ if args.uri is not None: -+ if not isLDAPUrl(args.uri): -+ raise ValueError("The uri is not a valid LDAP URL!") -+ if args.uri.startswith("ldapi"): -+ # We must use EXTERNAL saslmech for LDAPI -+ args.saslmech = "EXTERNAL" -+ if args.uri == '': -+ del config[inst.serverid]['uri'] -+ else: -+ config[inst.serverid]['uri'] = args.uri -+ if args.basedn is not None: -+ if not is_dn(args.basedn): -+ raise ValueError("The basedn is not a valid DN!") -+ if args.basedn == '': -+ del config[inst.serverid]['basedn'] -+ else: -+ config[inst.serverid]['basedn'] = args.basedn -+ if args.binddn is not None: -+ if not is_dn(args.binddn): -+ raise ValueError("The binddn is not a valid DN!") -+ if args.binddn == '': -+ del config[inst.serverid]['binddn'] -+ else: -+ config[inst.serverid]['binddn'] = args.binddn -+ if args.saslmech is not None: -+ if args.saslmech not in ['EXTERNAL', 'PLAIN']: -+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") -+ if args.saslmech == '': -+ del config[inst.serverid]['saslmech'] -+ else: -+ config[inst.serverid]['saslmech'] = args.saslmech -+ if args.tls_cacertdir is not None: -+ if not path.exists(args.tls_cacertdir): -+ raise ValueError('--tls-cacertdir directory does not exist!') -+ if args.tls_cacertdir == '': -+ del config[inst.serverid]['tls_cacertdir'] -+ else: -+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir -+ if args.tls_cert is not None: -+ if not path.exists(args.tls_cert): -+ raise ValueError('--tls-cert does not point to an existing file!') -+ if args.tls_cert == '': -+ del config[inst.serverid]['tls_cert'] -+ else: -+ config[inst.serverid]['tls_cert'] = args.tls_cert -+ if args.tls_key is not None: -+ if not path.exists(args.tls_key): -+ raise ValueError('--tls-key does not point to an existing file!') -+ if args.tls_key == '': -+ del config[inst.serverid]['tls_key'] -+ else: -+ config[inst.serverid]['tls_key'] = args.tls_key -+ if args.tls_reqcert is not None: -+ if args.tls_reqcert not in ['never', 'hard', 'allow']: -+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') -+ if args.tls_reqcert == '': -+ del config[inst.serverid]['tls_reqcert'] -+ else: -+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert -+ if args.starttls: -+ config[inst.serverid]['starttls'] = 'true' -+ if args.cancel_starttls: -+ config[inst.serverid]['starttls'] = 'false' -+ if args.pwdfile is not None: -+ if not path.exists(args.pwdfile): -+ raise ValueError('--pwdfile does not exist!') -+ if args.pwdfile == '': -+ del config[inst.serverid]['pwdfile'] -+ else: -+ config[inst.serverid]['pwdfile'] = args.pwdfile -+ -+ # Okay now rewrite the file -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ else: -+ raise 
ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') -+ -+ -+def delete_dsrc(inst, log, args): -+ """Delete the .dsrc file -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ if path.exists(dsrc_file): -+ if not args.do_it: -+ # Get confirmation -+ while 1: -+ val = input(f'\nAre you sure you want to remove this instance\'s configuration ? [no]: ').rstrip().lower() -+ if val == 'y' or val == 'yes': -+ break -+ if val == '' or val == 'n' or val == 'no': -+ return -+ -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ instances = config.sections() -+ if inst.serverid not in instances: -+ raise ValueError("There is no configuration for this instance") -+ -+ # Update the config object -+ del config[inst.serverid] -+ -+ if len(config.sections()) == 0: -+ # The file would be empty so just delete it -+ try: -+ remove(dsrc_file) -+ log.info(f'Successfully removed: {dsrc_file}') -+ return -+ except OSError as e: -+ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}') -+ else: -+ # write the updated config -+ with open(dsrc_file, 'w') as configfile: -+ config.write(configfile) -+ else: -+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') -+ -+ log.info(f'Successfully updated: {dsrc_file}') -+ -+def display_dsrc(inst, log, args): -+ """Display the contents of the ~/.dsrc file -+ """ -+ dsrc_file = f'{expanduser("~")}/.dsrc' -+ -+ if not path.exists(dsrc_file): -+ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!') -+ -+ config = configparser.ConfigParser() -+ config.read(dsrc_file) -+ instances = config.sections() -+ -+ for inst_section in instances: -+ if args.json: -+ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4)) -+ else: -+ log.info(f'[{inst_section}]') -+ for k, v in config[inst_section].items(): -+ log.info(f'{k} = {v}') -+ log.info("") -+ -+ -+def create_parser(subparsers): -+ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file") -+ subcommands = dsrc_parser.add_subparsers(help="action") -+ -+ # Create .dsrc file -+ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file') -+ dsrc_create_parser.set_defaults(func=create_dsrc) -+ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.") -+ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.") -+ dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used for authentication.") -+ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.") -+ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.") -+ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.") -+ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.") -+ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'") -+ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") -+ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.") -+ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.") -+ -+ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file') -+
dsrc_modify_parser.set_defaults(func=modify_dsrc) -+ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.") -+ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.") -+ dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used for authentication.") -+ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.") -+ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.") -+ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.") -+ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.") -+ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'") -+ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") -+ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.") -+ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.") -+ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.") -+ -+ # Delete the instance from the .dsrc file -+ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.') -+ dsrc_delete_parser.set_defaults(func=delete_dsrc) -+ dsrc_delete_parser.add_argument('--do-it', action='store_true', -+ help="Delete this instance's configuration from the .dsrc file.") -+ -+ # Display .dsrc file -+ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.') -+ dsrc_display_parser.set_defaults(func=display_dsrc) --- -2.26.2 - diff --git a/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch b/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch new file mode 100644 index 0000000..ce8b124 --- /dev/null +++ b/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch @@ -0,0 +1,37 @@ +From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 21 May 2021 13:09:12 -0400 +Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin + +Description: Enable the dormant interval feature in DNA plugin + +relates: https://github.com/389ds/389-ds-base/issues/4773 + +Review by: mreynolds (one line commit rule) +--- + ldap/servers/plugins/dna/dna.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index bf6b74a99..928a3f54a 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) + /* Set the default interval to 1 */ + entry->interval = 1; + +-#ifdef DNA_ENABLE_INTERVAL + value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL); + if (value) { + entry->interval = strtoull(value, 0, 0); +@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval); +-#endif + + value = slapi_entry_attr_get_charptr(e, DNA_GENERATE); + if (value) { +-- +2.26.3 + diff --git a/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch b/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch deleted file mode 100644 index 1a0df22..0000000 --- a/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch +++ /dev/null @@ -1,902 +0,0 @@ -From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Thu, 19 Nov 2020 10:21:10 +0100 -Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails - with unsupported operand (#4444) - -Bug description: -Got TypeError exception when usign: - dsctl -v slapd-localhost ldifgen users --suffix - dc=example,dc=com --parent ou=people,dc=example,dc=com - --number 100000 --generic --start-idx=50 -The reason is that by default python parser provides - value for numeric options: - as an integer if specified by "--option value" or - as a string if specified by "--option=value" - -Fix description: -convert the numeric parameters to integer when using it. - options impacted are: - - in users subcommand: --number , --start-idx - - in mod-load subcommand: --num-users, --add-users, - --del-users, --modrdn-users, --mod-users - -FYI: An alternative solution would have been to indicate the -parser that these values are an integer. But two reasons - leaded me to implement the first solution: - - first solution fix the problem for all users while the - second one fixes only dsctl command. - - first solution is easier to test: - I just added a new test file generated by a script - that duplicated existing ldifgen test, renamed the - test cases and replaced the numeric arguments by - strings. - Second solution would need to redesign the test framework - to be able to test the parser. - -relates: https://github.com/389ds/389-ds-base/issues/4440 - -Reviewed by: - -Platforms tested: F32 - -(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892) ---- - .../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++ - src/lib389/lib389/cli_ctl/dbgen.py | 10 +- - src/lib389/lib389/dbgen.py | 3 + - 3 files changed, 814 insertions(+), 5 deletions(-) - create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py - -diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py -new file mode 100644 -index 000000000..80ff63417 ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py -@@ -0,0 +1,806 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import time -+ -+""" -+ This file contains tests similar to dbgen_test.py -+ except that parameters that are numbers are expressed as strings -+ (to mimic the parameters parser default behavior which returns an -+ int when parsing "option value" and a string when parsing "option=value") -+ This file has been generated by using: -+sed ' -+9r z1 -+s/ test_/ test_usan/ -+/args.*= [0-9]/s,[0-9]*$,"&", -+/:id:/s/.$/1/ -+' dbgen_test.py > dbgen_test_usan.py -+ ( with z1 file containing this comment ) -+""" -+ -+ -+ -+import subprocess -+import pytest -+ -+from lib389.cli_ctl.dbgen import * -+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates -+from lib389.idm.account import Accounts -+from lib389.idm.group import Groups -+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles -+from lib389.tasks import * -+from lib389.utils import * -+from lib389.topologies import topology_st -+from lib389.cli_base import FakeArgs -+ -+pytestmark = pytest.mark.tier0 -+ -+LOG_FILE = '/tmp/dbgen.log' -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def set_log_file_and_ldif(topology_st, request): -+ global ldif_file -+ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' -+ -+ fh = logging.FileHandler(LOG_FILE) -+ fh.setLevel(logging.DEBUG) -+ log.addHandler(fh) -+ -+ def fin(): -+ log.info('Delete files') -+ os.remove(LOG_FILE) -+ os.remove(ldif_file) -+ -+ request.addfinalizer(fin) -+ -+ -+def run_offline_import(instance, ldif_file): -+ log.info('Stopping the server and running offline import...') -+ instance.stop() -+ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, -+ import_file=ldif_file) -+ instance.start() -+ -+ -+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): -+ LDAP_MOD = '/usr/bin/ldapmodify' -+ log.info('Add entries from ldif file with ldapmodify') -+ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, -+ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file]) -+ if output_to_check is not None: -+ assert output_to_check in ensure_str(result) -+ -+ -+def check_value_in_log_and_reset(content_list): -+ with open(LOG_FILE, 'r+') as f: -+ file_content = f.read() -+ log.info('Check if content is present in output') -+ for item in content_list: -+ assert item in file_content -+ -+ log.info('Reset log file for next test') -+ f.truncate(0) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create ldif with users -+ -+ :id: 426b5b94-9923-454d-a736-7e71ca985e91 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with users -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4.
Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.suffix = DEFAULT_SUFFIX -+ args.parent = 'ou=people,dc=example,dc=com' -+ args.number = "1000" -+ args.rdn_cn = False -+ args.generic = True -+ args.start_idx = "50" -+ args.localize = False -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'suffix={}'.format(args.suffix), -+ 'parent={}'.format(args.parent), -+ 'number={}'.format(args.number), -+ 'rdn-cn={}'.format(args.rdn_cn), -+ 'generic={}'.format(args.generic), -+ 'start-idx={}'.format(args.start_idx), -+ 'localize={}'.format(args.localize), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create users ldif') -+ dbgen_create_users(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ run_offline_import(standalone, ldif_file) -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create ldif with group -+ -+ :id: 97207413-9a93-4065-a5ec-63aa93801a31 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with group -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'myGroup' -+ args.parent = 'ou=groups,dc=example,dc=com' -+ args.suffix = DEFAULT_SUFFIX -+ args.number = "1" -+ args.num_members = "1000" -+ args.create_members = True -+ args.member_attr = 'uniquemember' -+ args.member_parent = 'ou=people,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'number={}'.format(args.number), -+ 'suffix={}'.format(args.suffix), -+ 'num-members={}'.format(args.num_members), -+ 'create-members={}'.format(args.create_members), -+ 'member-parent={}'.format(args.member_parent), -+ 'member-attr={}'.format(args.member_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create group ldif') -+ dbgen_create_groups(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ log.info('Check that group is imported') -+ groups = Groups(standalone, DEFAULT_SUFFIX) -+ assert groups.exists(args.NAME + '-1') -+ new_group = groups.get(args.NAME + '-1') -+ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with classic COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'classic' -+ args.NAME = 'My_Postal_Def' -+ args.parent = 'ou=cos definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = 'businessCategory' -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-specifier={}'.format(args.cos_specifier), -+ 'cos-template={}'.format(args.cos_template), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosClassicDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosTemplateDN', args.cos_template) -+ assert new_cos.present('cosSpecifier', args.cos_specifier) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with pointer COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'pointer' -+ args.NAME = 'My_Postal_Def_pointer' -+ args.parent = 'ou=cos pointer definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = None -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-template={}'.format(args.cos_template), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosPointerDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosTemplateDN', args.cos_template) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS definition -+ -+ :id: ab4b799e-e801-432a-a61d-badad2628201 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with indirect COS definition -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.type = 'indirect' -+ args.NAME = 'My_Postal_Def_indirect' -+ args.parent = 'ou=cos indirect definitions,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_specifier = 'businessCategory' -+ args.cos_attr = ['postalcode', 'telephonenumber'] -+ args.cos_template = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'type={}'.format(args.type), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-specifier={}'.format(args.cos_specifier), -+ 'cos-attr={}'.format(args.cos_attr), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS definition ldif') -+ dbgen_create_cos_def(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS definition is imported') -+ cos_def = CosIndirectDefinitions(standalone, args.parent) -+ assert cos_def.exists(args.NAME) -+ new_cos = cos_def.get(args.NAME) -+ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier) -+ assert new_cos.present('cosAttribute', args.cos_attr[0]) -+ assert new_cos.present('cosAttribute', args.cos_attr[1]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a COS template -+ -+ :id: 544017c7-4a82-4e7d-a047-00b68a28e071 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with COS template -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'My_Template' -+ args.parent = 'ou=cos templates,dc=example,dc=com' -+ args.create_parent = True -+ args.cos_priority = "1" -+ args.cos_attr_val = 'postalcode:12345' -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'cos-priority={}'.format(args.cos_priority), -+ 'cos-attr-val={}'.format(args.cos_attr_val), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create COS template ldif') -+ dbgen_create_cos_tmp(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that COS template is imported') -+ cos_temp = CosTemplates(standalone, args.parent) -+ assert cos_temp.exists(args.NAME) -+ new_cos = cos_temp.get(args.NAME) -+ assert new_cos.present('cosPriority', str(args.cos_priority)) -+ assert new_cos.present('postalcode', '12345') -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a managed role -+ -+ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with managed role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ -+ args.NAME = 'My_Managed_Role' -+ args.parent = 'ou=managed roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'managed' -+ args.filter = None -+ args.role_dn = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create managed role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that managed role is imported') -+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a filtered role -+ -+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with filtered role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ -+ args.NAME = 'My_Filtered_Role' -+ args.parent = 'ou=filtered roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'filtered' -+ args.filter = '"objectclass=posixAccount"' -+ args.role_dn = None -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'filter={}'.format(args.filter), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create filtered role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that filtered role is imported') -+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ new_role = roles.get(args.NAME) -+ assert new_role.present('nsRoleFilter', args.filter) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create a nested role -+ -+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate ldif with nested role -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.NAME = 'My_Nested_Role' -+ args.parent = 'ou=nested roles,dc=example,dc=com' -+ args.create_parent = True -+ args.type = 'nested' -+ args.filter = None -+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'NAME={}'.format(args.NAME), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'type={}'.format(args.type), -+ 'role-dn={}'.format(args.role_dn), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create nested role ldif') -+ dbgen_create_role(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) -+ -+ log.info('Check that nested role is imported') -+ roles = NestedRoles(standalone, DEFAULT_SUFFIX) -+ assert roles.exists(args.NAME) -+ new_role = roles.get(args.NAME) -+ assert new_role.present('nsRoleDN', args.role_dn[0]) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif -+ -+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate modification ldif -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.parent = DEFAULT_SUFFIX -+ args.create_users = True -+ args.delete_users = True -+ args.create_parent = False -+ args.num_users = "1000" -+ args.add_users = "100" -+ args.del_users = "999" -+ args.modrdn_users = "100" -+ args.mod_users = "10" -+ args.mod_attrs = ['cn', 'uid', 'sn'] -+ args.randomize = False -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'create-users={}'.format(args.create_users), -+ 'parent={}'.format(args.parent), -+ 'create-parent={}'.format(args.create_parent), -+ 'delete-users={}'.format(args.delete_users), -+ 'num-users={}'.format(args.num_users), -+ 'add-users={}'.format(args.add_users), -+ 'del-users={}'.format(args.del_users), -+ 'modrdn-users={}'.format(args.modrdn_users), -+ 'mod-users={}'.format(args.mod_users), -+ 'mod-attrs={}'.format(args.mod_attrs), -+ 'randomize={}'.format(args.randomize), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created LDIF file: {}'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create modification ldif') -+ dbgen_create_mods(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file) -+ -+ log.info('Check that some accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1798394 -+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") -+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): -+ """Test ldifgen (formerly dbgen) tool to create nested ldif -+ -+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581 -+ :setup: Standalone instance -+ :steps: -+ 1. Create DS instance -+ 2. Run ldifgen to generate nested ldif -+ 3. Import generated ldif to database -+ 4. Check it was properly imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ -+ standalone = topology_st.standalone -+ -+ args = FakeArgs() -+ args.suffix = DEFAULT_SUFFIX -+ args.node_limit = "100" -+ args.num_users = "600" -+ args.ldif_file = ldif_file -+ -+ content_list = ['Generating LDIF with the following options:', -+ 'suffix={}'.format(args.suffix), -+ 'node-limit={}'.format(args.node_limit), -+ 'num-users={}'.format(args.num_users), -+ 'ldif-file={}'.format(args.ldif_file), -+ 'Writing LDIF', -+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] -+ -+ log.info('Run ldifgen to create nested ldif') -+ dbgen_create_nested(standalone, log, args) -+ -+ log.info('Check if file exists') -+ assert os.path.exists(ldif_file) -+ -+ check_value_in_log_and_reset(content_list) -+ -+ log.info('Get number of accounts before import') -+ accounts = Accounts(standalone, DEFAULT_SUFFIX) -+ count_account = len(accounts.filter('(uid=*)')) -+ count_ou = len(accounts.filter('(ou=*)')) -+ -+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db -+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 -+ with pytest.raises(subprocess.CalledProcessError): -+ run_ldapmodify_from_file(standalone, ldif_file) -+ -+ standalone.restart() -+ -+ log.info('Check that accounts are imported') -+ assert len(accounts.filter('(uid=*)')) > count_account -+ assert len(accounts.filter('(ou=*)')) > count_ou -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py -index 7bc3892ba..058342fb1 100644 ---- a/src/lib389/lib389/cli_ctl/dbgen.py -+++ b/src/lib389/lib389/cli_ctl/dbgen.py -@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args): - props = { - "createUsers": args.create_users, - "deleteUsers": args.delete_users, -- "numUsers": args.num_users, -+ "numUsers": int(args.num_users), - "parent": args.parent, - "createParent": args.create_parent, -- "addUsers": args.add_users, -- "delUsers": args.del_users, -- "modrdnUsers": args.modrdn_users, -- "modUsers": args.mod_users, -+ "addUsers": int(args.add_users), -+ "delUsers": int(args.del_users), -+ "modrdnUsers": int(args.modrdn_users), -+ "modUsers": int(args.mod_users), - "random": args.randomize, - "modAttrs": args.mod_attrs - } -diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py -index 6273781a2..10fb200f7 100644 ---- a/src/lib389/lib389/dbgen.py -+++ b/src/lib389/lib389/dbgen.py -@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name=" - """ - Generate an LDIF of randomly named entries - """ -+ # Lets insure that integer parameters are not string -+ number=int(number) -+ startIdx=int(startIdx) - familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames') - givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames') - familynames = [] --- -2.26.2 - diff --git a/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch b/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch new file mode 100644 index 0000000..b4d22df --- /dev/null +++ b/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch @@ -0,0 +1,926 @@ +From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Thu, 20 May 2021 14:24:25 +0200 
+Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)
+
+Description: DB locks get exhausted because of unindexed internal searches
+(under a transaction). Indexing those searches is the way to prevent exhaustion.
+If the DB locks get exhausted during a txn, it leads to a DB panic, and the
+subsequent recovery can fail. That forces a full reinit of the instance on
+which the DB locks were exhausted.
+
+Add three attributes to the global BDB config: "nsslapd-db-locks-monitoring-enabled",
+"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
+By default, nsslapd-db-locks-monitoring-enabled is turned on,
+nsslapd-db-locks-monitoring-threshold is set to 90%, and
+nsslapd-db-locks-monitoring-pause is set to 500ms.
+
+When the current lock count crosses the configured threshold (90% of the
+maximum by default), returning the next search candidate fails until the
+lock maximum is increased or the current locks are released.
+The monitoring thread runs at a configurable interval, 500ms by default.
+
+Add the settings to the UI and CLI tools.
+
+Fixes: https://github.com/389ds/389-ds-base/issues/4623
+
+Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
+---
+ .../suites/monitor/db_locks_monitor_test.py | 251 ++++++++++++++++++
+ ldap/servers/slapd/back-ldbm/back-ldbm.h | 13 +-
+ .../slapd/back-ldbm/db-bdb/bdb_config.c | 99 +++++++
+ .../slapd/back-ldbm/db-bdb/bdb_layer.c | 85 ++++++
+ ldap/servers/slapd/back-ldbm/init.c | 3 +
+ ldap/servers/slapd/back-ldbm/ldbm_config.c | 3 +
+ ldap/servers/slapd/back-ldbm/ldbm_config.h | 3 +
+ ldap/servers/slapd/back-ldbm/ldbm_search.c | 13 +
+ ldap/servers/slapd/libglobs.c | 4 +-
+ src/cockpit/389-console/src/css/ds.css | 4 +
+ src/cockpit/389-console/src/database.jsx | 7 +
+ src/cockpit/389-console/src/index.html | 2 +-
+ .../src/lib/database/databaseConfig.jsx | 88 +++++-
+ src/lib389/lib389/backend.py | 3 +
+ src/lib389/lib389/cli_conf/backend.py | 10 +
+ 15 files changed, 576 insertions(+), 12 deletions(-)
+ create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
+
+diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
+new file mode 100644
+index 000000000..7f9938f30
+--- /dev/null
++++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
+@@ -0,0 +1,251 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2021 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
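A minimal usage sketch for the three new attributes, assuming a running standalone instance `inst` reachable through lib389 (this mirrors what the test below does; it is not part of the patch):

    from lib389.config import BDB_LDBMConfig

    bdb_config = BDB_LDBMConfig(inst)
    bdb_config.replace("nsslapd-db-locks-monitoring-enabled", "on")
    bdb_config.replace("nsslapd-db-locks-monitoring-threshold", "90")  # percent; valid range is 70-95
    bdb_config.replace("nsslapd-db-locks-monitoring-pause", "500")     # milliseconds between checks
    inst.restart()  # enabled/threshold changes only take effect after a restart; pause applies immediately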
++# --- END COPYRIGHT BLOCK --- ++# ++import logging ++import pytest ++import datetime ++import subprocess ++from multiprocessing import Process, Queue ++from lib389 import pid_from_file ++from lib389.utils import ldap, os ++from lib389._constants import DEFAULT_SUFFIX, ReplicaRole ++from lib389.cli_base import LogCapture ++from lib389.idm.user import UserAccounts ++from lib389.idm.organizationalunit import OrganizationalUnits ++from lib389.tasks import AccessLog ++from lib389.backend import Backends ++from lib389.ldclt import Ldclt ++from lib389.dbgen import dbgen_users ++from lib389.tasks import ImportTask ++from lib389.index import Indexes ++from lib389.plugins import AttributeUniquenessPlugin ++from lib389.config import BDB_LDBMConfig ++from lib389.monitor import MonitorLDBM ++from lib389.topologies import create_topology, _remove_ssca_db ++ ++pytestmark = pytest.mark.tier2 ++db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False), ++ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. " ++ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.") ++ ++DEBUGGING = os.getenv('DEBUGGING', default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++ ++def _kill_ns_slapd(inst): ++ pid = str(pid_from_file(inst.ds_paths.pid_file)) ++ cmd = ['kill', '-9', pid] ++ subprocess.Popen(cmd, stdout=subprocess.PIPE) ++ ++ ++@pytest.fixture(scope="function") ++def topology_st_fn(request): ++ """Create DS standalone instance for each test case""" ++ ++ topology = create_topology({ReplicaRole.STANDALONE: 1}) ++ ++ def fin(): ++ # Kill the hanging process at the end of test to prevent failures in the following tests ++ if DEBUGGING: ++ [_kill_ns_slapd(inst) for inst in topology] ++ else: ++ [_kill_ns_slapd(inst) for inst in topology] ++ assert _remove_ssca_db(topology) ++ [inst.stop() for inst in topology if inst.exists()] ++ [inst.delete() for inst in topology if inst.exists()] ++ request.addfinalizer(fin) ++ ++ topology.logcap = LogCapture() ++ return topology ++ ++ ++@pytest.fixture(scope="function") ++def setup_attruniq_index_be_import(topology_st_fn): ++ """Enable Attribute Uniqueness, disable indexes and ++ import 120000 entries to the default backend ++ """ ++ inst = topology_st_fn.standalone ++ ++ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') ++ inst.config.set('nsslapd-plugin-logging', 'on') ++ inst.restart() ++ ++ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config") ++ attruniq.create(properties={'cn': 'attruniq'}) ++ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: ++ attruniq.add_unique_attribute(cn) ++ attruniq.add_unique_subtree(DEFAULT_SUFFIX) ++ attruniq.enable_all_subtrees() ++ attruniq.enable() ++ ++ indexes = Indexes(inst) ++ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: ++ indexes.ensure_state(properties={ ++ 'cn': cn, ++ 'nsSystemIndex': 'false', ++ 'nsIndexType': 'none'}) ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "130000") ++ inst.restart() ++ ++ ldif_dir = inst.get_ldif_dir() ++ import_ldif = ldif_dir + '/perf_import.ldif' ++ ++ # Valid online import ++ import_task = ImportTask(inst) ++ dbgen_users(inst, 120000, 
import_ldif, DEFAULT_SUFFIX, entry_name="userNew") ++ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) ++ import_task.wait() ++ assert import_task.is_complete() ++ ++ ++def create_user_wrapper(q, users): ++ try: ++ users.create_test_user() ++ except Exception as ex: ++ q.put(ex) ++ ++ ++def spawn_worker_thread(function, users, log, timeout, info): ++ log.info(f"Starting the thread - {info}") ++ q = Queue() ++ p = Process(target=function, args=(q,users,)) ++ p.start() ++ ++ log.info(f"Waiting for {timeout} seconds for the thread to finish") ++ p.join(timeout) ++ ++ if p.is_alive(): ++ log.info("Killing the thread as it's still running") ++ p.terminate() ++ p.join() ++ raise RuntimeError(f"Function call was aborted: {info}") ++ result = q.get() ++ if isinstance(result, Exception): ++ raise result ++ else: ++ return result ++ ++ ++@db_locks_monitoring_ack ++@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")]) ++def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold): ++ """Test that when all of the locks are exhausted the instance still working ++ and database is not corrupted ++ ++ :id: 299108cc-04d8-4ddc-b58e-99157fccd643 ++ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled ++ :steps: 1. Set nsslapd-db-locks to 11000 ++ 2. Check that we stop acquiring new locks when the threshold is reached ++ 3. Check that we can regulate a pause interval for DB locks monitoring thread ++ 4. Make sure the feature works for different backends on the same suffix ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ inst = topology_st_fn.standalone ++ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com' ++ ++ backends = Backends(inst) ++ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX, ++ 'name': ADDITIONAL_SUFFIX[-3:]}) ++ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ++ ous.create(properties={'ou': 'newpeople'}) ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "11000") ++ ++ # Restart server ++ inst.restart() ++ ++ for lock_enabled in ["on", "off"]: ++ for lock_pause in ["100", "500", "1000"]: ++ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled) ++ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold) ++ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) ++ inst.restart() ++ ++ if lock_enabled == "off": ++ raised_exception = (RuntimeError, ldap.SERVER_DOWN) ++ else: ++ raised_exception = ldap.OPERATIONS_ERROR ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ with pytest.raises(raised_exception): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'.") ++ # Restart because we already run out of locks and the next unindexed searches will fail eventually ++ if lock_enabled == "off": ++ _kill_ns_slapd(inst) ++ inst.restart() ++ ++ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None) ++ with pytest.raises(raised_exception): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'.") ++ # In case feature is disabled - restart for the clean up ++ if lock_enabled == "off": ++ _kill_ns_slapd(inst) ++ inst.restart() ++ ++ ++@db_locks_monitoring_ack ++def test_exhaust_db_locks_big_pause(topology_st_fn, 
setup_attruniq_index_be_import): ++ """Test that DB lock pause setting increases the wait interval value for the monitoring thread ++ ++ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6 ++ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled ++ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%) ++ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds) ++ 3. Make sure that the pause is successfully increased a few times in a row ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ """ ++ ++ inst = topology_st_fn.standalone ++ ++ bdb_config = BDB_LDBMConfig(inst) ++ bdb_config.replace("nsslapd-db-locks", "20000") ++ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause") ++ assert lock_pause == 500 ++ lock_pause = "10000" ++ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) ++ ++ # Restart server ++ inst.restart() ++ ++ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled") ++ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold") ++ assert lock_enabled == "on" ++ assert lock_threshold == 90 ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ start = datetime.datetime.now() ++ with pytest.raises(ldap.OPERATIONS_ERROR): ++ spawn_worker_thread(create_user_wrapper, users, log, 30, ++ f"Adding user with monitoring enabled='{lock_enabled}'; " ++ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'") ++ end = datetime.datetime.now() ++ time_delta = end - start ++ if time_delta.seconds < 9: ++ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. " ++ f"Finished the execution in {time_delta.seconds} seconds") ++ # In case something has failed - restart for the clean up ++ inst.restart() +diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h +index 571b0a58b..afb831c32 100644 +--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h +@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t; + #define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */ + #define DEFAULT_DBCACHE_SIZE 33554432 + #define DEFAULT_DBCACHE_SIZE_STR "33554432" ++#define DEFAULT_DBLOCK_PAUSE 500 ++#define DEFAULT_DBLOCK_PAUSE_STR "500" + #define DEFAULT_MODE 0600 + #define DEFAULT_ALLIDSTHRESHOLD 4000 + #define DEFAULT_IDL_TUNE 1 +@@ -575,12 +577,21 @@ struct ldbminfo + char *li_backend_implement; /* low layer backend implementation */ + int li_noparentcheck; /* check if parent exists on add */ + +- /* the next 3 fields are for the params that don't get changed until ++ /* db lock monitoring */ ++ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */ ++ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */ ++ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */ ++ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/ ++ uint32_t li_dblock_threshold_reached; ++ ++ /* the next 4 fields are for the params that don't get changed until + * the server is restarted (used by the admin console) + */ + char *li_new_directory; + uint64_t li_new_dbcachesize; + int li_new_dblock; ++ int32_t li_new_dblock_monitoring; ++ uint64_t li_new_dblock_threshold; + + int li_new_dbncache; + +diff --git 
a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +index 738b841aa..167644943 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap + return retval; + } + ++static void * ++bdb_config_db_lock_monitoring_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((intptr_t)(li->li_new_dblock_monitoring)); ++} ++ ++static int ++bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ int val = (int32_t)((intptr_t)value); ++ ++ if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ li->li_new_dblock_monitoring = val; ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set", ++ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n"); ++ } else { ++ li->li_new_dblock_monitoring = val; ++ li->li_dblock_monitoring = val; ++ } ++ } ++ ++ return retval; ++} ++ ++static void * ++bdb_config_db_lock_pause_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED))); ++} ++ ++static int ++bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ u_int32_t val = (u_int32_t)((uintptr_t)value); ++ ++ if (val == 0) { ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set", ++ "%s was set to '0'. 
The default value will be used (%s)", ++ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR); ++ val = DEFAULT_DBLOCK_PAUSE; ++ } ++ ++ if (apply) { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED); ++ } ++ return retval; ++} ++ ++static void * ++bdb_config_db_lock_threshold_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ ++ return (void *)((uintptr_t)(li->li_new_dblock_threshold)); ++} ++ ++static int ++bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ int retval = LDAP_SUCCESS; ++ u_int32_t val = (u_int32_t)((uintptr_t)value); ++ ++ if (val < 70 || val > 95) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", ++ CONFIG_DB_LOCKS_THRESHOLD, val); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set", ++ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", ++ CONFIG_DB_LOCKS_THRESHOLD, val); ++ retval = LDAP_OPERATIONS_ERROR; ++ return retval; ++ } ++ ++ if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ li->li_new_dblock_threshold = val; ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set", ++ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n"); ++ } else { ++ li->li_new_dblock_threshold = val; ++ li->li_dblock_threshold = val; ++ } ++ } ++ return retval; ++} ++ + static void * + bdb_config_dbcachesize_get(void *arg) + { +@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = { + {CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0}, + {CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {NULL, 0, NULL, NULL, NULL, 0}}; + + void +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 6cccad8e6..2f25f67a2 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -35,6 +35,8 @@ + (env)->txn_checkpoint((env), (kbyte), (min), (flags)) + #define MEMP_STAT(env, gsp, fsp, flags, malloc) \ + (env)->memp_stat((env), (gsp), (fsp), (flags)) ++#define LOCK_STAT(env, statp, flags, malloc) \ ++ (env)->lock_stat((env), (statp), (flags)) + #define MEMP_TRICKLE(env, pct, nwrotep) \ + (env)->memp_trickle((env), (pct), (nwrotep)) + #define LOG_ARCHIVE(env, listp, flags, malloc) \ +@@ -66,6 +68,7 
@@ + #define NEWDIR_MODE 0755 + #define DB_REGION_PREFIX "__db." + ++static int locks_monitoring_threadmain(void *param); + static int perf_threadmain(void *param); + static int checkpoint_threadmain(void *param); + static int trickle_threadmain(void *param); +@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li); + static int bdb_start_trickle_thread(struct ldbminfo *li); + static int bdb_start_perf_thread(struct ldbminfo *li); + static int bdb_start_txn_test_thread(struct ldbminfo *li); ++static int bdb_start_locks_monitoring_thread(struct ldbminfo *li); + static int trans_batch_count = 0; + static int trans_batch_limit = 0; + static int trans_batch_txn_min_sleep = 50; /* ms */ +@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode) + return return_value; + } + ++ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) { ++ return return_value; ++ } ++ + /* We need to free the memory to avoid a leak + * Also, we have to evaluate if the performance counter + * should be preserved or not for database restore. +@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li) + return return_value; + } + ++ + /* Performance thread */ + static int + perf_threadmain(void *param) +@@ -2910,6 +2919,82 @@ perf_threadmain(void *param) + return 0; + } + ++ ++/* ++ * create a thread for locks_monitoring_threadmain ++ */ ++static int ++bdb_start_locks_monitoring_thread(struct ldbminfo *li) ++{ ++ int return_value = 0; ++ if (li->li_dblock_monitoring) { ++ if (NULL == PR_CreateThread(PR_USER_THREAD, ++ (VFP)(void *)locks_monitoring_threadmain, li, ++ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE)) { ++ PRErrorCode prerr = PR_GetError(); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread", ++ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", ++ prerr, slapd_pr_strerror(prerr)); ++ return_value = -1; ++ } ++ } ++ return return_value; ++} ++ ++ ++/* DB Locks Monitoring thread */ ++static int ++locks_monitoring_threadmain(void *param) ++{ ++ int ret = 0; ++ uint64_t current_locks = 0; ++ uint64_t max_locks = 0; ++ uint32_t lock_exhaustion = 0; ++ PRIntervalTime interval; ++ struct ldbminfo *li = NULL; ++ ++ PR_ASSERT(NULL != param); ++ li = (struct ldbminfo *)param; ++ ++ dblayer_private *priv = li->li_dblayer_private; ++ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; ++ PR_ASSERT(NULL != priv); ++ ++ INCR_THREAD_COUNT(pEnv); ++ ++ while (!BDB_CONFIG(li)->bdb_stop_threads) { ++ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) { ++ DB_LOCK_STAT *lockstat = NULL; ++ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc); ++ if (0 == ret) { ++ current_locks = lockstat->st_nlocks; ++ max_locks = lockstat->st_maxlocks; ++ if (max_locks){ ++ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0); ++ } else { ++ lock_exhaustion = 0; ++ } ++ if ((li->li_dblock_threshold) && ++ (lock_exhaustion >= li->li_dblock_threshold)) { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED); ++ } else { ++ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED); ++ } ++ } ++ slapi_ch_free((void **)&lockstat); ++ } ++ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)); ++ DS_Sleep(interval); ++ } ++ ++ DECR_THREAD_COUNT(pEnv); ++ slapi_log_err(SLAPI_LOG_TRACE, 
"locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n"); ++ return 0; ++} ++ ++ + /* + * create a thread for deadlock_threadmain + */ +diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c +index 893776699..4165c8fad 100644 +--- a/ldap/servers/slapd/back-ldbm/init.c ++++ b/ldap/servers/slapd/back-ldbm/init.c +@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb) + /* Initialize the set of instances. */ + li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor); + ++ /* Init lock threshold value */ ++ li->li_dblock_threshold_reached = 0; ++ + /* ask the factory to give us space in the Connection object + * (only bulk import uses this) + */ +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 10cef250f..60884cf33 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] = + CONFIG_SERIAL_LOCK, + CONFIG_USE_LEGACY_ERRORCODE, + CONFIG_DB_DEADLOCK_POLICY, ++ CONFIG_DB_LOCKS_MONITORING, ++ CONFIG_DB_LOCKS_THRESHOLD, ++ CONFIG_DB_LOCKS_PAUSE, + ""}; + + /* Used to add an array of entries, like the one above and +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index 58e64799c..6fa8292eb 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -104,6 +104,9 @@ struct config_info + #define CONFIG_DB_VERBOSE "nsslapd-db-verbose" + #define CONFIG_DB_DEBUG "nsslapd-db-debug" + #define CONFIG_DB_LOCK "nsslapd-db-locks" ++#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled" ++#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold" ++#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause" + #define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions" + #define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem" + #define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem" +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c +index 1a7b510d4..6e22debde 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c +@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension) + slapi_pblock_get(pb, SLAPI_CONNECTION, &conn); + slapi_pblock_get(pb, SLAPI_OPERATION, &op); + ++ + if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) { + /* + * Start at the end of the list and work our way forward. Since a single +@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension) + + /* Find the next candidate entry and return it. */ + while (1) { ++ if (li->li_dblock_monitoring && ++ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) { ++ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry", ++ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold " ++ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). 
" ++ "Please, increase nsslapd-db-locks according to your needs.\n"); ++ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL); ++ delete_search_result_set(pb, &sr); ++ rc = SLAPI_FAIL_GENERAL; ++ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL); ++ goto bail; ++ } + + /* check for abandon */ + if (slapi_op_abandoned(pb) || (NULL == sr)) { +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 388616b36..db7d01bbc 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply) + #if 0 + debugHashTable(attr); + #endif +- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr); +- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr); ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr); ++ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr); + return LDAP_NO_SUCH_ATTRIBUTE; + } + +diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css +index 9248116e7..3cf50b593 100644 +--- a/src/cockpit/389-console/src/css/ds.css ++++ b/src/cockpit/389-console/src/css/ds.css +@@ -639,6 +639,10 @@ option { + padding-right: 0 !important; + } + ++.ds-vertical-scroll-auto { ++ overflow-y: auto !important; ++} ++ + .alert { + max-width: 750px; + } +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index efa3ce6d5..11cae972c 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -157,6 +157,7 @@ export class Database extends React.Component { + const attrs = config.attrs; + let db_cache_auto = false; + let import_cache_auto = false; ++ let dblocksMonitoring = false; + let dbhome = ""; + + if ('nsslapd-db-home-directory' in attrs) { +@@ -168,6 +169,9 @@ export class Database extends React.Component { + if (attrs['nsslapd-import-cache-autosize'] != "0") { + import_cache_auto = true; + } ++ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") { ++ dblocksMonitoring = true; ++ } + + this.setState(() => ( + { +@@ -187,6 +191,9 @@ export class Database extends React.Component { + txnlogdir: attrs['nsslapd-db-logdirectory'], + dbhomedir: dbhome, + dblocks: attrs['nsslapd-db-locks'], ++ dblocksMonitoring: dblocksMonitoring, ++ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'], ++ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'], + chxpoint: attrs['nsslapd-db-checkpoint-interval'], + compactinterval: attrs['nsslapd-db-compactdb-interval'], + importcacheauto: attrs['nsslapd-import-cache-autosize'], +diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html +index 1278844fc..fd0eeb669 100644 +--- a/src/cockpit/389-console/src/index.html ++++ b/src/cockpit/389-console/src/index.html +@@ -12,7 +12,7 @@ + + + +- ++ +
+ + +diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +index f6e662bca..6a71c138d 100644 +--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx ++++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component { + txnlogdir: this.props.data.txnlogdir, + dbhomedir: this.props.data.dbhomedir, + dblocks: this.props.data.dblocks, ++ dblocksMonitoring: this.props.data.dblocksMonitoring, ++ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold, ++ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + chxpoint: this.props.data.chxpoint, + compactinterval: this.props.data.compactinterval, + importcachesize: this.props.data.importcachesize, +@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component { + _txnlogdir: this.props.data.txnlogdir, + _dbhomedir: this.props.data.dbhomedir, + _dblocks: this.props.data.dblocks, ++ _dblocksMonitoring: this.props.data.dblocksMonitoring, ++ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold, ++ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + _chxpoint: this.props.data.chxpoint, + _compactinterval: this.props.data.compactinterval, + _importcachesize: this.props.data.importcachesize, +@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component { + _import_cache_auto: this.props.data.import_cache_auto, + }; + this.handleChange = this.handleChange.bind(this); ++ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this); + this.select_auto_cache = this.select_auto_cache.bind(this); + this.select_auto_import_cache = this.select_auto_import_cache.bind(this); + this.save_db_config = this.save_db_config.bind(this); +@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component { + }, this.handleChange(e)); + } + ++ select_db_locks_monitoring (val, e) { ++ this.setState({ ++ dblocksMonitoring: !this.state.dblocksMonitoring ++ }, this.handleChange(val, e)); ++ } ++ + handleChange(e) { + // Generic + const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value; +@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component { + cmd.push("--locks=" + this.state.dblocks); + requireRestart = true; + } ++ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) { ++ if (this.state.dblocksMonitoring) { ++ cmd.push("--locks-monitoring-enabled=on"); ++ } else { ++ cmd.push("--locks-monitoring-enabled=off"); ++ } ++ requireRestart = true; ++ } ++ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) { ++ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold); ++ requireRestart = true; ++ } ++ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) { ++ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause); ++ } + if (this.state._chxpoint != this.state.chxpoint) { + cmd.push("--checkpoint-interval=" + this.state.chxpoint); + requireRestart = true; +@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component { + let import_cache_form; + let db_auto_checked = false; + let import_auto_checked = false; ++ let dblocksMonitor = ""; ++ ++ if (this.state.dblocksMonitoring) { ++ dblocksMonitor =
++ DB Locks Threshold Percentage
++ DB Locks Pause Milliseconds
; ++ } + + if (this.state.db_cache_auto) { + db_cache_form =
+@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component { + + + +- +- +- Database Locks +- +- +- +- +- + + + Database Checkpoint Interval +@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component { + + + ++ ++ ++ Database Locks ++ ++ ++ ++ ++ ++ ++ ++
++ DB Locks Monitoring
++ Enable Monitoring
++ {dblocksMonitor}
+
+
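
The three monitoring attributes wired into the console state above can also be set directly through lib389 rather than the UI. A minimal sketch, assuming an already-connected DirSrv instance named `inst`; the attribute names come from this patch, while the helper name and values are only illustrative:

    from lib389.backend import DatabaseConfig

    def enable_db_locks_monitoring(inst, threshold='90', pause='500'):
        # Mirrors what save_db_config() above passes to dsconf:
        # --locks-monitoring-enabled / -threshold / -pause
        config = DatabaseConfig(inst)
        config.set([
            ('nsslapd-db-locks-monitoring-enabled', 'on'),
            ('nsslapd-db-locks-monitoring-threshold', threshold),  # percent, 70-95 per the CLI help below
            ('nsslapd-db-locks-monitoring-pause', pause),          # milliseconds between monitor checks
        ])
        inst.restart()  # enabling or changing the threshold requires a restart, as in the UI

As in the console handler, only the pause value takes effect without a restart.
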
+ +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index bcd7b383f..13bb27842 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject): + 'nsslapd-db-transaction-batch-max-wait', + 'nsslapd-db-logbuf-size', + 'nsslapd-db-locks', ++ 'nsslapd-db-locks-monitoring-enabled', ++ 'nsslapd-db-locks-monitoring-threshold', ++ 'nsslapd-db-locks-monitoring-pause', + 'nsslapd-db-private-import-mem', + 'nsslapd-import-cache-autosize', + 'nsslapd-cache-autosize', +diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py +index 6bfbcb036..722764d10 100644 +--- a/src/lib389/lib389/cli_conf/backend.py ++++ b/src/lib389/lib389/cli_conf/backend.py +@@ -46,6 +46,9 @@ arg_to_attr = { + 'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait', + 'logbufsize': 'nsslapd-db-logbuf-size', + 'locks': 'nsslapd-db-locks', ++ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled', ++ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold', ++ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause', + 'import_cache_autosize': 'nsslapd-import-cache-autosize', + 'cache_autosize': 'nsslapd-cache-autosize', + 'cache_autosize_split': 'nsslapd-cache-autosize-split', +@@ -998,6 +1001,13 @@ def create_parser(subparsers): + 'the batch count (only works when txn-batch-val is set)') + set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size') + set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks') ++ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value ' ++ 'set with "--locks-monitoring-threshold" ("on" by default)') ++ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are ' ++ 'acquired, the server will abort the searches while the number of locks ' ++ 'are not decreased. It helps to avoid DB corruption and long recovery.') ++ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time ' ++ 'that the monitoring thread spends waiting between checks.') + set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import ' + 'cache to be used during the the import process of LDIF files') + set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database ' +-- +2.26.3 + diff --git a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch deleted file mode 100644 index 17de2c9..0000000 --- a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 24 Nov 2020 19:22:49 +0100 -Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve - database RUV - consumer (Unavailable) (#4451) - -Bug Description: - -"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this -appears into the Cockpit web UI too. 
-The problem is that the bind credentials are not rightly propagated when trying to get -the consumers agreement status. Then supplier credntials are used instead and RUV -is searched anonymously because there is no bind dn in ldapi case. - -Fix Description: - -- Propagates the bind credentials when computing agreement status -- Add a credential cache because now a replica password could get asked several times: - when discovering the topology and - when getting the agreement maxcsn -- No testcase in 1.4.3 branch as the file modfied in master does not exists - -- Add a comment about nonlocal keyword - -Relates: #4449 - -Reviewers: - firstyear - droideck - mreynolds - -Issue 4449: Add a comment about nonlocal keyword - -(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab) ---- - src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++-- - src/lib389/lib389/replica.py | 16 ++++++++++++---- - 2 files changed, 23 insertions(+), 6 deletions(-) - -diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py -index 9dbaa320a..248972cba 100644 ---- a/src/lib389/lib389/cli_conf/replication.py -+++ b/src/lib389/lib389/cli_conf/replication.py -@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args): - - def get_repl_monitor_info(inst, basedn, log, args): - connection_data = dsrc_to_repl_monitor(DSRC_HOME, log) -+ credentials_cache = {} - - # Additional details for the connections to the topology - def get_credentials(host, port): -+ # credentials_cache is nonlocal to refer to the instance -+ # from enclosing function (get_repl_monitor_info)` -+ nonlocal credentials_cache -+ key = f'{host}:{port}' -+ if key in credentials_cache: -+ return credentials_cache[key] - found = False - if args.connections: - connections = args.connections -@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args): - binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip() - bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip() - -- return {"binddn": binddn, -- "bindpw": bindpw} -+ credentials = {"binddn": binddn, -+ "bindpw": bindpw} -+ credentials_cache[key] = credentials -+ return credentials - - repl_monitor = ReplicationMonitor(inst) - report_dict = repl_monitor.generate_report(get_credentials, args.json) -diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py -index c2ad2104d..3d89e61fb 100644 ---- a/src/lib389/lib389/replica.py -+++ b/src/lib389/lib389/replica.py -@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object): - else: - self._log = logging.getLogger(__name__) - -- def _get_replica_status(self, instance, report_data, use_json): -+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None): - """Load all of the status data to report - and add new hostname:port pairs for future processing -+ :type get_credentials: function - """ - - replicas_status = [] -@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object): - for agmt in agmts.list(): - host = agmt.get_attr_val_utf8_l("nsds5replicahost") - port = agmt.get_attr_val_utf8_l("nsds5replicaport") -+ if get_credentials is not None: -+ credentials = get_credentials(host, port) -+ binddn = credentials["binddn"] -+ bindpw = credentials["bindpw"] -+ else: -+ binddn = instance.binddn -+ bindpw = instance.bindpw - protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo') - # Supply protocol here because we need it only for connection - # and agreement status is already preformatted for the user output -@@ 
-2510,9 +2518,9 @@ class ReplicationMonitor(object): - if consumer not in report_data: - report_data[f"{consumer}:{protocol}"] = None - if use_json: -- agmts_status.append(json.loads(agmt.status(use_json=True))) -+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw))) - else: -- agmts_status.append(agmt.status()) -+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw)) - replicas_status.append({"replica_id": replica_id, - "replica_root": replica_root, - "replica_status": "Available", -@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object): - initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}" - # Do this on an initial instance to get the agreements to other instances - try: -- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json) -+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials) - except ldap.LDAPError as e: - self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}") - report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}] --- -2.26.2 - diff --git a/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch b/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch new file mode 100644 index 0000000..489f4b3 --- /dev/null +++ b/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch @@ -0,0 +1,33 @@ +From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Wed, 26 May 2021 16:07:43 +0200 +Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI + (#4783) + +(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022) +--- + ldap/servers/slapd/connection.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index c7a15e775..e0c1a52d2 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -1771,6 +1771,14 @@ connection_threadmain() + } + } + ++ /* ++ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done ++ * before replication session is properly set). ++ */ ++ if (replication_connection) { ++ operation_set_flag(op, OP_FLAG_REPLICATED); ++ } ++ + /* + * Call the do_ function to process this request. + */ +-- +2.26.3 + diff --git a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch deleted file mode 100644 index 70974ce..0000000 --- a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch +++ /dev/null @@ -1,63 +0,0 @@ -From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Mon, 30 Nov 2020 09:03:33 +0100 -Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong - cookie (#4467) - -Bug description: - This test case was incorrect. - During a refreshPersistent search, a cookie is sent - with the intermediate message that indicates the end of the refresh phase. - Then a second cookie is sent on the updated entry (group10) - I believed this test was successful some time ago but neither python-ldap - nor sync_repl changed (intermediate sent in post refresh). 
- So the testcase was never successful :( - -Fix description: - The fix is just to take into account the two expected cookies - -relates: https://github.com/389ds/389-ds-base/issues/4243 - -Reviewed by: Mark Reynolds - -Platforms tested: F31 ---- - .../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - -diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -index 79ec374bc..7b35537d5 100644 ---- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py -@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request): - sync_repl.start() - time.sleep(5) - -- # Add a test group just to check that sync_repl receives only one update -+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie - group.append(groups.create(properties={'cn': 'group%d' % 10})) - - # create users, that automember/memberof will generate nested updates -@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request): - time.sleep(10) - cookies = sync_repl.get_result() - -- # checking that the cookie list contains only one entry -- assert len(cookies) == 1 -- prev = 0 -+ # checking that the cookie list contains only two entries -+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh -+ # the the one from SyncStateControl related to the only updated entry (group10) -+ assert len(cookies) == 2 -+ prev = -1 - for cookie in cookies: - log.info('Check cookie %s' % cookie) - -- assert int(cookie) > 0 -+ assert int(cookie) >= 0 - assert int(cookie) < 1000 - assert int(cookie) > prev - prev = int(cookie) --- -2.26.2 - diff --git a/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch b/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch new file mode 100644 index 0000000..2121550 --- /dev/null +++ b/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch @@ -0,0 +1,1453 @@ +From c79630de8012a893ed3d1c46b41bc7871a07a3e2 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 26 May 2021 13:32:13 -0400 +Subject: [PATCH 11/12] Issue 4778 - RFE - Allow setting TOD for db compaction + and add task + +Description: Since database compaction can be costly it should be allowed + to set a time to execute it during offpeak hours. Once the + compaction interval has been met, it will wait for the configured + time of day to do the compaction. The default is just before + midnight: 23:59 + + A task was also created that can run compaction on demand, + and can also just target the replication changelog. This could + be used in conjunction with a cronjob for more complex + execution patterns. + +ASAN tested and approved. + +relates: https://github.com/389ds/389-ds-base/issues/4778 + +Reviewed by: spichugi(Thanks!) 
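
To make the "wait for the configured time of day" behavior concrete before the diff: once the compaction interval has elapsed, the thread keeps sleeping until the configured HH:MM has passed, and that target timestamp is latched so a sleep that crosses midnight cannot skip the compaction. A rough Python rendering of the helper the patch adds (illustrative only, not part of the change):

    import time

    def tod_expiration(expire_time):
        """Epoch timestamp of today's HH:MM (e.g. '23:59'); mirrors bdb_get_tod_expiration()."""
        now = time.time()
        tm = time.localtime(now)
        elapsed_today = tm.tm_hour * 3600 + tm.tm_min * 60 + tm.tm_sec
        midnight = now - elapsed_today  # start of today
        hour, minute = (int(p) for p in expire_time.split(':'))
        return midnight + hour * 3600 + minute * 60

    # Gate used by both compaction threads: compact only once the interval has
    # expired AND time.time() > tod_expiration(configured_time)
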
+--- + .../tests/suites/config/compact_test.py | 81 ++++++ + ldap/schema/01core389.ldif | 3 +- + ldap/servers/plugins/replication/cl5.h | 1 + + ldap/servers/plugins/replication/cl5_api.c | 70 ++++- + ldap/servers/plugins/replication/cl5_api.h | 2 +- + .../servers/plugins/replication/cl5_clcache.c | 3 - + ldap/servers/plugins/replication/cl5_config.c | 102 ++++++- + ldap/servers/plugins/replication/cl5_init.c | 2 +- + .../servers/plugins/replication/repl_shared.h | 2 + + ldap/servers/plugins/retrocl/retrocl.c | 1 - + .../slapd/back-ldbm/db-bdb/bdb_config.c | 79 ++++++ + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 258 ++++++++++++------ + .../slapd/back-ldbm/db-bdb/bdb_layer.h | 4 +- + ldap/servers/slapd/back-ldbm/init.c | 2 + + ldap/servers/slapd/back-ldbm/ldbm_config.h | 1 + + .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + + ldap/servers/slapd/filtercmp.c | 5 +- + ldap/servers/slapd/pblock.c | 17 +- + ldap/servers/slapd/slap.h | 2 + + ldap/servers/slapd/slapi-private.h | 1 + + ldap/servers/slapd/task.c | 102 ++++++- + src/cockpit/389-console/src/database.jsx | 1 + + .../src/lib/database/databaseConfig.jsx | 16 +- + src/lib389/lib389/_constants.py | 1 + + src/lib389/lib389/backend.py | 1 + + src/lib389/lib389/cli_conf/backend.py | 24 +- + src/lib389/lib389/cli_conf/replication.py | 3 + + src/lib389/lib389/tasks.py | 14 +- + 28 files changed, 689 insertions(+), 110 deletions(-) + create mode 100644 dirsrvtests/tests/suites/config/compact_test.py + +diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py +new file mode 100644 +index 000000000..1f1c097e4 +--- /dev/null ++++ b/dirsrvtests/tests/suites/config/compact_test.py +@@ -0,0 +1,81 @@ ++import logging ++import pytest ++import os ++import time ++from lib389.tasks import DBCompactTask ++from lib389.backend import DatabaseConfig ++from lib389.replica import Changelog5 ++from lib389.topologies import topology_m1 as topo ++ ++log = logging.getLogger(__name__) ++ ++ ++def test_compact_db_task(topo): ++ """Specify a test case purpose or name here ++ ++ :id: 1b3222ef-a336-4259-be21-6a52f76e1859 ++ :setup: Standalone Instance ++ :steps: ++ 1. Create task ++ 2. Check task was successful ++ 3. Check errors log to show task was run ++ 3. Create task just for replication ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ inst = topo.ms["supplier1"] ++ ++ task = DBCompactTask(inst) ++ task.create() ++ task.wait() ++ assert task.get_exit_code() == 0 ++ ++ # Check errors log to make sure task actually compacted db ++ assert inst.searchErrorsLog("Compacting databases") ++ inst.deleteErrorLogs(restart=False) ++ ++ ++def test_compaction_interval_and_time(topo): ++ """Specify a test case purpose or name here ++ ++ :id: f361bee9-d7e7-4569-9255-d7b60dd9d92e ++ :setup: Supplier Instance ++ :steps: ++ 1. Configure compact interval and time for database and changelog ++ 2. Check compaction occurs as expected ++ :expectedresults: ++ 1. Success ++ 2. 
Success ++ """ ++ ++ inst = topo.ms["supplier1"] ++ ++ # Configure DB compaction ++ config = DatabaseConfig(inst) ++ config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')]) ++ ++ # Configure changelog compaction ++ cl5 = Changelog5(inst) ++ cl5.replace_many( ++ ('nsslapd-changelogcompactdb-interval', '2'), ++ ('nsslapd-changelogcompactdb-time', '00:01'), ++ ('nsslapd-changelogtrim-interval', '2') ++ ) ++ inst.deleteErrorLogs() ++ ++ # Check is compaction occurred ++ time.sleep(6) ++ assert inst.searchErrorsLog("Compacting databases") ++ assert inst.searchErrorsLog("compacting replication changelogs") ++ inst.deleteErrorLogs(restart=False) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) ++ +diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif +index 9e9a26c21..0c73e5114 100644 +--- a/ldap/schema/01core389.ldif ++++ b/ldap/schema/01core389.ldif +@@ -285,6 +285,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2310 NAME 'nsds5ReplicaFlowControlWindow + attributeTypes: ( 2.16.840.1.113730.3.1.2311 NAME 'nsds5ReplicaFlowControlPause' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2313 NAME 'nsslapd-changelogtrim-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2314 NAME 'nsslapd-changelogcompactdb-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) ++attributeTypes: ( 2.16.840.1.113730.3.1.2385 NAME 'nsslapd-changelogcompactdb-time' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2315 NAME 'nsDS5ReplicaWaitForAsyncResults' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2316 NAME 'nsslapd-auditfaillog-maxlogsize' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2317 NAME 'nsslapd-auditfaillog-logrotationsync-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +@@ -345,5 +346,5 @@ objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape + objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' ) + objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' ) + objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory 
Server' ) +-objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' ) ++objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-changelogcompactdb-time $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.337 NAME 'rewriterEntry' DESC '' SUP top MUST ( nsslapd-libPath ) MAY ( cn $ nsslapd-filterrewriter $ nsslapd-returnedAttrRewriter ) X-ORIGIN '389 Directory Server' ) +diff --git a/ldap/servers/plugins/replication/cl5.h b/ldap/servers/plugins/replication/cl5.h +index 2af57e369..99ea1c6a2 100644 +--- a/ldap/servers/plugins/replication/cl5.h ++++ b/ldap/servers/plugins/replication/cl5.h +@@ -29,6 +29,7 @@ typedef struct changelog5Config + char *symmetricKey; + long compactInterval; + long trimInterval; ++ char *compactTime; + } changelog5Config; + + /* initializes changelog*/ +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 403a6a666..75a2f46f5 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -158,6 +158,7 @@ typedef struct cl5trim + time_t maxAge; /* maximum entry age in seconds */ + int maxEntries; /* maximum number of entries across all changelog files */ + int compactInterval; /* interval to compact changelog db */ ++ char *compactTime; /* time to compact changelog db */ + int trimInterval; /* trimming interval */ + PRLock *lock; /* controls access to trimming configuration */ + } CL5Trim; +@@ -184,6 +185,7 @@ typedef struct cl5desc + PRLock *clLock; /* Lock associated to clVar, used to notify threads on close */ + PRCondVar *clCvar; /* Condition Variable used to notify threads on close */ + void *clcrypt_handle; /* for cl encryption */ ++ char *compact_time; /* Time to execute changelog compaction */ + } CL5Desc; + + typedef void (*VFP)(void *); +@@ -1025,7 +1027,7 @@ cl5GetState() + CL5_BAD_STATE if changelog is not open + */ + int +-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval) ++cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval) + { + if (s_cl5Desc.dbState == CL5_STATE_NONE) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +@@ -1061,6 +1063,10 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int t + s_cl5Desc.dbTrim.compactInterval = compactInterval; + } + ++ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) { ++ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime); ++ } ++ + if (trimInterval != CL5_NUM_IGNORE) { + s_cl5Desc.dbTrim.trimInterval = trimInterval; + } +@@ -3077,16 +3083,48 @@ _cl5TrimCleanup(void) + { + if (s_cl5Desc.dbTrim.lock) + PR_DestroyLock(s_cl5Desc.dbTrim.lock); ++ slapi_ch_free_string(&s_cl5Desc.dbTrim.compactTime); + + memset(&s_cl5Desc.dbTrim, 0, sizeof(s_cl5Desc.dbTrim)); + } + ++static time_t ++_cl5_get_tod_expiration(char 
*expire_time) ++{ ++ time_t start_time, todays_elapsed_time, now = time(NULL); ++ struct tm *tm_struct = localtime(&now); ++ char hour_str[3] = {0}; ++ char min_str[3] = {0}; ++ char *s = expire_time; ++ char *endp = NULL; ++ int32_t hour, min, expiring_time; ++ ++ /* Get today's start time */ ++ todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); ++ start_time = slapi_current_utc_time() - todays_elapsed_time; ++ ++ /* Get the hour and minute and calculate the expiring time. The time was ++ * already validated in bdb_config.c: HH:MM */ ++ hour_str[0] = *s++; ++ hour_str[1] = *s++; ++ s++; /* skip colon */ ++ min_str[0] = *s++; ++ min_str[1] = *s++; ++ hour = strtoll(hour_str, &endp, 10); ++ min = strtoll(min_str, &endp, 10); ++ expiring_time = (hour * 60 * 60) + (min * 60); ++ ++ return start_time + expiring_time; ++} ++ + static int + _cl5TrimMain(void *param __attribute__((unused))) + { + time_t timePrev = slapi_current_utc_time(); + time_t timeCompactPrev = slapi_current_utc_time(); + time_t timeNow; ++ PRBool compacting = PR_FALSE; ++ int32_t compactdb_time = 0; + + PR_AtomicIncrement(&s_cl5Desc.threadCount); + +@@ -3097,11 +3135,26 @@ _cl5TrimMain(void *param __attribute__((unused))) + timePrev = timeNow; + _cl5DoTrimming(); + } ++ ++ if (!compacting) { ++ /* Once we know we want to compact we need to stop refreshing the ++ * TOD expiration. Otherwise if the compact time is close to ++ * midnight we could roll over past midnight during the checkpoint ++ * sleep interval, and we'd never actually compact the databases. ++ * We also need to get this value before the sleep. ++ */ ++ compactdb_time = _cl5_get_tod_expiration(s_cl5Desc.dbTrim.compactTime); ++ } + if ((s_cl5Desc.dbTrim.compactInterval > 0) && +- (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) { +- /* time to trim */ +- timeCompactPrev = timeNow; +- _cl5CompactDBs(); ++ (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) ++ { ++ compacting = PR_TRUE; ++ if (slapi_current_utc_time() > compactdb_time) { ++ /* time to trim */ ++ timeCompactPrev = timeNow; ++ _cl5CompactDBs(); ++ compacting = PR_FALSE; ++ } + } + if (NULL == s_cl5Desc.clLock) { + /* most likely, emergency */ +@@ -3215,6 +3268,10 @@ _cl5CompactDBs(void) + rc, db_strerror(rc)); + goto bail; + } ++ ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, ++ "_cl5CompactDBs - compacting replication changelogs...\n"); + for (fileObj = objset_first_obj(s_cl5Desc.dbFiles); + fileObj; + fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) { +@@ -3235,6 +3292,9 @@ _cl5CompactDBs(void) + "_cl5CompactDBs - %s - %d pages freed\n", + dbFile->replName, c_data.compact_pages_free); + } ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, ++ "_cl5CompactDBs - compacting replication changelogs finished.\n"); + bail: + if (fileObj) { + object_release(fileObj); +diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h +index 302af97a0..4b0949fb3 100644 +--- a/ldap/servers/plugins/replication/cl5_api.h ++++ b/ldap/servers/plugins/replication/cl5_api.h +@@ -236,7 +236,7 @@ int cl5GetState(void); + Return: CL5_SUCCESS if successful; + CL5_BAD_STATE if changelog has not been open + */ +-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval); ++int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval); + + void cl5DestroyIterator(void *iterator); + +diff --git 
a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c +index 90dec4d54..e5a39c9c1 100644 +--- a/ldap/servers/plugins/replication/cl5_clcache.c ++++ b/ldap/servers/plugins/replication/cl5_clcache.c +@@ -452,9 +452,6 @@ static int + clcache_cursor_set(DBC *cursor, CLC_Buffer *buf) + { + int rc; +- uint32_t ulen; +- uint32_t dlen; +- uint32_t size; + + rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET); + if (rc == DB_BUFFER_SMALL) { +diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c +index e0530bed2..b32686788 100644 +--- a/ldap/servers/plugins/replication/cl5_config.c ++++ b/ldap/servers/plugins/replication/cl5_config.c +@@ -131,6 +131,7 @@ changelog5_config_done(changelog5Config *config) + /* slapi_ch_free_string accepts NULL pointer */ + slapi_ch_free_string(&config->maxAge); + slapi_ch_free_string(&config->dir); ++ slapi_ch_free_string(&config->compactTime); + slapi_ch_free_string(&config->symmetricKey); + slapi_ch_free_string(&config->dbconfig.encryptionAlgorithm); + slapi_ch_free_string(&config->dbconfig.symmetricKey); +@@ -211,7 +212,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)), + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +@@ -302,6 +303,7 @@ changelog5_config_modify(Slapi_PBlock *pb, + config.compactInterval = CL5_NUM_IGNORE; + slapi_ch_free_string(&config.maxAge); + config.maxAge = slapi_ch_strdup(CL5_STR_IGNORE); ++ config.compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME); + config.trimInterval = CL5_NUM_IGNORE; + + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); +@@ -375,6 +377,55 @@ changelog5_config_modify(Slapi_PBlock *pb, + *returncode = LDAP_UNWILLING_TO_PERFORM; + goto done; + } ++ } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE) == 0) { ++ if (config_attr_value && config_attr_value[0] != '\0') { ++ char *val = slapi_ch_strdup(config_attr_value); ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ int32_t hour, min; ++ errno = 0; ++ ++ slapi_ch_free_string(&config.compactTime); ++ ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid hour set (%s), must be a two digit number between 00 and 23", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid minute set (%s), must be a two digit number between 00 and 59", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. 
" ++ "Using default of 23:59\n", min_str); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid setting (%s), must have a time format of HH:MM", val); ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ *returncode = LDAP_UNWILLING_TO_PERFORM; ++ goto done; ++ } ++ config.compactTime = slapi_ch_strdup(config_attr_value); ++ } + } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_TRIM_ATTRIBUTE) == 0) { + if (slapi_is_duration_valid(config_attr_value)) { + config.trimInterval = (long)slapi_parse_duration(config_attr_value); +@@ -419,6 +470,11 @@ changelog5_config_modify(Slapi_PBlock *pb, + if (originalConfig->maxAge) + config.maxAge = slapi_ch_strdup(originalConfig->maxAge); + } ++ if (strcmp(config.compactTime, CL5_STR_IGNORE) == 0) { ++ slapi_ch_free_string(&config.compactTime); ++ if (originalConfig->compactTime) ++ config.compactTime = slapi_ch_strdup(originalConfig->compactTime); ++ } + + /* attempt to change chagelog dir */ + if (config.dir) { +@@ -519,7 +575,7 @@ changelog5_config_modify(Slapi_PBlock *pb, + if (config.maxEntries != CL5_NUM_IGNORE || + config.trimInterval != CL5_NUM_IGNORE || + strcmp(config.maxAge, CL5_STR_IGNORE) != 0) { +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + *returncode = 1; + if (returntext) { +@@ -689,6 +745,7 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config) + { + const char *arg; + char *max_age = NULL; ++ char *val = NULL; + + memset(config, 0, sizeof(*config)); + config->dir = slapi_entry_attr_get_charptr(entry, CONFIG_CHANGELOG_DIR_ATTRIBUTE); +@@ -711,6 +768,47 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config) + config->compactInterval = CHANGELOGDB_COMPACT_INTERVAL; + } + ++ arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE); ++ if (arg) { ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ int32_t hour, min; ++ errno = 0; ++ ++ val = slapi_ch_strdup((char *)arg); ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ goto set_default; ++ } ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. 
" ++ "Using default of 23:59\n", min_str); ++ goto set_default; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ goto set_default; ++ } ++ config->compactTime = slapi_ch_strdup(arg); ++ } else { ++ set_default: ++ config->compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME); ++ } ++ slapi_ch_free_string(&val); ++ + arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_TRIM_ATTRIBUTE); + if (arg) { + if (slapi_is_duration_valid(arg)) { +diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c +index 112c4ece4..251859714 100644 +--- a/ldap/servers/plugins/replication/cl5_init.c ++++ b/ldap/servers/plugins/replication/cl5_init.c +@@ -57,7 +57,7 @@ changelog5_init() + } + + /* set trimming parameters */ +- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval); ++ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval); + if (rc != CL5_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, + "changelog5_init: failed to configure changelog trimming\n"); +diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h +index b1ed86934..6708e12f7 100644 +--- a/ldap/servers/plugins/replication/repl_shared.h ++++ b/ldap/servers/plugins/replication/repl_shared.h +@@ -26,11 +26,13 @@ + + #define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */ + #define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */ ++#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */ + + #define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir" + #define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries" + #define CONFIG_CHANGELOG_MAXAGE_ATTRIBUTE "nsslapd-changelogmaxage" + #define CONFIG_CHANGELOG_COMPACTDB_ATTRIBUTE "nsslapd-changelogcompactdb-interval" ++#define CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE "nsslapd-changelogcompactdb-time" + #define CONFIG_CHANGELOG_TRIM_ATTRIBUTE "nsslapd-changelogtrim-interval" + /* Changelog Internal Configuration Parameters -> Changelog Cache related */ + #define CONFIG_CHANGELOG_ENCRYPTION_ALGORITHM "nsslapd-encryptionalgorithm" +diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c +index 2a620301c..f73c81528 100644 +--- a/ldap/servers/plugins/retrocl/retrocl.c ++++ b/ldap/servers/plugins/retrocl/retrocl.c +@@ -400,7 +400,6 @@ retrocl_start(Slapi_PBlock *pb) + + for (size_t i = 0; i < num_vals; i++) { + char *value = values[i]; +- size_t length = strlen(value); + + char *pos = strchr(value, ':'); + if (pos == NULL) { +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +index 167644943..4261c6ce2 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +@@ -678,6 +678,84 @@ bdb_config_db_compactdb_interval_set(void *arg, + return retval; + } + ++static void * ++bdb_config_db_compactdb_time_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ return (void *)slapi_ch_strdup(BDB_CONFIG(li)->bdb_compactdb_time); ++} ++ ++static int ++bdb_config_db_compactdb_time_set(void *arg, ++ void *value, ++ char *errorbuf __attribute__((unused)), ++ int phase __attribute__((unused)), ++ int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *)arg; ++ char *val = 
slapi_ch_strdup((char *)value); ++ char *endp = NULL; ++ char *hour_str = NULL; ++ char *min_str = NULL; ++ char *default_time = "23:59"; ++ int32_t hour, min; ++ int retval = LDAP_SUCCESS; ++ errno = 0; ++ ++ if (strstr(val, ":")) { ++ /* Get the hour and minute */ ++ hour_str = ldap_utf8strtok_r(val, ":", &min_str); ++ ++ /* Validate hour */ ++ hour = strtoll(hour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid hour set (%s), must be a two digit number between 00 and 23", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", hour_str); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ ++ /* Validate minute */ ++ min = strtoll(min_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid minute set (%s), must be a two digit number between 00 and 59", ++ hour_str); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid minute set (%s), must be a two digit number between 00 and 59. " ++ "Using default of 23:59\n", min_str); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ } else { ++ /* Wrong format */ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid setting (%s), must have a time format of HH:MM", val); ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set", ++ "Invalid setting (%s), must have a time format of HH:MM\n", val); ++ retval = LDAP_OPERATIONS_ERROR; ++ goto done; ++ } ++ ++done: ++ if (apply) { ++ slapi_ch_free((void **)&(BDB_CONFIG(li)->bdb_compactdb_time)); ++ if (retval) { ++ /* Something went wrong, use the default */ ++ BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup(default_time); ++ } else { ++ BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup((char *)value); ++ } ++ } ++ slapi_ch_free_string(&val); ++ ++ return retval; ++} ++ + static void * + bdb_config_db_page_size_get(void *arg) + { +@@ -1473,6 +1551,7 @@ static config_info bdb_config_param[] = { + {CONFIG_DB_TRANSACTION_WAIT, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_transaction_wait_get, &bdb_config_db_transaction_wait_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_CHECKPOINT_INTERVAL, CONFIG_TYPE_INT, "60", &bdb_config_db_checkpoint_interval_get, &bdb_config_db_checkpoint_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_COMPACTDB_INTERVAL, CONFIG_TYPE_INT, "2592000" /*30days*/, &bdb_config_db_compactdb_interval_get, &bdb_config_db_compactdb_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_DB_COMPACTDB_TIME, CONFIG_TYPE_STRING, "23:59", &bdb_config_db_compactdb_time_get, &bdb_config_db_compactdb_time_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH, CONFIG_TYPE_INT, "0", &bdb_get_batch_transactions, &bdb_set_batch_transactions, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_min_sleep, &bdb_set_batch_txn_min_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_max_sleep, 
&bdb_set_batch_txn_max_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 2f25f67a2..ec1976d38 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -2126,6 +2126,7 @@ bdb_post_close(struct ldbminfo *li, int dbmode) + */ + slapi_ch_free_string(&conf->bdb_dbhome_directory); + slapi_ch_free_string(&conf->bdb_home_directory); ++ slapi_ch_free_string(&conf->bdb_compactdb_time); + } + + return return_value; +@@ -3644,6 +3645,39 @@ log_flush_threadmain(void *param) + return 0; + } + ++/* ++ * This refreshes the TOD expiration. So live changes to the configuration ++ * will take effect immediately. ++ */ ++static time_t ++bdb_get_tod_expiration(char *expire_time) ++{ ++ time_t start_time, todays_elapsed_time, now = time(NULL); ++ struct tm *tm_struct = localtime(&now); ++ char hour_str[3] = {0}; ++ char min_str[3] = {0}; ++ char *s = expire_time; ++ char *endp = NULL; ++ int32_t hour, min, expiring_time; ++ ++ /* Get today's start time */ ++ todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); ++ start_time = slapi_current_utc_time() - todays_elapsed_time; ++ ++ /* Get the hour and minute and calculate the expiring time. The time was ++ * already validated in bdb_config.c: HH:MM */ ++ hour_str[0] = *s++; ++ hour_str[1] = *s++; ++ s++; /* skip colon */ ++ min_str[0] = *s++; ++ min_str[1] = *s++; ++ hour = strtoll(hour_str, &endp, 10); ++ min = strtoll(min_str, &endp, 10); ++ expiring_time = (hour * 60 * 60) + (min * 60); ++ ++ return start_time + expiring_time; ++} ++ + /* + * create a thread for checkpoint_threadmain + */ +@@ -3685,7 +3719,9 @@ checkpoint_threadmain(void *param) + time_t checkpoint_interval_update = 0; + time_t compactdb_interval = 0; + time_t checkpoint_interval = 0; +- back_txn txn; ++ int32_t compactdb_time = 0; ++ PRBool compacting = PR_FALSE; ++ + + PR_ASSERT(NULL != param); + li = (struct ldbminfo *)param; +@@ -3724,22 +3760,35 @@ checkpoint_threadmain(void *param) + slapi_timespec_expire_at(checkpoint_interval, &checkpoint_expire); + + while (!BDB_CONFIG(li)->bdb_stop_threads) { +- /* sleep for a while */ +- /* why aren't we sleeping exactly the right amount of time ? */ +- /* answer---because the interval might be changed after the server +- * starts up */ ++ PR_Lock(li->li_config_mutex); ++ checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; ++ compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; ++ if (!compacting) { ++ /* Once we know we want to compact we need to stop refreshing the ++ * TOD expiration. Otherwise if the compact time is close to ++ * midnight we could roll over past midnight during the checkpoint ++ * sleep interval, and we'd never actually compact the databases. ++ * We also need to get this value before the sleep. ++ */ ++ compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); ++ } ++ PR_Unlock(li->li_config_mutex); ++ ++ if (compactdb_interval_update != compactdb_interval) { ++ /* Compact interval was changed, so reset the timer */ ++ slapi_timespec_expire_at(compactdb_interval_update, &compactdb_expire); ++ } + ++ /* Sleep for a while ... ++ * Why aren't we sleeping exactly the right amount of time ? 
++ * Answer---because the interval might be changed after the server ++ * starts up */ + DS_Sleep(interval); + + if (0 == BDB_CONFIG(li)->bdb_enable_transactions) { + continue; + } + +- PR_Lock(li->li_config_mutex); +- checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; +- compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; +- PR_Unlock(li->li_config_mutex); +- + /* If the checkpoint has been updated OR we have expired */ + if (checkpoint_interval != checkpoint_interval_update || + slapi_timespec_expire_check(&checkpoint_expire) == TIMER_EXPIRED) { +@@ -3807,94 +3856,37 @@ checkpoint_threadmain(void *param) + + /* + * Remember that if compactdb_interval is 0, timer_expired can +- * never occur unless the value in compctdb_interval changes. ++ * never occur unless the value in compactdb_interval changes. + * +- * this could have been a bug infact, where compactdb_interval ++ * this could have been a bug in fact, where compactdb_interval + * was 0, if you change while running it would never take effect .... + */ +- if (compactdb_interval_update != compactdb_interval || +- slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { +- int rc = 0; +- Object *inst_obj; +- ldbm_instance *inst; +- DB *db = NULL; +- DB_COMPACT c_data = {0}; +- +- for (inst_obj = objset_first_obj(li->li_instance_set); +- inst_obj; +- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { +- inst = (ldbm_instance *)object_get_data(inst_obj); +- rc = dblayer_get_id2entry(inst->inst_be, &db); +- if (!db || rc) { +- continue; +- } +- slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", "Compacting DB start: %s\n", +- inst->inst_name); +- +- /* +- * It's possible for this to heap us after free because when we access db +- * *just* as the server shut's down, we don't know it. So we should probably +- * do something like wrapping access to the db var in a rwlock, and have "read" +- * to access, and take writes to change the state. This would prevent the issue. +- */ +- DBTYPE type; +- rc = db->get_type(db, &type); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", +- "compactdb: failed to determine db type for %s: db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- continue; +- } ++ if (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { ++ compacting = PR_TRUE; ++ if (slapi_current_utc_time() < compactdb_time) { ++ /* We have passed the interval, but we need to wait for a ++ * particular TOD to pass before compacting */ ++ continue; ++ } + +- rc = dblayer_txn_begin(inst->inst_be, NULL, &txn); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: transaction begin failed: %d\n", rc); +- break; +- } +- /* +- * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf +- * "DB_FREELIST_ONLY +- * Do no page compaction, only returning pages to the filesystem that are already free and at the end +- * of the file. This flag must be set if the database is a Hash access method database." 
+- * +- */ ++ /* Time to compact the DB's */ ++ dblayer_force_checkpoint(li); ++ bdb_compact(li); ++ dblayer_force_checkpoint(li); + +- uint32_t compact_flags = DB_FREE_SPACE; +- if (type == DB_HASH) { +- compact_flags |= DB_FREELIST_ONLY; +- } +- rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/, +- &c_data, compact_flags, NULL /*end*/); +- if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", +- "compactdb: failed to compact %s; db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to abort txn (%s) db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- break; +- } +- } else { +- slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", +- "compactdb: compact %s - %d pages freed\n", +- inst->inst_name, c_data.compact_pages_free); +- if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) { +- slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to commit txn (%s) db error - %d %s\n", +- inst->inst_name, rc, db_strerror(rc)); +- break; +- } +- } +- } ++ /* Now reset the timer and compacting flag */ + compactdb_interval = compactdb_interval_update; + slapi_timespec_expire_at(compactdb_interval, &compactdb_expire); ++ compacting = PR_FALSE; + } + } +- slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Check point before leaving\n"); ++ slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Check point before leaving\n"); + rval = dblayer_force_checkpoint(li); ++ + error_return: + + DECR_THREAD_COUNT(pEnv); +- slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n"); ++ slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n"); + return rval; + } + +@@ -6209,3 +6201,99 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info) + + return rc; + } ++ ++int32_t ++ldbm_back_compact(Slapi_Backend *be) ++{ ++ struct ldbminfo *li = NULL; ++ int32_t rc = -1; ++ ++ li = (struct ldbminfo *)be->be_database->plg_private; ++ dblayer_force_checkpoint(li); ++ rc = bdb_compact(li); ++ dblayer_force_checkpoint(li); ++ return rc; ++} ++ ++ ++int32_t ++bdb_compact(struct ldbminfo *li) ++{ ++ Object *inst_obj; ++ ldbm_instance *inst; ++ DB *db = NULL; ++ back_txn txn = {0}; ++ int rc = 0; ++ DB_COMPACT c_data = {0}; ++ ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", ++ "Compacting databases ...\n"); ++ for (inst_obj = objset_first_obj(li->li_instance_set); ++ inst_obj; ++ inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) ++ { ++ inst = (ldbm_instance *)object_get_data(inst_obj); ++ rc = dblayer_get_id2entry(inst->inst_be, &db); ++ if (!db || rc) { ++ continue; ++ } ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n", ++ inst->inst_name); ++ ++ /* ++ * It's possible for this to heap us after free because when we access db ++ * *just* as the server shut's down, we don't know it. So we should probably ++ * do something like wrapping access to the db var in a rwlock, and have "read" ++ * to access, and take writes to change the state. This would prevent the issue. 
++ */ ++ DBTYPE type; ++ rc = db->get_type(db, &type); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", ++ "compactdb: failed to determine db type for %s: db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ continue; ++ } ++ ++ rc = dblayer_txn_begin(inst->inst_be, NULL, &txn); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: transaction begin failed: %d\n", rc); ++ break; ++ } ++ /* ++ * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf ++ * "DB_FREELIST_ONLY ++ * Do no page compaction, only returning pages to the filesystem that are already free and at the end ++ * of the file. This flag must be set if the database is a Hash access method database." ++ * ++ */ ++ uint32_t compact_flags = DB_FREE_SPACE; ++ if (type == DB_HASH) { ++ compact_flags |= DB_FREELIST_ONLY; ++ } ++ rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/, ++ &c_data, compact_flags, NULL /*end*/); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", ++ "compactdb: failed to compact %s; db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to abort txn (%s) db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ break; ++ } ++ } else { ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", ++ "compactdb: compact %s - %d pages freed\n", ++ inst->inst_name, c_data.compact_pages_free); ++ if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) { ++ slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to commit txn (%s) db error - %d %s\n", ++ inst->inst_name, rc, db_strerror(rc)); ++ break; ++ } ++ } ++ } ++ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting databases finished.\n"); ++ ++ return rc; ++} +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +index 6bb04d21a..e3a49dbac 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +@@ -79,7 +79,8 @@ typedef struct bdb_config + int bdb_previous_lock_config; /* Max lock count when we last shut down-- + * used to determine if we delete the mpool */ + u_int32_t bdb_deadlock_policy; /* i.e. 
the atype to DB_ENV->lock_detect in deadlock_threadmain */ +- int bdb_compactdb_interval; /* interval to execute compact id2entry dbs */ ++ int32_t bdb_compactdb_interval; /* interval to execute compact id2entry dbs */ ++ char *bdb_compactdb_time; /* time of day to execute compact id2entry dbs */ + } bdb_config; + + int bdb_init(struct ldbminfo *li, config_info *config_array); +@@ -96,6 +97,7 @@ int bdb_db_size(Slapi_PBlock *pb); + int bdb_upgradedb(Slapi_PBlock *pb); + int bdb_upgradednformat(Slapi_PBlock *pb); + int bdb_upgradeddformat(Slapi_PBlock *pb); ++int32_t bdb_compact(struct ldbminfo *li); + int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task); + int bdb_cleanup(struct ldbminfo *li); + int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock); +diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c +index 4165c8fad..42c9bd00a 100644 +--- a/ldap/servers/slapd/back-ldbm/init.c ++++ b/ldap/servers/slapd/back-ldbm/init.c +@@ -180,6 +180,8 @@ ldbm_back_init(Slapi_PBlock *pb) + (void *)ldbm_back_set_info); + rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_CTRL_INFO_FN, + (void *)ldbm_back_ctrl_info); ++ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_COMPACT_FN, ++ (void *)ldbm_back_compact); + + if (rc != 0) { + slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "Failed %d\n", rc); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index 6fa8292eb..48446193e 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -84,6 +84,7 @@ struct config_info + #define CONFIG_DB_TRANSACTION_WAIT "nsslapd-db-transaction-wait" + #define CONFIG_DB_CHECKPOINT_INTERVAL "nsslapd-db-checkpoint-interval" + #define CONFIG_DB_COMPACTDB_INTERVAL "nsslapd-db-compactdb-interval" ++#define CONFIG_DB_COMPACTDB_TIME "nsslapd-db-compactdb-time" + #define CONFIG_DB_TRANSACTION_BATCH "nsslapd-db-transaction-batch-val" + #define CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP "nsslapd-db-transaction-batch-min-wait" + #define CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP "nsslapd-db-transaction-batch-max-wait" +diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +index 5d618a89c..30c9003bf 100644 +--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +@@ -478,6 +478,7 @@ void ldbm_back_search_results_release(void **search_results); + int ldbm_back_init(Slapi_PBlock *pb); + void ldbm_back_prev_search_results(Slapi_PBlock *pb); + int ldbm_back_isinitialized(void); ++int32_t ldbm_back_compact(Slapi_Backend *be); + + /* + * vlv.c +diff --git a/ldap/servers/slapd/filtercmp.c b/ldap/servers/slapd/filtercmp.c +index f7e3ed4d5..c886267bd 100644 +--- a/ldap/servers/slapd/filtercmp.c ++++ b/ldap/servers/slapd/filtercmp.c +@@ -344,7 +344,6 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2) + struct berval *inval1[2], *inval2[2], **outval1, **outval2; + int ret; + Slapi_Attr sattr; +- int cmplen; + + slapi_log_err(SLAPI_LOG_TRACE, "slapi_filter_compare", "=>\n"); + +@@ -379,11 +378,11 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2) + if (key1 && key2) { + struct berval bvkey1 = { + slapi_value_get_length(key1[0]), +- slapi_value_get_string(key1[0]) ++ (char *)slapi_value_get_string(key1[0]) + }; + struct berval bvkey2 = { + slapi_value_get_length(key2[0]), +- slapi_value_get_string(key2[0]) ++ (char 
*)slapi_value_get_string(key2[0]) + }; + ret = slapi_berval_cmp(&bvkey1, &bvkey2); + } +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index f7d1f8885..fcac53839 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -925,6 +925,12 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) + } + (*(IFP *)value) = pblock->pb_plugin->plg_db2ldif; + break; ++ case SLAPI_PLUGIN_DB_COMPACT_FN: ++ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { ++ return (-1); ++ } ++ (*(IFP *)value) = pblock->pb_plugin->plg_dbcompact; ++ break; + case SLAPI_PLUGIN_DB_DB2INDEX_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { + return (-1); +@@ -2925,7 +2931,12 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + } + pblock->pb_backend->be_noacl = *((int *)value); + break; +- ++ case SLAPI_PLUGIN_DB_COMPACT_FN: ++ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) { ++ return (-1); ++ } ++ pblock->pb_plugin->plg_dbcompact = (IFP)value; ++ break; + + /* extendedop plugin functions */ + case SLAPI_PLUGIN_EXT_OP_FN: +@@ -4137,8 +4148,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + break; + + case SLAPI_URP_TOMBSTONE_CONFLICT_DN: +- pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value; +- break; ++ pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value; ++ break; + + case SLAPI_URP_TOMBSTONE_UNIQUEID: + _pblock_assert_pb_intop(pblock); +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 3126a65f3..c48516157 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -1041,6 +1041,7 @@ struct slapdplugin + IFP plg_un_db_ldif2db; /* ldif 2 database */ + IFP plg_un_db_db2ldif; /* database 2 ldif */ + IFP plg_un_db_db2index; /* database 2 index */ ++ IFP plg_un_db_dbcompact; /* compact database */ + IFP plg_un_db_archive2db; /* ldif 2 database */ + IFP plg_un_db_db2archive; /* database 2 ldif */ + IFP plg_un_db_upgradedb; /* convert old idl to new */ +@@ -1082,6 +1083,7 @@ struct slapdplugin + #define plg_result plg_un.plg_un_db.plg_un_db_result + #define plg_ldif2db plg_un.plg_un_db.plg_un_db_ldif2db + #define plg_db2ldif plg_un.plg_un_db.plg_un_db_db2ldif ++#define plg_dbcompact plg_un.plg_un_db.plg_un_db_dbcompact + #define plg_db2index plg_un.plg_un_db.plg_un_db_db2index + #define plg_archive2db plg_un.plg_un_db.plg_un_db_archive2db + #define plg_db2archive plg_un.plg_un_db.plg_un_db_db2archive +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index b956ebe63..570765e47 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -928,6 +928,7 @@ int proxyauth_get_dn(Slapi_PBlock *pb, char **proxydnp, char **errtextp); + #define SLAPI_PLUGIN_DB_GET_INFO_FN 290 + #define SLAPI_PLUGIN_DB_SET_INFO_FN 291 + #define SLAPI_PLUGIN_DB_CTRL_INFO_FN 292 ++#define SLAPI_PLUGIN_DB_COMPACT_FN 294 + + /**** End of database plugin interface. **************************************/ + +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 93d31b806..4c7262ab3 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2021 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
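A note on the new knob: the CONFIG_DB_COMPACTDB_TIME define above is exposed through cn=config as nsslapd-db-compactdb-time, next to the existing nsslapd-db-compactdb-interval. As a rough lib389 sketch of driving the pair, assuming "inst" is an already-open DirSrv handle and that DatabaseConfig.set() takes (attribute, value) pairs as it does for the other nsslapd-db-* settings:

    from lib389.backend import DatabaseConfig

    # "inst" is an assumed, already-open lib389 DirSrv connection.
    db_config = DatabaseConfig(inst)
    # Compact roughly every 30 days, but only fire at 02:30 (HH:MM).
    db_config.set([('nsslapd-db-compactdb-interval', '2592000'),
                   ('nsslapd-db-compactdb-time', '02:30')])

The dsconf equivalent added further down in this patch is the --compactdb-time option on "backend config set".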
+@@ -2928,6 +2928,105 @@ des2aes_task_destructor(Slapi_Task *task) + "des2aes_task_destructor <--\n"); + } + ++struct task_compact_data ++{ ++ char *suffix; ++ Slapi_Task *task; ++}; ++ ++static void ++compact_db_task_destructor(Slapi_Task *task) ++{ ++ slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task", ++ "compact_db_task_destructor -->\n"); ++ if (task) { ++ struct task_compact_data *mydata = (struct task_compact_data *)slapi_task_get_data(task); ++ while (slapi_task_get_refcount(task) > 0) { ++ /* Yield to wait for the task to finish */ ++ DS_Sleep(PR_MillisecondsToInterval(100)); ++ } ++ if (mydata) { ++ slapi_ch_free((void **)&mydata); ++ } ++ } ++ slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task", ++ "compact_db_task_destructor <--\n"); ++} ++ ++static void ++task_compact_thread(void *arg) ++{ ++ struct task_compact_data *task_data = arg; ++ Slapi_Task *task = task_data->task; ++ Slapi_Backend *be = NULL; ++ char *cookie = NULL; ++ int32_t rc = -1; ++ ++ slapi_task_inc_refcount(task); ++ slapi_task_begin(task, 1); ++ ++ be = slapi_get_first_backend(&cookie); ++ while (be) { ++ if (be->be_private == 0) { ++ /* Found a non-private backend, start compacting */ ++ rc = (be->be_database->plg_dbcompact)(be); ++ break; ++ } ++ be = (backend *)slapi_get_next_backend(cookie); ++ } ++ slapi_ch_free_string(&cookie); ++ ++ slapi_task_finish(task, rc); ++ slapi_task_dec_refcount(task); ++} ++ ++/* ++ * compact the BDB database ++ * ++ * dn: cn=compact_it,cn=compact db,cn=tasks,cn=config ++ * objectclass: top ++ * objectclass: extensibleObject ++ * cn: compact_it ++ */ ++static int ++task_compact_db_add(Slapi_PBlock *pb, ++ Slapi_Entry *e, ++ Slapi_Entry *eAfter __attribute__((unused)), ++ int *returncode, ++ char *returntext, ++ void *arg __attribute__((unused))) ++{ ++ Slapi_Task *task = slapi_new_task(slapi_entry_get_ndn(e)); ++ struct task_compact_data *task_data = NULL; ++ PRThread *thread = NULL; ++ ++ slapi_task_log_notice(task, "Beginning database compaction task...\n"); ++ ++ /* Register our destructor for cleaning up our private data */ ++ slapi_task_set_destructor_fn(task, compact_db_task_destructor); ++ ++ task_data = (struct task_compact_data *)slapi_ch_calloc(1, sizeof(struct task_compact_data)); ++ task_data->task = task; ++ slapi_task_set_data(task, task_data); ++ ++ /* Start the compaction as a separate thread */ ++ thread = PR_CreateThread(PR_USER_THREAD, task_compact_thread, ++ (void *)task_data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE); ++ if (thread == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "task_compact_db_add", "Unable to create db compact thread!\n"); ++ *returncode = LDAP_OPERATIONS_ERROR; ++ slapi_ch_free((void **)&task_data); ++ } ++ ++ if (*returncode != LDAP_SUCCESS) { ++ slapi_task_finish(task, *returncode); ++ return SLAPI_DSE_CALLBACK_ERROR; ++ } ++ ++ return SLAPI_DSE_CALLBACK_OK; ++} ++ + /* cleanup old tasks that may still be in the DSE from a previous session + * (this can happen if the server crashes [no matter how unlikely we like + * to think that is].) 
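The cn=compact db task handler added above is what the new lib389 DBCompactTask class (added to src/lib389/lib389/tasks.py further down in this patch) creates entries for. A minimal usage sketch, mirroring the backend_compact CLI helper below; "inst" is again an assumed open DirSrv handle:

    from lib389.tasks import DBCompactTask

    task = DBCompactTask(inst)
    # Adds cn=compact_db_<timestamp>,cn=compact db,cn=tasks,cn=config
    task.create(properties={})
    task.wait()
    if task.get_exit_code() != 0:
        raise ValueError("Database compaction task failed")

The CLI below passes {'justChangelog': 'yes'} in the properties when only the replication changelog should be compacted.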
+@@ -3010,6 +3109,7 @@ task_init(void) + slapi_task_register_handler("sysconfig reload", task_sysconfig_reload_add); + slapi_task_register_handler("fixup tombstones", task_fixup_tombstones_add); + slapi_task_register_handler("des2aes", task_des2aes); ++ slapi_task_register_handler("compact db", task_compact_db_add); + } + + /* called when the server is shutting down -- abort all existing tasks */ +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index 11cae972c..b73dc8460 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -196,6 +196,7 @@ export class Database extends React.Component { + dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'], + chxpoint: attrs['nsslapd-db-checkpoint-interval'], + compactinterval: attrs['nsslapd-db-compactdb-interval'], ++ compacttime: attrs['nsslapd-db-compactdb-time'], + importcacheauto: attrs['nsslapd-import-cache-autosize'], + importcachesize: attrs['nsslapd-import-cachesize'], + }, +diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +index 6a71c138d..1fa9f2cc2 100644 +--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx ++++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +@@ -36,6 +36,7 @@ export class GlobalDatabaseConfig extends React.Component { + dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + chxpoint: this.props.data.chxpoint, + compactinterval: this.props.data.compactinterval, ++ compacttime: this.props.data.compacttime, + importcachesize: this.props.data.importcachesize, + importcacheauto: this.props.data.importcacheauto, + // These variables store the original value (used for saving config) +@@ -55,6 +56,7 @@ export class GlobalDatabaseConfig extends React.Component { + _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause, + _chxpoint: this.props.data.chxpoint, + _compactinterval: this.props.data.compactinterval, ++ _compacttime: this.props.data.compacttime, + _importcachesize: this.props.data.importcachesize, + _importcacheauto: this.props.data.importcacheauto, + _db_cache_auto: this.props.data.db_cache_auto, +@@ -186,6 +188,10 @@ export class GlobalDatabaseConfig extends React.Component { + cmd.push("--compactdb-interval=" + this.state.compactinterval); + requireRestart = true; + } ++ if (this.state._compacttime != this.state.compacttime) { ++ cmd.push("--compactdb-time=" + this.state.compacttime); ++ requireRestart = true; ++ } + if (this.state.import_cache_auto) { + // Auto cache is selected + if (this.state._import_cache_auto != this.state.import_cache_auto) { +@@ -485,7 +491,15 @@ export class GlobalDatabaseConfig extends React.Component { + Database Compact Interval + + +- ++ ++ ++ ++ ++ ++ Database Compact Time ++ ++ ++ + + + +diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py +index c184c8d4f..d6161cebb 100644 +--- a/src/lib389/lib389/_constants.py ++++ b/src/lib389/lib389/_constants.py +@@ -154,6 +154,7 @@ DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS + DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS + DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS + DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS ++DN_COMPACTDB_TASK = "cn=compact db,%s" % DN_TASKS + + # Script Constants + LDIF2DB = 'ldif2db' +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index 
13bb27842..ad78a6ffe 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -1005,6 +1005,7 @@ class DatabaseConfig(DSLdapObject): + 'nsslapd-db-transaction-wait', + 'nsslapd-db-checkpoint-interval', + 'nsslapd-db-compactdb-interval', ++ 'nsslapd-db-compactdb-time', + 'nsslapd-db-page-size', + 'nsslapd-db-transaction-batch-val', + 'nsslapd-db-transaction-batch-min-wait', +diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py +index 722764d10..7b2f32c23 100644 +--- a/src/lib389/lib389/cli_conf/backend.py ++++ b/src/lib389/lib389/cli_conf/backend.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2020 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # Copyright (C) 2019 William Brown + # All rights reserved. + # +@@ -19,6 +19,7 @@ from lib389.chaining import (ChainingLinks) + from lib389.monitor import MonitorLDBM + from lib389.replica import Replicas + from lib389.utils import ensure_str, is_a_dn, is_dn_parent ++from lib389.tasks import DBCompactTask + from lib389._constants import * + from lib389.cli_base import ( + _format_status, +@@ -41,6 +42,7 @@ arg_to_attr = { + 'txn_wait': 'nsslapd-db-transaction-wait', + 'checkpoint_interval': 'nsslapd-db-checkpoint-interval', + 'compactdb_interval': 'nsslapd-db-compactdb-interval', ++ 'compactdb_time': 'nsslapd-db-compactdb-time', + 'txn_batch_val': 'nsslapd-db-transaction-batch-val', + 'txn_batch_min': 'nsslapd-db-transaction-batch-min-wait', + 'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait', +@@ -789,6 +791,18 @@ def backend_reindex_vlv(inst, basedn, log, args): + log.info("Successfully reindexed VLV indexes") + + ++def backend_compact(inst, basedn, log, args): ++ task = DBCompactTask(inst) ++ task_properties = {} ++ if args.only_changelog: ++ task_properties = {'justChangelog': 'yes'} ++ task.create(properties=task_properties) ++ task.wait() ++ if task.get_exit_code() != 0: ++ raise ValueError("Failed to create Database Compaction Task") ++ log.info("Successfully started Database Compaction Task") ++ ++ + def create_parser(subparsers): + backend_parser = subparsers.add_parser('backend', help="Manage database suffixes and backends") + subcommands = backend_parser.add_subparsers(help="action") +@@ -994,6 +1008,7 @@ def create_parser(subparsers): + set_db_config_parser.add_argument('--checkpoint-interval', help='Sets the amount of time in seconds after which the Directory Server sends a ' + 'checkpoint entry to the database transaction log') + set_db_config_parser.add_argument('--compactdb-interval', help='Sets the interval in seconds when the database is compacted') ++ set_db_config_parser.add_argument('--compactdb-time', help='Sets the Time Of Day to compact the database after the "compactdb interval" has been reached: Use this format to set the hour and minute: HH:MM') + set_db_config_parser.add_argument('--txn-batch-val', help='Specifies how many transactions will be batched before being committed') + set_db_config_parser.add_argument('--txn-batch-min', help='Controls when transactions should be flushed earliest, independently of ' + 'the batch count (only works when txn-batch-val is set)') +@@ -1121,3 +1136,10 @@ def create_parser(subparsers): + ####################################################### + get_tree_parser = subcommands.add_parser('get-tree', help='Get a representation of the suffix tree') + get_tree_parser.set_defaults(func=backend_get_tree) ++ ++ ####################################################### ++ # Run the db compaction 
task ++ ####################################################### ++ compact_parser = subcommands.add_parser('compact-db', help='Compact the database and the replication changelog') ++ compact_parser.set_defaults(func=backend_compact) ++ compact_parser.add_argument('--only-changelog', action='store_true', help='Only compact the Replication Change Log') +diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py +index 04886f632..3478a0a1f 100644 +--- a/src/lib389/lib389/cli_conf/replication.py ++++ b/src/lib389/lib389/cli_conf/replication.py +@@ -37,6 +37,7 @@ arg_to_attr = { + 'max_entries': 'nsslapd-changelogmaxentries', + 'max_age': 'nsslapd-changelogmaxage', + 'compact_interval': 'nsslapd-changelogcompactdb-interval', ++ 'compact_time': 'nsslapd-changelogcompactdb-time', + 'trim_interval': 'nsslapd-changelogtrim-interval', + 'encrypt_algo': 'nsslapd-encryptionalgorithm', + 'encrypt_key': 'nssymmetrickey', +@@ -1216,6 +1217,8 @@ def create_parser(subparsers): + repl_set_cl.add_argument('--max-entries', help="The maximum number of entries to get in the replication changelog") + repl_set_cl.add_argument('--max-age', help="The maximum age of a replication changelog entry") + repl_set_cl.add_argument('--compact-interval', help="The replication changelog compaction interval") ++ repl_set_cl.add_argument('--compact-time', help='Sets the Time Of Day to compact the database after the changelog "compact interval" ' ++ 'has been reached: Use this format to set the hour and minute: HH:MM') + repl_set_cl.add_argument('--trim-interval', help="The interval to check if the replication changelog can be trimmed") + + repl_get_cl = repl_subcommands.add_parser('get-changelog', help='Display replication changelog attributes.') +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index 590c6ee79..b64bc6ce5 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -217,6 +217,19 @@ class EntryUUIDFixupTask(Task): + self._must_attributes.extend(['basedn']) + + ++class DBCompactTask(Task): ++ """A single instance of compactdb task entry ++ ++ :param instance: An instance ++ :type instance: lib389.DirSrv ++ """ ++ ++ def __init__(self, instance, dn=None): ++ self.cn = 'compact_db_' + Task._get_task_date() ++ dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK ++ super(DBCompactTask, self).__init__(instance, dn) ++ ++ + class SchemaReloadTask(Task): + """A single instance of schema reload task entry + +@@ -227,7 +240,6 @@ class SchemaReloadTask(Task): + def __init__(self, instance, dn=None): + self.cn = 'schema_reload_' + Task._get_task_date() + dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS +- + super(SchemaReloadTask, self).__init__(instance, dn) + + +-- +2.26.3 + diff --git a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch deleted file mode 100644 index 13a64c2..0000000 --- a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch +++ /dev/null @@ -1,254 +0,0 @@ -From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001 -From: Pierre Rogier -Date: Mon, 30 Nov 2020 12:42:17 +0100 -Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449) - in 1.4.3 branch - ---- - .../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++ - 1 file changed, 234 insertions(+) - create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py - -diff --git 
a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -new file mode 100644 -index 000000000..b03d170c8 ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -0,0 +1,234 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import time -+import subprocess -+import pytest -+ -+from lib389.cli_conf.replication import get_repl_monitor_info -+from lib389.tasks import * -+from lib389.utils import * -+from lib389.topologies import topology_m2 -+from lib389.cli_base import FakeArgs -+from lib389.cli_base.dsrc import dsrc_arg_concat -+from lib389.cli_base import connect_instance -+ -+pytestmark = pytest.mark.tier0 -+ -+LOG_FILE = '/tmp/monitor.log' -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+ -+@pytest.fixture(scope="function") -+def set_log_file(request): -+ fh = logging.FileHandler(LOG_FILE) -+ fh.setLevel(logging.DEBUG) -+ log.addHandler(fh) -+ -+ def fin(): -+ log.info('Delete files') -+ os.remove(LOG_FILE) -+ -+ config = os.path.expanduser(DSRC_HOME) -+ if os.path.exists(config): -+ os.remove(config) -+ -+ request.addfinalizer(fin) -+ -+ -+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): -+ with open(LOG_FILE, 'r+') as f: -+ file_content = f.read() -+ -+ for item in content_list: -+ log.info('Check that "{}" is present'.format(item)) -+ assert item in file_content -+ -+ if second_list is not None: -+ log.info('Check for "{}"'.format(second_list)) -+ for item in second_list: -+ assert item in file_content -+ -+ if single_value is not None: -+ log.info('Check for "{}"'.format(single_value)) -+ assert single_value in file_content -+ -+ if error_list is not None: -+ log.info('Check that "{}" is not present'.format(error_list)) -+ for item in error_list: -+ assert item not in file_content -+ -+ log.info('Reset log file') -+ f.truncate(0) -+ -+ -+@pytest.mark.ds50545 -+@pytest.mark.bz1739718 -+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") -+def test_dsconf_replication_monitor(topology_m2, set_log_file): -+ """Test replication monitor that was ported from legacy tools -+ -+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 -+ :setup: 2 MM topology -+ :steps: -+ 1. Create DS instance -+ 2. Run replication monitor with connections option -+ 3. Run replication monitor with aliases option -+ 4. Run replication monitor with --json option -+ 5. Run replication monitor with .dsrc file created -+ 6. Run replication monitor with connections option as if using dsconf CLI -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ 5. Success -+ 6. 
Success -+ """ -+ -+ m1 = topology_m2.ms["master1"] -+ m2 = topology_m2.ms["master2"] -+ -+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -+ -+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) -+ content_list = ['Replica Root: dc=example,dc=com', -+ 'Replica ID: 1', -+ 'Replica Status: Available', -+ 'Max CSN', -+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', -+ 'Replica Enabled: on', -+ 'Update In Progress: FALSE', -+ 'Last Update Start:', -+ 'Last Update End:', -+ 'Number Of Changes Sent:', -+ 'Number Of Changes Skipped: None', -+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', -+ 'Last Init Start:', -+ 'Last Init End:', -+ 'Last Init Status:', -+ 'Reap Active: 0', -+ 'Replication Status: In Synchronization', -+ 'Replication Lag Time:', -+ 'Supplier: ', -+ m2.host + ':' + str(m2.port), -+ 'Replica Root: dc=example,dc=com', -+ 'Replica ID: 2', -+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] -+ -+ error_list = ['consumer (Unavailable)', -+ 'Failed to retrieve database RUV entry from consumer'] -+ -+ json_list = ['type', -+ 'list', -+ 'items', -+ 'name', -+ m1.host + ':' + str(m1.port), -+ 'data', -+ '"replica_id": "1"', -+ '"replica_root": "dc=example,dc=com"', -+ '"replica_status": "Available"', -+ 'maxcsn', -+ 'agmts_status', -+ 'agmt-name', -+ '002', -+ 'replica', -+ m2.host + ':' + str(m2.port), -+ 'replica-enabled', -+ 'update-in-progress', -+ 'last-update-start', -+ 'last-update-end', -+ 'number-changes-sent', -+ 'number-changes-skipped', -+ 'last-update-status', -+ 'Error (0) Replica acquired successfully: Incremental update succeeded', -+ 'last-init-start', -+ 'last-init-end', -+ 'last-init-status', -+ 'reap-active', -+ 'replication-status', -+ 'In Synchronization', -+ 'replication-lag-time', -+ '"replica_id": "2"', -+ '001', -+ m1.host + ':' + str(m1.port)] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + m2.host + ':' + str(m2.port) -+ -+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, -+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] -+ -+ aliases = ['M1=' + m1.host + ':' + str(m1.port), -+ 'M2=' + m2.host + ':' + str(m2.port)] -+ -+ args = FakeArgs() -+ args.connections = connections -+ args.aliases = None -+ args.json = False -+ -+ log.info('Run replication monitor with connections option') -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) -+ -+ log.info('Run replication monitor with aliases option') -+ args.aliases = aliases -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, alias_content) -+ -+ log.info('Run replication monitor with --json option') -+ args.aliases = None -+ args.json = True -+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(json_list) -+ -+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f: -+ f.write(dsrc_content) -+ -+ args.connections = None -+ args.aliases = None -+ args.json = False -+ -+ log.info('Run replication monitor when .dsrc file is present with content') -+ 
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, alias_content) -+ os.remove(os.path.expanduser(DSRC_HOME)) -+ -+ log.info('Run replication monitor with connections option as if using dsconf CLI') -+ # Perform same test than steps 2 test but without using directly the topology instance. -+ # but with an instance similar to those than dsconf cli generates: -+ # step 2 args -+ args.connections = connections -+ args.aliases = None -+ args.json = False -+ # args needed to generate an instance with dsrc_arg_concat -+ args.instance = 'master1' -+ args.basedn = None -+ args.binddn = None -+ args.bindpw = None -+ args.pwdfile = None -+ args.prompt = False -+ args.starttls = False -+ dsrc_inst = dsrc_arg_concat(args, None) -+ inst = connect_instance(dsrc_inst, True, args) -+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) -+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) --- -2.26.2 - diff --git a/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch b/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch new file mode 100644 index 0000000..94618f6 --- /dev/null +++ b/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch @@ -0,0 +1,155 @@ +From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Sat, 29 May 2021 13:19:53 -0400 +Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in + 1.4.3 + +Description: In 1.4.3 the replication changelog is a separate database, + so it needs a separate "nsds5task" compaction task (COMPACT_CL5) + +relates: https://github.com/389ds/389-ds-base/issues/4778 + +ASAN tested and approved + +Reviewed by: mreynolds +--- + ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++---------- + ldap/servers/plugins/replication/cl5_api.h | 1 + + .../replication/repl5_replica_config.c | 9 +++++++- + 3 files changed, 19 insertions(+), 12 deletions(-) + +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 75a2f46f5..4c5077b48 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -266,7 +266,6 @@ static int _cl5TrimInit(void); + static void _cl5TrimCleanup(void); + static int _cl5TrimMain(void *param); + static void _cl5DoTrimming(void); +-static void _cl5CompactDBs(void); + static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid); + static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key); + static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key); +@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused))) + if (slapi_current_utc_time() > compactdb_time) { + /* time to trim */ + timeCompactPrev = timeNow; +- _cl5CompactDBs(); ++ cl5CompactDBs(); + compacting = PR_FALSE; + } + } +@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data) + } + + /* clear free page files to reduce changelog */ +-static void +-_cl5CompactDBs(void) ++void ++cl5CompactDBs(void) + { + int rc; + Object *fileObj = NULL; +@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void) + rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to begin 
transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n", + rc, db_strerror(rc)); + goto bail; + } + + + slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, +- "_cl5CompactDBs - compacting replication changelogs...\n"); ++ "cl5CompactDBs - compacting replication changelogs...\n"); + for (fileObj = objset_first_obj(s_cl5Desc.dbFiles); + fileObj; + fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) { +@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void) + &c_data, DB_FREE_SPACE, NULL /*end*/); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n", ++ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n", + dbFile->replName, rc, db_strerror(rc)); + goto bail; + } + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, +- "_cl5CompactDBs - %s - %d pages freed\n", ++ "cl5CompactDBs - %s - %d pages freed\n", + dbFile->replName, c_data.compact_pages_free); + } + + slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl, +- "_cl5CompactDBs - compacting replication changelogs finished.\n"); ++ "cl5CompactDBs - compacting replication changelogs finished.\n"); + bail: + if (fileObj) { + object_release(fileObj); +@@ -3303,14 +3302,14 @@ bail: + rc = TXN_ABORT(txnid); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n", + rc, db_strerror(rc)); + } + } else { + rc = TXN_COMMIT(txnid); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, +- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n", ++ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n", + rc, db_strerror(rc)); + } + } +diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h +index 4b0949fb3..11db771f2 100644 +--- a/ldap/servers/plugins/replication/cl5_api.h ++++ b/ldap/servers/plugins/replication/cl5_api.h +@@ -405,5 +405,6 @@ int cl5DeleteRUV(void); + void cl5CleanRUV(ReplicaId rid); + void cl5NotifyCleanup(int rid); + void trigger_cl_purging(cleanruv_purge_data *purge_data); ++void cl5CompactDBs(void); + + #endif +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index a969ef82f..e708a1ccb 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -29,6 +29,8 @@ + #define CLEANRUVLEN 8 + #define CLEANALLRUV "CLEANALLRUV" + #define CLEANALLRUVLEN 11 ++#define COMPACT_CL5 "COMPACT_CL5" ++#define COMPACT_CL5_LEN 11 + #define REPLICA_RDN "cn=replica" + + #define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ +@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext + static int + replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods) + { +- + if (strcasecmp(task_name, CL2LDIF_TASK) == 0) { + if (apply_mods) { + return replica_execute_cl2ldif_task(r, returntext); +@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap + return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext); + } else + return LDAP_SUCCESS; ++ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) { ++ /* compact the replication changelogs */ ++ if (apply_mods) { ++ cl5CompactDBs(); ++ } ++ return 
LDAP_SUCCESS; + } else { + PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name); + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, +-- +2.26.3 + diff --git a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch deleted file mode 100644 index 74aa5aa..0000000 --- a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 26 Nov 2020 09:08:13 +1000 -Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy - -Bug Description: Due to some changes in dsrc for tlsreqcert -and how def open was structured in lib389, the system ldap.conf -policy was ignored. - -Fix Description: Default to using the system ldap.conf policy -if undefined in lib389 or the tls_reqcert param in dsrc. - -fixes: #4460 - -Author: William Brown - -Review by: ??? ---- - src/lib389/lib389/__init__.py | 11 +++++++---- - src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++------- - 2 files changed, 16 insertions(+), 11 deletions(-) - -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 99ea9cc6a..4e6a1905a 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object): - # Now, we are still an allocated ds object so we can be re-installed - self.state = DIRSRV_STATE_ALLOCATED - -- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD, -+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None, - usercert=None, userkey=None): - ''' - It opens a ldap bound connection to dirsrv so that online -@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object): - try: - # Note this sets LDAP.OPT not SELF. 
Because once self has opened - # it can NOT change opts on reused (ie restart) -- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) -- self.log.debug("Using certificate policy %s", reqcert) -- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert) -+ if reqcert is not None: -+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) -+ self.log.debug("Using lib389 certificate policy %s", reqcert) -+ else: -+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy") -+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)) - except ldap.LDAPError as e: - self.log.fatal('TLS negotiation failed: %s', e) - raise e -diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py -index fec18a5f9..9b09ea568 100644 ---- a/src/lib389/lib389/cli_base/dsrc.py -+++ b/src/lib389/lib389/cli_base/dsrc.py -@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst): - 'tls_cacertdir': None, - 'tls_cert': None, - 'tls_key': None, -- 'tls_reqcert': ldap.OPT_X_TLS_HARD, -+ 'tls_reqcert': None, - 'starttls': args.starttls, - 'prompt': False, - 'pwdfile': None, -@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log): - dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None) - dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None) - if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']: -- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) -+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) - - dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None) - # At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause -@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log): - - dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None) - dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None) -- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard') -- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']: -- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, -- path)) -+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None) - if dsrc_inst['tls_reqcert'] == 'never': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER - elif dsrc_inst['tls_reqcert'] == 'allow': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW -- else: -+ elif dsrc_inst['tls_reqcert'] == 'hard': - dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD -+ elif dsrc_inst['tls_reqcert'] is None: -+ # Use system value -+ pass -+ else: -+ raise ValueError("dsrc tls_reqcert value invalid. 
%s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path)) - dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False) - dsrc_inst['pwdfile'] = None - dsrc_inst['prompt'] = False --- -2.26.2 - diff --git a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch deleted file mode 100644 index 16637bb..0000000 --- a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001 -From: William Brown -Date: Thu, 12 Nov 2020 13:04:21 +1000 -Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes - sigsegv in chaining - -Bug Description: When a paged search through chaining backend is -received with a false criticality (such as SSSD), chaining backend -will sigsegv due to a null context. - -Fix Description: When a NULL ctx is recieved to be freed, this is -as paged results have finished being sent, so we check the NULL -ctx and move on. - -fixes: #4428 - -Author: William Brown - -Review by: @droideck, @mreynolds389 ---- - ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++ - ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++ - 2 files changed, 10 insertions(+) - -diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c -index 69d23a6b5..d47cbc8e4 100644 ---- a/ldap/servers/plugins/chainingdb/cb_search.c -+++ b/ldap/servers/plugins/chainingdb/cb_search.c -@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr) - - slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, - "chaining_back_search_results_release\n"); -+ if (ctx == NULL) { -+ /* The paged search is already complete, just return */ -+ /* Could we have a ctx state flag instead? */ -+ return; -+ } -+ - if (ctx->readahead != ctx->tobefreed) { - slapi_entry_free(ctx->readahead); - } -diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c -index dfd5dd92c..d52fd25a6 100644 ---- a/ldap/servers/plugins/chainingdb/cb_utils.c -+++ b/ldap/servers/plugins/chainingdb/cb_utils.c -@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c - return LDAP_SUCCESS; - } - -+#ifdef DEBUG -+static int debug_on = 1; -+#else - static int debug_on = 0; -+#endif - - int - cb_debug_on() --- -2.26.2 - diff --git a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch deleted file mode 100644 index de8c8a8..0000000 --- a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Mon, 7 Dec 2020 00:41:27 +0100 -Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate - of setsocketopt (#4437) - -Bug description: - When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered - until buffer is full or tcp_cork is set. This reduce network traffic when - the application writes partial pdu. - DS write complete pdu (results/entries/..) so it gives low benefit for DS. - In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send - immediately results/entries at each operation. This is an overhead of syscalls. 
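To make that syscall pattern concrete, here is a rough Python illustration of the two strategies; TCP_CORK is Linux-specific, and the socket handles are placeholders rather than anything from the patch:

    import socket

    def send_pdu_with_nagle(conn: socket.socket, pdu: bytes) -> None:
        # Nagle left enabled: every complete PDU must be bracketed by two
        # setsockopt() calls so it is flushed to the client immediately.
        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
        conn.sendall(pdu)
        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)

    def send_pdu_without_nagle(conn: socket.socket, pdu: bytes) -> None:
        # Nagle disabled: the PDU goes out right away with no per-operation
        # socket option calls (in real code TCP_NODELAY would be set once,
        # when the connection is accepted).
        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        conn.sendall(pdu)

Disabling nagle once per connection is exactly what removes the per-operation setsockopt overhead described above.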
- -Fix description: - Disable nagle by default - -relates: https://github.com/389ds/389-ds-base/issues/4315 - -Reviewed by: @mreynolds389, @Firstyear - -Platforms tested: F33 ---- - ldap/servers/slapd/libglobs.c | 9 ++++----- - 1 file changed, 4 insertions(+), 5 deletions(-) - -diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c -index 7d5374c90..f8cf162e6 100644 ---- a/ldap/servers/slapd/libglobs.c -+++ b/ldap/servers/slapd/libglobs.c -@@ -1635,12 +1635,11 @@ FrontendConfig_init(void) - #endif /* USE_SYSCONF */ - - init_accesscontrol = cfg->accesscontrol = LDAP_ON; --#if defined(LINUX) -- /* On Linux, by default, we use TCP_CORK so we must enable nagle */ -- init_nagle = cfg->nagle = LDAP_ON; --#else -+ -+ /* nagle triggers set/unset TCP_CORK setsockopt per operation -+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork -+ */ - init_nagle = cfg->nagle = LDAP_OFF; --#endif - init_security = cfg->security = LDAP_OFF; - init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON; - cfg->tls_check_crl = TLS_CHECK_NONE; --- -2.26.2 - diff --git a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch deleted file mode 100644 index a2cb4bd..0000000 --- a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001 -From: Firstyear -Date: Fri, 4 Dec 2020 10:14:33 +1000 -Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in - SSCA (#4472) - -Bug Description: During SSCA creation, the server cert did not have -the machine name, which meant that the cert would not work without -reqcert = never. - -Fix Description: Add the machine name as an alt name during SSCA -creation. It is not guaranteed this value is correct, but it -is better than nothing. - -relates: https://github.com/389ds/389-ds-base/issues/4460 - -Author: William Brown - -Review by: mreynolds389, droideck ---- - src/lib389/lib389/instance/setup.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py -index 7d42ba292..e46f2d1e5 100644 ---- a/src/lib389/lib389/instance/setup.py -+++ b/src/lib389/lib389/instance/setup.py -@@ -887,7 +887,7 @@ class SetupDs(object): - tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir)) - tlsdb_inst.import_rsa_crt(ca) - -- csr = tlsdb.create_rsa_key_and_csr() -+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']]) - (ca, crt) = ssca.rsa_ca_sign_csr(csr) - tlsdb.import_rsa_crt(ca, crt) - if general['selinux']: --- -2.26.2 - diff --git a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch deleted file mode 100644 index 067d06e..0000000 --- a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 9 Dec 2020 09:52:08 -0500 -Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix - -Description: heap-use-after-free in slapi_be_getsuffix after disk - monitoring runs. This feature is freeing a list of - backends which it does not need to do. - -Fixes: https://github.com/389ds/389-ds-base/issues/4483 - -Reviewed by: firstyear & tbordaz(Thanks!!) 
---- - ldap/servers/slapd/daemon.c | 13 +------------ - 1 file changed, 1 insertion(+), 12 deletions(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 49199e4df..691f77570 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - now = start; - while ((now - start) < grace_period) { - if (g_get_shutdown()) { -- be_index = 0; -- if (be_list[be_index] != NULL) { -- while ((be = be_list[be_index++])) { -- slapi_be_free(&be); -- } -- } - slapi_ch_array_free(dirs); - dirs = NULL; - return; -@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - } -- be_index = 0; -- if (be_list[be_index] != NULL) { -- while ((be = be_list[be_index++])) { -- slapi_be_free(&be); -- } -- } -+ - slapi_ch_array_free(dirs); - dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ - g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); --- -2.26.2 - diff --git a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch deleted file mode 100644 index 9acd229..0000000 --- a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 16 Dec 2020 16:30:28 +0100 -Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491) - -Bug description: - If the bind entry does not exist, the bind result info - reports that 'No such entry'. It should not give any - information if the target entry exists or not - -Fix description: - Does not return any additional information during a bind - -relates: https://github.com/389ds/389-ds-base/issues/4480 - -Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) - -Platforms tested: F31 ---- - dirsrvtests/tests/suites/basic/basic_test.py | 1 - - ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +- - ldap/servers/slapd/result.c | 2 +- - 3 files changed, 2 insertions(+), 3 deletions(-) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 120207321..1ae82dcdd 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): - assert not dscreate_long_instance.exists() - - -- - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c -index 3fe86d567..10cef250f 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c -@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)), - if (attrs) { - for (size_t i = 0; attrs[i]; i++) { - if (ldbm_config_moved_attr(attrs[i])) { -- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); -+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); - break; - } - } -diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c -index 9daf3b151..ab0d79454 100644 ---- a/ldap/servers/slapd/result.c -+++ b/ldap/servers/slapd/result.c -@@ 
-355,7 +355,7 @@ send_ldap_result_ext( - if (text) { - pbtext = text; - } else { -- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext); -+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext); - } - - if (operation == NULL) { --- -2.26.2 - diff --git a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch deleted file mode 100644 index 6de8b9e..0000000 --- a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Wed, 16 Dec 2020 16:21:35 +0100 -Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor - (#4505) - -(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2) ---- - .../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------ - 1 file changed, 36 insertions(+), 14 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index b03d170c8..eb18d2da2 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,6 +9,7 @@ - import time - import subprocess - import pytest -+import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - -+def get_hostnames_from_log(port1, port2): -+ # Get the supplier host names as displayed in replication monitor output -+ with open(LOG_FILE, 'r') as logfile: -+ logtext = logfile.read() -+ # search for Supplier :hostname:port -+ # and use \D to insure there is no more number is after -+ # the matched port (i.e that 10 is not matching 101) -+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m1 = 'localhost.localdomain' -+ if (match is not None): -+ host_m1 = match.group(2) -+ # Same for master 2 -+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m2 = 'localhost.localdomain' -+ if (match is not None): -+ host_m2 = match.group(2) -+ return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - m1 = topology_m2.ms["master1"] - m2 = topology_m2.ms["master2"] - -- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -- - connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) - content_list = ['Replica Root: dc=example,dc=com', - 'Replica ID: 1', -@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + m2.host + ':' + str(m2.port) -- - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -- aliases = ['M1=' + m1.host + ':' + str(m1.port), -- 'M2=' + m2.host + ':' 
+ str(m2.port)] -- - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -+ # Prepare the data for next tests -+ aliases = ['M1=' + host_m1 + ':' + str(m1.port), -+ 'M2=' + host_m2 + ':' + str(m2.port)] -+ -+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + host_m2 + ':' + str(m2.port) -+ - log.info('Run replication monitor with aliases option') - args.aliases = aliases - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch deleted file mode 100644 index 6906b5c..0000000 --- a/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +++ /dev/null @@ -1,374 +0,0 @@ -From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Mon, 30 Nov 2020 15:28:05 +0000 -Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped - entries - -Bug Description: During an ldif2db import entries that do not -conform to various constraints will be skipped and not imported. -On completition of an import with skipped entries, the server -returns a success exit code and logs the skipped entry detail to -the error logs. The success exit code could lead the user to -believe that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the -import will continue and a warning will be returned to the user. - -CLI tools for offline import updated to handle warning code. - -Test added to generate an incorrect ldif entry and perform an -import. 
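In lib389 terms, the warning surfaces to offline-import callers roughly as below; this is a sketch assuming "inst" is a DirSrv handle for a stopped instance and the LDIF contains a syntax violation, as in the test added by this patch:

    from lib389._constants import TaskWarning

    inst.stop()
    ret = inst.ldif2db(bename='userRoot', suffixes=None, excludeSuffixes=None,
                       encrypt=False, import_file='/tmp/syntax_err.ldif',
                       import_cl=False)
    if ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
        print("import finished, but one or more entries were skipped")
    inst.start()

ldif2db() returns True on a clean import, False on failure, and the warning code itself when entries were skipped, which is what the comparison above relies on.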
- -Fixes: #4418 - -Reviewed by: Firstyear, droideck (Thanks) - -(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0) ---- - .../tests/suites/import/import_test.py | 54 ++++++++++++++++++- - .../slapd/back-ldbm/db-bdb/bdb_import.c | 22 ++++++-- - ldap/servers/slapd/main.c | 8 +++ - ldap/servers/slapd/pblock.c | 24 +++++++++ - ldap/servers/slapd/pblock_v3.h | 1 + - ldap/servers/slapd/slapi-private.h | 14 +++++ - src/lib389/lib389/__init__.py | 18 +++---- - src/lib389/lib389/_constants.py | 7 +++ - src/lib389/lib389/cli_ctl/dbtasks.py | 8 ++- - 9 files changed, 140 insertions(+), 16 deletions(-) - -diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py -index 3803ecf43..b47db96ed 100644 ---- a/dirsrvtests/tests/suites/import/import_test.py -+++ b/dirsrvtests/tests/suites/import/import_test.py -@@ -15,7 +15,7 @@ import pytest - import time - import glob - from lib389.topologies import topology_st as topo --from lib389._constants import DEFAULT_SUFFIX -+from lib389._constants import DEFAULT_SUFFIX, TaskWarning - from lib389.dbgen import dbgen_users - from lib389.tasks import ImportTask - from lib389.index import Indexes -@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo): - return import_ldif1 - - -+def _create_syntax_err_ldif(topo): -+ """ -+ Create an incorrect ldif entry that violates syntax check -+ """ -+ ldif_dir = topo.standalone.get_ldif_dir() -+ line1 = """dn: dc=example,dc=com -+objectClass: top -+objectClass: domain -+dc: example -+dn: ou=groups,dc=example,dc=com -+objectClass: top -+objectClass: organizationalUnit -+ou: groups -+dn: uid=JHunt,ou=groups,dc=example,dc=com -+objectClass: top -+objectClass: person -+objectClass: organizationalPerson -+objectClass: inetOrgPerson -+objectclass: inetUser -+cn: James Hunt -+sn: Hunt -+uid: JHunt -+givenName: -+""" -+ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out: -+ out.write(f'{line1}') -+ os.chmod(out.name, 0o777) -+ out.close() -+ import_ldif1 = ldif_dir + '/syntax_err.ldif' -+ return import_ldif1 -+ -+ - def test_import_with_index(topo, _import_clean): - """ - Add an index, then import via cn=tasks -@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl - topo.standalone.start() - - -+def test_ldif2db_syntax_check(topo): -+ """ldif2db should return a warning when a skipped entry has occured. -+ :id: 85e75670-42c5-4062-9edc-7f117c97a06f -+ :setup: -+ 1. Standalone Instance -+ 2. Ldif entry that violates syntax check rule (empty givenname) -+ :steps: -+ 1. Create an ldif file which violates the syntax checking rule -+ 2. Stop the server and import ldif file with ldif2db -+ :expected results: -+ 1. 
ldif2db import returns a warning to signify skipped entries -+ """ -+ import_ldif1 = _create_syntax_err_ldif(topo) -+ # Import the offending LDIF data - offline -+ topo.standalone.stop() -+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) -+ assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY -+ topo.standalone.start() -+ -+ - def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): - """Report during startup if nsslapd-cachememsize is too small - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -index e7da0517f..1e4830e99 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -@@ -2563,7 +2563,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 1; -+ ret |= WARN_UPGARDE_DN_FORMAT_ALL; - } else if (NEED_DN_NORM == ret) { - import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", - "%s complete. %s needs upgradednformat.", -@@ -2572,7 +2572,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 2; -+ ret |= WARN_UPGRADE_DN_FORMAT; - } else if (NEED_DN_NORM_SP == ret) { - import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", - "%s complete. %s needs upgradednformat spaces.", -@@ -2581,7 +2581,7 @@ error: - slapi_task_dec_refcount(job->task); - } - import_all_done(job, ret); -- ret = 3; -+ ret |= WARN_UPGRADE_DN_FORMAT_SPACE; - } else { - ret = -1; - if (job->task != NULL) { -@@ -2600,6 +2600,11 @@ error: - import_all_done(job, ret); - } - -+ /* set task warning if there are no errors */ -+ if((!ret) && (job->skipped)) { -+ ret |= WARN_SKIPPED_IMPORT_ENTRY; -+ } -+ - /* This instance isn't busy anymore */ - instance_set_not_busy(job->inst); - -@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb) - int total_files, i; - int up_flags = 0; - PRThread *thread = NULL; -+ int ret = 0; - - slapi_pblock_get(pb, SLAPI_BACKEND, &be); - if (be == NULL) { -@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb) - } - - /* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */ -- return import_main_offline((void *)job); -+ ret = import_main_offline((void *)job); -+ -+ /* no error just warning, reset ret */ -+ if(ret &= WARN_SKIPPED_IMPORT_ENTRY) { -+ slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY); -+ ret = 0; -+ } -+ -+ return ret; - } - - struct _import_merge_thang -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index 694375b22..104f6826c 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg) - plugin->plg_name); - return_value = -1; - } -+ -+ /* check for task warnings */ -+ if(!return_value) { -+ if((return_value = slapi_pblock_get_task_warning(pb))) { -+ slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value); -+ } -+ } -+ - slapi_pblock_destroy(pb); - charray_free(instances); - charray_free(mcfg->cmd_line_instance_names); -diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c -index 454ea9cc3..1ad9d0399 100644 ---- a/ldap/servers/slapd/pblock.c -+++ b/ldap/servers/slapd/pblock.c -@@ -28,12 +28,14 @@ - #define SLAPI_LDIF_DUMP_REPLICA 2003 - #define SLAPI_PWDPOLICY 2004 - #define SLAPI_PW_ENTRY 2005 -+#define SLAPI_TASK_WARNING 2006 - - /* Used for checking assertions about pblocks in some cases. 
*/ - #define SLAPI_HINT 9999 - - static PRLock *pblock_analytics_lock = NULL; - -+ - static PLHashNumber - hash_int_func(const void *key) - { -@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica) - pb->pb_task->ldif_dump_replica = dump_replica; - } - -+int32_t -+slapi_pblock_get_task_warning(Slapi_PBlock *pb) -+{ -+#ifdef PBLOCK_ANALYTICS -+ pblock_analytics_record(pb, SLAPI_TASK_WARNING); -+#endif -+ if (pb->pb_task != NULL) { -+ return pb->pb_task->task_warning; -+ } -+ return 0; -+} -+ -+void -+slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning) -+{ -+#ifdef PBLOCK_ANALYTICS -+ pblock_analytics_record(pb, SLAPI_TASK_WARNING); -+#endif -+ _pblock_assert_pb_task(pb); -+ pb->pb_task->task_warning = warning; -+} -+ - void * - slapi_pblock_get_vattr_context(Slapi_PBlock *pb) - { -diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h -index 90498c0b0..b35d78565 100644 ---- a/ldap/servers/slapd/pblock_v3.h -+++ b/ldap/servers/slapd/pblock_v3.h -@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task - int ldif2db_noattrindexes; - int ldif_printkey; - int task_flags; -+ int32_t task_warning; - int import_state; - - int server_running; /* indicate that server is running */ -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index c98c1947c..31cb33472 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); - void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); - void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); -+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); -+ -+ -+int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); -+ - #ifdef __cplusplus - } - #endif -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 4e6a1905a..5b36a79e1 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object): - # server is stopped) - # - def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt, -- import_file): -+ import_file, import_cl): - """ - @param bename - The backend name of the database to import - @param suffixes - List/tuple of suffixes to import -@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object): - try: - result = subprocess.check_output(cmd, encoding='utf-8') - except subprocess.CalledProcessError as e: -- self.log.debug("Command: %s failed with the return code %s and the error %s", -- format_cmd_list(cmd), e.returncode, e.output) -- return False -- -- self.log.debug("ldif2db output: BEGIN") -- for line in result.split("\n"): -- self.log.debug(line) -- self.log.debug("ldif2db output: END") -+ if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: -+ self.log.debug("Command: %s skipped import entry warning %s", -+ format_cmd_list(cmd), e.returncode) -+ return e.returncode -+ else: -+ self.log.debug("Command: %s failed with the return code %s and the error %s", -+ format_cmd_list(cmd), e.returncode, 
e.output) -+ return False - - return True - -diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py -index e28c602a3..38ba04565 100644 ---- a/src/lib389/lib389/_constants.py -+++ b/src/lib389/lib389/_constants.py -@@ -162,6 +162,13 @@ DB2BAK = 'db2bak' - DB2INDEX = 'db2index' - DBSCAN = 'dbscan' - -+# Task warnings -+class TaskWarning(IntEnum): -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0) -+ WARN_UPGRADE_DN_FORMAT = (1 << 1) -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2) -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+ - RDN_REPLICA = "cn=replica" - - RETROCL_SUFFIX = "cn=changelog" -diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py -index 590a1ea0e..02830239c 100644 ---- a/src/lib389/lib389/cli_ctl/dbtasks.py -+++ b/src/lib389/lib389/cli_ctl/dbtasks.py -@@ -7,6 +7,7 @@ - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - -+from lib389._constants import TaskWarning - - def dbtasks_db2index(inst, log, args): - if not inst.db2index(bename=args.backend): -@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args): - - - def dbtasks_ldif2db(inst, log, args): -- if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, -- suffixes=None, excludeSuffixes=None): -+ ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, -+ suffixes=None, excludeSuffixes=None, import_cl=False) -+ if not ret: - log.fatal("ldif2db failed") - return False -+ elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: -+ log.warn("ldif2db successful with skipped entries") - else: - log.info("ldif2db successful") - --- -2.26.2 - diff --git a/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch deleted file mode 100644 index 6e77682..0000000 --- a/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Mon, 30 Nov 2020 15:28:05 +0000 -Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped - entries - -Bug Description: During an ldif2db import entries that do not -conform to various constraints will be skipped and not imported. -On completition of an import with skipped entries, the server -returns a success exit code and logs the skipped entry detail to -the error logs. The success exit code could lead the user to -believe that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the -import will continue and a warning will be returned to the user. - -CLI tools for offline import updated to handle warning code. - -Test added to generate an incorrect ldif entry and perform an -import. 
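Because each warning value is a distinct bit, a non-zero exit status from the offline import can carry several warnings at once, and a wrapper can tell "completed with warnings" apart from a hard failure. A minimal sketch in Python, assuming the TaskWarning flag values this series adds to lib389/_constants.py (the classify_ldif2db_result helper itself is hypothetical; the WARN_UPGARDE_DN_FORMAT_ALL spelling matches the source identifier):

    from enum import IntEnum

    class TaskWarning(IntEnum):
        WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0)
        WARN_UPGRADE_DN_FORMAT = (1 << 1)
        WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2)
        WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)

    def classify_ldif2db_result(rc):
        """Split an ldif2db exit status into (completed, warning flags)."""
        if rc == 0:
            return True, []
        warnings = [w for w in TaskWarning if rc & w]
        # A status made up purely of known warning bits means the import
        # ran to completion but flagged something (e.g. skipped entries);
        # any other non-zero status is treated as a failure.
        if warnings and rc == sum(warnings):
            return True, warnings
        return False, warnings

The dbtasks_ldif2db change in the preceding patch applies the same test for the single WARN_SKIPPED_IMPORT_ENTRY case.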
- -Fixes: #4418 - -Reviewed by: Firstyear, droideck (Thanks) ---- - ldap/servers/slapd/slapi-private.h | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index 31cb33472..e0092d571 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{ - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); -+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - - int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); - --- -2.26.2 - diff --git a/SOURCES/0022-Fix-cherry-pick-erorr.patch b/SOURCES/0022-Fix-cherry-pick-erorr.patch deleted file mode 100644 index a078160..0000000 --- a/SOURCES/0022-Fix-cherry-pick-erorr.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 7 Jan 2021 10:27:43 -0500 -Subject: [PATCH 3/6] Fix cherry-pick erorr - ---- - ldap/servers/slapd/slapi-private.h | 11 ----------- - 1 file changed, 11 deletions(-) - -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index e0092d571..d5abe8ac1 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{ - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - --/* task warnings */ --typedef enum task_warning_t{ -- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -- WARN_UPGRADE_DN_FORMAT = (1 << 1), -- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) --} task_warning; -- --int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); --void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); -- - int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); - - #ifdef __cplusplus --- -2.26.2 - diff --git a/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch b/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch deleted file mode 100644 index 81e2612..0000000 --- a/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch +++ /dev/null @@ -1,393 +0,0 @@ -From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Wed, 9 Dec 2020 22:42:59 +0000 -Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db - online import (#4476) - -Bug Description: During an online ldif2db import entries that do not - conform to various constraints will be skipped and - not imported. On completition of an import with skipped - entries, the server responds with a success message - and logs the skipped entry detail to the error logs. - The success messgae could lead the user to believe - that all entries were successfully imported. - -Fix Description: If a skipped entry occurs during import, the import - will continue and a warning message will be displayed. 
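With the lib389 pieces of this series (get_task_warn() from this patch, TaskWarning from the earlier offline patch), a caller of the online import could surface that warning roughly as follows (a sketch only; inst stands for an already-connected DirSrv object, and the LDIF path and suffix are placeholders):

    from lib389.tasks import ImportTask
    from lib389._constants import TaskWarning

    def import_and_report(inst, ldif_path, suffix):
        """Run an online import and report any skipped-entry warning."""
        task = ImportTask(inst)
        task.import_suffix_from_ldif(ldiffile=ldif_path, suffix=suffix)
        task.wait()
        # get_task_warn() reads the nsTaskWarning attribute described
        # below and returns None when the task recorded no warning.
        if task.get_task_warn() == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
            print("import finished, but some entries were skipped")
        return task.get_exit_code()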
- The schema is extended with a nsTaskWarning attribute - which is used to capture and retrieve any task - warnings. - - CLI tools for online import updated. - - Test added to generate an incorrect ldif entry and perform an - online import. - -Fixes: https://github.com/389ds/389-ds-base/issues/4419 - -Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks) ---- - .../tests/suites/import/import_test.py | 39 +++++++++++++++++-- - ldap/schema/02common.ldif | 3 +- - .../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++ - ldap/servers/slapd/slap.h | 1 + - ldap/servers/slapd/slapi-plugin.h | 11 ++++++ - ldap/servers/slapd/slapi-private.h | 8 ---- - ldap/servers/slapd/task.c | 29 +++++++++++++- - src/lib389/lib389/cli_conf/backend.py | 6 ++- - src/lib389/lib389/tasks.py | 23 +++++++++-- - 9 files changed, 108 insertions(+), 17 deletions(-) - -diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py -index b47db96ed..77c915026 100644 ---- a/dirsrvtests/tests/suites/import/import_test.py -+++ b/dirsrvtests/tests/suites/import/import_test.py -@@ -65,6 +65,9 @@ def _import_clean(request, topo): - import_ldif = ldif_dir + '/basic_import.ldif' - if os.path.exists(import_ldif): - os.remove(import_ldif) -+ syntax_err_ldif = ldif_dir + '/syntax_err.dif' -+ if os.path.exists(syntax_err_ldif): -+ os.remove(syntax_err_ldif) - - request.addfinalizer(finofaci) - -@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo): - - def _create_syntax_err_ldif(topo): - """ -- Create an incorrect ldif entry that violates syntax check -+ Create an ldif file, which contains an entry that violates syntax check - """ - ldif_dir = topo.standalone.get_ldif_dir() - line1 = """dn: dc=example,dc=com - objectClass: top - objectClass: domain - dc: example -+ - dn: ou=groups,dc=example,dc=com - objectClass: top - objectClass: organizationalUnit - ou: groups -+ - dn: uid=JHunt,ou=groups,dc=example,dc=com - objectClass: top - objectClass: person -@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean): - assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True) - - -+def test_online_import_with_warning(topo, _import_clean): -+ """ -+ Import an ldif file with syntax errors, verify skipped entry warning code -+ -+ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 -+ :setup: Standalone Instance -+ :steps: -+ 1. Create standalone Instance -+ 2. Create an ldif file with an entry that violates syntax check (empty givenname) -+ 3. Online import of troublesome ldif file -+ :expected results: -+ 1. Successful import with skipped entry warning -+ """ -+ topo.standalone.restart() -+ -+ import_task = ImportTask(topo.standalone) -+ import_ldif1 = _create_syntax_err_ldif(topo) -+ -+ # Importing the offending ldif file - online -+ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX) -+ -+ # There is just a single entry in this ldif -+ import_task.wait(5) -+ -+ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code -+ assert import_task.present('nstaskwarning') -+ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn() -+ - def test_crash_on_ldif2db(topo, _import_clean): - """ - Delete the cn=monitor entry for an LDBM backend instance. 
Doing this will -@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl - topo.standalone.start() - - --def test_ldif2db_syntax_check(topo): -+def test_ldif2db_syntax_check(topo, _import_clean): - """ldif2db should return a warning when a skipped entry has occured. - :id: 85e75670-42c5-4062-9edc-7f117c97a06f - :setup: -@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo): - import_ldif1 = _create_syntax_err_ldif(topo) - # Import the offending LDIF data - offline - topo.standalone.stop() -- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) -+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None) - assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY - topo.standalone.start() - -diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif -index c6dc074db..821640d03 100644 ---- a/ldap/schema/02common.ldif -+++ b/ldap/schema/02common.ldif -@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T - attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) -+attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - # - # objectclasses: - # -@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement - objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) - objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) --objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' ) -+objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' ) - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -index 310893884..5c7d9c8f7 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -@@ -747,6 +747,11 @@ import_producer(void *param) - } - } - -+ /* capture skipped entry warnings for this task */ -+ if((job) && (job->skipped)) { -+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY); -+ } -+ - slapi_value_free(&(job->usn_value)); 
- import_free_ldif(&c); - info->state = FINISHED; -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index 53c9161d1..be4d38739 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -1753,6 +1753,7 @@ typedef struct slapi_task - int task_progress; /* number between 0 and task_work */ - int task_work; /* "units" of work to be done */ - int task_flags; /* (see above) */ -+ task_warning task_warn; /* task warning */ - char *task_status; /* transient status info */ - char *task_log; /* appended warnings, etc */ - char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */ -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 96313ef2c..ddb11bc7c 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int - /* task flags (set by the task-control code) */ - #define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */ - -+/* task warnings */ -+typedef enum task_warning_t{ -+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -+ WARN_UPGRADE_DN_FORMAT = (1 << 1), -+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) -+} task_warning; -+ -+ - int slapi_task_register_handler(const char *name, dseCallbackFn func); - int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb); - int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func); -@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task); - void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func); - void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func); - void slapi_task_status_changed(Slapi_Task *task); -+void slapi_task_set_warning(Slapi_Task *task, task_warning warn); -+int slapi_task_get_warning(Slapi_Task *task); - void slapi_task_log_status(Slapi_Task *task, char *format, ...) 
- #ifdef __GNUC__ - __attribute__((format(printf, 2, 3))); -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index d5abe8ac1..b956ebe63 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); - void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); - void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); - --/* task warnings */ --typedef enum task_warning_t{ -- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), -- WARN_UPGRADE_DN_FORMAT = (1 << 1), -- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), -- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) --} task_warning; -- - int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); - void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); - -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 936c64920..806077a16 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0; - #define TASK_PROGRESS_NAME "nsTaskCurrentItem" - #define TASK_WORK_NAME "nsTaskTotalItems" - #define TASK_DATE_NAME "nsTaskCreated" -+#define TASK_WARNING_NAME "nsTaskWarning" - - #define DEFAULT_TTL "3600" /* seconds */ - #define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */ -@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task) - LDAPMod modlist[20]; - LDAPMod *mod[20]; - int cur = 0, i; -- char s1[20], s2[20], s3[20]; -+ char s1[20], s2[20], s3[20], s4[20]; - - if (shutting_down) { - /* don't care about task status updates anymore */ -@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task) - sprintf(s1, "%d", task->task_exitcode); - sprintf(s2, "%d", task->task_progress); - sprintf(s3, "%d", task->task_work); -+ sprintf(s4, "%d", task->task_warn); - NEXTMOD(TASK_PROGRESS_NAME, s2); - NEXTMOD(TASK_WORK_NAME, s3); - NEXTMOD(TASK_DATE_NAME, task->task_date); -+ NEXTMOD(TASK_WARNING_NAME, s4); - /* only add the exit code when the job is done */ - if ((task->task_state == SLAPI_TASK_FINISHED) || - (task->task_state == SLAPI_TASK_CANCELLED)) { -@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task) - return 0; /* return value not currently used */ - } - -+/* -+ * Return task warning -+ */ -+int -+slapi_task_get_warning(Slapi_Task *task) -+{ -+ if (task) { -+ return task->task_warn; -+ } -+ -+ return 0; /* return value not currently used */ -+} -+ -+/* -+ * Set task warning -+ */ -+void -+slapi_task_set_warning(Slapi_Task *task, task_warning warn) -+{ -+ if (task) { -+ return task->task_warn |= warn; -+ } -+} -+ - int - slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func) - { -diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py -index d7a6e670c..6bfbcb036 100644 ---- a/src/lib389/lib389/cli_conf/backend.py -+++ b/src/lib389/lib389/cli_conf/backend.py -@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args): - exclude_suffixes=args.exclude_suffixes) - task.wait(timeout=None) - result = task.get_exit_code() -+ warning = task.get_task_warn() - - if task.is_complete() and result == 0: -- log.info("The import task has finished successfully") -+ if warning is None or (warning == 0): -+ log.info("The import task has finished successfully") -+ else: -+ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning)) 
- else: - raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log()))) - -diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py -index dc7bb9206..bf20d1e61 100644 ---- a/src/lib389/lib389/tasks.py -+++ b/src/lib389/lib389/tasks.py -@@ -38,6 +38,7 @@ class Task(DSLdapObject): - self._protected = False - self._exit_code = None - self._task_log = "" -+ self._task_warn = None - - def status(self): - """Return the decoded status of the task -@@ -49,6 +50,7 @@ class Task(DSLdapObject): - - self._exit_code = self.get_attr_val_utf8("nsTaskExitCode") - self._task_log = self.get_attr_val_utf8("nsTaskLog") -+ self._task_warn = self.get_attr_val_utf8("nsTaskWarning") - if not self.exists(): - self._log.debug("complete: task has self cleaned ...") - # The task cleaned it self up. -@@ -77,6 +79,15 @@ class Task(DSLdapObject): - return None - return None - -+ def get_task_warn(self): -+ """Return task's warning code if task is complete, else None.""" -+ if self.is_complete(): -+ try: -+ return int(self._task_warn) -+ except TypeError: -+ return None -+ return None -+ - def wait(self, timeout=120): - """Wait until task is complete.""" - -@@ -390,14 +401,17 @@ class Tasks(object): - running, true if done - if true, second is the exit code - if dowait - is True, this function will block until the task is complete''' - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', -- 'nsTaskCurrentItem', 'nsTaskTotalItems'] -+ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning'] - done = False - exitCode = 0 -+ warningCode = 0 - dn = entry.dn - while not done: - entry = self.conn.getEntry(dn, attrlist=attrlist) - self.log.debug("task entry %r", entry) - -+ if entry.nsTaskWarning: -+ warningCode = int(entry.nsTaskWarning) - if entry.nsTaskExitCode: - exitCode = int(entry.nsTaskExitCode) - done = True -@@ -405,7 +419,7 @@ class Tasks(object): - time.sleep(1) - else: - break -- return (done, exitCode) -+ return (done, exitCode, warningCode) - - def importLDIF(self, suffix=None, benamebase=None, input_file=None, - args=None): -@@ -461,8 +475,9 @@ class Tasks(object): - self.conn.add_s(entry) - - exitCode = 0 -+ warningCode = 0 - if args and args.get(TASK_WAIT, False): -- (done, exitCode) = self.conn.tasks.checkTask(entry, True) -+ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True) - - if exitCode: - self.log.error("Error: import task %s for file %s exited with %d", -@@ -470,6 +485,8 @@ class Tasks(object): - else: - self.log.info("Import task %s for file %s completed successfully", - cn, input_file) -+ if warningCode: -+ self.log.info("with warning code %d", warningCode) - self.dn = dn - self.entry = entry - return exitCode --- -2.26.2 - diff --git a/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch deleted file mode 100644 index 8f90863..0000000 --- a/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +++ /dev/null @@ -1,149 +0,0 @@ -From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Wed, 16 Dec 2020 16:30:28 +0100 -Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request - (#4491) - -Bug description: - If the bind entry does not exist, the bind result info - reports that 'No such entry'. 
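At the protocol level the leak is the optional "additional info" text attached to the LDAP result; python-ldap exposes it as the 'info' key of the exception dict, so a client-side probe for the regression might look like this (an illustrative sketch; the URI and bind DN are placeholders):

    import ldap

    def bind_leaks_info(uri, bad_dn, password):
        """Return True if a failed simple bind exposes extra info text."""
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s(bad_dn, password)
        except ldap.INVALID_CREDENTIALS as e:
            # With this fix only the result code comes back; an 'info'
            # field here would reveal whether the bind DN exists.
            return 'info' in e.args[0]
        return False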
It should not give any - information if the target entry exists or not - -Fix description: - Does not return any additional information during a bind - -relates: https://github.com/389ds/389-ds-base/issues/4480 - -Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) - -Platforms tested: F31 ---- - dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++ - 1 file changed, 112 insertions(+) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 1ae82dcdd..02b73ee85 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): - assert not dscreate_long_instance.exists() - - -+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value')) -+def dscreate_test_rdn_value(request): -+ template_file = "/tmp/dssetup.inf" -+ template_text = f"""[general] -+config_version = 2 -+# This invalid hostname ... -+full_machine_name = localhost.localdomain -+# Means we absolutely require this. -+strict_host_checking = False -+# In tests, we can be run in containers, NEVER trust -+# that systemd is there, or functional in any capacity -+systemd = False -+ -+[slapd] -+instance_name = test_different_rdn -+root_dn = cn=directory manager -+root_password = someLongPassword_123 -+# We do not have access to high ports in containers, -+# so default to something higher. -+port = 38999 -+secure_port = 63699 -+ -+[backend-userroot] -+create_suffix_entry = True -+suffix = {request.param} -+""" -+ -+ with open(template_file, "w") as template_fd: -+ template_fd.write(template_text) -+ -+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 -+ tmp_env = os.environ -+ if "PYTHONPATH" in tmp_env: -+ del tmp_env["PYTHONPATH"] -+ -+ def fin(): -+ os.remove(template_file) -+ if request.param != "wrong=some_value": -+ try: -+ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it']) -+ except subprocess.CalledProcessError as e: -+ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}") -+ else: -+ log.info("Wrong RDN is passed, instance not created") -+ request.addfinalizer(fin) -+ return template_file, tmp_env, request.param, -+ -+ -+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), -+ reason="This test is only required with new admin cli, and requires root.") -+@pytest.mark.bz1807419 -+@pytest.mark.ds50928 -+def test_dscreate_with_different_rdn(dscreate_test_rdn_value): -+ """Test that dscreate works with different RDN attributes as suffix -+ -+ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef -+ :parametrized: yes -+ :setup: None -+ :steps: -+ 1. Create template file for dscreate with different RDN attributes as suffix -+ 2. Create instance using template file -+ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute -+ :expectedresults: -+ 1. Should succeeds -+ 2. Should succeeds -+ 3. Should fail -+ """ -+ try: -+ subprocess.check_call([ -+ 'dscreate', -+ 'from-file', -+ dscreate_test_rdn_value[0] -+ ], env=dscreate_test_rdn_value[1]) -+ except subprocess.CalledProcessError as e: -+ log.fatal(f"dscreate failed! 
Error ({e.returncode}) {e.output}") -+ if dscreate_test_rdn_value[2] != "wrong=some_value": -+ assert False -+ else: -+ assert True -+ -+def test_bind_invalid_entry(topology_st): -+ """Test the failing bind does not return information about the entry -+ -+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f -+ -+ :setup: Standalone instance -+ -+ :steps: -+ 1: bind as non existing entry -+ 2: check that bind info does not report 'No such entry' -+ -+ :expectedresults: -+ 1: pass -+ 2: pass -+ """ -+ -+ topology_st.standalone.restart() -+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX -+ try: -+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) -+ except ldap.LDAPError as e: -+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) -+ log.info('exception description: ' + e.args[0]['desc']) -+ if 'info' in e.args[0]: -+ log.info('exception info: ' + e.args[0]['info']) -+ assert e.args[0]['desc'] == 'Invalid credentials' -+ assert 'info' not in e.args[0] -+ pass -+ -+ log.info('test_bind_invalid_entry: PASSED') -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode --- -2.26.2 - diff --git a/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch b/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch deleted file mode 100644 index 1d3b1a9..0000000 --- a/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 9 Dec 2020 15:21:11 -0500 -Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero - crash - -Bug Description: If a disk mount has zero total space or zero used - space then a division by zero can occur and the - server will crash. - - It has also been observed that sometimes a system - can return the wrong disk entirely, and when that - happens the incorrect disk also has zero available - space which triggers the disk monitioring thread to - immediately shut the server down. - -Fix Description: Check the total and used space for zero and do not - divide, just ignore it. As a preemptive measure - ignore disks from /dev, /proc, /sys (except /dev/shm). - Yes it's a bit hacky, but the true underlying cause - is not known yet. So better to be safe than sorry. - -Relates: https://github.com/389ds/389-ds-base/issues/4414 - -Reviewed by: firstyear(Thanks!) ---- - ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++- - ldap/servers/slapd/monitor.c | 13 +++++-------- - 2 files changed, 26 insertions(+), 9 deletions(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 691f77570..bfd965263 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir) - } - if (s.st_dev == dev_id) { - endmntent(fp); -- return (slapi_ch_strdup(mnt->mnt_dir)); -+ -+ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) || -+ strncmp(mnt->mnt_dir, "/proc", 4) == 0 || -+ strncmp(mnt->mnt_dir, "/sys", 4) == 0) -+ { -+ /* -+ * Ignore "mount directories" starting with /dev (except -+ * /dev/shm), /proc, /sys For some reason these mounts are -+ * occasionally/incorrectly returned. Only seen this at a -+ * customer site once. When it happens it causes disk -+ * monitoring to think the server has 0 disk space left, and -+ * it abruptly/unexpectedly shuts the server down. 
At this -+ * point it looks like a bug in stat(), setmntent(), or -+ * getmntent(), but there is no way to prove that since there -+ * is no way to reproduce the original issue. For now just -+ * return NULL to be safe. -+ */ -+ return NULL; -+ } else { -+ return (slapi_ch_strdup(mnt->mnt_dir)); -+ } - } - } - endmntent(fp); -diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c -index 562721bed..65f082986 100644 ---- a/ldap/servers/slapd/monitor.c -+++ b/ldap/servers/slapd/monitor.c -@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), - { - int32_t rc = LDAP_SUCCESS; - char **dirs = NULL; -- char buf[BUFSIZ]; - struct berval val; - struct berval *vals[2]; - uint64_t total_space; -@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), - - disk_mon_get_dirs(&dirs); - -- for (uint16_t i = 0; dirs && dirs[i]; i++) { -+ for (size_t i = 0; dirs && dirs[i]; i++) { -+ char buf[BUFSIZ] = {0}; - rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space); -- if (rc) { -- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info", -- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]); -- } else { -+ if (rc == 0 && total_space > 0 && used_space > 0) { - val.bv_len = snprintf(buf, sizeof(buf), -- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", -- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); -+ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", -+ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); - val.bv_val = buf; - attrlist_merge(&e->e_attrs, "dsDisk", vals); - } --- -2.26.2 - diff --git a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch deleted file mode 100644 index fb3211a..0000000 --- a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch +++ /dev/null @@ -1,132 +0,0 @@ -From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001 -From: progier389 <72748589+progier389@users.noreply.github.com> -Date: Tue, 12 Jan 2021 11:06:24 +0100 -Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py - (Needed on RHEL) (#4527) - -(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f) ---- - .../tests/suites/clu/repl_monitor_test.py | 67 +++++++++---------- - 1 file changed, 31 insertions(+), 36 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index eb18d2da2..b2cb840b3 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,7 +9,6 @@ - import time - import subprocess - import pytest --import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2 - from lib389.cli_base import FakeArgs - from lib389.cli_base.dsrc import dsrc_arg_concat - from lib389.cli_base import connect_instance -+from lib389.replica import Replicas -+ - - pytestmark = pytest.mark.tier0 - -@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - --def get_hostnames_from_log(port1, port2): -- # Get the supplier host names as displayed in replication monitor 
output -- with open(LOG_FILE, 'r') as logfile: -- logtext = logfile.read() -- # search for Supplier :hostname:port -- # and use \D to insure there is no more number is after -- # the matched port (i.e that 10 is not matching 101) -- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -- match=re.search(regexp, logtext) -- host_m1 = 'localhost.localdomain' -- if (match is not None): -- host_m1 = match.group(2) -- # Same for master 2 -- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -- match=re.search(regexp, logtext) -- host_m2 = 'localhost.localdomain' -- if (match is not None): -- host_m2 = match.group(2) -- return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - m1 = topology_m2.ms["master1"] - m2 = topology_m2.ms["master2"] - -+ # Enable ldapi if not already done. -+ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]: -+ if not inst.can_autobind(): -+ # Update ns-slapd instance -+ inst.config.set('nsslapd-ldapilisten', 'on') -+ inst.config.set('nsslapd-ldapiautobind', 'on') -+ inst.restart() -+ # Ensure that updates have been sent both ways. -+ replicas = Replicas(m1) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ replica.test_replication([m2]) -+ replicas = Replicas(m2) -+ replica = replicas.get(DEFAULT_SUFFIX) -+ replica.test_replication([m1]) -+ -+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', -+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] -+ - connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) - content_list = ['Replica Root: dc=example,dc=com', - 'Replica ID: 1', -@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + m2.host + ':' + str(m2.port) -+ - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -+ aliases = ['M1=' + m1.host + ':' + str(m1.port), -+ 'M2=' + m2.host + ':' + str(m2.port)] -+ - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -- # Prepare the data for next tests -- aliases = ['M1=' + host_m1 + ':' + str(m1.port), -- 'M2=' + host_m2 + ':' + str(m2.port)] -- -- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -- -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + host_m2 + ':' + str(m2.port) -- - log.info('Run replication monitor with aliases option') - args.aliases = aliases 
- get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch deleted file mode 100644 index 44636c8..0000000 --- a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +++ /dev/null @@ -1,51 +0,0 @@ -From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001 -From: Barbora Simonova -Date: Mon, 11 Jan 2021 15:51:24 +0100 -Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high - rate of setsocketopt - -Description: -The config value of nsslapd-nagle is now set to 'off' by default. -Added a test case, that checks the value. - -Relates: https://github.com/389ds/389-ds-base/issues/4315 - -Reviewed by: droideck (Thanks!) ---- - .../tests/suites/config/config_test.py | 20 +++++++++++++++++++ - 1 file changed, 20 insertions(+) - -diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py -index 38d1ed9ac..fda16a530 100644 ---- a/dirsrvtests/tests/suites/config/config_test.py -+++ b/dirsrvtests/tests/suites/config/config_test.py -@@ -41,6 +41,26 @@ def big_file(): - return TEMP_BIG_FILE - - -+@pytest.mark.bz1897248 -+@pytest.mark.ds4315 -+@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") -+def test_nagle_default_value(topo): -+ """Test that nsslapd-nagle attribute is off by default -+ -+ :id: 00361f5d-d638-4d39-8231-66fa52637203 -+ :setup: Standalone instance -+ :steps: -+ 1. Create instance -+ 2. Check the value of nsslapd-nagle -+ :expectedresults: -+ 1. Success -+ 2. The value of nsslapd-nagle should be off -+ """ -+ -+ log.info('Check the value of nsslapd-nagle attribute is off by default') -+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' -+ -+ - def test_maxbersize_repl(topology_m2, big_file): - """maxbersize is ignored in the replicated operations. 
- --- -2.26.2 - diff --git a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch deleted file mode 100644 index ba8f9d2..0000000 --- a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Tue, 12 Jan 2021 17:45:41 +0100 -Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for - RHEL) - fix merge issue (#4533) - -(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc) ---- - .../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++----- - 1 file changed, 36 insertions(+), 11 deletions(-) - -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index b2cb840b3..caf6a9099 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -9,6 +9,7 @@ - import time - import subprocess - import pytest -+import re - - from lib389.cli_conf.replication import get_repl_monitor_info - from lib389.tasks import * -@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No - log.info('Reset log file') - f.truncate(0) - -+def get_hostnames_from_log(port1, port2): -+ # Get the supplier host names as displayed in replication monitor output -+ with open(LOG_FILE, 'r') as logfile: -+ logtext = logfile.read() -+ # search for Supplier :hostname:port -+ # and use \D to insure there is no more number is after -+ # the matched port (i.e that 10 is not matching 101) -+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m1 = 'localhost.localdomain' -+ if (match is not None): -+ host_m1 = match.group(2) -+ # Same for master 2 -+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -+ match=re.search(regexp, logtext) -+ host_m2 = 'localhost.localdomain' -+ if (match is not None): -+ host_m2 = match.group(2) -+ return (host_m1, host_m2) - - @pytest.mark.ds50545 - @pytest.mark.bz1739718 -@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - '001', - m1.host + ':' + str(m1.port)] - -- dsrc_content = '[repl-monitor-connections]\n' \ -- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -- '\n' \ -- '[repl-monitor-aliases]\n' \ -- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ -- 'M2 = ' + m2.host + ':' + str(m2.port) -- - connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, - m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] - -- aliases = ['M1=' + m1.host + ':' + str(m1.port), -- 'M2=' + m2.host + ':' + str(m2.port)] -- - args = FakeArgs() - args.connections = connections - args.aliases = None -@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): - - log.info('Run replication monitor with connections option') - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) -+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) - check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) - -+ # Prepare the data for next tests -+ aliases = ['M1=' + host_m1 + ':' + str(m1.port), -+ 'M2=' + host_m2 + ':' + str(m2.port)] -+ -+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', -+ 
'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] -+ -+ dsrc_content = '[repl-monitor-connections]\n' \ -+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ -+ '\n' \ -+ '[repl-monitor-aliases]\n' \ -+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ -+ 'M2 = ' + host_m2 + ':' + str(m2.port) -+ - log.info('Run replication monitor with aliases option') - args.aliases = aliases - get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) --- -2.26.2 - diff --git a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch deleted file mode 100644 index 593e2cd..0000000 --- a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Wed, 13 Jan 2021 15:16:08 +0100 -Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529) - -Bug Description: While doing a ldapsearch on "cn=monitor" is -throwing err=32 with -s one. - -Fix Description: 'cn=monitor' is not a real entry so we should not -trying to check if the searched suffix (cm=monitor or its children) -belongs to the searched backend. - -Fixes: #4528 - -Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!) ---- - ldap/servers/slapd/opshared.c | 15 ++++++++++----- - 1 file changed, 10 insertions(+), 5 deletions(-) - -diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c -index c0bc5dcd0..f5ed71144 100644 ---- a/ldap/servers/slapd/opshared.c -+++ b/ldap/servers/slapd/opshared.c -@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - int rc = 0; - int internal_op; - Slapi_DN *basesdn = NULL; -+ Slapi_DN monitorsdn = {0}; - Slapi_DN *sdn = NULL; - Slapi_Operation *operation = NULL; - Slapi_Entry *referral = NULL; -@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } - } else { - /* be_suffix null means that we are searching the default backend -- * -> don't change the search parameters in pblock -- */ -- if (be_suffix != NULL) { -+ * -> don't change the search parameters in pblock -+ * Also, we skip this block for 'cn=monitor' search and its subsearches -+ * as they are done by callbacks from monitor.c */ -+ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor"); -+ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) { - if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) { - /* one level searches - * - depending on the suffix of the backend we might have to -@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } else if (slapi_sdn_issuffix(basesdn, be_suffix)) { - int tmp_scope = LDAP_SCOPE_ONELEVEL; - slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope); -- } else -+ } else { -+ slapi_sdn_done(&monitorsdn); - goto next_be; -+ } - } - - /* subtree searches : -@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - } - } - } -- -+ slapi_sdn_done(&monitorsdn); - slapi_pblock_set(pb, SLAPI_BACKEND, be); - slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); - slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL); --- -2.26.2 - diff --git a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch deleted file mode 100644 index 7133049..0000000 --- 
a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch +++ /dev/null @@ -1,3866 +0,0 @@ -From 6969181628f2c664d5f82c89c15bbc0a2487e21f Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 19 Nov 2020 15:46:19 -0500 -Subject: [PATCH 1/2] Issue 4384 - Use MONOTONIC clock for all timing events - and conditions - -Bug Description: All of the server's event handling and replication were - based on REALTIME clocks, which can be influenced by the - system changing. This could causes massive delays, and - simply cause unexpected behavior. - -Fix Description: Move all condition variables to use pthread instead of NSPR - functions. Also make sure we use MONOTONIC clocks when we - get the current time when checking for timeouts and other - timed events. - -Relates: https://github.com/389ds/389-ds-base/issues/4384 - -Reviewed by: elkris, firstyear, and tbordaz (Thanks!!!) - -Apply firstyear's sugestions - -Apply Firstyear's other suggestions - -Apply Thierry's suggestions ---- - Makefile.am | 2 +- - .../tests/suites/plugins/entryusn_test.py | 3 + - ldap/servers/plugins/chainingdb/cb_add.c | 2 +- - ldap/servers/plugins/chainingdb/cb_compare.c | 2 +- - .../plugins/chainingdb/cb_conn_stateless.c | 16 +- - ldap/servers/plugins/chainingdb/cb_delete.c | 2 +- - ldap/servers/plugins/chainingdb/cb_instance.c | 3 +- - ldap/servers/plugins/chainingdb/cb_modify.c | 2 +- - ldap/servers/plugins/chainingdb/cb_modrdn.c | 2 +- - ldap/servers/plugins/chainingdb/cb_search.c | 8 +- - ldap/servers/plugins/cos/cos_cache.c | 4 +- - ldap/servers/plugins/dna/dna.c | 2 +- - ldap/servers/plugins/passthru/ptconn.c | 2 +- - ldap/servers/plugins/referint/referint.c | 85 +++++--- - ldap/servers/plugins/replication/repl5.h | 3 +- - .../plugins/replication/repl5_backoff.c | 4 +- - .../plugins/replication/repl5_connection.c | 12 +- - .../plugins/replication/repl5_inc_protocol.c | 91 ++++---- - .../plugins/replication/repl5_mtnode_ext.c | 3 +- - .../plugins/replication/repl5_prot_private.h | 6 +- - .../plugins/replication/repl5_replica.c | 10 +- - .../replication/repl5_replica_config.c | 197 +++++++++++------- - .../plugins/replication/repl5_tot_protocol.c | 71 ++++--- - ldap/servers/plugins/replication/repl_extop.c | 4 +- - .../plugins/replication/windows_connection.c | 2 +- - .../replication/windows_inc_protocol.c | 82 +++++--- - .../replication/windows_tot_protocol.c | 24 ++- - ldap/servers/plugins/retrocl/retrocl_trim.c | 2 +- - ldap/servers/plugins/roles/roles_cache.c | 4 +- - ldap/servers/plugins/sync/sync.h | 4 +- - ldap/servers/plugins/sync/sync_persist.c | 54 +++-- - .../slapd/back-ldbm/db-bdb/bdb_import.c | 49 ++--- - .../back-ldbm/db-bdb/bdb_import_threads.c | 29 +-- - .../back-ldbm/db-bdb/bdb_instance_config.c | 8 +- - .../slapd/back-ldbm/db-bdb/bdb_layer.c | 129 +++++++----- - .../slapd/back-ldbm/db-bdb/bdb_layer.h | 10 +- - ldap/servers/slapd/back-ldbm/import.h | 6 +- - ldap/servers/slapd/connection.c | 88 ++++---- - ldap/servers/slapd/daemon.c | 64 ++++-- - ldap/servers/slapd/eventq.c | 132 ++++++++---- - ldap/servers/slapd/house.c | 58 ++++-- - ldap/servers/slapd/libmakefile | 2 +- - ldap/servers/slapd/psearch.c | 63 +++--- - ldap/servers/slapd/regex.c | 2 +- - ldap/servers/slapd/slapi-plugin.h | 7 + - .../slapd/{slapi2nspr.c => slapi2runtime.c} | 87 +++++--- - ldap/servers/slapd/task.c | 4 +- - ldap/servers/slapd/time.c | 10 +- - 48 files changed, 877 insertions(+), 579 deletions(-) - rename ldap/servers/slapd/{slapi2nspr.c => slapi2runtime.c} (69%) - -diff --git a/Makefile.am b/Makefile.am 
-index 0e5f04f91..f7bf1c44c 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -1455,7 +1455,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ - ldap/servers/slapd/security_wrappers.c \ - ldap/servers/slapd/slapd_plhash.c \ - ldap/servers/slapd/slapi_counter.c \ -- ldap/servers/slapd/slapi2nspr.c \ -+ ldap/servers/slapd/slapi2runtime.c \ - ldap/servers/slapd/snmp_collator.c \ - ldap/servers/slapd/sort.c \ - ldap/servers/slapd/ssl.c \ -diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py -index ad3d7f209..da0538f74 100644 ---- a/dirsrvtests/tests/suites/plugins/entryusn_test.py -+++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py -@@ -6,9 +6,11 @@ - # See LICENSE for details. - # --- END COPYRIGHT BLOCK --- - # -+import os - import ldap - import logging - import pytest -+import time - from lib389._constants import DEFAULT_SUFFIX - from lib389.config import Config - from lib389.plugins import USNPlugin, MemberOfPlugin -@@ -211,6 +213,7 @@ def test_entryusn_after_repl_delete(topology_m2): - user_usn = user_1.get_attr_val_int('entryusn') - - user_1.delete() -+ time.sleep(1) # Gives a little time for tombstone creation to complete - - ts = tombstones.get(user_rdn) - ts_usn = ts.get_attr_val_int('entryusn') -diff --git a/ldap/servers/plugins/chainingdb/cb_add.c b/ldap/servers/plugins/chainingdb/cb_add.c -index a9f9c0f87..b7ae7267d 100644 ---- a/ldap/servers/plugins/chainingdb/cb_add.c -+++ b/ldap/servers/plugins/chainingdb/cb_add.c -@@ -130,7 +130,7 @@ chaining_back_add(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* Send LDAP operation to the remote host */ -diff --git a/ldap/servers/plugins/chainingdb/cb_compare.c b/ldap/servers/plugins/chainingdb/cb_compare.c -index 25dfa87b5..8d7fdd06b 100644 ---- a/ldap/servers/plugins/chainingdb/cb_compare.c -+++ b/ldap/servers/plugins/chainingdb/cb_compare.c -@@ -126,7 +126,7 @@ chaining_back_compare(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -index 9beb459ef..a2003221e 100644 ---- a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -+++ b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c -@@ -453,7 +453,7 @@ cb_get_connection(cb_conn_pool *pool, - conn->ld = ld; - conn->status = CB_CONNSTATUS_OK; - conn->refcount = 0; /* incremented below */ -- conn->opentime = slapi_current_utc_time(); -+ conn->opentime = slapi_current_rel_time_t(); - conn->ThreadId = PR_MyThreadId(); /* store the thread id */ - conn->next = NULL; - if (secure) { -@@ -488,7 +488,7 @@ cb_get_connection(cb_conn_pool *pool, - } - - if (!secure) -- slapi_wait_condvar(pool->conn.conn_list_cv, NULL); -+ slapi_wait_condvar_pt(pool->conn.conn_list_cv, pool->conn.conn_list_mutex, NULL); - - if (cb_debug_on()) { - slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, -@@ -639,7 +639,7 @@ cb_check_for_stale_connections(cb_conn_pool *pool) - slapi_lock_mutex(pool->conn.conn_list_mutex); - - if (connlifetime > 0) -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - - if (pool->secure) { - myself = PR_ThreadSelf(); -@@ -860,7 +860,7 @@ 
cb_ping_farm(cb_backend_instance *cb, cb_outgoing_conn *cnx, time_t end_time) - if (cnx && (cnx->status != CB_CONNSTATUS_OK)) /* Known problem */ - return LDAP_SERVER_DOWN; - -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - if (end_time && ((now <= end_time) || (end_time < 0))) - return LDAP_SUCCESS; - -@@ -905,7 +905,7 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) - slapi_unlock_mutex(cb->monitor_availability.cpt_lock); - if (cb->monitor_availability.cpt >= CB_NUM_CONN_BEFORE_UNAVAILABILITY) { - /* we reach the limit of authorized failed connections => we setup the chaining BE state to unavailable */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); - cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); -@@ -938,7 +938,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - time_t now; - if (cb->monitor_availability.farmserver_state == FARMSERVER_UNAVAILABLE) { - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - if (now >= cb->monitor_availability.unavailableTimeLimit) { - cb->monitor_availability.unavailableTimeLimit = now + CB_INFINITE_TIME; /* to be sure only one thread can do the test */ - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); -@@ -951,7 +951,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - "cb_check_availability - ping the farm server and check if it's still unavailable"); - if (cb_ping_farm(cb, NULL, 0) != LDAP_SUCCESS) { /* farm still unavailable... Just change the timelimit */ - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); - cb_send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "FARM SERVER TEMPORARY UNAVAILABLE", 0, NULL); -@@ -961,7 +961,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) - } else { - /* farm is back !*/ - slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - cb->monitor_availability.unavailableTimeLimit = now; /* the unavailable period is finished */ - slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); - /* The farmer server state backs to FARMSERVER_AVAILABLE, but this already done in cb_ping_farm, and also the reset of cpt*/ -diff --git a/ldap/servers/plugins/chainingdb/cb_delete.c b/ldap/servers/plugins/chainingdb/cb_delete.c -index e76fb6b95..94f84b55d 100644 ---- a/ldap/servers/plugins/chainingdb/cb_delete.c -+++ b/ldap/servers/plugins/chainingdb/cb_delete.c -@@ -117,7 +117,7 @@ chaining_back_delete(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c -index cd5abb834..bc1864c1a 100644 ---- a/ldap/servers/plugins/chainingdb/cb_instance.c -+++ b/ldap/servers/plugins/chainingdb/cb_instance.c -@@ -1947,7 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), - * we can't call recursively into the 
DSE to do more adds, they'll - * silently fail. instead, schedule the adds to happen in 1 second. - */ -- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, time(NULL) + 1); -+ inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, -+ slapi_current_rel_time_t() + 1); - } - - /* Get the list of operational attrs defined in the schema */ -diff --git a/ldap/servers/plugins/chainingdb/cb_modify.c b/ldap/servers/plugins/chainingdb/cb_modify.c -index f81edf4a6..e53da9e40 100644 ---- a/ldap/servers/plugins/chainingdb/cb_modify.c -+++ b/ldap/servers/plugins/chainingdb/cb_modify.c -@@ -125,7 +125,7 @@ chaining_back_modify(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_modrdn.c b/ldap/servers/plugins/chainingdb/cb_modrdn.c -index 95a068be7..d648253c7 100644 ---- a/ldap/servers/plugins/chainingdb/cb_modrdn.c -+++ b/ldap/servers/plugins/chainingdb/cb_modrdn.c -@@ -129,7 +129,7 @@ chaining_back_modrdn(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* -diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c -index d47cbc8e4..ffc8f56f8 100644 ---- a/ldap/servers/plugins/chainingdb/cb_search.c -+++ b/ldap/servers/plugins/chainingdb/cb_search.c -@@ -236,7 +236,7 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - rc = ldap_search_ext(ld, target, scope, filter, attrs, attrsonly, -@@ -503,7 +503,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - while (1) { -@@ -579,7 +579,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - /* The server sent one of the entries found by the search */ -@@ -611,7 +611,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) - - /* heart-beat management */ - if (cb->max_idle_time > 0) { -- endtime = slapi_current_utc_time() + cb->max_idle_time; -+ endtime = slapi_current_rel_time_t() + cb->max_idle_time; - } - - parse_rc = ldap_parse_reference(ctx->ld, res, &referrals, NULL, 1); -diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c -index eb9bd77f9..d404ff901 100644 ---- a/ldap/servers/plugins/cos/cos_cache.c -+++ b/ldap/servers/plugins/cos/cos_cache.c -@@ -346,7 +346,7 @@ cos_cache_init(void) - if (ret == 0) { - slapi_lock_mutex(start_lock); - while (!started) { -- while (slapi_wait_condvar(start_cond, NULL) == 0) -+ while (slapi_wait_condvar_pt(start_cond, start_lock, NULL) == 0) - ; - } - slapi_unlock_mutex(start_lock); -@@ -401,7 +401,7 @@ cos_cache_wait_on_change(void *arg __attribute__((unused))) - * thread notifies our condvar, and so we will not miss any - * notifications, including the shutdown notification. 
- */ -- slapi_wait_condvar(something_changed, NULL); -+ slapi_wait_condvar_pt(something_changed, change_lock, NULL); - } else { - /* Something to do...do it below */ - } -diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c -index 16c625bb0..1cb54580b 100644 ---- a/ldap/servers/plugins/dna/dna.c -+++ b/ldap/servers/plugins/dna/dna.c -@@ -907,7 +907,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) - * performing the operation at this point when - * starting up would cause the change to not - * get changelogged. */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); - } else { - dna_update_config_event(0, NULL); -diff --git a/ldap/servers/plugins/passthru/ptconn.c b/ldap/servers/plugins/passthru/ptconn.c -index 49040f651..637d33843 100644 ---- a/ldap/servers/plugins/passthru/ptconn.c -+++ b/ldap/servers/plugins/passthru/ptconn.c -@@ -233,7 +233,7 @@ passthru_get_connection(PassThruServer *srvr, LDAP **ldp) - slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, - "... passthru_get_connection waiting for conn to free up\n"); - #endif -- slapi_wait_condvar(srvr->ptsrvr_connlist_cv, NULL); -+ slapi_wait_condvar_pt(srvr->ptsrvr_connlist_cv, srvr->ptsrvr_connlist_mutex, NULL); - - #ifdef PASSTHRU_VERBOSE_LOGGING - slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, -diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c -index eb4b089fb..fd5356d72 100644 ---- a/ldap/servers/plugins/referint/referint.c -+++ b/ldap/servers/plugins/referint/referint.c -@@ -71,8 +71,9 @@ void referint_get_config(int *delay, char **logfile); - /* global thread control stuff */ - static PRLock *referint_mutex = NULL; - static PRThread *referint_tid = NULL; --static PRLock *keeprunning_mutex = NULL; --static PRCondVar *keeprunning_cv = NULL; -+static pthread_mutex_t keeprunning_mutex; -+static pthread_cond_t keeprunning_cv; -+ - static int keeprunning = 0; - static referint_config *config = NULL; - static Slapi_DN *_ConfigAreaDN = NULL; -@@ -1302,12 +1303,38 @@ referint_postop_start(Slapi_PBlock *pb) - * -1 = integrity off - */ - if (referint_get_delay() > 0) { -+ pthread_condattr_t condAttr; -+ - /* initialize the cv and lock */ - if (!use_txn && (NULL == referint_mutex)) { - referint_mutex = PR_NewLock(); - } -- keeprunning_mutex = PR_NewLock(); -- keeprunning_cv = PR_NewCondVar(keeprunning_mutex); -+ if ((rc = pthread_mutex_init(&keeprunning_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_cond_init(&keeprunning_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", -+ "cannot create new condition variable. 
error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - keeprunning = 1; - - referint_tid = PR_CreateThread(PR_USER_THREAD, -@@ -1337,13 +1364,11 @@ int - referint_postop_close(Slapi_PBlock *pb __attribute__((unused))) - { - /* signal the thread to exit */ -- if (NULL != keeprunning_mutex) { -- PR_Lock(keeprunning_mutex); -+ if (referint_get_delay() > 0) { -+ pthread_mutex_lock(&keeprunning_mutex); - keeprunning = 0; -- if (NULL != keeprunning_cv) { -- PR_NotifyCondVar(keeprunning_cv); -- } -- PR_Unlock(keeprunning_mutex); -+ pthread_cond_signal(&keeprunning_cv); -+ pthread_mutex_unlock(&keeprunning_mutex); - } - - slapi_destroy_rwlock(config_rwlock); -@@ -1369,6 +1394,7 @@ referint_thread_func(void *arg __attribute__((unused))) - char *iter = NULL; - Slapi_DN *sdn = NULL; - Slapi_DN *tmpsuperior = NULL; -+ struct timespec current_time = {0}; - int delay; - int no_changes; - -@@ -1383,20 +1409,22 @@ referint_thread_func(void *arg __attribute__((unused))) - no_changes = 1; - while (no_changes) { - -- PR_Lock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); - if (keeprunning == 0) { -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - break; - } -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - - referint_lock(); - if ((prfd = PR_Open(logfilename, PR_RDONLY, REFERINT_DEFAULT_FILE_MODE)) == NULL) { - referint_unlock(); - /* go back to sleep and wait for this file */ -- PR_Lock(keeprunning_mutex); -- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += delay; -+ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, &current_time); -+ pthread_mutex_unlock(&keeprunning_mutex); - } else { - no_changes = 0; - } -@@ -1407,12 +1435,12 @@ referint_thread_func(void *arg __attribute__((unused))) - * loop before trying to do the changes.
The server - * will pick them up on next startup as file still exists - */ -- PR_Lock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); - if (keeprunning == 0) { -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - break; - } -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_unlock(&keeprunning_mutex); - - while (GetNextLine(thisline, MAX_LINE, prfd)) { - ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter); -@@ -1459,21 +1487,16 @@ referint_thread_func(void *arg __attribute__((unused))) - referint_unlock(); - - /* wait on condition here */ -- PR_Lock(keeprunning_mutex); -- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); -- PR_Unlock(keeprunning_mutex); -+ pthread_mutex_lock(&keeprunning_mutex); -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += delay; -+ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, &current_time); -+ pthread_mutex_unlock(&keeprunning_mutex); - } - - /* cleanup resources allocated in start */ -- if (NULL != keeprunning_mutex) { -- PR_DestroyLock(keeprunning_mutex); -- } -- if (NULL != referint_mutex) { -- PR_DestroyLock(referint_mutex); -- } -- if (NULL != keeprunning_cv) { -- PR_DestroyCondVar(keeprunning_cv); -- } -+ pthread_mutex_destroy(&keeprunning_mutex); -+ pthread_cond_destroy(&keeprunning_cv); - slapi_ch_free_string(&logfilename); - } - -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index f1c596a3f..06e747811 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2010 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. - * All rights reserved. - * -@@ -28,6 +28,7 @@ - #include "llist.h" - #include "repl5_ruv.h" - #include "plstr.h" -+#include <pthread.h> - - #define START_UPDATE_DELAY 2 /* 2 second */ - #define REPLICA_TYPE_WINDOWS 1 -diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c -index 40848b96d..40ec75dd7 100644 ---- a/ldap/servers/plugins/replication/repl5_backoff.c -+++ b/ldap/servers/plugins/replication/repl5_backoff.c -@@ -110,7 +110,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - bt->next_interval = bt->initial_interval; - } - /* Schedule the callback */ -- bt->last_fire_time = slapi_current_utc_time(); -+ bt->last_fire_time = slapi_current_rel_time_t(); - return_value = bt->last_fire_time + bt->next_interval; - bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, - return_value); -@@ -177,7 +177,7 @@ backoff_expired(Backoff_Timer *bt, int margin) - - PR_ASSERT(NULL != bt); - PR_Lock(bt->lock); -- return_value = (slapi_current_utc_time() >= (bt->last_fire_time + bt->next_interval + margin)); -+ return_value = (slapi_current_rel_time_t() >= (bt->last_fire_time + bt->next_interval + margin)); - PR_Unlock(bt->lock); - return return_value; - } -diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c -index cf57c2156..bc9ca424b 100644 ---- a/ldap/servers/plugins/replication/repl5_connection.c -+++ b/ldap/servers/plugins/replication/repl5_connection.c -@@ -402,7 +402,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda - } - if (block) { - /* Did the connection's timeout expire ?
*/ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn->timeout.tv_sec <= (time_now - start_time)) { - /* We timed out */ - rc = 0; -@@ -676,7 +676,7 @@ conn_is_available(Repl_Connection *conn) - { - time_t poll_timeout_sec = 1; /* Polling for 1sec */ - time_t yield_delay_msec = 100; /* Delay to wait */ -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - time_t time_now; - ConnResult return_value = CONN_OPERATION_SUCCESS; - -@@ -686,7 +686,7 @@ conn_is_available(Repl_Connection *conn) - /* in case of timeout we return CONN_TIMEOUT only - * if the RA.timeout is exceeded - */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn->timeout.tv_sec <= (time_now - start_time)) { - break; - } else { -@@ -1010,7 +1010,7 @@ linger_timeout(time_t event_time __attribute__((unused)), void *arg) - void - conn_start_linger(Repl_Connection *conn) - { -- time_t now; -+ time_t now = slapi_current_rel_time_t(); - - PR_ASSERT(NULL != conn); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -@@ -1022,7 +1022,7 @@ conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - return; - } -- now = slapi_current_utc_time(); -+ - PR_Lock(conn->lock); - if (conn->linger_active) { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -@@ -1989,7 +1989,7 @@ repl5_start_debug_timeout(int *setlevel) - { - Slapi_Eq_Context eqctx = 0; - if (s_debug_timeout && s_debug_level) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, - s_debug_timeout + now); - } -diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c -index af5e5897c..4bb384882 100644 ---- a/ldap/servers/plugins/replication/repl5_inc_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
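The hunks above repeat one mechanical substitution: every deadline computed as "now + seconds" switches from slapi_current_utc_time() to slapi_current_rel_time_t(). A wall-clock "now" can jump under NTP corrections or manual clock changes, so a heartbeat deadline such as endtime = now + max_idle_time can fire far too early or never. A minimal standalone sketch of the distinction, assuming (as the conversions imply) that slapi_current_rel_time_t() is a thin wrapper over clock_gettime(CLOCK_MONOTONIC); monotonic_now() below is a hypothetical stand-in, not the libslapd implementation:

    #include <stdio.h>
    #include <time.h>

    /* hypothetical stand-in for slapi_current_rel_time_t() */
    static time_t
    monotonic_now(void)
    {
        struct timespec ts = {0};
        /* CLOCK_MONOTONIC never steps, so "now + delta" stays meaningful */
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec;
    }

    int
    main(void)
    {
        time_t max_idle_time = 30;
        time_t endtime = monotonic_now() + max_idle_time;
        /* a later wall-clock step (e.g. from NTP) cannot move this deadline */
        printf("heart-beat deadline in %ld seconds\n",
               (long)(endtime - monotonic_now()));
        return 0;
    }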
-@@ -129,7 +129,7 @@ typedef struct result_data - * don't see any updates for a period equal to this interval, - * we go ahead and start a replication session, just to be safe - */ --#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ -+#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ - - /* - * tests if the protocol has been shutdown and we need to quit -@@ -145,7 +145,7 @@ typedef struct result_data - /* Forward declarations */ - static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); - static void reset_events(Private_Repl_Protocol *prp); --static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); -+static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); - static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent); - static void repl5_inc_backoff_expired(time_t timer_fire_time, void *arg); - static int examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); -@@ -253,7 +253,7 @@ repl5_inc_result_threadmain(void *param) - char *uniqueid = NULL; - char *ldap_error_string = NULL; - time_t time_now = 0; -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - int connection_error = 0; - int operation_code = 0; - int backoff_time = 1; -@@ -275,7 +275,7 @@ repl5_inc_result_threadmain(void *param) - /* We need to a) check that the 'real' timeout hasn't expired and - * b) implement a backoff sleep to avoid spinning */ - /* Did the connection's timeout expire ? */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn_get_timeout(conn) <= (time_now - start_time)) { - /* We timed out */ - conres = CONN_TIMEOUT; -@@ -358,7 +358,7 @@ repl5_inc_result_threadmain(void *param) - /* Should we stop ? */ - PR_Lock(rd->lock); - if (!finished && yield_session && rd->abort != SESSION_ABORTED && rd->abort_time == 0) { -- rd->abort_time = slapi_current_utc_time(); -+ rd->abort_time = slapi_current_rel_time_t(); - rd->abort = SESSION_ABORTED; /* only set the abort time once */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "repl5_inc_result_threadmain - " - "Abort control detected, setting abort time...(%s)\n", -@@ -532,13 +532,11 @@ repl5_inc_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -712,7 +710,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - conn_set_agmt_changed(prp->conn); - } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) { /* change available */ - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || - event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { - /* this events - should not occur - log a warning and go to sleep */ -@@ -720,13 +718,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) - "repl5_inc_run - %s: " - "Event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), e1 ? 
event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* wait until window opens or an event occurs */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_inc_run - %s: Waiting for update window to open\n", - agmt_get_long_name(prp->agmt)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -850,7 +848,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - } - next_state = STATE_BACKOFF; - backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - use_busy_backoff_timer = PR_FALSE; - } - break; -@@ -899,13 +897,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) - */ - if (STATE_BACKOFF == next_state) { - /* Step the backoff timer */ -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - next_fire_time = backoff_step(prp_priv->backoff); - /* And go back to sleep */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_inc_run - %s: Replication session backing off for %ld seconds\n", - agmt_get_long_name(prp->agmt), next_fire_time - now); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* Destroy the backoff timer, since we won't need it anymore */ - backoff_delete(&prp_priv->backoff); -@@ -923,7 +921,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - next_state = STATE_READY_TO_ACQUIRE; - } else { - /* ignore changes and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { - /* this should never happen - log an error and go to sleep */ -@@ -931,7 +929,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - "Event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), event2name(EVENT_WINDOW_OPENED), - state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -1178,7 +1176,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) - reset_events(prp); - } - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - break; - - case STATE_STOP_NORMAL_TERMINATION: -@@ -1209,20 +1207,28 @@ repl5_inc_run(Private_Repl_Protocol *prp) - * Go to sleep until awakened. - */ - static void --protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) -+protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - /* we should not go to sleep if there are events available to be processed. 
- Otherwise, we can miss the event that suppose to wake us up */ -- if (prp->eventbits == 0) -- PR_WaitCondVar(prp->cvar, duration); -- else { -+ if (prp->eventbits == 0) { -+ if (duration > 0) { -+ struct timespec current_time = {0}; -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += duration; -+ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), &current_time); -+ } else { -+ pthread_cond_wait(&(prp->cvar), &(prp->lock)); -+ } -+ } else { - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", - agmt_get_long_name(prp->agmt), prp->eventbits); - } -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1235,10 +1241,10 @@ static void - event_notify(Private_Repl_Protocol *prp, PRUint32 event) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits |= event; -- PR_NotifyCondVar(prp->cvar); -- PR_Unlock(prp->lock); -+ pthread_cond_signal(&(prp->cvar)); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1250,10 +1256,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) - { - PRUint32 return_value; - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - return_value = (prp->eventbits & event); - prp->eventbits &= ~event; /* Clear event */ -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - return return_value; - } - -@@ -1261,9 +1267,9 @@ static void - reset_events(Private_Repl_Protocol *prp) - { - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits = 0; -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - } - - /* -@@ -1882,7 +1888,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu - /* See if the result thread has hit a problem */ - - if (!finished && rd->abort_time) { -- time_t current_time = slapi_current_utc_time(); -+ time_t current_time = slapi_current_rel_time_t(); - if ((current_time - rd->abort_time) >= release_timeout) { - rd->result = UPDATE_YIELD; - return_value = UPDATE_YIELD; -@@ -2088,7 +2094,9 @@ Private_Repl_Protocol * - Repl_5_Inc_Protocol_new(Repl_Protocol *rp) - { - repl5_inc_private *rip = NULL; -- Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; /* the pthread condition attr */ -+ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ - prp->delete = repl5_inc_delete; - prp->run = repl5_inc_run; - prp->stop = repl5_inc_stop; -@@ -2099,12 +2107,19 @@ Repl_5_Inc_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = repl5_inc_notify_window_closed; - prp->update_now = repl5_inc_update_now; - prp->replica = prot_get_replica(rp); -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 0; - prp->terminate = 0; - prp->eventbits = 0; -diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -index 08a58613b..82e230958 100644 ----
a/ldap/servers/plugins/replication/repl5_mtnode_ext.c -+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -@@ -82,7 +82,8 @@ multimaster_mtnode_construct_replicas() - } - } - /* Wait a few seconds for everything to startup before resuming any replication tasks */ -- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), time(NULL) + 5); -+ slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), -+ slapi_current_rel_time_t() + 5); - } - } - } -diff --git a/ldap/servers/plugins/replication/repl5_prot_private.h b/ldap/servers/plugins/replication/repl5_prot_private.h -index 5b2e1b3ca..0673f1978 100644 ---- a/ldap/servers/plugins/replication/repl5_prot_private.h -+++ b/ldap/servers/plugins/replication/repl5_prot_private.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -32,8 +32,6 @@ typedef struct private_repl_protocol - void (*notify_window_opened)(struct private_repl_protocol *); - void (*notify_window_closed)(struct private_repl_protocol *); - void (*update_now)(struct private_repl_protocol *); -- PRLock *lock; -- PRCondVar *cvar; - int stopped; - int terminate; - PRUint32 eventbits; -@@ -46,6 +44,8 @@ typedef struct private_repl_protocol - int repl50consumer; /* Flag to tell us if this is a 5.0-style consumer we're talking to */ - int repl71consumer; /* Flag to tell us if this is a 7.1-style consumer we're talking to */ - int repl90consumer; /* Flag to tell us if this is a 9.0-style consumer we're talking to */ -+ pthread_mutex_t lock; -+ pthread_cond_t cvar; - } Private_Repl_Protocol; - - extern Private_Repl_Protocol *Repl_5_Inc_Protocol_new(Repl_Protocol *rp); -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index 7e56d6557..c1d376c72 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -232,7 +232,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - In that case the updated would fail but nothing bad would happen. The next - scheduled update would save the state */ - r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - - if (r->tombstone_reap_interval > 0) { - /* -@@ -240,7 +240,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - * This will allow the server to fully start before consuming resources. 
- */ - r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_utc_time() + r->tombstone_reap_interval, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - } - -@@ -1088,7 +1088,7 @@ replica_is_updatedn(Replica *r, const Slapi_DN *sdn) - if (r->groupdn_list) { - /* check and rebuild groupdns */ - if (r->updatedn_group_check_interval > -1) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - if (now - r->updatedn_group_last_check > r->updatedn_group_check_interval) { - Slapi_ValueSet *updatedn_groups_copy = NULL; - ReplicaUpdateDNList groupdn_list = replica_updatedn_list_new(NULL); -@@ -1512,7 +1512,7 @@ replica_set_enabled(Replica *r, PRBool enable) - if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ - { - r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - } - } else /* disable */ - { -@@ -3637,7 +3637,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - r->tombstone_reap_interval = interval; - if (interval > 0 && r->repl_eqcxt_tr == NULL) { - r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_utc_time() + r->tombstone_reap_interval, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", -diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c -index d64d4bf45..a969ef82f 100644 ---- a/ldap/servers/plugins/replication/repl5_replica_config.c -+++ b/ldap/servers/plugins/replication/repl5_replica_config.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
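The repl5_replica_config.c hunks that follow replace the NSPR notify_lock/notify_cvar pair with a pthread mutex and a condition variable bound to CLOCK_MONOTONIC, the same sequence already applied to referint above. A condensed sketch of that recurring pattern, with the per-call error logging the real hunks perform trimmed away; notify_init() and notify_wait() are illustrative names, not functions from the patch:

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t notify_lock;
    static pthread_cond_t notify_cvar;

    /* condensed form of the init sequence in replica_config_init() below */
    static int
    notify_init(void)
    {
        pthread_condattr_t attr;
        if (pthread_mutex_init(&notify_lock, NULL) != 0 ||
            pthread_condattr_init(&attr) != 0 ||
            pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) != 0 ||
            pthread_cond_init(&notify_cvar, &attr) != 0) {
            return -1; /* the real code logs the error code and bails out */
        }
        pthread_condattr_destroy(&attr); /* only needed during creation */
        return 0;
    }

    /* interruptible sleep: returns early when another thread signals */
    static void
    notify_wait(time_t seconds)
    {
        struct timespec deadline = {0};
        /* must read the same clock the condattr selected above */
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += seconds;
        pthread_mutex_lock(&notify_lock);
        pthread_cond_timedwait(&notify_cvar, &notify_lock, &deadline);
        pthread_mutex_unlock(&notify_lock);
    }

The lock/timedwait/unlock stanza in notify_wait() is exactly what replaces each PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(n)) call in the hunks below, and a plain pthread_cond_signal() under the same mutex (see stop_ruv_cleaning()) is what wakes it.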
-@@ -31,14 +31,17 @@ - #define CLEANALLRUVLEN 11 - #define REPLICA_RDN "cn=replica" - -+#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ -+#define CLEANALLRUV_SLEEP 5 -+ - int slapi_log_urp = SLAPI_LOG_REPL; - static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0}; - static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0}; - static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0}; - static PRLock *rid_lock = NULL; - static PRLock *abort_rid_lock = NULL; --static PRLock *notify_lock = NULL; --static PRCondVar *notify_cvar = NULL; -+static pthread_mutex_t notify_lock; -+static pthread_cond_t notify_cvar; - static PRLock *task_count_lock = NULL; - static int32_t clean_task_count = 0; - static int32_t abort_task_count = 0; -@@ -105,6 +108,9 @@ dont_allow_that(Slapi_PBlock *pb __attribute__((unused)), - int - replica_config_init() - { -+ int rc = 0; -+ pthread_condattr_t condAttr; -+ - s_configLock = PR_NewLock(); - - if (s_configLock == NULL) { -@@ -134,18 +140,31 @@ replica_config_init() - PR_GetError()); - return -1; - } -- if ((notify_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " -- "Failed to create notify lock; NSPR error - %d\n", -- PR_GetError()); -+ if ((rc = pthread_mutex_init(&notify_lock, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create notify lock: error %d (%s)\n", -+ rc, strerror(rc)); - return -1; - } -- if ((notify_cvar = PR_NewCondVar(notify_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " -- "Failed to create notify cond var; NSPR error - %d\n", -- PR_GetError()); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create notify new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - return -1; - } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ return -1; -+ } -+ if ((rc = pthread_cond_init(&notify_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", -+ "Failed to create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ return -1; -+ } -+ pthread_condattr_destroy(&condAttr); - - /* config DSE must be initialized before we get here */ - slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, CONFIG_BASE, LDAP_SCOPE_SUBTREE, -@@ -1674,9 +1693,13 @@ replica_cleanallruv_thread(void *arg) - * to startup timing issues, we need to wait before grabbing the replica obj, as - * the backends might not be online yet.
- */ -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(10)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += 10; -+ -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - data->replica = replica_get_replica_from_dn(data->sdn); - if (data->replica == NULL) { - cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Unable to retrieve repl object from dn(%s).", data->sdn); -@@ -1720,15 +1743,18 @@ replica_cleanallruv_thread(void *arg) - ruv_obj = replica_get_ruv(data->replica); - ruv = object_get_data(ruv_obj); - while (data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; - if (csn_get_replicaid(data->maxcsn) == 0 || - ruv_covers_csn_cleanallruv(ruv, data->maxcsn) || - strcasecmp(data->force, "yes") == 0) { - /* We are caught up, now we can clean the ruv's */ - break; - } -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(5)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += CLEANALLRUV_SLEEP; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } - object_release(ruv_obj); - /* -@@ -1796,18 +1822,20 @@ replica_cleanallruv_thread(void *arg) - /* - * need to sleep between passes - */ -- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas have received the " -- "cleanallruv extended op, retrying in %d seconds", -+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, -+ "Not all replicas have received the cleanallruv extended op, retrying in %d seconds", - interval); - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - /* -@@ -1857,18 +1885,19 @@ replica_cleanallruv_thread(void *arg) - * Need to sleep between passes unless we are shutting down - */ - if (!slapi_is_shutting_down()) { -- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replicas have not been cleaned yet, " -- "retrying in %d seconds", -+ struct timespec current_time = {0}; -+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, -+ "Replicas have not been cleaned yet, retrying in %d seconds", - interval); -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } -- -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } /* while */ - -@@ -2081,15 +2110,17 @@ check_replicas_are_done_cleaning(cleanruv_data *data) - "Not all replicas finished cleaning, retrying in %d seconds", - interval); - if (!slapi_is_shutting_down()) { -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } - -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&filter); -@@ -2190,14 +2221,16 @@ check_replicas_are_done_aborting(cleanruv_data *data) - cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, - "Not all replicas finished aborting, retrying in %d seconds", interval); - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&filter); -@@ -2248,14 +2281,16 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn) - cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, - "Not all replicas caught up, retrying in %d seconds", interval); - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - slapi_ch_free_string(&rid_text); -@@ -2310,14 +2345,16 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task) - interval); - - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } - if (is_task_aborted(rid)) { -@@ -3093,16 +3130,18 @@ replica_abort_task_thread(void *arg) - * Need to sleep between passes.
unless we are shutting down - */ - if (!slapi_is_shutting_down()) { -+ struct timespec current_time = {0}; - cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Retrying in %d seconds", interval); -- PR_Lock(notify_lock); -- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); -- PR_Unlock(notify_lock); -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += interval; -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time); -+ pthread_mutex_unlock(&notify_lock); - } - -- if (interval < 14400) { /* 4 hour max */ -- interval = interval * 2; -- } else { -- interval = 14400; -+ interval *= 2; -+ if (interval >= CLEANALLRUV_MAX_WAIT) { -+ interval = CLEANALLRUV_MAX_WAIT; - } - } /* while */ - -@@ -3536,10 +3575,10 @@ check_and_set_abort_cleanruv_task_count(void) - - PR_Lock(task_count_lock); - if (abort_task_count > CLEANRIDSIZ) { -- rc = -1; -- } else { -- abort_task_count++; -- } -+ rc = -1; -+ } else { -+ abort_task_count++; -+ } - PR_Unlock(task_count_lock); - - return rc; -@@ -3551,11 +3590,9 @@ check_and_set_abort_cleanruv_task_count(void) - void - stop_ruv_cleaning() - { -- if (notify_lock) { -- PR_Lock(notify_lock); -- PR_NotifyCondVar(notify_cvar); -- PR_Unlock(notify_lock); -- } -+ pthread_mutex_lock(&notify_lock); -+ pthread_cond_signal(&notify_cvar); -+ pthread_mutex_unlock(&notify_lock); - } - - /* -diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c -index a25839f21..f67263c3e 100644 ---- a/ldap/servers/plugins/replication/repl5_tot_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -45,7 +45,7 @@ typedef struct callback_data - unsigned long num_entries; - time_t sleep_on_busy; - time_t last_busy; -- PRLock *lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ -+ pthread_mutex_t lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ - PRThread *result_tid; /* The async result thread */ - operation_id_list_item *message_id_list; /* List of IDs for outstanding operations */ - int abort; /* Flag used to tell the sending thread asyncronously that it should abort (because an error came up in a result) */ -@@ -113,7 +113,7 @@ repl5_tot_result_threadmain(void *param) - while (!finished) { - int message_id = 0; - time_t time_now = 0; -- time_t start_time = slapi_current_utc_time(); -+ time_t start_time = slapi_current_rel_time_t(); - int backoff_time = 1; - - /* Read the next result */ -@@ -130,7 +130,7 @@ repl5_tot_result_threadmain(void *param) - /* We need to a) check that the 'real' timeout hasn't expired and - * b) implement a backoff sleep to avoid spinning */ - /* Did the connection's timeout expire ? */ -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - if (conn_get_timeout(conn) <= (time_now - start_time)) { - /* We timed out */ - conres = CONN_TIMEOUT; -@@ -142,11 +142,11 @@ repl5_tot_result_threadmain(void *param) - backoff_time <<= 1; - } - /* Should we stop ?
*/ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - if (cb->stop_result_thread) { - finished = 1; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } else { - /* Something other than a timeout, so we exit the loop */ - break; -@@ -164,21 +164,21 @@ repl5_tot_result_threadmain(void *param) - /* Was the result itself an error ? */ - if (0 != conres) { - /* If so then we need to take steps to abort the update process */ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - cb->abort = 1; - if (conres == CONN_NOT_CONNECTED) { - cb->rc = LDAP_CONNECT_ERROR; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } - /* Should we stop ? */ -- PR_Lock(cb->lock); -+ pthread_mutex_lock(&(cb->lock)); - /* if the connection is not connected, then we cannot read any more - results - we are finished */ - if (cb->stop_result_thread || (conres == CONN_NOT_CONNECTED)) { - finished = 1; - } -- PR_Unlock(cb->lock); -+ pthread_mutex_unlock(&(cb->lock)); - } - } - -@@ -209,9 +209,9 @@ repl5_tot_destroy_async_result_thread(callback_data *cb_data) - int retval = 0; - PRThread *tid = cb_data->result_tid; - if (tid) { -- PR_Lock(cb_data->lock); -+ pthread_mutex_lock(&(cb_data->lock)); - cb_data->stop_result_thread = 1; -- PR_Unlock(cb_data->lock); -+ pthread_mutex_unlock(&(cb_data->lock)); - (void)PR_JoinThread(tid); - } - return retval; -@@ -248,7 +248,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) - /* Keep pulling results off the LDAP connection until we catch up to the last message id stored in the rd */ - while (!done) { - /* Lock the structure to force memory barrier */ -- PR_Lock(cb_data->lock); -+ pthread_mutex_lock(&(cb_data->lock)); - /* Are we caught up ? */ - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "repl5_tot_waitfor_async_results - %d %d\n", -@@ -260,7 +260,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) - if (cb_data->abort && LOST_CONN_ERR(cb_data->rc)) { - done = 1; /* no connection == no more results */ - } -- PR_Unlock(cb_data->lock); -+ pthread_mutex_unlock(&(cb_data->lock)); - /* If not then sleep a bit */ - DS_Sleep(PR_SecondsToInterval(1)); - loops++; -@@ -482,9 +482,9 @@ retry: - cb_data.rc = 0; - cb_data.num_entries = 1UL; - cb_data.sleep_on_busy = 0UL; -- cb_data.last_busy = slapi_current_utc_time(); -+ cb_data.last_busy = slapi_current_rel_time_t(); - cb_data.flowcontrol_detection = 0; -- cb_data.lock = PR_NewLock(); -+ pthread_mutex_init(&(cb_data.lock), NULL); - - /* This allows during perform_operation to check the callback data - * especially to do flow contol on delta send msgid / recv msgid -@@ -541,9 +541,9 @@ retry: - cb_data.rc = 0; - cb_data.num_entries = 0UL; - cb_data.sleep_on_busy = 0UL; -- cb_data.last_busy = slapi_current_utc_time(); -+ cb_data.last_busy = slapi_current_rel_time_t(); - cb_data.flowcontrol_detection = 0; -- cb_data.lock = PR_NewLock(); -+ pthread_mutex_init(&(cb_data.lock), NULL); - - /* This allows during perform_operation to check the callback data - * especially to do flow contol on delta send msgid / recv msgid -@@ -633,9 +633,7 @@ done: - type_nsds5ReplicaFlowControlWindow); - } - conn_set_tot_update_cb(prp->conn, NULL); -- if (cb_data.lock) { -- PR_DestroyLock(cb_data.lock); -- } -+ pthread_mutex_destroy(&(cb_data.lock)); - prp->stopped = 1; - } - -@@ -700,7 +698,9 @@ Private_Repl_Protocol * - Repl_5_Tot_Protocol_new(Repl_Protocol *rp) - { - repl5_tot_private *rip = NULL; -- Private_Repl_Protocol *prp = (Private_Repl_Protocol 
*)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; -+ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ - prp->delete = repl5_tot_delete; - prp->run = repl5_tot_run; - prp->stop = repl5_tot_stop; -@@ -710,12 +710,19 @@ Repl_5_Tot_Protocol_new(Repl_Protocol *rp) - prp->notify_window_opened = repl5_tot_noop; - prp->notify_window_closed = repl5_tot_noop; - prp->update_now = repl5_tot_noop; -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 1; - prp->terminate = 0; - prp->eventbits = 0; -@@ -744,13 +751,11 @@ repl5_tot_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -824,9 +829,9 @@ send_entry(Slapi_Entry *e, void *cb_data) - - /* see if the result reader thread encountered - a fatal error */ -- PR_Lock(((callback_data *)cb_data)->lock); -+ pthread_mutex_lock((&((callback_data *)cb_data)->lock)); - rc = ((callback_data *)cb_data)->abort; -- PR_Unlock(((callback_data *)cb_data)->lock); -+ pthread_mutex_unlock((&((callback_data *)cb_data)->lock)); - if (rc) { - conn_disconnect(prp->conn); - ((callback_data *)cb_data)->rc = -1; -@@ -889,7 +894,7 @@ send_entry(Slapi_Entry *e, void *cb_data) - } - - if (rc == CONN_BUSY) { -- time_t now = slapi_current_utc_time(); -+ time_t now = slapi_current_rel_time_t(); - if ((now - *last_busyp) < (*sleep_on_busyp + 10)) { - *sleep_on_busyp += 5; - } else { -diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c -index af486f730..ef2025dd9 100644 ---- a/ldap/servers/plugins/replication/repl_extop.c -+++ b/ldap/servers/plugins/replication/repl_extop.c -@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) - /* now that the changelog is open and started, we can alos cretae the - * keep alive entry without risk that db and cl will not match - */ -- replica_subentry_check(replica_get_root(r), replica_get_rid(r)); -+ replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r)); - } - - /* ONREPL code that dealt with new RUV, etc was moved into the code -@@ -1474,7 +1474,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) - * Launch the cleanruv monitoring thread. 
Once all the replicas are cleaned it will release the rid - */ - -- cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n"); -+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread..."); - data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data)); - if (data == NULL) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate " -diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c -index 011b328bf..ce0662544 100644 ---- a/ldap/servers/plugins/replication/windows_connection.c -+++ b/ldap/servers/plugins/replication/windows_connection.c -@@ -1121,7 +1121,7 @@ windows_conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - return; - } -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - PR_Lock(conn->lock); - if (conn->linger_active) { - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, -diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c -index 1c07534e3..3d548e5ed 100644 ---- a/ldap/servers/plugins/replication/windows_inc_protocol.c -+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -48,7 +48,7 @@ typedef struct windows_inc_private - char *ruv; /* RUV on remote replica (use diff type for this? - ggood */ - Backoff_Timer *backoff; - Repl_Protocol *rp; -- PRLock *lock; -+ pthread_mutex_t *lock; - PRUint32 eventbits; - } windows_inc_private; - -@@ -96,7 +96,7 @@ typedef struct windows_inc_private - * don't see any updates for a period equal to this interval, - * we go ahead and start a replication session, just to be safe - */ --#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ -+#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ - /* - * tests if the protocol has been shutdown and we need to quit - * event_occurred resets the bits in the bit flag, so whoever tests for shutdown -@@ -108,7 +108,7 @@ typedef struct windows_inc_private - /* Forward declarations */ - static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); - static void reset_events(Private_Repl_Protocol *prp); --static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); -+static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); - static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent, int do_send); - static void windows_inc_backoff_expired(time_t timer_fire_time, void *arg); - static int windows_examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); -@@ -143,13 +143,11 @@ windows_inc_delete(Private_Repl_Protocol **prpp) - (*prpp)->stopped = 1; - (*prpp)->stop(*prpp); - } -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -@@ -360,7 +358,7 @@ 
windows_inc_run(Private_Repl_Protocol *prp) - } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) /* change available */ - { - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || - event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { - /* this events - should not occur - log a warning and go to sleep */ -@@ -370,18 +368,18 @@ windows_inc_run(Private_Repl_Protocol *prp) - agmt_get_long_name(prp->agmt), - e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), - state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else if (event_occurred(prp, EVENT_RUN_DIRSYNC)) /* periodic_dirsync */ - { - /* just ignore it and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* wait until window opens or an event occurs */ - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - %s: " - "Waiting for update window to open\n", - agmt_get_long_name(prp->agmt)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - -@@ -536,7 +534,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - } - next_state = STATE_BACKOFF; - backoff_reset(prp_priv->backoff, windows_inc_backoff_expired, (void *)prp); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - use_busy_backoff_timer = PR_FALSE; - } - break; -@@ -605,7 +603,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - agmt_get_long_name(prp->agmt), - next_fire_time - now); - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } else { - /* Destroy the backoff timer, since we won't need it anymore */ - backoff_delete(&prp_priv->backoff); -@@ -624,7 +622,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - next_state = STATE_READY_TO_ACQUIRE; - } else { - /* ignore changes and go to sleep */ -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { - /* this should never happen - log an error and go to sleep */ -@@ -632,7 +630,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - "event %s should not occur in state %s; going to sleep\n", - agmt_get_long_name(prp->agmt), - event2name(EVENT_WINDOW_OPENED), state2name(current_state)); -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - } - break; - case STATE_SENDING_UPDATES: -@@ -856,7 +854,7 @@ windows_inc_run(Private_Repl_Protocol *prp) - reset_events(prp); - } - -- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); -+ protocol_sleep(prp, 0); - break; - - case STATE_STOP_NORMAL_TERMINATION: -@@ -891,21 +889,29 @@ windows_inc_run(Private_Repl_Protocol *prp) - * Go to sleep until awakened. - */ - static void --protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) -+protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> protocol_sleep\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - /* we should not go to sleep if there are events available to be processed. 
- Otherwise, we can miss the event that suppose to wake us up */ -- if (prp->eventbits == 0) -- PR_WaitCondVar(prp->cvar, duration); -- else { -+ if (prp->eventbits == 0) { -+ if (duration > 0) { -+ struct timespec current_time = {0}; -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, &current_time); -+ current_time.tv_sec += duration; -+ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), &current_time); -+ } else { -+ pthread_cond_wait(&(prp->cvar), &(prp->lock)); -+ } -+ } else { - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", - agmt_get_long_name(prp->agmt), prp->eventbits); - } -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= protocol_sleep\n"); - } - -@@ -921,10 +927,10 @@ event_notify(Private_Repl_Protocol *prp, PRUint32 event) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_notify\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits |= event; -- PR_NotifyCondVar(prp->cvar); -- PR_Unlock(prp->lock); -+ pthread_cond_signal(&(prp->cvar)); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_notify\n"); - } - -@@ -941,10 +947,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_occurred\n"); - - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - return_value = (prp->eventbits & event); - prp->eventbits &= ~event; /* Clear event */ -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_occurred\n"); - return return_value; - } -@@ -954,9 +960,9 @@ reset_events(Private_Repl_Protocol *prp) - { - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> reset_events\n"); - PR_ASSERT(NULL != prp); -- PR_Lock(prp->lock); -+ pthread_mutex_lock(&(prp->lock)); - prp->eventbits = 0; -- PR_Unlock(prp->lock); -+ pthread_mutex_unlock(&(prp->lock)); - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= reset_events\n"); - } - -@@ -1416,6 +1422,7 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) - { - windows_inc_private *rip = NULL; - Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; - - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Inc_Protocol_new\n"); - -@@ -1429,12 +1436,19 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = windows_inc_notify_window_closed; - prp->update_now = windows_inc_update_now; - prp->replica = prot_get_replica(rp); -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_init(&cattr) != 0) { -+ goto loser; -+ } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { - goto loser; - } -+ pthread_condattr_destroy(&cattr); /* no longer needed */ - prp->stopped = 0; - prp->terminate = 0; - prp->eventbits = 0; -diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c -index da244c166..f67e4dbd2 100644 ----
a/ldap/servers/plugins/replication/windows_tot_protocol.c -+++ b/ldap/servers/plugins/replication/windows_tot_protocol.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -326,6 +326,7 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) - { - windows_tot_private *rip = NULL; - Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); -+ pthread_condattr_t cattr; - - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Tot_Protocol_new\n"); - -@@ -339,12 +340,19 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) - prp->notify_window_closed = windows_tot_noop; - prp->replica = prot_get_replica(rp); - prp->update_now = windows_tot_noop; -- if ((prp->lock = PR_NewLock()) == NULL) { -+ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { - goto loser; - } -- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { -+ if (pthread_condattr_init(&cattr) != 0) { - goto loser; - } -+ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { -+ goto loser; -+ } -+ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { -+ goto loser; -+ } -+ pthread_condattr_destroy(&cattr); - prp->stopped = 1; - prp->terminate = 0; - prp->eventbits = 0; -@@ -373,13 +381,11 @@ windows_tot_delete(Private_Repl_Protocol **prpp) - (*prpp)->stop(*prpp); - } - /* Then, delete all resources used by the protocol */ -- if ((*prpp)->lock) { -- PR_DestroyLock((*prpp)->lock); -- (*prpp)->lock = NULL; -+ if (&((*prpp)->lock)) { -+ pthread_mutex_destroy(&((*prpp)->lock)); - } -- if ((*prpp)->cvar) { -- PR_DestroyCondVar((*prpp)->cvar); -- (*prpp)->cvar = NULL; -+ if (&((*prpp)->cvar)) { -+ pthread_cond_destroy(&(*prpp)->cvar); - } - slapi_ch_free((void **)&(*prpp)->private); - slapi_ch_free((void **)prpp); -diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c -index d031dc3f8..a3e16c4e1 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_trim.c -+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c -@@ -241,7 +241,7 @@ trim_changelog(void) - int me, lt; - - -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - - PR_Lock(ts.ts_s_trim_mutex); - me = ts.ts_c_max_age; -diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c -index de99ba233..3d076a4cb 100644 ---- a/ldap/servers/plugins/roles/roles_cache.c -+++ b/ldap/servers/plugins/roles/roles_cache.c -@@ -343,7 +343,7 @@ roles_cache_create_suffix(Slapi_DN *sdn) - - slapi_lock_mutex(new_suffix->create_lock); - if (new_suffix->is_ready != 1) { -- slapi_wait_condvar(new_suffix->suffix_created, NULL); -+ slapi_wait_condvar_pt(new_suffix->suffix_created, new_suffix->create_lock, NULL); - } - slapi_unlock_mutex(new_suffix->create_lock); - -@@ -384,7 +384,7 @@ roles_cache_wait_on_change(void *arg) - test roles_def->keeprunning before - going to sleep. 
- */ -- slapi_wait_condvar(roles_def->something_changed, NULL); -+ slapi_wait_condvar_pt(roles_def->something_changed, roles_def->change_lock, NULL); - - slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "roles_cache_wait_on_change - notified\n"); - -diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h -index 51d0da6e0..7241fddbf 100644 ---- a/ldap/servers/plugins/sync/sync.h -+++ b/ldap/servers/plugins/sync/sync.h -@@ -201,8 +201,8 @@ typedef struct sync_request_list - { - Slapi_RWLock *sync_req_rwlock; /* R/W lock struct to serialize access */ - SyncRequest *sync_req_head; /* Head of list */ -- PRLock *sync_req_cvarlock; /* Lock for cvar */ -- PRCondVar *sync_req_cvar; /* ps threads sleep on this */ -+ pthread_mutex_t sync_req_cvarlock; /* Lock for cvar */ -+ pthread_cond_t sync_req_cvar; /* ps threads sleep on this */ - int sync_req_max_persist; - int sync_req_cur_persist; - } SyncRequestList; -diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c -index 598c6868d..d13f142b0 100644 ---- a/ldap/servers/plugins/sync/sync_persist.c -+++ b/ldap/servers/plugins/sync/sync_persist.c -@@ -463,19 +463,40 @@ int - sync_persist_initialize(int argc, char **argv) - { - if (!SYNC_IS_INITIALIZED()) { -+ pthread_condattr_t sync_req_condAttr; /* cond var attribute */ -+ int rc = 0; -+ - sync_request_list = (SyncRequestList *)slapi_ch_calloc(1, sizeof(SyncRequestList)); - if ((sync_request_list->sync_req_rwlock = slapi_new_rwlock()) == NULL) { - slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(1).\n"); - return (-1); - } -- if ((sync_request_list->sync_req_cvarlock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(2).\n"); -+ if (pthread_mutex_init(&(sync_request_list->sync_req_cvarlock), NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create lock: error %d (%s)\n", -+ rc, strerror(rc)); -+ return (-1); -+ } -+ if ((rc = pthread_condattr_init(&sync_req_condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - return (-1); - } -- if ((sync_request_list->sync_req_cvar = PR_NewCondVar(sync_request_list->sync_req_cvarlock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize condition variable.\n"); -+ if ((rc = pthread_condattr_setclock(&sync_req_condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); - return (-1); - } -+ if ((rc = pthread_cond_init(&(sync_request_list->sync_req_cvar), &sync_req_condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", -+ "Failed to create new condition variable. 
error %d (%s)\n", -+ rc, strerror(rc)); -+ return (-1); -+ } -+ pthread_condattr_destroy(&sync_req_condAttr); /* no longer needed */ -+ - sync_request_list->sync_req_head = NULL; - sync_request_list->sync_req_cur_persist = 0; - sync_request_list->sync_req_max_persist = SYNC_MAX_CONCURRENT; -@@ -617,8 +638,8 @@ sync_persist_terminate_all() - } - - slapi_destroy_rwlock(sync_request_list->sync_req_rwlock); -- PR_DestroyLock(sync_request_list->sync_req_cvarlock); -- PR_DestroyCondVar(sync_request_list->sync_req_cvar); -+ pthread_mutex_destroy(&(sync_request_list->sync_req_cvarlock)); -+ pthread_cond_destroy(&(sync_request_list->sync_req_cvar)); - - /* it frees the structures, just in case it remained connected sync_repl client */ - for (req = sync_request_list->sync_req_head; NULL != req; req = next) { -@@ -725,9 +746,9 @@ static void - sync_request_wakeup_all(void) - { - if (SYNC_IS_INITIALIZED()) { -- PR_Lock(sync_request_list->sync_req_cvarlock); -- PR_NotifyAllCondVar(sync_request_list->sync_req_cvar); -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); -+ pthread_cond_broadcast(&(sync_request_list->sync_req_cvar)); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - } - } - -@@ -817,7 +838,7 @@ sync_send_results(void *arg) - goto done; - } - -- PR_Lock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); - - while ((conn_acq_flag == 0) && !req->req_complete && !plugin_closing) { - /* Check for an abandoned operation */ -@@ -833,7 +854,12 @@ sync_send_results(void *arg) - * connection code. Wake up every second to check if thread - * should terminate. - */ -- PR_WaitCondVar(sync_request_list->sync_req_cvar, PR_SecondsToInterval(1)); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += 1; -+ pthread_cond_timedwait(&(sync_request_list->sync_req_cvar), -+ &(sync_request_list->sync_req_cvarlock), -+ ¤t_time); - } else { - /* dequeue the item */ - int attrsonly; -@@ -864,7 +890,7 @@ sync_send_results(void *arg) - * Send the result. Since send_ldap_search_entry can block for - * up to 30 minutes, we relinquish all locks before calling it. - */ -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - - /* - * The entry is in the right scope and matches the filter -@@ -910,13 +936,13 @@ sync_send_results(void *arg) - ldap_controls_free(ectrls); - slapi_ch_array_free(noattrs); - } -- PR_Lock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); - - /* Deallocate our wrapper for this entry */ - sync_node_free(&qnode); - } - } -- PR_Unlock(sync_request_list->sync_req_cvarlock); -+ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); - - /* indicate the end of search */ - sync_release_connection(req->req_pblock, conn, op, conn_acq_flag == 0); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -index 1e4830e99..ba783ee59 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
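
Every file converted in this patch applies the same four-step setup when it replaces a PRLock/PRCondVar pair: initialize the mutex, initialize a condition attribute, bind the attribute to CLOCK_MONOTONIC, create the condition variable, then discard the attribute. A minimal standalone sketch of that sequence, with error reporting in the style of the hunks above (demo_lock, demo_cv and demo_sync_init are illustrative names, not symbols from the 389-ds-base tree):

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

static pthread_mutex_t demo_lock;
static pthread_cond_t demo_cv;

static int
demo_sync_init(void)
{
    pthread_condattr_t cattr;
    int rc = 0;

    if ((rc = pthread_mutex_init(&demo_lock, NULL)) != 0) {
        fprintf(stderr, "Failed to create lock: error %d (%s)\n", rc, strerror(rc));
        return -1;
    }
    if ((rc = pthread_condattr_init(&cattr)) != 0) {
        fprintf(stderr, "Failed to create cond attribute: error %d (%s)\n", rc, strerror(rc));
        return -1;
    }
    /* Bind the condvar to the monotonic clock so that absolute deadlines
     * passed to pthread_cond_timedwait() are immune to wall-clock steps. */
    if ((rc = pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC)) != 0) {
        fprintf(stderr, "Cannot set cond attr clock: error %d (%s)\n", rc, strerror(rc));
        pthread_condattr_destroy(&cattr);
        return -1;
    }
    if ((rc = pthread_cond_init(&demo_cv, &cattr)) != 0) {
        fprintf(stderr, "Failed to create condvar: error %d (%s)\n", rc, strerror(rc));
        pthread_condattr_destroy(&cattr);
        return -1;
    }
    pthread_condattr_destroy(&cattr); /* no longer needed, as the hunks note */
    return 0;
}

The monotonic clock is the point of the exercise: pthread_cond_timedwait() takes an absolute time, and a deadline derived from the default CLOCK_REALTIME would shift whenever the wall clock is stepped. One side effect of embedding the mutex in the structure is visible in windows_tot_delete() above: the old "if (pointer)" guards have become address-of tests such as "if (&((*prpp)->lock))", which are always true and guard nothing.
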
-@@ -1429,21 +1429,22 @@ import_free_job(ImportJob *job) - * To avoid freeing fifo queue under bulk_import_queue use - * job lock to synchronize - */ -- if (job->wire_lock) -- PR_Lock(job->wire_lock); -+ if (&job->wire_lock) { -+ pthread_mutex_lock(&job->wire_lock); -+ } - - import_fifo_destroy(job); - -- if (job->wire_lock) -- PR_Unlock(job->wire_lock); -+ if (&job->wire_lock) { -+ pthread_mutex_unlock(&job->wire_lock); -+ } - } - -- if (NULL != job->uuid_namespace) -+ if (NULL != job->uuid_namespace) { - slapi_ch_free((void **)&job->uuid_namespace); -- if (job->wire_lock) -- PR_DestroyLock(job->wire_lock); -- if (job->wire_cv) -- PR_DestroyCondVar(job->wire_cv); -+ } -+ pthread_mutex_destroy(&job->wire_lock); -+ pthread_cond_destroy(&job->wire_cv); - slapi_ch_free((void **)&job->task_status); - } - -@@ -1777,7 +1778,7 @@ import_monitor_threads(ImportJob *job, int *status) - goto error_abort; - } - -- last_time = slapi_current_utc_time(); -+ last_time = slapi_current_rel_time_t(); - job->start_time = last_time; - import_clear_progress_history(job); - -@@ -1789,7 +1790,7 @@ import_monitor_threads(ImportJob *job, int *status) - - /* First calculate the time interval since last reported */ - if (0 == (count % display_interval)) { -- time_now = slapi_current_utc_time(); -+ time_now = slapi_current_rel_time_t(); - time_interval = time_now - last_time; - last_time = time_now; - /* Now calculate our rate of progress overall for this chunk */ -@@ -2232,7 +2233,7 @@ bdb_import_main(void *arg) - opstr = "Reindexing"; - } - PR_ASSERT(inst != NULL); -- beginning = slapi_current_utc_time(); -+ beginning = slapi_current_rel_time_t(); - - /* Decide which indexes are needed */ - if (job->flags & FLAG_INDEX_ATTRS) { -@@ -2251,9 +2252,9 @@ bdb_import_main(void *arg) - ret = import_fifo_init(job); - if (ret) { - if (!(job->flags & FLAG_USE_FILES)) { -- PR_Lock(job->wire_lock); -- PR_NotifyCondVar(job->wire_cv); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); -+ pthread_cond_signal(&job->wire_cv); -+ pthread_mutex_unlock(&job->wire_lock); - } - goto error; - } -@@ -2315,9 +2316,9 @@ bdb_import_main(void *arg) - } else { - /* release the startup lock and let the entries start queueing up - * in for import */ -- PR_Lock(job->wire_lock); -- PR_NotifyCondVar(job->wire_cv); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); -+ pthread_cond_signal(&job->wire_cv); -+ pthread_mutex_unlock(&job->wire_lock); - } - - /* Run as many passes as we need to complete the job or die honourably in -@@ -2499,7 +2500,7 @@ error: - import_log_notice(job, SLAPI_LOG_WARNING, "bdb_import_main", "Failed to close database"); - } - } -- end = slapi_current_utc_time(); -+ end = slapi_current_rel_time_t(); - if (verbose && (0 == ret)) { - int seconds_to_import = end - beginning; - size_t entries_processed = job->lead_ID - (job->starting_ID - 1); -@@ -3393,7 +3394,7 @@ import_mega_merge(ImportJob *job) - passes, (long unsigned int)job->number_indexers); - } - -- beginning = slapi_current_utc_time(); -+ beginning = slapi_current_rel_time_t(); - /* Iterate over the files */ - for (current_worker = job->worker_list; - (ret == 0) && (current_worker != NULL); -@@ -3405,9 +3406,9 @@ import_mega_merge(ImportJob *job) - time_t file_end = 0; - int key_count = 0; - -- file_beginning = slapi_current_utc_time(); -+ file_beginning = slapi_current_rel_time_t(); - ret = import_merge_one_file(current_worker, passes, &key_count); -- file_end = slapi_current_utc_time(); -+ file_end = slapi_current_rel_time_t(); - if 
(key_count == 0) { - import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".", - current_worker->index_info->name); -@@ -3426,7 +3427,7 @@ import_mega_merge(ImportJob *job) - } - } - -- end = slapi_current_utc_time(); -+ end = slapi_current_rel_time_t(); - if (0 == ret) { - int seconds_to_merge = end - beginning; - import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.", -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -index 5c7d9c8f7..905a84e74 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -3151,8 +3151,9 @@ bulk_import_start(Slapi_PBlock *pb) - (1024 * 1024); - } - import_subcount_stuff_init(job->mothers); -- job->wire_lock = PR_NewLock(); -- job->wire_cv = PR_NewCondVar(job->wire_lock); -+ -+ pthread_mutex_init(&job->wire_lock, NULL); -+ pthread_cond_init(&job->wire_cv, NULL); - - /* COPIED from ldif2ldbm.c : */ - -@@ -3175,7 +3176,7 @@ bulk_import_start(Slapi_PBlock *pb) - - /* END OF COPIED SECTION */ - -- PR_Lock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); - vlv_init(job->inst); - - /* create thread for import_main, so we can return */ -@@ -3188,7 +3189,7 @@ bulk_import_start(Slapi_PBlock *pb) - slapi_log_err(SLAPI_LOG_ERR, "bulk_import_start", - "Unable to spawn import thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - prerr, slapd_pr_strerror(prerr)); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - ret = -2; - goto fail; - } -@@ -3204,8 +3205,8 @@ bulk_import_start(Slapi_PBlock *pb) - /* (don't want to send the success code back to the LDAP client until - * we're ready for the adds to start rolling in) - */ -- PR_WaitCondVar(job->wire_cv, PR_INTERVAL_NO_TIMEOUT); -- PR_Unlock(job->wire_lock); -+ pthread_cond_wait(&job->wire_cv, &job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - - return 0; - -@@ -3243,13 +3244,13 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - return -1; - } - -- PR_Lock(job->wire_lock); -+ pthread_mutex_lock(&job->wire_lock); - /* Let's do this inside the lock !*/ - id = job->lead_ID + 1; - /* generate uniqueid if necessary */ - if (import_generate_uniqueid(job, entry) != UID_SUCCESS) { - import_abort_all(job, 1); -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - -@@ -3258,7 +3259,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - if ((ep == NULL) || (ep->ep_entry == NULL)) { - import_abort_all(job, 1); - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - -@@ -3304,7 +3305,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - if (job->flags & FLAG_ABORT) { - backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -2; - } - -@@ -3342,7 +3343,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - /* entry is released in the frontend on failure*/ - backentry_clear_entry(ep); - backentry_free(&ep); /* release the backend wrapper */ -- 
PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - sepp = PL_strchr(sepp + 1, ','); -@@ -3368,7 +3369,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - (long unsigned int)newesize, (long unsigned int)job->fifo.bsize); - backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ - backentry_free(&ep); /* release the backend wrapper, here */ -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return -1; - } - /* Now check if fifo has enough space for the new entry */ -@@ -3394,7 +3395,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) - job->trailing_ID = id - job->fifo.size; - } - -- PR_Unlock(job->wire_lock); -+ pthread_mutex_unlock(&job->wire_lock); - return 0; - } - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -index 0ac3694b6..5d6010f46 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -270,10 +270,8 @@ bdb_instance_cleanup(struct ldbm_instance *inst) - slapi_ch_free_string(&inst_dirp); - } - slapi_destroy_rwlock(inst_env->bdb_env_lock); -- PR_DestroyCondVar(inst_env->bdb_thread_count_cv); -- inst_env->bdb_thread_count_cv = NULL; -- PR_DestroyLock(inst_env->bdb_thread_count_lock); -- inst_env->bdb_thread_count_lock = NULL; -+ pthread_mutex_destroy(&(inst_env->bdb_thread_count_lock)); -+ pthread_cond_destroy(&(inst_env->bdb_thread_count_cv)); - slapi_ch_free((void **)&inst->inst_db); - /* - slapi_destroy_rwlock(((bdb_db_env *)inst->inst_db)->bdb_env_lock); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -index 464f89f4d..6cccad8e6 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -52,16 +52,16 @@ - return. - */ - #define INCR_THREAD_COUNT(pEnv) \ -- PR_Lock(pEnv->bdb_thread_count_lock); \ -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ - ++pEnv->bdb_thread_count; \ -- PR_Unlock(pEnv->bdb_thread_count_lock) -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) - - #define DECR_THREAD_COUNT(pEnv) \ -- PR_Lock(pEnv->bdb_thread_count_lock); \ -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ - if (--pEnv->bdb_thread_count == 0) { \ -- PR_NotifyCondVar(pEnv->bdb_thread_count_cv); \ -+ pthread_cond_broadcast(&pEnv->bdb_thread_count_cv); \ - } \ -- PR_Unlock(pEnv->bdb_thread_count_lock) -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) - - #define NEWDIR_MODE 0755 - #define DB_REGION_PREFIX "__db." 
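
The INCR_THREAD_COUNT/DECR_THREAD_COUNT macros above, together with the bdb_pre_close() hunk further down, implement a wait-for-all-threads-to-exit pattern: each housekeeping thread bumps a counter under the mutex, the last one out broadcasts, and the closing thread waits with a timeout for the counter to reach zero. A compilable sketch of the same pattern under illustrative names (count_lock, count_cv, thread_count); it uses a single absolute deadline rather than the per-interval re-arm that bdb_pre_close() performs:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t count_cv;
static int thread_count;

static void
count_init(void)
{
    pthread_condattr_t cattr;

    /* error checks elided; the full sequence is in the earlier sketch */
    pthread_condattr_init(&cattr);
    pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC);
    pthread_cond_init(&count_cv, &cattr);
    pthread_condattr_destroy(&cattr);
}

static void
worker_enter(void)
{
    pthread_mutex_lock(&count_lock);
    ++thread_count;
    pthread_mutex_unlock(&count_lock);
}

static void
worker_exit(void)
{
    pthread_mutex_lock(&count_lock);
    if (--thread_count == 0) {
        /* broadcast, not signal, in case several threads are waiting */
        pthread_cond_broadcast(&count_cv);
    }
    pthread_mutex_unlock(&count_lock);
}

/* Wait up to max_seconds for all workers to exit.  Returns 0 on success
 * and -1 on timeout.  The predicate is re-tested on every wakeup because
 * POSIX permits spurious wakeups. */
static int
wait_for_workers(int max_seconds)
{
    struct timespec deadline = {0};
    int rc = 0;

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += max_seconds;

    pthread_mutex_lock(&count_lock);
    while (thread_count > 0 && rc != ETIMEDOUT) {
        rc = pthread_cond_timedwait(&count_cv, &count_lock, &deadline);
    }
    rc = (thread_count > 0) ? -1 : 0;
    pthread_mutex_unlock(&count_lock);
    return rc;
}

The same shape recurs in the wire-import handshake above: bulk_import_start() sleeps on job->wire_cv and bdb_import_main() signals it once the fifo is ready, the condvar standing in for a counter of one.
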
-@@ -91,9 +91,12 @@ static int trans_batch_txn_max_sleep = 50; - static PRBool log_flush_thread = PR_FALSE; - static int txn_in_progress_count = 0; - static int *txn_log_flush_pending = NULL; --static PRLock *sync_txn_log_flush = NULL; --static PRCondVar *sync_txn_log_flush_done = NULL; --static PRCondVar *sync_txn_log_do_flush = NULL; -+ -+static pthread_mutex_t sync_txn_log_flush; -+static pthread_cond_t sync_txn_log_flush_done; -+static pthread_cond_t sync_txn_log_do_flush; -+ -+ - static int bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock); - static int bdb_restore_file_check(struct ldbminfo *li); - -@@ -181,12 +184,12 @@ bdb_set_batch_transactions(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_limit = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_limit == FLUSH_REMOTEOFF) { -@@ -217,12 +220,12 @@ bdb_set_batch_txn_min_sleep(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_txn_min_sleep = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_txn_min_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { -@@ -249,12 +252,12 @@ bdb_set_batch_txn_max_sleep(void *arg __attribute__((unused)), void *value, char - } else { - if (val == 0) { - if (log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - } - trans_batch_txn_max_sleep = FLUSH_REMOTEOFF; - if (log_flush_thread) { - log_flush_thread = PR_FALSE; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - } else if (val > 0) { - if (trans_batch_txn_max_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { -@@ -725,10 +728,9 @@ bdb_free_env(void **arg) - slapi_destroy_rwlock((*env)->bdb_env_lock); - (*env)->bdb_env_lock = NULL; - } -- PR_DestroyCondVar((*env)->bdb_thread_count_cv); -- (*env)->bdb_thread_count_cv = NULL; -- PR_DestroyLock((*env)->bdb_thread_count_lock); -- (*env)->bdb_thread_count_lock = NULL; -+ pthread_mutex_destroy(&((*env)->bdb_thread_count_lock)); -+ pthread_cond_destroy(&((*env)->bdb_thread_count_cv)); -+ - slapi_ch_free((void **)env); - return; - } -@@ -746,11 +748,15 @@ bdb_make_env(bdb_db_env **env, struct ldbminfo *li) - int ret; - Object *inst_obj; - ldbm_instance *inst = NULL; -+ pthread_condattr_t condAttr; - - pEnv = (bdb_db_env *)slapi_ch_calloc(1, sizeof(bdb_db_env)); - -- pEnv->bdb_thread_count_lock = PR_NewLock(); -- pEnv->bdb_thread_count_cv = PR_NewCondVar(pEnv->bdb_thread_count_lock); -+ pthread_mutex_init(&pEnv->bdb_thread_count_lock, NULL); -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(&pEnv->bdb_thread_count_cv, &condAttr); -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - - if ((ret = db_env_create(&pEnv->bdb_DB_ENV, 0)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, -@@ -2013,9 +2019,9 @@ bdb_pre_close(struct ldbminfo *li) - return; - - /* first, see if there are any housekeeping threads running */ -- 
PR_Lock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); - threadcount = pEnv->bdb_thread_count; -- PR_Unlock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); - - if (threadcount) { - PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100); -@@ -2023,7 +2029,7 @@ bdb_pre_close(struct ldbminfo *li) - /* Print handy-dandy log message */ - slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n", - threadcount); -- PR_Lock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); - /* Tell them to stop - we wait until the last possible moment to invoke - this. If we do this much sooner than this, we could find ourselves - in a situation where the threads see the stop_threads and exit before -@@ -2034,6 +2040,7 @@ bdb_pre_close(struct ldbminfo *li) - conf->bdb_stop_threads = 1; - /* Wait for them to exit */ - while (pEnv->bdb_thread_count > 0) { -+ struct timespec current_time = {0}; - PRIntervalTime before = PR_IntervalNow(); - /* There are 3 ways to wake up from this WaitCondVar: - 1) The last database thread exits and calls NotifyCondVar - thread_count -@@ -2041,7 +2048,9 @@ bdb_pre_close(struct ldbminfo *li) - 2) Timeout - in this case, thread_count will be > 0 - bad - 3) A bad error occurs - bad - will be reported as a timeout - */ -- PR_WaitCondVar(pEnv->bdb_thread_count_cv, cvwaittime); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += DBLAYER_SLEEP_INTERVAL / 10; /* cvwaittime but in seconds */ -+ pthread_cond_timedwait(&pEnv->bdb_thread_count_cv, &pEnv->bdb_thread_count_lock, ¤t_time); - if (pEnv->bdb_thread_count > 0) { - /* still at least 1 thread running - see if this is a timeout */ - if ((PR_IntervalNow() - before) >= cvwaittime) { -@@ -2052,7 +2061,7 @@ bdb_pre_close(struct ldbminfo *li) - /* else just a spurious interrupt */ - } - } -- PR_Unlock(pEnv->bdb_thread_count_lock); -+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); - if (timedout) { - slapi_log_err(SLAPI_LOG_ERR, - "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n", -@@ -2645,12 +2654,12 @@ bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool - and new parent for any nested transactions created */ - if (use_lock && log_flush_thread) { - int txn_id = new_txn.back_txn_txn->id(new_txn.back_txn_txn); -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_in_progress_count++; - slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_begin_ext", - "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - trans_batch_count, txn_in_progress_count, txn_id); -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } - dblayer_push_pvt_txn(&new_txn); - if (txn) { -@@ -2717,11 +2726,11 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - if ((conf->bdb_durable_transactions) && use_lock) { - if (trans_batch_limit > 0 && log_flush_thread) { - /* let log_flush thread do the flushing */ -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_batch_slot = trans_batch_count++; - txn_log_flush_pending[txn_batch_slot] = txn_id; -- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before notify): batchcount: %d, " -- "txn_in_progress: %d, curr_txn: %x\n", -+ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", -+ "(before notify): batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - 
trans_batch_count, - txn_in_progress_count, txn_id); - /* -@@ -2731,8 +2740,9 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - * - there is no other outstanding txn - */ - if (trans_batch_count > trans_batch_limit || -- trans_batch_count == txn_in_progress_count) { -- PR_NotifyCondVar(sync_txn_log_do_flush); -+ trans_batch_count == txn_in_progress_count) -+ { -+ pthread_cond_signal(&sync_txn_log_do_flush); - } - /* - * We need to wait until the txn has been flushed before continuing -@@ -2740,14 +2750,14 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - * PR_WaitCondvar releases and reaquires the lock - */ - while (txn_log_flush_pending[txn_batch_slot] == txn_id) { -- PR_WaitCondVar(sync_txn_log_flush_done, PR_INTERVAL_NO_TIMEOUT); -+ pthread_cond_wait(&sync_txn_log_flush_done, &sync_txn_log_flush); - } - txn_in_progress_count--; -- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before unlock): batchcount: %d, " -- "txn_in_progress: %d, curr_txn %x\n", -+ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", -+ "(before unlock): batchcount: %d, txn_in_progress: %d, curr_txn %x\n", - trans_batch_count, - txn_in_progress_count, txn_id); -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - } else if (trans_batch_limit == FLUSH_REMOTEOFF) { /* user remotely turned batching off */ - LOG_FLUSH(pEnv->bdb_DB_ENV, 0); - } -@@ -2799,9 +2809,9 @@ bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock) - int txn_id = db_txn->id(db_txn); - bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; - if (use_lock && log_flush_thread) { -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - txn_in_progress_count--; -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_abort_ext", - "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", - trans_batch_count, txn_in_progress_count, txn_id); -@@ -3420,11 +3430,18 @@ bdb_start_log_flush_thread(struct ldbminfo *li) - int max_threads = config_get_threadnumber(); - - if ((BDB_CONFIG(li)->bdb_durable_transactions) && -- (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) { -+ (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) -+ { - /* initialize the synchronization objects for the log_flush and worker threads */ -- sync_txn_log_flush = PR_NewLock(); -- sync_txn_log_flush_done = PR_NewCondVar(sync_txn_log_flush); -- sync_txn_log_do_flush = PR_NewCondVar(sync_txn_log_flush); -+ pthread_condattr_t condAttr; -+ -+ pthread_mutex_init(&sync_txn_log_flush, NULL); -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(&sync_txn_log_do_flush, &condAttr); -+ pthread_cond_init(&sync_txn_log_flush_done, NULL); -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - txn_log_flush_pending = (int *)slapi_ch_malloc(max_threads * sizeof(int)); - log_flush_thread = PR_TRUE; - if (NULL == PR_CreateThread(PR_USER_THREAD, -@@ -3451,7 +3468,7 @@ bdb_start_log_flush_thread(struct ldbminfo *li) - static int - log_flush_threadmain(void *param) - { -- PRIntervalTime interval_wait, interval_flush, interval_def; -+ PRIntervalTime interval_flush, interval_def; - PRIntervalTime last_flush = 0; - int i; - int do_flush = 0; -@@ -3464,7 +3481,6 @@ log_flush_threadmain(void *param) - INCR_THREAD_COUNT(pEnv); - - interval_flush = PR_MillisecondsToInterval(trans_batch_txn_min_sleep); -- 
interval_wait = PR_MillisecondsToInterval(trans_batch_txn_max_sleep); - interval_def = PR_MillisecondsToInterval(300); /*used while no txn or txn batching */ - /* LK this is only needed if online change of - * of txn config is supported ??? -@@ -3473,10 +3489,10 @@ log_flush_threadmain(void *param) - if (BDB_CONFIG(li)->bdb_enable_transactions) { - if (trans_batch_limit > 0) { - /* synchronize flushing thread with workers */ -- PR_Lock(sync_txn_log_flush); -+ pthread_mutex_lock(&sync_txn_log_flush); - if (!log_flush_thread) { - /* batch transactions was disabled while waiting for the lock */ -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - break; - } - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(in loop): batchcount: %d, " -@@ -3502,20 +3518,31 @@ log_flush_threadmain(void *param) - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(before notify): batchcount: %d, " - "txn_in_progress: %d\n", - trans_batch_count, txn_in_progress_count); -- PR_NotifyAllCondVar(sync_txn_log_flush_done); -+ pthread_cond_broadcast(&sync_txn_log_flush_done); - } - /* wait until flushing conditions are met */ - while ((trans_batch_count == 0) || -- (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) { -+ (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) -+ { -+ struct timespec current_time = {0}; -+ /* convert milliseconds to nano seconds */ -+ int32_t nano_sec_sleep = trans_batch_txn_max_sleep * 1000000; - if (BDB_CONFIG(li)->bdb_stop_threads) - break; - if (PR_IntervalNow() - last_flush > interval_flush) { - do_flush = 1; - break; - } -- PR_WaitCondVar(sync_txn_log_do_flush, interval_wait); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ if (current_time.tv_nsec + nano_sec_sleep > 1000000000) { -+ /* nano sec will overflow, just bump the seconds */ -+ current_time.tv_sec++; -+ } else { -+ current_time.tv_nsec += nano_sec_sleep; -+ } -+ pthread_cond_timedwait(&sync_txn_log_do_flush, &sync_txn_log_flush, ¤t_time); - } -- PR_Unlock(sync_txn_log_flush); -+ pthread_mutex_unlock(&sync_txn_log_flush); - slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(wakeup): batchcount: %d, " - "txn_in_progress: %d\n", - trans_batch_count, txn_in_progress_count); -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -index bf00d2e9a..6bb04d21a 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h -@@ -1,5 +1,5 @@ - /** BEGIN COPYRIGHT BLOCK -- * Copyright (C) 2019 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
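
Two variants of the millisecond-deadline arithmetic appear in this patch: the log_flush_threadmain() hunk above simply bumps tv_sec by one whole second when tv_nsec would overflow, while the eq_stop() hunk in eventq.c further down carries the remainder into tv_nsec. A small helper, hypothetical but equivalent to the carrying variant, shows the general form:

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC  1000000000L
#define NSEC_PER_MSEC 1000000L

/* Build an absolute CLOCK_MONOTONIC deadline "ms" milliseconds from now,
 * keeping tv_nsec inside [0, 10^9).  pthread_cond_timedwait() rejects a
 * timespec whose tv_nsec is outside that range with EINVAL, so the carry
 * is not optional. */
static struct timespec
deadline_in_ms(int32_t ms)
{
    struct timespec ts = {0};

    clock_gettime(CLOCK_MONOTONIC, &ts);
    ts.tv_sec += ms / 1000;
    ts.tv_nsec += (long)(ms % 1000) * NSEC_PER_MSEC;
    if (ts.tv_nsec >= NSEC_PER_SEC) {
        ts.tv_sec += 1;             /* carry the whole second ... */
        ts.tv_nsec -= NSEC_PER_SEC; /* ... and keep the remainder */
    }
    return ts;
}

Both variants stay inside the valid range; the rounded-up form in log_flush_threadmain() merely fires up to a second later than trans_batch_txn_max_sleep asks for, which is harmless for a flush-batching heuristic.
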
-@@ -18,10 +18,10 @@ typedef struct bdb_db_env - Slapi_RWLock *bdb_env_lock; - int bdb_openflags; - int bdb_priv_flags; -- PRLock *bdb_thread_count_lock; /* lock for thread_count_cv */ -- PRCondVar *bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ -- PRInt32 bdb_thread_count; /* Tells us how many threads are running, -- * used to figure out when they're all stopped */ -+ pthread_mutex_t bdb_thread_count_lock; /* lock for thread_count_cv */ -+ pthread_cond_t bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ -+ PRInt32 bdb_thread_count; /* Tells us how many threads are running, -+ * used to figure out when they're all stopped */ - } bdb_db_env; - - /* structure which holds our stuff */ -diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h -index db77a602b..bfa74ed49 100644 ---- a/ldap/servers/slapd/back-ldbm/import.h -+++ b/ldap/servers/slapd/back-ldbm/import.h -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -130,8 +130,8 @@ typedef struct - char **exclude_subtrees; /* list of subtrees to NOT import */ - Fifo fifo; /* entry fifo for indexing */ - char *task_status; /* transient state info for the end-user */ -- PRLock *wire_lock; /* lock for serializing wire imports */ -- PRCondVar *wire_cv; /* ... and ordering the startup */ -+ pthread_mutex_t wire_lock; /* lock for serializing wire imports */ -+ pthread_cond_t wire_cv; /* ... and ordering the startup */ - PRThread *main_thread; /* for FRI: import_main() thread id */ - int encrypt; - Slapi_Value *usn_value; /* entryusn for import */ -diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c -index 88b7dc3be..1883fe711 100644 ---- a/ldap/servers/slapd/connection.c -+++ b/ldap/servers/slapd/connection.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -64,8 +64,10 @@ struct Slapi_work_q - - static struct Slapi_work_q *head_work_q = NULL; /* global work queue head */ - static struct Slapi_work_q *tail_work_q = NULL; /* global work queue tail */ --static PRLock *work_q_lock = NULL; /* protects head_conn_q and tail_conn_q */ --static PRCondVar *work_q_cv; /* used by operation threads to wait for work - when there is a conn in the queue waiting to be processed */ -+static pthread_mutex_t work_q_lock; /* protects head_conn_q and tail_conn_q */ -+static pthread_cond_t work_q_cv; /* used by operation threads to wait for work - -+ * when there is a conn in the queue waiting -+ * to be processed */ - static PRInt32 work_q_size; /* size of conn_q */ - static PRInt32 work_q_size_max; /* high water mark of work_q_size */ - #define WORK_Q_EMPTY (work_q_size == 0) -@@ -409,7 +411,7 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib - - /* initialize the remaining connection fields */ - conn->c_ldapversion = LDAP_VERSION3; -- conn->c_starttime = slapi_current_utc_time(); -+ conn->c_starttime = slapi_current_rel_time_t(); - conn->c_idlesince = conn->c_starttime; - conn->c_flags = is_SSL ? 
CONN_FLAG_SSL : 0; - conn->c_authtype = slapi_ch_strdup(SLAPD_AUTH_NONE); -@@ -424,32 +426,40 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib - void - init_op_threads() - { -- int i; -- PRErrorCode errorCode; -- int max_threads = config_get_threadnumber(); -- /* Initialize the locks and cv */ -+ pthread_condattr_t condAttr; -+ int32_t max_threads = config_get_threadnumber(); -+ int32_t rc; - -- if ((work_q_lock = PR_NewLock()) == NULL) { -- errorCode = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, -- "init_op_threads", "PR_NewLock failed for work_q_lock, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- errorCode, slapd_pr_strerror(errorCode)); -+ /* Initialize the locks and cv */ -+ if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); - exit(-1); - } -- -- if ((work_q_cv = PR_NewCondVar(work_q_lock)) == NULL) { -- errorCode = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_NewCondVar failed for work_q_cv, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- errorCode, slapd_pr_strerror(errorCode)); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(-1); -+ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(-1); -+ } else if ((rc = pthread_cond_init(&work_q_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "Cannot create new condition variable. 
error %d (%s)\n", -+ rc, strerror(rc)); - exit(-1); - } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - - work_q_stack = PR_CreateStack("connection_work_q"); -- - op_stack = PR_CreateStack("connection_operation"); - - /* start the operation threads */ -- for (i = 0; i < max_threads; i++) { -+ for (size_t i = 0; i < max_threads; i++) { - PR_SetConcurrency(4); - if (PR_CreateThread(PR_USER_THREAD, - (VFP)(void *)connection_threadmain, NULL, -@@ -457,7 +467,8 @@ init_op_threads() - PR_UNJOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) { - int prerr = PR_GetError(); -- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", -+ "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - prerr, slapd_pr_strerror(prerr)); - } else { - g_incr_active_threadcnt(); -@@ -949,16 +960,23 @@ connection_make_new_pb(Slapi_PBlock *pb, Connection *conn) - } - - int --connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) -+connection_wait_for_new_work(Slapi_PBlock *pb, int32_t interval) - { - int ret = CONN_FOUND_WORK_TO_DO; - work_q_item *wqitem = NULL; - struct Slapi_op_stack *op_stack_obj = NULL; - -- PR_Lock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); - - while (!op_shutdown && WORK_Q_EMPTY) { -- PR_WaitCondVar(work_q_cv, interval); -+ if (interval == 0 ) { -+ pthread_cond_wait(&work_q_cv, &work_q_lock); -+ } else { -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += interval; -+ pthread_cond_timedwait(&work_q_cv, &work_q_lock, ¤t_time); -+ } - } - - if (op_shutdown) { -@@ -975,7 +993,7 @@ connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) - slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op); - } - -- PR_Unlock(work_q_lock); -+ pthread_mutex_unlock(&work_q_lock); - return ret; - } - -@@ -1353,7 +1371,7 @@ connection_check_activity_level(Connection *conn) - /* store current count in the previous count slot */ - conn->c_private->previous_op_count = current_count; - /* update the last checked time */ -- conn->c_private->previous_count_check_time = slapi_current_utc_time(); -+ conn->c_private->previous_count_check_time = slapi_current_rel_time_t(); - pthread_mutex_unlock(&(conn->c_mutex)); - slapi_log_err(SLAPI_LOG_CONNS, "connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n", conn->c_connid, delta_count); - } -@@ -1463,7 +1481,7 @@ connection_threadmain() - { - Slapi_PBlock *pb = slapi_pblock_new(); - /* wait forever for new pb until one is available or shutdown */ -- PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT; /* PR_SecondsToInterval(10); */ -+ int32_t interval = 0; /* used be 10 seconds */ - Connection *conn = NULL; - Operation *op; - ber_tag_t tag = 0; -@@ -1503,7 +1521,7 @@ connection_threadmain() - - switch (ret) { - case CONN_NOWORK: -- PR_ASSERT(interval != PR_INTERVAL_NO_TIMEOUT); /* this should never happen with PR_INTERVAL_NO_TIMEOUT */ -+ PR_ASSERT(interval != 0); /* this should never happen */ - continue; - case CONN_SHUTDOWN: - slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain", -@@ -1610,7 +1628,7 @@ connection_threadmain() - conn->c_opsinitiated, conn->c_refcnt, conn->c_flags); - } - -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - #define DB_PERF_TURBO 1 - #if defined(DB_PERF_TURBO) - /* If it's been a while since we last did it ... 
*/ -@@ -1914,7 +1932,7 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) - new_work_q->op_stack_obj = op_stack_obj; - new_work_q->next_work_item = NULL; - -- PR_Lock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); - if (tail_work_q == NULL) { - tail_work_q = new_work_q; - head_work_q = new_work_q; -@@ -1926,8 +1944,8 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) - if (work_q_size > work_q_size_max) { - work_q_size_max = work_q_size; - } -- PR_NotifyCondVar(work_q_cv); /* notify waiters in connection_wait_for_new_work */ -- PR_Unlock(work_q_lock); -+ pthread_cond_signal(&work_q_cv); /* notify waiters in connection_wait_for_new_work */ -+ pthread_mutex_unlock(&work_q_lock); - } - - /* get_work_q(): will get a work_q_item from the beginning of the work queue, return NULL if -@@ -1975,9 +1993,9 @@ op_thread_cleanup() - op_stack_size, work_q_size_max, work_q_stack_size_max); - - PR_AtomicIncrement(&op_shutdown); -- PR_Lock(work_q_lock); -- PR_NotifyAllCondVar(work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ -- PR_Unlock(work_q_lock); -+ pthread_mutex_lock(&work_q_lock); -+ pthread_cond_broadcast(&work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ -+ pthread_mutex_unlock(&work_q_lock); - } - - /* do this after all worker threads have terminated */ -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index bfd965263..0071ed86a 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -81,8 +81,9 @@ static int readsignalpipe = SLAPD_INVALID_SOCKET; - #define FDS_SIGNAL_PIPE 0 - - static PRThread *disk_thread_p = NULL; --static PRCondVar *diskmon_cvar = NULL; --static PRLock *diskmon_mutex = NULL; -+static pthread_cond_t diskmon_cvar; -+static pthread_mutex_t diskmon_mutex; -+ - void disk_monitoring_stop(void); - - typedef struct listener_info -@@ -441,9 +442,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - - while (!g_get_shutdown()) { - if (!first_pass) { -- PR_Lock(diskmon_mutex); -- PR_WaitCondVar(diskmon_cvar, PR_SecondsToInterval(10)); -- PR_Unlock(diskmon_mutex); -+ struct timespec current_time = {0}; -+ -+ pthread_mutex_lock(&diskmon_mutex); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += 10; -+ pthread_cond_timedwait(&diskmon_cvar, &diskmon_mutex, ¤t_time); -+ pthread_mutex_unlock(&diskmon_mutex); - /* - * We need to subtract from disk_space to account for the - * logging we just did, it doesn't hurt if we subtract a -@@ -622,7 +627,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - "Disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). 
" - "Waiting %d minutes for disk space to be cleaned up before shutting slapd down...\n", - dirstr, threshold, (grace_period / 60)); -- start = slapi_current_utc_time(); -+ start = slapi_current_rel_time_t(); - now = start; - while ((now - start) < grace_period) { - if (g_get_shutdown()) { -@@ -685,7 +690,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - immediate_shutdown = 1; - goto cleanup; - } -- now = slapi_current_utc_time(); -+ now = slapi_current_rel_time_t(); - } - - if (ok_now) { -@@ -1005,21 +1010,34 @@ slapd_daemon(daemon_ports_t *ports) - * and the monitoring thread. - */ - if (config_get_disk_monitoring()) { -- if ((diskmon_mutex = PR_NewLock()) == NULL) { -+ pthread_condattr_t condAttr; -+ int rc = 0; -+ -+ if ((rc = pthread_mutex_init(&diskmon_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -+ } -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -- "Cannot create new lock for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -+ "cannot create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - g_set_shutdown(SLAPI_SHUTDOWN_EXIT); - } -- if (diskmon_mutex) { -- if ((diskmon_cvar = PR_NewCondVar(diskmon_mutex)) == NULL) { -- slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", -- "Cannot create new condition variable for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -- g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -- } -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -+ "cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); -+ } -+ if ((rc = pthread_cond_init(&diskmon_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", -+ "cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); - } -- if (diskmon_mutex && diskmon_cvar) { -+ pthread_condattr_destroy(&condAttr); -+ if (rc == 0) { - disk_thread_p = PR_CreateThread(PR_SYSTEM_THREAD, - (VFP)(void *)disk_monitoring_thread, NULL, - PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, -@@ -1508,7 +1526,7 @@ static void - handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused))) - { - Connection *c; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - #if LDAP_ERROR_LOGGING - if (slapd_ldap_debug & LDAP_DEBUG_CONNS) { -@@ -2884,8 +2902,8 @@ void - disk_monitoring_stop(void) - { - if (disk_thread_p) { -- PR_Lock(diskmon_mutex); -- PR_NotifyCondVar(diskmon_cvar); -- PR_Unlock(diskmon_mutex); -+ pthread_mutex_lock(&diskmon_mutex); -+ pthread_cond_signal(&diskmon_cvar); -+ pthread_mutex_unlock(&diskmon_mutex); - } - } -diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c -index a491acd0a..e1900724f 100644 ---- a/ldap/servers/slapd/eventq.c -+++ b/ldap/servers/slapd/eventq.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
-@@ -52,8 +52,8 @@ typedef struct _slapi_eq_context - */ - typedef struct _event_queue - { -- PRLock *eq_lock; -- PRCondVar *eq_cv; -+ pthread_mutex_t eq_lock; -+ pthread_cond_t eq_cv; - slapi_eq_context *eq_queue; - } event_queue; - -@@ -74,8 +74,8 @@ static PRThread *eq_loop_tid = NULL; - static int eq_running = 0; - static int eq_stopped = 0; - static int eq_initialized = 0; --PRLock *ss_lock = NULL; --PRCondVar *ss_cv = NULL; -+static pthread_mutex_t ss_lock; -+static pthread_cond_t ss_cv; - PRCallOnceType init_once = {0}; - - /* Forward declarations */ -@@ -170,7 +170,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - - PR_ASSERT(eq_initialized); - if (!eq_stopped) { -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = &(eq->eq_queue); - while (!found && *p != NULL) { - if ((*p)->ec_id == ctx) { -@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - p = &((*p)->ec_next); - } - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "cancellation of event id %p requested: %s\n", -@@ -223,7 +223,7 @@ eq_enqueue(slapi_eq_context *newec) - slapi_eq_context **p; - - PR_ASSERT(NULL != newec); -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - /* Insert in order (sorted by start time) in the list */ - for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { - if ((*p)->ec_when > newec->ec_when) { -@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) - newec->ec_next = NULL; - } - *p = newec; -- PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ -- PR_Unlock(eq->eq_lock); -+ pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - - -@@ -251,12 +251,12 @@ eq_dequeue(time_t now) - { - slapi_eq_context *retptr = NULL; - -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { - retptr = eq->eq_queue; - eq->eq_queue = retptr->ec_next; - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - return retptr; - } - -@@ -271,7 +271,7 @@ static void - eq_call_all(void) - { - slapi_eq_context *p; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - while ((p = eq_dequeue(curtime)) != NULL) { - /* Call the scheduled function */ -@@ -299,34 +299,35 @@ static void - eq_loop(void *arg __attribute__((unused))) - { - while (eq_running) { -- time_t curtime = slapi_current_utc_time(); -- PRIntervalTime timeout; -+ time_t curtime = slapi_current_rel_time_t(); - int until; -- PR_Lock(eq->eq_lock); -+ -+ pthread_mutex_lock(&(eq->eq_lock)); - while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { - if (!eq_running) { -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - goto bye; - } - /* Compute new timeout */ - if (NULL != eq->eq_queue) { -+ struct timespec current_time = slapi_current_rel_time_hr(); - until = eq->eq_queue->ec_when - curtime; -- timeout = PR_SecondsToInterval(until); -+ current_time.tv_sec += until; -+ pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); - } else { -- timeout = PR_INTERVAL_NO_TIMEOUT; -+ pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); - } -- PR_WaitCondVar(eq->eq_cv, timeout); -- curtime = slapi_current_utc_time(); -+ curtime = slapi_current_rel_time_t(); - } - /* There is some work to do */ -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - eq_call_all(); - } - bye: - eq_stopped = 1; -- PR_Lock(ss_lock); -- 
PR_NotifyAllCondVar(ss_cv); -- PR_Unlock(ss_lock); -+ pthread_mutex_lock(&ss_lock); -+ pthread_cond_broadcast(&ss_cv); -+ pthread_mutex_unlock(&ss_lock); - } - - -@@ -336,23 +337,50 @@ bye: - static PRStatus - eq_create(void) - { -- PR_ASSERT(NULL == eq->eq_lock); -- if ((eq->eq_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ pthread_condattr_t condAttr; -+ int rc = 0; -+ -+ /* Init the eventq mutex and cond var */ -+ if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create lock: error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ if ((rc = pthread_condattr_init(&condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new condition attribute variable. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((ss_lock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -- if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); - exit(1); - } -+ -+ /* Init the "ss" mutex and condition var */ -+ if (pthread_mutex_init(&ss_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create ss lock: error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ "Failed to create new ss condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); -+ } -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ -+ - eq->eq_queue = NULL; - eq_initialized = 1; - return PR_SUCCESS; -@@ -411,7 +439,7 @@ eq_stop() - { - slapi_eq_context *p, *q; - -- if (NULL == eq || NULL == eq->eq_lock) { /* never started */ -+ if (NULL == eq) { /* never started */ - eq_stopped = 1; - return; - } -@@ -423,12 +451,24 @@ eq_stop() - * it acknowledges by setting eq_stopped. - */ - while (!eq_stopped) { -- PR_Lock(eq->eq_lock); -- PR_NotifyAllCondVar(eq->eq_cv); -- PR_Unlock(eq->eq_lock); -- PR_Lock(ss_lock); -- PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); -- PR_Unlock(ss_lock); -+ struct timespec current_time = {0}; -+ -+ pthread_mutex_lock(&(eq->eq_lock)); -+ pthread_cond_broadcast(&(eq->eq_cv)); -+ pthread_mutex_unlock(&(eq->eq_lock)); -+ -+ pthread_mutex_lock(&ss_lock); -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ if (current_time.tv_nsec + 100000000 > 1000000000) { -+ /* nanoseconds will overflow, adjust the seconds and nanoseconds */ -+ current_time.tv_sec++; -+ /* Add the remainder to nanoseconds */ -+ current_time.tv_nsec = (current_time.tv_nsec + 100000000) - 1000000000; -+ } else { -+ current_time.tv_nsec += 100000000; /* 100 ms */ -+ } -+ pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); -+ pthread_mutex_unlock(&ss_lock); - } - (void)PR_JoinThread(eq_loop_tid); - /* -@@ -438,7 +478,7 @@ eq_stop() - * The downside is that the event queue can't be stopped and restarted - * easily. 
- */ -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = eq->eq_queue; - while (p != NULL) { - q = p->ec_next; -@@ -449,7 +489,7 @@ eq_stop() - */ - p = q; - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); - } - -@@ -463,17 +503,17 @@ slapi_eq_get_arg(Slapi_Eq_Context ctx) - - PR_ASSERT(eq_initialized); - if (eq && !eq_stopped) { -- PR_Lock(eq->eq_lock); -+ pthread_mutex_lock(&(eq->eq_lock)); - p = &(eq->eq_queue); - while (p && *p != NULL) { - if ((*p)->ec_id == ctx) { -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - return (*p)->ec_arg; - } else { - p = &((*p)->ec_next); - } - } -- PR_Unlock(eq->eq_lock); -+ pthread_mutex_unlock(&(eq->eq_lock)); - } - return NULL; - } -diff --git a/ldap/servers/slapd/house.c b/ldap/servers/slapd/house.c -index ff139a4a5..ac1d94f26 100644 ---- a/ldap/servers/slapd/house.c -+++ b/ldap/servers/slapd/house.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -23,17 +23,15 @@ - #define SLAPD_HOUSEKEEPING_INTERVAL 30 /* seconds */ - - static PRThread *housekeeping_tid = NULL; --static PRLock *housekeeping_mutex = NULL; --static PRCondVar *housekeeping_cvar = NULL; -+static pthread_mutex_t housekeeping_mutex; -+static pthread_cond_t housekeeping_cvar; - - - static void - housecleaning(void *cur_time __attribute__((unused))) - { -- int interval; -- -- interval = PR_SecondsToInterval(SLAPD_HOUSEKEEPING_INTERVAL); - while (!g_get_shutdown()) { -+ struct timespec current_time = {0}; - /* - * Looks simple, but could potentially take a long time. - */ -@@ -42,9 +40,15 @@ housecleaning(void *cur_time __attribute__((unused))) - if (g_get_shutdown()) { - break; - } -- PR_Lock(housekeeping_mutex); -- PR_WaitCondVar(housekeeping_cvar, interval); -- PR_Unlock(housekeeping_mutex); -+ -+ /* get the current monotonic time and add our interval */ -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += SLAPD_HOUSEKEEPING_INTERVAL; -+ -+ /* Now we wait... */ -+ pthread_mutex_lock(&housekeeping_mutex); -+ pthread_cond_timedwait(&housekeeping_cvar, &housekeeping_mutex, ¤t_time); -+ pthread_mutex_unlock(&housekeeping_mutex); - } - } - -@@ -52,20 +56,31 @@ PRThread * - housekeeping_start(time_t cur_time, void *arg __attribute__((unused))) - { - static time_t thread_start_time; -+ pthread_condattr_t condAttr; -+ int rc = 0; - - if (housekeeping_tid) { - return housekeeping_tid; - } - -- if ((housekeeping_mutex = PR_NewLock()) == NULL) { -+ if ((rc = pthread_mutex_init(&housekeeping_mutex, NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_condattr_init(&condAttr)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -- "housekeeping cannot create new lock. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -- } else if ((housekeeping_cvar = PR_NewCondVar(housekeeping_mutex)) == NULL) { -+ "housekeeping cannot create new condition attribute variable. 
error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -- "housekeeping cannot create new condition variable. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", -- PR_GetError(), slapd_pr_strerror(PR_GetError())); -+ "housekeeping cannot set condition attr clock. error %d (%s)\n", -+ rc, strerror(rc)); -+ } else if ((rc = pthread_cond_init(&housekeeping_cvar, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); - } else { -+ pthread_condattr_destroy(&condAttr); /* no longer needed */ - thread_start_time = cur_time; - if ((housekeeping_tid = PR_CreateThread(PR_USER_THREAD, - (VFP)housecleaning, (void *)&thread_start_time, -@@ -84,9 +99,16 @@ void - housekeeping_stop() - { - if (housekeeping_tid) { -- PR_Lock(housekeeping_mutex); -- PR_NotifyCondVar(housekeeping_cvar); -- PR_Unlock(housekeeping_mutex); -+ /* Notify the thread */ -+ pthread_mutex_lock(&housekeeping_mutex); -+ pthread_cond_signal(&housekeeping_cvar); -+ pthread_mutex_unlock(&housekeeping_mutex); -+ -+ /* Wait for the thread to finish */ - (void)PR_JoinThread(housekeeping_tid); -+ -+ /* Clean it all up */ -+ pthread_mutex_destroy(&housekeeping_mutex); -+ pthread_cond_destroy(&housekeeping_cvar); - } - } -diff --git a/ldap/servers/slapd/libmakefile b/ldap/servers/slapd/libmakefile -index b3ecabc29..3559c0104 100644 ---- a/ldap/servers/slapd/libmakefile -+++ b/ldap/servers/slapd/libmakefile -@@ -46,7 +46,7 @@ LIBSLAPD_OBJS=plugin_role.o getfilelist.o libglobs.o log.o ch_malloc.o entry.o p - filter.o filtercmp.o filterentry.o operation.o schemaparse.o pw.o \ - backend.o defbackend.o ava.o charray.o regex.o \ - str2filter.o dynalib.o plugin.o plugin_syntax.o plugin_mr.o \ -- slapi2nspr.o rwlock.o control.o plugin_internal_op.o \ -+ slapi2runtime.o rwlock.o control.o plugin_internal_op.o \ - result.o pw_retry.o agtmmap.o referral.o snmp_collator.o util.o \ - dse.o errormap.o computed.o match.o fileio.o \ - generation.o localhost.o ssl.o factory.o auditlog.o \ -diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c -index 6820a5d75..c60e6a8ed 100644 ---- a/ldap/servers/slapd/psearch.c -+++ b/ldap/servers/slapd/psearch.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). -@@ -59,10 +59,10 @@ typedef struct _psearch - */ - typedef struct _psearch_list - { -- Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ -- PSearch *pl_head; /* Head of list */ -- PRLock *pl_cvarlock; /* Lock for cvar */ -- PRCondVar *pl_cvar; /* ps threads sleep on this */ -+ Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ -+ PSearch *pl_head; /* Head of list */ -+ pthread_mutex_t pl_cvarlock; /* Lock for cvar */ -+ pthread_cond_t pl_cvar; /* ps threads sleep on this */ - } PSearch_List; - - /* -@@ -101,21 +101,26 @@ void - ps_init_psearch_system() - { - if (!PS_IS_INITIALIZED()) { -+ int32_t rc = 0; -+ - psearch_list = (PSearch_List *)slapi_ch_calloc(1, sizeof(PSearch_List)); - if ((psearch_list->pl_rwlock = slapi_new_rwlock()) == NULL) { - slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot initialize lock structure. 
" - "The server is terminating.\n"); - exit(-1); - } -- if ((psearch_list->pl_cvarlock = PR_NewLock()) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new lock. " -- "The server is terminating.\n"); -- exit(-1); -+ -+ if ((rc = pthread_mutex_init(&(psearch_list->pl_cvarlock), NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", -+ "Cannot create new lock. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); - } -- if ((psearch_list->pl_cvar = PR_NewCondVar(psearch_list->pl_cvarlock)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new condition variable. " -- "The server is terminating.\n"); -- exit(-1); -+ if ((rc = pthread_cond_init(&(psearch_list->pl_cvar), NULL)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", -+ "housekeeping cannot create new condition variable. error %d (%s)\n", -+ rc, strerror(rc)); -+ exit(1); - } - psearch_list->pl_head = NULL; - } -@@ -288,7 +293,7 @@ ps_send_results(void *arg) - pb_conn->c_connid, pb_op ? pb_op->o_opid : -1); - } - -- PR_Lock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); - - while ((conn_acq_flag == 0) && slapi_atomic_load_64(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) { - /* Check for an abandoned operation */ -@@ -300,7 +305,7 @@ ps_send_results(void *arg) - } - if (NULL == ps->ps_eq_head) { - /* Nothing to do */ -- PR_WaitCondVar(psearch_list->pl_cvar, PR_INTERVAL_NO_TIMEOUT); -+ pthread_cond_wait(&(psearch_list->pl_cvar), &(psearch_list->pl_cvarlock)); - } else { - /* dequeue the item */ - int attrsonly; -@@ -330,17 +335,17 @@ ps_send_results(void *arg) - } - - /* -- * Send the result. Since send_ldap_search_entry can block for -- * up to 30 minutes, we relinquish all locks before calling it. -- */ -- PR_Unlock(psearch_list->pl_cvarlock); -+ * Send the result. Since send_ldap_search_entry can block for -+ * up to 30 minutes, we relinquish all locks before calling it. -+ */ -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - - /* -- * The entry is in the right scope and matches the filter -- * but we need to redo the filter test here to check access -- * controls. See the comments at the slapi_filter_test() -- * call in ps_service_persistent_searches(). -- */ -+ * The entry is in the right scope and matches the filter -+ * but we need to redo the filter test here to check access -+ * controls. See the comments at the slapi_filter_test() -+ * call in ps_service_persistent_searches(). 
-+ */ - slapi_pblock_get(ps->ps_pblock, SLAPI_SEARCH_FILTER, &f); - - /* See if the entry meets the filter and ACL criteria */ -@@ -358,13 +363,13 @@ ps_send_results(void *arg) - } - } - -- PR_Lock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); - - /* Deallocate our wrapper for this entry */ - pe_ch_free(&peq); - } - } -- PR_Unlock(psearch_list->pl_cvarlock); -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - ps_remove(ps); - - /* indicate the end of search */ -@@ -474,9 +479,9 @@ void - ps_wakeup_all() - { - if (PS_IS_INITIALIZED()) { -- PR_Lock(psearch_list->pl_cvarlock); -- PR_NotifyAllCondVar(psearch_list->pl_cvar); -- PR_Unlock(psearch_list->pl_cvarlock); -+ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); -+ pthread_cond_broadcast(&(psearch_list->pl_cvar)); -+ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); - } - } - -diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c -index 97249a4c5..a17c354fd 100644 ---- a/ldap/servers/slapd/regex.c -+++ b/ldap/servers/slapd/regex.c -@@ -72,7 +72,7 @@ int - slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) - { - int rc; -- time_t curtime = slapi_current_utc_time(); -+ time_t curtime = slapi_current_rel_time_t(); - - if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { - return LDAP_PARAM_ERROR; -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index f9ac8b46c..55ded5eb8 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6086,6 +6086,7 @@ Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); - void slapi_destroy_condvar(Slapi_CondVar *cvar); - int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); - int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); -+int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); - - /** - * Creates a new read/write lock -@@ -6777,6 +6778,12 @@ struct timespec slapi_current_time_hr(void); - * \return timespec of the current monotonic time. - */ - struct timespec slapi_current_rel_time_hr(void); -+/** -+ * Returns the current system time as a hr clock -+ * -+ * \return time_t of the current monotonic time. -+ */ -+time_t slapi_current_rel_time_t(void); - /** - * Returns the current system time as a hr clock in UTC timezone. - * This clock adjusts with ntp steps, and should NOT be -diff --git a/ldap/servers/slapd/slapi2nspr.c b/ldap/servers/slapd/slapi2runtime.c -similarity index 69% -rename from ldap/servers/slapd/slapi2nspr.c -rename to ldap/servers/slapd/slapi2runtime.c -index 232d1599e..85dc4c9a8 100644 ---- a/ldap/servers/slapd/slapi2nspr.c -+++ b/ldap/servers/slapd/slapi2runtime.c -@@ -1,6 +1,6 @@ - /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -- * Copyright (C) 2005 Red Hat, Inc. -+ * Copyright (C) 2020 Red Hat, Inc. - * All rights reserved. - * - * License: GPL (version 3 or any later version). 
-@@ -14,6 +14,8 @@ - /* - * slapi2nspr.c - expose a subset of the NSPR20/21 API to SLAPI plugin writers - * -+ * Also include slapi2pthread functions -+ * - */ - - #include "slap.h" -@@ -44,47 +46,50 @@ - Slapi_Mutex * - slapi_new_mutex(void) - { -- return ((Slapi_Mutex *)PR_NewLock()); -+ pthread_mutex_t *new_mutex = (pthread_mutex_t *)slapi_ch_calloc(1, sizeof(pthread_mutex_t)); -+ pthread_mutex_init(new_mutex, NULL); -+ return ((Slapi_Mutex *)new_mutex); - } - -- - /* - * Function: slapi_destroy_mutex -- * Description: behaves just like PR_DestroyLock(). -+ * Description: behaves just like pthread_mutex_destroy(). - */ - void - slapi_destroy_mutex(Slapi_Mutex *mutex) - { - if (mutex != NULL) { -- PR_DestroyLock((PRLock *)mutex); -+ pthread_mutex_destroy((pthread_mutex_t *)mutex); -+ slapi_ch_free((void **)&mutex); - } - } - - - /* - * Function: slapi_lock_mutex -- * Description: behaves just like PR_Lock(). -+ * Description: behaves just like pthread_mutex_lock(). - */ --void -+inline void __attribute__((always_inline)) - slapi_lock_mutex(Slapi_Mutex *mutex) - { - if (mutex != NULL) { -- PR_Lock((PRLock *)mutex); -+ pthread_mutex_lock((pthread_mutex_t *)mutex); - } - } - - - /* - * Function: slapi_unlock_mutex -- * Description: behaves just like PR_Unlock(). -+ * Description: behaves just like pthread_mutex_unlock(). - * Returns: - * non-zero if mutex was successfully unlocked. - * 0 if mutex is NULL or is not locked by the calling thread. - */ --int -+inline int __attribute__((always_inline)) - slapi_unlock_mutex(Slapi_Mutex *mutex) - { -- if (mutex == NULL || PR_Unlock((PRLock *)mutex) == PR_FAILURE) { -+ PR_ASSERT(mutex != NULL); -+ if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) { - return (0); - } else { - return (1); -@@ -98,13 +103,18 @@ slapi_unlock_mutex(Slapi_Mutex *mutex) - * Returns: pointer to a new condition variable (NULL if one can't be created). 
- */ - Slapi_CondVar * --slapi_new_condvar(Slapi_Mutex *mutex) -+slapi_new_condvar(Slapi_Mutex *mutex __attribute__((unused))) - { -- if (mutex == NULL) { -- return (NULL); -- } -+ pthread_cond_t *new_cv = (pthread_cond_t *)slapi_ch_calloc(1, sizeof(pthread_cond_t)); -+ pthread_condattr_t condAttr; -+ -+ pthread_condattr_init(&condAttr); -+ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); -+ pthread_cond_init(new_cv, &condAttr); -+ /* Done with the cond attr, it's safe to destroy it */ -+ pthread_condattr_destroy(&condAttr); - -- return ((Slapi_CondVar *)PR_NewCondVar((PRLock *)mutex)); -+ return (Slapi_CondVar *)new_cv; - } - - -@@ -116,7 +126,8 @@ void - slapi_destroy_condvar(Slapi_CondVar *cvar) - { - if (cvar != NULL) { -- PR_DestroyCondVar((PRCondVar *)cvar); -+ pthread_cond_destroy((pthread_cond_t *)cvar); -+ slapi_ch_free((void **)&cvar); - } - } - -@@ -134,23 +145,35 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - int - slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - { -- PRIntervalTime prit; -+ /* deprecated in favor of slapi_wait_condvar_pt() which requires that the -+ * mutex be passed in */ -+ return (0); -+} -+ -+int -+slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout) -+{ -+ int32_t rc = 1; - - if (cvar == NULL) { -- return (0); -+ return 0; - } - - if (timeout == NULL) { -- prit = PR_INTERVAL_NO_TIMEOUT; -+ rc = pthread_cond_wait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex); - } else { -- prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); -+ struct timespec current_time = {0}; -+ clock_gettime(CLOCK_MONOTONIC, ¤t_time); -+ current_time.tv_sec += (timeout->tv_sec + PR_MicrosecondsToInterval(timeout->tv_usec)); -+ rc = pthread_cond_timedwait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex, ¤t_time); - } - -- if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { -- return (0); -+ if (rc != 0) { -+ /* something went wrong */ -+ return 0; - } - -- return (1); -+ return 1; /* success */ - } - - -@@ -166,19 +189,19 @@ slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - int - slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all) - { -- PRStatus prrc; -+ int32_t rc; - - if (cvar == NULL) { -- return (0); -+ return 0; - } - - if (notify_all) { -- prrc = PR_NotifyAllCondVar((PRCondVar *)cvar); -+ rc = pthread_cond_broadcast((pthread_cond_t *)cvar); - } else { -- prrc = PR_NotifyCondVar((PRCondVar *)cvar); -+ rc = pthread_cond_signal((pthread_cond_t *)cvar); - } - -- return (prrc == PR_SUCCESS ? 1 : 0); -+ return (rc == 0 ? 
1 : 0); - } - - Slapi_RWLock * -@@ -236,7 +259,7 @@ slapi_destroy_rwlock(Slapi_RWLock *rwlock) - } - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_rdlock(Slapi_RWLock *rwlock) - { - int ret = 0; -@@ -252,7 +275,7 @@ slapi_rwlock_rdlock(Slapi_RWLock *rwlock) - return ret; - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_wrlock(Slapi_RWLock *rwlock) - { - int ret = 0; -@@ -268,7 +291,7 @@ slapi_rwlock_wrlock(Slapi_RWLock *rwlock) - return ret; - } - --int -+inline int __attribute__((always_inline)) - slapi_rwlock_unlock(Slapi_RWLock *rwlock) - { - int ret = 0; -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 806077a16..26f281cba 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -380,16 +380,14 @@ slapi_task_status_changed(Slapi_Task *task) - Slapi_PBlock *pb = slapi_pblock_new(); - Slapi_Entry *e; - int ttl; -- time_t expire; - - if ((e = get_internal_entry(pb, task->task_dn))) { - ttl = atoi(slapi_fetch_attr(e, "ttl", DEFAULT_TTL)); - if (ttl > (24*3600)) - ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ -- expire = time(NULL) + ttl; - task->task_flags |= SLAPI_TASK_DESTROYING; - /* queue an event to destroy the state info */ -- slapi_eq_once(destroy_task, (void *)task, expire); -+ slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); - } - slapi_free_search_results_internal(pb); - slapi_pblock_destroy(pb); -diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c -index 545538404..0406c3689 100644 ---- a/ldap/servers/slapd/time.c -+++ b/ldap/servers/slapd/time.c -@@ -107,6 +107,14 @@ slapi_current_rel_time_hr(void) - return now; - } - -+time_t -+slapi_current_rel_time_t(void) -+{ -+ struct timespec now = {0}; -+ clock_gettime(CLOCK_MONOTONIC, &now); -+ return now.tv_sec; -+} -+ - struct timespec - slapi_current_utc_time_hr(void) - { -@@ -292,7 +300,7 @@ slapi_timer_result - slapi_timespec_expire_check(struct timespec *expire) - { - /* -- * Check this first, as it makes no timeout virutally free. -+ * Check this first, as it makes no timeout virtually free. - */ - if (expire->tv_sec == 0 && expire->tv_nsec == 0) { - return TIMER_CONTINUE; --- -2.26.2 - diff --git a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch deleted file mode 100644 index 66a40e8..0000000 --- a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch +++ /dev/null @@ -1,1748 +0,0 @@ -From 69af412d42acccac660037e1f4026a6a6717634c Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 17 Dec 2020 15:25:42 -0500 -Subject: [PATCH 2/2] Issue 4384 - Separate eventq into REALTIME and MONOTONIC - -Description: The recent changes to the eventq "when" time changed - internally from REALTIME to MONOTONIC, and this broke - the API. Create a new API for MONOTONIC clocks, and - keep the original API intact for REALTIME clocks. - -Relates: https://github.com/389ds/389-ds-base/issues/4384 - -Reviewed by: firstyear(Thanks!) 
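The distinction matters because a pthread condition variable binds its clock when it is created: a condvar initialized with CLOCK_MONOTONIC must be handed absolute deadlines computed from the monotonic clock, while a default-initialized one expects CLOCK_REALTIME deadlines, which jump whenever ntp steps the system time. A minimal standalone sketch of the monotonic pattern used throughout these patches (illustrative only, not part of the patch itself; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv;

    int
    main(void)
    {
        pthread_condattr_t attr;
        struct timespec deadline = {0};

        /* Bind the condvar to the monotonic clock, as eq_create_rel() does */
        pthread_condattr_init(&attr);
        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        pthread_cond_init(&cv, &attr);
        pthread_condattr_destroy(&attr); /* no longer needed once cv exists */

        /* The deadline must come from the same clock the condvar is bound to */
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 1; /* wait at most one second */

        pthread_mutex_lock(&lock);
        pthread_cond_timedwait(&cv, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        pthread_cond_destroy(&cv);
        printf("timed wait expired on the monotonic clock\n");
        return 0;
    }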
---- - Makefile.am | 1 + - docs/slapi.doxy.in | 1 - - ldap/servers/plugins/chainingdb/cb_instance.c | 6 +- - ldap/servers/plugins/dna/dna.c | 4 +- - .../plugins/replication/repl5_backoff.c | 12 +- - .../plugins/replication/repl5_connection.c | 10 +- - .../plugins/replication/repl5_mtnode_ext.c | 4 +- - .../plugins/replication/repl5_replica.c | 24 +- - .../plugins/replication/repl5_schedule.c | 4 +- - .../plugins/replication/windows_connection.c | 12 +- - .../replication/windows_inc_protocol.c | 7 +- - ldap/servers/plugins/retrocl/retrocl_trim.c | 10 +- - ldap/servers/slapd/daemon.c | 3 +- - ldap/servers/slapd/eventq-deprecated.c | 483 ++++++++++++++++++ - ldap/servers/slapd/eventq.c | 236 ++++----- - ldap/servers/slapd/main.c | 18 +- - ldap/servers/slapd/proto-slap.h | 6 +- - ldap/servers/slapd/slapi-plugin.h | 62 ++- - ldap/servers/slapd/slapi2runtime.c | 23 +- - ldap/servers/slapd/snmp_collator.c | 7 +- - ldap/servers/slapd/task.c | 2 +- - ldap/servers/slapd/uuid.c | 3 +- - 22 files changed, 750 insertions(+), 188 deletions(-) - create mode 100644 ldap/servers/slapd/eventq-deprecated.c - -diff --git a/Makefile.am b/Makefile.am -index f7bf1c44c..ece1ad41a 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -1408,6 +1408,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ - ldap/servers/slapd/entrywsi.c \ - ldap/servers/slapd/errormap.c \ - ldap/servers/slapd/eventq.c \ -+ ldap/servers/slapd/eventq-deprecated.c \ - ldap/servers/slapd/factory.c \ - ldap/servers/slapd/features.c \ - ldap/servers/slapd/fileio.c \ -diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in -index b1e4810ab..1cafc50ce 100644 ---- a/docs/slapi.doxy.in -+++ b/docs/slapi.doxy.in -@@ -759,7 +759,6 @@ WARN_LOGFILE = - # Note: If this tag is empty the current directory is searched. - - INPUT = src/libsds/include/sds.h \ -- docs/job-safety.md \ - # ldap/servers/slapd/slapi-plugin.h \ - - # This tag can be used to specify the character encoding of the source files -diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c -index bc1864c1a..7fd85deb0 100644 ---- a/ldap/servers/plugins/chainingdb/cb_instance.c -+++ b/ldap/servers/plugins/chainingdb/cb_instance.c -@@ -217,7 +217,7 @@ cb_instance_free(cb_backend_instance *inst) - slapi_rwlock_wrlock(inst->rwl_config_lock); - - if (inst->eq_ctx != NULL) { -- slapi_eq_cancel(inst->eq_ctx); -+ slapi_eq_cancel_rel(inst->eq_ctx); - inst->eq_ctx = NULL; - } - -@@ -1947,8 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), - * we can't call recursively into the DSE to do more adds, they'll - * silently fail. instead, schedule the adds to happen in 1 second. 
- */ -- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, -- slapi_current_rel_time_t() + 1); -+ inst->eq_ctx = slapi_eq_once_rel(cb_instance_add_monitor_later, (void *)inst, -+ slapi_current_rel_time_t() + 1); - } - - /* Get the list of operational attrs defined in the schema */ -diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c -index 1cb54580b..b46edfcbb 100644 ---- a/ldap/servers/plugins/dna/dna.c -+++ b/ldap/servers/plugins/dna/dna.c -@@ -688,7 +688,7 @@ dna_close(Slapi_PBlock *pb __attribute__((unused))) - slapi_log_err(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, - "--> dna_close\n"); - -- slapi_eq_cancel(eq_ctx); -+ slapi_eq_cancel_rel(eq_ctx); - dna_delete_config(NULL); - slapi_ch_free((void **)&dna_global_config); - slapi_destroy_rwlock(g_dna_cache_lock); -@@ -908,7 +908,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) - * starting up would cause the change to not - * get changelogged. */ - now = slapi_current_rel_time_t(); -- eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); -+ eq_ctx = slapi_eq_once_rel(dna_update_config_event, NULL, now + 30); - } else { - dna_update_config_event(0, NULL); - } -diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c -index 40ec75dd7..8c851beb2 100644 ---- a/ldap/servers/plugins/replication/repl5_backoff.c -+++ b/ldap/servers/plugins/replication/repl5_backoff.c -@@ -99,7 +99,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - bt->callback_arg = callback_data; - /* Cancel any pending events in the event queue */ - if (NULL != bt->pending_event) { -- slapi_eq_cancel(bt->pending_event); -+ slapi_eq_cancel_rel(bt->pending_event); - bt->pending_event = NULL; - } - /* Compute the first fire time */ -@@ -112,8 +112,8 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) - /* Schedule the callback */ - bt->last_fire_time = slapi_current_rel_time_t(); - return_value = bt->last_fire_time + bt->next_interval; -- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, -- return_value); -+ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, -+ return_value); - PR_Unlock(bt->lock); - return return_value; - } -@@ -159,8 +159,8 @@ backoff_step(Backoff_Timer *bt) - /* Schedule the callback, if any */ - bt->last_fire_time += previous_interval; - return_value = bt->last_fire_time + bt->next_interval; -- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, -- return_value); -+ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, -+ return_value); - } - PR_Unlock(bt->lock); - return return_value; -@@ -196,7 +196,7 @@ backoff_delete(Backoff_Timer **btp) - PR_Lock(bt->lock); - /* Cancel any pending events in the event queue */ - if (NULL != bt->pending_event) { -- slapi_eq_cancel(bt->pending_event); -+ slapi_eq_cancel_rel(bt->pending_event); - } - PR_Unlock(bt->lock); - PR_DestroyLock(bt->lock); -diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c -index bc9ca424b..2dd74f9e7 100644 ---- a/ldap/servers/plugins/replication/repl5_connection.c -+++ b/ldap/servers/plugins/replication/repl5_connection.c -@@ -272,7 +272,7 @@ conn_delete(Repl_Connection *conn) - PR_ASSERT(NULL != conn); - PR_Lock(conn->lock); - if (conn->linger_active) { -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - /* Event was found and 
cancelled. Destroy the connection object. */ - destroy_it = PR_TRUE; - } else { -@@ -961,7 +961,7 @@ conn_cancel_linger(Repl_Connection *conn) - "conn_cancel_linger - %s - Canceling linger on the connection\n", - agmt_get_long_name(conn->agmt)); - conn->linger_active = PR_FALSE; -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - conn->refcnt--; - } - conn->linger_event = NULL; -@@ -1030,7 +1030,7 @@ conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - } else { - conn->linger_active = PR_TRUE; -- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); -+ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); - conn->status = STATUS_LINGERING; - } - PR_Unlock(conn->lock); -@@ -1990,7 +1990,7 @@ repl5_start_debug_timeout(int *setlevel) - Slapi_Eq_Context eqctx = 0; - if (s_debug_timeout && s_debug_level) { - time_t now = slapi_current_rel_time_t(); -- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, -+ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, - s_debug_timeout + now); - } - return eqctx; -@@ -2002,7 +2002,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) - char buf[20]; - - if (eqctx && !*setlevel) { -- (void)slapi_eq_cancel(eqctx); -+ (void)slapi_eq_cancel_rel(eqctx); - } - - if (s_debug_timeout && s_debug_level && *setlevel) { -diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -index 82e230958..2967a47f8 100644 ---- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c -+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c -@@ -82,8 +82,8 @@ multimaster_mtnode_construct_replicas() - } - } - /* Wait a few seconds for everything to startup before resuming any replication tasks */ -- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), -- slapi_current_rel_time_t() + 5); -+ slapi_eq_once_rel(replica_check_for_tasks, (void *)replica_get_root(r), -+ slapi_current_rel_time_t() + 5); - } - } - } -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index c1d376c72..7102e0606 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -231,17 +231,17 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, - /* ONREPL - the state update can occur before the entry is added to the DIT. - In that case the updated would fail but nothing bad would happen. The next - scheduled update would save the state */ -- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - - if (r->tombstone_reap_interval > 0) { - /* - * Reap Tombstone should be started some time after the plugin started. - * This will allow the server to fully start before consuming resources. 
- */ -- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -- slapi_current_rel_time_t() + r->tombstone_reap_interval, -- 1000 * r->tombstone_reap_interval); -+ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, -+ slapi_current_rel_time_t() + r->tombstone_reap_interval, -+ 1000 * r->tombstone_reap_interval); - } - - done: -@@ -303,12 +303,12 @@ replica_destroy(void **arg) - */ - - if (r->repl_eqcxt_rs) { -- slapi_eq_cancel(r->repl_eqcxt_rs); -+ slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; - } - - if (r->repl_eqcxt_tr) { -- slapi_eq_cancel(r->repl_eqcxt_tr); -+ slapi_eq_cancel_rel(r->repl_eqcxt_tr); - r->repl_eqcxt_tr = NULL; - } - -@@ -1511,14 +1511,14 @@ replica_set_enabled(Replica *r, PRBool enable) - if (enable) { - if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ - { -- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, -- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); -+ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, -+ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); - } - } else /* disable */ - { - if (r->repl_eqcxt_rs) /* event is still registerd */ - { -- slapi_eq_cancel(r->repl_eqcxt_rs); -+ slapi_eq_cancel_rel(r->repl_eqcxt_rs); - r->repl_eqcxt_rs = NULL; - } - } -@@ -3628,7 +3628,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - if (interval > 0 && r->repl_eqcxt_tr && r->tombstone_reap_interval != interval) { - int found; - -- found = slapi_eq_cancel(r->repl_eqcxt_tr); -+ found = slapi_eq_cancel_rel(r->repl_eqcxt_tr); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, - "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", - r->tombstone_reap_interval, (found ? 
"cancelled" : "not found")); -@@ -3636,7 +3636,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) - } - r->tombstone_reap_interval = interval; - if (interval > 0 && r->repl_eqcxt_tr == NULL) { -- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, -+ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, - slapi_current_rel_time_t() + r->tombstone_reap_interval, - 1000 * r->tombstone_reap_interval); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, -diff --git a/ldap/servers/plugins/replication/repl5_schedule.c b/ldap/servers/plugins/replication/repl5_schedule.c -index 9539f4031..ca42df561 100644 ---- a/ldap/servers/plugins/replication/repl5_schedule.c -+++ b/ldap/servers/plugins/replication/repl5_schedule.c -@@ -550,7 +550,7 @@ schedule_window_state_change_event(Schedule *sch) - wakeup_time = PRTime2time_t(tm); - - /* schedule the event */ -- sch->pending_event = slapi_eq_once(window_state_changed, sch, wakeup_time); -+ sch->pending_event = slapi_eq_once_rel(window_state_changed, sch, wakeup_time); - - timestr = get_timestring(&wakeup_time); - slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: Update window will %s at %s\n", -@@ -593,7 +593,7 @@ static void - unschedule_window_state_change_event(Schedule *sch) - { - if (sch->pending_event) { -- slapi_eq_cancel(sch->pending_event); -+ slapi_eq_cancel_rel(sch->pending_event); - sch->pending_event = NULL; - } - } -diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c -index ce0662544..5eca5fad1 100644 ---- a/ldap/servers/plugins/replication/windows_connection.c -+++ b/ldap/servers/plugins/replication/windows_connection.c -@@ -204,7 +204,7 @@ windows_conn_delete(Repl_Connection *conn) - PR_ASSERT(NULL != conn); - PR_Lock(conn->lock); - if (conn->linger_active) { -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - /* Event was found and cancelled. Destroy the connection object. 
*/ - PR_Unlock(conn->lock); - destroy_it = PR_TRUE; -@@ -1052,7 +1052,7 @@ windows_conn_cancel_linger(Repl_Connection *conn) - "windows_conn_cancel_linger - %s: Cancelling linger on the connection\n", - agmt_get_long_name(conn->agmt)); - conn->linger_active = PR_FALSE; -- if (slapi_eq_cancel(conn->linger_event) == 1) { -+ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { - conn->refcnt--; - } - conn->linger_event = NULL; -@@ -1129,7 +1129,7 @@ windows_conn_start_linger(Repl_Connection *conn) - agmt_get_long_name(conn->agmt)); - } else { - conn->linger_active = PR_TRUE; -- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); -+ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); - conn->status = STATUS_LINGERING; - } - PR_Unlock(conn->lock); -@@ -1822,8 +1822,8 @@ repl5_start_debug_timeout(int *setlevel) - - if (s_debug_timeout && s_debug_level) { - time_t now = time(NULL); -- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, -- s_debug_timeout + now); -+ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, -+ s_debug_timeout + now); - } - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= repl5_start_debug_timeout\n"); - return eqctx; -@@ -1837,7 +1837,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> repl5_stop_debug_timeout\n"); - - if (eqctx && !*setlevel) { -- (void)slapi_eq_cancel(eqctx); -+ (void)slapi_eq_cancel_rel(eqctx); - } - - if (s_debug_timeout && s_debug_level && *setlevel) { -diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c -index 3d548e5ed..c07a8180a 100644 ---- a/ldap/servers/plugins/replication/windows_inc_protocol.c -+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c -@@ -132,7 +132,7 @@ windows_inc_delete(Private_Repl_Protocol **prpp) - slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_inc_delete\n"); - /* First, stop the protocol if it isn't already stopped */ - /* Then, delete all resources used by the protocol */ -- rc = slapi_eq_cancel(dirsync); -+ rc = slapi_eq_cancel_rel(dirsync); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_delete - dirsync: %p, rval: %d\n", dirsync, rc); - /* if backoff is set, delete it (from EQ, as well) */ -@@ -324,12 +324,13 @@ windows_inc_run(Private_Repl_Protocol *prp) - if (interval != current_interval) { - current_interval = interval; - if (dirsync) { -- int rc = slapi_eq_cancel(dirsync); -+ int rc = slapi_eq_cancel_rel(dirsync); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - Cancelled dirsync: %p, rval: %d\n", - dirsync, rc); - } -- dirsync = slapi_eq_repeat(periodic_dirsync, (void *)prp, (time_t)0, interval); -+ dirsync = slapi_eq_repeat_rel(periodic_dirsync, (void *)prp, -+ slapi_current_rel_time_t(), interval); - slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, - "windows_inc_run - New dirsync: %p\n", dirsync); - } -diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c -index a3e16c4e1..12a395210 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_trim.c -+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c -@@ -460,10 +460,10 @@ retrocl_init_trimming(void) - ts.ts_s_initialized = 1; - retrocl_trimming = 1; - -- retrocl_trim_ctx = slapi_eq_repeat(retrocl_housekeeping, -- NULL, (time_t)0, -- /* in milliseconds */ -- trim_interval * 1000); 
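/* Aside (not part of the patch): callers hand the repeat API an interval in
 * milliseconds, but the queue stores whole seconds; eq_new() rounds up with
 * (interval + 999) / 1000, and eq_call_all() requeues a repeating event by
 * stepping ec_when past "now" rather than replaying missed firings. A
 * self-contained sketch of that arithmetic:
 */
#include <stdio.h>
#include <time.h>

int
main(void)
{
    unsigned long interval_ms = 300 * 1000;                /* e.g. a 300s trim interval */
    unsigned long interval_s = (interval_ms + 999) / 1000; /* ceiling, as eq_new() does */
    time_t when = 0;                                       /* ec_when of a long-overdue event */
    time_t now = time(NULL);

    /* Requeue logic for repeating events: skip missed slots, do not catch up */
    do {
        when += (time_t)interval_s;
    } while (when < now);

    printf("next firing at %ld (every %lus)\n", (long)when, interval_s);
    return 0;
}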
-+ retrocl_trim_ctx = slapi_eq_repeat_rel(retrocl_housekeeping, -+ NULL, (time_t)0, -+ /* in milliseconds */ -+ trim_interval * 1000); - } - - /* -@@ -487,7 +487,7 @@ retrocl_stop_trimming(void) - */ - retrocl_trimming = 0; - if (retrocl_trim_ctx) { -- slapi_eq_cancel(retrocl_trim_ctx); -+ slapi_eq_cancel_rel(retrocl_trim_ctx); - retrocl_trim_ctx = NULL; - } - PR_DestroyLock(ts.ts_s_trim_mutex); -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 0071ed86a..7681e88ea 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -1240,7 +1240,8 @@ slapd_daemon(daemon_ports_t *ports) - slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon", - "slapd shutting down - waiting for backends to close down\n"); - -- eq_stop(); -+ eq_stop(); /* deprecated */ -+ eq_stop_rel(); - if (!in_referral_mode) { - task_shutdown(); - uniqueIDGenCleanup(); -diff --git a/ldap/servers/slapd/eventq-deprecated.c b/ldap/servers/slapd/eventq-deprecated.c -new file mode 100644 -index 000000000..71a7bf8f5 ---- /dev/null -+++ b/ldap/servers/slapd/eventq-deprecated.c -@@ -0,0 +1,483 @@ -+/** BEGIN COPYRIGHT BLOCK -+ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -+ * Copyright (C) 2020 Red Hat, Inc. -+ * All rights reserved. -+ * -+ * License: GPL (version 3 or any later version). -+ * See LICENSE for details. -+ * END COPYRIGHT BLOCK **/ -+ -+#ifdef HAVE_CONFIG_H -+#include -+#endif -+ -+ -+/* ******************************************************** -+eventq-deprecated.c - Event queue/scheduling system. -+ -+There are 3 publicly-accessible entry points: -+ -+slapi_eq_once(): cause an event to happen exactly once -+slapi_eq_repeat(): cause an event to happen repeatedly -+slapi_eq_cancel(): cancel a pending event -+ -+There is also an initialization point which must be -+called by the server to initialize the event queue system: -+eq_start(), and an entry point used to shut down the system: -+eq_stop(). -+ -+These functions are now deprecated in favor of the functions -+in eventq.c which use MONOTONIC clocks instead of REALTIME -+clocks. -+*********************************************************** */ -+ -+#include "slap.h" -+#include "prlock.h" -+#include "prcvar.h" -+#include "prinit.h" -+ -+/* -+ * Private definition of slapi_eq_context. Only this -+ * module (eventq.c) should know about the layout of -+ * this structure. -+ */ -+typedef struct _slapi_eq_context -+{ -+ time_t ec_when; -+ time_t ec_interval; -+ slapi_eq_fn_t ec_fn; -+ void *ec_arg; -+ Slapi_Eq_Context ec_id; -+ struct _slapi_eq_context *ec_next; -+} slapi_eq_context; -+ -+/* -+ * Definition of the event queue. -+ */ -+typedef struct _event_queue -+{ -+ PRLock *eq_lock; -+ PRCondVar *eq_cv; -+ slapi_eq_context *eq_queue; -+} event_queue; -+ -+/* -+ * The event queue itself. 
-+ */ -+static event_queue eqs = {0}; -+static event_queue *eq = &eqs; -+ -+/* -+ * Thread ID of the main thread loop -+ */ -+static PRThread *eq_loop_tid = NULL; -+ -+/* -+ * Flags used to control startup/shutdown of the event queue -+ */ -+static int eq_running = 0; -+static int eq_stopped = 0; -+static int eq_initialized = 0; -+PRLock *ss_lock = NULL; -+PRCondVar *ss_cv = NULL; -+PRCallOnceType init_once = {0}; -+ -+/* Forward declarations */ -+static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); -+static void eq_enqueue(slapi_eq_context *newec); -+static slapi_eq_context *eq_dequeue(time_t now); -+static PRStatus eq_create(void); -+ -+ -+/* ******************************************************** */ -+ -+ -+/* -+ * slapi_eq_once: cause an event to happen exactly once. -+ * -+ * Arguments: -+ * fn: the function to call -+ * arg: an argument to pass to the called function -+ * when: the time that the function should be called -+ * Returns: -+ * slapi_eq_context - a handle to an opaque object which -+ * the caller can use to refer to this particular scheduled -+ * event. -+ */ -+Slapi_Eq_Context -+slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) -+{ -+ slapi_eq_context *tmp; -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ -+ Slapi_Eq_Context id; -+ -+ tmp = eq_new(fn, arg, when, 0UL); -+ id = tmp->ec_id; -+ -+ eq_enqueue(tmp); -+ -+ /* After this point, may have */ -+ /* been freed, depending on the thread */ -+ /* scheduling. Too bad */ -+ -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "added one-time event id %p at time %ld\n", -+ id, when); -+ return (id); -+ } -+ return NULL; /* JCM - Not sure if this should be 0 or something else. */ -+} -+ -+ -+/* -+ * slapi_eq_repeat: cause an event to happen repeatedly. -+ * -+ * Arguments: -+ * fn: the function to call -+ * arg: an argument to pass to the called function -+ * when: the time that the function should first be called -+ * interval: the amount of time (in milliseconds) between -+ * successive calls to the function -+ * Returns: -+ * slapi_eq_context - a handle to an opaque object which -+ * the caller can use to refer to this particular scheduled -+ */ -+Slapi_Eq_Context -+slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+{ -+ slapi_eq_context *tmp; -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ tmp = eq_new(fn, arg, when, interval); -+ eq_enqueue(tmp); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "added repeating event id %p at time %ld, interval %lu\n", -+ tmp->ec_id, when, interval); -+ return (tmp->ec_id); -+ } -+ return NULL; /* JCM - Not sure if this should be 0 or something else. */ -+} -+ -+ -+/* -+ * slapi_eq_cancel: cancel a pending event. -+ * Arguments: -+ * ctx: the context of the event which should be de-scheduled -+ */ -+int -+slapi_eq_cancel(Slapi_Eq_Context ctx) -+{ -+ slapi_eq_context **p, *tmp = NULL; -+ int found = 0; -+ -+ PR_ASSERT(eq_initialized); -+ if (!eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ p = &(eq->eq_queue); -+ while (!found && *p != NULL) { -+ if ((*p)->ec_id == ctx) { -+ tmp = *p; -+ *p = (*p)->ec_next; -+ slapi_ch_free((void **)&tmp); -+ found = 1; -+ } else { -+ p = &((*p)->ec_next); -+ } -+ } -+ PR_Unlock(eq->eq_lock); -+ } -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "cancellation of event id %p requested: %s\n", -+ ctx, found ? 
"cancellation succeeded" : "event not found"); -+ return found; -+} -+ -+ -+/* -+ * Construct a new ec structure -+ */ -+static slapi_eq_context * -+eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+{ -+ slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); -+ -+ retptr->ec_fn = fn; -+ retptr->ec_arg = arg; -+ /* -+ * retptr->ec_when = when < now ? now : when; -+ * we used to amke this check, but it make no sense: when queued, if when -+ * has expired, we'll be executed anyway. save the cycles, and just set -+ * ec_when. -+ */ -+ retptr->ec_when = when; -+ retptr->ec_interval = interval == 0UL ? 0UL : (interval + 999) / 1000; -+ retptr->ec_id = (Slapi_Eq_Context)retptr; -+ return retptr; -+} -+ -+ -+/* -+ * Add a new event to the event queue. -+ */ -+static void -+eq_enqueue(slapi_eq_context *newec) -+{ -+ slapi_eq_context **p; -+ -+ PR_ASSERT(NULL != newec); -+ PR_Lock(eq->eq_lock); -+ /* Insert in order (sorted by start time) in the list */ -+ for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { -+ if ((*p)->ec_when > newec->ec_when) { -+ break; -+ } -+ } -+ if (NULL != *p) { -+ newec->ec_next = *p; -+ } else { -+ newec->ec_next = NULL; -+ } -+ *p = newec; -+ PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ -+ PR_Unlock(eq->eq_lock); -+} -+ -+ -+/* -+ * If there is an event in the queue scheduled at time -+ * or before, dequeue it and return a pointer -+ * to it. Otherwise, return NULL. -+ */ -+static slapi_eq_context * -+eq_dequeue(time_t now) -+{ -+ slapi_eq_context *retptr = NULL; -+ -+ PR_Lock(eq->eq_lock); -+ if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { -+ retptr = eq->eq_queue; -+ eq->eq_queue = retptr->ec_next; -+ } -+ PR_Unlock(eq->eq_lock); -+ return retptr; -+} -+ -+ -+/* -+ * Call all events which are due to run. -+ * Note that if we've missed a schedule -+ * opportunity, we don't try to catch up -+ * by calling the function repeatedly. -+ */ -+static void -+eq_call_all(void) -+{ -+ slapi_eq_context *p; -+ time_t curtime = slapi_current_utc_time(); -+ -+ while ((p = eq_dequeue(curtime)) != NULL) { -+ /* Call the scheduled function */ -+ p->ec_fn(p->ec_when, p->ec_arg); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, -+ "Event id %p called at %ld (scheduled for %ld)\n", -+ p->ec_id, curtime, p->ec_when); -+ if (0UL != p->ec_interval) { -+ /* This is a repeating event. Requeue it. */ -+ do { -+ p->ec_when += p->ec_interval; -+ } while (p->ec_when < curtime); -+ eq_enqueue(p); -+ } else { -+ slapi_ch_free((void **)&p); -+ } -+ } -+} -+ -+ -+/* -+ * The main event queue loop. -+ */ -+static void -+eq_loop(void *arg __attribute__((unused))) -+{ -+ while (eq_running) { -+ time_t curtime = slapi_current_utc_time(); -+ PRIntervalTime timeout; -+ int until; -+ PR_Lock(eq->eq_lock); -+ while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { -+ if (!eq_running) { -+ PR_Unlock(eq->eq_lock); -+ goto bye; -+ } -+ /* Compute new timeout */ -+ if (NULL != eq->eq_queue) { -+ until = eq->eq_queue->ec_when - curtime; -+ timeout = PR_SecondsToInterval(until); -+ } else { -+ timeout = PR_INTERVAL_NO_TIMEOUT; -+ } -+ PR_WaitCondVar(eq->eq_cv, timeout); -+ curtime = slapi_current_utc_time(); -+ } -+ /* There is some work to do */ -+ PR_Unlock(eq->eq_lock); -+ eq_call_all(); -+ } -+bye: -+ eq_stopped = 1; -+ PR_Lock(ss_lock); -+ PR_NotifyAllCondVar(ss_cv); -+ PR_Unlock(ss_lock); -+} -+ -+ -+/* -+ * Allocate and initialize the event queue structures. 
-+ */ -+static PRStatus -+eq_create(void) -+{ -+ PR_ASSERT(NULL == eq->eq_lock); -+ if ((eq->eq_lock = PR_NewLock()) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ exit(1); -+ } -+ if ((ss_lock = PR_NewLock()) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); -+ exit(1); -+ } -+ if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); -+ exit(1); -+ } -+ eq->eq_queue = NULL; -+ eq_initialized = 1; -+ return PR_SUCCESS; -+} -+ -+ -+/* -+ * eq_start: start the event queue system. -+ * -+ * This should be called exactly once. It will start a -+ * thread which wakes up periodically and schedules events. -+ */ -+void -+eq_start() -+{ -+ PR_ASSERT(eq_initialized); -+ eq_running = 1; -+ if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, -+ NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, -+ SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); -+ exit(1); -+ } -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); -+} -+ -+ -+/* -+ * eq_init: initialize the event queue system. -+ * -+ * This function should be called early in server startup. -+ * Once it has been called, the event queue will queue -+ * events, but will not fire any events. Once all of the -+ * server plugins have been started, the eq_start() -+ * function should be called, and events will then start -+ * to fire. -+ */ -+void -+eq_init() -+{ -+ if (!eq_initialized) { -+ if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); -+ } -+ } -+} -+ -+ -+/* -+ * eq_stop: shut down the event queue system. -+ * Does not return until event queue is fully -+ * shut down. -+ */ -+void -+eq_stop() -+{ -+ slapi_eq_context *p, *q; -+ -+ if (NULL == eq || NULL == eq->eq_lock) { /* never started */ -+ eq_stopped = 1; -+ return; -+ } -+ -+ eq_stopped = 0; -+ eq_running = 0; -+ /* -+ * Signal the eq thread function to stop, and wait until -+ * it acknowledges by setting eq_stopped. -+ */ -+ while (!eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ PR_NotifyAllCondVar(eq->eq_cv); -+ PR_Unlock(eq->eq_lock); -+ PR_Lock(ss_lock); -+ PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); -+ PR_Unlock(ss_lock); -+ } -+ (void)PR_JoinThread(eq_loop_tid); -+ /* -+ * XXXggood we don't free the actual event queue data structures. -+ * This is intentional, to allow enqueueing/cancellation of events -+ * even after event queue services have shut down (these are no-ops). -+ * The downside is that the event queue can't be stopped and restarted -+ * easily. -+ */ -+ PR_Lock(eq->eq_lock); -+ p = eq->eq_queue; -+ while (p != NULL) { -+ q = p->ec_next; -+ slapi_ch_free((void **)&p); -+ /* Some ec_arg could get leaked here in shutdown (e.g., replica_name) -+ * This can be fixed by specifying a flag when the context is queued. 
-+ * [After 6.2] -+ */ -+ p = q; -+ } -+ PR_Unlock(eq->eq_lock); -+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); -+} -+ -+/* -+ * return arg (ec_arg) only if the context is in the event queue -+ */ -+void * -+slapi_eq_get_arg(Slapi_Eq_Context ctx) -+{ -+ slapi_eq_context **p; -+ -+ PR_ASSERT(eq_initialized); -+ if (eq && !eq_stopped) { -+ PR_Lock(eq->eq_lock); -+ p = &(eq->eq_queue); -+ while (p && *p != NULL) { -+ if ((*p)->ec_id == ctx) { -+ PR_Unlock(eq->eq_lock); -+ return (*p)->ec_arg; -+ } else { -+ p = &((*p)->ec_next); -+ } -+ } -+ PR_Unlock(eq->eq_lock); -+ } -+ return NULL; -+} -diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c -index e1900724f..4c39e08cf 100644 ---- a/ldap/servers/slapd/eventq.c -+++ b/ldap/servers/slapd/eventq.c -@@ -17,14 +17,14 @@ eventq.c - Event queue/scheduling system. - - There are 3 publicly-accessible entry points: - --slapi_eq_once(): cause an event to happen exactly once --slapi_eq_repeat(): cause an event to happen repeatedly --slapi_eq_cancel(): cancel a pending event -+slapi_eq_once_rel(): cause an event to happen exactly once -+slapi_eq_repeat_rel(): cause an event to happen repeatedly -+slapi_eq_cancel_rel(): cancel a pending event - - There is also an initialization point which must be - called by the server to initialize the event queue system: --eq_start(), and an entry point used to shut down the system: --eq_stop(). -+eq_start_rel(), and an entry point used to shut down the system: -+eq_stop_rel(). - *********************************************************** */ - - #include "slap.h" -@@ -60,36 +60,36 @@ typedef struct _event_queue - /* - * The event queue itself. - */ --static event_queue eqs = {0}; --static event_queue *eq = &eqs; -+static event_queue eqs_rel = {0}; -+static event_queue *eq_rel = &eqs_rel; - - /* - * Thread ID of the main thread loop - */ --static PRThread *eq_loop_tid = NULL; -+static PRThread *eq_loop_rel_tid = NULL; - - /* - * Flags used to control startup/shutdown of the event queue - */ --static int eq_running = 0; --static int eq_stopped = 0; --static int eq_initialized = 0; --static pthread_mutex_t ss_lock; --static pthread_cond_t ss_cv; --PRCallOnceType init_once = {0}; -+static int eq_rel_running = 0; -+static int eq_rel_stopped = 0; -+static int eq_rel_initialized = 0; -+static pthread_mutex_t ss_rel_lock; -+static pthread_cond_t ss_rel_cv; -+PRCallOnceType init_once_rel = {0}; - - /* Forward declarations */ --static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); --static void eq_enqueue(slapi_eq_context *newec); --static slapi_eq_context *eq_dequeue(time_t now); --static PRStatus eq_create(void); -+static slapi_eq_context *eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); -+static void eq_enqueue_rel(slapi_eq_context *newec); -+static slapi_eq_context *eq_dequeue_rel(time_t now); -+static PRStatus eq_create_rel(void); - - - /* ******************************************************** */ - - - /* -- * slapi_eq_once: cause an event to happen exactly once. -+ * slapi_eq_once_rel: cause an event to happen exactly once. - * - * Arguments: - * fn: the function to call -@@ -101,18 +101,18 @@ static PRStatus eq_create(void); - * event. 
- */ - Slapi_Eq_Context --slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) -+slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when) - { - slapi_eq_context *tmp; -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { - - Slapi_Eq_Context id; - -- tmp = eq_new(fn, arg, when, 0UL); -+ tmp = eq_new_rel(fn, arg, when, 0UL); - id = tmp->ec_id; - -- eq_enqueue(tmp); -+ eq_enqueue_rel(tmp); - - /* After this point, may have */ - /* been freed, depending on the thread */ -@@ -128,7 +128,7 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) - - - /* -- * slapi_eq_repeat: cause an event to happen repeatedly. -+ * slapi_eq_repeat_rel: cause an event to happen repeatedly. - * - * Arguments: - * fn: the function to call -@@ -141,13 +141,13 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) - * the caller can use to refer to this particular scheduled - */ - Slapi_Eq_Context --slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - { - slapi_eq_context *tmp; -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -- tmp = eq_new(fn, arg, when, interval); -- eq_enqueue(tmp); -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { -+ tmp = eq_new_rel(fn, arg, when, interval); -+ eq_enqueue_rel(tmp); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "added repeating event id %p at time %ld, interval %lu\n", - tmp->ec_id, when, interval); -@@ -158,20 +158,20 @@ slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval - - - /* -- * slapi_eq_cancel: cancel a pending event. -+ * slapi_eq_cancel_rel: cancel a pending event. - * Arguments: - * ctx: the context of the event which should be de-scheduled - */ - int --slapi_eq_cancel(Slapi_Eq_Context ctx) -+slapi_eq_cancel_rel(Slapi_Eq_Context ctx) - { - slapi_eq_context **p, *tmp = NULL; - int found = 0; - -- PR_ASSERT(eq_initialized); -- if (!eq_stopped) { -- pthread_mutex_lock(&(eq->eq_lock)); -- p = &(eq->eq_queue); -+ PR_ASSERT(eq_rel_initialized); -+ if (!eq_rel_stopped) { -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = &(eq_rel->eq_queue); - while (!found && *p != NULL) { - if ((*p)->ec_id == ctx) { - tmp = *p; -@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - p = &((*p)->ec_next); - } - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, - "cancellation of event id %p requested: %s\n", -@@ -195,7 +195,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) - * Construct a new ec structure - */ - static slapi_eq_context * --eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) -+eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - { - slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); - -@@ -218,14 +218,14 @@ eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) - * Add a new event to the event queue. 
- */ - static void --eq_enqueue(slapi_eq_context *newec) -+eq_enqueue_rel(slapi_eq_context *newec) - { - slapi_eq_context **p; - - PR_ASSERT(NULL != newec); -- pthread_mutex_lock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); - /* Insert in order (sorted by start time) in the list */ -- for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { -+ for (p = &(eq_rel->eq_queue); *p != NULL; p = &((*p)->ec_next)) { - if ((*p)->ec_when > newec->ec_when) { - break; - } -@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) - newec->ec_next = NULL; - } - *p = newec; -- pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_cond_signal(&(eq_rel->eq_cv)); /* wake up scheduler thread */ -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - - -@@ -247,16 +247,16 @@ eq_enqueue(slapi_eq_context *newec) - * to it. Otherwise, return NULL. - */ - static slapi_eq_context * --eq_dequeue(time_t now) -+eq_dequeue_rel(time_t now) - { - slapi_eq_context *retptr = NULL; - -- pthread_mutex_lock(&(eq->eq_lock)); -- if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { -- retptr = eq->eq_queue; -- eq->eq_queue = retptr->ec_next; -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ if (NULL != eq_rel->eq_queue && eq_rel->eq_queue->ec_when <= now) { -+ retptr = eq_rel->eq_queue; -+ eq_rel->eq_queue = retptr->ec_next; - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - return retptr; - } - -@@ -268,12 +268,12 @@ eq_dequeue(time_t now) - * by calling the function repeatedly. - */ - static void --eq_call_all(void) -+eq_call_all_rel(void) - { - slapi_eq_context *p; - time_t curtime = slapi_current_rel_time_t(); - -- while ((p = eq_dequeue(curtime)) != NULL) { -+ while ((p = eq_dequeue_rel(curtime)) != NULL) { - /* Call the scheduled function */ - p->ec_fn(p->ec_when, p->ec_arg); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, -@@ -284,7 +284,7 @@ eq_call_all(void) - do { - p->ec_when += p->ec_interval; - } while (p->ec_when < curtime); -- eq_enqueue(p); -+ eq_enqueue_rel(p); - } else { - slapi_ch_free((void **)&p); - } -@@ -296,38 +296,38 @@ eq_call_all(void) - * The main event queue loop. 
- */ - static void --eq_loop(void *arg __attribute__((unused))) -+eq_loop_rel(void *arg __attribute__((unused))) - { -- while (eq_running) { -+ while (eq_rel_running) { - time_t curtime = slapi_current_rel_time_t(); - int until; - -- pthread_mutex_lock(&(eq->eq_lock)); -- while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { -- if (!eq_running) { -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ while (!((NULL != eq_rel->eq_queue) && (eq_rel->eq_queue->ec_when <= curtime))) { -+ if (!eq_rel_running) { -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - goto bye; - } - /* Compute new timeout */ -- if (NULL != eq->eq_queue) { -+ if (NULL != eq_rel->eq_queue) { - struct timespec current_time = slapi_current_rel_time_hr(); -- until = eq->eq_queue->ec_when - curtime; -+ until = eq_rel->eq_queue->ec_when - curtime; - current_time.tv_sec += until; -- pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); -+ pthread_cond_timedwait(&eq_rel->eq_cv, &eq_rel->eq_lock, ¤t_time); - } else { -- pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); -+ pthread_cond_wait(&eq_rel->eq_cv, &eq_rel->eq_lock); - } - curtime = slapi_current_rel_time_t(); - } - /* There is some work to do */ -- pthread_mutex_unlock(&(eq->eq_lock)); -- eq_call_all(); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); -+ eq_call_all_rel(); - } - bye: -- eq_stopped = 1; -- pthread_mutex_lock(&ss_lock); -- pthread_cond_broadcast(&ss_cv); -- pthread_mutex_unlock(&ss_lock); -+ eq_rel_stopped = 1; -+ pthread_mutex_lock(&ss_rel_lock); -+ pthread_cond_broadcast(&ss_rel_cv); -+ pthread_mutex_unlock(&ss_rel_lock); - } - - -@@ -335,73 +335,73 @@ bye: - * Allocate and initialize the event queue structures. - */ - static PRStatus --eq_create(void) -+eq_create_rel(void) - { - pthread_condattr_t condAttr; - int rc = 0; - - /* Init the eventq mutex and cond var */ -- if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if (pthread_mutex_init(&eq_rel->eq_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create lock: error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - if ((rc = pthread_condattr_init(&condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new condition attribute variable. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Cannot set condition attr clock. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } -- if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if ((rc = pthread_cond_init(&eq_rel->eq_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new condition variable. 
error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - - /* Init the "ss" mutex and condition var */ -- if (pthread_mutex_init(&ss_lock, NULL) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if (pthread_mutex_init(&ss_rel_lock, NULL) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create ss lock: error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } -- if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_create", -+ if ((rc = pthread_cond_init(&ss_rel_cv, &condAttr)) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", - "Failed to create new ss condition variable. error %d (%s)\n", - rc, strerror(rc)); - exit(1); - } - pthread_condattr_destroy(&condAttr); /* no longer needed */ - -- eq->eq_queue = NULL; -- eq_initialized = 1; -+ eq_rel->eq_queue = NULL; -+ eq_rel_initialized = 1; - return PR_SUCCESS; - } - - - /* -- * eq_start: start the event queue system. -+ * eq_start_rel: start the event queue system. - * - * This should be called exactly once. It will start a - * thread which wakes up periodically and schedules events. - */ - void --eq_start() -+eq_start_rel() - { -- PR_ASSERT(eq_initialized); -- eq_running = 1; -- if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, -+ PR_ASSERT(eq_rel_initialized); -+ eq_rel_running = 1; -+ if ((eq_loop_rel_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop_rel, - NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); -+ slapi_log_err(SLAPI_LOG_ERR, "eq_start_rel", "eq_loop_rel PR_CreateThread failed\n"); - exit(1); - } - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); -@@ -409,55 +409,55 @@ eq_start() - - - /* -- * eq_init: initialize the event queue system. -+ * eq_init_rel: initialize the event queue system. - * - * This function should be called early in server startup. - * Once it has been called, the event queue will queue - * events, but will not fire any events. Once all of the -- * server plugins have been started, the eq_start() -+ * server plugins have been started, the eq_start_rel() - * function should be called, and events will then start - * to fire. - */ - void --eq_init() -+eq_init_rel() - { -- if (!eq_initialized) { -- if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { -- slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); -+ if (!eq_rel_initialized) { -+ if (PR_SUCCESS != PR_CallOnce(&init_once_rel, eq_create_rel)) { -+ slapi_log_err(SLAPI_LOG_ERR, "eq_init_rel", "eq_create_rel failed\n"); - } - } - } - - - /* -- * eq_stop: shut down the event queue system. -+ * eq_stop_rel: shut down the event queue system. - * Does not return until event queue is fully - * shut down. - */ - void --eq_stop() -+eq_stop_rel() - { - slapi_eq_context *p, *q; - -- if (NULL == eq) { /* never started */ -- eq_stopped = 1; -+ if (NULL == eq_rel) { /* never started */ -+ eq_rel_stopped = 1; - return; - } - -- eq_stopped = 0; -- eq_running = 0; -+ eq_rel_stopped = 0; -+ eq_rel_running = 0; - /* - * Signal the eq thread function to stop, and wait until -- * it acknowledges by setting eq_stopped. -+ * it acknowledges by setting eq_rel_stopped. 
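[Editorial sketch] The eq_create_rel() changes pin the new condition variables to CLOCK_MONOTONIC through a pthread_condattr_t, which is what makes the later timedwaits immune to wall-clock jumps. The pattern in isolation, with the patch's exit-on-failure handling reduced to returning the error (a sketch, not the server code):

    #include <pthread.h>
    #include <time.h>

    static pthread_cond_t cv;

    static int
    init_monotonic_condvar(void)
    {
        pthread_condattr_t attr;
        int rc;

        if ((rc = pthread_condattr_init(&attr)) != 0)
            return rc;
        /* Deadlines passed to pthread_cond_timedwait(&cv, ...) are now
         * interpreted against CLOCK_MONOTONIC, not CLOCK_REALTIME. */
        if ((rc = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)) == 0)
            rc = pthread_cond_init(&cv, &attr);
        pthread_condattr_destroy(&attr); /* safe once the condvar exists */
        return rc;
    }
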
- */ -- while (!eq_stopped) { -+ while (!eq_rel_stopped) { - struct timespec current_time = {0}; - -- pthread_mutex_lock(&(eq->eq_lock)); -- pthread_cond_broadcast(&(eq->eq_cv)); -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ pthread_cond_broadcast(&(eq_rel->eq_cv)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - -- pthread_mutex_lock(&ss_lock); -+ pthread_mutex_lock(&ss_rel_lock); - clock_gettime(CLOCK_MONOTONIC, ¤t_time); - if (current_time.tv_nsec + 100000000 > 1000000000) { - /* nanoseconds will overflow, adjust the seconds and nanoseconds */ -@@ -467,10 +467,10 @@ eq_stop() - } else { - current_time.tv_nsec += 100000000; /* 100 ms */ - } -- pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); -- pthread_mutex_unlock(&ss_lock); -+ pthread_cond_timedwait(&ss_rel_cv, &ss_rel_lock, ¤t_time); -+ pthread_mutex_unlock(&ss_rel_lock); - } -- (void)PR_JoinThread(eq_loop_tid); -+ (void)PR_JoinThread(eq_loop_rel_tid); - /* - * XXXggood we don't free the actual event queue data structures. - * This is intentional, to allow enqueueing/cancellation of events -@@ -478,8 +478,8 @@ eq_stop() - * The downside is that the event queue can't be stopped and restarted - * easily. - */ -- pthread_mutex_lock(&(eq->eq_lock)); -- p = eq->eq_queue; -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = eq_rel->eq_queue; - while (p != NULL) { - q = p->ec_next; - slapi_ch_free((void **)&p); -@@ -489,7 +489,7 @@ eq_stop() - */ - p = q; - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); - } - -@@ -497,23 +497,23 @@ eq_stop() - * return arg (ec_arg) only if the context is in the event queue - */ - void * --slapi_eq_get_arg(Slapi_Eq_Context ctx) -+slapi_eq_get_arg_rel(Slapi_Eq_Context ctx) - { - slapi_eq_context **p; - -- PR_ASSERT(eq_initialized); -- if (eq && !eq_stopped) { -- pthread_mutex_lock(&(eq->eq_lock)); -- p = &(eq->eq_queue); -+ PR_ASSERT(eq_rel_initialized); -+ if (eq_rel && !eq_rel_stopped) { -+ pthread_mutex_lock(&(eq_rel->eq_lock)); -+ p = &(eq_rel->eq_queue); - while (p && *p != NULL) { - if ((*p)->ec_id == ctx) { -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - return (*p)->ec_arg; - } else { - p = &((*p)->ec_next); - } - } -- pthread_mutex_unlock(&(eq->eq_lock)); -+ pthread_mutex_unlock(&(eq_rel->eq_lock)); - } - return NULL; - } -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index 104f6826c..dbc8cec15 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -979,7 +979,8 @@ main(int argc, char **argv) - fedse_create_startOK(DSE_FILENAME, DSE_STARTOKFILE, - slapdFrontendConfig->configdir); - -- eq_init(); /* must be done before plugins started */ -+ eq_init(); /* DEPRECATED */ -+ eq_init_rel(); /* must be done before plugins started */ - - /* Start the SNMP collator if counters are enabled. 
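[Editorial sketch] eq_stop_rel() above polls in 100 ms slices and normalizes tv_nsec by hand because struct timespec carries no overflow semantics: once tv_nsec reaches one second the excess must be carried into tv_sec before pthread_cond_timedwait() will accept the deadline. The same arithmetic as a helper (assumes the monotonic clock set up earlier):

    #include <time.h>

    /* Return "now + ms" on CLOCK_MONOTONIC with tv_nsec kept in
     * [0, 1e9), as pthread_cond_timedwait() requires. */
    static struct timespec
    deadline_in_ms(long ms)
    {
        struct timespec t;

        clock_gettime(CLOCK_MONOTONIC, &t);
        t.tv_sec += ms / 1000;
        t.tv_nsec += (ms % 1000) * 1000000L;
        if (t.tv_nsec >= 1000000000L) {
            t.tv_sec += 1;              /* carry the overflow */
            t.tv_nsec -= 1000000000L;
        }
        return t;
    }
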
 */
- if (config_get_slapi_counters()) {
-@@ -1035,7 +1036,8 @@ main(int argc, char **argv)
- goto cleanup;
- }
-
-- eq_start(); /* must be done after plugins started */
-+ eq_start(); /* must be done after plugins started - DEPRECATED */
-+ eq_start_rel(); /* must be done after plugins started */
-
- #ifdef HPUX10
- /* HPUX linker voodoo */
-@@ -2205,10 +2207,13 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg)
- */
- plugin_get_plugin_dependencies(repl_plg_name, &plugin_list);
-
-- eq_init(); /* must be done before plugins started */
-+ eq_init(); /* must be done before plugins started - DEPRECATED */
-+ eq_init_rel(); /* must be done before plugins started */
-+
- ps_init_psearch_system(); /* must come before plugin_startall() */
- plugin_startall(argc, argv, plugin_list);
-- eq_start(); /* must be done after plugins started */
-+ eq_start(); /* must be done after plugins started - DEPRECATED */
-+ eq_start_rel(); /* must be done after plugins started */
- charray_free(plugin_list);
- }
-
-@@ -2263,8 +2268,9 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg)
- charray_free(mcfg->cmd_line_instance_names);
- charray_free(mcfg->db2ldif_include);
- if (mcfg->db2ldif_dump_replica) {
-- eq_stop(); /* event queue should be shutdown before closing
-- all plugins (especailly, replication plugin) */
-+ eq_stop(); /* DEPRECATED */
-+ eq_stop_rel(); /* event queue should be shutdown before closing
-+ all plugins (especially, replication plugin) */
- plugin_closeall(1 /* Close Backends */, 1 /* Close Globals */);
- }
- return (return_value);
-diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
-index 3acc24f03..87080dd82 100644
---- a/ldap/servers/slapd/proto-slap.h
-+++ b/ldap/servers/slapd/proto-slap.h
-@@ -1322,7 +1322,6 @@ void factory_destroy_extension(int type, void *object, void *parent, void **exte
- /*
- * auditlog.c
- */
--
- void write_audit_log_entry(Slapi_PBlock *pb);
- void auditlog_hide_unhashed_pw(void);
- void auditlog_expose_unhashed_pw(void);
-@@ -1334,10 +1333,15 @@ void auditfaillog_expose_unhashed_pw(void);
- /*
- * eventq.c
- */
-+void eq_init_rel(void);
-+void eq_start_rel(void);
-+void eq_stop_rel(void);
-+/* Deprecated eventq that uses REALTIME clock instead of MONOTONIC */
- void eq_init(void);
- void eq_start(void);
- void eq_stop(void);
-
-+
- /*
- * uniqueidgen.c
- */
-diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
-index 55ded5eb8..f76b86e3c 100644
---- a/ldap/servers/slapd/slapi-plugin.h
-+++ b/ldap/servers/slapd/slapi-plugin.h
-@@ -6084,7 +6084,7 @@ void slapi_lock_mutex(Slapi_Mutex *mutex);
- int slapi_unlock_mutex(Slapi_Mutex *mutex);
- Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex);
- void slapi_destroy_condvar(Slapi_CondVar *cvar);
--int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout);
-+int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) __attribute__((deprecated));
- int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all);
- int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout);
-
-@@ -8059,24 +8059,24 @@ typedef void (*slapi_eq_fn_t)(time_t when, void *arg);
- *
- * \param fn The function to call when the event is triggered.
- * \param arg An argument to pass to the called function.
-- * \param when The time that the function should be called.
-+ * \param when The time that the function should be called (MONOTONIC clock).
- *
- * \return slapi_eq_context
- */
--Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when);
-+Slapi_Eq_Context slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when);
-
- /**
- * Cause an event to happen repeatedly.
- *
- * \param fn The function to call when the event is triggered.
- * \param arg An argument to pass to the called function.
-- * \param when The time that the function should be called.
-+ * \param when The time that the function should be called (MONOTONIC clock).
- * \param interval The amount of time (in milliseconds) between
- * successive calls to the function.
- *
- * \return slapi_eq_context
- */
--Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval);
-+Slapi_Eq_Context slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval);
-
- /**
- * Cause a scheduled event to be canceled.
-@@ -8086,7 +8086,7 @@ Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsig
- * \return 1 If event was found and canceled.
- * \return 0 If event was not found in the queue.
- */
--int slapi_eq_cancel(Slapi_Eq_Context ctx);
-+int slapi_eq_cancel_rel(Slapi_Eq_Context ctx);
-
- /**
- * Return the event's argument.
-@@ -8095,7 +8095,55 @@ int slapi_eq_cancel(Slapi_Eq_Context ctx);
- *
- * \return A pointer to the event argument.
- */
---void *slapi_eq_get_arg(Slapi_Eq_Context ctx);
-+void *slapi_eq_get_arg_rel(Slapi_Eq_Context ctx);
-+
-+/*
-+ * These event queue functions are now DEPRECATED as they use REALTIME clocks
-+ * instead of the preferred MONOTONIC clocks.
-+ */
-+
-+/**
-+ * Cause an event to happen exactly once.
-+ *
-+ * \param fn The function to call when the event is triggered.
-+ * \param arg An argument to pass to the called function.
-+ * \param when The time that the function should be called (REALTIME clock).
-+ *
-+ * \return slapi_eq_context
-+ */
-+Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) __attribute__((deprecated));
-+
-+/**
-+ * Cause an event to happen repeatedly.
-+ *
-+ * \param fn The function to call when the event is triggered.
-+ * \param arg An argument to pass to the called function.
-+ * \param when The time that the function should be called (REALTIME clock).
-+ * \param interval The amount of time (in milliseconds) between
-+ * successive calls to the function.
-+ *
-+ * \return slapi_eq_context
-+ */
-+Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) __attribute__((deprecated));
-+
-+/**
-+ * Cause a scheduled event to be canceled.
-+ *
-+ * \param ctx The event object to cancel
-+ *
-+ * \return 1 If event was found and canceled.
-+ * \return 0 If event was not found in the queue.
-+ */
-+int slapi_eq_cancel(Slapi_Eq_Context ctx) __attribute__((deprecated));
-+
-+/**
-+ * Return the event's argument.
-+ *
-+ * \param ctx The event object
-+ *
-+ * \return A pointer to the event argument.
-+ */
-+void *slapi_eq_get_arg(Slapi_Eq_Context ctx) __attribute__((deprecated));
-
- /**
- * Construct a full path and name of a plugin.
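[Editorial sketch] For callers, the practical difference between the two families is the meaning of "when": the _rel variants read it on the monotonic clock, so "now" must come from slapi_current_rel_time_t() rather than time(NULL) or 0, as the snmp_collator and uuid hunks below show. A hypothetical plugin callback scheduled against the new API (housekeeping_fn and the 30 s period are illustrative):

    #include <slapi-plugin.h>

    static void
    housekeeping_fn(time_t when __attribute__((unused)),
                    void *arg __attribute__((unused)))
    {
        /* periodic maintenance work goes here */
    }

    static Slapi_Eq_Context
    schedule_housekeeping(void)
    {
        /* MONOTONIC variant: start "now" per the relative clock and
         * repeat every 30 s (the interval argument is milliseconds).
         * The deprecated slapi_eq_repeat() instead took a REALTIME
         * time_t, where (time_t)0 effectively meant "immediately". */
        return slapi_eq_repeat_rel(housekeeping_fn, NULL,
                                   slapi_current_rel_time_t(),
                                   30 * 1000);
    }
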
-diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c -index 85dc4c9a8..53927934a 100644 ---- a/ldap/servers/slapd/slapi2runtime.c -+++ b/ldap/servers/slapd/slapi2runtime.c -@@ -133,7 +133,7 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - - - /* -- * Function: slapi_wait_condvar -+ * Function: slapi_wait_condvar (DEPRECATED) - * Description: behaves just like PR_WaitCondVar() except timeout is - * in seconds and microseconds instead of PRIntervalTime units. - * If timeout is NULL, this call blocks indefinitely. -@@ -145,9 +145,26 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) - int - slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) - { -- /* deprecated in favor of slapi_wait_condvar_pt() which requires that the -+ /* Deprecated in favor of slapi_wait_condvar_pt() which requires that the - * mutex be passed in */ -- return (0); -+ PRIntervalTime prit; -+ -+ if (cvar == NULL) { -+ return (0); -+ } -+ -+ if (timeout == NULL) { -+ prit = PR_INTERVAL_NO_TIMEOUT; -+ } else { -+ prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); -+ } -+ -+ if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { -+ return (0); -+ } -+ -+ return (1); -+ - } - - int -diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c -index 3dd3af657..d760515f4 100644 ---- a/ldap/servers/slapd/snmp_collator.c -+++ b/ldap/servers/slapd/snmp_collator.c -@@ -385,8 +385,9 @@ snmp_collator_start() - snmp_collator_init(); - - /* Arrange to be called back periodically to update the mmap'd stats file. */ -- snmp_eq_ctx = slapi_eq_repeat(snmp_collator_update, NULL, (time_t)0, -- SLAPD_SNMP_UPDATE_INTERVAL); -+ snmp_eq_ctx = slapi_eq_repeat_rel(snmp_collator_update, NULL, -+ slapi_current_rel_time_t(), -+ SLAPD_SNMP_UPDATE_INTERVAL); - return 0; - } - -@@ -411,7 +412,7 @@ snmp_collator_stop() - } - - /* Abort any pending events */ -- slapi_eq_cancel(snmp_eq_ctx); -+ slapi_eq_cancel_rel(snmp_eq_ctx); - snmp_collator_stopped = 1; - - /* acquire the semaphore */ -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 26f281cba..bded287c6 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -387,7 +387,7 @@ slapi_task_status_changed(Slapi_Task *task) - ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ - task->task_flags |= SLAPI_TASK_DESTROYING; - /* queue an event to destroy the state info */ -- slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); -+ slapi_eq_once_rel(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); - } - slapi_free_search_results_internal(pb); - slapi_pblock_destroy(pb); -diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c -index a8bd6ee6c..31384a544 100644 ---- a/ldap/servers/slapd/uuid.c -+++ b/ldap/servers/slapd/uuid.c -@@ -186,7 +186,8 @@ uuid_init(const char *configDir, const Slapi_DN *configDN, PRBool mtGen) - - /* schedule update task for multithreaded generation */ - if (_state.mtGen) -- slapi_eq_repeat(uuid_update_state, NULL, (time_t)0, UPDATE_INTERVAL); -+ slapi_eq_repeat_rel(uuid_update_state, NULL, slapi_current_rel_time_t(), -+ UPDATE_INTERVAL); - - _state.initialized = PR_TRUE; - return UUID_SUCCESS; --- -2.26.2 - diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index be63f08..4f85296 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -44,8 +44,8 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base 
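[Editorial sketch] The restored slapi_wait_condvar() body above converts the caller's struct timeval into NSPR interval ticks before handing it to PR_WaitCondVar(), with NULL still meaning "block indefinitely". That conversion in isolation (a sketch using public NSPR calls):

    #include <prinrval.h>
    #include <sys/time.h>

    /* Map the SLAPI timeout contract onto NSPR: NULL blocks forever,
     * otherwise seconds and microseconds are summed as interval ticks. */
    static PRIntervalTime
    timeval_to_interval(const struct timeval *tv)
    {
        if (tv == NULL) {
            return PR_INTERVAL_NO_TIMEOUT;
        }
        return PR_SecondsToInterval(tv->tv_sec) +
               PR_MicrosecondsToInterval(tv->tv_usec);
    }
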
-Version: 1.4.3.16 -Release: %{?relprefix}8%{?prerel}%{?dist} +Version: 1.4.3.23 +Release: %{?relprefix}2%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org Group: System Environment/Daemons @@ -54,6 +54,62 @@ Conflicts: freeipa-server < 4.0.3 Obsoletes: %{name} <= 1.4.0.9 Provides: ldif2ldbm >= 0 +##### Bundled cargo crates list - START ##### +Provides: bundled(crate(ansi_term)) = 0.11.0 +Provides: bundled(crate(atty)) = 0.2.14 +Provides: bundled(crate(autocfg)) = 1.0.1 +Provides: bundled(crate(base64)) = 0.10.1 +Provides: bundled(crate(bitflags)) = 1.2.1 +Provides: bundled(crate(byteorder)) = 1.4.2 +Provides: bundled(crate(cbindgen)) = 0.9.1 +Provides: bundled(crate(cc)) = 1.0.66 +Provides: bundled(crate(cfg-if)) = 0.1.10 +Provides: bundled(crate(cfg-if)) = 1.0.0 +Provides: bundled(crate(clap)) = 2.33.3 +Provides: bundled(crate(fernet)) = 0.1.3 +Provides: bundled(crate(foreign-types)) = 0.3.2 +Provides: bundled(crate(foreign-types-shared)) = 0.1.1 +Provides: bundled(crate(getrandom)) = 0.1.16 +Provides: bundled(crate(hermit-abi)) = 0.1.17 +Provides: bundled(crate(itoa)) = 0.4.7 +Provides: bundled(crate(lazy_static)) = 1.4.0 +Provides: bundled(crate(libc)) = 0.2.82 +Provides: bundled(crate(librnsslapd)) = 0.1.0 +Provides: bundled(crate(librslapd)) = 0.1.0 +Provides: bundled(crate(log)) = 0.4.11 +Provides: bundled(crate(openssl)) = 0.10.32 +Provides: bundled(crate(openssl-sys)) = 0.9.60 +Provides: bundled(crate(pkg-config)) = 0.3.19 +Provides: bundled(crate(ppv-lite86)) = 0.2.10 +Provides: bundled(crate(proc-macro2)) = 1.0.24 +Provides: bundled(crate(quote)) = 1.0.8 +Provides: bundled(crate(rand)) = 0.7.3 +Provides: bundled(crate(rand_chacha)) = 0.2.2 +Provides: bundled(crate(rand_core)) = 0.5.1 +Provides: bundled(crate(rand_hc)) = 0.2.0 +Provides: bundled(crate(redox_syscall)) = 0.1.57 +Provides: bundled(crate(remove_dir_all)) = 0.5.3 +Provides: bundled(crate(rsds)) = 0.1.0 +Provides: bundled(crate(ryu)) = 1.0.5 +Provides: bundled(crate(serde)) = 1.0.118 +Provides: bundled(crate(serde_derive)) = 1.0.118 +Provides: bundled(crate(serde_json)) = 1.0.61 +Provides: bundled(crate(slapd)) = 0.1.0 +Provides: bundled(crate(strsim)) = 0.8.0 +Provides: bundled(crate(syn)) = 1.0.58 +Provides: bundled(crate(tempfile)) = 3.1.0 +Provides: bundled(crate(textwrap)) = 0.11.0 +Provides: bundled(crate(toml)) = 0.5.8 +Provides: bundled(crate(unicode-width)) = 0.1.8 +Provides: bundled(crate(unicode-xid)) = 0.2.1 +Provides: bundled(crate(vcpkg)) = 0.2.11 +Provides: bundled(crate(vec_map)) = 0.8.2 +Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1 +Provides: bundled(crate(winapi)) = 0.3.9 +Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 +##### Bundled cargo crates list - END ##### + BuildRequires: nspr-devel BuildRequires: nss-devel >= 3.34 BuildRequires: perl-generators @@ -174,37 +230,22 @@ Source2: %{name}-devel.README %if %{bundle_jemalloc} Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2 %endif -Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch -Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch -Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch -Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch -Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch -Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch -Patch07: 
0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch -Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch -Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch -Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch -Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch -Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch -Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch -Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch -Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch -Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch -Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch -Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch -Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch -Patch20: 0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch -Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch -Patch22: 0022-Fix-cherry-pick-erorr.patch -Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch -Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch -Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch -Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch -Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch -Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch -Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch -Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch -Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch +%if %{use_rust} +Source4: vendor-%{version}-2.tar.gz +Source5: Cargo.lock +%endif +Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch +Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch +Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch +Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch +Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch +Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch +Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch +Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch +Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch +Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch +Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch +Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch %description 389 Directory Server is an LDAPv3 compliant server. 
The base package includes @@ -331,6 +372,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server %prep %autosetup -p1 -v -n %{name}-%{version}%{?prerel} +%if %{use_rust} +tar xvzf %{SOURCE4} +cp %{SOURCE5} src/ +%endif %if %{bundle_jemalloc} %setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3 %endif @@ -348,7 +393,7 @@ ASAN_FLAGS="--enable-asan --enable-debug" %endif %if %{use_rust} -RUST_FLAGS="--enable-rust" +RUST_FLAGS="--enable-rust --enable-rust-offline" %endif %if %{use_legacy} @@ -682,9 +727,6 @@ exit 0 %if %{bundle_jemalloc} %{_libdir}/%{pkgname}/lib/libjemalloc.so.2 %endif -%if %{use_rust} -%{_libdir}/%{pkgname}/librsds.so -%endif %if %{use_legacy} %files legacy-tools @@ -822,135 +864,23 @@ exit 0 %doc README.md %changelog -* Wed Jan 13 2021 Mark Reynolds - 1.4.3.16-8 -- Bump version to 1.4.3.16-8 -- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one -- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards - -* Thu Jan 7 2021 Mark Reynolds - 1.4.3.16-7 -- Bump version to 1.4.3.16-7 -- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine -- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN -- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries - -* Wed Dec 16 2020 Mark Reynolds - 1.4.3.16-6 -- Bump version to 1.4.3.16-6 -- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0) -- Resolves: Bug 1904991 - Unexpected info returned to ldap request -- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix -- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname. 
- -* Wed Dec 9 2020 Mark Reynolds - 1.4.3.16-5 -- Bump version to 1.4.3.16-5 -- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV -- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested -- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie - -* Thu Dec 3 2020 Mark Reynolds - 1.4.3.16-4 -- Bump version to 1.4.3.16-4 -- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand -- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf -- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix - -* Wed Nov 25 2020 Mark Reynolds - 1.4.3.16-3 -- Bump version to 1.4.3.16-3 -- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema -- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection -- Resolves: Bug 1898850 - Entries conflict not resolved by replication - -* Thu Nov 19 2020 Mark Reynolds - 1.4.3.16-2 -- Bump version to 1.4.3.16-2 -- Resolves: Bug 1859227 - create keep alive entry after on line init -- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32 -- Resolves: Bug 1859228 - do not add referrals for masters with different data generation - -* Mon Oct 26 2020 Mark Reynolds - 1.4.3.16-1 -- Bump version to 1.4.3.16-1 -- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1859225 - suffix management in backends incorrect - -* Mon Oct 26 2020 Mark Reynolds - 1.4.3.14-1 -- Bump version to 1.4.3.14-1 -- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4 -- Resolves: Bug 1859301 - Misleading message in access log for idle timeout -- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results -- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked' -- Resolves: Bug 1859878 - Managed Entries configuration not being enforced -- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend -- Resolves: Bug 1851967 - if dbhome directory is set online backup fails -- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested -- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber -- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie -- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection -- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket -- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode -- Resolves: Bug 1859282 - remove ldbm_back_entry_release -- Resolves: Bug 1859225 - suffix management in backends incorrect -- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions -- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema -- Resolves: Bug 1851975 - Add option to reject internal unindexed searches -- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work -- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time -- Resolves: Bug 1848359 - Add failover credentials to replication agreement -- 
Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix - -* Wed Aug 5 2020 Mark Reynolds - 1.4.3.8-5 -- Bump version to 1.4.3.8-5 -- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version -- Resolves: Bug 1800529 - Memory leaks in disk monitoring -- Resolves: Bug 1748227 - Instance name length is not enforced -- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package - -* Fri Jun 26 2020 Mark Reynolds - 1.4.3.8-4 -- Bump version to 1.4.3.8-4 -- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif -- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value -- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry -- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-' -- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands -- Resolves: Bug 1843567 - Healthcheck to find notes=F -- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset" -- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time -- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned -- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value -- Resolves: Bug 1800529 - Memory leaks in disk monitoring - -* Fri Jun 5 2020 Mark Reynolds - 1.4.3.8-3 -- Bump version to 1.4.3.8-3 -- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted -- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server -- Resolves: Bug 1843090 - abort when a empty valueset is freed -- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry -- Resolves: Bug 1843157 - Check for clock errors and time skew -- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory -- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used -- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports -- Resolves: Bug 1748227 - Instance name length is not enforced -- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name - -* Mon May 11 2020 Mark Reynolds - 1.4.3.8-2 -- Bump version to 1.4.3.8-2 -- Resolves: Bug 1833350 - Remove cockpit dependancies that are breaking builds - -* Mon May 11 2020 Mark Reynolds - 1.4.3.8-1 -- Bump version to 1.4.3.8-1 -- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3 -- Resolves: Bug 1728943 - [RFE] Advance options in RHDS Disk Monitoring Framework -- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset" -- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace -- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev -- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package -- Resolves: Bug 1798394 - Port dbgen from legacy tools package -- Resolves: Bug 1800529 - Memory leaks in disk monitoring -- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console -- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 
2 were given -- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value -- Resolves: Bug 1816857 - Searches on cn=config takes values with spaces and makes multiple attributes out of them -- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr() -- Resolves: Bug 1816862 - Memory leak in indirect COS -- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled -- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA -- Resolves: Bug 1790986 - cenotaph errors on modrdn operations -- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1 -- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init +* Sun May 30 2021 Mark Reynolds - 1.4.3.23-2 +- Bump version to 1.4.3.23-2 +- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks ) +- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time +- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids + +* Fri May 14 2021 Mark Reynolds - 1.4.3.23-1 +- Bump version to 1.4.3.23-1 +- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5 +- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes +- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor) +- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN) +- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized +- Resolves: Bug 1924848 - Negative wtime on ldapcompare +- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value +- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex +- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock +- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration +- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control() +