From 27c13873172626cffc953d982a94e89b109ec273 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Dec 21 2020 08:22:36 +0000 Subject: import 389-ds-base-1.4.3.16-6.module+el8.4.0+9207+729bbaca --- diff --git a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch new file mode 100644 index 0000000..17de2c9 --- /dev/null +++ b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch @@ -0,0 +1,127 @@ +From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Tue, 24 Nov 2020 19:22:49 +0100 +Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve + database RUV - consumer (Unavailable) (#4451) + +Bug Description: + +"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this +appears into the Cockpit web UI too. +The problem is that the bind credentials are not rightly propagated when trying to get +the consumers agreement status. Then supplier credntials are used instead and RUV +is searched anonymously because there is no bind dn in ldapi case. + +Fix Description: + +- Propagates the bind credentials when computing agreement status +- Add a credential cache because now a replica password could get asked several times: + when discovering the topology and + when getting the agreement maxcsn +- No testcase in 1.4.3 branch as the file modfied in master does not exists + +- Add a comment about nonlocal keyword + +Relates: #4449 + +Reviewers: + firstyear + droideck + mreynolds + +Issue 4449: Add a comment about nonlocal keyword + +(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab) +--- + src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++-- + src/lib389/lib389/replica.py | 16 ++++++++++++---- + 2 files changed, 23 insertions(+), 6 deletions(-) + +diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py +index 9dbaa320a..248972cba 100644 +--- a/src/lib389/lib389/cli_conf/replication.py ++++ b/src/lib389/lib389/cli_conf/replication.py +@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args): + + def get_repl_monitor_info(inst, basedn, log, args): + connection_data = dsrc_to_repl_monitor(DSRC_HOME, log) ++ credentials_cache = {} + + # Additional details for the connections to the topology + def get_credentials(host, port): ++ # credentials_cache is nonlocal to refer to the instance ++ # from enclosing function (get_repl_monitor_info)` ++ nonlocal credentials_cache ++ key = f'{host}:{port}' ++ if key in credentials_cache: ++ return credentials_cache[key] + found = False + if args.connections: + connections = args.connections +@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args): + binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip() + bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip() + +- return {"binddn": binddn, +- "bindpw": bindpw} ++ credentials = {"binddn": binddn, ++ "bindpw": bindpw} ++ credentials_cache[key] = credentials ++ return credentials + + repl_monitor = ReplicationMonitor(inst) + report_dict = repl_monitor.generate_report(get_credentials, args.json) +diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py +index c2ad2104d..3d89e61fb 100644 +--- a/src/lib389/lib389/replica.py ++++ b/src/lib389/lib389/replica.py +@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object): + else: + self._log = 
logging.getLogger(__name__) + +- def _get_replica_status(self, instance, report_data, use_json): ++ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None): + """Load all of the status data to report + and add new hostname:port pairs for future processing ++ :type get_credentials: function + """ + + replicas_status = [] +@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object): + for agmt in agmts.list(): + host = agmt.get_attr_val_utf8_l("nsds5replicahost") + port = agmt.get_attr_val_utf8_l("nsds5replicaport") ++ if get_credentials is not None: ++ credentials = get_credentials(host, port) ++ binddn = credentials["binddn"] ++ bindpw = credentials["bindpw"] ++ else: ++ binddn = instance.binddn ++ bindpw = instance.bindpw + protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo') + # Supply protocol here because we need it only for connection + # and agreement status is already preformatted for the user output +@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object): + if consumer not in report_data: + report_data[f"{consumer}:{protocol}"] = None + if use_json: +- agmts_status.append(json.loads(agmt.status(use_json=True))) ++ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw))) + else: +- agmts_status.append(agmt.status()) ++ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw)) + replicas_status.append({"replica_id": replica_id, + "replica_root": replica_root, + "replica_status": "Available", +@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object): + initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}" + # Do this on an initial instance to get the agreements to other instances + try: +- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json) ++ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials) + except ldap.LDAPError as e: + self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}") + report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}] +-- +2.26.2 + diff --git a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch new file mode 100644 index 0000000..70974ce --- /dev/null +++ b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch @@ -0,0 +1,63 @@ +From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 30 Nov 2020 09:03:33 +0100 +Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong + cookie (#4467) + +Bug description: + This test case was incorrect. + During a refreshPersistent search, a cookie is sent + with the intermediate message that indicates the end of the refresh phase. + Then a second cookie is sent on the updated entry (group10) + I believed this test was successful some time ago but neither python-ldap + nor sync_repl changed (intermediate sent in post refresh). 
+ So the testcase was never successful :( + +Fix description: + The fix is just to take into account the two expected cookies + +relates: https://github.com/389ds/389-ds-base/issues/4243 + +Reviewed by: Mark Reynolds + +Platforms tested: F31 +--- + .../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +index 79ec374bc..7b35537d5 100644 +--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py ++++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request): + sync_repl.start() + time.sleep(5) + +- # Add a test group just to check that sync_repl receives only one update ++ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie + group.append(groups.create(properties={'cn': 'group%d' % 10})) + + # create users, that automember/memberof will generate nested updates +@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request): + time.sleep(10) + cookies = sync_repl.get_result() + +- # checking that the cookie list contains only one entry +- assert len(cookies) == 1 +- prev = 0 ++ # checking that the cookie list contains only two entries ++ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh ++ # the the one from SyncStateControl related to the only updated entry (group10) ++ assert len(cookies) == 2 ++ prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + +- assert int(cookie) > 0 ++ assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) +-- +2.26.2 + diff --git a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch new file mode 100644 index 0000000..13a64c2 --- /dev/null +++ b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch @@ -0,0 +1,254 @@ +From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001 +From: Pierre Rogier +Date: Mon, 30 Nov 2020 12:42:17 +0100 +Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449) + in 1.4.3 branch + +--- + .../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++ + 1 file changed, 234 insertions(+) + create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +new file mode 100644 +index 000000000..b03d170c8 +--- /dev/null ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -0,0 +1,234 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import time ++import subprocess ++import pytest ++ ++from lib389.cli_conf.replication import get_repl_monitor_info ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_m2 ++from lib389.cli_base import FakeArgs ++from lib389.cli_base.dsrc import dsrc_arg_concat ++from lib389.cli_base import connect_instance ++ ++pytestmark = pytest.mark.tier0 ++ ++LOG_FILE = '/tmp/monitor.log' ++logging.getLogger(__name__).setLevel(logging.DEBUG) ++log = logging.getLogger(__name__) ++ ++ ++@pytest.fixture(scope="function") ++def set_log_file(request): ++ fh = logging.FileHandler(LOG_FILE) ++ fh.setLevel(logging.DEBUG) ++ log.addHandler(fh) ++ ++ def fin(): ++ log.info('Delete files') ++ os.remove(LOG_FILE) ++ ++ config = os.path.expanduser(DSRC_HOME) ++ if os.path.exists(config): ++ os.remove(config) ++ ++ request.addfinalizer(fin) ++ ++ ++def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): ++ with open(LOG_FILE, 'r+') as f: ++ file_content = f.read() ++ ++ for item in content_list: ++ log.info('Check that "{}" is present'.format(item)) ++ assert item in file_content ++ ++ if second_list is not None: ++ log.info('Check for "{}"'.format(second_list)) ++ for item in second_list: ++ assert item in file_content ++ ++ if single_value is not None: ++ log.info('Check for "{}"'.format(single_value)) ++ assert single_value in file_content ++ ++ if error_list is not None: ++ log.info('Check that "{}" is not present'.format(error_list)) ++ for item in error_list: ++ assert item not in file_content ++ ++ log.info('Reset log file') ++ f.truncate(0) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1739718 ++@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") ++def test_dsconf_replication_monitor(topology_m2, set_log_file): ++ """Test replication monitor that was ported from legacy tools ++ ++ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 ++ :setup: 2 MM topology ++ :steps: ++ 1. Create DS instance ++ 2. Run replication monitor with connections option ++ 3. Run replication monitor with aliases option ++ 4. Run replication monitor with --json option ++ 5. Run replication monitor with .dsrc file created ++ 6. Run replication monitor with connections option as if using dsconf CLI ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. 
Success ++ """ ++ ++ m1 = topology_m2.ms["master1"] ++ m2 = topology_m2.ms["master2"] ++ ++ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] ++ ++ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) ++ content_list = ['Replica Root: dc=example,dc=com', ++ 'Replica ID: 1', ++ 'Replica Status: Available', ++ 'Max CSN', ++ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', ++ 'Replica Enabled: on', ++ 'Update In Progress: FALSE', ++ 'Last Update Start:', ++ 'Last Update End:', ++ 'Number Of Changes Sent:', ++ 'Number Of Changes Skipped: None', ++ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', ++ 'Last Init Start:', ++ 'Last Init End:', ++ 'Last Init Status:', ++ 'Reap Active: 0', ++ 'Replication Status: In Synchronization', ++ 'Replication Lag Time:', ++ 'Supplier: ', ++ m2.host + ':' + str(m2.port), ++ 'Replica Root: dc=example,dc=com', ++ 'Replica ID: 2', ++ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] ++ ++ error_list = ['consumer (Unavailable)', ++ 'Failed to retrieve database RUV entry from consumer'] ++ ++ json_list = ['type', ++ 'list', ++ 'items', ++ 'name', ++ m1.host + ':' + str(m1.port), ++ 'data', ++ '"replica_id": "1"', ++ '"replica_root": "dc=example,dc=com"', ++ '"replica_status": "Available"', ++ 'maxcsn', ++ 'agmts_status', ++ 'agmt-name', ++ '002', ++ 'replica', ++ m2.host + ':' + str(m2.port), ++ 'replica-enabled', ++ 'update-in-progress', ++ 'last-update-start', ++ 'last-update-end', ++ 'number-changes-sent', ++ 'number-changes-skipped', ++ 'last-update-status', ++ 'Error (0) Replica acquired successfully: Incremental update succeeded', ++ 'last-init-start', ++ 'last-init-end', ++ 'last-init-status', ++ 'reap-active', ++ 'replication-status', ++ 'In Synchronization', ++ 'replication-lag-time', ++ '"replica_id": "2"', ++ '001', ++ m1.host + ':' + str(m1.port)] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + m2.host + ':' + str(m2.port) ++ ++ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, ++ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] ++ ++ aliases = ['M1=' + m1.host + ':' + str(m1.port), ++ 'M2=' + m2.host + ':' + str(m2.port)] ++ ++ args = FakeArgs() ++ args.connections = connections ++ args.aliases = None ++ args.json = False ++ ++ log.info('Run replication monitor with connections option') ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) ++ ++ log.info('Run replication monitor with aliases option') ++ args.aliases = aliases ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, alias_content) ++ ++ log.info('Run replication monitor with --json option') ++ args.aliases = None ++ args.json = True ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(json_list) ++ ++ with open(os.path.expanduser(DSRC_HOME), 'w+') as f: ++ f.write(dsrc_content) ++ ++ args.connections = None ++ args.aliases = None ++ args.json = False ++ ++ log.info('Run replication monitor when .dsrc file is present with content') ++ 
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, alias_content) ++ os.remove(os.path.expanduser(DSRC_HOME)) ++ ++ log.info('Run replication monitor with connections option as if using dsconf CLI') ++ # Perform same test than steps 2 test but without using directly the topology instance. ++ # but with an instance similar to those than dsconf cli generates: ++ # step 2 args ++ args.connections = connections ++ args.aliases = None ++ args.json = False ++ # args needed to generate an instance with dsrc_arg_concat ++ args.instance = 'master1' ++ args.basedn = None ++ args.binddn = None ++ args.bindpw = None ++ args.pwdfile = None ++ args.prompt = False ++ args.starttls = False ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, True, args) ++ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.2 + diff --git a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch new file mode 100644 index 0000000..74aa5aa --- /dev/null +++ b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch @@ -0,0 +1,100 @@ +From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 26 Nov 2020 09:08:13 +1000 +Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy + +Bug Description: Due to some changes in dsrc for tlsreqcert +and how def open was structured in lib389, the system ldap.conf +policy was ignored. + +Fix Description: Default to using the system ldap.conf policy +if undefined in lib389 or the tls_reqcert param in dsrc. + +fixes: #4460 + +Author: William Brown + +Review by: ??? +--- + src/lib389/lib389/__init__.py | 11 +++++++---- + src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++------- + 2 files changed, 16 insertions(+), 11 deletions(-) + +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 99ea9cc6a..4e6a1905a 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object): + # Now, we are still an allocated ds object so we can be re-installed + self.state = DIRSRV_STATE_ALLOCATED + +- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD, ++ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None, + usercert=None, userkey=None): + ''' + It opens a ldap bound connection to dirsrv so that online +@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object): + try: + # Note this sets LDAP.OPT not SELF. 
Because once self has opened + # it can NOT change opts on reused (ie restart) +- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) +- self.log.debug("Using certificate policy %s", reqcert) +- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert) ++ if reqcert is not None: ++ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) ++ self.log.debug("Using lib389 certificate policy %s", reqcert) ++ else: ++ self.log.debug("Using /etc/openldap/ldap.conf certificate policy") ++ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)) + except ldap.LDAPError as e: + self.log.fatal('TLS negotiation failed: %s', e) + raise e +diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py +index fec18a5f9..9b09ea568 100644 +--- a/src/lib389/lib389/cli_base/dsrc.py ++++ b/src/lib389/lib389/cli_base/dsrc.py +@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst): + 'tls_cacertdir': None, + 'tls_cert': None, + 'tls_key': None, +- 'tls_reqcert': ldap.OPT_X_TLS_HARD, ++ 'tls_reqcert': None, + 'starttls': args.starttls, + 'prompt': False, + 'pwdfile': None, +@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log): + dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None) + dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None) + if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']: +- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) ++ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) + + dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None) + # At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause +@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log): + + dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None) + dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None) +- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard') +- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']: +- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, +- path)) ++ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None) + if dsrc_inst['tls_reqcert'] == 'never': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER + elif dsrc_inst['tls_reqcert'] == 'allow': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW +- else: ++ elif dsrc_inst['tls_reqcert'] == 'hard': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD ++ elif dsrc_inst['tls_reqcert'] is None: ++ # Use system value ++ pass ++ else: ++ raise ValueError("dsrc tls_reqcert value invalid. 
%s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path)) + dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False) + dsrc_inst['pwdfile'] = None + dsrc_inst['prompt'] = False +-- +2.26.2 + diff --git a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch new file mode 100644 index 0000000..16637bb --- /dev/null +++ b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch @@ -0,0 +1,60 @@ +From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 12 Nov 2020 13:04:21 +1000 +Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes + sigsegv in chaining + +Bug Description: When a paged search through chaining backend is +received with a false criticality (such as SSSD), chaining backend +will sigsegv due to a null context. + +Fix Description: When a NULL ctx is recieved to be freed, this is +as paged results have finished being sent, so we check the NULL +ctx and move on. + +fixes: #4428 + +Author: William Brown + +Review by: @droideck, @mreynolds389 +--- + ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++ + ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++ + 2 files changed, 10 insertions(+) + +diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c +index 69d23a6b5..d47cbc8e4 100644 +--- a/ldap/servers/plugins/chainingdb/cb_search.c ++++ b/ldap/servers/plugins/chainingdb/cb_search.c +@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr) + + slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, + "chaining_back_search_results_release\n"); ++ if (ctx == NULL) { ++ /* The paged search is already complete, just return */ ++ /* Could we have a ctx state flag instead? */ ++ return; ++ } ++ + if (ctx->readahead != ctx->tobefreed) { + slapi_entry_free(ctx->readahead); + } +diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c +index dfd5dd92c..d52fd25a6 100644 +--- a/ldap/servers/plugins/chainingdb/cb_utils.c ++++ b/ldap/servers/plugins/chainingdb/cb_utils.c +@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c + return LDAP_SUCCESS; + } + ++#ifdef DEBUG ++static int debug_on = 1; ++#else + static int debug_on = 0; ++#endif + + int + cb_debug_on() +-- +2.26.2 + diff --git a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch new file mode 100644 index 0000000..de8c8a8 --- /dev/null +++ b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch @@ -0,0 +1,50 @@ +From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 7 Dec 2020 00:41:27 +0100 +Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate + of setsocketopt (#4437) + +Bug description: + When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered + until buffer is full or tcp_cork is set. This reduce network traffic when + the application writes partial pdu. + DS write complete pdu (results/entries/..) so it gives low benefit for DS. + In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send + immediately results/entries at each operation. This is an overhead of syscalls. 
+ +Fix description: + Disable nagle by default + +relates: https://github.com/389ds/389-ds-base/issues/4315 + +Reviewed by: @mreynolds389, @Firstyear + +Platforms tested: F33 +--- + ldap/servers/slapd/libglobs.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 7d5374c90..f8cf162e6 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -1635,12 +1635,11 @@ FrontendConfig_init(void) + #endif /* USE_SYSCONF */ + + init_accesscontrol = cfg->accesscontrol = LDAP_ON; +-#if defined(LINUX) +- /* On Linux, by default, we use TCP_CORK so we must enable nagle */ +- init_nagle = cfg->nagle = LDAP_ON; +-#else ++ ++ /* nagle triggers set/unset TCP_CORK setsockopt per operation ++ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork ++ */ + init_nagle = cfg->nagle = LDAP_OFF; +-#endif + init_security = cfg->security = LDAP_OFF; + init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON; + cfg->tls_check_crl = TLS_CHECK_NONE; +-- +2.26.2 + diff --git a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch new file mode 100644 index 0000000..a2cb4bd --- /dev/null +++ b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch @@ -0,0 +1,39 @@ +From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Fri, 4 Dec 2020 10:14:33 +1000 +Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in + SSCA (#4472) + +Bug Description: During SSCA creation, the server cert did not have +the machine name, which meant that the cert would not work without +reqcert = never. + +Fix Description: Add the machine name as an alt name during SSCA +creation. It is not guaranteed this value is correct, but it +is better than nothing. + +relates: https://github.com/389ds/389-ds-base/issues/4460 + +Author: William Brown + +Review by: mreynolds389, droideck +--- + src/lib389/lib389/instance/setup.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 7d42ba292..e46f2d1e5 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -887,7 +887,7 @@ class SetupDs(object): + tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir)) + tlsdb_inst.import_rsa_crt(ca) + +- csr = tlsdb.create_rsa_key_and_csr() ++ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']]) + (ca, crt) = ssca.rsa_ca_sign_csr(csr) + tlsdb.import_rsa_crt(ca, crt) + if general['selinux']: +-- +2.26.2 + diff --git a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch new file mode 100644 index 0000000..067d06e --- /dev/null +++ b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch @@ -0,0 +1,50 @@ +From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 9 Dec 2020 09:52:08 -0500 +Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix + +Description: heap-use-after-free in slapi_be_getsuffix after disk + monitoring runs. This feature is freeing a list of + backends which it does not need to do. + +Fixes: https://github.com/389ds/389-ds-base/issues/4483 + +Reviewed by: firstyear & tbordaz(Thanks!!) 
+--- + ldap/servers/slapd/daemon.c | 13 +------------ + 1 file changed, 1 insertion(+), 12 deletions(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 49199e4df..691f77570 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + now = start; + while ((now - start) < grace_period) { + if (g_get_shutdown()) { +- be_index = 0; +- if (be_list[be_index] != NULL) { +- while ((be = be_list[be_index++])) { +- slapi_be_free(&be); +- } +- } + slapi_ch_array_free(dirs); + dirs = NULL; + return; +@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + } + } + } +- be_index = 0; +- if (be_list[be_index] != NULL) { +- while ((be = be_list[be_index++])) { +- slapi_be_free(&be); +- } +- } ++ + slapi_ch_array_free(dirs); + dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ + g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); +-- +2.26.2 + diff --git a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch new file mode 100644 index 0000000..9acd229 --- /dev/null +++ b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch @@ -0,0 +1,65 @@ +From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 16 Dec 2020 16:30:28 +0100 +Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491) + +Bug description: + If the bind entry does not exist, the bind result info + reports that 'No such entry'. It should not give any + information if the target entry exists or not + +Fix description: + Does not return any additional information during a bind + +relates: https://github.com/389ds/389-ds-base/issues/4480 + +Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) + +Platforms tested: F31 +--- + dirsrvtests/tests/suites/basic/basic_test.py | 1 - + ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +- + ldap/servers/slapd/result.c | 2 +- + 3 files changed, 2 insertions(+), 3 deletions(-) + +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 120207321..1ae82dcdd 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): + assert not dscreate_long_instance.exists() + + +- + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 3fe86d567..10cef250f 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)), + if (attrs) { + for (size_t i = 0; attrs[i]; i++) { + if (ldbm_config_moved_attr(attrs[i])) { +- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); ++ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); + break; + } + } +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 9daf3b151..ab0d79454 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -355,7 
+355,7 @@ send_ldap_result_ext( + if (text) { + pbtext = text; + } else { +- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext); ++ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext); + } + + if (operation == NULL) { +-- +2.26.2 + diff --git a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch new file mode 100644 index 0000000..6de8b9e --- /dev/null +++ b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch @@ -0,0 +1,108 @@ +From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Wed, 16 Dec 2020 16:21:35 +0100 +Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor + (#4505) + +(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2) +--- + .../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------ + 1 file changed, 36 insertions(+), 14 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index b03d170c8..eb18d2da2 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,6 +9,7 @@ + import time + import subprocess + import pytest ++import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + ++def get_hostnames_from_log(port1, port2): ++ # Get the supplier host names as displayed in replication monitor output ++ with open(LOG_FILE, 'r') as logfile: ++ logtext = logfile.read() ++ # search for Supplier :hostname:port ++ # and use \D to insure there is no more number is after ++ # the matched port (i.e that 10 is not matching 101) ++ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m1 = 'localhost.localdomain' ++ if (match is not None): ++ host_m1 = match.group(2) ++ # Same for master 2 ++ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m2 = 'localhost.localdomain' ++ if (match is not None): ++ host_m2 = match.group(2) ++ return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + +- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', +- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] +- + connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) + content_list = ['Replica Root: dc=example,dc=com', + 'Replica ID: 1', +@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + m2.host + ':' + str(m2.port) +- + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + +- aliases = ['M1=' + m1.host + ':' + str(m1.port), +- 'M2=' + m2.host + ':' + 
str(m2.port)] +- + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + ++ # Prepare the data for next tests ++ aliases = ['M1=' + host_m1 + ':' + str(m1.port), ++ 'M2=' + host_m2 + ':' + str(m2.port)] ++ ++ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + host_m2 + ':' + str(m2.port) ++ + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index 54de896..92f8d05 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -45,7 +45,7 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base Version: 1.4.3.16 -Release: %{?relprefix}4%{?prerel}%{?dist} +Release: %{?relprefix}6%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org Group: System Environment/Daemons @@ -183,7 +183,16 @@ Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch - +Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch +Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch +Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch +Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch +Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch +Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch +Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch +Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch %description 389 Directory Server is an LDAPv3 compliant server. The base package includes @@ -801,6 +810,20 @@ exit 0 %doc README.md %changelog +* Wed Dec 16 2020 Mark Reynolds - 1.4.3.16-6 +- Bump version to 1.4.3.16-6 +- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0) +- Resolves: Bug 1904991 - Unexpected info returned to ldap request +- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix +- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname. 
+
+* Wed Dec 9 2020 Mark Reynolds - 1.4.3.16-5
+- Bump version to 1.4.3.16-5
+- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV
+- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested
+- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber
+- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
+
 * Thu Dec 3 2020 Mark Reynolds - 1.4.3.16-4
 - Bump version to 1.4.3.16-4
 - Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand