From a6e2d819ace6b0cf835908cf5662c929a79cb8e0 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Apr 28 2020 08:50:52 +0000 Subject: import ipa-4.8.4-7.module+el8.2.0+6046+aaa49f96 --- diff --git a/.gitignore b/.gitignore index 1aa675e..5a98fc1 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/freeipa-4.8.0.tar.gz +SOURCES/freeipa-4.8.4.tar.gz diff --git a/.ipa.metadata b/.ipa.metadata index 965cc09..8d08b2e 100644 --- a/.ipa.metadata +++ b/.ipa.metadata @@ -1 +1 @@ -4cbc1d041eef6d9f5381bdfcfebf9a96d40b94cb SOURCES/freeipa-4.8.0.tar.gz +72c91f01b2039795223417dc6761edf8ee0f36ee SOURCES/freeipa-4.8.4.tar.gz diff --git a/README.debrand b/README.debrand deleted file mode 100644 index 01c46d2..0000000 --- a/README.debrand +++ /dev/null @@ -1,2 +0,0 @@ -Warning: This package was configured for automatic debranding, but the changes -failed to apply. diff --git a/SOURCES/0001-DNS-install-check-Fix-overlapping-DNS-zone-from-the-master-itself_2c2cef7_rhbz#1784003.patch b/SOURCES/0001-DNS-install-check-Fix-overlapping-DNS-zone-from-the-master-itself_2c2cef7_rhbz#1784003.patch new file mode 100644 index 0000000..9fba237 --- /dev/null +++ b/SOURCES/0001-DNS-install-check-Fix-overlapping-DNS-zone-from-the-master-itself_2c2cef7_rhbz#1784003.patch @@ -0,0 +1,47 @@ +From 2c2cef7063315766d893b275185b422be3f3c019 Mon Sep 17 00:00:00 2001 +From: Thomas Woerner +Date: Dec 16 2019 20:37:17 +0000 +Subject: DNS install check: Fix overlapping DNS zone from the master itself + + +The change to allow overlapping zone to be from the master itself has +introduced two issues: The check for the master itself should only executed +if options.force and options.allow_zone_overlap are both false and the +reverse zone check later on was still handling ValueError instead of +dnsutil.DNSZoneAlreadyExists. + +Both issues have been fixed and the deployment with existing name servers +is properly working again. + +Fixes: https://pagure.io/freeipa/issue/8150 +Signed-off-by: Thomas Woerner +Reviewed-By: Florence Blanc-Renaud + +--- + +diff --git a/ipaserver/install/dns.py b/ipaserver/install/dns.py +index 36ba6f8..9f08e86 100644 +--- a/ipaserver/install/dns.py ++++ b/ipaserver/install/dns.py +@@ -135,15 +135,15 @@ def install_check(standalone, api, replica, options, hostname): + logger.warning("%s Please make sure that the domain is " + "properly delegated to this IPA server.", + e) +- +- hst = dnsutil.DNSName(hostname).make_absolute().to_text() +- if hst not in e.kwargs['ns']: +- raise ValueError(str(e)) ++ else: ++ hst = dnsutil.DNSName(hostname).make_absolute().to_text() ++ if hst not in e.kwargs['ns']: ++ raise ValueError(str(e)) + + for reverse_zone in options.reverse_zones: + try: + dnsutil.check_zone_overlap(reverse_zone) +- except ValueError as e: ++ except dnsutil.DNSZoneAlreadyExists as e: + if options.force or options.allow_zone_overlap: + logger.warning('%s', str(e)) + else: + diff --git a/SOURCES/0001-No-need-to-call-rhel-specific-domainname-service.patch b/SOURCES/0001-No-need-to-call-rhel-specific-domainname-service.patch deleted file mode 100644 index bcc4ceb..0000000 --- a/SOURCES/0001-No-need-to-call-rhel-specific-domainname-service.patch +++ /dev/null @@ -1,32 +0,0 @@ -From b3378c32603e83ea3d4651cee3af99e644a30457 Mon Sep 17 00:00:00 2001 -From: Rob Crittenden -Date: Fri, 20 Jul 2018 11:06:55 -0400 -Subject: [PATCH] No need to call rhel-specific domainname service - -It was moved upstream into hostname package which named it -nis-domainname. 
When it was in the initscripts package there were -separate fedora-domainname and rhel-domainname services. - -From F29+ it will be nis-domainname. We can use that as well in -RHEL 8. ---- - ipaplatform/rhel/services.py | 3 --- - 1 file changed, 3 deletions(-) - -diff --git a/ipaplatform/rhel/services.py b/ipaplatform/rhel/services.py -index 1403d08..06fa633 100644 ---- a/ipaplatform/rhel/services.py -+++ b/ipaplatform/rhel/services.py -@@ -30,9 +30,6 @@ from ipaplatform.redhat import services as redhat_services - # to their actual systemd service names - rhel_system_units = redhat_services.redhat_system_units - --# Service that sets domainname on RHEL is called rhel-domainname.service --rhel_system_units['domainname'] = 'rhel-domainname.service' -- - - # Service classes that implement RHEL-specific behaviour - --- -2.13.6 - diff --git a/SOURCES/0002-Fix-test_webui.test_selinuxusermap.patch b/SOURCES/0002-Fix-test_webui.test_selinuxusermap.patch deleted file mode 100644 index ab3d133..0000000 --- a/SOURCES/0002-Fix-test_webui.test_selinuxusermap.patch +++ /dev/null @@ -1,124 +0,0 @@ -From 96af5394c210e637a5ab81d6925be3b0a429fc08 Mon Sep 17 00:00:00 2001 -From: Stanislav Levin -Date: Fri, 5 Jul 2019 14:39:17 +0300 -Subject: [PATCH] Fix `test_webui.test_selinuxusermap` - -A previous refactoring of SELinux tests has have a wrong -assumption about the user field separator within -ipaSELinuxUserMapOrder. That was '$$', but should be just '$'. - -Actually, '.ldif' and '.update' files are passed through -Python template string substitution: - -> $$ is an escape; it is replaced with a single $. -> $identifier names a substitution placeholder matching -> a mapping key of "identifier" - -This means that the text to be substituted on should not be escaped. -The wrong ipaSELinuxUserMapOrder previously set will be replaced on -upgrade. 
- -Fixes: https://pagure.io/freeipa/issue/7996 -Fixes: https://pagure.io/freeipa/issue/8005 -Signed-off-by: Stanislav Levin -Reviewed-By: Florence Blanc-Renaud ---- - install/updates/50-ipaconfig.update | 1 + - ipaplatform/base/constants.py | 10 +++++----- - ipaserver/install/ldapupdate.py | 3 +++ - ipatests/test_integration/test_winsyncmigrate.py | 2 +- - ipatests/test_webui/data_selinuxusermap.py | 4 ++-- - ipatests/test_xmlrpc/test_selinuxusermap_plugin.py | 4 ++-- - 6 files changed, 14 insertions(+), 10 deletions(-) - -diff --git a/install/updates/50-ipaconfig.update b/install/updates/50-ipaconfig.update -index 2e1c5c357..35e154b4e 100644 ---- a/install/updates/50-ipaconfig.update -+++ b/install/updates/50-ipaconfig.update -@@ -1,4 +1,5 @@ - dn: cn=ipaConfig,cn=etc,$SUFFIX -+replace: ipaSELinuxUserMapOrder: guest_u:s0$$$$xguest_u:s0$$$$user_u:s0$$$$staff_u:s0-s0:c0.c1023$$$$sysadm_u:s0-s0:c0.c1023$$$$unconfined_u:s0-s0:c0.c1023::$SELINUX_USERMAP_ORDER - replace: ipaSELinuxUserMapOrder: ipaSELinuxUserMapOrder: guest_u:s0$$xguest_u:s0$$user_u:s0$$staff_u:s0-s0:c0.c1023$$unconfined_u:s0-s0:c0.c1023::guest_u:s0$$xguest_u:s0$$user_u:s0$$staff_u:s0-s0:c0.c1023$$unconfined_u:s0-s0:c0.c1023 - replace: ipaSELinuxUserMapOrder: guest_u:s0$$xguest_u:s0$$user_u:s0-s0:c0.c1023$$staff_u:s0-s0:c0.c1023$$unconfined_u:s0-s0:c0.c1023::guest_u:s0$$xguest_u:s0$$user_u:s0$$staff_u:s0-s0:c0.c1023$$unconfined_u:s0-s0:c0.c1023 - add:ipaSELinuxUserMapDefault: $SELINUX_USERMAP_DEFAULT -diff --git a/ipaplatform/base/constants.py b/ipaplatform/base/constants.py -index cdb72e74a..eac60cac3 100644 ---- a/ipaplatform/base/constants.py -+++ b/ipaplatform/base/constants.py -@@ -62,11 +62,11 @@ class BaseConstantsNamespace: - SELINUX_USERMAP_DEFAULT = "unconfined_u:s0-s0:c0.c1023" - SELINUX_USERMAP_ORDER = ( - "guest_u:s0" -- "$$xguest_u:s0" -- "$$user_u:s0" -- "$$staff_u:s0-s0:c0.c1023" -- "$$sysadm_u:s0-s0:c0.c1023" -- "$$unconfined_u:s0-s0:c0.c1023" -+ "$xguest_u:s0" -+ "$user_u:s0" -+ "$staff_u:s0-s0:c0.c1023" -+ "$sysadm_u:s0-s0:c0.c1023" -+ "$unconfined_u:s0-s0:c0.c1023" - ) - SSSD_USER = "sssd" - # WSGI module override, only used on Fedora -diff --git a/ipaserver/install/ldapupdate.py b/ipaserver/install/ldapupdate.py -index d9e47dcc0..0cdea6a82 100644 ---- a/ipaserver/install/ldapupdate.py -+++ b/ipaserver/install/ldapupdate.py -@@ -322,6 +322,9 @@ class LDAPUpdate: - if not self.sub_dict.get("SELINUX_USERMAP_DEFAULT"): - self.sub_dict["SELINUX_USERMAP_DEFAULT"] = \ - platformconstants.SELINUX_USERMAP_DEFAULT -+ if not self.sub_dict.get("SELINUX_USERMAP_ORDER"): -+ self.sub_dict["SELINUX_USERMAP_ORDER"] = \ -+ platformconstants.SELINUX_USERMAP_ORDER - self.api = create_api(mode=None) - self.api.bootstrap(in_server=True, - context='updates', -diff --git a/ipatests/test_integration/test_winsyncmigrate.py b/ipatests/test_integration/test_winsyncmigrate.py -index 593fc2065..be9f44072 100644 ---- a/ipatests/test_integration/test_winsyncmigrate.py -+++ b/ipatests/test_integration/test_winsyncmigrate.py -@@ -59,7 +59,7 @@ class TestWinsyncMigrate(IntegrationTest): - ipa_group = 'ipa_group' - ad_user = 'testuser' - default_shell = platformconstants.DEFAULT_SHELL -- selinuxuser = platformconstants.SELINUX_USERMAP_ORDER.split("$$")[0] -+ selinuxuser = platformconstants.SELINUX_USERMAP_ORDER.split("$")[0] - test_role = 'test_role' - test_hbac_rule = 'test_hbac_rule' - test_selinux_map = 'test_selinux_map' -diff --git a/ipatests/test_webui/data_selinuxusermap.py b/ipatests/test_webui/data_selinuxusermap.py -index 
ca7b1dcdd..312e7592f 100644 ---- a/ipatests/test_webui/data_selinuxusermap.py -+++ b/ipatests/test_webui/data_selinuxusermap.py -@@ -5,8 +5,8 @@ - from ipaplatform.constants import constants as platformconstants - - # for example, user_u:s0 --selinuxuser1 = platformconstants.SELINUX_USERMAP_ORDER.split("$$")[0] --selinuxuser2 = platformconstants.SELINUX_USERMAP_ORDER.split("$$")[1] -+selinuxuser1 = platformconstants.SELINUX_USERMAP_ORDER.split("$")[0] -+selinuxuser2 = platformconstants.SELINUX_USERMAP_ORDER.split("$")[1] - - selinux_mcs_max = platformconstants.SELINUX_MCS_MAX - selinux_mls_max = platformconstants.SELINUX_MLS_MAX -diff --git a/ipatests/test_xmlrpc/test_selinuxusermap_plugin.py b/ipatests/test_xmlrpc/test_selinuxusermap_plugin.py -index 0b73992aa..e5b23bd4d 100644 ---- a/ipatests/test_xmlrpc/test_selinuxusermap_plugin.py -+++ b/ipatests/test_xmlrpc/test_selinuxusermap_plugin.py -@@ -32,8 +32,8 @@ from ipatests.test_xmlrpc.test_user_plugin import get_user_result - import pytest - - rule1 = u'selinuxrule1' --selinuxuser1 = platformconstants.SELINUX_USERMAP_ORDER.split("$$")[0] --selinuxuser2 = platformconstants.SELINUX_USERMAP_ORDER.split("$$")[1] -+selinuxuser1 = platformconstants.SELINUX_USERMAP_ORDER.split("$")[0] -+selinuxuser2 = platformconstants.SELINUX_USERMAP_ORDER.split("$")[1] - - INVALID_MCS = "Invalid MCS value, must match {}, where max category {}".format( - platformconstants.SELINUX_MCS_REGEX, --- -2.21.0 - diff --git a/SOURCES/0002-krbtktpolicy-reset.patch b/SOURCES/0002-krbtktpolicy-reset.patch new file mode 100644 index 0000000..fe12776 --- /dev/null +++ b/SOURCES/0002-krbtktpolicy-reset.patch @@ -0,0 +1,90 @@ +From a8b52eaf3cf56c90e3d94fdef0b9e426052634ea Mon Sep 17 00:00:00 2001 +From: Alexander Bokovoy +Date: Wed, 18 Dec 2019 12:08:59 +0200 +Subject: [PATCH] Reset per-indicator Kerberos policy + +When 'ipa krbtpolicy-reset' is called, we need to reset all policy +settings, including per-indicator ones. Per-indicator policy uses +subtyped attributes (foo;bar), the current krbtpolicy-reset code does +not deal with those. + +Add support for per-indicator policy reset. It is a bit tricky, as we +need to drop the values to defaults but avoid adding non-per-indicator +variants of the same attributes. + +Add test to check that policy has been resetted by observing a new +Kerberos TGT for the user after its policy reset. 
+ +Fixes: https://pagure.io/freeipa/issue/8153 + +Signed-off-by: Alexander Bokovoy +--- + ipaserver/plugins/krbtpolicy.py | 21 +++++++++++++++++++- + ipatests/test_integration/test_krbtpolicy.py | 13 ++++++++++++ + 2 files changed, 33 insertions(+), 1 deletion(-) + +diff --git a/ipaserver/plugins/krbtpolicy.py b/ipaserver/plugins/krbtpolicy.py +index 997fe7e81..b01c44e93 100644 +--- a/ipaserver/plugins/krbtpolicy.py ++++ b/ipaserver/plugins/krbtpolicy.py +@@ -68,6 +68,8 @@ register = Registry() + _default_values = { + 'krbmaxticketlife': 86400, + 'krbmaxrenewableage': 604800, ++ 'krbauthindmaxticketlife': 86400, ++ 'krbauthindmaxrenewableage': 604800, + } + + # These attributes never have non-optional values, so they should be +@@ -311,9 +313,26 @@ class krbtpolicy_reset(baseldap.LDAPQuery): + def_values[a] = None + # if reseting global policy - set values to default + else: +- def_values = _default_values ++ def_values = _default_values.copy() + + entry = ldap.get_entry(dn, list(def_values)) ++ ++ # For per-indicator policies, drop them to defaults ++ for subtype in _supported_options: ++ for attr in _option_based_attrs: ++ name = '{};{}'.format(attr, subtype) ++ if name in entry: ++ if uid is not None: ++ def_values[name] = None ++ else: ++ def_values[name] = _default_values[attr] ++ ++ # Remove non-subtyped attrs variants, ++ # they should never be used directly. ++ for attr in _option_based_attrs: ++ if attr in def_values: ++ del def_values[attr] ++ + entry.update(def_values) + try: + ldap.update_entry(entry) +diff --git a/ipatests/test_integration/test_krbtpolicy.py b/ipatests/test_integration/test_krbtpolicy.py +index b2264de7a..08e332096 100644 +--- a/ipatests/test_integration/test_krbtpolicy.py ++++ b/ipatests/test_integration/test_krbtpolicy.py +@@ -112,3 +112,16 @@ class TestPWPolicy(IntegrationTest): + assert maxlife_within_policy(result.stdout_text, 1200) is True + + tasks.kdestroy_all(master) ++ ++ def test_krbtpolicy_reset(self): ++ """Test a hardened kerberos ticket policy reset""" ++ master = self.master ++ ++ tasks.kinit_admin(master) ++ master.run_command(['ipa', 'krbtpolicy-reset', USER2]) ++ master.run_command(['kinit', USER2], ++ stdin_text=PASSWORD + '\n') ++ result = master.run_command('klist | grep krbtgt') ++ assert maxlife_within_policy(result.stdout_text, MAXLIFE) is True ++ ++ tasks.kdestroy_all(master) +-- +2.24.1 + diff --git a/SOURCES/0003-Remove-posixAccount-from-service_find-search-filter-2f9cbff_rhbz#1731437.patch b/SOURCES/0003-Remove-posixAccount-from-service_find-search-filter-2f9cbff_rhbz#1731437.patch deleted file mode 100644 index 17c769a..0000000 --- a/SOURCES/0003-Remove-posixAccount-from-service_find-search-filter-2f9cbff_rhbz#1731437.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 2f9cbffb6e57ded2d0107f457241f33b17869a96 Mon Sep 17 00:00:00 2001 -From: Rob Crittenden -Date: Jul 19 2019 19:16:16 +0000 -Subject: Remove posixAccount from service_find search filter - - -This will allow cifs principals to be found. They were suppressed -because they include objectclass=posixAccount. - -This is a bit of a historical anomaly. This was included in the -filter from the initial commit (though it was person, not -posixAccount). I believe it was a mistake from the beginning but -it wasn't noticed because it didn't cause any obvious issues. 
- -https://pagure.io/freeipa/issue/8013 - -Reviewed-By: Alexander Bokovoy - ---- - -diff --git a/ipaserver/plugins/service.py b/ipaserver/plugins/service.py -index f58fe4b..c118b80 100644 ---- a/ipaserver/plugins/service.py -+++ b/ipaserver/plugins/service.py -@@ -889,7 +889,6 @@ class service_find(LDAPSearch): - assert isinstance(base_dn, DN) - # lisp style! - custom_filter = '(&(objectclass=ipaService)' \ -- '(!(objectClass=posixAccount))' \ - '(!(|(krbprincipalname=kadmin/*)' \ - '(krbprincipalname=K/M@*)' \ - '(krbprincipalname=krbtgt/*))' \ - diff --git a/SOURCES/0003-adtrust-print-DNS-records-for-external-DNS-case-after-role-is-enabled_936e27f_rhbz#1665051.patch b/SOURCES/0003-adtrust-print-DNS-records-for-external-DNS-case-after-role-is-enabled_936e27f_rhbz#1665051.patch new file mode 100644 index 0000000..f86aeaf --- /dev/null +++ b/SOURCES/0003-adtrust-print-DNS-records-for-external-DNS-case-after-role-is-enabled_936e27f_rhbz#1665051.patch @@ -0,0 +1,193 @@ +From 936e27f75961c67e619ecfa641e256ce80662d68 Mon Sep 17 00:00:00 2001 +From: Alexander Bokovoy +Date: Feb 14 2020 07:24:58 +0000 +Subject: adtrust: print DNS records for external DNS case after role is enabled + + +We cannot gather information about required DNS records before "ADTrust +Controller" role is enabled on this server. As result, we need to call +the step to add DNS records after the role was enabled. + +Fixes: https://pagure.io/freeipa/issue/8192 +Signed-off-by: Alexander Bokovoy +Reviewed-By: Florence Blanc-Renaud + +--- + +diff --git a/install/tools/ipa-adtrust-install.in b/install/tools/ipa-adtrust-install.in +index 1abfea9..7d94b71 100644 +--- a/install/tools/ipa-adtrust-install.in ++++ b/install/tools/ipa-adtrust-install.in +@@ -214,7 +214,13 @@ def main(): + + # Enable configured services and update DNS SRV records + service.sync_services_state(api.env.host) +- api.Command.dns_update_system_records() ++ ++ dns_help = adtrust.generate_dns_service_records_help(api) ++ if dns_help: ++ for line in dns_help: ++ service.print_msg(line, sys.stdout) ++ else: ++ api.Command.dns_update_system_records() + + print(""" + ============================================================================= +diff --git a/ipaserver/install/adtrust.py b/ipaserver/install/adtrust.py +index 70c4359..6c14e84 100644 +--- a/ipaserver/install/adtrust.py ++++ b/ipaserver/install/adtrust.py +@@ -26,6 +26,8 @@ from ipaserver.install import installutils + from ipaserver.install import adtrustinstance + from ipaserver.install import service + from ipaserver.install.plugins.adtrust import update_host_cifs_keytabs ++from ipaserver.install.bindinstance import dns_zone_exists ++from ipaserver.dns_data_management import IPASystemRecords + + + if six.PY3: +@@ -436,6 +438,41 @@ def install(standalone, options, fstore, api): + add_new_adtrust_agents(api, options) + + ++def generate_dns_service_records_help(api): ++ """ ++ Return list of instructions to create DNS service records for Windows ++ if in case DNS is not enabled and the DNS zone is not managed by IPA. ++ In case IPA manages the DNS zone, nothing is returned. 
++ """ ++ ++ zone = api.env.domain ++ ++ err_msg = [] ++ ++ ret = api.Command['dns_is_enabled']() ++ if not ret['result']: ++ err_msg.append("DNS management was not enabled at install time.") ++ else: ++ if not dns_zone_exists(zone): ++ err_msg.append( ++ "DNS zone %s cannot be managed as it is not defined in " ++ "IPA" % zone) ++ ++ if err_msg: ++ err_msg.append("Add the following service records to your DNS " ++ "server for DNS zone %s: " % zone) ++ system_records = IPASystemRecords(api, all_servers=True) ++ adtrust_records = system_records.get_base_records( ++ [api.env.host], ["AD trust controller"], ++ include_master_role=False, include_kerberos_realm=False) ++ for r_name, node in adtrust_records.items(): ++ for rec in IPASystemRecords.records_list_from_node(r_name, node): ++ err_msg.append(rec) ++ return err_msg ++ ++ return None ++ ++ + @group + class ADTrustInstallInterface(ServiceAdminInstallInterface): + """ +diff --git a/ipaserver/install/adtrustinstance.py b/ipaserver/install/adtrustinstance.py +index 8699d53..a59e85d 100644 +--- a/ipaserver/install/adtrustinstance.py ++++ b/ipaserver/install/adtrustinstance.py +@@ -32,10 +32,8 @@ import socket + + import six + +-from ipaserver.dns_data_management import IPASystemRecords + from ipaserver.install import service + from ipaserver.install import installutils +-from ipaserver.install.bindinstance import dns_zone_exists + from ipaserver.install.replication import wait_for_task + from ipalib import errors, api + from ipalib.util import normalize_zone +@@ -586,43 +584,6 @@ class ADTRUSTInstance(service.Service): + logger.critical("Failed to remove old key for %s", + self.principal) + +- def srv_rec(self, host, port, prio): +- return "%(prio)d 100 %(port)d %(host)s" % dict(host=host,prio=prio,port=port) +- +- def __add_dns_service_records(self): +- """ +- Add DNS service records for Windows if DNS is enabled and the DNS zone +- is managed. If there are already service records for LDAP and Kerberos +- their values are used. Otherwise default values are used. +- """ +- +- zone = api.env.domain +- +- err_msg = None +- +- ret = api.Command['dns_is_enabled']() +- if not ret['result']: +- err_msg = "DNS management was not enabled at install time." 
+- else: +- if not dns_zone_exists(zone): +- err_msg = ( +- "DNS zone %s cannot be managed as it is not defined in " +- "IPA" % zone) +- +- if err_msg: +- self.print_msg(err_msg) +- self.print_msg("Add the following service records to your DNS " \ +- "server for DNS zone %s: " % zone) +- system_records = IPASystemRecords(api, all_servers=True) +- adtrust_records = system_records.get_base_records( +- [self.fqdn], ["AD trust controller"], +- include_master_role=False, include_kerberos_realm=False) +- for r_name, node in adtrust_records.items(): +- for rec in IPASystemRecords.records_list_from_node(r_name, node): +- self.print_msg(rec) +- else: +- api.Command.dns_update_system_records() +- + def __configure_selinux_for_smbd(self): + try: + tasks.set_selinux_booleans(constants.SELINUX_BOOLEAN_ADTRUST, +@@ -876,8 +837,6 @@ class ADTRUSTInstance(service.Service): + self.step("map BUILTIN\\Guests to nobody group", + self.__map_Guests_to_nobody) + self.step("configuring smbd to start on boot", self.__enable) +- self.step("adding special DNS service records", \ +- self.__add_dns_service_records) + + if self.enable_compat: + self.step("enabling trusted domains support for older clients via Schema Compatibility plugin", +diff --git a/ipaserver/install/server/install.py b/ipaserver/install/server/install.py +index 6b08b70..afce0d7 100644 +--- a/ipaserver/install/server/install.py ++++ b/ipaserver/install/server/install.py +@@ -984,6 +984,12 @@ def install(installer): + service.enable_services(host_name) + api.Command.dns_update_system_records() + ++ if options.setup_adtrust: ++ dns_help = adtrust.generate_dns_service_records_help(api) ++ if dns_help: ++ for line in dns_help: ++ service.print_msg(line, sys.stdout) ++ + if not options.setup_dns: + # After DNS and AD trust are configured and services are + # enabled, create a dummy instance to dump DNS configuration. +diff --git a/ipaserver/install/server/replicainstall.py b/ipaserver/install/server/replicainstall.py +index 536f0db..71ea091 100644 +--- a/ipaserver/install/server/replicainstall.py ++++ b/ipaserver/install/server/replicainstall.py +@@ -1351,6 +1351,12 @@ def install(installer): + # enabled-service case, also perform update in hidden replica case. 
+ api.Command.dns_update_system_records() + ++ if options.setup_adtrust: ++ dns_help = adtrust.generate_dns_service_records_help(api) ++ if dns_help: ++ for line in dns_help: ++ service.print_msg(line, sys.stdout) ++ + ca_servers = find_providing_servers('CA', api.Backend.ldap2, api=api) + api.Backend.ldap2.disconnect() + + diff --git a/SOURCES/0004-AD-user-without-override-receive-InternalServerError-with-API_4db18be_rhbz#1782572.patch b/SOURCES/0004-AD-user-without-override-receive-InternalServerError-with-API_4db18be_rhbz#1782572.patch new file mode 100644 index 0000000..3795958 --- /dev/null +++ b/SOURCES/0004-AD-user-without-override-receive-InternalServerError-with-API_4db18be_rhbz#1782572.patch @@ -0,0 +1,41 @@ +From 4db18be5467c0b8f7633b281c724f469f907e573 Mon Sep 17 00:00:00 2001 +From: Florence Blanc-Renaud +Date: Jan 13 2020 12:08:19 +0000 +Subject: AD user without override receive InternalServerError with API + + +When ipa commands are used by an Active Directory user that +does not have any idoverride-user set, they return the +following error message which can be misleading: +$ kinit aduser@ADDOMAIN.COM +$ ipa ping +ipa: ERROR: cannot connect to 'https://master.ipa.com/ipa/json': Internal Server Error + +The fix properly handles ACIError exception received when +creating the context, and now the following message can be seen: + +$ kinit aduser@ADDOMAIN.COM +$ ipa ping +ipa: ERROR: cannot connect to 'https://master.ipa.com/ipa/json': Unauthorized + +with the following log in /var/log/httpd/error_log: +ipa: INFO: 401 Unauthorized: Insufficient access: Invalid credentials + +Fixes: https://pagure.io/freeipa/issue/8163 + +--- + +diff --git a/ipaserver/rpcserver.py b/ipaserver/rpcserver.py +index 0495557..194cbbc 100644 +--- a/ipaserver/rpcserver.py ++++ b/ipaserver/rpcserver.py +@@ -694,7 +694,7 @@ class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession): + status = HTTP_STATUS_SUCCESS + response = status.encode('utf-8') + start_response(status, self.headers) +- return self.marshal(None, e) ++ return [self.marshal(None, e)] + finally: + destroy_context() + return response + diff --git a/SOURCES/0004-Repeated-uninstallation-of-ipa-client-samba-crashes_rhbz#1732529.patch b/SOURCES/0004-Repeated-uninstallation-of-ipa-client-samba-crashes_rhbz#1732529.patch deleted file mode 100644 index 374534b..0000000 --- a/SOURCES/0004-Repeated-uninstallation-of-ipa-client-samba-crashes_rhbz#1732529.patch +++ /dev/null @@ -1,146 +0,0 @@ -From b9b98097a47f27b56500edc972c438597e6609b1 Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Jul 26 2019 13:09:42 +0000 -Subject: ipatests: test multiple invocations of ipa-client-samba --uninstall - - -Related-to: https://pagure.io/freeipa/issue/8019 -Signed-off-by: François Cami -Reviewed-By: Alexander Bokovoy -Reviewed-By: Sergey Orlov - ---- - -diff --git a/ipatests/test_integration/test_smb.py b/ipatests/test_integration/test_smb.py -index 4e295c0..26d70b3 100644 ---- a/ipatests/test_integration/test_smb.py -+++ b/ipatests/test_integration/test_smb.py -@@ -150,3 +150,6 @@ class TestSMB(IntegrationTest): - - smbsrv = self.replicas[0] - smbsrv.run_command(['ipa-client-samba', '--uninstall', '-U']) -+ # test for https://pagure.io/freeipa/issue/8019 -+ # try another uninstall after the first one: -+ smbsrv.run_command(['ipa-client-samba', '--uninstall', '-U']) - -From 256a6a879061d2b97c11e9cd97b2427579610fa1 Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Jul 26 2019 13:09:42 +0000 -Subject: ipa-client-samba: remove and restore smb.conf only on 
first uninstall - - -Fixes: https://pagure.io/freeipa/issue/8019 -Signed-off-by: François Cami -Reviewed-By: Alexander Bokovoy -Reviewed-By: Sergey Orlov - ---- - -diff --git a/ipaclient/install/ipa_client_samba.py b/ipaclient/install/ipa_client_samba.py -index e2be67d..6a3c3bd 100755 ---- a/ipaclient/install/ipa_client_samba.py -+++ b/ipaclient/install/ipa_client_samba.py -@@ -433,8 +433,9 @@ def uninstall(fstore, statestore, options): - ipautil.remove_ccache(ccache_path=paths.KRB5CC_SAMBA) - - # Remove samba's configuration file -- ipautil.remove_file(paths.SMB_CONF) -- fstore.restore_file(paths.SMB_CONF) -+ if fstore.has_file(paths.SMB_CONF): -+ ipautil.remove_file(paths.SMB_CONF) -+ fstore.restore_file(paths.SMB_CONF) - - # Remove samba's persistent and temporary tdb files - tdb_files = [ -@@ -624,7 +625,7 @@ def run(): - api.Command.service_del(api.env.smb_princ) - except AttributeError: - logger.error( -- "Chosen IPA master %s does not have support to" -+ "Chosen IPA master %s does not have support to " - "set up Samba domain members", server, - ) - return 1 - -From 00ba2ae6681dafa92d3f00f2a4e11adaa477ea0e Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Jul 26 2019 13:09:42 +0000 -Subject: ipatests: test ipa-client-samba after --uninstall - - -Related-to: https://pagure.io/freeipa/issue/8021 -Signed-off-by: François Cami -Reviewed-By: Alexander Bokovoy -Reviewed-By: Sergey Orlov - ---- - -diff --git a/ipatests/test_integration/test_smb.py b/ipatests/test_integration/test_smb.py -index 26d70b3..933d468 100644 ---- a/ipatests/test_integration/test_smb.py -+++ b/ipatests/test_integration/test_smb.py -@@ -153,3 +153,8 @@ class TestSMB(IntegrationTest): - # test for https://pagure.io/freeipa/issue/8019 - # try another uninstall after the first one: - smbsrv.run_command(['ipa-client-samba', '--uninstall', '-U']) -+ # test for https://pagure.io/freeipa/issue/8021 -+ # try to install again: -+ smbsrv.run_command(["ipa-client-samba", "-U"]) -+ # cleanup: -+ smbsrv.run_command(['ipa-client-samba', '--uninstall', '-U']) - -From 551cd68d0959b1ee761ead6338dc06c544c0c5da Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Jul 26 2019 13:09:42 +0000 -Subject: ipa-client-samba: remove state on uninstall - - -The "domain_member" state was not removed at uninstall time. -Remove it so that future invocations of ipa-client-samba work. - -Fixes: https://pagure.io/freeipa/issue/8021 -Signed-off-by: François Cami - -https://pagure.io/freeipa/issue/8021 - -Reviewed-By: Alexander Bokovoy -Reviewed-By: Sergey Orlov - ---- - -diff --git a/ipaclient/install/ipa_client_samba.py b/ipaclient/install/ipa_client_samba.py -index 6a3c3bd..126ef32 100755 ---- a/ipaclient/install/ipa_client_samba.py -+++ b/ipaclient/install/ipa_client_samba.py -@@ -523,11 +523,25 @@ def run(): - if options.uninstall: - if statestore.has_state("domain_member"): - uninstall(fstore, statestore, options) -- print( -- "Samba configuration is reverted. " -- "However, Samba databases were fully cleaned and " -- "old configuration file will not be usable anymore." -- ) -+ try: -+ keys = ( -+ "configured", "hardening", "groupmap", "tdb", -+ "service.principal", "smb.conf" -+ ) -+ for key in keys: -+ statestore.delete_state("domain_member", key) -+ except Exception as e: -+ print( -+ "Error: Failed to remove the domain_member statestores: " -+ "%s" % e -+ ) -+ return 1 -+ else: -+ print( -+ "Samba configuration is reverted. " -+ "However, Samba databases were fully cleaned and " -+ "old configuration file will not be usable anymore." 
-+ ) - else: - print("Samba domain member is not configured yet") - return 0 - diff --git a/SOURCES/0005-WebUI-Add-PKINIT-status-field-to-Configuration-page-a46383f_rhbz#1518153.patch b/SOURCES/0005-WebUI-Add-PKINIT-status-field-to-Configuration-page-a46383f_rhbz#1518153.patch deleted file mode 100644 index 1060da1..0000000 --- a/SOURCES/0005-WebUI-Add-PKINIT-status-field-to-Configuration-page-a46383f_rhbz#1518153.patch +++ /dev/null @@ -1,72 +0,0 @@ -From a46383ffe414f703264f8a81450f44abbd95d78e Mon Sep 17 00:00:00 2001 -From: Serhii Tsymbaliuk -Date: Jul 26 2019 20:36:58 +0000 -Subject: WebUI: Add PKINIT status field to 'Configuration' page - - -- Add 'Server Options' section to the page -- Add 'IPA master capable of PKINIT' field to the 'Server Options' - -Ticket: https://pagure.io/freeipa/issue/7305 - -Signed-off-by: Serhii Tsymbaliuk -Reviewed-By: Florence Blanc-Renaud - ---- - -diff --git a/install/ui/src/freeipa/serverconfig.js b/install/ui/src/freeipa/serverconfig.js -index 25f484a..6c82b40 100644 ---- a/install/ui/src/freeipa/serverconfig.js -+++ b/install/ui/src/freeipa/serverconfig.js -@@ -50,6 +50,24 @@ return { - ] - }, - { -+ name: 'server', -+ label: '@i18n:objects.config.server', -+ fields: [ -+ { -+ $type: 'entity_select', -+ name: 'ca_renewal_master_server', -+ other_entity: 'server', -+ other_field: 'cn', -+ flags: ['w_if_no_aci'] -+ }, -+ { -+ $type: 'multivalued', -+ name: 'pkinit_server_server', -+ read_only: true -+ } -+ ] -+ }, -+ { - name: 'user', - label: '@i18n:objects.config.user', - fields: [ -@@ -99,13 +117,6 @@ return { - { - $type: 'multivalued', - name: 'ipauserobjectclasses' -- }, -- { -- $type: 'entity_select', -- name: 'ca_renewal_master_server', -- other_entity: 'server', -- other_field: 'cn', -- flags: ['w_if_no_aci'] - } - ] - }, -diff --git a/ipaserver/plugins/internal.py b/ipaserver/plugins/internal.py -index 0f0ad3a..19957d7 100644 ---- a/ipaserver/plugins/internal.py -+++ b/ipaserver/plugins/internal.py -@@ -726,6 +726,7 @@ class i18n_messages(Command): - "group": _("Group Options"), - "search": _("Search Options"), - "selinux": _("SELinux Options"), -+ "server": _("Server Options"), - "service": _("Service Options"), - "user": _("User Options"), - }, - diff --git a/SOURCES/0005-ipa-client-automount-fails-after-repeated-installation-uninstallation_rhbz#1790886.patch b/SOURCES/0005-ipa-client-automount-fails-after-repeated-installation-uninstallation_rhbz#1790886.patch new file mode 100644 index 0000000..dbb6db5 --- /dev/null +++ b/SOURCES/0005-ipa-client-automount-fails-after-repeated-installation-uninstallation_rhbz#1790886.patch @@ -0,0 +1,82 @@ +From 6332aed9ba67e2ee759a9d988ba92139486469d4 Mon Sep 17 00:00:00 2001 +From: François Cami +Date: Feb 14 2020 16:38:19 +0000 +Subject: ipa-client-automount: call save_domain() for each change + + +Call sssdconfig.save_domain(domain) after each configuration +change during ipa-client-automount --uninstall. + +Previously, sssdconfig.save_domain(domain) was called only +outside of the domain detection loop which changed the domain +configuration. This introduced issues as this method's behavior +is only consistent when configuration items are removed in a +certain order: https://pagure.io/SSSD/sssd/issue/4149 +Plus, it is more correct to save the configuration from within +the loop if ever we support multiple domains. 
+ +Fixes: https://pagure.io/freeipa/issue/8190 +Signed-off-by: François Cami +Reviewed-By: Alexander Bokovoy + +--- + +diff --git a/ipaclient/install/ipa_client_automount.py b/ipaclient/install/ipa_client_automount.py +index 3ef257a..fdf974d 100644 +--- a/ipaclient/install/ipa_client_automount.py ++++ b/ipaclient/install/ipa_client_automount.py +@@ -355,9 +355,10 @@ def uninstall(fstore, statestore): + continue + if provider == "ipa": + domain.remove_option('ipa_automount_location') ++ sssdconfig.save_domain(domain) + domain.remove_provider('autofs') ++ sssdconfig.save_domain(domain) + break +- sssdconfig.save_domain(domain) + sssdconfig.write(paths.SSSD_CONF) + sssd = services.service('sssd', api) + sssd.restart() + +From 7ae804c726970ae467a7f76efa21bae40405551d Mon Sep 17 00:00:00 2001 +From: François Cami +Date: Feb 14 2020 16:38:19 +0000 +Subject: ipatests: make sure ipa-client-automount reverts sssd.conf + + +Due to https://pagure.io/SSSD/sssd/issue/4149 ipa-client-automount +fails to remove the ipa_automount_location entry from sssd.conf. +Test that autofs_provider and ipa_automount_location are removed. + +Fixes: https://pagure.io/freeipa/issue/8190 +Signed-off-by: François Cami +Reviewed-By: Alexander Bokovoy + +--- + +diff --git a/ipatests/test_integration/test_nfs.py b/ipatests/test_integration/test_nfs.py +index 7d29836..532dd61 100644 +--- a/ipatests/test_integration/test_nfs.py ++++ b/ipatests/test_integration/test_nfs.py +@@ -332,6 +332,20 @@ class TestIpaClientAutomountFileRestore(IntegrationTest): + "ipa-client-automount", "--uninstall", "-U" + ]) + ++ if not no_sssd: ++ # https://pagure.io/freeipa/issue/8190 ++ # check that no ipa_automount_location is left in sssd.conf ++ # also check for autofs_provider for good measure ++ grep_automount_in_sssdconf_cmd = \ ++ "egrep ipa_automount_location\\|autofs_provider " \ ++ "/etc/sssd/sssd.conf" ++ cmd = self.clients[0].run_command( ++ grep_automount_in_sssdconf_cmd, raiseonerr=False ++ ) ++ assert cmd.returncode == 1, \ ++ "PG8190 regression found: ipa_automount_location still " \ ++ "present in sssd.conf" ++ + cmd = self.clients[0].run_command(grep_automount_command) + assert cmd.stdout_text.split() == after_ipa_client_install + + diff --git a/SOURCES/0006-external-ca-profile-fix_rhbz#1731813.patch b/SOURCES/0006-external-ca-profile-fix_rhbz#1731813.patch deleted file mode 100644 index ca70f28..0000000 --- a/SOURCES/0006-external-ca-profile-fix_rhbz#1731813.patch +++ /dev/null @@ -1,1339 +0,0 @@ -From d0d29ccc324bb9f95bffbe3162ee5c3c61c6086a Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Thu, 11 Jul 2019 15:17:04 +1000 -Subject: [PATCH] move MSCSTemplate classes to ipalib - -As we expand the integration tests for external CA functionality, it -is helpful (and avoids duplication) to use the MSCSTemplate* -classes. These currently live in ipaserver.install.cainstance, but -ipatests is no longer permitted to import from ipaserver (see commit -81714976e5e13131654c78eb734746a20237c933). So move these classes to -ipalib. 
- -Part of: https://pagure.io/freeipa/issue/7548 - -Reviewed-By: Florence Blanc-Renaud ---- - install/tools/ipa-ca-install.in | 6 +- - ipalib/x509.py | 171 +++++++++++++++++ - ipaserver/install/ca.py | 11 +- - ipaserver/install/cainstance.py | 180 +----------------- - ipaserver/install/ipa_cacert_manage.py | 14 +- - ipatests/test_integration/test_external_ca.py | 11 +- - ipatests/test_ipalib/test_x509.py | 115 +++++++++++ - .../test_install/test_cainstance.py | 123 ------------ - 8 files changed, 307 insertions(+), 324 deletions(-) - delete mode 100644 ipatests/test_ipaserver/test_install/test_cainstance.py - -diff --git a/install/tools/ipa-ca-install.in b/install/tools/ipa-ca-install.in -index 0700c0c38b..ce6d5fcb52 100644 ---- a/install/tools/ipa-ca-install.in -+++ b/install/tools/ipa-ca-install.in -@@ -37,7 +37,7 @@ from ipaserver.install import cainstance, service - from ipaserver.install import custodiainstance - from ipaserver.masters import find_providing_server - from ipapython import version --from ipalib import api -+from ipalib import api, x509 - from ipalib.constants import DOMAIN_LEVEL_1 - from ipapython.config import IPAOptionParser - from ipapython.ipa_log_manager import standard_logging_setup -@@ -68,13 +68,13 @@ def parse_options(): - default=False, help="unattended installation never prompts the user") - parser.add_option("--external-ca", dest="external_ca", action="store_true", - default=False, help="Generate a CSR to be signed by an external CA") -- ext_cas = tuple(x.value for x in cainstance.ExternalCAType) -+ ext_cas = tuple(x.value for x in x509.ExternalCAType) - parser.add_option("--external-ca-type", dest="external_ca_type", - type="choice", choices=ext_cas, - metavar="{{{0}}}".format(",".join(ext_cas)), - help="Type of the external CA. Default: generic") - parser.add_option("--external-ca-profile", dest="external_ca_profile", -- type='constructor', constructor=cainstance.ExternalCAProfile, -+ type='constructor', constructor=x509.ExternalCAProfile, - default=None, metavar="PROFILE-SPEC", - help="Specify the certificate profile/template to use " - "at the external CA") -diff --git a/ipalib/x509.py b/ipalib/x509.py -index ab3c5f553d..1f612a3797 100644 ---- a/ipalib/x509.py -+++ b/ipalib/x509.py -@@ -34,6 +34,7 @@ - import os - import binascii - import datetime -+import enum - import ipaddress - import ssl - import base64 -@@ -47,6 +48,7 @@ - Encoding, PublicFormat, PrivateFormat, load_pem_private_key - ) - import pyasn1 -+import pyasn1.error - from pyasn1.type import univ, char, namedtype, tag - from pyasn1.codec.der import decoder, encoder - from pyasn1_modules import rfc2315, rfc2459 -@@ -745,3 +747,172 @@ def format_datetime(t): - if t.tzinfo is None: - t = t.replace(tzinfo=UTC()) - return unicode(t.strftime("%a %b %d %H:%M:%S %Y %Z")) -+ -+ -+class ExternalCAType(enum.Enum): -+ GENERIC = 'generic' -+ MS_CS = 'ms-cs' -+ -+ -+class ExternalCAProfile: -+ """ -+ An external CA profile configuration. Currently the only -+ subclasses are for Microsoft CAs, for providing data in the -+ "Certificate Template" extension. -+ -+ Constructing this class will actually return an instance of a -+ subclass. -+ -+ Subclasses MUST set ``valid_for``. -+ -+ """ -+ def __init__(self, s=None): -+ self.unparsed_input = s -+ -+ # Which external CA types is the data valid for? -+ # A set of VALUES of the ExternalCAType enum. -+ valid_for = set() -+ -+ def __new__(cls, s=None): -+ """Construct the ExternalCAProfile value. 
-+ -+ Return an instance of a subclass determined by -+ the format of the argument. -+ -+ """ -+ # we are directly constructing a subclass; instantiate -+ # it and be done -+ if cls is not ExternalCAProfile: -+ return super(ExternalCAProfile, cls).__new__(cls) -+ -+ # construction via the base class; therefore the string -+ # argument is required, and is used to determine which -+ # subclass to construct -+ if s is None: -+ raise ValueError('string argument is required') -+ -+ parts = s.split(':') -+ -+ try: -+ # Is the first part on OID? -+ _oid = univ.ObjectIdentifier(parts[0]) -+ -+ # It is; construct a V2 template -+ # pylint: disable=too-many-function-args -+ return MSCSTemplateV2.__new__(MSCSTemplateV2, s) -+ -+ except pyasn1.error.PyAsn1Error: -+ # It is not an OID; treat as a template name -+ # pylint: disable=too-many-function-args -+ return MSCSTemplateV1.__new__(MSCSTemplateV1, s) -+ -+ def __getstate__(self): -+ return self.unparsed_input -+ -+ def __setstate__(self, state): -+ # explicitly call __init__ method to initialise object -+ self.__init__(state) -+ -+ -+class MSCSTemplate(ExternalCAProfile): -+ """ -+ An Microsoft AD-CS Template specifier. -+ -+ Subclasses MUST set ext_oid. -+ -+ Subclass constructors MUST set asn1obj. -+ -+ """ -+ valid_for = set([ExternalCAType.MS_CS.value]) -+ -+ ext_oid = None # extension OID, as a Python str -+ asn1obj = None # unencoded extension data -+ -+ def get_ext_data(self): -+ """Return DER-encoded extension data.""" -+ return encoder.encode(self.asn1obj) -+ -+ -+class MSCSTemplateV1(MSCSTemplate): -+ """ -+ A v1 template specifier, per -+ https://msdn.microsoft.com/en-us/library/cc250011.aspx. -+ -+ :: -+ -+ CertificateTemplateName ::= SEQUENCE { -+ Name UTF8String -+ } -+ -+ But note that a bare BMPString is used in practice. 
-+ -+ """ -+ ext_oid = "1.3.6.1.4.1.311.20.2" -+ -+ def __init__(self, s): -+ super(MSCSTemplateV1, self).__init__(s) -+ parts = s.split(':') -+ if len(parts) > 1: -+ raise ValueError( -+ "Cannot specify certificate template version when using name.") -+ self.asn1obj = char.BMPString(str(parts[0])) -+ -+ -+class MSCSTemplateV2(MSCSTemplate): -+ """ -+ A v2 template specifier, per -+ https://msdn.microsoft.com/en-us/library/windows/desktop/aa378274(v=vs.85).aspx -+ -+ :: -+ -+ CertificateTemplate ::= SEQUENCE { -+ templateID EncodedObjectID, -+ templateMajorVersion TemplateVersion, -+ templateMinorVersion TemplateVersion OPTIONAL -+ } -+ -+ TemplateVersion ::= INTEGER (0..4294967295) -+ -+ """ -+ ext_oid = "1.3.6.1.4.1.311.21.7" -+ -+ @staticmethod -+ def check_version_in_range(desc, n): -+ if n < 0 or n >= 2**32: -+ raise ValueError( -+ "Template {} version must be in range 0..4294967295" -+ .format(desc)) -+ -+ def __init__(self, s): -+ super(MSCSTemplateV2, self).__init__(s) -+ -+ parts = s.split(':') -+ -+ obj = CertificateTemplateV2() -+ if len(parts) < 2 or len(parts) > 3: -+ raise ValueError( -+ "Incorrect template specification; required format is: " -+ ":[:]") -+ try: -+ obj['templateID'] = univ.ObjectIdentifier(parts[0]) -+ -+ major = int(parts[1]) -+ self.check_version_in_range("major", major) -+ obj['templateMajorVersion'] = major -+ -+ if len(parts) > 2: -+ minor = int(parts[2]) -+ self.check_version_in_range("minor", minor) -+ obj['templateMinorVersion'] = int(parts[2]) -+ -+ except pyasn1.error.PyAsn1Error: -+ raise ValueError("Could not parse certificate template specifier.") -+ self.asn1obj = obj -+ -+ -+class CertificateTemplateV2(univ.Sequence): -+ componentType = namedtype.NamedTypes( -+ namedtype.NamedType('templateID', univ.ObjectIdentifier()), -+ namedtype.NamedType('templateMajorVersion', univ.Integer()), -+ namedtype.OptionalNamedType('templateMinorVersion', univ.Integer()) -+ ) -diff --git a/ipaserver/install/ca.py b/ipaserver/install/ca.py -index 6b040b311a..8fb5e3ec91 100644 ---- a/ipaserver/install/ca.py -+++ b/ipaserver/install/ca.py -@@ -28,7 +28,7 @@ - from ipaplatform.paths import paths - from ipaserver.install import installutils, certs - from ipaserver.install.replication import replica_conn_check --from ipalib import api, errors -+from ipalib import api, errors, x509 - from ipapython.dn import DN - - from . 
import conncheck, dogtag, cainstance -@@ -216,8 +216,7 @@ def install_check(standalone, replica_config, options): - paths.ROOT_IPA_CSR) - - if not options.external_ca_type: -- options.external_ca_type = \ -- cainstance.ExternalCAType.GENERIC.value -+ options.external_ca_type = x509.ExternalCAType.GENERIC.value - - if options.external_ca_profile is not None: - # check that profile is valid for the external ca type -@@ -478,13 +477,11 @@ class CAInstallInterface(dogtag.DogtagInstallInterface, - external_ca = master_install_only(external_ca) - - external_ca_type = knob( -- cainstance.ExternalCAType, None, -- description="Type of the external CA", -- ) -+ x509.ExternalCAType, None, description="Type of the external CA") - external_ca_type = master_install_only(external_ca_type) - - external_ca_profile = knob( -- type=cainstance.ExternalCAProfile, -+ type=x509.ExternalCAProfile, - default=None, - description=( - "Specify the certificate profile/template to use at the " -diff --git a/ipaserver/install/cainstance.py b/ipaserver/install/cainstance.py -index 6e1fc724db..2295581870 100644 ---- a/ipaserver/install/cainstance.py -+++ b/ipaserver/install/cainstance.py -@@ -26,7 +26,6 @@ - import logging - - import dbus --import enum - import ldap - import os - import pwd -@@ -39,10 +38,6 @@ - import tempfile - from configparser import RawConfigParser - --from pyasn1.codec.der import encoder --from pyasn1.type import char, univ, namedtype --import pyasn1.error -- - from ipalib import api - from ipalib import x509 - from ipalib import errors -@@ -80,11 +75,6 @@ - ] - - --class ExternalCAType(enum.Enum): -- GENERIC = 'generic' -- MS_CS = 'ms-cs' -- -- - def check_ports(): - """Check that dogtag ports (8080, 8443) are available. - -@@ -367,7 +357,7 @@ def configure_instance(self, host_name, dm_password, admin_password, - if ca_type is not None: - self.ca_type = ca_type - else: -- self.ca_type = ExternalCAType.GENERIC.value -+ self.ca_type = x509.ExternalCAType.GENERIC.value - self.external_ca_profile = external_ca_profile - - self.no_db_setup = promote -@@ -537,12 +527,12 @@ def __spawn_instance(self): - pki_ca_signing_csr_path=self.csr_file, - ) - -- if self.ca_type == ExternalCAType.MS_CS.value: -+ if self.ca_type == x509.ExternalCAType.MS_CS.value: - # Include MS template name extension in the CSR - template = self.external_ca_profile - if template is None: - # default template name -- template = MSCSTemplateV1(u"SubCA") -+ template = x509.MSCSTemplateV1(u"SubCA") - - ext_data = binascii.hexlify(template.get_ext_data()) - cfg.update( -@@ -2081,170 +2071,6 @@ def update_ipa_conf(): - parser.write(f) - - --class ExternalCAProfile: -- """ -- An external CA profile configuration. Currently the only -- subclasses are for Microsoft CAs, for providing data in the -- "Certificate Template" extension. -- -- Constructing this class will actually return an instance of a -- subclass. -- -- Subclasses MUST set ``valid_for``. -- -- """ -- def __init__(self, s=None): -- self.unparsed_input = s -- -- # Which external CA types is the data valid for? -- # A set of VALUES of the ExternalCAType enum. -- valid_for = set() -- -- def __new__(cls, s=None): -- """Construct the ExternalCAProfile value. -- -- Return an instance of a subclass determined by -- the format of the argument. 
-- -- """ -- # we are directly constructing a subclass; instantiate -- # it and be done -- if cls is not ExternalCAProfile: -- return super(ExternalCAProfile, cls).__new__(cls) -- -- # construction via the base class; therefore the string -- # argument is required, and is used to determine which -- # subclass to construct -- if s is None: -- raise ValueError('string argument is required') -- -- parts = s.split(':') -- -- try: -- # Is the first part on OID? -- _oid = univ.ObjectIdentifier(parts[0]) -- -- # It is; construct a V2 template -- # pylint: disable=too-many-function-args -- return MSCSTemplateV2.__new__(MSCSTemplateV2, s) -- -- except pyasn1.error.PyAsn1Error: -- # It is not an OID; treat as a template name -- # pylint: disable=too-many-function-args -- return MSCSTemplateV1.__new__(MSCSTemplateV1, s) -- -- def __getstate__(self): -- return self.unparsed_input -- -- def __setstate__(self, state): -- # explicitly call __init__ method to initialise object -- self.__init__(state) -- -- --class MSCSTemplate(ExternalCAProfile): -- """ -- An Microsoft AD-CS Template specifier. -- -- Subclasses MUST set ext_oid. -- -- Subclass constructors MUST set asn1obj. -- -- """ -- valid_for = set([ExternalCAType.MS_CS.value]) -- -- ext_oid = None # extension OID, as a Python str -- asn1obj = None # unencoded extension data -- -- def get_ext_data(self): -- """Return DER-encoded extension data.""" -- return encoder.encode(self.asn1obj) -- -- --class MSCSTemplateV1(MSCSTemplate): -- """ -- A v1 template specifier, per -- https://msdn.microsoft.com/en-us/library/cc250011.aspx. -- -- :: -- -- CertificateTemplateName ::= SEQUENCE { -- Name UTF8String -- } -- -- But note that a bare BMPString is used in practice. -- -- """ -- ext_oid = "1.3.6.1.4.1.311.20.2" -- -- def __init__(self, s): -- super(MSCSTemplateV1, self).__init__(s) -- parts = s.split(':') -- if len(parts) > 1: -- raise ValueError( -- "Cannot specify certificate template version when using name.") -- self.asn1obj = char.BMPString(str(parts[0])) -- -- --class MSCSTemplateV2(MSCSTemplate): -- """ -- A v2 template specifier, per -- https://msdn.microsoft.com/en-us/library/windows/desktop/aa378274(v=vs.85).aspx -- -- :: -- -- CertificateTemplate ::= SEQUENCE { -- templateID EncodedObjectID, -- templateMajorVersion TemplateVersion, -- templateMinorVersion TemplateVersion OPTIONAL -- } -- -- TemplateVersion ::= INTEGER (0..4294967295) -- -- """ -- ext_oid = "1.3.6.1.4.1.311.21.7" -- -- @staticmethod -- def check_version_in_range(desc, n): -- if n < 0 or n >= 2**32: -- raise ValueError( -- "Template {} version must be in range 0..4294967295" -- .format(desc)) -- -- def __init__(self, s): -- super(MSCSTemplateV2, self).__init__(s) -- -- parts = s.split(':') -- -- obj = CertificateTemplateV2() -- if len(parts) < 2 or len(parts) > 3: -- raise ValueError( -- "Incorrect template specification; required format is: " -- ":[:]") -- try: -- obj['templateID'] = univ.ObjectIdentifier(parts[0]) -- -- major = int(parts[1]) -- self.check_version_in_range("major", major) -- obj['templateMajorVersion'] = major -- -- if len(parts) > 2: -- minor = int(parts[2]) -- self.check_version_in_range("minor", minor) -- obj['templateMinorVersion'] = int(parts[2]) -- -- except pyasn1.error.PyAsn1Error: -- raise ValueError("Could not parse certificate template specifier.") -- self.asn1obj = obj -- -- --class CertificateTemplateV2(univ.Sequence): -- componentType = namedtype.NamedTypes( -- namedtype.NamedType('templateID', univ.ObjectIdentifier()), -- 
namedtype.NamedType('templateMajorVersion', univ.Integer()), -- namedtype.OptionalNamedType('templateMinorVersion', univ.Integer()) -- ) -- -- - if __name__ == "__main__": - standard_logging_setup("install.log") - ds = dsinstance.DsInstance() -diff --git a/ipaserver/install/ipa_cacert_manage.py b/ipaserver/install/ipa_cacert_manage.py -index 3f113c35bf..37dcc2befa 100644 ---- a/ipaserver/install/ipa_cacert_manage.py -+++ b/ipaserver/install/ipa_cacert_manage.py -@@ -65,7 +65,7 @@ def add_options(cls, parser): - "--external-ca", dest='self_signed', - action='store_false', - help="Sign the renewed certificate by external CA") -- ext_cas = tuple(x.value for x in cainstance.ExternalCAType) -+ ext_cas = tuple(x.value for x in x509.ExternalCAType) - renew_group.add_option( - "--external-ca-type", dest="external_ca_type", - type="choice", choices=ext_cas, -@@ -73,7 +73,7 @@ def add_options(cls, parser): - help="Type of the external CA. Default: generic") - renew_group.add_option( - "--external-ca-profile", dest="external_ca_profile", -- type='constructor', constructor=cainstance.ExternalCAProfile, -+ type='constructor', constructor=x509.ExternalCAProfile, - default=None, metavar="PROFILE-SPEC", - help="Specify the certificate profile/template to use " - "at the external CA") -@@ -224,11 +224,11 @@ def renew_external_step_1(self, ca): - options = self.options - - if not options.external_ca_type: -- options.external_ca_type = cainstance.ExternalCAType.GENERIC.value -+ options.external_ca_type = x509.ExternalCAType.GENERIC.value - -- if options.external_ca_type == cainstance.ExternalCAType.MS_CS.value \ -+ if options.external_ca_type == x509.ExternalCAType.MS_CS.value \ - and options.external_ca_profile is None: -- options.external_ca_profile = cainstance.MSCSTemplateV1(u"SubCA") -+ options.external_ca_profile = x509.MSCSTemplateV1(u"SubCA") - - if options.external_ca_profile is not None: - # check that profile is valid for the external ca type -@@ -352,11 +352,11 @@ def resubmit_request(self, ca=RENEWAL_CA_NAME, profile=None): - timeout = api.env.startup_timeout + 60 - - cm_profile = None -- if isinstance(profile, cainstance.MSCSTemplateV1): -+ if isinstance(profile, x509.MSCSTemplateV1): - cm_profile = profile.unparsed_input - - cm_template = None -- if isinstance(profile, cainstance.MSCSTemplateV2): -+ if isinstance(profile, x509.MSCSTemplateV2): - cm_template = profile.unparsed_input - - logger.debug("resubmitting certmonger request '%s'", self.request_id) -diff --git a/ipatests/test_integration/test_external_ca.py b/ipatests/test_integration/test_external_ca.py -index a42355217d..5aa2b7bba0 100644 ---- a/ipatests/test_integration/test_external_ca.py -+++ b/ipatests/test_integration/test_external_ca.py -@@ -108,14 +108,14 @@ def check_ipaca_issuerDN(host, expected_dn): - assert "Issuer DN: {}".format(expected_dn) in result.stdout_text - - --def check_mscs_extension(ipa_csr, oid, value): -+def check_mscs_extension(ipa_csr, template): - csr = x509.load_pem_x509_csr(ipa_csr, default_backend()) - extensions = [ - ext for ext in csr.extensions -- if ext.oid.dotted_string == oid -+ if ext.oid.dotted_string == template.ext_oid - ] - assert extensions -- assert extensions[0].value.value == value -+ assert extensions[0].value.value == template.get_ext_data() - - - class TestExternalCA(IntegrationTest): -@@ -134,10 +134,7 @@ def test_external_ca(self): - - # check CSR for extension - ipa_csr = self.master.get_file_contents(paths.ROOT_IPA_CSR) -- # Values for MSCSTemplateV1('SubCA') -- oid = 
"1.3.6.1.4.1.311.20.2" -- value = b'\x1e\n\x00S\x00u\x00b\x00C\x00A' -- check_mscs_extension(ipa_csr, oid, value) -+ check_mscs_extension(ipa_csr, ipa_x509.MSCSTemplateV1(u'SubCA')) - - # Sign CA, transport it to the host and get ipa a root ca paths. - root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport( -diff --git a/ipatests/test_ipalib/test_x509.py b/ipatests/test_ipalib/test_x509.py -index ff7e6de2f7..284b998316 100644 ---- a/ipatests/test_ipalib/test_x509.py -+++ b/ipatests/test_ipalib/test_x509.py -@@ -22,7 +22,11 @@ - """ - - import base64 -+from binascii import hexlify -+from configparser import RawConfigParser - import datetime -+from io import StringIO -+import pickle - - import pytest - -@@ -268,3 +272,114 @@ def test_ipa_demo_letsencrypt(self): - b'0 \x06\x03U\x1d%\x01\x01\xff\x04\x160\x14\x06\x08+\x06\x01' - b'\x05\x05\x07\x03\x01\x06\x08+\x06\x01\x05\x05\x07\x03\x02' - ) -+ -+ -+class test_ExternalCAProfile: -+ def test_MSCSTemplateV1_good(self): -+ o = x509.MSCSTemplateV1("MySubCA") -+ assert hexlify(o.get_ext_data()) == b'1e0e004d007900530075006200430041' -+ -+ def test_MSCSTemplateV1_bad(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV1("MySubCA:1") -+ -+ def test_MSCSTemplateV1_pickle_roundtrip(self): -+ o = x509.MSCSTemplateV1("MySubCA") -+ s = pickle.dumps(o) -+ assert o.get_ext_data() == pickle.loads(s).get_ext_data() -+ -+ def test_MSCSTemplateV2_too_few_parts(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4") -+ -+ def test_MSCSTemplateV2_too_many_parts(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:100:200:300") -+ -+ def test_MSCSTemplateV2_bad_oid(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("not_an_oid:1") -+ -+ def test_MSCSTemplateV2_non_numeric_major_version(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:major:200") -+ -+ def test_MSCSTemplateV2_non_numeric_minor_version(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:100:minor") -+ -+ def test_MSCSTemplateV2_major_version_lt_zero(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:-1:200") -+ -+ def test_MSCSTemplateV2_minor_version_lt_zero(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:100:-1") -+ -+ def test_MSCSTemplateV2_major_version_gt_max(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:4294967296:200") -+ -+ def test_MSCSTemplateV2_minor_version_gt_max(self): -+ with pytest.raises(ValueError): -+ x509.MSCSTemplateV2("1.2.3.4:100:4294967296") -+ -+ def test_MSCSTemplateV2_good_major(self): -+ o = x509.MSCSTemplateV2("1.2.3.4:4294967295") -+ assert hexlify(o.get_ext_data()) == b'300c06032a0304020500ffffffff' -+ -+ def test_MSCSTemplateV2_good_major_minor(self): -+ o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0") -+ assert hexlify(o.get_ext_data()) \ -+ == b'300f06032a0304020500ffffffff020100' -+ -+ def test_MSCSTemplateV2_pickle_roundtrip(self): -+ o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0") -+ s = pickle.dumps(o) -+ assert o.get_ext_data() == pickle.loads(s).get_ext_data() -+ -+ def test_ExternalCAProfile_dispatch(self): -+ """ -+ Test that constructing ExternalCAProfile actually returns an -+ instance of the appropriate subclass. 
-+ """ -+ assert isinstance( -+ x509.ExternalCAProfile("MySubCA"), -+ x509.MSCSTemplateV1) -+ assert isinstance( -+ x509.ExternalCAProfile("1.2.3.4:100"), -+ x509.MSCSTemplateV2) -+ -+ def test_write_pkispawn_config_file_MSCSTemplateV1(self): -+ template = x509.MSCSTemplateV1(u"SubCA") -+ expected = ( -+ '[CA]\n' -+ 'pki_req_ext_oid = 1.3.6.1.4.1.311.20.2\n' -+ 'pki_req_ext_data = 1e0a00530075006200430041\n\n' -+ ) -+ self._test_write_pkispawn_config_file(template, expected) -+ -+ def test_write_pkispawn_config_file_MSCSTemplateV2(self): -+ template = x509.MSCSTemplateV2(u"1.2.3.4:4294967295") -+ expected = ( -+ '[CA]\n' -+ 'pki_req_ext_oid = 1.3.6.1.4.1.311.21.7\n' -+ 'pki_req_ext_data = 300c06032a0304020500ffffffff\n\n' -+ ) -+ self._test_write_pkispawn_config_file(template, expected) -+ -+ def _test_write_pkispawn_config_file(self, template, expected): -+ """ -+ Test that the values we read from an ExternalCAProfile -+ object can be used to produce a reasonable-looking pkispawn -+ configuration. -+ """ -+ config = RawConfigParser() -+ config.optionxform = str -+ config.add_section("CA") -+ config.set("CA", "pki_req_ext_oid", template.ext_oid) -+ config.set("CA", "pki_req_ext_data", -+ hexlify(template.get_ext_data()).decode('ascii')) -+ out = StringIO() -+ config.write(out) -+ assert out.getvalue() == expected -diff --git a/ipatests/test_ipaserver/test_install/test_cainstance.py b/ipatests/test_ipaserver/test_install/test_cainstance.py -deleted file mode 100644 -index 02d9758e4a..0000000000 ---- a/ipatests/test_ipaserver/test_install/test_cainstance.py -+++ /dev/null -@@ -1,123 +0,0 @@ --# --# Copyright (C) 2017 FreeIPA Contributors see COPYING for license --# -- --from binascii import hexlify --from io import StringIO --import pickle --from configparser import RawConfigParser --import pytest --from ipaserver.install import cainstance -- --pytestmark = pytest.mark.tier0 -- -- --class test_ExternalCAProfile: -- def test_MSCSTemplateV1_good(self): -- o = cainstance.MSCSTemplateV1("MySubCA") -- assert hexlify(o.get_ext_data()) == b'1e0e004d007900530075006200430041' -- -- def test_MSCSTemplateV1_bad(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV1("MySubCA:1") -- -- def test_MSCSTemplateV1_pickle_roundtrip(self): -- o = cainstance.MSCSTemplateV1("MySubCA") -- s = pickle.dumps(o) -- assert o.get_ext_data() == pickle.loads(s).get_ext_data() -- -- def test_MSCSTemplateV2_too_few_parts(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4") -- -- def test_MSCSTemplateV2_too_many_parts(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:100:200:300") -- -- def test_MSCSTemplateV2_bad_oid(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("not_an_oid:1") -- -- def test_MSCSTemplateV2_non_numeric_major_version(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:major:200") -- -- def test_MSCSTemplateV2_non_numeric_minor_version(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:100:minor") -- -- def test_MSCSTemplateV2_major_version_lt_zero(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:-1:200") -- -- def test_MSCSTemplateV2_minor_version_lt_zero(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:100:-1") -- -- def test_MSCSTemplateV2_major_version_gt_max(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:4294967296:200") -- -- def 
test_MSCSTemplateV2_minor_version_gt_max(self): -- with pytest.raises(ValueError): -- cainstance.MSCSTemplateV2("1.2.3.4:100:4294967296") -- -- def test_MSCSTemplateV2_good_major(self): -- o = cainstance.MSCSTemplateV2("1.2.3.4:4294967295") -- assert hexlify(o.get_ext_data()) == b'300c06032a0304020500ffffffff' -- -- def test_MSCSTemplateV2_good_major_minor(self): -- o = cainstance.MSCSTemplateV2("1.2.3.4:4294967295:0") -- assert hexlify(o.get_ext_data()) \ -- == b'300f06032a0304020500ffffffff020100' -- -- def test_MSCSTemplateV2_pickle_roundtrip(self): -- o = cainstance.MSCSTemplateV2("1.2.3.4:4294967295:0") -- s = pickle.dumps(o) -- assert o.get_ext_data() == pickle.loads(s).get_ext_data() -- -- def test_ExternalCAProfile_dispatch(self): -- """ -- Test that constructing ExternalCAProfile actually returns an -- instance of the appropriate subclass. -- """ -- assert isinstance( -- cainstance.ExternalCAProfile("MySubCA"), -- cainstance.MSCSTemplateV1) -- assert isinstance( -- cainstance.ExternalCAProfile("1.2.3.4:100"), -- cainstance.MSCSTemplateV2) -- -- def test_write_pkispawn_config_file_MSCSTemplateV1(self): -- template = cainstance.MSCSTemplateV1(u"SubCA") -- expected = ( -- '[CA]\n' -- 'pki_req_ext_oid = 1.3.6.1.4.1.311.20.2\n' -- 'pki_req_ext_data = 1e0a00530075006200430041\n\n' -- ) -- self._test_write_pkispawn_config_file(template, expected) -- -- def test_write_pkispawn_config_file_MSCSTemplateV2(self): -- template = cainstance.MSCSTemplateV2(u"1.2.3.4:4294967295") -- expected = ( -- '[CA]\n' -- 'pki_req_ext_oid = 1.3.6.1.4.1.311.21.7\n' -- 'pki_req_ext_data = 300c06032a0304020500ffffffff\n\n' -- ) -- self._test_write_pkispawn_config_file(template, expected) -- -- def _test_write_pkispawn_config_file(self, template, expected): -- """ -- Test that the values we read from an ExternalCAProfile -- object can be used to produce a reasonable-looking pkispawn -- configuration. -- """ -- config = RawConfigParser() -- config.optionxform = str -- config.add_section("CA") -- config.set("CA", "pki_req_ext_oid", template.ext_oid) -- config.set("CA", "pki_req_ext_data", -- hexlify(template.get_ext_data()).decode('ascii')) -- out = StringIO() -- config.write(out) -- assert out.getvalue() == expected -From e632b220798833bcd65c6b266610c800ed0914d7 Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Fri, 12 Jul 2019 13:13:02 +1000 -Subject: [PATCH] install: fix --external-ca-profile option - -Commit dd47cfc75a69618f486abefb70f2649ebf8264e7 removed the ability -to set pki_req_ext_oid and pki_req_ext_data in the pkispawn config. -This results in the --external-ca-profile option never setting the -requested values in the CSR (the default V1 template type specifying -"SubCA" is always used). - -Remove relevant fields from both ipaca_default.ini and -ipaca_customize.ini. This allows the IPA framework to set the -values (i.e. when --external-ca-type=ms-cs and ---external-ca-profile=... demand it). It also allows users to -override the pki_req_ext_* settings. 
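[Editor's note, not part of the patch] For reference, a minimal Python 3 sketch (the helper name der_bmpstring is invented here) of how the default V1 template value "SubCA" maps to the pki_req_ext_* values the framework now supplies to pkispawn instead of the hard-coded ini entries removed below; the bytes match the test_x509 assertions earlier in this series:

    # Sketch only: reproduces the constants asserted by the x509 tests above.
    def der_bmpstring(s):
        # BMPString = tag 0x1e, short-form length, UTF-16-BE payload
        data = s.encode('utf-16-be')
        assert len(data) < 128            # short-form length is enough for this example
        return bytes([0x1e, len(data)]) + data

    ext_oid = "1.3.6.1.4.1.311.20.2"                # V1 certificate template extension
    ext_data = der_bmpstring(u"SubCA").hex()        # -> '1e0a00530075006200430041'
    # pkispawn then receives, via the IPA framework rather than the ini files:
    #   [CA]
    #   pki_req_ext_oid = 1.3.6.1.4.1.311.20.2
    #   pki_req_ext_data = 1e0a00530075006200430041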
- -Part of: https://pagure.io/freeipa/issue/7548 -Related: https://pagure.io/freeipa/issue/5608 -Reviewed-By: Florence Blanc-Renaud ---- - install/share/ipaca_customize.ini | 5 ----- - install/share/ipaca_default.ini | 1 - - 2 files changed, 6 deletions(-) - -diff --git a/install/share/ipaca_customize.ini b/install/share/ipaca_customize.ini -index 130ec2c102..6d58579af8 100644 ---- a/install/share/ipaca_customize.ini -+++ b/install/share/ipaca_customize.ini -@@ -93,11 +93,6 @@ pki_ca_signing_key_type=%(ipa_ca_key_type)s - pki_ca_signing_signing_algorithm=%(ipa_ca_signing_algorithm)s - pki_ca_signing_token=%(pki_token_name)s - --# MS subca request ext data --pki_req_ext_oid=1.3.6.1.4.1.311.20.2 --pki_req_ext_critical=False --pki_req_ext_data=1E0A00530075006200430041 -- - ## ocspSigningCert cert-pki-ca - pki_ocsp_signing_key_algorithm=%(ipa_key_algorithm)s - pki_ocsp_signing_key_size=%(ipa_key_size)s -diff --git a/install/share/ipaca_default.ini b/install/share/ipaca_default.ini -index fedc1b9a74..2b9900286e 100644 ---- a/install/share/ipaca_default.ini -+++ b/install/share/ipaca_default.ini -@@ -115,7 +115,6 @@ pki_ca_starting_crl_number=0 - - pki_external=False - pki_external_step_two=False --pki_req_ext_add=False - - pki_external_pkcs12_path=%(pki_pkcs12_path)s - pki_external_pkcs12_password=%(pki_pkcs12_password)s -From 71af731b3069fa1b2c0b51a3b917b5bc4da54350 Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Fri, 12 Jul 2019 13:24:51 +1000 -Subject: [PATCH] Fix use of incorrect variable - -Part of: https://pagure.io/freeipa/issue/7548 -Related: https://pagure.io/freeipa/issue/5608 -Reviewed-By: Florence Blanc-Renaud ---- - ipaserver/install/dogtaginstance.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ipaserver/install/dogtaginstance.py b/ipaserver/install/dogtaginstance.py -index cc75d89746..5dca721d6c 100644 ---- a/ipaserver/install/dogtaginstance.py -+++ b/ipaserver/install/dogtaginstance.py -@@ -853,7 +853,7 @@ def _verify_immutable(self, config, immutable_settings, filename): - if errs: - raise ValueError( - '{} overrides immutable options:\n{}'.format( -- filename, '\n'.join(errors) -+ filename, '\n'.join(errs) - ) - ) - -From 83ed05725110de19a7098678274ecaaaf6a2c9c9 Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Wed, 20 Feb 2019 18:34:33 +1100 -Subject: [PATCH] Add more tests for --external-ca-profile handling - -Add tests for remaining untested scenarios of --external-ca-profile -handling in ipa-server-install. - -ipa-ca-install and ipa-cacert-manage remain untested at present. 
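[Editor's note, not part of the patch] As a companion to the new tests, a hypothetical Python 3 sketch (assuming pyasn1 is available; the class name is invented and the field names are only modeled on the templateMajorVersion/templateMinorVersion fields visible in the removed cainstance code above) showing how a V2 specifier such as "1.2.3.4:4294967295" encodes to the DER bytes the tests assert:

    from pyasn1.type import univ, namedtype
    from pyasn1.codec.der import encoder

    class V2TemplateExt(univ.Sequence):          # illustrative name
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('templateID', univ.ObjectIdentifier()),
            namedtype.NamedType('templateMajorVersion', univ.Integer()),
            namedtype.OptionalNamedType('templateMinorVersion', univ.Integer()),
        )

    ext = V2TemplateExt()
    ext['templateID'] = univ.ObjectIdentifier('1.2.3.4')
    ext['templateMajorVersion'] = 4294967295     # minor version omitted
    print(encoder.encode(ext).hex())
    # -> 300c06032a0304020500ffffffff  (the value asserted for '1.2.3.4:4294967295')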
- -Fixes: https://pagure.io/freeipa/issue/7548 -Reviewed-By: Florence Blanc-Renaud ---- - ipatests/test_integration/test_external_ca.py | 97 ++++++++++++++++++- - 1 file changed, 95 insertions(+), 2 deletions(-) - -diff --git a/ipatests/test_integration/test_external_ca.py b/ipatests/test_integration/test_external_ca.py -index 5aa2b7bba0..dc9a09b43b 100644 ---- a/ipatests/test_integration/test_external_ca.py -+++ b/ipatests/test_integration/test_external_ca.py -@@ -74,10 +74,10 @@ def match_in_journal(host, string, since='today', services=('certmonger',)): - return match - - --def install_server_external_ca_step1(host, extra_args=()): -+def install_server_external_ca_step1(host, extra_args=(), raiseonerr=True): - """Step 1 to install the ipa server with external ca""" - return tasks.install_master( -- host, external_ca=True, extra_args=extra_args -+ host, external_ca=True, extra_args=extra_args, raiseonerr=raiseonerr, - ) - - -@@ -478,3 +478,96 @@ def test_master_install_ca2(self): - 'certutil', '-L', '-d', paths.PKI_TOMCAT_ALIAS_DIR, - '-n', cert_nick]) - assert "CN=RootCA2" in result.stdout_text -+ -+ -+def _step1_profile(master, s): -+ return install_server_external_ca_step1( -+ master, -+ extra_args=['--external-ca-type=ms-cs', f'--external-ca-profile={s}'], -+ raiseonerr=False, -+ ) -+ -+ -+def _test_invalid_profile(master, profile): -+ result = _step1_profile(master, profile) -+ assert result.returncode != 0 -+ assert '--external-ca-profile' in result.stderr_text -+ -+ -+def _test_valid_profile(master, profile_cls, profile): -+ result = _step1_profile(master, profile) -+ assert result.returncode == 0 -+ ipa_csr = master.get_file_contents(paths.ROOT_IPA_CSR) -+ check_mscs_extension(ipa_csr, profile_cls(profile)) -+ -+ -+class TestExternalCAProfileV1(IntegrationTest): -+ """ -+ Test that --external-ca-profile=Foo gets propagated to the CSR. -+ -+ The default template extension when --external-ca-type=ms-cs, -+ a V1 extension with value "SubCA", already gets tested by the -+ ``TestExternalCA`` class. -+ -+ We only need to do Step 1 of installation, then check the CSR. -+ -+ """ -+ def test_invalid_v1_template(self): -+ _test_invalid_profile(self.master, 'NotAnOid:1') -+ -+ def test_valid_v1_template(self): -+ _test_valid_profile( -+ self.master, ipa_x509.MSCSTemplateV1, 'TemplateOfAwesome') -+ -+ -+class TestExternalCAProfileV2MajorOnly(IntegrationTest): -+ """ -+ Test that V2 template specifiers without minor version get -+ propagated to CSR. This class also tests all error modes in -+ specifying a V2 template, those being: -+ -+ - no major version specified -+ - too many parts specified (i.e. major, minor, and then some more) -+ - major version is not an int -+ - major version is negative -+ - minor version is not an int -+ - minor version is negative -+ -+ We only need to do Step 1 of installation, then check the CSR. 
-+ -+ """ -+ def test_v2_template_too_few_parts(self): -+ _test_invalid_profile(self.master, '1.2.3.4') -+ -+ def test_v2_template_too_many_parts(self): -+ _test_invalid_profile(self.master, '1.2.3.4:100:200:300') -+ -+ def test_v2_template_major_version_not_int(self): -+ _test_invalid_profile(self.master, '1.2.3.4:wat:200') -+ -+ def test_v2_template_major_version_negative(self): -+ _test_invalid_profile(self.master, '1.2.3.4:-1:200') -+ -+ def test_v2_template_minor_version_not_int(self): -+ _test_invalid_profile(self.master, '1.2.3.4:100:wat') -+ -+ def test_v2_template_minor_version_negative(self): -+ _test_invalid_profile(self.master, '1.2.3.4:100:-2') -+ -+ def test_v2_template_valid_major_only(self): -+ _test_valid_profile( -+ self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100') -+ -+ -+class TestExternalCAProfileV2MajorMinor(IntegrationTest): -+ """ -+ Test that V2 template specifiers _with_ minor version get -+ propagated to CSR. All error modes of V2 template specifiers -+ were tested in ``TestExternalCAProfileV2Major``. -+ -+ We only need to do Step 1 of installation, then check the CSR. -+ -+ """ -+ def test_v2_template_valid_major_minor(self): -+ _test_valid_profile( -+ self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100:200') -From a627df87c31e4d8399bd9fab43c0c4772ddd8955 Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Thu, 11 Jul 2019 20:22:33 +1000 -Subject: [PATCH] Collapse --external-ca-profile tests into single class - -To avoid having to spawn new CI hosts for each kind of ---external-ca-profile argument we are testing, collapse the three -separate test classes into one. Uninstall the half-installed IPA -after each section of tests. - -This change is in response to review comment -https://github.com/freeipa/freeipa/pull/2852#pullrequestreview-220442170. - -Part of: https://pagure.io/freeipa/issue/7548 - -Reviewed-By: Florence Blanc-Renaud ---- - ipatests/test_integration/test_external_ca.py | 34 ++++++++++++++----- - 1 file changed, 26 insertions(+), 8 deletions(-) - -diff --git a/ipatests/test_integration/test_external_ca.py b/ipatests/test_integration/test_external_ca.py -index dc9a09b43b..714aebd4a8 100644 ---- a/ipatests/test_integration/test_external_ca.py -+++ b/ipatests/test_integration/test_external_ca.py -@@ -501,8 +501,18 @@ def _test_valid_profile(master, profile_cls, profile): - check_mscs_extension(ipa_csr, profile_cls(profile)) - - --class TestExternalCAProfileV1(IntegrationTest): -+class TestExternalCAProfileScenarios(IntegrationTest): - """ -+ Test the various --external-ca-profile scenarios. -+ This test is broken into sections, with each section first -+ testing invalid arguments, then a valid argument, and finally -+ uninstalling the half-installed IPA. -+ -+ """ -+ -+ ''' -+ Tranche 1: version 1 templates. -+ - Test that --external-ca-profile=Foo gets propagated to the CSR. - - The default template extension when --external-ca-type=ms-cs, -@@ -511,7 +521,7 @@ class TestExternalCAProfileV1(IntegrationTest): - - We only need to do Step 1 of installation, then check the CSR. - -- """ -+ ''' - def test_invalid_v1_template(self): - _test_invalid_profile(self.master, 'NotAnOid:1') - -@@ -519,9 +529,12 @@ def test_valid_v1_template(self): - _test_valid_profile( - self.master, ipa_x509.MSCSTemplateV1, 'TemplateOfAwesome') - -+ def test_uninstall_1(self): -+ tasks.uninstall_master(self.master) -+ -+ ''' -+ Tranche 2: V2 templates without minor version. 
- --class TestExternalCAProfileV2MajorOnly(IntegrationTest): -- """ - Test that V2 template specifiers without minor version get - propagated to CSR. This class also tests all error modes in - specifying a V2 template, those being: -@@ -535,7 +548,7 @@ class TestExternalCAProfileV2MajorOnly(IntegrationTest): - - We only need to do Step 1 of installation, then check the CSR. - -- """ -+ ''' - def test_v2_template_too_few_parts(self): - _test_invalid_profile(self.master, '1.2.3.4') - -@@ -558,16 +571,21 @@ def test_v2_template_valid_major_only(self): - _test_valid_profile( - self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100') - -+ def test_uninstall_2(self): -+ tasks.uninstall_master(self.master) -+ -+ ''' -+ Tranche 3: V2 templates with minor version. - --class TestExternalCAProfileV2MajorMinor(IntegrationTest): -- """ - Test that V2 template specifiers _with_ minor version get - propagated to CSR. All error modes of V2 template specifiers - were tested in ``TestExternalCAProfileV2Major``. - - We only need to do Step 1 of installation, then check the CSR. - -- """ -+ ''' - def test_v2_template_valid_major_minor(self): - _test_valid_profile( - self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100:200') -+ -+ # this is the end; no need to uninstall. -From 740964c3c47fd2cd216c233d8d9df1840eaa01ee Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Thu, 11 Jul 2019 20:27:02 +1000 -Subject: [PATCH] ci: add --external-ca-profile tests to nightly - -Part of: https://pagure.io/freeipa/issue/7548 - -Reviewed-By: Florence Blanc-Renaud ---- - ipatests/prci_definitions/nightly_f28.yaml | 12 ++++++++++++ - ipatests/prci_definitions/nightly_f29.yaml | 12 ++++++++++++ - ipatests/prci_definitions/nightly_master.yaml | 12 ++++++++++++ - ipatests/prci_definitions/nightly_master_pki.yaml | 12 ++++++++++++ - ipatests/prci_definitions/nightly_rawhide.yaml | 12 ++++++++++++ - 5 files changed, 60 insertions(+) - -diff --git a/ipatests/prci_definitions/nightly_f28.yaml b/ipatests/prci_definitions/nightly_f28.yaml -index fe86730444..d1605e6b5c 100644 ---- a/ipatests/prci_definitions/nightly_f28.yaml -+++ b/ipatests/prci_definitions/nightly_f28.yaml -@@ -75,6 +75,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-28/external_ca_templates: -+ requires: [fedora-28/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-28/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *ci-master-f28 -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-28/test_topologies: - requires: [fedora-28/build] - priority: 50 -diff --git a/ipatests/prci_definitions/nightly_f29.yaml b/ipatests/prci_definitions/nightly_f29.yaml -index 57c1b624fe..ed88eb15c8 100644 ---- a/ipatests/prci_definitions/nightly_f29.yaml -+++ b/ipatests/prci_definitions/nightly_f29.yaml -@@ -75,6 +75,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-29/external_ca_templates: -+ requires: [fedora-29/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-29/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *ci-master-f29 -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-29/test_topologies: - requires: [fedora-29/build] - priority: 50 -diff --git a/ipatests/prci_definitions/nightly_master.yaml b/ipatests/prci_definitions/nightly_master.yaml -index dc63f37426..0a66a13490 100644 ---- a/ipatests/prci_definitions/nightly_master.yaml -+++ 
b/ipatests/prci_definitions/nightly_master.yaml -@@ -75,6 +75,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-30/external_ca_templates: -+ requires: [fedora-30/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-30/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *ci-master-f30 -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-30/test_topologies: - requires: [fedora-30/build] - priority: 50 -diff --git a/ipatests/prci_definitions/nightly_master_pki.yaml b/ipatests/prci_definitions/nightly_master_pki.yaml -index 1bb0af0244..ed2e38d3ed 100644 ---- a/ipatests/prci_definitions/nightly_master_pki.yaml -+++ b/ipatests/prci_definitions/nightly_master_pki.yaml -@@ -75,6 +75,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-29/external_ca_templates: -+ requires: [fedora-29/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-29/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *pki-master-f29 -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-29/test_vault: - requires: [fedora-29/build] - priority: 50 -diff --git a/ipatests/prci_definitions/nightly_rawhide.yaml b/ipatests/prci_definitions/nightly_rawhide.yaml -index 301878467c..14433fcc0a 100644 ---- a/ipatests/prci_definitions/nightly_rawhide.yaml -+++ b/ipatests/prci_definitions/nightly_rawhide.yaml -@@ -75,6 +75,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-rawhide/external_ca_templates: -+ requires: [fedora-rawhide/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-rawhide/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *ci-master-frawhide -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-rawhide/test_topologies: - requires: [fedora-rawhide/build] - priority: 50 -From 011c5283cec28ea4361eff5d2ee98da9cd3db41a Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Thu, 11 Jul 2019 20:27:02 +1000 -Subject: [PATCH] ci: add --external-ca-profile tests to gating - -Part of: https://pagure.io/freeipa/issue/7548 - -Reviewed-By: Florence Blanc-Renaud ---- - ipatests/prci_definitions/gating.yaml | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/ipatests/prci_definitions/gating.yaml b/ipatests/prci_definitions/gating.yaml -index 4d0107d956..81fa4bba10 100644 ---- a/ipatests/prci_definitions/gating.yaml -+++ b/ipatests/prci_definitions/gating.yaml -@@ -87,6 +87,18 @@ jobs: - timeout: 3600 - topology: *master_1repl - -+ fedora-30/external_ca_templates: -+ requires: [fedora-30/build] -+ priority: 50 -+ job: -+ class: RunPytest -+ args: -+ build_url: '{fedora-30/build_url}' -+ test_suite: test_integration/test_external_ca.py::TestExternalCAProfileScenarios -+ template: *ci-master-f30 -+ timeout: 3600 -+ topology: *master_1repl -+ - fedora-30/test_topologies: - requires: [fedora-30/build] - priority: 50 diff --git a/SOURCES/0006-install-updates-move-external-members-past-schema-compat-update_14dbf04_rhbz#1803165.patch b/SOURCES/0006-install-updates-move-external-members-past-schema-compat-update_14dbf04_rhbz#1803165.patch new file mode 100644 index 0000000..04a3bdf --- /dev/null +++ b/SOURCES/0006-install-updates-move-external-members-past-schema-compat-update_14dbf04_rhbz#1803165.patch @@ -0,0 +1,62 @@ +From 14dbf04148c6284b176eca34aa70df4bef09b857 Mon Sep 17 00:00:00 2001 +From: Alexander Bokovoy 
+Date: Feb 12 2020 14:16:42 +0000 +Subject: install/updates: move external members past schema compat update + + +There is an ordering discrepancy because the base compat tree +configuration is in install/updates/80-schema_compat.update so it is ran +after 50-externalmembers.update. And since at that point +cn=groups,cn=Schema ... does not exist yet, external members +configuration is not applied. + +Move it around to make sure it is applied after Schema Compatibility +plugin configuration is created. + +Fixes: https://pagure.io/freeipa/issue/8193 +Signed-off-by: Alexander Bokovoy +Reviewed-By: Florence Blanc-Renaud + +--- + +diff --git a/install/updates/50-externalmembers.update b/install/updates/50-externalmembers.update +deleted file mode 100644 +index 6b9c5dd..0000000 +--- a/install/updates/50-externalmembers.update ++++ /dev/null +@@ -1,3 +0,0 @@ +-dn: cn=groups,cn=Schema Compatibility,cn=plugins,cn=config +-addifexist: schema-compat-entry-attribute: ipaexternalmember=%deref_r("member","ipaexternalmember") +-addifexist: schema-compat-entry-attribute: objectclass=ipaexternalgroup +diff --git a/install/updates/81-externalmembers.update b/install/updates/81-externalmembers.update +new file mode 100644 +index 0000000..6b9c5dd +--- /dev/null ++++ b/install/updates/81-externalmembers.update +@@ -0,0 +1,3 @@ ++dn: cn=groups,cn=Schema Compatibility,cn=plugins,cn=config ++addifexist: schema-compat-entry-attribute: ipaexternalmember=%deref_r("member","ipaexternalmember") ++addifexist: schema-compat-entry-attribute: objectclass=ipaexternalgroup +diff --git a/install/updates/Makefile.am b/install/updates/Makefile.am +index 68facba..8a4d9cc 100644 +--- a/install/updates/Makefile.am ++++ b/install/updates/Makefile.am +@@ -52,7 +52,6 @@ app_DATA = \ + 50-krbenctypes.update \ + 50-nis.update \ + 50-ipaconfig.update \ +- 50-externalmembers.update \ + 55-pbacmemberof.update \ + 59-trusts-sysacount.update \ + 60-trusts.update \ +@@ -65,7 +64,8 @@ app_DATA = \ + 73-winsync.update \ + 73-certmap.update \ + 75-user-trust-attributes.update \ +- 80-schema_compat.update \ ++ 80-schema_compat.update \ ++ 81-externalmembers.update \ + 90-post_upgrade_plugins.update \ + $(NULL) + + diff --git a/SOURCES/0007-Allow-insecure-binds-for-migration-8e207fd3_rhbz#1731963.patch b/SOURCES/0007-Allow-insecure-binds-for-migration-8e207fd3_rhbz#1731963.patch deleted file mode 100644 index 57b2ba4..0000000 --- a/SOURCES/0007-Allow-insecure-binds-for-migration-8e207fd3_rhbz#1731963.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 8e207fd33d524f5cde2dfd8a41a08926a328a92b Mon Sep 17 00:00:00 2001 -From: Christian Heimes -Date: Tue, 13 Aug 2019 17:22:01 +0200 -Subject: [PATCH] Allow insecure binds for migration - -Commit 5be9341fbabaf7bcb396a2ce40f17e1ccfa54b77 disallowed simple bind -over an insecure connection. Password logins were only allowed over LDAPS -or LDAP+STARTTLS. The restriction broke 'ipa migrate-ds' in some cases. - -This commit lifts the restriction and permits insecure binds over plain -LDAP. It also makes the migrate-ds plugin use STARTTLS when a CA -certificate is configured with a plain LDAP connection. 
- -Fixes: https://pagure.io/freeipa/issue/8040 -Signed-off-by: Christian Heimes -Reviewed-By: Thomas Woerner ---- - ipapython/ipaldap.py | 8 +++++--- - ipaserver/plugins/migration.py | 9 ++++----- - 2 files changed, 9 insertions(+), 8 deletions(-) - -diff --git a/ipapython/ipaldap.py b/ipapython/ipaldap.py -index 9ff443fe4f..f40858e27f 100644 ---- a/ipapython/ipaldap.py -+++ b/ipapython/ipaldap.py -@@ -1206,12 +1206,14 @@ def _connect(self): - return conn - - def simple_bind(self, bind_dn, bind_password, server_controls=None, -- client_controls=None): -+ client_controls=None, insecure_bind=False): - """ - Perform simple bind operation. - """ -- if self.protocol == 'ldap' and not self._start_tls and bind_password: -- # non-empty bind must use a secure connection -+ if (self.protocol == 'ldap' and not self._start_tls and -+ bind_password and not insecure_bind): -+ # non-empty bind must use a secure connection unless -+ # insecure bind is explicitly enabled - raise ValueError('simple_bind over insecure LDAP connection') - with self.error_handler(): - self._flush_schema() -diff --git a/ipaserver/plugins/migration.py b/ipaserver/plugins/migration.py -index d0ca8369ae..b025c46cc5 100644 ---- a/ipaserver/plugins/migration.py -+++ b/ipaserver/plugins/migration.py -@@ -901,20 +901,19 @@ def execute(self, ldapuri, bindpw, **options): - return dict(result={}, failed={}, enabled=False, compat=True) - - # connect to DS -- cacert = None - if options.get('cacertfile') is not None: - # store CA cert into file - tmp_ca_cert_f = write_tmp_file(options['cacertfile']) - cacert = tmp_ca_cert_f.name - -- # start TLS connection -- ds_ldap = LDAPClient(ldapuri, cacert=cacert) -+ # start TLS connection or STARTTLS -+ ds_ldap = LDAPClient(ldapuri, cacert=cacert, start_tls=True) - ds_ldap.simple_bind(options['binddn'], bindpw) - - tmp_ca_cert_f.close() - else: -- ds_ldap = LDAPClient(ldapuri, cacert=cacert) -- ds_ldap.simple_bind(options['binddn'], bindpw) -+ ds_ldap = LDAPClient(ldapuri) -+ ds_ldap.simple_bind(options['binddn'], bindpw, insecure_bind=True) - - # check whether the compat plugin is enabled - if not options.get('compat'): diff --git a/SOURCES/0007-kdb-make-sure-audit_as_req-callback-signature-change-is-preserved_rhbz#1803786.patch b/SOURCES/0007-kdb-make-sure-audit_as_req-callback-signature-change-is-preserved_rhbz#1803786.patch new file mode 100644 index 0000000..593f8e0 --- /dev/null +++ b/SOURCES/0007-kdb-make-sure-audit_as_req-callback-signature-change-is-preserved_rhbz#1803786.patch @@ -0,0 +1,67 @@ +From 30b8c8b9985a5eb41e700b80fd03f95548e45fba Mon Sep 17 00:00:00 2001 +From: Alexander Bokovoy +Date: Mon, 17 Feb 2020 13:58:51 +0200 +Subject: [PATCH] kdb: make sure audit_as_req callback signature change is + preserved + +audit_as_req() callback has changed its signature with MIT krb5 commit +20991d55efbe1f987c1dbc1065f2d58c8f34031b in 2017, we should preserve the +change for any newer DAL versions. Otherwise audit_as_req() callback +would reference wrong data and we might crash. 
+ +Fixes: https://pagure.io/freeipa/issue/8200 +Signed-off-by: Alexander Bokovoy +Reviewed-By: Christian Heimes +--- + daemons/ipa-kdb/ipa_kdb.h | 2 +- + daemons/ipa-kdb/ipa_kdb_audit_as.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/daemons/ipa-kdb/ipa_kdb.h b/daemons/ipa-kdb/ipa_kdb.h +index 7519f26e26..ae37a5a033 100644 +--- a/daemons/ipa-kdb/ipa_kdb.h ++++ b/daemons/ipa-kdb/ipa_kdb.h +@@ -345,7 +345,7 @@ krb5_error_code ipadb_check_allowed_to_delegate(krb5_context kcontext, + + void ipadb_audit_as_req(krb5_context kcontext, + krb5_kdc_req *request, +-#if (KRB5_KDB_DAL_MAJOR_VERSION == 7) ++#if (KRB5_KDB_DAL_MAJOR_VERSION >= 7) + const krb5_address *local_addr, + const krb5_address *remote_addr, + #endif +diff --git a/daemons/ipa-kdb/ipa_kdb_audit_as.c b/daemons/ipa-kdb/ipa_kdb_audit_as.c +index 77748a75d6..a60bc82b6d 100644 +--- a/daemons/ipa-kdb/ipa_kdb_audit_as.c ++++ b/daemons/ipa-kdb/ipa_kdb_audit_as.c +@@ -25,7 +25,7 @@ + + void ipadb_audit_as_req(krb5_context kcontext, + krb5_kdc_req *request, +-#if (KRB5_KDB_DAL_MAJOR_VERSION == 7) ++#if (KRB5_KDB_DAL_MAJOR_VERSION >= 7) + const krb5_address *local_addr, + const krb5_address *remote_addr, + #endif +#From 4eb48492b354ecc30ffe1dd9654dcc0e0e833d64 Mon Sep 17 00:00:00 2001 +#From: Alexander Bokovoy +#Date: Mon, 17 Feb 2020 14:21:10 +0200 +#Subject: [PATCH] Azure Pipelines: re-enable nodejs:12 stream for Fedora 31+ +# +#Reviewed-By: Christian Heimes +#--- +# ipatests/azure/templates/prepare-build.yml | 1 + +# 1 file changed, 1 insertion(+) +# +#diff --git a/ipatests/azure/templates/prepare-build.yml b/ipatests/azure/templates/prepare-build.yml +#index 0528efe129..643910f085 100644 +#--- a/ipatests/azure/templates/prepare-build.yml +#+++ b/ipatests/azure/templates/prepare-build.yml +#@@ -13,6 +13,7 @@ steps: +# for metalink in $(sudo dnf repolist -v |grep Repo-metalink | awk '{print $2}' ) ; do echo '###############' ; echo '####' ; echo $metalink ; echo '####' ; curl $metalink ; done +# echo "Fastestmirror results:" +# sudo cat /var/cache/dnf/fastestmirror.cache +#+ sudo dnf -y module enable nodejs:12 +# sudo dnf makecache || : +# echo "Installing base development environment" +# sudo dnf install -y \ diff --git a/SOURCES/0008-Allow-an-empty-cookie-in-dogtag-ipa-ca-renew-agent-submit_3d7d58d_rhbz#1790663.patch b/SOURCES/0008-Allow-an-empty-cookie-in-dogtag-ipa-ca-renew-agent-submit_3d7d58d_rhbz#1790663.patch new file mode 100644 index 0000000..586ac94 --- /dev/null +++ b/SOURCES/0008-Allow-an-empty-cookie-in-dogtag-ipa-ca-renew-agent-submit_3d7d58d_rhbz#1790663.patch @@ -0,0 +1,120 @@ +From 3d7d58d8214f3c899c0afd1a3a6a6678f38b7b39 Mon Sep 17 00:00:00 2001 +From: Rob Crittenden +Date: Jan 13 2020 18:41:53 +0000 +Subject: Allow an empty cookie in dogtag-ipa-ca-renew-agent-submit + + +A "cookie" is used with certmonger to track the state of a +request across multiple requests to a CA (in ca-cookie). This +is used with the certmonger POLL operation to submit a request +to the CA for the status of a certificate request. This, along +with the profile, are passed to the certmonger CA helper +scripts via environment variables when a request is made. It is +cleared from the certmonger request once the certificate is +issued. 
+ +This CA helper can do a number of things: + +- SUBMIT new certicate requests (including the CA) +- POLL for status of an existing certificate request +- For non renewal masters, POLL to see if an updated cert is in + LDAP + +A POLL operation requires a cookie so that the state about the +request can be passed to the CA. For the case of retrieving an +updated cert from LDAP there is no state to maintain. It just +checks LDAP and returns either a cert or WAIT_WITH_DELAY if one +is not yet available. + +There are two kinds of cookies in operation here: +1. The CERTMONGER_CA_COOKIE environment variable passed via + certmonger to this helper which is a JSON object. +2. The cookie value within the JSON object which contains the + URL to be passed to dogtag. + +For the purposes of clarity "cookie" here is the value within +the JSON. + +The CERTMONGER_CA_COOKIE is deconstructed and reconstructed as +the request is processed, doing double duty. It initially comes +in as a JSON dict object with two keys: profile and cookie. +In call_handler the CERTMONGER_CA_COOKIE is decomposed into a +python object and the profile compared to the requested profile +(and request rejected if they don't match) and the cookie key +overrides the CERTMONGER_CA_COOKIE environment variable. This is +then reversed at the end of the request when it again becomes a +JSON object containing the profile and cookie. + +This script was previously enforcing that a cookie be available on +all POLL requests, whether it is actually required or not. This +patch relaxes that requirement. + +The first request of a non-renewal master for an updated certicate +from LDAP is a SUBMIT operation. This is significant because it +doesn't require a cookie: there is no state on a new request. If +there is no updated cert in LDAP then the tracking request goes +into the CA_WORKING state and certmonger will wait 8 hours (as +returned by this script) and try again. + +Subsequent requests are done using POLL. This required a cookie +so all such requests would fail with the ca-error +Invalid cookie: u'' as it was empty (because there is no state). + +There is no need to fail early on a missing cookie. Enforcement +will be done later if needed (and it isn't always needed). So +if CERTMONGER_CA_COOKIE is an empty string then generate a new +CERTMONGER_CA_COOKIE containing the requested profile and an empty +cookie. It still will fail if certmonger doesn't set a cookie at +all. 
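[Editor's note, not part of the patch] A runnable Python 3 sketch of the handling described above (load_context is an invented name; the real logic lives in call_handler in the diff below):

    import json
    import os

    def load_context():
        cookie = os.environ.pop('CERTMONGER_CA_COOKIE', None)
        if cookie is None:
            return None, "Cookie not provided"      # certmonger set nothing at all
        if cookie:
            return json.loads(cookie), None         # normal POLL with saved state
        # Empty cookie: e.g. first poll of a non-renewal master, no state to carry.
        return {'cookie': u'',
                'profile': os.environ.get('CERTMONGER_CA_PROFILE')}, None

    os.environ['CERTMONGER_CA_COOKIE'] = ''
    os.environ['CERTMONGER_CA_PROFILE'] = 'caServerCert'
    print(load_context())   # ({'cookie': '', 'profile': 'caServerCert'}, None)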
+ +An example of a cookie when retrieving a new RA Agent certificate +is: + +{"profile": "caServerCert", "cookie": "state=retrieve&requestId=20"} + +This will result in this request to the CA: +[09/Jan/2020:14:29:54 -0500] "GET +/ca/ee/ca/displayCertFromRequest?requestId=20&importCert=true&xml=true +HTTP/1.1" 200 9857 + +For a renewal, the reconstructed cookie will consist of: + +{"profile": "caServerCert", "cookie": ""} + +https://pagure.io/freeipa/issue/8164 + +Reviewed-By: Florence Blanc-Renaud + +--- + +diff --git a/install/certmonger/dogtag-ipa-ca-renew-agent-submit.in b/install/certmonger/dogtag-ipa-ca-renew-agent-submit.in +index 10efb4c..95ad080 100644 +--- a/install/certmonger/dogtag-ipa-ca-renew-agent-submit.in ++++ b/install/certmonger/dogtag-ipa-ca-renew-agent-submit.in +@@ -123,7 +123,9 @@ def call_handler(_handler, *args, **kwargs): + operation = os.environ['CERTMONGER_OPERATION'] + if operation == 'POLL': + cookie = os.environ.pop('CERTMONGER_CA_COOKIE', None) +- if cookie is not None: ++ if cookie is None: ++ return (UNCONFIGURED, "Cookie not provided") ++ if len(cookie) > 0: + try: + context = json.loads(cookie) + if not isinstance(context, dict): +@@ -131,7 +133,13 @@ def call_handler(_handler, *args, **kwargs): + except (TypeError, ValueError): + return (UNCONFIGURED, "Invalid cookie: %r" % cookie) + else: +- return (UNCONFIGURED, "Cookie not provided") ++ # Reconstruct the data for the missing cookie. Sanity checking ++ # is done elsewhere, when needed. ++ context = dict(cookie=u'') ++ profile = os.environ.get('CERTMONGER_CA_PROFILE') ++ if profile is not None: ++ profile = profile.encode('ascii').decode('raw_unicode_escape') ++ context['profile'] = profile + + if 'profile' in context: + profile = context.pop('profile') + diff --git a/SOURCES/0008-install-Add-missing-scripts-to-app_DATA_rhbz#1741170.patch b/SOURCES/0008-install-Add-missing-scripts-to-app_DATA_rhbz#1741170.patch deleted file mode 100644 index 4b5ecd9..0000000 --- a/SOURCES/0008-install-Add-missing-scripts-to-app_DATA_rhbz#1741170.patch +++ /dev/null @@ -1,104 +0,0 @@ -install/updates/30-ipservices.update from 39eaf2fa as it is not part of the -release tarball of 4.8.0 but needed for 27586cb7: - -commit 39eaf2fab5e27bd12edfb2a24c439a8ea5fb26f0 -Author: Christian Heimes -Date: Fri Dec 7 13:08:49 2018 +0100 - - Add index and container for RFC 2307 IP services - - IPA doesn't officially support RFC 2307 IP services. However SSSD has a - nsswitch plugin to provide service lookups. The subtree search for - (&(ipserviceport=$PORT)(ipserviceprotocol=$SRV)(objectclass=ipservice)) in - cn=accounts,$SUFFIX has caused performance issues on large - installations. - - This patch introduced a dedicated container - cn=ipservices,cn=accounts,$SUFFIX for IP services for future use or 3rd - party extensions. SSSD will be change its search base in an upcoming - release, too. - - A new ipServicePort index is added to optimize searches for an IP - service by port. There is no index on ipServiceProtocol because the index - would have poor selectivity. An ipService entry has either 'tcp' or 'udp' - as protocol. 
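[Editor's note, not part of the patch] For context, a hypothetical Python sketch (python-ldap assumed; the URI and suffix are placeholders) of the kind of service lookup that now targets the dedicated container and can use the new ipServicePort index:

    import ldap

    def lookup_service(uri, suffix, port, proto):
        conn = ldap.initialize(uri)
        base = "cn=ipservices,cn=accounts,%s" % suffix
        filt = ("(&(ipServicePort=%d)(ipServiceProtocol=%s)"
                "(objectClass=ipService))" % (port, proto))
        return conn.search_s(base, ldap.SCOPE_SUBTREE, filt)

    # e.g. lookup_service("ldap://ipa.example.test", "dc=example,dc=test", 80, "tcp")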
- - Fixes: https://pagure.io/freeipa/issue/7797 - See: https://pagure.io/freeipa/issue/7786 - Signed-off-by: Christian Heimes - Reviewed-By: Alexander Bokovoy - -diff --git a/install/updates/30-ipservices.update b/install/updates/30-ipservices.update -new file mode 100644 -index 000000000..01a6d52f8 ---- /dev/null -+++ b/install/updates/30-ipservices.update -@@ -0,0 +1,6 @@ -+# container for RFC 2307 IP services -+ -+dn: cn=ipservices,cn=accounts,$SUFFIX -+default: objectClass: top -+default: objectClass: nsContainer -+default: cn: ipservices -install/updates/75-user-trust-attributes.update from c18ee9b6 as it is not -part of the release tarball of 4.8.0 but needed for 27586cb7: - -commit c18ee9b641ddc1e6b52d0413caa1fb98ac13785d -Author: Tibor Dudlák -Date: Tue Apr 2 16:23:09 2019 +0200 - - Add SMB attributes for users - - SMB attributes are used by Samba domain controller when reporting - details about IPA users via LSA DCE RPC calls. - - Based on the initial work from the external plugin: - https://github.com/abbra/freeipa-user-trust-attributes - - Related: https://pagure.io/freeipa/issue/3999 - - Signed-off-by: Alexander Bokovoy - Signed-off-by: Tibor Dudlák - Reviewed-By: Alexander Bokovoy - Reviewed-By: Tibor Dudlak - -diff --git a/install/updates/75-user-trust-attributes.update b/install/updates/75-user-trust-attributes.update -new file mode 100644 -index 000000000..43bb40c7d ---- /dev/null -+++ b/install/updates/75-user-trust-attributes.update -@@ -0,0 +1,5 @@ -+# Add an explicit self-service ACI to allow writing to manage trust attributes -+# for the owner of the object -+dn: cn=users,cn=accounts,$SUFFIX -+add:aci:(targetattr = "ipantlogonscript || ipantprofilepath || ipanthomedirectory || ipanthomedirectorydrive")(version 3.0;acl "system:Allow trust agents to read user SMB attributes";allow (read) groupdn = "ldap:///cn=adtrust agents,cn=sysaccounts,cn=etc,$SUFFIX";) -+add:aci:(targetattr = "ipantlogonscript || ipantprofilepath || ipanthomedirectory || ipanthomedirectorydrive")(version 3.0;acl "selfservice:Users can manage their SMB attributes";allow (write) userdn = "ldap:///self";) -commit 27586cb7ae32af191cb8a3c36fc8856957300f08 -Author: Timo Aaltonen -Date: Fri Aug 9 23:03:25 2019 +0300 - - install: Add missing scripts to app_DATA. 
- - Signed-off-by: Timo Aaltonen - Reviewed-By: Alexander Bokovoy - -diff --git a/install/updates/Makefile.am b/install/updates/Makefile.am -index bce8a56b1..68facbaf2 100644 ---- a/install/updates/Makefile.am -+++ b/install/updates/Makefile.am -@@ -30,6 +30,7 @@ app_DATA = \ - 21-ca_renewal_container.update \ - 21-certstore_container.update \ - 25-referint.update \ -+ 30-ipservices.update \ - 30-provisioning.update \ - 30-s4u2proxy.update \ - 37-locations.update \ -@@ -63,6 +64,7 @@ app_DATA = \ - 73-custodia.update \ - 73-winsync.update \ - 73-certmap.update \ -+ 75-user-trust-attributes.update \ - 80-schema_compat.update \ - 90-post_upgrade_plugins.update \ - $(NULL) diff --git a/SOURCES/0009-extdom-unify-error-code-handling-especially-LDAP_NO_SUCH_OBJECT_rhbz#1741530.patch b/SOURCES/0009-extdom-unify-error-code-handling-especially-LDAP_NO_SUCH_OBJECT_rhbz#1741530.patch deleted file mode 100644 index 237532d..0000000 --- a/SOURCES/0009-extdom-unify-error-code-handling-especially-LDAP_NO_SUCH_OBJECT_rhbz#1741530.patch +++ /dev/null @@ -1,345 +0,0 @@ -From 3bb72545fc337564e0843b0c72906a9a1e3f6a06 Mon Sep 17 00:00:00 2001 -From: Sumit Bose -Date: Fri, 14 Jun 2019 11:13:54 +0200 -Subject: [PATCH] extdom: unify error code handling especially - LDAP_NO_SUCH_OBJECT - -A return code LDAP_NO_SUCH_OBJECT will tell SSSD on the IPA client to -remove the searched object from the cache. As a consequence -LDAP_NO_SUCH_OBJECT should only be returned if the object really does -not exists otherwise the data of existing objects might be removed form -the cache of the clients causing unexpected behaviour like -authentication errors. - -Currently some code-paths use LDAP_NO_SUCH_OBJECT as default error code. -With this patch LDAP_NO_SUCH_OBJECT is only returned if the related -lookup functions return ENOENT. Timeout related error code will lead to -LDAP_TIMELIMIT_EXCEEDED and LDAP_OPERATIONS_ERROR is used as default -error code. 
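[Editor's note, not part of the patch] The mapping this change applies is spread over many C hunks; summarized as a small runnable Python sketch (illustration only, the actual change is in ipa_extdom_common.c below; errno.ETIME is Linux-specific):

    import errno

    def nss_error_to_ldap_rc(err):
        if err == errno.ENOENT:
            return "LDAP_NO_SUCH_OBJECT"        # object really does not exist
        if err in (errno.ETIMEDOUT, errno.ETIME):
            return "LDAP_TIMELIMIT_EXCEEDED"    # lookup timed out; client keeps its cache
        return "LDAP_OPERATIONS_ERROR"          # ENOMEM, ERANGE, anything else

    print(nss_error_to_ldap_rc(errno.ETIMEDOUT))   # LDAP_TIMELIMIT_EXCEEDED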
- -Fixes: https://pagure.io/freeipa/issue/8044 -Reviewed-By: Alexander Bokovoy ---- - .../ipa-extdom-extop/back_extdom_sss_idmap.c | 4 +- - .../ipa-extdom-extop/ipa_extdom_common.c | 77 ++++++++++++++----- - .../ipa-extdom-extop/ipa_extdom_extop.c | 2 + - 3 files changed, 61 insertions(+), 22 deletions(-) - -diff --git a/daemons/ipa-slapi-plugins/ipa-extdom-extop/back_extdom_sss_idmap.c b/daemons/ipa-slapi-plugins/ipa-extdom-extop/back_extdom_sss_idmap.c -index ef552a9a37..163e8e1371 100644 ---- a/daemons/ipa-slapi-plugins/ipa-extdom-extop/back_extdom_sss_idmap.c -+++ b/daemons/ipa-slapi-plugins/ipa-extdom-extop/back_extdom_sss_idmap.c -@@ -62,10 +62,10 @@ static enum nss_status __convert_sss_nss2nss_status(int errcode) { - return NSS_STATUS_SUCCESS; - case ENOENT: - return NSS_STATUS_NOTFOUND; -- case ETIME: -- /* fall-through */ - case ERANGE: - return NSS_STATUS_TRYAGAIN; -+ case ETIME: -+ /* fall-through */ - case ETIMEDOUT: - /* fall-through */ - default: -diff --git a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_common.c b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_common.c -index 525487c9e4..65c723ce65 100644 ---- a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_common.c -+++ b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_common.c -@@ -523,7 +523,7 @@ int pack_ber_user(struct ipa_extdom_ctx *ctx, - if (strcasecmp(locat+1, domain_name) == 0 ) { - locat[0] = '\0'; - } else { -- ret = LDAP_NO_SUCH_OBJECT; -+ ret = LDAP_INVALID_SYNTAX; - goto done; - } - } -@@ -568,10 +568,12 @@ int pack_ber_user(struct ipa_extdom_ctx *ctx, - ret = getgrgid_r_wrapper(ctx, - groups[c], &grp, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -634,7 +636,7 @@ int pack_ber_group(enum response_types response_type, - if (strcasecmp(locat+1, domain_name) == 0 ) { - locat[0] = '\0'; - } else { -- ret = LDAP_NO_SUCH_OBJECT; -+ ret = LDAP_INVALID_SYNTAX; - goto done; - } - } -@@ -836,6 +838,8 @@ static int handle_uid_request(struct ipa_extdom_ctx *ctx, - || id_type == SSS_ID_TYPE_BOTH)) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - set_err_msg(req, "Failed to lookup SID by UID"); - ret = LDAP_OPERATIONS_ERROR; -@@ -847,10 +851,12 @@ static int handle_uid_request(struct ipa_extdom_ctx *ctx, - } else { - ret = getpwuid_r_wrapper(ctx, uid, &pwd, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -862,6 +868,8 @@ static int handle_uid_request(struct ipa_extdom_ctx *ctx, - set_err_msg(req, "Failed to read original data"); - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - ret = LDAP_OPERATIONS_ERROR; - } -@@ -907,6 +915,8 @@ static int handle_gid_request(struct ipa_extdom_ctx *ctx, - if (ret != 0 || id_type != SSS_ID_TYPE_GID) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - 
set_err_msg(req, "Failed to lookup SID by GID"); - ret = LDAP_OPERATIONS_ERROR; -@@ -918,10 +928,12 @@ static int handle_gid_request(struct ipa_extdom_ctx *ctx, - } else { - ret = getgrgid_r_wrapper(ctx, gid, &grp, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -933,6 +945,8 @@ static int handle_gid_request(struct ipa_extdom_ctx *ctx, - set_err_msg(req, "Failed to read original data"); - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - ret = LDAP_OPERATIONS_ERROR; - } -@@ -976,6 +990,8 @@ static int handle_cert_request(struct ipa_extdom_ctx *ctx, - if (ret != 0) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - set_err_msg(req, "Failed to lookup name by certificate"); - ret = LDAP_OPERATIONS_ERROR; -@@ -1020,6 +1036,8 @@ static int handle_sid_request(struct ipa_extdom_ctx *ctx, - if (ret != 0) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - set_err_msg(req, "Failed to lookup name by SID"); - ret = LDAP_OPERATIONS_ERROR; -@@ -1057,10 +1075,12 @@ static int handle_sid_request(struct ipa_extdom_ctx *ctx, - case SSS_ID_TYPE_BOTH: - ret = getpwnam_r_wrapper(ctx, fq_name, &pwd, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -1072,6 +1092,8 @@ static int handle_sid_request(struct ipa_extdom_ctx *ctx, - set_err_msg(req, "Failed to read original data"); - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - ret = LDAP_OPERATIONS_ERROR; - } -@@ -1089,10 +1111,12 @@ static int handle_sid_request(struct ipa_extdom_ctx *ctx, - case SSS_ID_TYPE_GID: - ret = getgrnam_r_wrapper(ctx, fq_name, &grp, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -1104,6 +1128,8 @@ static int handle_sid_request(struct ipa_extdom_ctx *ctx, - set_err_msg(req, "Failed to read original data"); - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - ret = LDAP_OPERATIONS_ERROR; - } -@@ -1167,6 +1193,8 @@ static int handle_name_request(struct ipa_extdom_ctx *ctx, - if (ret != 0) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - set_err_msg(req, "Failed to lookup SID by name"); - ret = LDAP_OPERATIONS_ERROR; -@@ -1190,6 +1218,8 @@ static int handle_name_request(struct ipa_extdom_ctx *ctx, - set_err_msg(req, "Failed to read original data"); - if (ret == ENOENT) { - ret = 
LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - ret = LDAP_OPERATIONS_ERROR; - } -@@ -1205,6 +1235,9 @@ static int handle_name_request(struct ipa_extdom_ctx *ctx, - } else if (ret == ENOMEM || ret == ERANGE) { - ret = LDAP_OPERATIONS_ERROR; - goto done; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ goto done; - } else { /* no user entry found */ - /* according to the getpwnam() man page there are a couple of - * error codes which can indicate that the user was not found. To -@@ -1212,10 +1245,12 @@ static int handle_name_request(struct ipa_extdom_ctx *ctx, - * errors. */ - ret = getgrnam_r_wrapper(ctx, fq_name, &grp, &buf, &buf_len); - if (ret != 0) { -- if (ret == ENOMEM || ret == ERANGE) { -- ret = LDAP_OPERATIONS_ERROR; -- } else { -+ if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; -+ } else { -+ ret = LDAP_OPERATIONS_ERROR; - } - goto done; - } -@@ -1226,6 +1261,8 @@ static int handle_name_request(struct ipa_extdom_ctx *ctx, - || id_type == SSS_ID_TYPE_BOTH)) { - if (ret == ENOENT) { - ret = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == ETIMEDOUT || ret == ETIME) { -+ ret = LDAP_TIMELIMIT_EXCEEDED; - } else { - set_err_msg(req, "Failed to read original data"); - ret = LDAP_OPERATIONS_ERROR; -diff --git a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_extop.c b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_extop.c -index 10d3f86eba..48fcecc1ee 100644 ---- a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_extop.c -+++ b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_extop.c -@@ -242,6 +242,8 @@ static int ipa_extdom_extop(Slapi_PBlock *pb) - if (ret != LDAP_SUCCESS) { - if (ret == LDAP_NO_SUCH_OBJECT) { - rc = LDAP_NO_SUCH_OBJECT; -+ } else if (ret == LDAP_TIMELIMIT_EXCEEDED) { -+ rc = LDAP_TIMELIMIT_EXCEEDED; - } else { - rc = LDAP_OPERATIONS_ERROR; - err_msg = "Failed to handle the request.\n"; -From 0ead6f59732e8b3370c5d8d05acd29f2d56c52bb Mon Sep 17 00:00:00 2001 -From: Alexander Bokovoy -Date: Mon, 19 Aug 2019 10:15:50 +0300 -Subject: [PATCH] ipa-extdom-extop: test timed out getgrgid_r - -Simulate getgrgid_r() timeout when packing list of groups user is a -member of in pack_ber_user(). 
- -Related: https://pagure.io/freeipa/issue/8044 -Reviewed-By: Alexander Bokovoy ---- - .../ipa_extdom_cmocka_tests.c | 29 +++++++++++++++++++ - 1 file changed, 29 insertions(+) - -diff --git a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_cmocka_tests.c b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_cmocka_tests.c -index 29699cfa39..1fa4c6af82 100644 ---- a/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_cmocka_tests.c -+++ b/daemons/ipa-slapi-plugins/ipa-extdom-extop/ipa_extdom_cmocka_tests.c -@@ -493,6 +493,34 @@ void test_set_err_msg(void **state) - #define TEST_SID "S-1-2-3-4" - #define TEST_DOMAIN_NAME "DOMAIN" - -+/* Always time out for test */ -+static -+enum nss_status getgrgid_r_timeout(gid_t gid, struct group *result, -+ char *buffer, size_t buflen, int *errnop) { -+ return NSS_STATUS_UNAVAIL; -+} -+ -+void test_pack_ber_user_timeout(void **state) -+{ -+ int ret; -+ struct berval *resp_val = NULL; -+ struct test_data *test_data; -+ enum nss_status (*oldgetgrgid_r)(gid_t gid, struct group *result, -+ char *buffer, size_t buflen, int *errnop); -+ -+ test_data = (struct test_data *) *state; -+ -+ oldgetgrgid_r = test_data->ctx->nss_ctx->getgrgid_r; -+ test_data->ctx->nss_ctx->getgrgid_r = getgrgid_r_timeout; -+ -+ ret = pack_ber_user(test_data->ctx, RESP_USER_GROUPLIST, -+ TEST_DOMAIN_NAME, "member001", 12345, 54321, -+ "gecos", "homedir", "shell", NULL, &resp_val); -+ test_data->ctx->nss_ctx->getgrgid_r = oldgetgrgid_r; -+ assert_int_equal(ret, LDAP_TIMELIMIT_EXCEEDED); -+ ber_bvfree(resp_val); -+} -+ - char res_sid[] = {0x30, 0x0e, 0x0a, 0x01, 0x01, 0x04, 0x09, 0x53, 0x2d, 0x31, \ - 0x2d, 0x32, 0x2d, 0x33, 0x2d, 0x34}; - char res_nam[] = {0x30, 0x13, 0x0a, 0x01, 0x02, 0x30, 0x0e, 0x04, 0x06, 0x44, \ -@@ -614,6 +642,7 @@ void test_decode(void **state) - int main(int argc, const char *argv[]) - { - const struct CMUnitTest tests[] = { -+ cmocka_unit_test(test_pack_ber_user_timeout), - cmocka_unit_test(test_getpwnam_r_wrapper), - cmocka_unit_test(test_getpwuid_r_wrapper), - cmocka_unit_test(test_getgrnam_r_wrapper), diff --git a/SOURCES/0010-Fix-automount-behavior-with-authselect_rhbz#1740167.patch b/SOURCES/0010-Fix-automount-behavior-with-authselect_rhbz#1740167.patch deleted file mode 100644 index 60e22bb..0000000 --- a/SOURCES/0010-Fix-automount-behavior-with-authselect_rhbz#1740167.patch +++ /dev/null @@ -1,2004 +0,0 @@ -From abea98a9b918c0771ad10b314238b32c570f0372 Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Aug 29 2019 06:45:12 +0000 -Subject: ipatests: check that ipa-client-automount restores nsswitch.conf at uninstall time - - -Check that using ipa-client-install, ipa-client-automount --no-ssd, then uninstalling -both properly restores nsswitch.conf sequentially. 
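[Editor's note, not part of the patch] The automount states the new test asserts, collected here as a small Python sketch (values copied from the assertions below; the post-install value is simply whatever ipa-client-install/authselect configured):

    expected_automount = {
        "after ipa-client-automount":             ["sss", "files"],
        "after ipa-client-automount --no-sssd":   ["files", "ldap"],
        "after ipa-client-automount --uninstall": "value recorded after ipa-client-install",
        "after client uninstall":                 "original nsswitch.conf (same sha256)",
    }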
- -Related-to:: https://pagure.io/freeipa/issue/8038 -Signed-off-by: François Cami -Reviewed-By: Francois Cami -Reviewed-By: Rob Crittenden -Reviewed-By: Rob Critenden -Reviewed-By: François Cami - ---- - -#diff --git a/ipatests/prci_definitions/nightly_ipa-4-8.yaml b/ipatests/prci_definitions/nightly_ipa-4-8.yaml -#index ef5d2c6..f39e4b4 100644 -#--- a/ipatests/prci_definitions/nightly_ipa-4-8.yaml -#+++ b/ipatests/prci_definitions/nightly_ipa-4-8.yaml -#@@ -1257,6 +1257,18 @@ jobs: -# timeout: 9000 -# topology: *master_3client -# -#+ fedora-30/nfs_nsswitch_restore: -#+ requires: [fedora-30/build] -#+ priority: 50 -#+ job: -#+ class: RunPytest -#+ args: -#+ build_url: '{fedora-30/build_url}' -#+ test_suite: test_integration/test_nfs.py::TestIpaClientAutomountFileRestore -#+ template: *ci-master-f30 -#+ timeout: 3600 -#+ topology: *master_3client -#+ -# fedora-30/mask: -# requires: [fedora-30/build] -# priority: 50 -diff --git a/ipatests/test_integration/test_nfs.py b/ipatests/test_integration/test_nfs.py -index adfc19f..0e1ef6a 100644 ---- a/ipatests/test_integration/test_nfs.py -+++ b/ipatests/test_integration/test_nfs.py -@@ -15,6 +15,7 @@ - - from __future__ import absolute_import - -+import pytest - import os - import re - import time -@@ -258,3 +259,74 @@ class TestNFS(IntegrationTest): - time.sleep(WAIT_AFTER_UNINSTALL) - - self.cleanup() -+ -+ -+class TestIpaClientAutomountFileRestore(IntegrationTest): -+ -+ num_clients = 1 -+ topology = 'line' -+ -+ @classmethod -+ def install(cls, mh): -+ tasks.install_master(cls.master, setup_dns=True) -+ -+ def teardown_method(self, method): -+ tasks.uninstall_client(self.clients[0]) -+ -+ def nsswitch_backup_restore( -+ self, -+ no_sssd=False, -+ ): -+ -+ # In order to get a more pure sum, one that ignores the Generated -+ # header and any white space we have to do a bit of work... 
-+ sha256nsswitch_cmd = \ -+ 'egrep -v "Generated|^$" /etc/nsswitch.conf | sed "s/\\s//g" ' \ -+ '| sort | sha256sum' -+ -+ cmd = self.clients[0].run_command(sha256nsswitch_cmd) -+ orig_sha256 = cmd.stdout_text -+ -+ grep_automount_command = \ -+ "grep automount /etc/nsswitch.conf | cut -d: -f2" -+ -+ tasks.install_client(self.master, self.clients[0]) -+ cmd = self.clients[0].run_command(grep_automount_command) -+ after_ipa_client_install = cmd.stdout_text.split() -+ -+ if no_sssd: -+ ipa_client_automount_command = [ -+ "ipa-client-automount", "--no-sssd", "-U" -+ ] -+ else: -+ ipa_client_automount_command = [ -+ "ipa-client-automount", "-U" -+ ] -+ self.clients[0].run_command(ipa_client_automount_command) -+ cmd = self.clients[0].run_command(grep_automount_command) -+ after_ipa_client_automount = cmd.stdout_text.split() -+ if no_sssd: -+ assert after_ipa_client_automount == ['files', 'ldap'] -+ else: -+ assert after_ipa_client_automount == ['sss', 'files'] -+ -+ cmd = self.clients[0].run_command(grep_automount_command) -+ assert cmd.stdout_text.split() == after_ipa_client_automount -+ -+ self.clients[0].run_command([ -+ "ipa-client-automount", "--uninstall", "-U" -+ ]) -+ -+ cmd = self.clients[0].run_command(grep_automount_command) -+ assert cmd.stdout_text.split() == after_ipa_client_install -+ -+ tasks.uninstall_client(self.clients[0]) -+ cmd = self.clients[0].run_command(sha256nsswitch_cmd) -+ assert cmd.stdout_text == orig_sha256 -+ -+ @pytest.mark.xfail(reason='freeipa ticket 8054', strict=True) -+ def test_nsswitch_backup_restore_sssd(self): -+ self.nsswitch_backup_restore() -+ -+ def test_nsswitch_backup_restore_no_sssd(self): -+ self.nsswitch_backup_restore(no_sssd=True) - -From 2f0afeda6e66fcca5c184a4036112fcd315f2f6e Mon Sep 17 00:00:00 2001 -From: François Cami -Date: Aug 29 2019 06:45:12 +0000 -Subject: ipa-client-automount: always restore nsswitch.conf at uninstall time - - -ipa-client-automount used to only restore nsswitch.conf when sssd was not -used. However authselect's default profile is now sssd so always restore -nsswitch.conf's automount configuration to 'files sssd'. -Note that the behavior seen before commit: -a0e846f56c8de3b549d1d284087131da13135e34 -would always restore nsswitch.conf to the previous state which in some cases -was wrong. - -Fixes: https://pagure.io/freeipa/issue/8038 -Signed-off-by: François Cami -Reviewed-By: Francois Cami -Reviewed-By: Rob Crittenden -Reviewed-By: Rob Critenden -Reviewed-By: François Cami - ---- - -diff --git a/ipaclient/install/ipa_client_automount.py b/ipaclient/install/ipa_client_automount.py -index fa07598..a1dc2a1 100644 ---- a/ipaclient/install/ipa_client_automount.py -+++ b/ipaclient/install/ipa_client_automount.py -@@ -177,18 +177,30 @@ def configure_xml(fstore): - print("Configured %s" % authconf) - - --def configure_nsswitch(fstore, options): -+def configure_nsswitch(statestore, options): - """ -- Point automount to ldap in nsswitch.conf. This function is for non-SSSD -- setups only -+ Point automount to ldap in nsswitch.conf. -+ This function is for non-SSSD setups only. 
- """ -- fstore.backup_file(paths.NSSWITCH_CONF) -- - conf = ipachangeconf.IPAChangeConf("IPA Installer") - conf.setOptionAssignment(':') - -- nss_value = ' files ldap' -+ with open(paths.NSSWITCH_CONF, 'r') as f: -+ current_opts = conf.parse(f) -+ current_nss_value = conf.findOpts( -+ current_opts, name='automount', type='option' -+ )[1] -+ if current_nss_value is None: -+ # no automount database present -+ current_nss_value = False # None cannot be backed up -+ else: -+ current_nss_value = current_nss_value['value'] -+ statestore.backup_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount', -+ current_nss_value -+ ) - -+ nss_value = ' files ldap' - opts = [ - { - 'name': 'automount', -@@ -198,7 +210,6 @@ def configure_nsswitch(fstore, options): - }, - {'name': 'empty', 'type': 'empty'}, - ] -- - conf.changeConf(paths.NSSWITCH_CONF, opts) - - print("Configured %s" % paths.NSSWITCH_CONF) -@@ -322,19 +333,47 @@ def configure_autofs_common(fstore, statestore, options): - def uninstall(fstore, statestore): - RESTORE_FILES = [ - paths.SYSCONFIG_AUTOFS, -- paths.NSSWITCH_CONF, - paths.AUTOFS_LDAP_AUTH_CONF, - paths.SYSCONFIG_NFS, - paths.IDMAPD_CONF, - ] - STATES = ['autofs', 'rpcidmapd', 'rpcgssd'] - -- # automount only touches /etc/nsswitch.conf if LDAP is -- # used. Don't restore it otherwise. -- if statestore.get_state('authconfig', 'sssd') or ( -- statestore.get_state('authselect', 'profile') == 'sssd' -- ): -- RESTORE_FILES.remove(paths.NSSWITCH_CONF) -+ if statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) is False: -+ # Previous nsswitch.conf had no automount database configured -+ # so remove it. -+ conf = ipachangeconf.IPAChangeConf("IPA automount installer") -+ conf.setOptionAssignment(':') -+ changes = [conf.rmOption('automount')] -+ conf.changeConf(paths.NSSWITCH_CONF, changes) -+ tasks.restore_context(paths.NSSWITCH_CONF) -+ statestore.delete_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) -+ elif statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) is not None: -+ nss_value = statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) -+ opts = [ -+ { -+ 'name': 'automount', -+ 'type': 'option', -+ 'action': 'set', -+ 'value': nss_value, -+ }, -+ {'name': 'empty', 'type': 'empty'}, -+ ] -+ conf = ipachangeconf.IPAChangeConf("IPA automount installer") -+ conf.setOptionAssignment(':') -+ conf.changeConf(paths.NSSWITCH_CONF, opts) -+ tasks.restore_context(paths.NSSWITCH_CONF) -+ statestore.delete_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) - - if not any(fstore.has_file(f) for f in RESTORE_FILES) or not any( - statestore.has_state(s) for s in STATES -@@ -588,7 +627,7 @@ def configure_automount(): - - try: - if not options.sssd: -- configure_nsswitch(fstore, options) -+ configure_nsswitch(statestore, options) - configure_nfs(fstore, statestore, options) - if options.sssd: - configure_autofs_sssd(fstore, statestore, autodiscover, options) - -From 6e92776bfc199e9ca92e11ef3315dcecad3c9307 Mon Sep 17 00:00:00 2001 -From: Rob Critenden -Date: Aug 29 2019 06:45:12 +0000 -Subject: Move ipachangeconf from ipaclient.install to ipapython - - -This will let us call it from ipaplatform. - -Mark the original location as deprecated. 
- -Reviewed-By: Francois Cami -Reviewed-By: Rob Crittenden -Reviewed-By: Rob Critenden -Reviewed-By: François Cami - ---- - -diff --git a/install/tools/ipa-replica-conncheck.in b/install/tools/ipa-replica-conncheck.in -index 9208076..b22db11 100644 ---- a/install/tools/ipa-replica-conncheck.in -+++ b/install/tools/ipa-replica-conncheck.in -@@ -22,7 +22,7 @@ from __future__ import print_function - - import logging - --import ipaclient.install.ipachangeconf -+from ipapython import ipachangeconf - from ipapython.config import IPAOptionParser - from ipapython.dn import DN - from ipapython import version -@@ -229,7 +229,7 @@ def sigterm_handler(signum, frame): - - def configure_krb5_conf(realm, kdc, filename): - -- krbconf = ipaclient.install.ipachangeconf.IPAChangeConf("IPA Installer") -+ krbconf = ipachangeconf.IPAChangeConf("IPA Installer") - krbconf.setOptionAssignment((" = ", " ")) - krbconf.setSectionNameDelimiters(("[","]")) - krbconf.setSubSectionDelimiters(("{","}")) -diff --git a/ipaclient/install/ipachangeconf.py b/ipaclient/install/ipachangeconf.py -index a13e0ea..c51e42e 100644 ---- a/ipaclient/install/ipachangeconf.py -+++ b/ipaclient/install/ipachangeconf.py -@@ -18,566 +18,18 @@ - # along with this program. If not, see . - # - --import fcntl --import logging --import os --import shutil -+import warnings -+from ipapython.ipachangeconf import IPAChangeConf as realIPAChangeConf - --import six - --if six.PY3: -- unicode = str -+class IPAChangeConf(realIPAChangeConf): -+ """Advertise the old name""" - --logger = logging.getLogger(__name__) -- --def openLocked(filename, perms): -- fd = -1 -- try: -- fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms) -- -- fcntl.lockf(fd, fcntl.LOCK_EX) -- except OSError as e: -- if fd != -1: -- try: -- os.close(fd) -- except OSError: -- pass -- raise IOError(e.errno, e.strerror) -- return os.fdopen(fd, "r+") -- -- -- #TODO: add subsection as a concept -- # (ex. 
REALM.NAME = { foo = x bar = y } ) -- #TODO: put section delimiters as separating element of the list -- # so that we can process multiple sections in one go -- #TODO: add a comment all but provided options as a section option --class IPAChangeConf: - def __init__(self, name): -- self.progname = name -- self.indent = ("", "", "") -- self.assign = (" = ", "=") -- self.dassign = self.assign[0] -- self.comment = ("#",) -- self.dcomment = self.comment[0] -- self.eol = ("\n",) -- self.deol = self.eol[0] -- self.sectnamdel = ("[", "]") -- self.subsectdel = ("{", "}") -- self.case_insensitive_sections = True -- -- def setProgName(self, name): -- self.progname = name -- -- def setIndent(self, indent): -- if type(indent) is tuple: -- self.indent = indent -- elif type(indent) is str: -- self.indent = (indent, ) -- else: -- raise ValueError('Indent must be a list of strings') -- -- def setOptionAssignment(self, assign): -- if type(assign) is tuple: -- self.assign = assign -- else: -- self.assign = (assign, ) -- self.dassign = self.assign[0] -- -- def setCommentPrefix(self, comment): -- if type(comment) is tuple: -- self.comment = comment -- else: -- self.comment = (comment, ) -- self.dcomment = self.comment[0] -- -- def setEndLine(self, eol): -- if type(eol) is tuple: -- self.eol = eol -- else: -- self.eol = (eol, ) -- self.deol = self.eol[0] -- -- def setSectionNameDelimiters(self, delims): -- self.sectnamdel = delims -- -- def setSubSectionDelimiters(self, delims): -- self.subsectdel = delims -- -- def matchComment(self, line): -- for v in self.comment: -- if line.lstrip().startswith(v): -- return line.lstrip()[len(v):] -- return False -- -- def matchEmpty(self, line): -- if line.strip() == "": -- return True -- return False -- -- def matchSection(self, line): -- cl = "".join(line.strip().split()) -- cl = cl.lower() if self.case_insensitive_sections else cl -- -- if len(self.sectnamdel) != 2: -- return False -- if not cl.startswith(self.sectnamdel[0]): -- return False -- if not cl.endswith(self.sectnamdel[1]): -- return False -- return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])] -- -- def matchSubSection(self, line): -- if self.matchComment(line): -- return False -- -- parts = line.split(self.dassign, 1) -- if len(parts) < 2: -- return False -- -- if parts[1].strip() == self.subsectdel[0]: -- return parts[0].strip() -- -- return False -- -- def matchSubSectionEnd(self, line): -- if self.matchComment(line): -- return False -- -- if line.strip() == self.subsectdel[1]: -- return True -- -- return False -- -- def getSectionLine(self, section): -- if len(self.sectnamdel) != 2: -- return section -- return self._dump_line(self.sectnamdel[0], -- section, -- self.sectnamdel[1], -- self.deol) -- -- def _dump_line(self, *args): -- return u"".join(unicode(x) for x in args) -- -- def dump(self, options, level=0): -- output = [] -- if level >= len(self.indent): -- level = len(self.indent) - 1 -- -- for o in options: -- if o['type'] == "section": -- output.append(self._dump_line(self.sectnamdel[0], -- o['name'], -- self.sectnamdel[1])) -- output.append(self.dump(o['value'], (level + 1))) -- continue -- if o['type'] == "subsection": -- output.append(self._dump_line(self.indent[level], -- o['name'], -- self.dassign, -- self.subsectdel[0])) -- output.append(self.dump(o['value'], (level + 1))) -- output.append(self._dump_line(self.indent[level], -- self.subsectdel[1])) -- continue -- if o['type'] == "option": -- delim = o.get('delim', self.dassign) -- if delim not in self.assign: -- raise ValueError('Unknown 
delim "%s" must be one of "%s"' % (delim, " ".join([d for d in self.assign]))) -- output.append(self._dump_line(self.indent[level], -- o['name'], -- delim, -- o['value'])) -- continue -- if o['type'] == "comment": -- output.append(self._dump_line(self.dcomment, o['value'])) -- continue -- if o['type'] == "empty": -- output.append('') -- continue -- raise SyntaxError('Unknown type: [%s]' % o['type']) -- -- # append an empty string to the output so that we add eol to the end -- # of the file contents in a single join() -- output.append('') -- return self.deol.join(output) -- -- def parseLine(self, line): -- -- if self.matchEmpty(line): -- return {'name': 'empty', 'type': 'empty'} -- -- value = self.matchComment(line) -- if value: -- return {'name': 'comment', -- 'type': 'comment', -- 'value': value.rstrip()} # pylint: disable=E1103 -- -- o = dict() -- parts = line.split(self.dassign, 1) -- if len(parts) < 2: -- # The default assign didn't match, try the non-default -- for d in self.assign[1:]: -- parts = line.split(d, 1) -- if len(parts) >= 2: -- o['delim'] = d -- break -- -- if 'delim' not in o: -- raise SyntaxError('Syntax Error: Unknown line format') -- -- o.update({'name':parts[0].strip(), 'type':'option', 'value':parts[1].rstrip()}) -- return o -- -- def findOpts(self, opts, type, name, exclude_sections=False): -- -- num = 0 -- for o in opts: -- if o['type'] == type and o['name'] == name: -- return (num, o) -- if exclude_sections and (o['type'] == "section" or -- o['type'] == "subsection"): -- return (num, None) -- num += 1 -- return (num, None) -- -- def commentOpts(self, inopts, level=0): -- -- opts = [] -- -- if level >= len(self.indent): -- level = len(self.indent) - 1 -- -- for o in inopts: -- if o['type'] == 'section': -- no = self.commentOpts(o['value'], (level + 1)) -- val = self._dump_line(self.dcomment, -- self.sectnamdel[0], -- o['name'], -- self.sectnamdel[1]) -- opts.append({'name': 'comment', -- 'type': 'comment', -- 'value': val}) -- for n in no: -- opts.append(n) -- continue -- if o['type'] == 'subsection': -- no = self.commentOpts(o['value'], (level + 1)) -- val = self._dump_line(self.indent[level], -- o['name'], -- self.dassign, -- self.subsectdel[0]) -- opts.append({'name': 'comment', -- 'type': 'comment', -- 'value': val}) -- opts.extend(no) -- val = self._dump_line(self.indent[level], self.subsectdel[1]) -- opts.append({'name': 'comment', -- 'type': 'comment', -- 'value': val}) -- continue -- if o['type'] == 'option': -- delim = o.get('delim', self.dassign) -- if delim not in self.assign: -- val = self._dump_line(self.indent[level], -- o['name'], -- delim, -- o['value']) -- opts.append({'name':'comment', 'type':'comment', 'value':val}) -- continue -- if o['type'] == 'comment': -- opts.append(o) -- continue -- if o['type'] == 'empty': -- opts.append({'name': 'comment', -- 'type': 'comment', -- 'value': ''}) -- continue -- raise SyntaxError('Unknown type: [%s]' % o['type']) -- -- return opts -- -- def mergeOld(self, oldopts, newopts): -- -- opts = [] -- -- for o in oldopts: -- if o['type'] == "section" or o['type'] == "subsection": -- _num, no = self.findOpts(newopts, o['type'], o['name']) -- if not no: -- opts.append(o) -- continue -- if no['action'] == "set": -- mo = self.mergeOld(o['value'], no['value']) -- opts.append({'name': o['name'], -- 'type': o['type'], -- 'value': mo}) -- continue -- if no['action'] == "comment": -- co = self.commentOpts(o['value']) -- for c in co: -- opts.append(c) -- continue -- if no['action'] == "remove": -- continue -- raise 
SyntaxError('Unknown action: [%s]' % no['action']) -- -- if o['type'] == "comment" or o['type'] == "empty": -- opts.append(o) -- continue -- -- if o['type'] == "option": -- _num, no = self.findOpts(newopts, 'option', o['name'], True) -- if not no: -- opts.append(o) -- continue -- if no['action'] == 'comment' or no['action'] == 'remove': -- if (no['value'] is not None and -- o['value'] is not no['value']): -- opts.append(o) -- continue -- if no['action'] == 'comment': -- value = self._dump_line(self.dcomment, -- o['name'], -- self.dassign, -- o['value']) -- opts.append({'name': 'comment', -- 'type': 'comment', -- 'value': value}) -- continue -- if no['action'] == 'set': -- opts.append(no) -- continue -- if no['action'] == 'addifnotset': -- opts.append({ -- 'name': 'comment', -- 'type': 'comment', -- 'value': self._dump_line( -- ' ', no['name'], ' modified by IPA' -- ), -- }) -- opts.append({'name': 'comment', 'type': 'comment', -- 'value': self._dump_line(no['name'], -- self.dassign, -- no['value'], -- )}) -- opts.append(o) -- continue -- raise SyntaxError('Unknown action: [%s]' % no['action']) -- -- raise SyntaxError('Unknown type: [%s]' % o['type']) -- -- return opts -- -- def mergeNew(self, opts, newopts): -- -- cline = 0 -- -- for no in newopts: -- -- if no['type'] == "section" or no['type'] == "subsection": -- (num, o) = self.findOpts(opts, no['type'], no['name']) -- if not o: -- if no['action'] == 'set': -- opts.append(no) -- continue -- if no['action'] == "set": -- self.mergeNew(o['value'], no['value']) -- continue -- cline = num + 1 -- continue -- -- if no['type'] == "option": -- (num, o) = self.findOpts(opts, no['type'], no['name'], True) -- if not o: -- if no['action'] == 'set' or no['action'] == 'addifnotset': -- opts.append(no) -- continue -- cline = num + 1 -- continue -- -- if no['type'] == "comment" or no['type'] == "empty": -- opts.insert(cline, no) -- cline += 1 -- continue -- -- raise SyntaxError('Unknown type: [%s]' % no['type']) -- -- def merge(self, oldopts, newopts): -- """ -- Uses a two pass strategy: -- First we create a new opts tree from oldopts removing/commenting -- the options as indicated by the contents of newopts -- Second we fill in the new opts tree with options as indicated -- in the newopts tree (this is becaus eentire (sub)sections may -- in the newopts tree (this is becaus entire (sub)sections may -- exist in the newopts that do not exist in oldopts) -- """ -- opts = self.mergeOld(oldopts, newopts) -- self.mergeNew(opts, newopts) -- return opts -- -- #TODO: Make parse() recursive? -- def parse(self, f): -- -- opts = [] -- sectopts = [] -- section = None -- subsectopts = [] -- subsection = None -- curopts = opts -- fatheropts = opts -- -- # Read in the old file. -- for line in f: -- -- # It's a section start. -- value = self.matchSection(line) -- if value: -- if section is not None: -- opts.append({'name': section, -- 'type': 'section', -- 'value': sectopts}) -- sectopts = [] -- curopts = sectopts -- fatheropts = sectopts -- section = value -- continue -- -- # It's a subsection start. 
-- value = self.matchSubSection(line) -- if value: -- if subsection is not None: -- raise SyntaxError('nested subsections are not ' -- 'supported yet') -- subsectopts = [] -- curopts = subsectopts -- subsection = value -- continue -- -- value = self.matchSubSectionEnd(line) -- if value: -- if subsection is None: -- raise SyntaxError('Unmatched end subsection terminator ' -- 'found') -- fatheropts.append({'name': subsection, -- 'type': 'subsection', -- 'value': subsectopts}) -- subsection = None -- curopts = fatheropts -- continue -- -- # Copy anything else as is. -- try: -- curopts.append(self.parseLine(line)) -- except SyntaxError as e: -- raise SyntaxError('{error} in file {fname}: [{line}]'.format( -- error=e, fname=f.name, line=line.rstrip())) -- -- #Add last section if any -- if len(sectopts) is not 0: -- opts.append({'name': section, -- 'type': 'section', -- 'value': sectopts}) -- -- return opts -- -- def changeConf(self, file, newopts): -- """ -- Write settings to configuration file -- :param file: path to the file -- :param options: set of dictionaries in the form: -- {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} -- :param section: section name like 'global' -- """ -- output = "" -- f = None -- try: -- # Do not catch an unexisting file error -- # we want to fail in that case -- shutil.copy2(file, (file + ".ipabkp")) -- -- f = openLocked(file, 0o644) -- -- oldopts = self.parse(f) -- -- options = self.merge(oldopts, newopts) -- -- output = self.dump(options) -- -- # Write it out and close it. -- f.seek(0) -- f.truncate(0) -- f.write(output) -- finally: -- try: -- if f: -- f.close() -- except IOError: -- pass -- logger.debug("Updating configuration file %s", file) -- logger.debug(output) -- return True -- -- def newConf(self, file, options, file_perms=0o644): -- """" -- Write settings to a new file, backup the old -- :param file: path to the file -- :param options: a set of dictionaries in the form: -- {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} -- :param file_perms: number defining the new file's permissions -- """ -- output = "" -- f = None -- try: -- try: -- shutil.copy2(file, (file + ".ipabkp")) -- except IOError as err: -- if err.errno == 2: -- # The orign file did not exist -- pass -- -- f = openLocked(file, file_perms) -- -- # Trunkate -- f.seek(0) -- f.truncate(0) -- -- output = self.dump(options) -- -- f.write(output) -- finally: -- try: -- if f: -- f.close() -- except IOError: -- pass -- logger.debug("Writing configuration file %s", file) -- logger.debug(output) -- return True -- -- @staticmethod -- def setOption(name, value): -- return {'name': name, -- 'type': 'option', -- 'action': 'set', -- 'value': value} -- -- @staticmethod -- def rmOption(name): -- return {'name': name, -- 'type': 'option', -- 'action': 'remove', -- 'value': None} -- -- @staticmethod -- def setSection(name, options): -- return {'name': name, -- 'type': 'section', -- 'action': 'set', -- 'value': options} -- -- @staticmethod -- def emptyLine(): -- return {'name': 'empty', -- 'type': 'empty'} -+ """something""" -+ warnings.warn( -+ "Use 'ipapython.ipachangeconf.IPAChangeConfg'", -+ DeprecationWarning, -+ stacklevel=2 -+ ) -+ super(IPAChangeConf, self).__init__(name) -diff --git a/ipapython/ipachangeconf.py b/ipapython/ipachangeconf.py -new file mode 100644 -index 0000000..cfb4a6e ---- /dev/null -+++ b/ipapython/ipachangeconf.py -@@ -0,0 +1,590 @@ -+# -+# ipachangeconf - configuration file manipulation classes and functions -+# partially based on authconfig code -+# Copyright (c) 
1999-2007 Red Hat, Inc. -+# Author: Simo Sorce -+# -+# This program is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation, either version 3 of the License, or -+# (at your option) any later version. -+# -+# This program is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with this program. If not, see . -+# -+ -+import fcntl -+import logging -+import os -+import shutil -+ -+import six -+ -+if six.PY3: -+ unicode = str -+ -+logger = logging.getLogger(__name__) -+ -+ -+def openLocked(filename, perms): -+ fd = -1 -+ try: -+ fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms) -+ -+ fcntl.lockf(fd, fcntl.LOCK_EX) -+ except OSError as e: -+ if fd != -1: -+ try: -+ os.close(fd) -+ except OSError: -+ pass -+ raise IOError(e.errno, e.strerror) -+ return os.fdopen(fd, "r+") -+ -+ # TODO: add subsection as a concept -+ # (ex. REALM.NAME = { foo = x bar = y } ) -+ # TODO: put section delimiters as separating element of the list -+ # so that we can process multiple sections in one go -+ # TODO: add a comment all but provided options as a section option -+ -+ -+class IPAChangeConf: -+ def __init__(self, name): -+ self.progname = name -+ self.indent = ("", "", "") -+ self.assign = (" = ", "=") -+ self.dassign = self.assign[0] -+ self.comment = ("#",) -+ self.dcomment = self.comment[0] -+ self.eol = ("\n",) -+ self.deol = self.eol[0] -+ self.sectnamdel = ("[", "]") -+ self.subsectdel = ("{", "}") -+ self.case_insensitive_sections = True -+ -+ def setProgName(self, name): -+ self.progname = name -+ -+ def setIndent(self, indent): -+ if type(indent) is tuple: -+ self.indent = indent -+ elif type(indent) is str: -+ self.indent = (indent, ) -+ else: -+ raise ValueError('Indent must be a list of strings') -+ -+ def setOptionAssignment(self, assign): -+ if type(assign) is tuple: -+ self.assign = assign -+ else: -+ self.assign = (assign, ) -+ self.dassign = self.assign[0] -+ -+ def setCommentPrefix(self, comment): -+ if type(comment) is tuple: -+ self.comment = comment -+ else: -+ self.comment = (comment, ) -+ self.dcomment = self.comment[0] -+ -+ def setEndLine(self, eol): -+ if type(eol) is tuple: -+ self.eol = eol -+ else: -+ self.eol = (eol, ) -+ self.deol = self.eol[0] -+ -+ def setSectionNameDelimiters(self, delims): -+ self.sectnamdel = delims -+ -+ def setSubSectionDelimiters(self, delims): -+ self.subsectdel = delims -+ -+ def matchComment(self, line): -+ for v in self.comment: -+ if line.lstrip().startswith(v): -+ return line.lstrip()[len(v):] -+ return False -+ -+ def matchEmpty(self, line): -+ if line.strip() == "": -+ return True -+ return False -+ -+ def matchSection(self, line): -+ cl = "".join(line.strip().split()) -+ cl = cl.lower() if self.case_insensitive_sections else cl -+ -+ if len(self.sectnamdel) != 2: -+ return False -+ if not cl.startswith(self.sectnamdel[0]): -+ return False -+ if not cl.endswith(self.sectnamdel[1]): -+ return False -+ return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])] -+ -+ def matchSubSection(self, line): -+ if self.matchComment(line): -+ return False -+ -+ parts = line.split(self.dassign, 1) -+ if len(parts) < 2: -+ return False -+ -+ if parts[1].strip() == self.subsectdel[0]: -+ return 
parts[0].strip() -+ -+ return False -+ -+ def matchSubSectionEnd(self, line): -+ if self.matchComment(line): -+ return False -+ -+ if line.strip() == self.subsectdel[1]: -+ return True -+ -+ return False -+ -+ def getSectionLine(self, section): -+ if len(self.sectnamdel) != 2: -+ return section -+ return self._dump_line(self.sectnamdel[0], -+ section, -+ self.sectnamdel[1], -+ self.deol) -+ -+ def _dump_line(self, *args): -+ return u"".join(unicode(x) for x in args) -+ -+ def dump(self, options, level=0): -+ output = [] -+ if level >= len(self.indent): -+ level = len(self.indent) - 1 -+ -+ for o in options: -+ if o['type'] == "section": -+ output.append(self._dump_line(self.sectnamdel[0], -+ o['name'], -+ self.sectnamdel[1])) -+ output.append(self.dump(o['value'], (level + 1))) -+ continue -+ if o['type'] == "subsection": -+ output.append(self._dump_line(self.indent[level], -+ o['name'], -+ self.dassign, -+ self.subsectdel[0])) -+ output.append(self.dump(o['value'], (level + 1))) -+ output.append(self._dump_line(self.indent[level], -+ self.subsectdel[1])) -+ continue -+ if o['type'] == "option": -+ delim = o.get('delim', self.dassign) -+ if delim not in self.assign: -+ raise ValueError( -+ 'Unknown delim "%s" must be one of "%s"' % -+ (delim, " ".join([d for d in self.assign])) -+ ) -+ output.append(self._dump_line(self.indent[level], -+ o['name'], -+ delim, -+ o['value'])) -+ continue -+ if o['type'] == "comment": -+ output.append(self._dump_line(self.dcomment, o['value'])) -+ continue -+ if o['type'] == "empty": -+ output.append('') -+ continue -+ raise SyntaxError('Unknown type: [%s]' % o['type']) -+ -+ # append an empty string to the output so that we add eol to the end -+ # of the file contents in a single join() -+ output.append('') -+ return self.deol.join(output) -+ -+ def parseLine(self, line): -+ -+ if self.matchEmpty(line): -+ return {'name': 'empty', 'type': 'empty'} -+ -+ value = self.matchComment(line) -+ if value: -+ return {'name': 'comment', -+ 'type': 'comment', -+ 'value': value.rstrip()} # pylint: disable=E1103 -+ -+ o = dict() -+ parts = line.split(self.dassign, 1) -+ if len(parts) < 2: -+ # The default assign didn't match, try the non-default -+ for d in self.assign[1:]: -+ parts = line.split(d, 1) -+ if len(parts) >= 2: -+ o['delim'] = d -+ break -+ -+ if 'delim' not in o: -+ raise SyntaxError('Syntax Error: Unknown line format') -+ -+ o.update({'name': parts[0].strip(), 'type': 'option', -+ 'value': parts[1].rstrip()}) -+ return o -+ -+ def findOpts(self, opts, type, name, exclude_sections=False): -+ -+ num = 0 -+ for o in opts: -+ if o['type'] == type and o['name'] == name: -+ return (num, o) -+ if exclude_sections and (o['type'] == "section" or -+ o['type'] == "subsection"): -+ return (num, None) -+ num += 1 -+ return (num, None) -+ -+ def commentOpts(self, inopts, level=0): -+ -+ opts = [] -+ -+ if level >= len(self.indent): -+ level = len(self.indent) - 1 -+ -+ for o in inopts: -+ if o['type'] == 'section': -+ no = self.commentOpts(o['value'], (level + 1)) -+ val = self._dump_line(self.dcomment, -+ self.sectnamdel[0], -+ o['name'], -+ self.sectnamdel[1]) -+ opts.append({'name': 'comment', -+ 'type': 'comment', -+ 'value': val}) -+ for n in no: -+ opts.append(n) -+ continue -+ if o['type'] == 'subsection': -+ no = self.commentOpts(o['value'], (level + 1)) -+ val = self._dump_line(self.indent[level], -+ o['name'], -+ self.dassign, -+ self.subsectdel[0]) -+ opts.append({'name': 'comment', -+ 'type': 'comment', -+ 'value': val}) -+ opts.extend(no) -+ val = 
self._dump_line(self.indent[level], self.subsectdel[1]) -+ opts.append({'name': 'comment', -+ 'type': 'comment', -+ 'value': val}) -+ continue -+ if o['type'] == 'option': -+ delim = o.get('delim', self.dassign) -+ if delim not in self.assign: -+ val = self._dump_line(self.indent[level], -+ o['name'], -+ delim, -+ o['value']) -+ opts.append({'name': 'comment', 'type': 'comment', -+ 'value': val}) -+ continue -+ if o['type'] == 'comment': -+ opts.append(o) -+ continue -+ if o['type'] == 'empty': -+ opts.append({'name': 'comment', -+ 'type': 'comment', -+ 'value': ''}) -+ continue -+ raise SyntaxError('Unknown type: [%s]' % o['type']) -+ -+ return opts -+ -+ def mergeOld(self, oldopts, newopts): -+ -+ opts = [] -+ -+ for o in oldopts: -+ if o['type'] == "section" or o['type'] == "subsection": -+ _num, no = self.findOpts(newopts, o['type'], o['name']) -+ if not no: -+ opts.append(o) -+ continue -+ if no['action'] == "set": -+ mo = self.mergeOld(o['value'], no['value']) -+ opts.append({'name': o['name'], -+ 'type': o['type'], -+ 'value': mo}) -+ continue -+ if no['action'] == "comment": -+ co = self.commentOpts(o['value']) -+ for c in co: -+ opts.append(c) -+ continue -+ if no['action'] == "remove": -+ continue -+ raise SyntaxError('Unknown action: [%s]' % no['action']) -+ -+ if o['type'] == "comment" or o['type'] == "empty": -+ opts.append(o) -+ continue -+ -+ if o['type'] == "option": -+ _num, no = self.findOpts(newopts, 'option', o['name'], True) -+ if not no: -+ opts.append(o) -+ continue -+ if no['action'] == 'comment' or no['action'] == 'remove': -+ if (no['value'] is not None and -+ o['value'] is not no['value']): -+ opts.append(o) -+ continue -+ if no['action'] == 'comment': -+ value = self._dump_line(self.dcomment, -+ o['name'], -+ self.dassign, -+ o['value']) -+ opts.append({'name': 'comment', -+ 'type': 'comment', -+ 'value': value}) -+ continue -+ if no['action'] == 'set': -+ opts.append(no) -+ continue -+ if no['action'] == 'addifnotset': -+ opts.append({ -+ 'name': 'comment', -+ 'type': 'comment', -+ 'value': self._dump_line( -+ ' ', no['name'], ' modified by IPA' -+ ), -+ }) -+ opts.append({'name': 'comment', 'type': 'comment', -+ 'value': self._dump_line(no['name'], -+ self.dassign, -+ no['value'], -+ )}) -+ opts.append(o) -+ continue -+ raise SyntaxError('Unknown action: [%s]' % no['action']) -+ -+ raise SyntaxError('Unknown type: [%s]' % o['type']) -+ -+ return opts -+ -+ def mergeNew(self, opts, newopts): -+ -+ cline = 0 -+ -+ for no in newopts: -+ -+ if no['type'] == "section" or no['type'] == "subsection": -+ (num, o) = self.findOpts(opts, no['type'], no['name']) -+ if not o: -+ if no['action'] == 'set': -+ opts.append(no) -+ continue -+ if no['action'] == "set": -+ self.mergeNew(o['value'], no['value']) -+ continue -+ cline = num + 1 -+ continue -+ -+ if no['type'] == "option": -+ (num, o) = self.findOpts(opts, no['type'], no['name'], True) -+ if not o: -+ if no['action'] == 'set' or no['action'] == 'addifnotset': -+ opts.append(no) -+ continue -+ cline = num + 1 -+ continue -+ -+ if no['type'] == "comment" or no['type'] == "empty": -+ opts.insert(cline, no) -+ cline += 1 -+ continue -+ -+ raise SyntaxError('Unknown type: [%s]' % no['type']) -+ -+ def merge(self, oldopts, newopts): -+ """ -+ Uses a two pass strategy: -+ First we create a new opts tree from oldopts removing/commenting -+ the options as indicated by the contents of newopts -+ Second we fill in the new opts tree with options as indicated -+ in the newopts tree (this is becaus eentire (sub)sections may -+ in 
the newopts tree (this is becaus entire (sub)sections may -+ exist in the newopts that do not exist in oldopts) -+ """ -+ opts = self.mergeOld(oldopts, newopts) -+ self.mergeNew(opts, newopts) -+ return opts -+ -+ # TODO: Make parse() recursive? -+ def parse(self, f): -+ -+ opts = [] -+ sectopts = [] -+ section = None -+ subsectopts = [] -+ subsection = None -+ curopts = opts -+ fatheropts = opts -+ -+ # Read in the old file. -+ for line in f: -+ -+ # It's a section start. -+ value = self.matchSection(line) -+ if value: -+ if section is not None: -+ opts.append({'name': section, -+ 'type': 'section', -+ 'value': sectopts}) -+ sectopts = [] -+ curopts = sectopts -+ fatheropts = sectopts -+ section = value -+ continue -+ -+ # It's a subsection start. -+ value = self.matchSubSection(line) -+ if value: -+ if subsection is not None: -+ raise SyntaxError('nested subsections are not ' -+ 'supported yet') -+ subsectopts = [] -+ curopts = subsectopts -+ subsection = value -+ continue -+ -+ value = self.matchSubSectionEnd(line) -+ if value: -+ if subsection is None: -+ raise SyntaxError('Unmatched end subsection terminator ' -+ 'found') -+ fatheropts.append({'name': subsection, -+ 'type': 'subsection', -+ 'value': subsectopts}) -+ subsection = None -+ curopts = fatheropts -+ continue -+ -+ # Copy anything else as is. -+ try: -+ curopts.append(self.parseLine(line)) -+ except SyntaxError as e: -+ raise SyntaxError('{error} in file {fname}: [{line}]'.format( -+ error=e, fname=f.name, line=line.rstrip())) -+ -+ # Add last section if any -+ if len(sectopts) is not 0: -+ opts.append({'name': section, -+ 'type': 'section', -+ 'value': sectopts}) -+ -+ return opts -+ -+ def changeConf(self, file, newopts): -+ """ -+ Write settings to configuration file -+ :param file: path to the file -+ :param options: set of dictionaries in the form: -+ {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} -+ :param section: section name like 'global' -+ """ -+ output = "" -+ f = None -+ try: -+ # Do not catch an unexisting file error -+ # we want to fail in that case -+ shutil.copy2(file, (file + ".ipabkp")) -+ -+ f = openLocked(file, 0o644) -+ -+ oldopts = self.parse(f) -+ -+ options = self.merge(oldopts, newopts) -+ -+ output = self.dump(options) -+ -+ # Write it out and close it. 
-+ f.seek(0) -+ f.truncate(0) -+ f.write(output) -+ finally: -+ try: -+ if f: -+ f.close() -+ except IOError: -+ pass -+ logger.debug("Updating configuration file %s", file) -+ logger.debug(output) -+ return True -+ -+ def newConf(self, file, options, file_perms=0o644): -+ """" -+ Write settings to a new file, backup the old -+ :param file: path to the file -+ :param options: a set of dictionaries in the form: -+ {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} -+ :param file_perms: number defining the new file's permissions -+ """ -+ output = "" -+ f = None -+ try: -+ try: -+ shutil.copy2(file, (file + ".ipabkp")) -+ except IOError as err: -+ if err.errno == 2: -+ # The orign file did not exist -+ pass -+ -+ f = openLocked(file, file_perms) -+ -+ # Trunkate -+ f.seek(0) -+ f.truncate(0) -+ -+ output = self.dump(options) -+ -+ f.write(output) -+ finally: -+ try: -+ if f: -+ f.close() -+ except IOError: -+ pass -+ logger.debug("Writing configuration file %s", file) -+ logger.debug(output) -+ return True -+ -+ @staticmethod -+ def setOption(name, value): -+ return {'name': name, -+ 'type': 'option', -+ 'action': 'set', -+ 'value': value} -+ -+ @staticmethod -+ def rmOption(name): -+ return {'name': name, -+ 'type': 'option', -+ 'action': 'remove', -+ 'value': None} -+ -+ @staticmethod -+ def setSection(name, options): -+ return {'name': name, -+ 'type': 'section', -+ 'action': 'set', -+ 'value': options} -+ -+ @staticmethod -+ def emptyLine(): -+ return {'name': 'empty', -+ 'type': 'empty'} -diff --git a/ipaserver/install/adtrustinstance.py b/ipaserver/install/adtrustinstance.py -index 7bb9431..47a5a92 100644 ---- a/ipaserver/install/adtrustinstance.py -+++ b/ipaserver/install/adtrustinstance.py -@@ -40,11 +40,11 @@ from ipaserver.install.replication import wait_for_task - from ipalib import errors, api - from ipalib.util import normalize_zone - from ipapython.dn import DN -+from ipapython import ipachangeconf - from ipapython import ipaldap - from ipapython import ipautil - import ipapython.errors - --import ipaclient.install.ipachangeconf - from ipaplatform import services - from ipaplatform.constants import constants - from ipaplatform.paths import paths -@@ -639,7 +639,7 @@ class ADTRUSTInstance(service.Service): - self.print_msg("Cannot modify /etc/krb5.conf") - - krbconf = ( -- ipaclient.install.ipachangeconf.IPAChangeConf("IPA Installer")) -+ ipachangeconf.IPAChangeConf("IPA Installer")) - krbconf.setOptionAssignment((" = ", " ")) - krbconf.setSectionNameDelimiters(("[", "]")) - krbconf.setSubSectionDelimiters(("{", "}")) -diff --git a/ipaserver/install/server/install.py b/ipaserver/install/server/install.py -index 02c8f4d..6a81d57 100644 ---- a/ipaserver/install/server/install.py -+++ b/ipaserver/install/server/install.py -@@ -19,7 +19,7 @@ import six - from ipaclient.install import timeconf - from ipaclient.install.client import ( - check_ldap_conf, sync_time, restore_time_sync) --from ipaclient.install.ipachangeconf import IPAChangeConf -+from ipapython.ipachangeconf import IPAChangeConf - from ipalib.install import certmonger, sysrestore - from ipapython import ipautil, version - from ipapython.ipautil import ( -diff --git a/ipaserver/install/server/replicainstall.py b/ipaserver/install/server/replicainstall.py -index 6da6804..7272640 100644 ---- a/ipaserver/install/server/replicainstall.py -+++ b/ipaserver/install/server/replicainstall.py -@@ -23,13 +23,13 @@ from pkg_resources import parse_version - import six - - from ipaclient.install.client import check_ldap_conf 
--from ipaclient.install.ipachangeconf import IPAChangeConf - import ipaclient.install.timeconf - from ipalib.install import certstore, sysrestore - from ipalib.install.kinit import kinit_keytab - from ipapython import ipaldap, ipautil - from ipapython.dn import DN - from ipapython.admintool import ScriptError -+from ipapython.ipachangeconf import IPAChangeConf - from ipaplatform import services - from ipaplatform.tasks import tasks - from ipaplatform.paths import paths -diff --git a/ipatests/test_install/test_changeconf.py b/ipatests/test_install/test_changeconf.py -index 2dc2b7d..40c8a1d 100644 ---- a/ipatests/test_install/test_changeconf.py -+++ b/ipatests/test_install/test_changeconf.py -@@ -3,7 +3,7 @@ - from __future__ import absolute_import - - import pytest --from ipaclient.install.ipachangeconf import IPAChangeConf -+from ipapython.ipachangeconf import IPAChangeConf - - - @pytest.fixture(scope='function') - -From 2da90887632c764a73866c9ad3824ebb53c0aa73 Mon Sep 17 00:00:00 2001 -From: Rob Critenden -Date: Aug 29 2019 06:45:12 +0000 -Subject: Use tasks to configure automount nsswitch settings - - -authselect doesn't allow one to directly write to -/etc/nsswitch.conf. It will complain bitterly if it -detects it and will refuse to work until reset. - -Instead it wants the user to write to -/etc/authselect/user-nsswitch.conf and then it will handle -merging in any differences. - -To complicate matters some databases are not user configurable -like passwd, group and of course, automount. There are some -undocumented options to allow one to override these though so -we utilize that. - -tasks are used so that authselect-based installations can still -write directly to /etc/nsswitch.conf and operate as it used to. - -Reviewed-By: Francois Cami -Reviewed-By: Rob Crittenden -Reviewed-By: Rob Critenden -Reviewed-By: François Cami - ---- - -diff --git a/ipaclient/install/client.py b/ipaclient/install/client.py -index 9492ca4..1e88ba1 100644 ---- a/ipaclient/install/client.py -+++ b/ipaclient/install/client.py -@@ -66,7 +66,7 @@ from ipapython import version - - from . import automount, timeconf, sssd - from ipaclient import discovery --from .ipachangeconf import IPAChangeConf -+from ipapython.ipachangeconf import IPAChangeConf - - NoneType = type(None) - -@@ -281,72 +281,6 @@ def is_ipa_client_installed(fstore, on_master=False): - return installed - - --def configure_nsswitch_database(fstore, database, services, preserve=True, -- append=True, default_value=()): -- """ -- Edits the specified nsswitch.conf database (e.g. passwd, group, sudoers) -- to use the specified service(s). -- -- Arguments: -- fstore - FileStore to backup the nsswitch.conf -- database - database configuration that should be ammended, -- e.g. 'sudoers' -- service - list of services that should be added, e.g. ['sss'] -- preserve - if True, the already configured services will be preserved -- -- The next arguments modify the behaviour if preserve=True: -- append - if True, the services will be appended, if False, prepended -- default_value - list of services that are considered as default (if -- the database is not mentioned in nsswitch.conf), e.g. 
-- ['files'] -- """ -- -- # Backup the original version of nsswitch.conf, we're going to edit it now -- if not fstore.has_file(paths.NSSWITCH_CONF): -- fstore.backup_file(paths.NSSWITCH_CONF) -- -- conf = IPAChangeConf("IPA Installer") -- conf.setOptionAssignment(':') -- -- if preserve: -- # Read the existing configuration -- with open(paths.NSSWITCH_CONF, 'r') as f: -- opts = conf.parse(f) -- raw_database_entry = conf.findOpts(opts, 'option', database)[1] -- -- # Detect the list of already configured services -- if not raw_database_entry: -- # If there is no database entry, database is not present in -- # the nsswitch.conf. Set the list of services to the -- # default list, if passed. -- configured_services = list(default_value) -- else: -- configured_services = raw_database_entry['value'].strip().split() -- -- # Make sure no service is added if already mentioned in the list -- added_services = [s for s in services -- if s not in configured_services] -- -- # Prepend / append the list of new services -- if append: -- new_value = ' ' + ' '.join(configured_services + added_services) -- else: -- new_value = ' ' + ' '.join(added_services + configured_services) -- -- else: -- # Preserve not set, let's rewrite existing configuration -- new_value = ' ' + ' '.join(services) -- -- # Set new services as sources for database -- opts = [ -- conf.setOption(database, new_value), -- conf.emptyLine(), -- ] -- -- conf.changeConf(paths.NSSWITCH_CONF, opts) -- logger.info("Configured %s in %s", database, paths.NSSWITCH_CONF) -- -- - def configure_ipa_conf( - fstore, cli_basedn, cli_realm, cli_domain, cli_server, hostname): - ipaconf = IPAChangeConf("IPA Installer") -@@ -948,9 +882,7 @@ def configure_sssd_conf( - "Unable to activate the SUDO service in SSSD config.") - - sssdconfig.activate_service('sudo') -- configure_nsswitch_database( -- fstore, 'sudoers', ['sss'], -- default_value=['files']) -+ tasks.enable_sssd_sudo(fstore) - - domain.add_provider('ipa', 'id') - -diff --git a/ipaclient/install/ipa_client_automount.py b/ipaclient/install/ipa_client_automount.py -index a1dc2a1..3a0896b 100644 ---- a/ipaclient/install/ipa_client_automount.py -+++ b/ipaclient/install/ipa_client_automount.py -@@ -41,7 +41,8 @@ from six.moves.urllib.parse import urlsplit - - # pylint: enable=import-error - from optparse import OptionParser # pylint: disable=deprecated-module --from ipaclient.install import ipachangeconf, ipadiscovery -+from ipapython import ipachangeconf -+from ipaclient.install import ipadiscovery - from ipaclient.install.client import ( - CLIENT_NOT_CONFIGURED, - CLIENT_ALREADY_CONFIGURED, -@@ -177,44 +178,6 @@ def configure_xml(fstore): - print("Configured %s" % authconf) - - --def configure_nsswitch(statestore, options): -- """ -- Point automount to ldap in nsswitch.conf. -- This function is for non-SSSD setups only. 
-- """ -- conf = ipachangeconf.IPAChangeConf("IPA Installer") -- conf.setOptionAssignment(':') -- -- with open(paths.NSSWITCH_CONF, 'r') as f: -- current_opts = conf.parse(f) -- current_nss_value = conf.findOpts( -- current_opts, name='automount', type='option' -- )[1] -- if current_nss_value is None: -- # no automount database present -- current_nss_value = False # None cannot be backed up -- else: -- current_nss_value = current_nss_value['value'] -- statestore.backup_state( -- 'ipa-client-automount-nsswitch', 'previous-automount', -- current_nss_value -- ) -- -- nss_value = ' files ldap' -- opts = [ -- { -- 'name': 'automount', -- 'type': 'option', -- 'action': 'set', -- 'value': nss_value, -- }, -- {'name': 'empty', 'type': 'empty'}, -- ] -- conf.changeConf(paths.NSSWITCH_CONF, opts) -- -- print("Configured %s" % paths.NSSWITCH_CONF) -- -- - def configure_autofs_sssd(fstore, statestore, autodiscover, options): - try: - sssdconfig = SSSDConfig.SSSDConfig() -@@ -339,41 +302,8 @@ def uninstall(fstore, statestore): - ] - STATES = ['autofs', 'rpcidmapd', 'rpcgssd'] - -- if statestore.get_state( -- 'ipa-client-automount-nsswitch', 'previous-automount' -- ) is False: -- # Previous nsswitch.conf had no automount database configured -- # so remove it. -- conf = ipachangeconf.IPAChangeConf("IPA automount installer") -- conf.setOptionAssignment(':') -- changes = [conf.rmOption('automount')] -- conf.changeConf(paths.NSSWITCH_CONF, changes) -- tasks.restore_context(paths.NSSWITCH_CONF) -- statestore.delete_state( -- 'ipa-client-automount-nsswitch', 'previous-automount' -- ) -- elif statestore.get_state( -- 'ipa-client-automount-nsswitch', 'previous-automount' -- ) is not None: -- nss_value = statestore.get_state( -- 'ipa-client-automount-nsswitch', 'previous-automount' -- ) -- opts = [ -- { -- 'name': 'automount', -- 'type': 'option', -- 'action': 'set', -- 'value': nss_value, -- }, -- {'name': 'empty', 'type': 'empty'}, -- ] -- conf = ipachangeconf.IPAChangeConf("IPA automount installer") -- conf.setOptionAssignment(':') -- conf.changeConf(paths.NSSWITCH_CONF, opts) -- tasks.restore_context(paths.NSSWITCH_CONF) -- statestore.delete_state( -- 'ipa-client-automount-nsswitch', 'previous-automount' -- ) -+ if not statestore.get_state('autofs', 'sssd'): -+ tasks.disable_ldap_automount(statestore) - - if not any(fstore.has_file(f) for f in RESTORE_FILES) or not any( - statestore.has_state(s) for s in STATES -@@ -627,7 +557,7 @@ def configure_automount(): - - try: - if not options.sssd: -- configure_nsswitch(statestore, options) -+ tasks.enable_ldap_automount(statestore) - configure_nfs(fstore, statestore, options) - if options.sssd: - configure_autofs_sssd(fstore, statestore, autodiscover, options) -diff --git a/ipaplatform/base/tasks.py b/ipaplatform/base/tasks.py -index 8aa9c5c..7fd7d57 100644 ---- a/ipaplatform/base/tasks.py -+++ b/ipaplatform/base/tasks.py -@@ -32,6 +32,7 @@ from pkg_resources import parse_version - from ipaplatform.constants import constants - from ipaplatform.paths import paths - from ipapython import ipautil -+from ipapython.ipachangeconf import IPAChangeConf - - logger = logging.getLogger(__name__) - -@@ -337,5 +338,157 @@ class BaseTaskNamespace: - """ - raise NotImplementedError - -+ def configure_nsswitch_database(self, fstore, database, services, -+ preserve=True, append=True, -+ default_value=()): -+ """ -+ Edits the specified nsswitch.conf database (e.g. passwd, group, -+ sudoers) to use the specified service(s). 
-+ -+ Arguments: -+ fstore - FileStore to backup the nsswitch.conf -+ database - database configuration that should be ammended, -+ e.g. 'sudoers' -+ service - list of services that should be added, e.g. ['sss'] -+ preserve - if True, the already configured services will be -+ preserved -+ -+ The next arguments modify the behaviour if preserve=True: -+ append - if True, the services will be appended, if False, -+ prepended -+ default_value - list of services that are considered as default (if -+ the database is not mentioned in nsswitch.conf), -+ e.g. ['files'] -+ """ -+ -+ # Backup the original version of nsswitch.conf, we're going to edit it -+ # now -+ if not fstore.has_file(paths.NSSWITCH_CONF): -+ fstore.backup_file(paths.NSSWITCH_CONF) -+ -+ conf = IPAChangeConf("IPA Installer") -+ conf.setOptionAssignment(':') -+ -+ if preserve: -+ # Read the existing configuration -+ with open(paths.NSSWITCH_CONF, 'r') as f: -+ opts = conf.parse(f) -+ raw_database_entry = conf.findOpts(opts, 'option', database)[1] -+ -+ # Detect the list of already configured services -+ if not raw_database_entry: -+ # If there is no database entry, database is not present in -+ # the nsswitch.conf. Set the list of services to the -+ # default list, if passed. -+ configured_services = list(default_value) -+ else: -+ configured_services = raw_database_entry[ -+ 'value'].strip().split() -+ -+ # Make sure no service is added if already mentioned in the list -+ added_services = [s for s in services -+ if s not in configured_services] -+ -+ # Prepend / append the list of new services -+ if append: -+ new_value = ' ' + ' '.join(configured_services + -+ added_services) -+ else: -+ new_value = ' ' + ' '.join(added_services + -+ configured_services) -+ -+ else: -+ # Preserve not set, let's rewrite existing configuration -+ new_value = ' ' + ' '.join(services) -+ -+ # Set new services as sources for database -+ opts = [ -+ conf.setOption(database, new_value), -+ conf.emptyLine(), -+ ] -+ -+ conf.changeConf(paths.NSSWITCH_CONF, opts) -+ logger.info("Configured %s in %s", database, paths.NSSWITCH_CONF) -+ -+ def enable_sssd_sudo(self, fstore): -+ """Configure nsswitch.conf to use sssd for sudo""" -+ self.configure_nsswitch_database( -+ fstore, 'sudoers', ['sss'], -+ default_value=['files']) -+ -+ def enable_ldap_automount(self, statestore): -+ """ -+ Point automount to ldap in nsswitch.conf. -+ This function is for non-SSSD setups only. 
-+ """ -+ conf = IPAChangeConf("IPA Installer") -+ conf.setOptionAssignment(':') -+ -+ with open(paths.NSSWITCH_CONF, 'r') as f: -+ current_opts = conf.parse(f) -+ current_nss_value = conf.findOpts( -+ current_opts, name='automount', type='option' -+ )[1] -+ if current_nss_value is None: -+ # no automount database present -+ current_nss_value = False # None cannot be backed up -+ else: -+ current_nss_value = current_nss_value['value'] -+ statestore.backup_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount', -+ current_nss_value -+ ) -+ -+ nss_value = ' files ldap' -+ opts = [ -+ { -+ 'name': 'automount', -+ 'type': 'option', -+ 'action': 'set', -+ 'value': nss_value, -+ }, -+ {'name': 'empty', 'type': 'empty'}, -+ ] -+ conf.changeConf(paths.NSSWITCH_CONF, opts) -+ -+ logger.info("Configured %s", paths.NSSWITCH_CONF) -+ -+ def disable_ldap_automount(self, statestore): -+ """Disable automount using LDAP""" -+ if statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) is False: -+ # Previous nsswitch.conf had no automount database configured -+ # so remove it. -+ conf = IPAChangeConf("IPA automount installer") -+ conf.setOptionAssignment(':') -+ changes = [conf.rmOption('automount')] -+ conf.changeConf(paths.NSSWITCH_CONF, changes) -+ self.restore_context(paths.NSSWITCH_CONF) -+ statestore.delete_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) -+ elif statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) is not None: -+ nss_value = statestore.get_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) -+ opts = [ -+ { -+ 'name': 'automount', -+ 'type': 'option', -+ 'action': 'set', -+ 'value': nss_value, -+ }, -+ {'name': 'empty', 'type': 'empty'}, -+ ] -+ conf = IPAChangeConf("IPA automount installer") -+ conf.setOptionAssignment(':') -+ conf.changeConf(paths.NSSWITCH_CONF, opts) -+ self.restore_context(paths.NSSWITCH_CONF) -+ statestore.delete_state( -+ 'ipa-client-automount-nsswitch', 'previous-automount' -+ ) - - tasks = BaseTaskNamespace() -diff --git a/ipaplatform/redhat/paths.py b/ipaplatform/redhat/paths.py -index 8ccd04b..15bdef6 100644 ---- a/ipaplatform/redhat/paths.py -+++ b/ipaplatform/redhat/paths.py -@@ -39,6 +39,7 @@ class RedHatPathNamespace(BasePathNamespace): - AUTHCONFIG = '/usr/sbin/authconfig' - AUTHSELECT = '/usr/bin/authselect' - SYSCONF_NETWORK = '/etc/sysconfig/network' -+ NSSWITCH_CONF = '/etc/authselect/user-nsswitch.conf' - - - paths = RedHatPathNamespace() -diff --git a/ipaplatform/redhat/tasks.py b/ipaplatform/redhat/tasks.py -index be0b641..e18f6fa 100644 ---- a/ipaplatform/redhat/tasks.py -+++ b/ipaplatform/redhat/tasks.py -@@ -744,4 +744,23 @@ class RedHatTaskNamespace(BaseTaskNamespace): - - return filenames - -+ def enable_ldap_automount(self, statestore): -+ """ -+ Point automount to ldap in nsswitch.conf. -+ This function is for non-SSSD setups only. 
-+ """ -+ super(RedHatTaskNamespace, self).enable_ldap_automount(statestore) -+ -+ authselect_cmd = [paths.AUTHSELECT, "enable-feature", -+ "with-custom-automount"] -+ ipautil.run(authselect_cmd) -+ -+ def disable_ldap_automount(self, statestore): -+ """Disable ldap-based automount""" -+ super(RedHatTaskNamespace, self).disable_ldap_automount(statestore) -+ -+ authselect_cmd = [paths.AUTHSELECT, "disable-feature", -+ "with-custom-automount"] -+ ipautil.run(authselect_cmd) -+ - tasks = RedHatTaskNamespace() - diff --git a/SOURCES/0011-adtrust-avoid-using-timestamp-in-klist-output_ed1c1626-rhbz#1750242.patch b/SOURCES/0011-adtrust-avoid-using-timestamp-in-klist-output_ed1c1626-rhbz#1750242.patch deleted file mode 100644 index eb61338..0000000 --- a/SOURCES/0011-adtrust-avoid-using-timestamp-in-klist-output_ed1c1626-rhbz#1750242.patch +++ /dev/null @@ -1,50 +0,0 @@ -From ed1c1626a607a5292c08836d13c32464d1b71859 Mon Sep 17 00:00:00 2001 -From: Alexander Bokovoy -Date: Mon, 9 Sep 2019 11:02:29 +0300 -Subject: [PATCH] adtrust: avoid using timestamp in klist output - -When parsing a keytab to copy keys to a different keytab, we don't need -the timestamp, so don't ask klist to output it. In some locales (en_IN, -for example), the timestamp is output in a single field without a space -between date and time. In other locales it can be represented with date -and time separated by a space. - -Fixes: https://pagure.io/freeipa/issue/8066 -Reviewed-By: Thomas Woerner ---- - ipaserver/install/plugins/adtrust.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/ipaserver/install/plugins/adtrust.py b/ipaserver/install/plugins/adtrust.py -index 28932e6c52..3b2e49bc05 100644 ---- a/ipaserver/install/plugins/adtrust.py -+++ b/ipaserver/install/plugins/adtrust.py -@@ -721,7 +721,7 @@ def execute(self, **options): - - - KeyEntry = namedtuple('KeyEntry', -- ['kvno', 'date', 'time', 'principal', 'etype', 'key']) -+ ['kvno', 'principal', 'etype', 'key']) - - - @register() -@@ -741,7 +741,7 @@ class update_host_cifs_keytabs(Updater): - def extract_key_refs(self, keytab): - host_princ = self.host_princ_template.format( - master=self.api.env.host, realm=self.api.env.realm) -- result = ipautil.run([paths.KLIST, "-etK", "-k", keytab], -+ result = ipautil.run([paths.KLIST, "-eK", "-k", keytab], - capture_output=True, raiseonerr=False, - nolog_output=True) - if result.returncode != 0: -@@ -752,8 +752,8 @@ def extract_key_refs(self, keytab): - if (host_princ in l and any(e in l for e in self.valid_etypes)): - - els = l.split() -- els[4] = els[4].strip('()') -- els[5] = els[5].strip('()') -+ els[-2] = els[-2].strip('()') -+ els[-1] = els[-1].strip('()') - keys_to_sync.append(KeyEntry._make(els)) - - return keys_to_sync diff --git a/SOURCES/0012-add-default-access-control-configuration-to-trusted-domain-objects_rhbz#1751707.patch b/SOURCES/0012-add-default-access-control-configuration-to-trusted-domain-objects_rhbz#1751707.patch deleted file mode 100644 index beffd5d..0000000 --- a/SOURCES/0012-add-default-access-control-configuration-to-trusted-domain-objects_rhbz#1751707.patch +++ /dev/null @@ -1,151 +0,0 @@ -From 0deea83e93665404bb536d181ae54ad7cff45336 Mon Sep 17 00:00:00 2001 -From: Alexander Bokovoy -Date: Sep 13 2019 07:34:35 +0000 -Subject: add default access control when migrating trust objects - - -It looks like for some cases we do not have proper set up keytab -retrieval configuration in the old trusted domain object. This mostly -affects two-way trust cases. 
In such cases, create default configuration -as ipasam would have created when trust was established. - -Resolves: https://pagure.io/freeipa/issue/8067 - -Signed-off-by: Alexander Bokovoy -Reviewed-By: Florence Blanc-Renaud - ---- - -diff --git a/ipaserver/install/plugins/adtrust.py b/ipaserver/install/plugins/adtrust.py -index 3b2e49b..7e6b5c3 100644 ---- a/ipaserver/install/plugins/adtrust.py -+++ b/ipaserver/install/plugins/adtrust.py -@@ -29,6 +29,9 @@ logger = logging.getLogger(__name__) - register = Registry() - - DEFAULT_ID_RANGE_SIZE = 200000 -+trust_read_keys_template = \ -+ ["cn=adtrust agents,cn=sysaccounts,cn=etc,{basedn}", -+ "cn=trust admins,cn=groups,cn=accounts,{basedn}"] - - - @register() -@@ -576,8 +579,15 @@ class update_tdo_to_new_layout(Updater): - 'krbprincipalkey') - entry_data['krbextradata'] = en.single_value.get( - 'krbextradata') -- entry_data['ipaAllowedToPerform;read_keys'] = en.get( -- 'ipaAllowedToPerform;read_keys', []) -+ read_keys = en.get('ipaAllowedToPerform;read_keys', []) -+ if not read_keys: -+ # Old style, no ipaAllowedToPerform;read_keys in the entry, -+ # use defaults that ipasam should have set when creating a -+ # trust -+ read_keys = list(map( -+ lambda x: x.format(basedn=self.api.env.basedn), -+ trust_read_keys_template)) -+ entry_data['ipaAllowedToPerform;read_keys'] = read_keys - - entry.update(entry_data) - try: - -From b32510d67d2bd64e77659c6766d3f9647629acec Mon Sep 17 00:00:00 2001 -From: Alexander Bokovoy -Date: Sep 13 2019 07:34:35 +0000 -Subject: adtrust: add default read_keys permission for TDO objects - - -If trusted domain object (TDO) is lacking ipaAllowedToPerform;read_keys -attribute values, it cannot be used by SSSD to retrieve TDO keys and the -whole communication with Active Directory domain controllers will not be -possible. - -This seems to affect trusts which were created before -ipaAllowedToPerform;read_keys permission granting was introduced -(FreeIPA 4.2). Add back the default setting for the permissions which -grants access to trust agents and trust admins. 
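Concretely, the fallback amounts to formatting two well-known group DNs against the IPA base DN, roughly as follows (a simplified sketch; default_read_keys is only an illustrative helper name):

    # Sketch: default grantees allowed to read TDO keys when the
    # ipaAllowedToPerform;read_keys attribute is missing from the entry.
    trust_read_keys_template = [
        "cn=adtrust agents,cn=sysaccounts,cn=etc,{basedn}",
        "cn=trust admins,cn=groups,cn=accounts,{basedn}",
    ]

    def default_read_keys(basedn):
        # e.g. basedn = "dc=example,dc=test"
        return [t.format(basedn=basedn) for t in trust_read_keys_template]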
- -Resolves: https://pagure.io/freeipa/issue/8067 - -Signed-off-by: Alexander Bokovoy -Reviewed-By: Florence Blanc-Renaud - ---- - -diff --git a/install/updates/90-post_upgrade_plugins.update b/install/updates/90-post_upgrade_plugins.update -index f5f428d..8eb1977 100644 ---- a/install/updates/90-post_upgrade_plugins.update -+++ b/install/updates/90-post_upgrade_plugins.update -@@ -13,6 +13,7 @@ plugin: update_default_trust_view - plugin: update_tdo_gidnumber - plugin: update_tdo_to_new_layout - plugin: update_host_cifs_keytabs -+plugin: update_tdo_default_read_keys_permissions - plugin: update_ca_renewal_master - plugin: update_idrange_type - plugin: update_pacs -diff --git a/ipaserver/install/plugins/adtrust.py b/ipaserver/install/plugins/adtrust.py -index 7e6b5c3..386fe53 100644 ---- a/ipaserver/install/plugins/adtrust.py -+++ b/ipaserver/install/plugins/adtrust.py -@@ -821,3 +821,59 @@ class update_host_cifs_keytabs(Updater): - self.copy_key(paths.SAMBA_KEYTAB, hostkey) - - return False, [] -+ -+ -+@register() -+class update_tdo_default_read_keys_permissions(Updater): -+ trust_filter = \ -+ "(&(objectClass=krbPrincipal)(krbPrincipalName=krbtgt/{nbt}@*))" -+ -+ def execute(self, **options): -+ ldap = self.api.Backend.ldap2 -+ -+ # First, see if trusts are enabled on the server -+ if not self.api.Command.adtrust_is_enabled()['result']: -+ logger.debug('AD Trusts are not enabled on this server') -+ return False, [] -+ -+ result = self.api.Command.trustconfig_show()['result'] -+ our_nbt_name = result.get('ipantflatname', [None])[0] -+ if not our_nbt_name: -+ return False, [] -+ -+ trusts_dn = self.api.env.container_adtrusts + self.api.env.basedn -+ trust_filter = self.trust_filter.format(nbt=our_nbt_name) -+ -+ # We might be in a situation when no trusts exist yet -+ # In such case there is nothing to upgrade but we have to catch -+ # an exception or it will abort the whole upgrade process -+ try: -+ tdos = ldap.get_entries( -+ base_dn=trusts_dn, -+ scope=ldap.SCOPE_SUBTREE, -+ filter=trust_filter, -+ attrs_list=['*']) -+ except errors.EmptyResult: -+ tdos = [] -+ -+ for tdo in tdos: -+ updates = dict() -+ oc = tdo.get('objectClass', []) -+ if 'ipaAllowedOperations' not in oc: -+ updates['objectClass'] = oc + ['ipaAllowedOperations'] -+ -+ read_keys = tdo.get('ipaAllowedToPerform;read_keys', []) -+ if not read_keys: -+ read_keys_values = list(map( -+ lambda x: x.format(basedn=self.api.env.basedn), -+ trust_read_keys_template)) -+ updates['ipaAllowedToPerform;read_keys'] = read_keys_values -+ -+ tdo.update(updates) -+ try: -+ ldap.update_entry(tdo) -+ except errors.EmptyModlist: -+ logger.debug("No update was required for TDO %s", -+ tdo.single_value.get('krbCanonicalName')) -+ -+ return False, [] - diff --git a/SOURCES/0013-Do-not-run-trust-upgrade-code-if-master-lacks-Samba-bindings_1854038_rhbz#1773516.patch b/SOURCES/0013-Do-not-run-trust-upgrade-code-if-master-lacks-Samba-bindings_1854038_rhbz#1773516.patch deleted file mode 100644 index 456f839..0000000 --- a/SOURCES/0013-Do-not-run-trust-upgrade-code-if-master-lacks-Samba-bindings_1854038_rhbz#1773516.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 18540386230e295087296e58761ced2b781ae4e3 Mon Sep 17 00:00:00 2001 -From: Alexander Bokovoy -Date: Nov 21 2019 09:44:37 +0000 -Subject: Do not run trust upgrade code if master lacks Samba bindings - - -If a replica has no Samba bindings but there are trust agreements -configured on some trust controller, skip trust upgrade code on this -replica. 
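The guard is the usual optional-import pattern, schematically (a sketch; the samba.dcerpc import path is an assumption here, and trust_upgrade_applicable is only an illustrative helper):

    # Sketch: remember whether the Samba python bindings are available and
    # let the upgrade plugin return early when they are not.
    try:
        from samba.dcerpc import drsblobs
    except ImportError:
        drsblobs = None

    def trust_upgrade_applicable():
        # A master without Samba bindings is not a trust controller, so the
        # updater returns (False, []) without touching the trust objects.
        return drsblobs is not None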
- -Resolves: https://pagure.io/freeipa/issue/8001 -Signed-off-by: Alexander Bokovoy -Reviewed-By: Thomas Woerner - ---- - -diff --git a/ipaserver/install/plugins/adtrust.py b/ipaserver/install/plugins/adtrust.py -index b7bb53f..2a9b4f0 100644 ---- a/ipaserver/install/plugins/adtrust.py -+++ b/ipaserver/install/plugins/adtrust.py -@@ -24,6 +24,8 @@ except ImportError: - def ndr_unpack(x): - raise NotImplementedError - -+ drsblobs = None -+ - logger = logging.getLogger(__name__) - - register = Registry() -@@ -633,6 +635,10 @@ class update_tdo_to_new_layout(Updater): - logger.debug('AD Trusts are not enabled on this server') - return False, [] - -+ # If we have no Samba bindings, this master is not a trust controller -+ if drsblobs is None: -+ return False, [] -+ - ldap = self.api.Backend.ldap2 - gidNumber = get_gidNumber(ldap, self.api.env) - if gidNumber is None: - diff --git a/SOURCES/0014-CVE-2019-10195-and-CVE-2019-14867.patch b/SOURCES/0014-CVE-2019-10195-and-CVE-2019-14867.patch deleted file mode 100644 index 9b399a3..0000000 --- a/SOURCES/0014-CVE-2019-10195-and-CVE-2019-14867.patch +++ /dev/null @@ -1,187 +0,0 @@ -From e11e73abc101361c0b66b3b958a64c9c8f6c608b Mon Sep 17 00:00:00 2001 -From: Simo Sorce -Date: Mon, 16 Sep 2019 11:12:25 -0400 -Subject: [PATCH 1/2] CVE-2019-14867: Make sure to have storage space for tag - -ber_scanf expects a pointer to a ber_tag_t to return the tag pointed at -by "t", if that is not provided the pointer will be store in whatever -memory location is pointed by the stack at that time causeing a crash. - -It's also possible for unprivileged end users to trigger parsing of the -krbPrincipalKey. - -Fixes #8071: CVE-2019-14867 - -Reported by Todd Lipcon from Cloudera - -Signed-off-by: Simo Sorce -Reviewed-By: Christian Heimes -(cherry picked from commit d2e0d94521893bc5f002a335a8c0b99601e1afd6) ---- - util/ipa_krb5.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/util/ipa_krb5.c b/util/ipa_krb5.c -index a27cd4a4e..c09c3daa5 100644 ---- a/util/ipa_krb5.c -+++ b/util/ipa_krb5.c -@@ -554,7 +554,7 @@ int ber_decode_krb5_key_data(struct berval *encoded, int *m_kvno, - retag = ber_peek_tag(be, &setlen); - if (retag == (LBER_CONSTRUCTED | LBER_CLASS_CONTEXT | 2)) { - /* not supported yet, skip */ -- retag = ber_scanf(be, "t[x]}"); -+ retag = ber_scanf(be, "t[x]}", &tag); - } else { - retag = ber_scanf(be, "}"); - } --- -2.23.0 - - -From 39120fa9a4a00983917659e4253446ed82839975 Mon Sep 17 00:00:00 2001 -From: Rob Crittenden -Date: Tue, 2 Jul 2019 13:44:48 -0400 -Subject: [PATCH 2/2] CVE-2019-10195: Don't log passwords embedded in commands - in calls using batch - -A raw batch request was fully logged which could expose parameters -we don't want logged, like passwords. - -Override _repr_iter to use the individual commands to log the -values so that values are properly obscured. - -In case of errors log the full value on when the server is in -debug mode. 
- -Reported by Jamison Bennett from Cloudera - -Signed-off-by: Rob Crittenden -Reviewed-by: Florence Blanc-Renaud ---- - ipaserver/plugins/batch.py | 96 ++++++++++++++++++++++++++++---------- - 1 file changed, 72 insertions(+), 24 deletions(-) - -diff --git a/ipaserver/plugins/batch.py b/ipaserver/plugins/batch.py -index c9895a8f6..b95944c54 100644 ---- a/ipaserver/plugins/batch.py -+++ b/ipaserver/plugins/batch.py -@@ -93,35 +93,82 @@ class batch(Command): - Output('results', (list, tuple), doc='') - ) - -+ def _validate_request(self, request): -+ """ -+ Check that an individual request in a batch is parseable and the -+ commands exists. -+ """ -+ if 'method' not in request: -+ raise errors.RequirementError(name='method') -+ if 'params' not in request: -+ raise errors.RequirementError(name='params') -+ name = request['method'] -+ if (name not in self.api.Command or -+ isinstance(self.api.Command[name], Local)): -+ raise errors.CommandError(name=name) -+ -+ # If params are not formated as a tuple(list, dict) -+ # the following lines will raise an exception -+ # that triggers an internal server error -+ # Raise a ConversionError instead to report the issue -+ # to the client -+ try: -+ a, kw = request['params'] -+ newkw = dict((str(k), v) for k, v in kw.items()) -+ api.Command[name].args_options_2_params(*a, **newkw) -+ except (AttributeError, ValueError, TypeError): -+ raise errors.ConversionError( -+ name='params', -+ error=_(u'must contain a tuple (list, dict)')) -+ except Exception as e: -+ raise errors.ConversionError( -+ name='params', -+ error=str(e)) -+ -+ def _repr_iter(self, **params): -+ """ -+ Iterate through the request and use the Command _repr_intr so -+ that sensitive information (passwords) is not exposed. -+ -+ In case of a malformatted request redact the entire thing. 
-+ """ -+ exceptions = False -+ for arg in (params.get('methods', [])): -+ try: -+ self._validate_request(arg) -+ except Exception: -+ # redact the whole request since we don't know what's in it -+ exceptions = True -+ yield u'********' -+ continue -+ -+ name = arg['method'] -+ a, kw = arg['params'] -+ newkw = dict((str(k), v) for k, v in kw.items()) -+ param = api.Command[name].args_options_2_params( -+ *a, **newkw) -+ -+ yield '{}({})'.format( -+ api.Command[name].name, -+ ', '.join(api.Command[name]._repr_iter(**param)) -+ ) -+ -+ if exceptions: -+ logger.debug('batch: %s', -+ ', '.join(super(batch, self)._repr_iter(**params))) -+ - def execute(self, methods=None, **options): - results = [] - for arg in (methods or []): - params = dict() - name = None - try: -- if 'method' not in arg: -- raise errors.RequirementError(name='method') -- if 'params' not in arg: -- raise errors.RequirementError(name='params') -+ self._validate_request(arg) - name = arg['method'] -- if (name not in self.api.Command or -- isinstance(self.api.Command[name], Local)): -- raise errors.CommandError(name=name) -- -- # If params are not formated as a tuple(list, dict) -- # the following lines will raise an exception -- # that triggers an internal server error -- # Raise a ConversionError instead to report the issue -- # to the client -- try: -- a, kw = arg['params'] -- newkw = dict((str(k), v) for k, v in kw.items()) -- params = api.Command[name].args_options_2_params( -- *a, **newkw) -- except (AttributeError, ValueError, TypeError): -- raise errors.ConversionError( -- name='params', -- error=_(u'must contain a tuple (list, dict)')) -+ a, kw = arg['params'] -+ newkw = dict((str(k), v) for k, v in kw.items()) -+ params = api.Command[name].args_options_2_params( -+ *a, **newkw) - newkw.setdefault('version', options['version']) - - result = api.Command[name](*a, **newkw) -@@ -133,8 +180,9 @@ class batch(Command): - ) - result['error']=None - except Exception as e: -- if isinstance(e, errors.RequirementError) or \ -- isinstance(e, errors.CommandError): -+ if (isinstance(e, errors.RequirementError) or -+ isinstance(e, errors.CommandError) or -+ isinstance(e, errors.ConversionError)): - logger.info( - '%s: batch: %s', - context.principal, # pylint: disable=no-member --- -2.23.0 - diff --git a/SOURCES/1001-Change-branding-to-IPA-and-Identity-Management.patch b/SOURCES/1001-Change-branding-to-IPA-and-Identity-Management.patch index dd6dc07..be93e35 100644 --- a/SOURCES/1001-Change-branding-to-IPA-and-Identity-Management.patch +++ b/SOURCES/1001-Change-branding-to-IPA-and-Identity-Management.patch @@ -1,15 +1,15 @@ -From 63b3030e2e2f6411ad29448746b96bb9658467f8 Mon Sep 17 00:00:00 2001 +From a98b0595fce7dea121c743455ac5d44a2e282e80 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 02/72] client/man/default.conf.5: Change branding to IPA - and Identity Management +Subject: [PATCH 01/71] client/man/default.conf.5: Change branding to IPA and + Identity Management --- client/man/default.conf.5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/default.conf.5 b/client/man/default.conf.5 -index f21d9d5b7..d6c1e42d1 100644 +index 728fc08..6ec8616 100644 --- a/client/man/default.conf.5 +++ b/client/man/default.conf.5 @@ -16,7 +16,7 @@ @@ -22,21 +22,21 @@ index f21d9d5b7..d6c1e42d1 100644 default.conf \- IPA configuration file .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From 3fe816976ea30d363ae5c6086b8daaaadaa5d7f7 Mon Sep 17 00:00:00 2001 +From 
67d0b5bf5b4ce068d3d5a89a36fca44589ba7040 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 03/72] client/man/ipa-certupdate.1: Change branding to IPA - and Identity Management +Subject: [PATCH 02/71] client/man/ipa-certupdate.1: Change branding to IPA and + Identity Management --- client/man/ipa-certupdate.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa-certupdate.1 b/client/man/ipa-certupdate.1 -index d95790a36..431b395a9 100644 +index d95790a..431b395 100644 --- a/client/man/ipa-certupdate.1 +++ b/client/man/ipa-certupdate.1 @@ -16,7 +16,7 @@ @@ -49,21 +49,21 @@ index d95790a36..431b395a9 100644 ipa\-certupdate \- Update local IPA certificate databases with certificates from the server .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From eca4cf0eabb4dee96ca01c02910153147e58ec4d Mon Sep 17 00:00:00 2001 +From 84addd7681276f065e6c974997127d394133d51c Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 04/72] client/man/ipa-client-automount.1: Change branding - to IPA and Identity Management +Subject: [PATCH 03/71] client/man/ipa-client-automount.1: Change branding to + IPA and Identity Management --- client/man/ipa-client-automount.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa-client-automount.1 b/client/man/ipa-client-automount.1 -index 343f64160..3f7c7d506 100644 +index 4c3caee..3f6edab 100644 --- a/client/man/ipa-client-automount.1 +++ b/client/man/ipa-client-automount.1 @@ -16,7 +16,7 @@ @@ -76,21 +76,21 @@ index 343f64160..3f7c7d506 100644 ipa\-client\-automount \- Configure automount and NFS for IPA .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From e4097608a167f41998e863dfed0e3d135c54b6a0 Mon Sep 17 00:00:00 2001 +From d63e2ce893f3fb8a3fcf0ec91893847f942380f6 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 05/72] client/man/ipa-client-install.1: Change branding to - IPA and Identity Management +Subject: [PATCH 04/71] client/man/ipa-client-install.1: Change branding to IPA + and Identity Management --- client/man/ipa-client-install.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa-client-install.1 b/client/man/ipa-client-install.1 -index a20bec9a1..d7347ed37 100644 +index 94b4b04..743fa6a 100644 --- a/client/man/ipa-client-install.1 +++ b/client/man/ipa-client-install.1 @@ -1,7 +1,7 @@ @@ -103,21 +103,21 @@ index a20bec9a1..d7347ed37 100644 ipa\-client\-install \- Configure an IPA client .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From 3bfd21f6778e288b5094262aa481a835b49cc0f4 Mon Sep 17 00:00:00 2001 +From 959face241f87ba6c703b7ae4aa71ff9da60d175 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 06/72] client/man/ipa-getkeytab.1: Change branding to IPA - and Identity Management +Subject: [PATCH 05/71] client/man/ipa-getkeytab.1: Change branding to IPA and + Identity Management --- client/man/ipa-getkeytab.1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/man/ipa-getkeytab.1 b/client/man/ipa-getkeytab.1 -index 20ceee2e6..061798693 100644 +index f06fcd9..01a2618 100644 --- a/client/man/ipa-getkeytab.1 +++ b/client/man/ipa-getkeytab.1 @@ -17,7 +17,7 @@ @@ -129,7 +129,7 @@ index 20ceee2e6..061798693 100644 .SH "NAME" ipa\-getkeytab \- Get a keytab for a Kerberos principal .SH "SYNOPSIS" -@@ -117,7 +117,7 @@ GSSAPI or EXTERNAL. +@@ -118,7 +118,7 @@ GSSAPI or EXTERNAL. 
\fB\-r\fR Retrieve mode. Retrieve an existing key from the server instead of generating a new one. This is incompatible with the \-\-password option, and will work only @@ -139,13 +139,13 @@ index 20ceee2e6..061798693 100644 .SH "EXAMPLES" Add and retrieve a keytab for the NFS service principal on -- -2.17.1 +2.21.0 -From 812ccffd549367cac3e4d2896b231b7b278e0b92 Mon Sep 17 00:00:00 2001 +From f6a2e0baebd1969de46a0ea92b68bb0742459235 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 07/72] client/man/ipa-join.1: Change branding to IPA and +Subject: [PATCH 06/71] client/man/ipa-join.1: Change branding to IPA and Identity Management --- @@ -153,7 +153,7 @@ Subject: [PATCH 07/72] client/man/ipa-join.1: Change branding to IPA and 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa-join.1 b/client/man/ipa-join.1 -index d88160784..30b667558 100644 +index d881607..30b6675 100644 --- a/client/man/ipa-join.1 +++ b/client/man/ipa-join.1 @@ -16,7 +16,7 @@ @@ -166,21 +166,21 @@ index d88160784..30b667558 100644 ipa\-join \- Join a machine to an IPA realm and get a keytab for the host service principal .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From 3cac7f131059c01306b1db34fc30345add3fcf11 Mon Sep 17 00:00:00 2001 +From fcf92b11295321a8df6eb27babcc959926a59fe3 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 08/72] client/man/ipa-rmkeytab.1: Change branding to IPA - and Identity Management +Subject: [PATCH 07/71] client/man/ipa-rmkeytab.1: Change branding to IPA and + Identity Management --- client/man/ipa-rmkeytab.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa-rmkeytab.1 b/client/man/ipa-rmkeytab.1 -index 53f775439..2c8218c94 100644 +index 53f7754..2c8218c 100644 --- a/client/man/ipa-rmkeytab.1 +++ b/client/man/ipa-rmkeytab.1 @@ -17,7 +17,7 @@ @@ -193,21 +193,21 @@ index 53f775439..2c8218c94 100644 ipa\-rmkeytab \- Remove a kerberos principal from a keytab .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From 0373bb1499f50bf4c04becabf2e773dd5977060e Mon Sep 17 00:00:00 2001 +From 8978dadb62b23014d5d82547e16c07c575c7cf56 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 09/72] client/man/ipa.1: Change branding to IPA and - Identity Management +Subject: [PATCH 08/71] client/man/ipa.1: Change branding to IPA and Identity + Management --- client/man/ipa.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/man/ipa.1 b/client/man/ipa.1 -index f9fae7c0d..2fb21b52d 100644 +index f9fae7c..2fb21b5 100644 --- a/client/man/ipa.1 +++ b/client/man/ipa.1 @@ -16,7 +16,7 @@ @@ -220,21 +220,21 @@ index f9fae7c0d..2fb21b52d 100644 ipa \- IPA command\-line interface .SH "SYNOPSIS" -- -2.17.1 +2.21.0 -From 36b7dce706ec2b0b650c51cea24be0655fd0c096 Mon Sep 17 00:00:00 2001 +From d2a614533c0d7c1203d9251dc557871bc8962efd Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 10/72] install/html/ssbrowser.html: Change branding to IPA - and Identity Management +Subject: [PATCH 09/71] install/html/ssbrowser.html: Change branding to IPA and + Identity Management --- install/html/ssbrowser.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install/html/ssbrowser.html b/install/html/ssbrowser.html -index faa7e657b..89ada7cb1 100644 +index faa7e65..89ada7c 100644 --- a/install/html/ssbrowser.html +++ b/install/html/ssbrowser.html @@ -2,7 +2,7 @@ @@ 
-256,21 +256,21 @@ index faa7e657b..89ada7cb1 100644 -- -2.17.1 +2.21.0 -From 9273d2fdee9baef212eeaac941b7c8b497d50728 Mon Sep 17 00:00:00 2001 +From 199f34178cd8dfff0fd5edd37472787bbd3b4320 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 11/72] install/html/unauthorized.html: Change branding to - IPA and Identity Management +Subject: [PATCH 10/71] install/html/unauthorized.html: Change branding to IPA + and Identity Management --- install/html/unauthorized.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install/html/unauthorized.html b/install/html/unauthorized.html -index 630982da8..b8c64d69d 100644 +index 630982d..b8c64d6 100644 --- a/install/html/unauthorized.html +++ b/install/html/unauthorized.html @@ -2,7 +2,7 @@ @@ -292,13 +292,13 @@ index 630982da8..b8c64d69d 100644 -- -2.17.1 +2.21.0 -From b9d7e2a0d08d8d03f1fbaaae6268292934f894f0 Mon Sep 17 00:00:00 2001 +From 116e40f79a289aa4817cee7d8fbb4935b6346997 Mon Sep 17 00:00:00 2001 From: Alexander Bokovoy Date: Sun, 7 Oct 2018 12:25:39 +0300 -Subject: [PATCH 12/72] install/migration/index.html: Change branding to IPA +Subject: [PATCH 11/71] install/migration/index.html: Change branding to IPA and Identity Management --- @@ -306,7 +306,7 @@ Subject: [PATCH 12/72] install/migration/index.html: Change branding to IPA 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/migration/index.html b/install/migration/index.html -index fca517cdc..b5ac1f6df 100644 +index fca517c..b5ac1f6 100644 --- a/install/migration/index.html +++ b/install/migration/index.html @@ -2,7 +2,7 @@ @@ -319,21 +319,21 @@ index fca517cdc..b5ac1f6df 100644