From faf1e54b7ddc209555a0667f5612f0910378aeb9 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jun 23 2020 11:11:10 +0000 Subject: import cloud-init-18.5-6.el7_8.5 --- diff --git a/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch b/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch new file mode 100644 index 0000000..1e5d0ff --- /dev/null +++ b/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch @@ -0,0 +1,473 @@ +From 60991b1241a5efb585df889d4343007e501fd70c Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 5 May 2020 08:08:15 +0200 +Subject: [PATCH 2/5] Add support for publishing host keys to GCE guest + attributes + +RH-Author: Eduardo Otubo +Message-id: <20200504085238.25884-3-otubo@redhat.com> +Patchwork-id: 96243 +O-Subject: [RHEL-7.8.z cloud-init PATCH 2/5] Add support for publishing host keys to GCE guest attributes +Bugzilla: 1827207 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit 155847209e6a3ed5face91a133d8488a703f3f93 +Author: Rick Wright +Date: Fri Aug 9 17:11:05 2019 +0000 + + Add support for publishing host keys to GCE guest attributes + + This adds an empty publish_host_keys() method to the default datasource + that is called by cc_ssh.py. This feature can be controlled by the + 'ssh_publish_hostkeys' config option. It is enabled by default but can + be disabled by setting 'enabled' to false. Also, a blacklist of key + types is supported. + + In addition, this change implements ssh_publish_hostkeys() for the GCE + datasource, attempting to write the hostkeys to the instance's guest + attributes. Using these hostkeys for ssh connections is currently + supported by the alpha version of Google's 'gcloud' command-line tool. + + (On Google Compute Engine, this feature will be enabled by setting the + 'enable-guest-attributes' metadata key to 'true' for the + project/instance that you would like to use this feature for. When + connecting to the instance for the first time using 'gcloud compute ssh' + the hostkeys will be read from the guest attributes for the instance and + written to the user's local known_hosts file for Google Compute Engine + instances.) + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/config/cc_ssh.py | 55 +++++++++ + cloudinit/config/tests/test_ssh.py | 166 ++++++++++++++++++++++++++++ + cloudinit/sources/DataSourceGCE.py | 22 +++- + cloudinit/sources/__init__.py | 10 ++ + cloudinit/url_helper.py | 9 +- + tests/unittests/test_datasource/test_gce.py | 18 +++ + 6 files changed, 274 insertions(+), 6 deletions(-) + +diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py +index f8f7cb3..53f6939 100755 +--- a/cloudinit/config/cc_ssh.py ++++ b/cloudinit/config/cc_ssh.py +@@ -91,6 +91,9 @@ public keys. + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... ++ ssh_publish_hostkeys: ++ enabled: (Defaults to true) ++ blacklist: (Defaults to [dsa]) + """ + + import glob +@@ -104,6 +107,10 @@ from cloudinit import util + + GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] + KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' ++PUBLISH_HOST_KEYS = True ++# Don't publish the dsa hostkey by default since OpenSSH recommends not using ++# it. 
++HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] + + CONFIG_KEY_TO_FILE = {} + PRIV_TO_PUB = {} +@@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args): + util.logexc(log, "Failed generating key type %s to " + "file %s", keytype, keyfile) + ++ if "ssh_publish_hostkeys" in cfg: ++ host_key_blacklist = util.get_cfg_option_list( ++ cfg["ssh_publish_hostkeys"], "blacklist", ++ HOST_KEY_PUBLISH_BLACKLIST) ++ publish_hostkeys = util.get_cfg_option_bool( ++ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) ++ else: ++ host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST ++ publish_hostkeys = PUBLISH_HOST_KEYS ++ ++ if publish_hostkeys: ++ hostkeys = get_public_host_keys(blacklist=host_key_blacklist) ++ try: ++ cloud.datasource.publish_host_keys(hostkeys) ++ except Exception as e: ++ util.logexc(log, "Publishing host keys failed!") ++ + try: + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ug_util.extract_default(users) +@@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): + + ssh_util.setup_user_keys(keys, 'root', options=key_prefix) + ++ ++def get_public_host_keys(blacklist=None): ++ """Read host keys from /etc/ssh/*.pub files and return them as a list. ++ ++ @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] ++ @returns: List of keys, each formatted as a two-element tuple. ++ e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] ++ """ ++ public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) ++ key_list = [] ++ blacklist_files = [] ++ if blacklist: ++ # Convert blacklist to filenames: ++ # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' ++ blacklist_files = [public_key_file_tmpl % (key_type,) ++ for key_type in blacklist] ++ # Get list of public key files and filter out blacklisted files. ++ file_list = [hostfile for hostfile ++ in glob.glob(public_key_file_tmpl % ('*',)) ++ if hostfile not in blacklist_files] ++ ++ # Read host key files, retrieve first two fields as a tuple and ++ # append that tuple to key_list. ++ for file_name in file_list: ++ file_contents = util.load_file(file_name) ++ key_data = file_contents.split() ++ if key_data and len(key_data) > 1: ++ key_list.append(tuple(key_data[:2])) ++ return key_list ++ ++ + # vi: ts=4 expandtab +diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py +index c8a4271..e778984 100644 +--- a/cloudinit/config/tests/test_ssh.py ++++ b/cloudinit/config/tests/test_ssh.py +@@ -1,5 +1,6 @@ + # This file is part of cloud-init. See LICENSE file for license information. + ++import os.path + + from cloudinit.config import cc_ssh + from cloudinit import ssh_util +@@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh." 
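For orientation while reading the tests that follow: the configuration surface added above is deliberately small. A minimal sketch of the parsed cloud-config dicts the new handle() branch accepts (the same shapes the tests below construct; values illustrative):

    # Key absent: module defaults apply (publishing enabled, 'dsa' blacklisted).
    cfg = {}
    # Explicit form: 'enabled' and 'blacklist' are the only keys cc_ssh reads.
    cfg = {'ssh_publish_hostkeys': {'enabled': True,
                                    'blacklist': ['dsa', 'rsa']}}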
+ class TestHandleSsh(CiTestCase): + """Test cc_ssh handling of ssh config.""" + ++ def _publish_hostkey_test_setup(self): ++ self.test_hostkeys = { ++ 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), ++ 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), ++ 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), ++ 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), ++ } ++ self.test_hostkey_files = [] ++ hostkey_tmpdir = self.tmp_dir() ++ for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: ++ key_data = self.test_hostkeys[key_type] ++ filename = 'ssh_host_%s_key.pub' % key_type ++ filepath = os.path.join(hostkey_tmpdir, filename) ++ self.test_hostkey_files.append(filepath) ++ with open(filepath, 'w') as f: ++ f.write(' '.join(key_data)) ++ ++ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') ++ + def test_apply_credentials_with_user(self, m_setup_keys): + """Apply keys for the given user and root.""" + keys = ["key1"] +@@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase): + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ([], {}) ++ cc_ssh.PUBLISH_HOST_KEYS = False + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, None, None) +@@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase): + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) ++ ++ @mock.patch(MODPATH + "glob.glob") ++ @mock.patch(MODPATH + "ug_util.normalize_users_groups") ++ @mock.patch(MODPATH + "os.path.exists") ++ def test_handle_publish_hostkeys_default( ++ self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ """Test handle with various configs for ssh_publish_hostkeys.""" ++ self._publish_hostkey_test_setup() ++ cc_ssh.PUBLISH_HOST_KEYS = True ++ keys = ["key1"] ++ user = "clouduser" ++ # Return no matching keys for first glob, test keys for second. ++ m_glob.side_effect = iter([ ++ [], ++ self.test_hostkey_files, ++ ]) ++ # Mock os.path.exits to True to short-circuit the key writing logic ++ m_path_exists.return_value = True ++ m_nug.return_value = ({user: {"default": user}}, {}) ++ cloud = self.tmp_cloud( ++ distro='ubuntu', metadata={'public-keys': keys}) ++ cloud.datasource.publish_host_keys = mock.Mock() ++ ++ cfg = {} ++ expected_call = [self.test_hostkeys[key_type] for key_type ++ in ['ecdsa', 'ed25519', 'rsa']] ++ cc_ssh.handle("name", cfg, cloud, None, None) ++ self.assertEqual([mock.call(expected_call)], ++ cloud.datasource.publish_host_keys.call_args_list) ++ ++ @mock.patch(MODPATH + "glob.glob") ++ @mock.patch(MODPATH + "ug_util.normalize_users_groups") ++ @mock.patch(MODPATH + "os.path.exists") ++ def test_handle_publish_hostkeys_config_enable( ++ self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ """Test handle with various configs for ssh_publish_hostkeys.""" ++ self._publish_hostkey_test_setup() ++ cc_ssh.PUBLISH_HOST_KEYS = False ++ keys = ["key1"] ++ user = "clouduser" ++ # Return no matching keys for first glob, test keys for second. 
++ m_glob.side_effect = iter([ ++ [], ++ self.test_hostkey_files, ++ ]) ++ # Mock os.path.exits to True to short-circuit the key writing logic ++ m_path_exists.return_value = True ++ m_nug.return_value = ({user: {"default": user}}, {}) ++ cloud = self.tmp_cloud( ++ distro='ubuntu', metadata={'public-keys': keys}) ++ cloud.datasource.publish_host_keys = mock.Mock() ++ ++ cfg = {'ssh_publish_hostkeys': {'enabled': True}} ++ expected_call = [self.test_hostkeys[key_type] for key_type ++ in ['ecdsa', 'ed25519', 'rsa']] ++ cc_ssh.handle("name", cfg, cloud, None, None) ++ self.assertEqual([mock.call(expected_call)], ++ cloud.datasource.publish_host_keys.call_args_list) ++ ++ @mock.patch(MODPATH + "glob.glob") ++ @mock.patch(MODPATH + "ug_util.normalize_users_groups") ++ @mock.patch(MODPATH + "os.path.exists") ++ def test_handle_publish_hostkeys_config_disable( ++ self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ """Test handle with various configs for ssh_publish_hostkeys.""" ++ self._publish_hostkey_test_setup() ++ cc_ssh.PUBLISH_HOST_KEYS = True ++ keys = ["key1"] ++ user = "clouduser" ++ # Return no matching keys for first glob, test keys for second. ++ m_glob.side_effect = iter([ ++ [], ++ self.test_hostkey_files, ++ ]) ++ # Mock os.path.exits to True to short-circuit the key writing logic ++ m_path_exists.return_value = True ++ m_nug.return_value = ({user: {"default": user}}, {}) ++ cloud = self.tmp_cloud( ++ distro='ubuntu', metadata={'public-keys': keys}) ++ cloud.datasource.publish_host_keys = mock.Mock() ++ ++ cfg = {'ssh_publish_hostkeys': {'enabled': False}} ++ cc_ssh.handle("name", cfg, cloud, None, None) ++ self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) ++ cloud.datasource.publish_host_keys.assert_not_called() ++ ++ @mock.patch(MODPATH + "glob.glob") ++ @mock.patch(MODPATH + "ug_util.normalize_users_groups") ++ @mock.patch(MODPATH + "os.path.exists") ++ def test_handle_publish_hostkeys_config_blacklist( ++ self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ """Test handle with various configs for ssh_publish_hostkeys.""" ++ self._publish_hostkey_test_setup() ++ cc_ssh.PUBLISH_HOST_KEYS = True ++ keys = ["key1"] ++ user = "clouduser" ++ # Return no matching keys for first glob, test keys for second. ++ m_glob.side_effect = iter([ ++ [], ++ self.test_hostkey_files, ++ ]) ++ # Mock os.path.exits to True to short-circuit the key writing logic ++ m_path_exists.return_value = True ++ m_nug.return_value = ({user: {"default": user}}, {}) ++ cloud = self.tmp_cloud( ++ distro='ubuntu', metadata={'public-keys': keys}) ++ cloud.datasource.publish_host_keys = mock.Mock() ++ ++ cfg = {'ssh_publish_hostkeys': {'enabled': True, ++ 'blacklist': ['dsa', 'rsa']}} ++ expected_call = [self.test_hostkeys[key_type] for key_type ++ in ['ecdsa', 'ed25519']] ++ cc_ssh.handle("name", cfg, cloud, None, None) ++ self.assertEqual([mock.call(expected_call)], ++ cloud.datasource.publish_host_keys.call_args_list) ++ ++ @mock.patch(MODPATH + "glob.glob") ++ @mock.patch(MODPATH + "ug_util.normalize_users_groups") ++ @mock.patch(MODPATH + "os.path.exists") ++ def test_handle_publish_hostkeys_empty_blacklist( ++ self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ """Test handle with various configs for ssh_publish_hostkeys.""" ++ self._publish_hostkey_test_setup() ++ cc_ssh.PUBLISH_HOST_KEYS = True ++ keys = ["key1"] ++ user = "clouduser" ++ # Return no matching keys for first glob, test keys for second. 
++ m_glob.side_effect = iter([ ++ [], ++ self.test_hostkey_files, ++ ]) ++ # Mock os.path.exits to True to short-circuit the key writing logic ++ m_path_exists.return_value = True ++ m_nug.return_value = ({user: {"default": user}}, {}) ++ cloud = self.tmp_cloud( ++ distro='ubuntu', metadata={'public-keys': keys}) ++ cloud.datasource.publish_host_keys = mock.Mock() ++ ++ cfg = {'ssh_publish_hostkeys': {'enabled': True, ++ 'blacklist': []}} ++ expected_call = [self.test_hostkeys[key_type] for key_type ++ in ['dsa', 'ecdsa', 'ed25519', 'rsa']] ++ cc_ssh.handle("name", cfg, cloud, None, None) ++ self.assertEqual([mock.call(expected_call)], ++ cloud.datasource.publish_host_keys.call_args_list) +diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py +index d816262..6cbfbba 100644 +--- a/cloudinit/sources/DataSourceGCE.py ++++ b/cloudinit/sources/DataSourceGCE.py +@@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__) + MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' + BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} + REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') ++GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' ++ 'v1/instance/guest-attributes') ++HOSTKEY_NAMESPACE = 'hostkeys' ++HEADERS = {'Metadata-Flavor': 'Google'} + + + class GoogleMetadataFetcher(object): +- headers = {'Metadata-Flavor': 'Google'} + + def __init__(self, metadata_address): + self.metadata_address = metadata_address +@@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object): + url = self.metadata_address + path + if is_recursive: + url += '/?recursive=True' +- resp = url_helper.readurl(url=url, headers=self.headers) ++ resp = url_helper.readurl(url=url, headers=HEADERS) + except url_helper.UrlError as exc: + msg = "url %s raised exception %s" + LOG.debug(msg, path, exc) +@@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource): + public_keys_data = self.metadata['public-keys-data'] + return _parse_public_keys(public_keys_data, self.default_user) + ++ def publish_host_keys(self, hostkeys): ++ for key in hostkeys: ++ _write_host_key_to_guest_attributes(*key) ++ + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): + # GCE has long FDQN's and has asked for short hostnames. + return self.metadata['local-hostname'].split('.')[0] +@@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource): + return self.availability_zone.rsplit('-', 1)[0] + + ++def _write_host_key_to_guest_attributes(key_type, key_value): ++ url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) ++ key_value = key_value.encode('utf-8') ++ resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, ++ request_method='PUT', check_status=False) ++ if resp.ok(): ++ LOG.debug('Wrote %s host key to guest attributes.', key_type) ++ else: ++ LOG.debug('Unable to write %s host key to guest attributes.', key_type) ++ ++ + def _has_expired(public_key): + # Check whether an SSH key is expired. Public key input is a single SSH + # public key in the GCE specific key format documented here: +diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py +index e6966b3..425e703 100644 +--- a/cloudinit/sources/__init__.py ++++ b/cloudinit/sources/__init__.py +@@ -474,6 +474,16 @@ class DataSource(object): + def get_public_ssh_keys(self): + return normalize_pubkey_data(self.metadata.get('public-keys')) + ++ def publish_host_keys(self, hostkeys): ++ """Publish the public SSH host keys (found in /etc/ssh/*.pub). 
++ ++ @param hostkeys: List of host key tuples (key_type, key_value), ++ where key_type is the first field in the public key file ++ (e.g. 'ssh-rsa') and key_value is the key itself ++ (e.g. 'AAAAB3NzaC1y...'). ++ """ ++ pass ++ + def _remap_device(self, short_name): + # LP: #611137 + # the metadata service may believe that devices are named 'sda' +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index 396d69a..1b0721b 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details): + def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True, exception_cb=None, +- session=None, infinite=False, log_req_resp=True): ++ session=None, infinite=False, log_req_resp=True, ++ request_method=None): + url = _cleanurl(url) + req_args = { + 'url': url, + } + req_args.update(_get_ssl_args(url, ssl_details)) + req_args['allow_redirects'] = allow_redirects +- req_args['method'] = 'GET' ++ if not request_method: ++ request_method = 'POST' if data else 'GET' ++ req_args['method'] = request_method + if timeout is not None: + req_args['timeout'] = max(float(timeout), 0) +- if data: +- req_args['method'] = 'POST' + # It doesn't seem like config + # was added in older library versions (or newer ones either), thus we + # need to manually do the retries if it wasn't... +diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py +index 41176c6..67744d3 100644 +--- a/tests/unittests/test_datasource/test_gce.py ++++ b/tests/unittests/test_datasource/test_gce.py +@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = { + HEADERS = {'Metadata-Flavor': 'Google'} + MD_URL_RE = re.compile( + r'http://metadata.google.internal/computeMetadata/v1/.*') ++GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' ++ 'v1/instance/guest-attributes/hostkeys/') + + + def _set_mock_metadata(gce_meta=None): +@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): + public_key_data, default_user='default') + self.assertEqual(sorted(found), sorted(expected)) + ++ @mock.patch("cloudinit.url_helper.readurl") ++ def test_publish_host_keys(self, m_readurl): ++ hostkeys = [('ssh-rsa', 'asdfasdf'), ++ ('ssh-ed25519', 'qwerqwer')] ++ readurl_expected_calls = [ ++ mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, ++ request_method='PUT', ++ url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), ++ mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, ++ request_method='PUT', ++ url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), ++ ] ++ self.ds.publish_host_keys(hostkeys) ++ m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) ++ ++ + # vi: ts=4 expandtab +-- +1.8.3.1 + diff --git a/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch b/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch new file mode 100644 index 0000000..e7cb1da --- /dev/null +++ b/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch @@ -0,0 +1,164 @@ +From e78ae6d16009263a8dfcd91ea8ce8fc08a077529 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 28 Apr 2020 08:22:03 +0200 +Subject: [PATCH 1/3] Do not use fallocate in swap file creation on xfs. 
(#70)
+
+RH-Author: Eduardo Otubo
+Message-id: <20200422130428.7663-2-otubo@redhat.com>
+Patchwork-id: 96031
+O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 1/3] Do not use fallocate in swap file creation on xfs. (#70)
+Bugzilla: 1801094
+RH-Acked-by: Vitaly Kuznetsov
+RH-Acked-by: Cathy Avery
+
+commit 6603706eec1c39d9d591c8ffa0ef7171b74d84d6
+Author: Eduardo Otubo
+Date: Thu Jan 23 17:41:48 2020 +0100
+
+    Do not use fallocate in swap file creation on xfs. (#70)
+
+    When creating a swap file on an xfs filesystem, fallocate cannot be used.
+    Doing so results in failure of swapon and a message like:
+    swapon: swapfile has holes
+
+    The solution here is to maintain a list (currently containing only XFS)
+    of filesystems where fallocate cannot be used. Then, on those filesystems,
+    use the slower but functional 'dd' method.
+
+    Signed-off-by: Eduardo Otubo
+    Co-authored-by: Adam Dobrawy
+    Co-authored-by: Scott Moser
+    Co-authored-by: Daniel Watkins
+
+    LP: #1781781
+
+Signed-off-by: Eduardo Otubo
+Signed-off-by: Miroslav Rezanina
+---
+ cloudinit/config/cc_mounts.py                      | 67 ++++++++++++++++------
+ .../unittests/test_handler/test_handler_mounts.py  | 12 ++++
+ 2 files changed, 62 insertions(+), 17 deletions(-)
+
+diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
+index 123ffb8..6884ddf 100644
+--- a/cloudinit/config/cc_mounts.py
++++ b/cloudinit/config/cc_mounts.py
+@@ -223,13 +223,58 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
+     return size
+
+
++def create_swapfile(fname, size):
++    """Size is in MiB."""
++
++    errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
++
++    def create_swap(fname, size, method):
++        LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
++                  fname, fstype, method)
++
++        if method == "fallocate":
++            cmd = ['fallocate', '-l', '%dM' % size, fname]
++        elif method == "dd":
++            cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
++                   'count=%d' % size]
++
++        try:
++            util.subp(cmd, capture=True)
++        except util.ProcessExecutionError as e:
++            LOG.warning(errmsg, fname, size, method, e)
++            util.del_file(fname)
++
++    swap_dir = os.path.dirname(fname)
++    util.ensure_dir(swap_dir)
++
++    fstype = util.get_mount_info(swap_dir)[1]
++
++    if fstype in ("xfs", "btrfs"):
++        create_swap(fname, size, "dd")
++    else:
++        try:
++            create_swap(fname, size, "fallocate")
++        except util.ProcessExecutionError as e:
++            LOG.warning(errmsg, fname, size, "dd", e)
++            LOG.warning("Will attempt with dd.")
++            create_swap(fname, size, "dd")
++
++    util.chmod(fname, 0o600)
++    try:
++        util.subp(['mkswap', fname])
++    except util.ProcessExecutionError:
++        util.del_file(fname)
++        raise
++
++
+ def setup_swapfile(fname, size=None, maxsize=None):
+     """
+     fname: full path string of filename to setup
+     size: the size to create.
set to "auto" for recommended + maxsize: the maximum size + """ +- tdir = os.path.dirname(fname) ++ swap_dir = os.path.dirname(fname) ++ mibsize = str(int(size / (2 ** 20))) + if str(size).lower() == "auto": + try: + memsize = util.read_meminfo()['total'] +@@ -237,28 +282,16 @@ def setup_swapfile(fname, size=None, maxsize=None): + LOG.debug("Not creating swap: failed to read meminfo") + return + +- util.ensure_dir(tdir) +- size = suggested_swapsize(fsys=tdir, maxsize=maxsize, ++ util.ensure_dir(swap_dir) ++ size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, + memsize=memsize) + + if not size: + LOG.debug("Not creating swap: suggested size was 0") + return + +- mbsize = str(int(size / (2 ** 20))) +- msg = "creating swap file '%s' of %sMB" % (fname, mbsize) +- try: +- util.ensure_dir(tdir) +- util.log_time(LOG.debug, msg, func=util.subp, +- args=[['sh', '-c', +- ('rm -f "$1" && umask 0066 && ' +- '{ fallocate -l "${2}M" "$1" || ' +- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' +- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), +- 'setup_swap', fname, mbsize]]) +- +- except Exception as e: +- raise IOError("Failed %s: %s" % (msg, e)) ++ util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile, ++ args=[fname, mibsize]) + + return fname + +diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py +index 0fb160b..7bcefa0 100644 +--- a/tests/unittests/test_handler/test_handler_mounts.py ++++ b/tests/unittests/test_handler/test_handler_mounts.py +@@ -181,6 +181,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + + return dev + ++ def test_swap_integrity(self): ++ '''Ensure that the swap file is correctly created and can ++ swapon successfully. 
Fixing the corner case of: ++ kernel: swapon: swapfile has holes''' ++ ++ fstab = '/swap.img swap swap defaults 0 0\n' ++ ++ with open(cc_mounts.FSTAB_PATH, 'w') as fd: ++ fd.write(fstab) ++ cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} ++ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) ++ + def test_fstab_no_swap_device(self): + '''Ensure that cloud-init adds a discovered swap partition + to /etc/fstab.''' +-- +1.8.3.1 + diff --git a/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch b/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch new file mode 100644 index 0000000..85bda76 --- /dev/null +++ b/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch @@ -0,0 +1,672 @@ +From ccae8d2ac218366c529aac03b29c46400843d4a0 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 5 May 2020 08:08:09 +0200 +Subject: [PATCH 1/5] New data source for the Exoscale.com cloud platform + +RH-Author: Eduardo Otubo +Message-id: <20200504085238.25884-2-otubo@redhat.com> +Patchwork-id: 96244 +O-Subject: [RHEL-7.8.z cloud-init PATCH 1/5] New data source for the Exoscale.com cloud platform +Bugzilla: 1827207 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit 4dfed67d0e82970f8717d0b524c593962698ca4f +Author: Chris Glass +Date: Thu Aug 8 17:09:57 2019 +0000 + + New data source for the Exoscale.com cloud platform + + - dsidentify switches to the new Exoscale datasource on matching DMI name + - New Exoscale datasource added + + Signed-off-by: Mathieu Corbin + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/apport.py | 1 + + cloudinit/settings.py | 1 + + cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ + doc/rtd/topics/datasources.rst | 1 + + doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ + tests/unittests/test_datasource/test_common.py | 2 + + tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ + tools/ds-identify | 7 +- + 8 files changed, 540 insertions(+), 1 deletion(-) + create mode 100644 cloudinit/sources/DataSourceExoscale.py + create mode 100644 doc/rtd/topics/datasources/exoscale.rst + create mode 100644 tests/unittests/test_datasource/test_exoscale.py + +diff --git a/cloudinit/apport.py b/cloudinit/apport.py +index 22cb7fd..003ff1f 100644 +--- a/cloudinit/apport.py ++++ b/cloudinit/apport.py +@@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ + 'CloudStack', + 'DigitalOcean', + 'GCE - Google Compute Engine', ++ 'Exoscale', + 'Hetzner Cloud', + 'IBM - (aka SoftLayer or BlueMix)', + 'LXD', +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index d982a4d..229b420 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -39,6 +39,7 @@ CFG_BUILTIN = { + 'Hetzner', + 'IBMCloud', + 'Oracle', ++ 'Exoscale', + # At the end to act as a 'catch' when none of the above work... + 'None', + ], +diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py +new file mode 100644 +index 0000000..52e7f6f +--- /dev/null ++++ b/cloudinit/sources/DataSourceExoscale.py +@@ -0,0 +1,258 @@ ++# Author: Mathieu Corbin ++# Author: Christopher Glass ++# ++# This file is part of cloud-init. See LICENSE file for license information. 
++ ++from cloudinit import ec2_utils as ec2 ++from cloudinit import log as logging ++from cloudinit import sources ++from cloudinit import url_helper ++from cloudinit import util ++ ++LOG = logging.getLogger(__name__) ++ ++METADATA_URL = "http://169.254.169.254" ++API_VERSION = "1.0" ++PASSWORD_SERVER_PORT = 8080 ++ ++URL_TIMEOUT = 10 ++URL_RETRIES = 6 ++ ++EXOSCALE_DMI_NAME = "Exoscale" ++ ++BUILTIN_DS_CONFIG = { ++ # We run the set password config module on every boot in order to enable ++ # resetting the instance's password via the exoscale console (and a ++ # subsequent instance reboot). ++ 'cloud_config_modules': [["set-passwords", "always"]] ++} ++ ++ ++class DataSourceExoscale(sources.DataSource): ++ ++ dsname = 'Exoscale' ++ ++ def __init__(self, sys_cfg, distro, paths): ++ super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) ++ LOG.debug("Initializing the Exoscale datasource") ++ ++ self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) ++ self.api_version = self.ds_cfg.get('api_version', API_VERSION) ++ self.password_server_port = int( ++ self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) ++ self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) ++ self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) ++ ++ self.extra_config = BUILTIN_DS_CONFIG ++ ++ def wait_for_metadata_service(self): ++ """Wait for the metadata service to be reachable.""" ++ ++ metadata_url = "{}/{}/meta-data/instance-id".format( ++ self.metadata_url, self.api_version) ++ ++ url = url_helper.wait_for_url( ++ urls=[metadata_url], ++ max_wait=self.url_max_wait, ++ timeout=self.url_timeout, ++ status_cb=LOG.critical) ++ ++ return bool(url) ++ ++ def crawl_metadata(self): ++ """ ++ Crawl the metadata service when available. ++ ++ @returns: Dictionary of crawled metadata content. ++ """ ++ metadata_ready = util.log_time( ++ logfunc=LOG.info, ++ msg='waiting for the metadata service', ++ func=self.wait_for_metadata_service) ++ ++ if not metadata_ready: ++ return {} ++ ++ return read_metadata(self.metadata_url, self.api_version, ++ self.password_server_port, self.url_timeout, ++ self.url_retries) ++ ++ def _get_data(self): ++ """Fetch the user data, the metadata and the VM password ++ from the metadata service. ++ ++ Please refer to the datasource documentation for details on how the ++ metadata server and password server are crawled. ++ """ ++ if not self._is_platform_viable(): ++ return False ++ ++ data = util.log_time( ++ logfunc=LOG.debug, ++ msg='Crawl of metadata service', ++ func=self.crawl_metadata) ++ ++ if not data: ++ return False ++ ++ self.userdata_raw = data['user-data'] ++ self.metadata = data['meta-data'] ++ password = data.get('password') ++ ++ password_config = {} ++ if password: ++ # Since we have a password, let's make sure we are allowed to use ++ # it by allowing ssh_pwauth. ++ # The password module's default behavior is to leave the ++ # configuration as-is in this regard, so that means it will either ++ # leave the password always disabled if no password is ever set, or ++ # leave the password login enabled if we set it once. 
++ password_config = { ++ 'ssh_pwauth': True, ++ 'password': password, ++ 'chpasswd': { ++ 'expire': False, ++ }, ++ } ++ ++ # builtin extra_config overrides password_config ++ self.extra_config = util.mergemanydict( ++ [self.extra_config, password_config]) ++ ++ return True ++ ++ def get_config_obj(self): ++ return self.extra_config ++ ++ def _is_platform_viable(self): ++ return util.read_dmi_data('system-product-name').startswith( ++ EXOSCALE_DMI_NAME) ++ ++ ++# Used to match classes to dependencies ++datasources = [ ++ (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ++] ++ ++ ++# Return a list of data sources that match this set of dependencies ++def get_datasource_list(depends): ++ return sources.list_from_depends(depends, datasources) ++ ++ ++def get_password(metadata_url=METADATA_URL, ++ api_version=API_VERSION, ++ password_server_port=PASSWORD_SERVER_PORT, ++ url_timeout=URL_TIMEOUT, ++ url_retries=URL_RETRIES): ++ """Obtain the VM's password if set. ++ ++ Once fetched the password is marked saved. Future calls to this method may ++ return empty string or 'saved_password'.""" ++ password_url = "{}:{}/{}/".format(metadata_url, password_server_port, ++ api_version) ++ response = url_helper.read_file_or_url( ++ password_url, ++ ssl_details=None, ++ headers={"DomU_Request": "send_my_password"}, ++ timeout=url_timeout, ++ retries=url_retries) ++ password = response.contents.decode('utf-8') ++ # the password is empty or already saved ++ # Note: the original metadata server would answer an additional ++ # 'bad_request' status, but the Exoscale implementation does not. ++ if password in ['', 'saved_password']: ++ return None ++ # save the password ++ url_helper.read_file_or_url( ++ password_url, ++ ssl_details=None, ++ headers={"DomU_Request": "saved_password"}, ++ timeout=url_timeout, ++ retries=url_retries) ++ return password ++ ++ ++def read_metadata(metadata_url=METADATA_URL, ++ api_version=API_VERSION, ++ password_server_port=PASSWORD_SERVER_PORT, ++ url_timeout=URL_TIMEOUT, ++ url_retries=URL_RETRIES): ++ """Query the metadata server and return the retrieved data.""" ++ crawled_metadata = {} ++ crawled_metadata['_metadata_api_version'] = api_version ++ try: ++ crawled_metadata['user-data'] = ec2.get_instance_userdata( ++ api_version, ++ metadata_url, ++ timeout=url_timeout, ++ retries=url_retries) ++ crawled_metadata['meta-data'] = ec2.get_instance_metadata( ++ api_version, ++ metadata_url, ++ timeout=url_timeout, ++ retries=url_retries) ++ except Exception as e: ++ util.logexc(LOG, "failed reading from metadata url %s (%s)", ++ metadata_url, e) ++ return {} ++ ++ try: ++ crawled_metadata['password'] = get_password( ++ api_version=api_version, ++ metadata_url=metadata_url, ++ password_server_port=password_server_port, ++ url_retries=url_retries, ++ url_timeout=url_timeout) ++ except Exception as e: ++ util.logexc(LOG, "failed to read from password server url %s:%s (%s)", ++ metadata_url, password_server_port, e) ++ ++ return crawled_metadata ++ ++ ++if __name__ == "__main__": ++ import argparse ++ ++ parser = argparse.ArgumentParser(description='Query Exoscale Metadata') ++ parser.add_argument( ++ "--endpoint", ++ metavar="URL", ++ help="The url of the metadata service.", ++ default=METADATA_URL) ++ parser.add_argument( ++ "--version", ++ metavar="VERSION", ++ help="The version of the metadata endpoint to query.", ++ default=API_VERSION) ++ parser.add_argument( ++ "--retries", ++ metavar="NUM", ++ type=int, ++ help="The number of retries querying the endpoint.", 
++ default=URL_RETRIES) ++ parser.add_argument( ++ "--timeout", ++ metavar="NUM", ++ type=int, ++ help="The time in seconds to wait before timing out.", ++ default=URL_TIMEOUT) ++ parser.add_argument( ++ "--password-port", ++ metavar="PORT", ++ type=int, ++ help="The port on which the password endpoint listens", ++ default=PASSWORD_SERVER_PORT) ++ ++ args = parser.parse_args() ++ ++ data = read_metadata( ++ metadata_url=args.endpoint, ++ api_version=args.version, ++ password_server_port=args.password_port, ++ url_timeout=args.timeout, ++ url_retries=args.retries) ++ ++ print(util.json_dumps(data)) ++ ++# vi: ts=4 expandtab +diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst +index e34f145..fcfd91a 100644 +--- a/doc/rtd/topics/datasources.rst ++++ b/doc/rtd/topics/datasources.rst +@@ -96,6 +96,7 @@ Follow for more information. + datasources/configdrive.rst + datasources/digitalocean.rst + datasources/ec2.rst ++ datasources/exoscale.rst + datasources/maas.rst + datasources/nocloud.rst + datasources/opennebula.rst +diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst +new file mode 100644 +index 0000000..27aec9c +--- /dev/null ++++ b/doc/rtd/topics/datasources/exoscale.rst +@@ -0,0 +1,68 @@ ++.. _datasource_exoscale: ++ ++Exoscale ++======== ++ ++This datasource supports reading from the metadata server used on the ++`Exoscale platform `_. ++ ++Use of the Exoscale datasource is recommended to benefit from new features of ++the Exoscale platform. ++ ++The datasource relies on the availability of a compatible metadata server ++(``http://169.254.169.254`` is used by default) and its companion password ++server, reachable at the same address (by default on port 8080). ++ ++Crawling of metadata ++-------------------- ++ ++The metadata service and password server are crawled slightly differently: ++ ++ * The "metadata service" is crawled every boot. ++ * The password server is also crawled every boot (the Exoscale datasource ++ forces the password module to run with "frequency always"). ++ ++In the password server case, the following rules apply in order to enable the ++"restore instance password" functionality: ++ ++ * If a password is returned by the password server, it is then marked "saved" ++ by the cloud-init datasource. Subsequent boots will skip setting the password ++ (the password server will return "saved_password"). ++ * When the instance password is reset (via the Exoscale UI), the password ++ server will return the non-empty password at next boot, therefore causing ++ cloud-init to reset the instance's password. ++ ++Configuration ++------------- ++ ++Users of this datasource are discouraged from changing the default settings ++unless instructed to by Exoscale support. ++ ++The following settings are available and can be set for the datasource in system ++configuration (in `/etc/cloud/cloud.cfg.d/`). ++ ++The settings available are: ++ ++ * **metadata_url**: The URL for the metadata service (defaults to ++ ``http://169.254.169.254``) ++ * **api_version**: The API version path on which to query the instance metadata ++ (defaults to ``1.0``) ++ * **password_server_port**: The port (on the metadata server) on which the ++ password server listens (defaults to ``8080``). ++ * **timeout**: the timeout value provided to urlopen for each individual http ++ request. 
(defaults to ``10``) ++ * **retries**: The number of retries that should be done for an http request ++ (defaults to ``6``) ++ ++ ++An example configuration with the default values is provided below: ++ ++.. sourcecode:: yaml ++ ++ datasource: ++ Exoscale: ++ metadata_url: "http://169.254.169.254" ++ api_version: "1.0" ++ password_server_port: 8080 ++ timeout: 10 ++ retries: 6 +diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py +index 6b01a4e..24b0fac 100644 +--- a/tests/unittests/test_datasource/test_common.py ++++ b/tests/unittests/test_datasource/test_common.py +@@ -13,6 +13,7 @@ from cloudinit.sources import ( + DataSourceConfigDrive as ConfigDrive, + DataSourceDigitalOcean as DigitalOcean, + DataSourceEc2 as Ec2, ++ DataSourceExoscale as Exoscale, + DataSourceGCE as GCE, + DataSourceHetzner as Hetzner, + DataSourceIBMCloud as IBMCloud, +@@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ + CloudStack.DataSourceCloudStack, + DSNone.DataSourceNone, + Ec2.DataSourceEc2, ++ Exoscale.DataSourceExoscale, + GCE.DataSourceGCE, + MAAS.DataSourceMAAS, + NoCloud.DataSourceNoCloudNet, +diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py +new file mode 100644 +index 0000000..350c330 +--- /dev/null ++++ b/tests/unittests/test_datasource/test_exoscale.py +@@ -0,0 +1,203 @@ ++# Author: Mathieu Corbin ++# Author: Christopher Glass ++# ++# This file is part of cloud-init. See LICENSE file for license information. ++from cloudinit import helpers ++from cloudinit.sources.DataSourceExoscale import ( ++ API_VERSION, ++ DataSourceExoscale, ++ METADATA_URL, ++ get_password, ++ PASSWORD_SERVER_PORT, ++ read_metadata) ++from cloudinit.tests.helpers import HttprettyTestCase, mock ++ ++import httpretty ++import requests ++ ++ ++TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, ++ PASSWORD_SERVER_PORT, ++ API_VERSION) ++ ++TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, ++ API_VERSION) ++ ++TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, ++ API_VERSION) ++ ++ ++@httpretty.activate ++class TestDatasourceExoscale(HttprettyTestCase): ++ ++ def setUp(self): ++ super(TestDatasourceExoscale, self).setUp() ++ self.tmp = self.tmp_dir() ++ self.password_url = TEST_PASSWORD_URL ++ self.metadata_url = TEST_METADATA_URL ++ self.userdata_url = TEST_USERDATA_URL ++ ++ def test_password_saved(self): ++ """The password is not set when it is not found ++ in the metadata service.""" ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body="saved_password") ++ self.assertFalse(get_password()) ++ ++ def test_password_empty(self): ++ """No password is set if the metadata service returns ++ an empty string.""" ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body="") ++ self.assertFalse(get_password()) ++ ++ def test_password(self): ++ """The password is set to what is found in the metadata ++ service.""" ++ expected_password = "p@ssw0rd" ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body=expected_password) ++ password = get_password() ++ self.assertEqual(expected_password, password) ++ ++ def test_get_data(self): ++ """The datasource conforms to expected behavior when supplied ++ full test data.""" ++ path = helpers.Paths({'run_dir': self.tmp}) ++ ds = DataSourceExoscale({}, None, path) ++ ds._is_platform_viable = lambda: True ++ expected_password = "p@ssw0rd" ++ expected_id = "12345" ++ expected_hostname = "myname" ++ expected_userdata = "#cloud-config" 
++ httpretty.register_uri(httpretty.GET, ++ self.userdata_url, ++ body=expected_userdata) ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body=expected_password) ++ httpretty.register_uri(httpretty.GET, ++ self.metadata_url, ++ body="instance-id\nlocal-hostname") ++ httpretty.register_uri(httpretty.GET, ++ "{}local-hostname".format(self.metadata_url), ++ body=expected_hostname) ++ httpretty.register_uri(httpretty.GET, ++ "{}instance-id".format(self.metadata_url), ++ body=expected_id) ++ self.assertTrue(ds._get_data()) ++ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") ++ self.assertEqual(ds.metadata, {"instance-id": expected_id, ++ "local-hostname": expected_hostname}) ++ self.assertEqual(ds.get_config_obj(), ++ {'ssh_pwauth': True, ++ 'password': expected_password, ++ 'cloud_config_modules': [ ++ ["set-passwords", "always"]], ++ 'chpasswd': { ++ 'expire': False, ++ }}) ++ ++ def test_get_data_saved_password(self): ++ """The datasource conforms to expected behavior when saved_password is ++ returned by the password server.""" ++ path = helpers.Paths({'run_dir': self.tmp}) ++ ds = DataSourceExoscale({}, None, path) ++ ds._is_platform_viable = lambda: True ++ expected_answer = "saved_password" ++ expected_id = "12345" ++ expected_hostname = "myname" ++ expected_userdata = "#cloud-config" ++ httpretty.register_uri(httpretty.GET, ++ self.userdata_url, ++ body=expected_userdata) ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body=expected_answer) ++ httpretty.register_uri(httpretty.GET, ++ self.metadata_url, ++ body="instance-id\nlocal-hostname") ++ httpretty.register_uri(httpretty.GET, ++ "{}local-hostname".format(self.metadata_url), ++ body=expected_hostname) ++ httpretty.register_uri(httpretty.GET, ++ "{}instance-id".format(self.metadata_url), ++ body=expected_id) ++ self.assertTrue(ds._get_data()) ++ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") ++ self.assertEqual(ds.metadata, {"instance-id": expected_id, ++ "local-hostname": expected_hostname}) ++ self.assertEqual(ds.get_config_obj(), ++ {'cloud_config_modules': [ ++ ["set-passwords", "always"]]}) ++ ++ def test_get_data_no_password(self): ++ """The datasource conforms to expected behavior when no password is ++ returned by the password server.""" ++ path = helpers.Paths({'run_dir': self.tmp}) ++ ds = DataSourceExoscale({}, None, path) ++ ds._is_platform_viable = lambda: True ++ expected_answer = "" ++ expected_id = "12345" ++ expected_hostname = "myname" ++ expected_userdata = "#cloud-config" ++ httpretty.register_uri(httpretty.GET, ++ self.userdata_url, ++ body=expected_userdata) ++ httpretty.register_uri(httpretty.GET, ++ self.password_url, ++ body=expected_answer) ++ httpretty.register_uri(httpretty.GET, ++ self.metadata_url, ++ body="instance-id\nlocal-hostname") ++ httpretty.register_uri(httpretty.GET, ++ "{}local-hostname".format(self.metadata_url), ++ body=expected_hostname) ++ httpretty.register_uri(httpretty.GET, ++ "{}instance-id".format(self.metadata_url), ++ body=expected_id) ++ self.assertTrue(ds._get_data()) ++ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") ++ self.assertEqual(ds.metadata, {"instance-id": expected_id, ++ "local-hostname": expected_hostname}) ++ self.assertEqual(ds.get_config_obj(), ++ {'cloud_config_modules': [ ++ ["set-passwords", "always"]]}) ++ ++ @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') ++ def test_read_metadata_when_password_server_unreachable(self, m_password): ++ """The 
read_metadata function returns partial results in case the ++ password server (only) is unreachable.""" ++ expected_id = "12345" ++ expected_hostname = "myname" ++ expected_userdata = "#cloud-config" ++ ++ m_password.side_effect = requests.Timeout('Fake Connection Timeout') ++ httpretty.register_uri(httpretty.GET, ++ self.userdata_url, ++ body=expected_userdata) ++ httpretty.register_uri(httpretty.GET, ++ self.metadata_url, ++ body="instance-id\nlocal-hostname") ++ httpretty.register_uri(httpretty.GET, ++ "{}local-hostname".format(self.metadata_url), ++ body=expected_hostname) ++ httpretty.register_uri(httpretty.GET, ++ "{}instance-id".format(self.metadata_url), ++ body=expected_id) ++ ++ result = read_metadata() ++ ++ self.assertIsNone(result.get("password")) ++ self.assertEqual(result.get("user-data").decode("utf-8"), ++ expected_userdata) ++ ++ def test_non_viable_platform(self): ++ """The datasource fails fast when the platform is not viable.""" ++ path = helpers.Paths({'run_dir': self.tmp}) ++ ds = DataSourceExoscale({}, None, path) ++ ds._is_platform_viable = lambda: False ++ self.assertFalse(ds._get_data()) +diff --git a/tools/ds-identify b/tools/ds-identify +index 1acfeeb..6c89b06 100755 +--- a/tools/ds-identify ++++ b/tools/ds-identify +@@ -124,7 +124,7 @@ DI_DSNAME="" + # be searched if there is no setting found in config. + DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ + CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ +-OVF SmartOS Scaleway Hetzner IBMCloud Oracle" ++OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" + DI_DSLIST="" + DI_MODE="" + DI_ON_FOUND="" +@@ -553,6 +553,11 @@ dscheck_CloudStack() { + return $DS_NOT_FOUND + } + ++dscheck_Exoscale() { ++ dmi_product_name_matches "Exoscale*" && return $DS_FOUND ++ return $DS_NOT_FOUND ++} ++ + dscheck_CloudSigma() { + # http://paste.ubuntu.com/23624795/ + dmi_product_name_matches "CloudSigma" && return $DS_FOUND +-- +1.8.3.1 + diff --git a/SOURCES/ci-azure-avoid.patch b/SOURCES/ci-azure-avoid.patch new file mode 100644 index 0000000..96d78ab --- /dev/null +++ b/SOURCES/ci-azure-avoid.patch @@ -0,0 +1,213 @@ +From 3b1b95b667a767c0e0711215c7b620cde016bcd7 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 10 Mar 2020 16:04:18 +0100 +Subject: [PATCH] azure: avoid + +Message-id: <20200310160418.887-1-otubo@redhat.com> +Patchwork-id: 94221 +O-Subject: [RHEL-8.1.0/RHEL-7.8.z/RHEL-7.7.z cloud-init PATCH] azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) +Bugzilla: 1810064 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohammed Gamal + +commit 129b1c4ea250619bd7caed7aaffacc796b0139f2 +Author: AOhassan <37305877+AOhassan@users.noreply.github.com> +Date: Thu Dec 12 13:51:42 2019 -0800 + + azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) + + Azure stores the instance ID with an incorrect byte ordering for the + first three hyphen delimited parts. This results in invalid + is_new_instance checks forcing Azure datasource to recrawl the metadata + service. + + When persisting instance-id from the metadata service, swap the + instance-id string byte order such that it is consistent with + that returned by dmi information. Check whether the instance-id + string is a byte-swapped match when determining correctly whether + the Azure platform instance-id has actually changed. 
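The swap described above operates byte-pair-wise within each of the first three hyphen-delimited groups of the UUID; a minimal sketch mirroring the helper this patch adds (sample value taken from the patch's own tests):

    import textwrap

    def swap_bytestring(s):
        # 'D0DF4C54' -> ['D0', 'DF', '4C', '54'] -> '544CDFD0'
        return ''.join(reversed(textwrap.wrap(s, 2)))

    assert swap_bytestring('D0DF4C54') == '544CDFD0'

is_byte_swapped() below then simply compares the reassembled string against the previously stored instance-id.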
+ +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceAzure.py | 16 ++++++++++--- + cloudinit/sources/helpers/azure.py | 27 ++++++++++++++++++++++ + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++--- + .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++ + 4 files changed, 80 insertions(+), 6 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 5baf8da..66bbe5e 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -28,7 +28,8 @@ from cloudinit.reporting import events + + from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, +- get_metadata_from_fabric) ++ get_metadata_from_fabric, ++ is_byte_swapped) + + LOG = logging.getLogger(__name__) + +@@ -458,8 +459,7 @@ class DataSourceAzure(sources.DataSource): + seed = _get_random_seed() + if seed: + crawled_data['metadata']['random_seed'] = seed +- crawled_data['metadata']['instance-id'] = util.read_dmi_data( +- 'system-uuid') ++ crawled_data['metadata']['instance-id'] = self._iid() + + if perform_reprovision: + LOG.info("Reporting ready to Azure after getting ReprovisionData") +@@ -530,6 +530,16 @@ class DataSourceAzure(sources.DataSource): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + ++ def _iid(self, previous=None): ++ prev_iid_path = os.path.join( ++ self.paths.get_cpath('data'), 'instance-id') ++ iid = util.read_dmi_data('system-uuid') ++ if os.path.exists(prev_iid_path): ++ previous = util.load_file(prev_iid_path).strip() ++ if is_byte_swapped(previous, iid): ++ return previous ++ return iid ++ + @azure_ds_telemetry_reporter + def setup(self, is_new_instance): + if self._negotiated is False: +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +index 82c4c8c..c2a57cc 100755 +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -7,6 +7,7 @@ import re + import socket + import struct + import time ++import textwrap + + from cloudinit.net import dhcp + from cloudinit import stages +@@ -40,6 +41,32 @@ def azure_ds_telemetry_reporter(func): + return impl + + ++def is_byte_swapped(previous_id, current_id): ++ """ ++ Azure stores the instance ID with an incorrect byte ordering for the ++ first parts. This corrects the byte order such that it is consistent with ++ that returned by the metadata service. 
++ """ ++ if previous_id == current_id: ++ return False ++ ++ def swap_bytestring(s, width=2): ++ dd = [byte for byte in textwrap.wrap(s, 2)] ++ dd.reverse() ++ return ''.join(dd) ++ ++ parts = current_id.split('-') ++ swapped_id = '-'.join([ ++ swap_bytestring(parts[0]), ++ swap_bytestring(parts[1]), ++ swap_bytestring(parts[2]), ++ parts[3], ++ parts[4] ++ ]) ++ ++ return previous_id == swapped_id ++ ++ + @contextmanager + def cd(newdir): + prevdir = os.getcwd() +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index bc8b42c..1fb0565 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -314,7 +314,7 @@ scbus-1 on xpt0 bus 0 + 'public-keys': [], + }) + +- self.instance_id = 'test-instance-id' ++ self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + + def _dmi_mocks(key): + if key == 'system-uuid': +@@ -511,7 +511,7 @@ fdescfs /dev/fd fdescfs rw 0 0 + 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, + 'ipv6': {'ipAddress': []}, + 'macAddress': '000D3A047598'}]}}, +- 'instance-id': 'test-instance-id', ++ 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', + 'local-hostname': u'myhost', + 'random_seed': 'wild'} + +@@ -881,6 +881,24 @@ fdescfs /dev/fd fdescfs rw 0 0 + self.assertTrue(ret) + self.assertEqual('value', dsrc.metadata['test']) + ++ def test_instance_id_endianness(self): ++ """Return the previous iid when dmi uuid is the byteswapped iid.""" ++ ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ++ # byte-swapped previous ++ write_file( ++ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), ++ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') ++ ds.get_data() ++ self.assertEqual( ++ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) ++ # not byte-swapped previous ++ write_file( ++ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), ++ '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') ++ ds.get_data() ++ self.assertEqual( ++ 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) ++ + def test_instance_id_from_dmidecode_used(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() +@@ -1080,7 +1098,7 @@ class TestAzureBounce(CiTestCase): + + def _dmi_mocks(key): + if key == 'system-uuid': +- return 'test-instance-id' ++ return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + raise RuntimeError('should not get here') +diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py +index bd006ab..7ad5cc1 100644 +--- a/tests/unittests/test_datasource/test_azure_helper.py ++++ b/tests/unittests/test_datasource/test_azure_helper.py +@@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): + goal_state = self._get_goal_state(instance_id=instance_id) + self.assertEqual(instance_id, goal_state.instance_id) + ++ def test_instance_id_byte_swap(self): ++ """Return true when previous_iid is byteswapped current_iid""" ++ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" ++ current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" ++ self.assertTrue( ++ azure_helper.is_byte_swapped(previous_iid, current_iid)) ++ ++ def test_instance_id_no_byte_swap_same_instance_id(self): ++ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" ++ current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" ++ self.assertFalse( ++ azure_helper.is_byte_swapped(previous_iid, current_iid)) ++ ++ def 
test_instance_id_no_byte_swap_diff_instance_id(self): ++ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" ++ current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" ++ self.assertFalse( ++ azure_helper.is_byte_swapped(previous_iid, current_iid)) ++ + def test_certificates_xml_parsed_and_fetched_correctly(self): + http_client = mock.MagicMock() + certificates_url = 'TestCertificatesUrl' +-- +1.8.3.1 + diff --git a/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch b/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch new file mode 100644 index 0000000..d201e02 --- /dev/null +++ b/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch @@ -0,0 +1,85 @@ +From 00b8210223ce3af97109df5cdb85b8e40541dd33 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 28 Apr 2020 08:22:07 +0200 +Subject: [PATCH 3/3] cc_mounts: fix incorrect format specifiers (#316) + +RH-Author: Eduardo Otubo +Message-id: <20200422130428.7663-4-otubo@redhat.com> +Patchwork-id: 96034 +O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 3/3] cc_mounts: fix incorrect format specifiers (#316) +Bugzilla: 1801094 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +commit 9d7b35ce23aaf8741dd49b16e359c96591be3c76 +Author: Daniel Watkins +Date: Wed Apr 15 16:53:08 2020 -0400 + + cc_mounts: fix incorrect format specifiers (#316) + + LP: #1872836 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/config/cc_mounts.py | 6 +++--- + cloudinit/config/tests/test_mounts.py | 22 ++++++++++++++++++++++ + 2 files changed, 25 insertions(+), 3 deletions(-) + create mode 100644 cloudinit/config/tests/test_mounts.py + +diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py +index 811781f..7c4e104 100644 +--- a/cloudinit/config/cc_mounts.py ++++ b/cloudinit/config/cc_mounts.py +@@ -226,17 +226,17 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): + def create_swapfile(fname, size): + """Size is in MiB.""" + +- errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s" ++ errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" + + def create_swap(fname, size, method): + LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, fstype, method) + + if method == "fallocate": +- cmd = ['fallocate', '-l', '%dM' % size, fname] ++ cmd = ['fallocate', '-l', '%sM' % size, fname] + elif method == "dd": + cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', +- 'count=%d' % size] ++ 'count=%s' % size] + + try: + util.subp(cmd, capture=True) +diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py +new file mode 100644 +index 0000000..c7dad61 +--- /dev/null ++++ b/cloudinit/config/tests/test_mounts.py +@@ -0,0 +1,22 @@ ++# This file is part of cloud-init. See LICENSE file for license information. ++from unittest import mock ++ ++from cloudinit.config.cc_mounts import create_swapfile ++ ++ ++M_PATH = 'cloudinit.config.cc_mounts.' 
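The specifier change above is not cosmetic: create_swapfile() can receive its size as a string (setup_swapfile computes mibsize via str(), and the test below passes ''), and old-style '%d' formatting rejects strings outright. A quick illustration of the failure mode being fixed:

    '%dM' % '512'   # raises TypeError: %d format: a number is required
    '%sM' % '512'   # '512M'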
++ ++ ++class TestCreateSwapfile: ++ ++ @mock.patch(M_PATH + 'util.subp') ++ def test_happy_path(self, m_subp, tmpdir): ++ swap_file = tmpdir.join("swap-file") ++ fname = str(swap_file) ++ ++ # Some of the calls to util.subp should create the swap file; this ++ # roughly approximates that ++ m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') ++ ++ create_swapfile(fname, '') ++ assert mock.call(['mkswap', fname]) in m_subp.call_args_list +-- +1.8.3.1 + diff --git a/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch b/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch new file mode 100644 index 0000000..5d49fbb --- /dev/null +++ b/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch @@ -0,0 +1,58 @@ +From 2b7bcfb3cfb6ac668627b26c83a2d60a29a75392 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 14 Apr 2020 14:21:35 +0200 +Subject: [PATCH] cmd:main.py: Fix missing 'modules-init' key in modes dict + +RH-Author: Eduardo Otubo +Message-id: <20200414104642.19930-1-otubo@redhat.com> +Patchwork-id: 94672 +O-Subject: [RHEL-7.8.z cloud-init PATCH] cmd:main.py: Fix missing 'modules-init' key in modes dict +Bugzilla: 1802173 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Cathy Avery + +commit bdd9c0ac9bcd68ec1ac3b2038dad0ba3dbd83341 +Author: Antonio Romito +Date: Tue Apr 9 14:54:23 2019 +0000 + + cmd:main.py: Fix missing 'modules-init' key in modes dict + + Cloud-init's main.py will fail when presented with a new + stage name 'modules-init' if upgrading an older cloud-init. + Fix this by initializing unknown stage names before accessing. + + LP: #1815109 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/cmd/main.py | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py +index 933c019..a5446da 100644 +--- a/cloudinit/cmd/main.py ++++ b/cloudinit/cmd/main.py +@@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None): + 'start': None, + 'finished': None, + } ++ + if status is None: + status = {'v1': {}} +- for m in modes: +- status['v1'][m] = nullstatus.copy() + status['v1']['datasource'] = None +- elif mode not in status['v1']: +- status['v1'][mode] = nullstatus.copy() ++ ++ for m in modes: ++ if m not in status['v1']: ++ status['v1'][m] = nullstatus.copy() + + v1 = status['v1'] + v1['stage'] = mode +-- +1.8.3.1 + diff --git a/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch b/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch new file mode 100644 index 0000000..0213fad --- /dev/null +++ b/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch @@ -0,0 +1,594 @@ +From 68b3718124b63fdf0c077452b559f0fccb01200d Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 5 May 2020 08:08:32 +0200 +Subject: [PATCH 5/5] ec2: Add support for AWS IMDS v2 (session-oriented) (#55) + +RH-Author: Eduardo Otubo +Message-id: <20200504085238.25884-6-otubo@redhat.com> +Patchwork-id: 96245 +O-Subject: [RHEL-7.8.z cloud-init PATCH 5/5] ec2: Add support for AWS IMDS v2 (session-oriented) (#55) +Bugzilla: 1827207 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit 4bc399e0cd0b7e9177f948aecd49f6b8323ff30b +Author: Ryan Harper +Date: Fri Nov 22 21:05:44 2019 -0600 + + ec2: Add support for AWS IMDS v2 (session-oriented) (#55) + + * ec2: Add support for AWS IMDS v2 (session-oriented) + + AWS now supports a new 
version of fetching Instance Metadata[1]. + + Update cloud-init's ec2 utility functions and update ec2 derived + datasources accordingly. For DataSourceEc2 (versus ec2-look-alikes) + cloud-init will issue the PUT request to obtain an API token for + the maximum lifetime and then all subsequent interactions with the + IMDS will include the token in the header. + + If the API token endpoint is unreachable on Ec2 platform, log a + warning and fallback to using IMDS v1 and which does not use + session tokens when communicating with the Instance metadata + service. + + We handle read errors, typically seen if the IMDS is beyond one + etwork hop (IMDSv2 responses have a ttl=1), by setting the api token + to a disabled value and then using IMDSv1 paths. + + To support token-based headers, ec2_utils functions were updated + to support custom headers_cb and exception_cb callback functions + so Ec2 could store, or refresh API tokens in the event of token + becoming stale. + + [1] https://docs.aws.amazon.com/AWSEC2/latest/ \ + UserGuide/ec2-instance-metadata.html \ + #instance-metadata-v2-how-it-works + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/ec2_utils.py | 37 +++-- + cloudinit/sources/DataSourceCloudStack.py | 2 +- + cloudinit/sources/DataSourceEc2.py | 166 ++++++++++++++++++--- + cloudinit/sources/DataSourceExoscale.py | 2 +- + cloudinit/sources/DataSourceMAAS.py | 2 +- + cloudinit/sources/DataSourceOpenStack.py | 2 +- + cloudinit/url_helper.py | 15 +- + tests/unittests/test_datasource/test_cloudstack.py | 21 ++- + tests/unittests/test_datasource/test_ec2.py | 6 +- + 9 files changed, 201 insertions(+), 52 deletions(-) + +diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py +index 3b7b17f..57708c1 100644 +--- a/cloudinit/ec2_utils.py ++++ b/cloudinit/ec2_utils.py +@@ -134,25 +134,28 @@ class MetadataMaterializer(object): + return joined + + +-def _skip_retry_on_codes(status_codes, _request_args, cause): ++def skip_retry_on_codes(status_codes, _request_args, cause): + """Returns False if cause.code is in status_codes.""" + return cause.code not in status_codes + + + def get_instance_userdata(api_version='latest', + metadata_address='http://169.254.169.254', +- ssl_details=None, timeout=5, retries=5): ++ ssl_details=None, timeout=5, retries=5, ++ headers_cb=None, exception_cb=None): + ud_url = url_helper.combine_url(metadata_address, api_version) + ud_url = url_helper.combine_url(ud_url, 'user-data') + user_data = '' + try: +- # It is ok for userdata to not exist (thats why we are stopping if +- # NOT_FOUND occurs) and just in that case returning an empty string. +- exception_cb = functools.partial(_skip_retry_on_codes, +- SKIP_USERDATA_CODES) ++ if not exception_cb: ++ # It is ok for userdata to not exist (thats why we are stopping if ++ # NOT_FOUND occurs) and just in that case returning an empty ++ # string. 
++ exception_cb = functools.partial(skip_retry_on_codes, ++ SKIP_USERDATA_CODES) + response = url_helper.read_file_or_url( + ud_url, ssl_details=ssl_details, timeout=timeout, +- retries=retries, exception_cb=exception_cb) ++ retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) + user_data = response.contents + except url_helper.UrlError as e: + if e.code not in SKIP_USERDATA_CODES: +@@ -165,11 +168,13 @@ def get_instance_userdata(api_version='latest', + def _get_instance_metadata(tree, api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, +- leaf_decoder=None): ++ leaf_decoder=None, headers_cb=None, ++ exception_cb=None): + md_url = url_helper.combine_url(metadata_address, api_version, tree) + caller = functools.partial( + url_helper.read_file_or_url, ssl_details=ssl_details, +- timeout=timeout, retries=retries) ++ timeout=timeout, retries=retries, headers_cb=headers_cb, ++ exception_cb=exception_cb) + + def mcaller(url): + return caller(url).contents +@@ -191,22 +196,28 @@ def _get_instance_metadata(tree, api_version='latest', + def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, +- leaf_decoder=None): ++ leaf_decoder=None, headers_cb=None, ++ exception_cb=None): + # Note, 'meta-data' explicitly has trailing /. + # this is required for CloudStack (LP: #1356855) + return _get_instance_metadata(tree='meta-data/', api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, +- retries=retries, leaf_decoder=leaf_decoder) ++ retries=retries, leaf_decoder=leaf_decoder, ++ headers_cb=headers_cb, ++ exception_cb=exception_cb) + + + def get_instance_identity(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, +- leaf_decoder=None): ++ leaf_decoder=None, headers_cb=None, ++ exception_cb=None): + return _get_instance_metadata(tree='dynamic/instance-identity', + api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, +- retries=retries, leaf_decoder=leaf_decoder) ++ retries=retries, leaf_decoder=leaf_decoder, ++ headers_cb=headers_cb, ++ exception_cb=exception_cb) + # vi: ts=4 expandtab +diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py +index d4b758f..6bd2efe 100644 +--- a/cloudinit/sources/DataSourceCloudStack.py ++++ b/cloudinit/sources/DataSourceCloudStack.py +@@ -93,7 +93,7 @@ class DataSourceCloudStack(sources.DataSource): + urls = [uhelp.combine_url(self.metadata_address, + 'latest/meta-data/instance-id')] + start_time = time.time() +- url = uhelp.wait_for_url( ++ url, _response = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warn) + +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index 9ccf2cd..fbe8f3f 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -27,6 +27,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) + STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") + STRICT_ID_DEFAULT = "warn" + ++API_TOKEN_ROUTE = 'latest/api/token' ++API_TOKEN_DISABLED = '_ec2_disable_api_token' ++AWS_TOKEN_TTL_SECONDS = '21600' ++ + + class CloudNames(object): + ALIYUN = "aliyun" +@@ -59,6 +63,7 @@ class DataSourceEc2(sources.DataSource): + url_max_wait = 120 + url_timeout = 50 + ++ _api_token 
= None # API token for accessing the metadata service + _network_config = sources.UNSET # Used to cache calculated network cfg v1 + + # Whether we want to get network configuration from the metadata service. +@@ -132,11 +137,12 @@ class DataSourceEc2(sources.DataSource): + min_metadata_version. + """ + # Assumes metadata service is already up ++ url_tmpl = '{0}/{1}/meta-data/instance-id' ++ headers = self._get_headers() + for api_ver in self.extended_metadata_versions: +- url = '{0}/{1}/meta-data/instance-id'.format( +- self.metadata_address, api_ver) ++ url = url_tmpl.format(self.metadata_address, api_ver) + try: +- resp = uhelp.readurl(url=url) ++ resp = uhelp.readurl(url=url, headers=headers) + except uhelp.UrlError as e: + LOG.debug('url %s raised exception %s', url, e) + else: +@@ -156,12 +162,39 @@ class DataSourceEc2(sources.DataSource): + # setup self.identity. So we need to do that now. + api_version = self.get_metadata_api_version() + self.identity = ec2.get_instance_identity( +- api_version, self.metadata_address).get('document', {}) ++ api_version, self.metadata_address, ++ headers_cb=self._get_headers, ++ exception_cb=self._refresh_stale_aws_token_cb).get( ++ 'document', {}) + return self.identity.get( + 'instanceId', self.metadata['instance-id']) + else: + return self.metadata['instance-id'] + ++ def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): ++ if self.cloud_name != CloudNames.AWS: ++ return ++ ++ urls = [] ++ url2base = {} ++ url_path = API_TOKEN_ROUTE ++ request_method = 'PUT' ++ for url in mdurls: ++ cur = '{0}/{1}'.format(url, url_path) ++ urls.append(cur) ++ url2base[cur] = url ++ ++ # use the self._status_cb to check for Read errors, which means ++ # we can't reach the API token URL, so we should disable IMDSv2 ++ LOG.debug('Fetching Ec2 IMDSv2 API Token') ++ url, response = uhelp.wait_for_url( ++ urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, ++ headers_cb=self._get_headers, request_method=request_method) ++ ++ if url and response: ++ self._api_token = response ++ return url2base[url] ++ + def wait_for_metadata_service(self): + mcfg = self.ds_cfg + +@@ -183,27 +216,39 @@ class DataSourceEc2(sources.DataSource): + LOG.warning("Empty metadata url list! 
using default list") + mdurls = self.metadata_urls + +- urls = [] +- url2base = {} +- for url in mdurls: +- cur = '{0}/{1}/meta-data/instance-id'.format( +- url, self.min_metadata_version) +- urls.append(cur) +- url2base[cur] = url +- +- start_time = time.time() +- url = uhelp.wait_for_url( +- urls=urls, max_wait=url_params.max_wait_seconds, +- timeout=url_params.timeout_seconds, status_cb=LOG.warn) +- +- if url: +- self.metadata_address = url2base[url] ++ # try the api token path first ++ metadata_address = self._maybe_fetch_api_token(mdurls) ++ if not metadata_address: ++ if self._api_token == API_TOKEN_DISABLED: ++ LOG.warning('Retrying with IMDSv1') ++ # if we can't get a token, use instance-id path ++ urls = [] ++ url2base = {} ++ url_path = '{ver}/meta-data/instance-id'.format( ++ ver=self.min_metadata_version) ++ request_method = 'GET' ++ for url in mdurls: ++ cur = '{0}/{1}'.format(url, url_path) ++ urls.append(cur) ++ url2base[cur] = url ++ ++ start_time = time.time() ++ url, _ = uhelp.wait_for_url( ++ urls=urls, max_wait=url_params.max_wait_seconds, ++ timeout=url_params.timeout_seconds, status_cb=LOG.warning, ++ headers_cb=self._get_headers, request_method=request_method) ++ ++ if url: ++ metadata_address = url2base[url] ++ ++ if metadata_address: ++ self.metadata_address = metadata_address + LOG.debug("Using metadata source: '%s'", self.metadata_address) + else: + LOG.critical("Giving up on md from %s after %s seconds", + urls, int(time.time() - start_time)) + +- return bool(url) ++ return bool(metadata_address) + + def device_name_to_device(self, name): + # Consult metadata service, that has +@@ -349,14 +394,22 @@ class DataSourceEc2(sources.DataSource): + return {} + api_version = self.get_metadata_api_version() + crawled_metadata = {} ++ if self.cloud_name == CloudNames.AWS: ++ exc_cb = self._refresh_stale_aws_token_cb ++ exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb ++ else: ++ exc_cb = exc_cb_ud = None + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( +- api_version, self.metadata_address) ++ api_version, self.metadata_address, ++ headers_cb=self._get_headers, exception_cb=exc_cb_ud) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( +- api_version, self.metadata_address) ++ api_version, self.metadata_address, ++ headers_cb=self._get_headers, exception_cb=exc_cb) + if self.cloud_name == CloudNames.AWS: + identity = ec2.get_instance_identity( +- api_version, self.metadata_address) ++ api_version, self.metadata_address, ++ headers_cb=self._get_headers, exception_cb=exc_cb) + crawled_metadata['dynamic'] = {'instance-identity': identity} + except Exception: + util.logexc( +@@ -366,6 +419,73 @@ class DataSourceEc2(sources.DataSource): + crawled_metadata['_metadata_api_version'] = api_version + return crawled_metadata + ++ def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): ++ """Request new metadata API token. ++ @param seconds: The lifetime of the token in seconds ++ ++ @return: The API token or None if unavailable. 
++ """ ++ if self.cloud_name != CloudNames.AWS: ++ return None ++ LOG.debug("Refreshing Ec2 metadata API token") ++ request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} ++ token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) ++ try: ++ response = uhelp.readurl( ++ token_url, headers=request_header, request_method="PUT") ++ except uhelp.UrlError as e: ++ LOG.warning( ++ 'Unable to get API token: %s raised exception %s', ++ token_url, e) ++ return None ++ return response.contents ++ ++ def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): ++ """Callback will not retry on SKIP_USERDATA_CODES or if no token ++ is available.""" ++ retry = ec2.skip_retry_on_codes( ++ ec2.SKIP_USERDATA_CODES, msg, exception) ++ if not retry: ++ return False # False raises exception ++ return self._refresh_stale_aws_token_cb(msg, exception) ++ ++ def _refresh_stale_aws_token_cb(self, msg, exception): ++ """Exception handler for Ec2 to refresh token if token is stale.""" ++ if isinstance(exception, uhelp.UrlError) and exception.code == 401: ++ # With _api_token as None, _get_headers will _refresh_api_token. ++ LOG.debug("Clearing cached Ec2 API token due to expiry") ++ self._api_token = None ++ return True # always retry ++ ++ def _status_cb(self, msg, exc=None): ++ LOG.warning(msg) ++ if 'Read timed out' in msg: ++ LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') ++ self._api_token = API_TOKEN_DISABLED ++ ++ def _get_headers(self, url=''): ++ """Return a dict of headers for accessing a url. ++ ++ If _api_token is unset on AWS, attempt to refresh the token via a PUT ++ and then return the updated token header. ++ """ ++ if self.cloud_name != CloudNames.AWS or (self._api_token == ++ API_TOKEN_DISABLED): ++ return {} ++ # Request a 6 hour token if URL is API_TOKEN_ROUTE ++ request_token_header = { ++ 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} ++ if API_TOKEN_ROUTE in url: ++ return request_token_header ++ if not self._api_token: ++ # If we don't yet have an API token, get one via a PUT against ++ # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due ++ # to an invalid or expired token ++ self._api_token = self._refresh_api_token() ++ if not self._api_token: ++ return {} ++ return {'X-aws-ec2-metadata-token': self._api_token} ++ + + class DataSourceEc2Local(DataSourceEc2): + """Datasource run at init-local which sets up network to query metadata. 
+diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py +index 4616daa..d59aefd 100644 +--- a/cloudinit/sources/DataSourceExoscale.py ++++ b/cloudinit/sources/DataSourceExoscale.py +@@ -61,7 +61,7 @@ class DataSourceExoscale(sources.DataSource): + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + +- url = url_helper.wait_for_url( ++ url, _response = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, +diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py +index 61aa6d7..517913a 100644 +--- a/cloudinit/sources/DataSourceMAAS.py ++++ b/cloudinit/sources/DataSourceMAAS.py +@@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource): + url = url[:-1] + check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) + urls = [check_url] +- url = self.oauth_helper.wait_for_url( ++ url, _response = self.oauth_helper.wait_for_url( + urls=urls, max_wait=max_wait, timeout=timeout) + + if url: +diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py +index 4a01524..7a5e71b 100644 +--- a/cloudinit/sources/DataSourceOpenStack.py ++++ b/cloudinit/sources/DataSourceOpenStack.py +@@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + + url_params = self.get_url_params() + start_time = time.time() +- avail_url = url_helper.wait_for_url( ++ avail_url, _response = url_helper.wait_for_url( + urls=md_urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds) + if avail_url: +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index 1b0721b..a951b8b 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -101,7 +101,7 @@ def read_file_or_url(url, timeout=5, retries=10, + raise UrlError(cause=e, code=code, headers=None, url=url) + return FileResponse(file_path, contents=contents) + else: +- return readurl(url, timeout=timeout, retries=retries, headers=headers, ++ return readurl(url, timeout=timeout, retries=retries, + headers_cb=headers_cb, data=data, + sec_between=sec_between, ssl_details=ssl_details, + exception_cb=exception_cb) +@@ -310,7 +310,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + + def wait_for_url(urls, max_wait=None, timeout=None, + status_cb=None, headers_cb=None, sleep_time=1, +- exception_cb=None, sleep_time_cb=None): ++ exception_cb=None, sleep_time_cb=None, request_method=None): + """ + urls: a list of urls to try + max_wait: roughly the maximum time to wait before giving up +@@ -325,6 +325,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, + 'exception', the exception that occurred. + sleep_time_cb: call method with 2 arguments (response, loop_n) that + generates the next sleep time. ++ request_method: indicate the type of HTTP request, GET, PUT, or POST ++ returns: tuple of (url, response contents), on failure, (False, None) + + the idea of this routine is to wait for the EC2 metdata service to + come up. 
On both Eucalyptus and EC2 we have seen the case where +@@ -381,8 +383,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, + else: + headers = {} + +- response = readurl(url, headers=headers, timeout=timeout, +- check_status=False) ++ response = readurl( ++ url, headers=headers, timeout=timeout, ++ check_status=False, request_method=request_method) + if not response.contents: + reason = "empty response [%s]" % (response.code) + url_exc = UrlError(ValueError(reason), code=response.code, +@@ -392,7 +395,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, + url_exc = UrlError(ValueError(reason), code=response.code, + headers=response.headers, url=url) + else: +- return url ++ return url, response.contents + except UrlError as e: + reason = "request error [%s]" % e + url_exc = e +@@ -421,7 +424,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, + sleep_time) + time.sleep(sleep_time) + +- return False ++ return False, None + + + class OauthUrlHelper(object): +diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py +index d6d2d6b..83c2f75 100644 +--- a/tests/unittests/test_datasource/test_cloudstack.py ++++ b/tests/unittests/test_datasource/test_cloudstack.py +@@ -10,6 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock + import os + import time + ++MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' ++DS_PATH = MOD_PATH + '.DataSourceCloudStack' ++ + + class TestCloudStackPasswordFetching(CiTestCase): + +@@ -17,7 +20,7 @@ class TestCloudStackPasswordFetching(CiTestCase): + super(TestCloudStackPasswordFetching, self).setUp() + self.patches = ExitStack() + self.addCleanup(self.patches.close) +- mod_name = 'cloudinit.sources.DataSourceCloudStack' ++ mod_name = MOD_PATH + self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) + self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" +@@ -56,7 +59,9 @@ class TestCloudStackPasswordFetching(CiTestCase): + ds.get_data() + self.assertEqual({}, ds.get_config_obj()) + +- def test_password_sets_password(self): ++ @mock.patch(DS_PATH + '.wait_for_metadata_service') ++ def test_password_sets_password(self, m_wait): ++ m_wait.return_value = True + password = 'SekritSquirrel' + self._set_password_server_response(password) + ds = DataSourceCloudStack( +@@ -64,7 +69,9 @@ class TestCloudStackPasswordFetching(CiTestCase): + ds.get_data() + self.assertEqual(password, ds.get_config_obj()['password']) + +- def test_bad_request_doesnt_stop_ds_from_working(self): ++ @mock.patch(DS_PATH + '.wait_for_metadata_service') ++ def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): ++ m_wait.return_value = True + self._set_password_server_response('bad_request') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) +@@ -79,7 +86,9 @@ class TestCloudStackPasswordFetching(CiTestCase): + request_types.append(arg.split()[1]) + self.assertEqual(expected_request_types, request_types) + +- def test_valid_response_means_password_marked_as_saved(self): ++ @mock.patch(DS_PATH + '.wait_for_metadata_service') ++ def test_valid_response_means_password_marked_as_saved(self, m_wait): ++ m_wait.return_value = True + password = 'SekritSquirrel' + subp = self._set_password_server_response(password) + ds = DataSourceCloudStack( +@@ -92,7 +101,9 @@ class TestCloudStackPasswordFetching(CiTestCase): + subp = self._set_password_server_response(response_string) + ds = DataSourceCloudStack( + {}, None, 
helpers.Paths({'run_dir': self.tmp})) +- ds.get_data() ++ with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: ++ m_wait.return_value = True ++ ds.get_data() + self.assertRequestTypesSent(subp, ['send_my_password']) + + def test_password_not_saved_if_empty(self): +diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py +index 1a5956d..5c5c787 100644 +--- a/tests/unittests/test_datasource/test_ec2.py ++++ b/tests/unittests/test_datasource/test_ec2.py +@@ -191,7 +191,9 @@ def register_mock_metaserver(base_url, data): + register(base_url, 'not found', status=404) + + def myreg(*argc, **kwargs): +- return httpretty.register_uri(httpretty.GET, *argc, **kwargs) ++ url = argc[0] ++ method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET ++ return httpretty.register_uri(method, *argc, **kwargs) + + register_helper(myreg, base_url, data) + +@@ -237,6 +239,8 @@ class TestEc2(test_helpers.HttprettyTestCase): + if md: + all_versions = ( + [ds.min_metadata_version] + ds.extended_metadata_versions) ++ token_url = self.data_url('latest', data_item='api/token') ++ register_mock_metaserver(token_url, 'API-TOKEN') + for version in all_versions: + metadata_url = self.data_url(version) + '/' + if version == md_version: +-- +1.8.3.1 + diff --git a/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch b/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch new file mode 100644 index 0000000..f22ad0e --- /dev/null +++ b/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch @@ -0,0 +1,51 @@ +From 4e539790e57452b24aa6851452201c0f2a87c464 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 5 May 2020 08:08:21 +0200 +Subject: [PATCH 4/5] exoscale: Increase url_max_wait to 120s. + +RH-Author: Eduardo Otubo +Message-id: <20200504085238.25884-5-otubo@redhat.com> +Patchwork-id: 96247 +O-Subject: [RHEL-7.8.z cloud-init PATCH 4/5] exoscale: Increase url_max_wait to 120s. +Bugzilla: 1827207 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit 3e2482e8aa6630ca9bc115dc1f82d44d3fde1681 +Author: Chris Glass +Date: Thu Oct 24 17:32:58 2019 +0000 + + exoscale: Increase url_max_wait to 120s. + + The exoscale datasource defines a shorter timeout than the default (10) + but did not override url_max_wait, resulting in a single attempt being + made to wait for the metadata service. + + In some rare cases, a race condition means the route to the metadata + service is not set within 10 seconds, and more attempts should be made. + + This sets the url_max_wait for the datasource to 120. 
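    To make the fix concrete: the per-request timeout bounds a single
    attempt, while url_max_wait bounds the overall retry loop. A rough
    simplification, for illustration only (the parameter names mirror
    url_helper.wait_for_url(); the loop body does not reproduce the real
    implementation):

    import time

    def wait_for_url_sketch(try_once, max_wait=120, timeout=10,
                            sleep_time=1):
        start = time.time()
        while time.time() - start < max_wait:
            if try_once(timeout=timeout):  # one attempt, bounded by timeout
                return True
            time.sleep(sleep_time)         # sleep briefly, then try again
        return False

    With a max_wait that is not comfortably larger than a single attempt's
    timeout, one slow or failed request exhausts the whole budget, which is
    exactly the single-attempt behavior described above; url_max_wait = 120
    leaves room for repeated retries while the route to the metadata
    service is being set up.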
+ +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceExoscale.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py +index fdfb4ed..4616daa 100644 +--- a/cloudinit/sources/DataSourceExoscale.py ++++ b/cloudinit/sources/DataSourceExoscale.py +@@ -26,6 +26,8 @@ class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + ++ url_max_wait = 120 ++ + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") +-- +1.8.3.1 + diff --git a/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch b/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch new file mode 100644 index 0000000..f7d9adc --- /dev/null +++ b/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch @@ -0,0 +1,152 @@ +From bbe1338c356cb5bbc1196b7f4ba620f95d2b5fd1 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 5 May 2020 08:08:18 +0200 +Subject: [PATCH 3/5] exoscale: fix sysconfig cloud_config_modules overrides + +RH-Author: Eduardo Otubo +Message-id: <20200504085238.25884-4-otubo@redhat.com> +Patchwork-id: 96246 +O-Subject: [RHEL-7.8.z cloud-init PATCH 3/5] exoscale: fix sysconfig cloud_config_modules overrides +Bugzilla: 1827207 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit d1b022217a652c7a84d5430c9e571987864d3982 +Author: Chad Smith +Date: Wed Aug 28 00:58:16 2019 +0000 + + exoscale: fix sysconfig cloud_config_modules overrides + + Make sure Exoscale supplements or overrides existing system config + setting cloud_config_modules instead of replacing it with a one item + list set-passords + + LP: #1841454 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceExoscale.py | 26 ++++++++++++++++-------- + tests/unittests/test_datasource/test_exoscale.py | 24 ++++++++++++++-------- + 2 files changed, 33 insertions(+), 17 deletions(-) + +diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py +index 52e7f6f..fdfb4ed 100644 +--- a/cloudinit/sources/DataSourceExoscale.py ++++ b/cloudinit/sources/DataSourceExoscale.py +@@ -6,6 +6,7 @@ + from cloudinit import ec2_utils as ec2 + from cloudinit import log as logging + from cloudinit import sources ++from cloudinit import helpers + from cloudinit import url_helper + from cloudinit import util + +@@ -20,13 +21,6 @@ URL_RETRIES = 6 + + EXOSCALE_DMI_NAME = "Exoscale" + +-BUILTIN_DS_CONFIG = { +- # We run the set password config module on every boot in order to enable +- # resetting the instance's password via the exoscale console (and a +- # subsequent instance reboot). 
+- 'cloud_config_modules': [["set-passwords", "always"]] +-} +- + + class DataSourceExoscale(sources.DataSource): + +@@ -42,8 +36,22 @@ class DataSourceExoscale(sources.DataSource): + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) +- +- self.extra_config = BUILTIN_DS_CONFIG ++ self.extra_config = {} ++ ++ def activate(self, cfg, is_new_instance): ++ """Adjust set-passwords module to run 'always' during each boot""" ++ # We run the set password config module on every boot in order to ++ # enable resetting the instance's password via the exoscale console ++ # (and a subsequent instance reboot). ++ # Exoscale password server only provides set-passwords user-data if ++ # a user has triggered a password reset. So calling that password ++ # service generally results in no additional cloud-config. ++ # TODO(Create util functions for overriding merged sys_cfg module freq) ++ mod = 'set_passwords' ++ sem_path = self.paths.get_ipath_cur('sem') ++ sem_helper = helpers.FileSemaphores(sem_path) ++ if sem_helper.clear('config_' + mod, None): ++ LOG.debug('Overriding module set-passwords with frequency always') + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" +diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py +index 350c330..f006119 100644 +--- a/tests/unittests/test_datasource/test_exoscale.py ++++ b/tests/unittests/test_datasource/test_exoscale.py +@@ -11,8 +11,10 @@ from cloudinit.sources.DataSourceExoscale import ( + PASSWORD_SERVER_PORT, + read_metadata) + from cloudinit.tests.helpers import HttprettyTestCase, mock ++from cloudinit import util + + import httpretty ++import os + import requests + + +@@ -63,6 +65,18 @@ class TestDatasourceExoscale(HttprettyTestCase): + password = get_password() + self.assertEqual(expected_password, password) + ++ def test_activate_removes_set_passwords_semaphore(self): ++ """Allow set_passwords to run every boot by removing the semaphore.""" ++ path = helpers.Paths({'cloud_dir': self.tmp}) ++ sem_dir = self.tmp_path('instance/sem', dir=self.tmp) ++ util.ensure_dir(sem_dir) ++ sem_file = os.path.join(sem_dir, 'config_set_passwords') ++ with open(sem_file, 'w') as stream: ++ stream.write('') ++ ds = DataSourceExoscale({}, None, path) ++ ds.activate(None, None) ++ self.assertFalse(os.path.exists(sem_file)) ++ + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" +@@ -95,8 +109,6 @@ class TestDatasourceExoscale(HttprettyTestCase): + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, +- 'cloud_config_modules': [ +- ["set-passwords", "always"]], + 'chpasswd': { + 'expire': False, + }}) +@@ -130,9 +142,7 @@ class TestDatasourceExoscale(HttprettyTestCase): + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) +- self.assertEqual(ds.get_config_obj(), +- {'cloud_config_modules': [ +- ["set-passwords", "always"]]}) ++ self.assertEqual(ds.get_config_obj(), {}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is +@@ -163,9 +173,7 @@ class TestDatasourceExoscale(HttprettyTestCase): + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + 
self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) +- self.assertEqual(ds.get_config_obj(), +- {'cloud_config_modules': [ +- ["set-passwords", "always"]]}) ++ self.assertEqual(ds.get_config_obj(), {}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): +-- +1.8.3.1 + diff --git a/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch b/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch new file mode 100644 index 0000000..b55ac63 --- /dev/null +++ b/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch @@ -0,0 +1,56 @@ +From 6f8623c570247903e3cba925676677e44a99c69c Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 28 Apr 2020 08:22:05 +0200 +Subject: [PATCH 2/3] swap file "size" being used before checked if str (#315) + +RH-Author: Eduardo Otubo +Message-id: <20200422130428.7663-3-otubo@redhat.com> +Patchwork-id: 96033 +O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 2/3] swap file "size" being used before checked if str (#315) +Bugzilla: 1801094 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +commit 46cf23c28812d3e3ba0c570defd9a05628af5556 +Author: Eduardo Otubo +Date: Tue Apr 14 18:16:25 2020 +0200 + + swap file "size" being used before checked if str (#315) + + Swap file size variable was being used before checked if it's set to str + "auto". If set to "auto", it will break with: + + failed to setup swap: unsupported operand type(s) for /: 'str' and 'int' + + Signed-off-by: Eduardo Otubo + RHBZ: 1772505 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/config/cc_mounts.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py +index 6884ddf..811781f 100644 +--- a/cloudinit/config/cc_mounts.py ++++ b/cloudinit/config/cc_mounts.py +@@ -274,7 +274,6 @@ def setup_swapfile(fname, size=None, maxsize=None): + maxsize: the maximum size + """ + swap_dir = os.path.dirname(fname) +- mibsize = str(int(size / (2 ** 20))) + if str(size).lower() == "auto": + try: + memsize = util.read_meminfo()['total'] +@@ -286,6 +285,7 @@ def setup_swapfile(fname, size=None, maxsize=None): + size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, + memsize=memsize) + ++ mibsize = str(int(size / (2 ** 20))) + if not size: + LOG.debug("Not creating swap: suggested size was 0") + return +-- +1.8.3.1 + diff --git a/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch b/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch new file mode 100644 index 0000000..4f08062 --- /dev/null +++ b/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch @@ -0,0 +1,326 @@ +From f9fcf18105845fbb933925ae7b0a2f1033f75127 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 20 May 2020 10:11:14 +0200 +Subject: [PATCH] url_helper: read_file_or_url should pass headers param into + readurl (#66) + +RH-Author: Eduardo Otubo +Message-id: <20200519105653.20249-1-otubo@redhat.com> +Patchwork-id: 96613 +O-Subject: [RHEL-7.8.z cloud-init PATCH] url_helper: read_file_or_url should pass headers param into readurl (#66) +Bugzilla: 1832177 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit f69d33a723b805fec3ee70c3a6127c8cadcb02d8 +Author: Chad Smith +Date: Mon Dec 2 16:24:18 2019 -0700 + + url_helper: read_file_or_url should pass headers 
param into readurl (#66) + + Headers param was accidentally omitted and no longer passed through to + readurl due to a previous commit. + + To avoid this omission of params in the future, drop positional param + definitions from read_file_or_url and pass all kwargs through to readurl + when we are not operating on a file. + + In util:read_seeded, correct the case where invalid positional param + file_retries was being passed into read_file_or_url. + + Also drop duplicated file:// prefix addition from read_seeded because + read_file_or_url does that work anyway. + + LP: #1854084 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/helpers/azure.py | 6 ++- + cloudinit/tests/test_url_helper.py | 52 ++++++++++++++++++++++ + cloudinit/url_helper.py | 47 +++++++++++++++---- + cloudinit/user_data.py | 2 +- + cloudinit/util.py | 15 ++----- + .../unittests/test_datasource/test_azure_helper.py | 18 +++++--- + 6 files changed, 112 insertions(+), 28 deletions(-) + +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +index c2a57cc..b99c484 100755 +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -103,14 +103,16 @@ class AzureEndpointHttpClient(object): + if secure: + headers = self.headers.copy() + headers.update(self.extra_secure_headers) +- return url_helper.read_file_or_url(url, headers=headers) ++ return url_helper.read_file_or_url(url, headers=headers, timeout=5, ++ retries=10) + + def post(self, url, data=None, extra_headers=None): + headers = self.headers + if extra_headers is not None: + headers = self.headers.copy() + headers.update(extra_headers) +- return url_helper.read_file_or_url(url, data=data, headers=headers) ++ return url_helper.read_file_or_url(url, data=data, headers=headers, ++ timeout=5, retries=10) + + + class GoalState(object): +diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py +index aa9f3ec..e883ddc 100644 +--- a/cloudinit/tests/test_url_helper.py ++++ b/cloudinit/tests/test_url_helper.py +@@ -4,6 +4,7 @@ from cloudinit.url_helper import ( + NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) + from cloudinit.tests.helpers import CiTestCase, mock, skipIf + from cloudinit import util ++from cloudinit import version + + import httpretty + import requests +@@ -17,6 +18,9 @@ except ImportError: + _missing_oauthlib_dep = True + + ++M_PATH = 'cloudinit.url_helper.' 
++ ++ + class TestOAuthHeaders(CiTestCase): + + def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): +@@ -67,6 +71,54 @@ class TestReadFileOrUrl(CiTestCase): + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + ++ @mock.patch(M_PATH + 'readurl') ++ def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): ++ """read_file_or_url passes all params through to readurl.""" ++ url = 'http://hostname/path' ++ response = 'This is my url content\n' ++ m_readurl.return_value = response ++ params = {'url': url, 'timeout': 1, 'retries': 2, ++ 'headers': {'somehdr': 'val'}, ++ 'data': 'data', 'sec_between': 1, ++ 'ssl_details': {'cert_file': '/path/cert.pem'}, ++ 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} ++ self.assertEqual(response, read_file_or_url(**params)) ++ params.pop('url') # url is passed in as a positional arg ++ self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) ++ ++ def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): ++ """Readurl param defaults used when unspecified by read_file_or_url ++ ++ Param defaults tested are as follows: ++ retries: 0, additional headers None beyond default, method: GET, ++ data: None, check_status: True and allow_redirects: True ++ """ ++ url = 'http://hostname/path' ++ ++ m_response = mock.MagicMock() ++ ++ class FakeSession(requests.Session): ++ def request(cls, **kwargs): ++ self.assertEqual( ++ {'url': url, 'allow_redirects': True, 'method': 'GET', ++ 'headers': { ++ 'User-Agent': 'Cloud-Init/%s' % ( ++ version.version_string())}}, ++ kwargs) ++ return m_response ++ ++ with mock.patch(M_PATH + 'requests.Session') as m_session: ++ error = requests.exceptions.HTTPError('broke') ++ m_session.side_effect = [error, FakeSession()] ++ # assert no retries and check_status == True ++ with self.assertRaises(UrlError) as context_manager: ++ response = read_file_or_url(url) ++ self.assertEqual('broke', str(context_manager.exception)) ++ # assert default headers, method, url and allow_redirects True ++ # Success on 2nd call with FakeSession ++ response = read_file_or_url(url) ++ self.assertEqual(m_response, response._response) ++ + + class TestRetryOnUrlExc(CiTestCase): + +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index a951b8b..beb6873 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -81,14 +81,19 @@ def combine_url(base, *add_ons): + return url + + +-def read_file_or_url(url, timeout=5, retries=10, +- headers=None, data=None, sec_between=1, ssl_details=None, +- headers_cb=None, exception_cb=None): ++def read_file_or_url(url, **kwargs): ++ """Wrapper function around readurl to allow passing a file path as url. ++ ++ When url is not a local file path, passthrough any kwargs to readurl. ++ ++ In the case of parameter passthrough to readurl, default values for some ++ parameters. See: call-signature of readurl in this module for param docs. 
++ """ + url = url.lstrip() + if url.startswith("/"): + url = "file://%s" % url + if url.lower().startswith("file://"): +- if data: ++ if kwargs.get("data"): + LOG.warning("Unable to post data to file resource %s", url) + file_path = url[len("file://"):] + try: +@@ -101,10 +106,7 @@ def read_file_or_url(url, timeout=5, retries=10, + raise UrlError(cause=e, code=code, headers=None, url=url) + return FileResponse(file_path, contents=contents) + else: +- return readurl(url, timeout=timeout, retries=retries, +- headers_cb=headers_cb, data=data, +- sec_between=sec_between, ssl_details=ssl_details, +- exception_cb=exception_cb) ++ return readurl(url, **kwargs) + + + # Made to have same accessors as UrlResponse so that the +@@ -201,6 +203,35 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + check_status=True, allow_redirects=True, exception_cb=None, + session=None, infinite=False, log_req_resp=True, + request_method=None): ++ """Wrapper around requests.Session to read the url and retry if necessary ++ ++ :param url: Mandatory url to request. ++ :param data: Optional form data to post the URL. Will set request_method ++ to 'POST' if present. ++ :param timeout: Timeout in seconds to wait for a response ++ :param retries: Number of times to retry on exception if exception_cb is ++ None or exception_cb returns True for the exception caught. Default is ++ to fail with 0 retries on exception. ++ :param sec_between: Default 1: amount of seconds passed to time.sleep ++ between retries. None or -1 means don't sleep. ++ :param headers: Optional dict of headers to send during request ++ :param headers_cb: Optional callable returning a dict of values to send as ++ headers during request ++ :param ssl_details: Optional dict providing key_file, ca_certs, and ++ cert_file keys for use on in ssl connections. ++ :param check_status: Optional boolean set True to raise when HTTPError ++ occurs. Default: True. ++ :param allow_redirects: Optional boolean passed straight to Session.request ++ as 'allow_redirects'. Default: True. ++ :param exception_cb: Optional callable which accepts the params ++ msg and exception and returns a boolean True if retries are permitted. ++ :param session: Optional exiting requests.Session instance to reuse. ++ :param infinite: Bool, set True to retry indefinitely. Default: False. ++ :param log_req_resp: Set False to turn off verbose debug messages. ++ :param request_method: String passed as 'method' to Session.request. ++ Typically GET, or POST. Default: POST if data is provided, GET ++ otherwise. ++ """ + url = _cleanurl(url) + req_args = { + 'url': url, +diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py +index ed83d2d..15af1da 100644 +--- a/cloudinit/user_data.py ++++ b/cloudinit/user_data.py +@@ -224,7 +224,7 @@ class UserDataProcessor(object): + content = util.load_file(include_once_fn) + else: + try: +- resp = read_file_or_url(include_url, ++ resp = read_file_or_url(include_url, timeout=5, retries=10, + ssl_details=self.ssl_details) + if include_once_on and resp.ok(): + util.write_file(include_once_fn, resp.contents, +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 2c9ac66..db9a229 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -966,13 +966,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): + + + def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): +- if base.startswith("/"): +- base = "file://%s" % base +- +- # default retries for file is 0. 
for network is 10 +- if base.startswith("file://"): +- retries = file_retries +- + if base.find("%s") >= 0: + ud_url = base % ("user-data" + ext) + md_url = base % ("meta-data" + ext) +@@ -980,14 +973,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): + ud_url = "%s%s%s" % (base, "user-data", ext) + md_url = "%s%s%s" % (base, "meta-data", ext) + +- md_resp = url_helper.read_file_or_url(md_url, timeout, retries, +- file_retries) ++ md_resp = url_helper.read_file_or_url(md_url, timeout=timeout, ++ retries=retries) + md = None + if md_resp.ok(): + md = load_yaml(decode_binary(md_resp.contents), default={}) + +- ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, +- file_retries) ++ ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout, ++ retries=retries) + ud = None + if ud_resp.ok(): + ud = ud_resp.contents +diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py +index 7ad5cc1..007df09 100644 +--- a/tests/unittests/test_datasource/test_azure_helper.py ++++ b/tests/unittests/test_datasource/test_azure_helper.py +@@ -231,8 +231,10 @@ class TestAzureEndpointHttpClient(CiTestCase): + response = client.get(url, secure=False) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) +- self.assertEqual(mock.call(url, headers=self.regular_headers), +- self.read_file_or_url.call_args) ++ self.assertEqual( ++ mock.call(url, headers=self.regular_headers, retries=10, ++ timeout=5), ++ self.read_file_or_url.call_args) + + def test_secure_get(self): + url = 'MyTestUrl' +@@ -246,8 +248,10 @@ class TestAzureEndpointHttpClient(CiTestCase): + response = client.get(url, secure=True) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) +- self.assertEqual(mock.call(url, headers=expected_headers), +- self.read_file_or_url.call_args) ++ self.assertEqual( ++ mock.call(url, headers=expected_headers, retries=10, ++ timeout=5), ++ self.read_file_or_url.call_args) + + def test_post(self): + data = mock.MagicMock() +@@ -257,7 +261,8 @@ class TestAzureEndpointHttpClient(CiTestCase): + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual( +- mock.call(url, data=data, headers=self.regular_headers), ++ mock.call(url, data=data, headers=self.regular_headers, retries=10, ++ timeout=5), + self.read_file_or_url.call_args) + + def test_post_with_extra_headers(self): +@@ -269,7 +274,8 @@ class TestAzureEndpointHttpClient(CiTestCase): + expected_headers = self.regular_headers.copy() + expected_headers.update(extra_headers) + self.assertEqual( +- mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), ++ mock.call(mock.ANY, data=mock.ANY, headers=expected_headers, ++ retries=10, timeout=5), + self.read_file_or_url.call_args) + + +-- +1.8.3.1 + diff --git a/SOURCES/cloud-init-centos-user.patch b/SOURCES/cloud-init-centos-user.patch deleted file mode 100644 index 3ebaa88..0000000 --- a/SOURCES/cloud-init-centos-user.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -uNrp cloud-init-18.2.orig/rhel/cloud.cfg cloud-init-18.2/rhel/cloud.cfg ---- cloud-init-18.2.orig/rhel/cloud.cfg 2018-11-04 15:38:13.763701007 +0000 -+++ cloud-init-18.2/rhel/cloud.cfg 2018-11-04 15:41:06.934576619 +0000 -@@ -52,7 +52,7 @@ cloud_final_modules: - - system_info: - default_user: -- name: cloud-user -+ name: centos - lock_passwd: 
true - gecos: Cloud User - groups: [wheel, adm, systemd-journal] diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec index 422bcf2..4aa2ff4 100644 --- a/SPECS/cloud-init.spec +++ b/SPECS/cloud-init.spec @@ -7,7 +7,7 @@ Name: cloud-init Version: 18.5 -Release: 6%{?dist} +Release: 6%{?dist}.5 Summary: Cloud instance init scripts Group: System Environment/Base @@ -46,8 +46,28 @@ Patch19: ci-Azure-Return-static-fallback-address-as-if-failed-to.patch Patch20: ci-Fix-for-network-configuration-not-persisting-after-r.patch # For bz#1744526 - [cloud-init][OpenStack] cloud-init can't persist instance-data.json Patch21: ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch - -Patch9999: cloud-init-centos-user.patch +# For bz#1810064 - cloud-init Azure byte swap (hyperV Gen2 Only) [rhel-7.8.z] +Patch22: ci-azure-avoid.patch +# For bz#1802173 - [cloud-init][rhel-7.8.z]cloud-init cloud-final.service fail with KeyError: 'modules-init' after upgrade to version 18.2-1.el7_6.1 in RHV +Patch23: ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch +# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] +Patch24: ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch +# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] +Patch25: ci-swap-file-size-being-used-before-checked-if-str-315.patch +# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] +Patch26: ci-cc_mounts-fix-incorrect-format-specifiers-316.patch +# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] +Patch27: ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch +# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] +Patch28: ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch +# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] +Patch29: ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch +# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] +Patch30: ci-exoscale-Increase-url_max_wait-to-120s.patch +# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] +Patch31: ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch +# For bz#1832177 - [Azure] cloud-init provisioning failed in Azure [rhel-7.8.z] +Patch32: ci-url_helper-read_file_or_url-should-pass-headers-para.patch # Deal with noarch -> arch # https://bugzilla.redhat.com/show_bug.cgi?id=1067089 @@ -217,6 +237,37 @@ fi %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf %changelog +* Wed May 20 2020 Miroslav Rezanina - 18.5-6.el7_8.5 +- ci-url_helper-read_file_or_url-should-pass-headers-para.patch [bz#1832177] +- Resolves: bz#1832177 + ([Azure] cloud-init provisioning failed in Azure [rhel-7.8.z]) + +* Tue May 05 2020 Miroslav Rezanina - 18.5-6.el7_8.4 +- ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch [bz#1827207] +- ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch [bz#1827207] +- ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch [bz#1827207] +- ci-exoscale-Increase-url_max_wait-to-120s.patch [bz#1827207] +- ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch [bz#1827207] +- Resolves: bz#1827207 + (Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z]) + +* Tue Apr 28 2020 Miroslav 
Rezanina - 18.5-6.el7_8.3 +- ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch [bz#1801094] +- ci-swap-file-size-being-used-before-checked-if-str-315.patch [bz#1801094] +- ci-cc_mounts-fix-incorrect-format-specifiers-316.patch [bz#1801094] +- Resolves: bz#1801094 + ([RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z]) + +* Tue Apr 14 2020 Miroslav Rezanina - 18.5-6.el7_8.2 +- ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch [bz#1802173] +- Resolves: bz#1802173 + ([cloud-init][rhel-7.8.z]cloud-init cloud-final.service fail with KeyError: 'modules-init' after upgrade to version 18.2-1.el7_6.1 in RHV) + +* Mon Mar 30 2020 Miroslav Rezanina - 18.5-6.el7_8.1 +- ci-azure-avoid.patch [bz#1810064] +- Resolves: bz#1810064 + (cloud-init Azure byte swap (hyperV Gen2 Only) [rhel-7.8.z]) + * Thu Oct 24 2019 Miroslav Rezanina - 18.5-6.el7 - ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch [bz#1744526] - Resolves: bz#1744526