diff --git a/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch b/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch
new file mode 100644
index 0000000..9dd373f
--- /dev/null
+++ b/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch
@@ -0,0 +1,295 @@
+From 2a2a5cdec0de0b96d503f9357c1641043574f90a Mon Sep 17 00:00:00 2001
+From: Thomas Stringer <thstring@microsoft.com>
+Date: Wed, 3 Mar 2021 11:07:43 -0500
+Subject: [PATCH 1/7] Add flexibility to IMDS api-version (#793)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [1/7] 9aa42581c4ff175fb6f8f4a78d94cac9c9971062
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+Add flexibility to IMDS api-version by having both a desired IMDS
+api-version and a minimum api-version. The desired api-version will
+be used first, and if that fails it will fall back to the minimum
+api-version.
+---
+ cloudinit/sources/DataSourceAzure.py          | 113 ++++++++++++++----
+ tests/unittests/test_datasource/test_azure.py |  42 ++++++-
+ 2 files changed, 129 insertions(+), 26 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 553b5a7e..de1452ce 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent'
+ # In the event where the IMDS primary server is not
+ # available, it takes 1s to fallback to the secondary one
+ IMDS_TIMEOUT_IN_SECONDS = 2
+-IMDS_URL = "http://169.254.169.254/metadata/"
+-IMDS_VER = "2019-06-01"
+-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER)
++IMDS_URL = "http://169.254.169.254/metadata"
++IMDS_VER_MIN = "2019-06-01"
++IMDS_VER_WANT = "2020-09-01"
+ 
+ 
+ class metadata_type(Enum):
+-    compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM)
+-    network = "{}instance/network?{}".format(IMDS_URL,
+-                                             IMDS_VER_PARAM)
+-    reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL,
+-                                                    IMDS_VER_PARAM)
++    compute = "{}/instance".format(IMDS_URL)
++    network = "{}/instance/network".format(IMDS_URL)
++    reprovisiondata = "{}/reprovisiondata".format(IMDS_URL)
+ 
+ 
+ PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+@@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource):
+         self.update_events['network'].add(EventType.BOOT)
+         self._ephemeral_dhcp_ctx = None
+ 
++        self.failed_desired_api_version = False
++
+     def __str__(self):
+         root = sources.DataSource.__str__(self)
+         return "%s [seed=%s]" % (root, self.seed)
+@@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource):
+                     self._wait_for_all_nics_ready()
+                 ret = self._reprovision()
+ 
+-            imds_md = get_metadata_from_imds(
+-                self.fallback_interface, retries=10)
++            imds_md = self.get_imds_data_with_api_fallback(
++                self.fallback_interface,
++                retries=10
++            )
+             (md, userdata_raw, cfg, files) = ret
+             self.seed = cdev
+             crawled_data.update({
+@@ -652,6 +654,57 @@ class DataSourceAzure(sources.DataSource):
+             self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
+         return True
+ 
++    @azure_ds_telemetry_reporter
++    def get_imds_data_with_api_fallback(
++            self,
++            fallback_nic,
++            retries,
++            md_type=metadata_type.compute):
++        """
++        Wrapper for get_metadata_from_imds so that we can have flexibility
++        in which IMDS api-version we use. If a particular instance of IMDS
++        does not have the api version that is desired, we want to make
++        this fault tolerant and fall back to a good known minimum api
++        version.
++        """
++
++        if not self.failed_desired_api_version:
++            for _ in range(retries):
++                try:
++                    LOG.info(
++                        "Attempting IMDS api-version: %s",
++                        IMDS_VER_WANT
++                    )
++                    return get_metadata_from_imds(
++                        fallback_nic=fallback_nic,
++                        retries=0,
++                        md_type=md_type,
++                        api_version=IMDS_VER_WANT
++                    )
++                except UrlError as err:
++                    LOG.info(
++                        "UrlError with IMDS api-version: %s",
++                        IMDS_VER_WANT
++                    )
++                    if err.code == 400:
++                        log_msg = "Fall back to IMDS api-version: {}".format(
++                            IMDS_VER_MIN
++                        )
++                        report_diagnostic_event(
++                            log_msg,
++                            logger_func=LOG.info
++                        )
++                        self.failed_desired_api_version = True
++                        break
++
++        LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN)
++        return get_metadata_from_imds(
++            fallback_nic=fallback_nic,
++            retries=retries,
++            md_type=md_type,
++            api_version=IMDS_VER_MIN
++        )
++
+     def device_name_to_device(self, name):
+         return self.ds_cfg['disk_aliases'].get(name)
+ 
+@@ -880,10 +933,11 @@ class DataSourceAzure(sources.DataSource):
+         # primary nic is being attached first helps here. Otherwise each nic
+         # could add several seconds of delay.
+         try:
+-            imds_md = get_metadata_from_imds(
++            imds_md = self.get_imds_data_with_api_fallback(
+                 ifname,
+                 5,
+-                metadata_type.network)
++                metadata_type.network
++            )
+         except Exception as e:
+             LOG.warning(
+                 "Failed to get network metadata using nic %s. Attempt to "
+@@ -1017,7 +1071,10 @@ class DataSourceAzure(sources.DataSource):
+     def _poll_imds(self):
+         """Poll IMDS for the new provisioning data until we get a valid
+         response. Then return the returned JSON object."""
+-        url = metadata_type.reprovisiondata.value
++        url = "{}?api-version={}".format(
++            metadata_type.reprovisiondata.value,
++            IMDS_VER_MIN
++        )
+         headers = {"Metadata": "true"}
+         nl_sock = None
+         report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
+@@ -2059,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict:
+ @azure_ds_telemetry_reporter
+ def get_metadata_from_imds(fallback_nic,
+                            retries,
+-                           md_type=metadata_type.compute):
++                           md_type=metadata_type.compute,
++                           api_version=IMDS_VER_MIN):
+     """Query Azure's instance metadata service, returning a dictionary.
+ 
+     If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
+@@ -2069,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic,
+     @param fallback_nic: String. The name of the nic which requires active
+         network in order to query IMDS.
+     @param retries: The number of retries of the IMDS_URL.
++    @param md_type: Metadata type for IMDS request.
++    @param api_version: IMDS api-version to use in the request.
+ 
+     @return: A dict of instance metadata containing compute and network
+         info.
+     """
+     kwargs = {'logfunc': LOG.debug,
+               'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
+-              'func': _get_metadata_from_imds, 'args': (retries, md_type,)}
++              'func': _get_metadata_from_imds,
++              'args': (retries, md_type, api_version,)}
+     if net.is_up(fallback_nic):
+         return util.log_time(**kwargs)
+     else:
+@@ -2091,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic,
+ 
+ 
+ @azure_ds_telemetry_reporter
+-def _get_metadata_from_imds(retries, md_type=metadata_type.compute):
+-
+-    url = md_type.value
++def _get_metadata_from_imds(
++        retries,
++        md_type=metadata_type.compute,
++        api_version=IMDS_VER_MIN):
++    url = "{}?api-version={}".format(md_type.value, api_version)
+     headers = {"Metadata": "true"}
+     try:
+         response = readurl(
+             url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
+             retries=retries, exception_cb=retry_on_url_exc)
+     except Exception as e:
+-        report_diagnostic_event(
+-            'Ignoring IMDS instance metadata. '
+-            'Get metadata from IMDS failed: %s' % e,
+-            logger_func=LOG.warning)
+-        return {}
++        # pylint:disable=no-member
++        if isinstance(e, UrlError) and e.code == 400:
++            raise
++        else:
++            report_diagnostic_event(
++                'Ignoring IMDS instance metadata. '
++                'Get metadata from IMDS failed: %s' % e,
++                logger_func=LOG.warning)
++            return {}
+     try:
+         from json.decoder import JSONDecodeError
+         json_decode_error = JSONDecodeError
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index f597c723..dedebeb1 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -408,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+ 
+     def setUp(self):
+         super(TestGetMetadataFromIMDS, self).setUp()
+-        self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
++        self.network_md_url = "{}/instance?api-version=2019-06-01".format(
++            dsaz.IMDS_URL
++        )
+ 
+     @mock.patch(MOCKPATH + 'readurl')
+     @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
+@@ -518,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+         """Return empty dict when IMDS network metadata is absent."""
+         httpretty.register_uri(
+             httpretty.GET,
+-            dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
++            dsaz.IMDS_URL + '/instance?api-version=2017-12-01',
+             body={}, status=404)
+ 
+         m_net_is_up.return_value = True  # skips dhcp
+@@ -1877,6 +1879,40 @@ scbus-1 on xpt0 bus 0
+         ssh_keys = dsrc.get_public_ssh_keys()
+         self.assertEqual(ssh_keys, ['key2'])
+ 
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_imds_api_version_wanted_nonexistent(
++            self,
++            m_get_metadata_from_imds):
++        def get_metadata_from_imds_side_eff(*args, **kwargs):
++            if kwargs['api_version'] == dsaz.IMDS_VER_WANT:
++                raise url_helper.UrlError("No IMDS version", code=400)
++            return NETWORK_METADATA
++        m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        self.assertIsNotNone(dsrc.metadata)
++        self.assertTrue(dsrc.failed_desired_api_version)
++
++    @mock.patch(
++        MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA)
++    def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        self.assertIsNotNone(dsrc.metadata)
++        self.assertFalse(dsrc.failed_desired_api_version)
++
+ 
+ class TestAzureBounce(CiTestCase):
+ 
+@@ -2657,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
+     @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
+     @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
+     @mock.patch('cloudinit.sources.net.find_fallback_nic')
+-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
+     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+     @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+     @mock.patch('os.path.isfile')
+-- 
+2.27.0
+
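The patch above prefers a newer IMDS api-version and falls back to the known
minimum when the endpoint rejects it with HTTP 400. A minimal, self-contained
sketch of that pattern; the fetch_imds helper below is illustrative, not
cloud-init's actual API:

import urllib.error
import urllib.request

IMDS_URL = "http://169.254.169.254/metadata/instance"
IMDS_VER_MIN = "2019-06-01"
IMDS_VER_WANT = "2020-09-01"


def fetch_imds(api_version, timeout=2):
    # Query IMDS with an explicit api-version; the Metadata header is required.
    url = "{}?api-version={}".format(IMDS_URL, api_version)
    req = urllib.request.Request(url, headers={"Metadata": "true"})
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        return resp.read()


def fetch_with_fallback():
    # Try the desired api-version first; HTTP 400 means the version is
    # unsupported on this IMDS instance, so retry with the minimum version.
    try:
        return fetch_imds(IMDS_VER_WANT)
    except urllib.error.HTTPError as err:
        if err.code != 400:
            raise
    return fetch_imds(IMDS_VER_MIN)
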
diff --git a/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch b/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch
new file mode 100644
index 0000000..de27366
--- /dev/null
+++ b/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch
@@ -0,0 +1,397 @@
+From 3ec4ddbc595c5fe781b3dc501631d23569849818 Mon Sep 17 00:00:00 2001
+From: Thomas Stringer <thstring@microsoft.com>
+Date: Mon, 26 Apr 2021 09:41:38 -0400
+Subject: [PATCH 5/7] Azure: Retrieve username and hostname from IMDS (#865)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [5/7] 6fab7ef28c7fd340bda4f82dbf828f10716cb3f1
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+This change allows us to retrieve the username and hostname from
+IMDS instead of having to rely on the mounted OVF.
+---
+ cloudinit/sources/DataSourceAzure.py          | 149 ++++++++++++++----
+ tests/unittests/test_datasource/test_azure.py |  87 +++++++++-
+ 2 files changed, 205 insertions(+), 31 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 39e67c4f..6d7954ee 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -5,6 +5,7 @@
+ # This file is part of cloud-init. See LICENSE file for license information.
+ 
+ import base64
++from collections import namedtuple
+ import contextlib
+ import crypt
+ from functools import partial
+@@ -25,6 +26,7 @@ from cloudinit.net import device_driver
+ from cloudinit.net.dhcp import EphemeralDHCPv4
+ from cloudinit import sources
+ from cloudinit.sources.helpers import netlink
++from cloudinit import ssh_util
+ from cloudinit import subp
+ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
+ from cloudinit import util
+@@ -80,7 +82,12 @@ AGENT_SEED_DIR = '/var/lib/waagent'
+ IMDS_TIMEOUT_IN_SECONDS = 2
+ IMDS_URL = "http://169.254.169.254/metadata"
+ IMDS_VER_MIN = "2019-06-01"
+-IMDS_VER_WANT = "2020-09-01"
++IMDS_VER_WANT = "2020-10-01"
++
++
++# This holds SSH key data including if the source was
++# from IMDS, as well as the SSH key data itself.
++SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys"))
+ 
+ 
+ class metadata_type(Enum):
+@@ -391,6 +398,8 @@ class DataSourceAzure(sources.DataSource):
+         """Return the subplatform metadata source details."""
+         if self.seed.startswith('/dev'):
+             subplatform_type = 'config-disk'
++        elif self.seed.lower() == 'imds':
++            subplatform_type = 'imds'
+         else:
+             subplatform_type = 'seed-dir'
+         return '%s (%s)' % (subplatform_type, self.seed)
+@@ -433,9 +442,11 @@ class DataSourceAzure(sources.DataSource):
+ 
+         found = None
+         reprovision = False
++        ovf_is_accessible = True
+         reprovision_after_nic_attach = False
+         for cdev in candidates:
+             try:
++                LOG.debug("cdev: %s", cdev)
+                 if cdev == "IMDS":
+                     ret = None
+                     reprovision = True
+@@ -462,8 +473,18 @@ class DataSourceAzure(sources.DataSource):
+                 raise sources.InvalidMetaDataException(msg)
+             except util.MountFailedError:
+                 report_diagnostic_event(
+-                    '%s was not mountable' % cdev, logger_func=LOG.warning)
+-                continue
++                    '%s was not mountable' % cdev, logger_func=LOG.debug)
++                cdev = 'IMDS'
++                ovf_is_accessible = False
++                empty_md = {'local-hostname': ''}
++                empty_cfg = dict(
++                    system_info=dict(
++                        default_user=dict(
++                            name=''
++                        )
++                    )
++                )
++                ret = (empty_md, '', empty_cfg, {})
+ 
+             report_diagnostic_event("Found provisioning metadata in %s" % cdev,
+                                     logger_func=LOG.debug)
+@@ -490,6 +511,10 @@ class DataSourceAzure(sources.DataSource):
+                 self.fallback_interface,
+                 retries=10
+             )
++            if not imds_md and not ovf_is_accessible:
++                msg = 'No OVF or IMDS available'
++                report_diagnostic_event(msg)
++                raise sources.InvalidMetaDataException(msg)
+             (md, userdata_raw, cfg, files) = ret
+             self.seed = cdev
+             crawled_data.update({
+@@ -498,6 +523,21 @@ class DataSourceAzure(sources.DataSource):
+                 'metadata': util.mergemanydict(
+                     [md, {'imds': imds_md}]),
+                 'userdata_raw': userdata_raw})
++            imds_username = _username_from_imds(imds_md)
++            imds_hostname = _hostname_from_imds(imds_md)
++            imds_disable_password = _disable_password_from_imds(imds_md)
++            if imds_username:
++                LOG.debug('Username retrieved from IMDS: %s', imds_username)
++                cfg['system_info']['default_user']['name'] = imds_username
++            if imds_hostname:
++                LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
++                crawled_data['metadata']['local-hostname'] = imds_hostname
++            if imds_disable_password:
++                LOG.debug(
++                    'Disable password retrieved from IMDS: %s',
++                    imds_disable_password
++                )
++                crawled_data['metadata']['disable_password'] = imds_disable_password  # noqa: E501
+             found = cdev
+ 
+             report_diagnostic_event(
+@@ -676,6 +716,13 @@ class DataSourceAzure(sources.DataSource):
+ 
+     @azure_ds_telemetry_reporter
+     def get_public_ssh_keys(self):
++        """
++        Retrieve public SSH keys.
++        """
++
++        return self._get_public_ssh_keys_and_source().ssh_keys
++
++    def _get_public_ssh_keys_and_source(self):
+         """
+         Try to get the ssh keys from IMDS first, and if that fails
+         (i.e. IMDS is unavailable) then fallback to getting the ssh
+@@ -685,30 +732,50 @@ class DataSourceAzure(sources.DataSource):
+         advantage, so this is a strong preference. But we must keep
+         OVF as a second option for environments that don't have IMDS.
+         """
++
+         LOG.debug('Retrieving public SSH keys')
+         ssh_keys = []
++        keys_from_imds = True
++        LOG.debug('Attempting to get SSH keys from IMDS')
+         try:
+-            raise KeyError(
+-                "Not using public SSH keys from IMDS"
+-            )
+-            # pylint:disable=unreachable
+             ssh_keys = [
+                 public_key['keyData']
+                 for public_key
+                 in self.metadata['imds']['compute']['publicKeys']
+             ]
+-            LOG.debug('Retrieved SSH keys from IMDS')
++            for key in ssh_keys:
++                if not _key_is_openssh_formatted(key=key):
++                    keys_from_imds = False
++                    break
++
++            if not keys_from_imds:
++                log_msg = 'Keys not in OpenSSH format, using OVF'
++            else:
++                log_msg = 'Retrieved {} keys from IMDS'.format(
++                    len(ssh_keys)
++                    if ssh_keys is not None
++                    else 0
++                )
+         except KeyError:
+             log_msg = 'Unable to get keys from IMDS, falling back to OVF'
++            keys_from_imds = False
++        finally:
+             report_diagnostic_event(log_msg, logger_func=LOG.debug)
++
++        if not keys_from_imds:
++            LOG.debug('Attempting to get SSH keys from OVF')
+             try:
+                 ssh_keys = self.metadata['public-keys']
+-                LOG.debug('Retrieved keys from OVF')
++                log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys))
+             except KeyError:
+                 log_msg = 'No keys available from OVF'
++            finally:
+                 report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ 
+-        return ssh_keys
++        return SSHKeys(
++            keys_from_imds=keys_from_imds,
++            ssh_keys=ssh_keys
++        )
+ 
+     def get_config_obj(self):
+         return self.cfg
+@@ -1325,30 +1392,21 @@ class DataSourceAzure(sources.DataSource):
+         self.bounce_network_with_azure_hostname()
+ 
+         pubkey_info = None
+-        try:
+-            raise KeyError(
+-                "Not using public SSH keys from IMDS"
+-            )
+-            # pylint:disable=unreachable
+-            public_keys = self.metadata['imds']['compute']['publicKeys']
+-            LOG.debug(
+-                'Successfully retrieved %s key(s) from IMDS',
+-                len(public_keys)
+-                if public_keys is not None
++        ssh_keys_and_source = self._get_public_ssh_keys_and_source()
++
++        if not ssh_keys_and_source.keys_from_imds:
++            pubkey_info = self.cfg.get('_pubkeys', None)
++            log_msg = 'Retrieved {} fingerprints from OVF'.format(
++                len(pubkey_info)
++                if pubkey_info is not None
+                 else 0
+             )
+-        except KeyError:
+-            LOG.debug(
+-                'Unable to retrieve SSH keys from IMDS during '
+-                'negotiation, falling back to OVF'
+-            )
+-            pubkey_info = self.cfg.get('_pubkeys', None)
++            report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ 
+         metadata_func = partial(get_metadata_from_fabric,
+                                 fallback_lease_file=self.
+                                 dhclient_lease_file,
+-                                pubkey_info=pubkey_info,
+-                                iso_dev=self.iso_dev)
++                                pubkey_info=pubkey_info)
+ 
+         LOG.debug("negotiating with fabric via agent command %s",
+                   self.ds_cfg['agent_command'])
+@@ -1404,6 +1462,41 @@ class DataSourceAzure(sources.DataSource):
+         return self.metadata.get('imds', {}).get('compute', {}).get('location')
+ 
+ 
++def _username_from_imds(imds_data):
++    try:
++        return imds_data['compute']['osProfile']['adminUsername']
++    except KeyError:
++        return None
++
++
++def _hostname_from_imds(imds_data):
++    try:
++        return imds_data['compute']['osProfile']['computerName']
++    except KeyError:
++        return None
++
++
++def _disable_password_from_imds(imds_data):
++    try:
++        return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true'  # noqa: E501
++    except KeyError:
++        return None
++
++
++def _key_is_openssh_formatted(key):
++    """
++    Validate whether or not the key is OpenSSH-formatted.
++    """
++
++    parser = ssh_util.AuthKeyLineParser()
++    try:
++        akl = parser.parse(key)
++    except TypeError:
++        return False
++
++    return akl.keytype is not None
++
++
+ def _partitions_on_device(devpath, maxnum=16):
+     # return a list of tuples (ptnum, path) for each part on devpath
+     for suff in ("-part", "p", ""):
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index 320fa857..d9817d84 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -108,7 +108,7 @@ NETWORK_METADATA = {
+         "zone": "",
+         "publicKeys": [
+             {
+-                "keyData": "key1",
++                "keyData": "ssh-rsa key1",
+                 "path": "path1"
+             }
+         ]
+@@ -1761,8 +1761,29 @@ scbus-1 on xpt0 bus 0
+         dsrc.get_data()
+         dsrc.setup(True)
+         ssh_keys = dsrc.get_public_ssh_keys()
+-        # Temporarily alter this test so that SSH public keys
+-        # from IMDS are *not* going to be in use to fix a regression.
++        self.assertEqual(ssh_keys, ["ssh-rsa key1"])
++        self.assertEqual(m_parse_certificates.call_count, 0)
++
++    @mock.patch(
++        'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_get_public_ssh_keys_with_no_openssh_format(
++            self,
++            m_get_metadata_from_imds,
++            m_parse_certificates):
++        imds_data = copy.deepcopy(NETWORK_METADATA)
++        imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format'
++        m_get_metadata_from_imds.return_value = imds_data
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        dsrc.setup(True)
++        ssh_keys = dsrc.get_public_ssh_keys()
+         self.assertEqual(ssh_keys, [])
+         self.assertEqual(m_parse_certificates.call_count, 0)
+ 
+@@ -1818,6 +1839,66 @@ scbus-1 on xpt0 bus 0
+         self.assertIsNotNone(dsrc.metadata)
+         self.assertFalse(dsrc.failed_desired_api_version)
+ 
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_hostname_from_imds(self, m_get_metadata_from_imds):
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
++        imds_data_with_os_profile["compute"]["osProfile"] = dict(
++            adminUsername="username1",
++            computerName="hostname1",
++            disablePasswordAuthentication="true"
++        )
++        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
++
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_username_from_imds(self, m_get_metadata_from_imds):
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
++        imds_data_with_os_profile["compute"]["osProfile"] = dict(
++            adminUsername="username1",
++            computerName="hostname1",
++            disablePasswordAuthentication="true"
++        )
++        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        self.assertEqual(
++            dsrc.cfg["system_info"]["default_user"]["name"],
++            "username1"
++        )
++
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_disable_password_from_imds(self, m_get_metadata_from_imds):
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
++        imds_data_with_os_profile["compute"]["osProfile"] = dict(
++            adminUsername="username1",
++            computerName="hostname1",
++            disablePasswordAuthentication="true"
++        )
++        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
++        dsrc = self._get_ds(data)
++        dsrc.get_data()
++        self.assertTrue(dsrc.metadata["disable_password"])
++
+ 
+ class TestAzureBounce(CiTestCase):
+ 
+-- 
+2.27.0
+
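The patch above reads adminUsername, computerName, and
disablePasswordAuthentication from imds_md['compute']['osProfile']. A condensed
sketch of that lookup; the generic helper name is illustrative only:

def _os_profile_field(imds_data, field):
    # Return an osProfile value from IMDS metadata, or None when absent.
    try:
        return imds_data['compute']['osProfile'][field]
    except KeyError:
        return None


sample_imds = {
    'compute': {
        'osProfile': {
            'adminUsername': 'username1',
            'computerName': 'hostname1',
            'disablePasswordAuthentication': 'true',
        }
    }
}

username = _os_profile_field(sample_imds, 'adminUsername')    # 'username1'
hostname = _os_profile_field(sample_imds, 'computerName')     # 'hostname1'
disable_pw = _os_profile_field(
    sample_imds, 'disablePasswordAuthentication') == 'true'   # True
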
diff --git a/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch b/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch
new file mode 100644
index 0000000..efc9fc2
--- /dev/null
+++ b/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch
@@ -0,0 +1,315 @@
+From ca5b83cee7b45bf56eec258db739cb5fe51b3231 Mon Sep 17 00:00:00 2001
+From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com>
+Date: Mon, 26 Apr 2021 07:28:39 -0700
+Subject: [PATCH 6/7] Azure: Retry net metadata during nic attach for
+ non-timeout errs (#878)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [6/7] 4e6e44f017d5ffcb72ac8959a94f80c71fef9560
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+When network interfaces are hot-attached to the VM, attempting to get
+network metadata might return 410 (or 500, 503, etc.) because the info
+is not yet available. In those cases, we retry getting the metadata
+before giving up. The only case where we stop and wait for further nic
+attach events is when the call times out despite retries, which means
+the interface is likely not the primary interface.
+---
+ cloudinit/sources/DataSourceAzure.py          | 65 +++++++++++--
+ tests/unittests/test_datasource/test_azure.py | 95 ++++++++++++++++---
+ 2 files changed, 140 insertions(+), 20 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 6d7954ee..d0be6d84 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -17,6 +17,7 @@ from time import sleep
+ from xml.dom import minidom
+ import xml.etree.ElementTree as ET
+ from enum import Enum
++import requests
+ 
+ from cloudinit import dmi
+ from cloudinit import log as logging
+@@ -665,7 +666,9 @@ class DataSourceAzure(sources.DataSource):
+             self,
+             fallback_nic,
+             retries,
+-            md_type=metadata_type.compute):
++            md_type=metadata_type.compute,
++            exc_cb=retry_on_url_exc,
++            infinite=False):
+         """
+         Wrapper for get_metadata_from_imds so that we can have flexibility
+         in which IMDS api-version we use. If a particular instance of IMDS
+@@ -685,7 +688,8 @@ class DataSourceAzure(sources.DataSource):
+                         fallback_nic=fallback_nic,
+                         retries=0,
+                         md_type=md_type,
+-                        api_version=IMDS_VER_WANT
++                        api_version=IMDS_VER_WANT,
++                        exc_cb=exc_cb
+                     )
+                 except UrlError as err:
+                     LOG.info(
+@@ -708,7 +712,9 @@ class DataSourceAzure(sources.DataSource):
+             fallback_nic=fallback_nic,
+             retries=retries,
+             md_type=md_type,
+-            api_version=IMDS_VER_MIN
++            api_version=IMDS_VER_MIN,
++            exc_cb=exc_cb,
++            infinite=infinite
+         )
+ 
+     def device_name_to_device(self, name):
+@@ -938,6 +944,9 @@ class DataSourceAzure(sources.DataSource):
+         is_primary = False
+         expected_nic_count = -1
+         imds_md = None
++        metadata_poll_count = 0
++        metadata_logging_threshold = 1
++        metadata_timeout_count = 0
+ 
+         # For now, only a VM's primary NIC can contact IMDS and WireServer. If
+         # DHCP fails for a NIC, we have no mechanism to determine if the NIC is
+@@ -962,14 +971,48 @@ class DataSourceAzure(sources.DataSource):
+                                     % (ifname, e), logger_func=LOG.error)
+             raise
+ 
++        # Retry polling network metadata for a limited duration only when the
++        # calls fail due to timeout. This is because the platform drops packets
++        # going towards IMDS when it is not a primary nic. If the calls fail
++        # due to other issues like 410, 503 etc, then it means we are primary
++        # but IMDS service is unavailable at the moment. Retry indefinitely in
++        # those cases since we cannot move on without the network metadata.
++        def network_metadata_exc_cb(msg, exc):
++            nonlocal metadata_timeout_count, metadata_poll_count
++            nonlocal metadata_logging_threshold
++
++            metadata_poll_count = metadata_poll_count + 1
++
++            # Log when needed but back off exponentially to avoid exploding
++            # the log file.
++            if metadata_poll_count >= metadata_logging_threshold:
++                metadata_logging_threshold *= 2
++                report_diagnostic_event(
++                    "Ran into exception when attempting to reach %s "
++                    "after %d polls." % (msg, metadata_poll_count),
++                    logger_func=LOG.error)
++
++                if isinstance(exc, UrlError):
++                    report_diagnostic_event("poll IMDS with %s failed. "
++                                            "Exception: %s and code: %s" %
++                                            (msg, exc.cause, exc.code),
++                                            logger_func=LOG.error)
++
++            if exc.cause and isinstance(exc.cause, requests.Timeout):
++                metadata_timeout_count = metadata_timeout_count + 1
++                return (metadata_timeout_count <= 10)
++            return True
++
+         # Primary nic detection will be optimized in the future. The fact that
+         # primary nic is being attached first helps here. Otherwise each nic
+         # could add several seconds of delay.
+         try:
+             imds_md = self.get_imds_data_with_api_fallback(
+                 ifname,
+-                5,
+-                metadata_type.network
++                0,
++                metadata_type.network,
++                network_metadata_exc_cb,
++                True
+             )
+         except Exception as e:
+             LOG.warning(
+@@ -2139,7 +2182,9 @@ def _generate_network_config_from_fallback_config() -> dict:
+ def get_metadata_from_imds(fallback_nic,
+                            retries,
+                            md_type=metadata_type.compute,
+-                           api_version=IMDS_VER_MIN):
++                           api_version=IMDS_VER_MIN,
++                           exc_cb=retry_on_url_exc,
++                           infinite=False):
+     """Query Azure's instance metadata service, returning a dictionary.
+ 
+     If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
+@@ -2158,7 +2203,7 @@ def get_metadata_from_imds(fallback_nic,
+     kwargs = {'logfunc': LOG.debug,
+               'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
+               'func': _get_metadata_from_imds,
+-              'args': (retries, md_type, api_version,)}
++              'args': (retries, exc_cb, md_type, api_version, infinite)}
+     if net.is_up(fallback_nic):
+         return util.log_time(**kwargs)
+     else:
+@@ -2176,14 +2221,16 @@ def get_metadata_from_imds(fallback_nic,
+ @azure_ds_telemetry_reporter
+ def _get_metadata_from_imds(
+         retries,
++        exc_cb,
+         md_type=metadata_type.compute,
+-        api_version=IMDS_VER_MIN):
++        api_version=IMDS_VER_MIN,
++        infinite=False):
+     url = "{}?api-version={}".format(md_type.value, api_version)
+     headers = {"Metadata": "true"}
+     try:
+         response = readurl(
+             url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
+-            retries=retries, exception_cb=retry_on_url_exc)
++            retries=retries, exception_cb=exc_cb, infinite=infinite)
+     except Exception as e:
+         # pylint:disable=no-member
+         if isinstance(e, UrlError) and e.code == 400:
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index d9817d84..c4a8e08d 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+             "http://169.254.169.254/metadata/instance?api-version="
+             "2019-06-01", exception_cb=mock.ANY,
+             headers=mock.ANY, retries=mock.ANY,
+-            timeout=mock.ANY)
++            timeout=mock.ANY, infinite=False)
+ 
+     @mock.patch(MOCKPATH + 'readurl', autospec=True)
+     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+@@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+             "http://169.254.169.254/metadata/instance/network?api-version="
+             "2019-06-01", exception_cb=mock.ANY,
+             headers=mock.ANY, retries=mock.ANY,
+-            timeout=mock.ANY)
++            timeout=mock.ANY, infinite=False)
+ 
+     @mock.patch(MOCKPATH + 'readurl', autospec=True)
+     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+@@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+             "http://169.254.169.254/metadata/instance?api-version="
+             "2019-06-01", exception_cb=mock.ANY,
+             headers=mock.ANY, retries=mock.ANY,
+-            timeout=mock.ANY)
++            timeout=mock.ANY, infinite=False)
+ 
+     @mock.patch(MOCKPATH + 'readurl', autospec=True)
+     @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
+@@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+         m_readurl.assert_called_with(
+             self.network_md_url, exception_cb=mock.ANY,
+             headers={'Metadata': 'true'}, retries=2,
+-            timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
++            timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False)
+ 
+     @mock.patch('cloudinit.url_helper.time.sleep')
+     @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+@@ -2694,15 +2694,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
+ 
+         def nic_attach_ret(nl_sock, nics_found):
+             nonlocal m_attach_call_count
+-            if m_attach_call_count == 0:
+-                m_attach_call_count = m_attach_call_count + 1
++            m_attach_call_count = m_attach_call_count + 1
++            if m_attach_call_count == 1:
+                 return "eth0"
+-            return "eth1"
++            elif m_attach_call_count == 2:
++                return "eth1"
++            raise RuntimeError("Must have found primary nic by now.")
++
++        # Simulate two NICs by adding the same one twice.
++        md = {
++            "interface": [
++                IMDS_NETWORK_METADATA['interface'][0],
++                IMDS_NETWORK_METADATA['interface'][0]
++            ]
++        }
+ 
+-        def network_metadata_ret(ifname, retries, type):
+-            # Simulate two NICs by adding the same one twice.
+-            md = IMDS_NETWORK_METADATA
+-            md['interface'].append(md['interface'][0])
++        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
+             if ifname == "eth0":
+                 return md
+             raise requests.Timeout('Fake connection timeout')
+@@ -2724,6 +2731,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
+         self.assertEqual(1, m_imds.call_count)
+         self.assertEqual(2, m_link_up.call_count)
+ 
++    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
++    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
++    def test_check_if_nic_is_primary_retries_on_failures(
++            self, m_dhcpv4, m_imds):
++        """Retry polling for network metadata on all failures except timeout"""
++        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
++        lease = {
++            'interface': 'eth9', 'fixed-address': '192.168.2.9',
++            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
++            'unknown-245': '624c3620'}
++
++        eth0Retries = []
++        eth1Retries = []
++        # Simulate two NICs by adding the same one twice.
++        md = {
++            "interface": [
++                IMDS_NETWORK_METADATA['interface'][0],
++                IMDS_NETWORK_METADATA['interface'][0]
++            ]
++        }
++
++        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
++            nonlocal eth0Retries, eth1Retries
++
++            # Simulate readurl functionality with retries and
++            # exception callbacks so that the callback logic can be
++            # validated.
++            if ifname == "eth0":
++                cause = requests.HTTPError()
++                for _ in range(0, 15):
++                    error = url_helper.UrlError(cause=cause, code=410)
++                    eth0Retries.append(exc_cb("No goal state.", error))
++            else:
++                cause = requests.Timeout('Fake connection timeout')
++                for _ in range(0, 10):
++                    error = url_helper.UrlError(cause=cause)
++                    eth1Retries.append(exc_cb("Connection timeout", error))
++                # Should stop retrying after 10 retries
++                eth1Retries.append(exc_cb("Connection timeout", error))
++                raise cause
++            return md
++
++        m_imds.side_effect = network_metadata_ret
++
++        dhcp_ctx = mock.MagicMock(lease=lease)
++        dhcp_ctx.obtain_lease.return_value = lease
++        m_dhcpv4.return_value = dhcp_ctx
++
++        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
++        self.assertEqual(True, is_primary)
++        self.assertEqual(2, expected_nic_count)
++
++        # All Eth0 errors are non-timeout errors. So we should have been
++        # retrying indefinitely until success.
++        for i in eth0Retries:
++            self.assertTrue(i)
++
++        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
++        self.assertEqual(False, is_primary)
++
++        # All Eth1 errors are timeout errors. Retry happens for a max of 10 and
++        # then we should have moved on assuming it is not the primary nic.
++        for i in range(0, 10):
++            self.assertTrue(eth1Retries[i])
++        self.assertFalse(eth1Retries[10])
++
+     @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+     def test_wait_for_link_up_returns_if_already_up(
+             self, m_is_link_up):
+-- 
+2.27.0
+
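The core of the patch above is the exception callback passed to readurl:
non-timeout errors (410, 500, 503, ...) are retried indefinitely because they
imply IMDS is temporarily unavailable on the primary NIC, while timeouts are
capped because they imply the NIC is not primary. A standalone sketch of that
policy; the helper name is illustrative:

import requests


def make_network_metadata_exc_cb(max_timeouts=10):
    # Build an exception callback: return True to keep retrying, False to stop.
    timeout_count = 0

    def exc_cb(msg, exc):
        nonlocal timeout_count
        cause = getattr(exc, 'cause', None)
        if cause is not None and isinstance(cause, requests.Timeout):
            # Timeouts suggest the platform is dropping packets because this
            # is not the primary NIC; give up after max_timeouts attempts.
            timeout_count += 1
            return timeout_count <= max_timeouts
        # Other errors (410, 500, 503, ...) mean IMDS is temporarily
        # unavailable; keep retrying indefinitely.
        return True

    return exc_cb
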
diff --git a/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch b/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch
new file mode 100644
index 0000000..d4e7e37
--- /dev/null
+++ b/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch
@@ -0,0 +1,129 @@
+From c0df7233fa99d4191b5d4142e209e7465d8db5f6 Mon Sep 17 00:00:00 2001
+From: Anh Vo <anhvo@microsoft.com>
+Date: Tue, 27 Apr 2021 13:40:59 -0400
+Subject: [PATCH 7/7] Azure: adding support for consuming userdata from IMDS
+ (#884)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [7/7] 32f840412da1a0f49b9ab5ba1d6f1bcb1bfacc16
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py          | 23 ++++++++-
+ tests/unittests/test_datasource/test_azure.py | 50 +++++++++++++++++++
+ 2 files changed, 72 insertions(+), 1 deletion(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index d0be6d84..a66f023d 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -83,7 +83,7 @@ AGENT_SEED_DIR = '/var/lib/waagent'
+ IMDS_TIMEOUT_IN_SECONDS = 2
+ IMDS_URL = "http://169.254.169.254/metadata"
+ IMDS_VER_MIN = "2019-06-01"
+-IMDS_VER_WANT = "2020-10-01"
++IMDS_VER_WANT = "2021-01-01"
+ 
+ 
+ # This holds SSH key data including if the source was
+@@ -539,6 +539,20 @@ class DataSourceAzure(sources.DataSource):
+                     imds_disable_password
+                 )
+                 crawled_data['metadata']['disable_password'] = imds_disable_password  # noqa: E501
++
++            # only use userdata from imds if OVF did not provide custom data
++            # userdata provided by IMDS is always base64 encoded
++            if not userdata_raw:
++                imds_userdata = _userdata_from_imds(imds_md)
++                if imds_userdata:
++                    LOG.debug("Retrieved userdata from IMDS")
++                    try:
++                        crawled_data['userdata_raw'] = base64.b64decode(
++                            ''.join(imds_userdata.split()))
++                    except Exception:
++                        report_diagnostic_event(
++                            "Bad userdata in IMDS",
++                            logger_func=LOG.warning)
+             found = cdev
+ 
+             report_diagnostic_event(
+@@ -1512,6 +1526,13 @@ def _username_from_imds(imds_data):
+         return None
+ 
+ 
++def _userdata_from_imds(imds_data):
++    try:
++        return imds_data['compute']['userData']
++    except KeyError:
++        return None
++
++
+ def _hostname_from_imds(imds_data):
+     try:
+         return imds_data['compute']['osProfile']['computerName']
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index c4a8e08d..f8433690 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -1899,6 +1899,56 @@ scbus-1 on xpt0 bus 0
+         dsrc.get_data()
+         self.assertTrue(dsrc.metadata["disable_password"])
+ 
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_userdata_from_imds(self, m_get_metadata_from_imds):
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        odata = {'HostName': "myhost", 'UserName': "myuser"}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++        userdata = "userdataImds"
++        imds_data = copy.deepcopy(NETWORK_METADATA)
++        imds_data["compute"]["osProfile"] = dict(
++            adminUsername="username1",
++            computerName="hostname1",
++            disablePasswordAuthentication="true",
++        )
++        imds_data["compute"]["userData"] = b64e(userdata)
++        m_get_metadata_from_imds.return_value = imds_data
++        dsrc = self._get_ds(data)
++        ret = dsrc.get_data()
++        self.assertTrue(ret)
++        self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8'))
++
++    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
++    def test_userdata_from_imds_with_customdata_from_OVF(
++            self, m_get_metadata_from_imds):
++        userdataOVF = "userdataOVF"
++        odata = {
++            'HostName': "myhost", 'UserName': "myuser",
++            'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'}
++        }
++        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
++        data = {
++            'ovfcontent': construct_valid_ovf_env(data=odata),
++            'sys_cfg': sys_cfg
++        }
++
++        userdataImds = "userdataImds"
++        imds_data = copy.deepcopy(NETWORK_METADATA)
++        imds_data["compute"]["osProfile"] = dict(
++            adminUsername="username1",
++            computerName="hostname1",
++            disablePasswordAuthentication="true",
++        )
++        imds_data["compute"]["userData"] = b64e(userdataImds)
++        m_get_metadata_from_imds.return_value = imds_data
++        dsrc = self._get_ds(data)
++        ret = dsrc.get_data()
++        self.assertTrue(ret)
++        self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8'))
++
+ 
+ class TestAzureBounce(CiTestCase):
+ 
+-- 
+2.27.0
+
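As the patch above notes in its inline comments, IMDS userData is base64
encoded and is only consumed when the OVF did not supply custom data. A small
sketch of that selection logic; pick_userdata is a hypothetical helper:

import base64


def pick_userdata(ovf_userdata, imds_md):
    # OVF custom data, when present, always wins over IMDS userData.
    if ovf_userdata:
        return ovf_userdata
    imds_userdata = imds_md.get('compute', {}).get('userData')
    if not imds_userdata:
        return None
    try:
        # IMDS userData is base64; strip whitespace before decoding.
        return base64.b64decode(''.join(imds_userdata.split()))
    except Exception:
        # Bad userdata in IMDS: the patch logs a diagnostic event and moves on.
        return None


encoded = base64.b64encode(b'userdataImds').decode()
assert pick_userdata('', {'compute': {'userData': encoded}}) == b'userdataImds'
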
diff --git a/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch b/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch
new file mode 100644
index 0000000..6f6c109
--- /dev/null
+++ b/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch
@@ -0,0 +1,177 @@
+From 01489fb91f64f6137ddf88c39feabe4296f3a156 Mon Sep 17 00:00:00 2001
+From: Anh Vo <anhvo@microsoft.com>
+Date: Fri, 23 Apr 2021 10:18:05 -0400
+Subject: [PATCH 4/7] Azure: eject the provisioning iso before reporting ready
+ (#861)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [4/7] ba830546a62ac5bea33b91d133d364a897b9f6c0
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+Due to Hyper-V implementation details, ISO ejection is more efficient when
+performed from within the guest, so the code attempts a best-effort ejection.
+Failure during ejection does not prevent reporting ready. If the in-guest
+ejection succeeds, the later ejection from the platform is a no-op; if it
+fails, the ISO is still ejected at the platform level.
+---
+ cloudinit/sources/DataSourceAzure.py          | 22 +++++++++++++++---
+ cloudinit/sources/helpers/azure.py            | 23 ++++++++++++++++---
+ .../test_datasource/test_azure_helper.py      | 13 +++++++++--
+ 3 files changed, 50 insertions(+), 8 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 020b7006..39e67c4f 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -332,6 +332,7 @@ class DataSourceAzure(sources.DataSource):
+     dsname = 'Azure'
+     _negotiated = False
+     _metadata_imds = sources.UNSET
++    _ci_pkl_version = 1
+ 
+     def __init__(self, sys_cfg, distro, paths):
+         sources.DataSource.__init__(self, sys_cfg, distro, paths)
+@@ -346,8 +347,13 @@ class DataSourceAzure(sources.DataSource):
+         # Regenerate network config new_instance boot and every boot
+         self.update_events['network'].add(EventType.BOOT)
+         self._ephemeral_dhcp_ctx = None
+-
+         self.failed_desired_api_version = False
++        self.iso_dev = None
++
++    def _unpickle(self, ci_pkl_version: int) -> None:
++        super()._unpickle(ci_pkl_version)
++        if "iso_dev" not in self.__dict__:
++            self.iso_dev = None
+ 
+     def __str__(self):
+         root = sources.DataSource.__str__(self)
+@@ -459,6 +465,13 @@ class DataSourceAzure(sources.DataSource):
+                     '%s was not mountable' % cdev, logger_func=LOG.warning)
+                 continue
+ 
++            report_diagnostic_event("Found provisioning metadata in %s" % cdev,
++                                    logger_func=LOG.debug)
++
++            # save the iso device for ejection before reporting ready
++            if cdev.startswith("/dev"):
++                self.iso_dev = cdev
++
+             perform_reprovision = reprovision or self._should_reprovision(ret)
+             perform_reprovision_after_nic_attach = (
+                 reprovision_after_nic_attach or
+@@ -1226,7 +1239,9 @@ class DataSourceAzure(sources.DataSource):
+         @return: The success status of sending the ready signal.
+         """
+         try:
+-            get_metadata_from_fabric(None, lease['unknown-245'])
++            get_metadata_from_fabric(fallback_lease_file=None,
++                                     dhcp_opts=lease['unknown-245'],
++                                     iso_dev=self.iso_dev)
+             return True
+         except Exception as e:
+             report_diagnostic_event(
+@@ -1332,7 +1347,8 @@ class DataSourceAzure(sources.DataSource):
+         metadata_func = partial(get_metadata_from_fabric,
+                                 fallback_lease_file=self.
+                                 dhclient_lease_file,
+-                                pubkey_info=pubkey_info)
++                                pubkey_info=pubkey_info,
++                                iso_dev=self.iso_dev)
+ 
+         LOG.debug("negotiating with fabric via agent command %s",
+                   self.ds_cfg['agent_command'])
+diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
+index 03e7156b..ad476076 100755
+--- a/cloudinit/sources/helpers/azure.py
++++ b/cloudinit/sources/helpers/azure.py
+@@ -865,7 +865,19 @@ class WALinuxAgentShim:
+         return endpoint_ip_address
+ 
+     @azure_ds_telemetry_reporter
+-    def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict:
++    def eject_iso(self, iso_dev) -> None:
++        try:
++            LOG.debug("Ejecting the provisioning iso")
++            subp.subp(['eject', iso_dev])
++        except Exception as e:
++            report_diagnostic_event(
++                "Failed ejecting the provisioning iso: %s" % e,
++                logger_func=LOG.debug)
++
++    @azure_ds_telemetry_reporter
++    def register_with_azure_and_fetch_data(self,
++                                           pubkey_info=None,
++                                           iso_dev=None) -> dict:
+         """Gets the VM's GoalState from Azure, uses the GoalState information
+         to report ready/send the ready signal/provisioning complete signal to
+         Azure, and then uses pubkey_info to filter and obtain the user's
+@@ -891,6 +903,10 @@ class WALinuxAgentShim:
+             ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
+         health_reporter = GoalStateHealthReporter(
+             goal_state, self.azure_endpoint_client, self.endpoint)
++
++        if iso_dev is not None:
++            self.eject_iso(iso_dev)
++
+         health_reporter.send_ready_signal()
+         return {'public-keys': ssh_keys}
+ 
+@@ -1046,11 +1062,12 @@ class WALinuxAgentShim:
+ 
+ @azure_ds_telemetry_reporter
+ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
+-                             pubkey_info=None):
++                             pubkey_info=None, iso_dev=None):
+     shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
+                             dhcp_options=dhcp_opts)
+     try:
+-        return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
++        return shim.register_with_azure_and_fetch_data(
++            pubkey_info=pubkey_info, iso_dev=iso_dev)
+     finally:
+         shim.clean_up()
+ 
+diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
+index 63482c6c..552c7905 100644
+--- a/tests/unittests/test_datasource/test_azure_helper.py
++++ b/tests/unittests/test_datasource/test_azure_helper.py
+@@ -1009,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase):
+         self.GoalState.return_value.container_id = self.test_container_id
+         self.GoalState.return_value.instance_id = self.test_instance_id
+ 
++    def test_eject_iso_is_called(self):
++        shim = wa_shim()
++        with mock.patch.object(
++            shim, 'eject_iso', autospec=True
++        ) as m_eject_iso:
++            shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
++            m_eject_iso.assert_called_once_with("/dev/sr0")
++
+     def test_http_client_does_not_use_certificate_for_report_ready(self):
+         shim = wa_shim()
+         shim.register_with_azure_and_fetch_data()
+@@ -1283,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
+ 
+     def test_calls_shim_register_with_azure_and_fetch_data(self):
+         m_pubkey_info = mock.MagicMock()
+-        azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
++        azure_helper.get_metadata_from_fabric(
++            pubkey_info=m_pubkey_info, iso_dev="/dev/sr0")
+         self.assertEqual(
+             1,
+             self.m_shim.return_value
+                 .register_with_azure_and_fetch_data.call_count)
+         self.assertEqual(
+-            mock.call(pubkey_info=m_pubkey_info),
++            mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
+             self.m_shim.return_value
+                 .register_with_azure_and_fetch_data.call_args)
+ 
+-- 
+2.27.0
+
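For orientation, a minimal standalone sketch of the flow the hunks above introduce: best-effort ejection of the provisioning ISO just before the ready signal is sent. Names and the use of the standard subprocess module are illustrative assumptions; cloud-init's own subp and telemetry helpers are not reproduced here.

    import logging
    import subprocess

    LOG = logging.getLogger(__name__)


    def eject_iso(iso_dev):
        """Best-effort eject: failures are logged as diagnostics, never raised."""
        try:
            LOG.debug("Ejecting the provisioning iso")
            subprocess.run(["eject", iso_dev], check=True)
        except Exception as e:
            LOG.debug("Failed ejecting the provisioning iso: %s", e)


    def register_and_fetch_data(send_ready_signal, iso_dev=None):
        # Goal-state handling elided; the point is the ordering below.
        if iso_dev is not None:
            eject_iso(iso_dev)      # eject before reporting ready
        send_ready_signal()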
diff --git a/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch b/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch
new file mode 100644
index 0000000..627fd2b
--- /dev/null
+++ b/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch
@@ -0,0 +1,90 @@
+From f11bbe7f04a48eebcb446e283820d7592f76cf86 Mon Sep 17 00:00:00 2001
+From: Johnson Shi <Johnson.Shi@microsoft.com>
+Date: Thu, 25 Mar 2021 07:20:10 -0700
+Subject: [PATCH 2/7] Azure helper: Ensure Azure http handler sleeps between
+ retries (#842)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [2/7] e8f8bb658b629a8444bd2ba19f109952acf33311
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+Ensure that the Azure helper's http handler sleeps a fixed duration
+between retry failure attempts. The http handler will sleep a fixed
+duration between failed attempts regardless of whether the attempt
+failed due to (1) request timing out or (2) instant failure (no
+timeout).
+
+Due to certain platform issues, the http request to the Azure endpoint
+may instantly fail without reaching the http timeout duration. Without
+sleeping a fixed duration in between retry attempts, the http handler
+will loop through the max retry attempts quickly. This causes the
+communication between cloud-init and the Azure platform to be less
+resilient due to the short total duration if there is no sleep in
+between retries.
+---
+ cloudinit/sources/helpers/azure.py                   |  2 ++
+ tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++--
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
+index d3055d08..03e7156b 100755
+--- a/cloudinit/sources/helpers/azure.py
++++ b/cloudinit/sources/helpers/azure.py
+@@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str:
+ 
+     max_readurl_attempts = 240
+     default_readurl_timeout = 5
++    sleep_duration_between_retries = 5
+     periodic_logging_attempts = 12
+ 
+     if 'timeout' not in kwargs:
+@@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str:
+                     'attempt %d with exception: %s' %
+                     (url, attempt, e),
+                     logger_func=LOG.debug)
++            time.sleep(sleep_duration_between_retries)
+ 
+     raise exc
+ 
+diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
+index b8899807..63482c6c 100644
+--- a/tests/unittests/test_datasource/test_azure_helper.py
++++ b/tests/unittests/test_datasource/test_azure_helper.py
+@@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
+ 
+     max_readurl_attempts = 240
+     default_readurl_timeout = 5
++    sleep_duration_between_retries = 5
+     periodic_logging_attempts = 12
+ 
+     def setUp(self):
+@@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
+         self.m_readurl = patches.enter_context(
+             mock.patch.object(
+                 azure_helper.url_helper, 'readurl', mock.MagicMock()))
+-        patches.enter_context(
+-            mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
++        self.m_sleep = patches.enter_context(
++            mock.patch.object(azure_helper.time, 'sleep', autospec=True))
+ 
+     def test_http_with_retries(self):
+         self.m_readurl.return_value = 'TestResp'
+@@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
+             self.m_readurl.call_count,
+             self.periodic_logging_attempts + 1)
+ 
++        # Ensure that cloud-init did sleep between each failed request
++        self.assertEqual(
++            self.m_sleep.call_count,
++            self.periodic_logging_attempts)
++        self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
++
+     def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
+         self.m_readurl.side_effect = \
+             [SentinelException] * self.periodic_logging_attempts + \
+-- 
+2.27.0
+
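For reference, a minimal sketch of the retry pattern this patch backports: a fixed sleep after every failed attempt, whether the request timed out or failed instantly. The use of requests and the constant names are illustrative assumptions; cloud-init's url_helper and telemetry reporting are not reproduced here.

    import time

    import requests

    MAX_ATTEMPTS = 240
    TIMEOUT = 5                     # seconds per attempt
    SLEEP_BETWEEN_RETRIES = 5       # seconds, as in the value added above


    def http_with_retries(url):
        last_exc = None
        for attempt in range(1, MAX_ATTEMPTS + 1):
            try:
                return requests.get(url, timeout=TIMEOUT)
            except Exception as e:
                last_exc = e
                # Sleep even when the failure was instant, so the retry
                # budget still spans a useful total duration.
                time.sleep(SLEEP_BETWEEN_RETRIES)
        raise last_exc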
diff --git a/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch b/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch
index 02b5deb..32fe4ac 100644
--- a/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch
+++ b/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch
@@ -1,12 +1,12 @@
-From 67d62f2c0df1fcb5cd86be73cba6064075aa61e3 Mon Sep 17 00:00:00 2001
+From c3d41dc6b18df0d74f569b1a0ba43c8118437948 Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Fri, 14 Jan 2022 16:39:46 +0100
+Date: Fri, 14 Jan 2022 16:40:24 +0100
 Subject: [PATCH 3/6] Change netifaces dependency to 0.10.4 (#965)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 43: Datasource for VMware
-RH-Commit: [3/6] 81f0638e62841bab09b423d9cb5d340026ee87c2
-RH-Bugzilla: 2040704
+RH-MergeRequest: 44: Datasource for VMware
+RH-Commit: [3/6] d25d68427ab8b86ee1521c66483e9300e8fcc735
+RH-Bugzilla: 2026587
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
 
diff --git a/SOURCES/ci-Datasource-for-VMware-953.patch b/SOURCES/ci-Datasource-for-VMware-953.patch
index 4e24674..137ee07 100644
--- a/SOURCES/ci-Datasource-for-VMware-953.patch
+++ b/SOURCES/ci-Datasource-for-VMware-953.patch
@@ -1,12 +1,12 @@
-From 697152978b1194aa10ab39597802bb2b4041773c Mon Sep 17 00:00:00 2001
+From 1917af220242840ec1b21f82f80532cf6548cc00 Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Fri, 14 Jan 2022 16:37:42 +0100
+Date: Fri, 14 Jan 2022 16:34:49 +0100
 Subject: [PATCH 2/6] Datasource for VMware (#953)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 43: Datasource for VMware
-RH-Commit: [2/6] a0999fa63b8117959839f62bd470f9fe632b31cc
-RH-Bugzilla: 2040704
+RH-MergeRequest: 44: Datasource for VMware
+RH-Commit: [2/6] bb6e58dfeaf8b64d2801ddb4cb73868cf31de3ef
+RH-Bugzilla: 2026587
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
 
@@ -73,7 +73,7 @@ Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
  create mode 100644 tests/unittests/test_datasource/test_vmware.py
 
 diff --git a/README.md b/README.md
-index 435405da..b98f61d3 100644
+index 435405da..aa4fad63 100644
 --- a/README.md
 +++ b/README.md
 @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
@@ -81,7 +81,7 @@ index 435405da..b98f61d3 100644
  | Supported OSes | Supported Public Clouds | Supported Private Clouds |
  | --- | --- | --- |
 -| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
-+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
++| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
  
  ## To start developing cloud-init
  
diff --git a/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch b/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch
new file mode 100644
index 0000000..a691f26
--- /dev/null
+++ b/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch
@@ -0,0 +1,180 @@
+From b226448134b5182ba685702e7b7a486db772d956 Mon Sep 17 00:00:00 2001
+From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+Date: Fri, 4 Mar 2022 11:21:16 +0100
+Subject: [PATCH 1/2] - Detect a Python version change and clear the cache
+ (#857)
+
+RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857)
+RH-Commit: [1/2] c562cd802eabae9dc14079de0b26d471d2229ca8
+RH-Bugzilla: 1935826
+RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+commit 78e89b03ecb29e7df3181b1219a0b5f44b9d7532
+Author: Robert Schweikert <rjschwei@suse.com>
+Date:   Thu Jul 1 12:35:40 2021 -0400
+
+    - Detect a Python version change and clear the cache (#857)
+
+    summary: Clear cache when a Python version change is detected
+
+    When a distribution gets updated, it is possible that the Python version
+    changes. Python makes no guarantee that pickle is consistent across
+    versions; as such, we need to purge the cache and start over.
+
+    Co-authored-by: James Falcon <therealfalcon@gmail.com>
+Conflicts:
+   tests/integration_tests/util.py: test is not present downstream
+
+Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+---
+ cloudinit/cmd/main.py                         |  30 ++++++++++
+ cloudinit/cmd/tests/test_main.py              |   2 +
+ .../assets/test_version_change.pkl            | Bin 0 -> 21 bytes
+ .../modules/test_ssh_auth_key_fingerprints.py |   2 +-
+ .../modules/test_version_change.py            |  56 ++++++++++++++++++
+ 5 files changed, 89 insertions(+), 1 deletion(-)
+ create mode 100644 tests/integration_tests/assets/test_version_change.pkl
+ create mode 100644 tests/integration_tests/modules/test_version_change.py
+
+diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
+index baf1381f..21213a4a 100644
+--- a/cloudinit/cmd/main.py
++++ b/cloudinit/cmd/main.py
+@@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
+             (cmdline_name, url, path))
+ 
+ 
++def purge_cache_on_python_version_change(init):
++    """Purge the cache if python version changed on us.
++
++    There could be changes not represented in our cache (obj.pkl) after we
++    upgrade to a new version of python, so at that point clear the cache
++    """
++    current_python_version = '%d.%d' % (
++        sys.version_info.major, sys.version_info.minor
++    )
++    python_version_path = os.path.join(
++        init.paths.get_cpath('data'), 'python-version'
++    )
++    if os.path.exists(python_version_path):
++        cached_python_version = open(python_version_path).read()
++        # The Python version has changed out from under us, anything that was
++        # pickled previously is likely useless due to API changes.
++        if cached_python_version != current_python_version:
++            LOG.debug('Python version change detected. Purging cache')
++            init.purge_cache(True)
++            util.write_file(python_version_path, current_python_version)
++    else:
++        if os.path.exists(init.paths.get_ipath_cur('obj_pkl')):
++            LOG.info(
++                'Writing python-version file. '
++                'Cache compatibility status is currently unknown.'
++            )
++        util.write_file(python_version_path, current_python_version)
++
++
+ def main_init(name, args):
+     deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+     if args.local:
+@@ -276,6 +305,7 @@ def main_init(name, args):
+         util.logexc(LOG, "Failed to initialize, likely bad things to come!")
+     # Stage 4
+     path_helper = init.paths
++    purge_cache_on_python_version_change(init)
+     mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
+ 
+     if mode == sources.DSMODE_NETWORK:
+diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
+index 78b27441..1f5975b0 100644
+--- a/cloudinit/cmd/tests/test_main.py
++++ b/cloudinit/cmd/tests/test_main.py
+@@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
+ 
+ 
+ class TestMain(FilesystemMockingTestCase):
++    with_logs = True
++    allowed_subp = False
+ 
+     def setUp(self):
+         super(TestMain, self).setUp()
+diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+index b9b0d85e..e1946cb1 100644
+--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
++++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+@@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\
+ no_ssh_fingerprints: true
+ """
+ 
+-USER_DATA_SSH_AUTHKEY_ENABLE="""\
++USER_DATA_SSH_AUTHKEY_ENABLE = """\
+ #cloud-config
+ ssh_genkeytypes:
+   - ecdsa
+diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
+new file mode 100644
+index 00000000..4e9ab63f
+--- /dev/null
++++ b/tests/integration_tests/modules/test_version_change.py
+@@ -0,0 +1,56 @@
++from pathlib import Path
++
++from tests.integration_tests.instances import IntegrationInstance
++from tests.integration_tests.util import ASSETS_DIR
++
++
++PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
++TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl'
++
++
++def _assert_no_pickle_problems(log):
++    assert 'Failed loading pickled blob' not in log
++    assert 'Traceback' not in log
++    assert 'WARN' not in log
++
++
++def test_reboot_without_version_change(client: IntegrationInstance):
++    log = client.read_from_file('/var/log/cloud-init.log')
++    assert 'Python version change detected' not in log
++    assert 'Cache compatibility status is currently unknown.' not in log
++    _assert_no_pickle_problems(log)
++
++    client.restart()
++    log = client.read_from_file('/var/log/cloud-init.log')
++    assert 'Python version change detected' not in log
++    assert 'Could not determine Python version used to write cache' not in log
++    _assert_no_pickle_problems(log)
++
++    # Now ensure that loading a bad pickle gives us problems
++    client.push_file(TEST_PICKLE, PICKLE_PATH)
++    client.restart()
++    log = client.read_from_file('/var/log/cloud-init.log')
++    assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log
++
++
++def test_cache_purged_on_version_change(client: IntegrationInstance):
++    # Start by pushing the invalid pickle so we'll hit an error if the
++    # cache didn't actually get purged
++    client.push_file(TEST_PICKLE, PICKLE_PATH)
++    client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
++    client.restart()
++    log = client.read_from_file('/var/log/cloud-init.log')
++    assert 'Python version change detected. Purging cache' in log
++    _assert_no_pickle_problems(log)
++
++
++def test_log_message_on_missing_version_file(client: IntegrationInstance):
++    # Start by pushing a pickle so we can see the log message
++    client.push_file(TEST_PICKLE, PICKLE_PATH)
++    client.execute("rm /var/lib/cloud/data/python-version")
++    client.restart()
++    log = client.read_from_file('/var/log/cloud-init.log')
++    assert (
++        'Writing python-version file. '
++        'Cache compatibility status is currently unknown.'
++    ) in log
+-- 
+2.31.1
+
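As a companion to the patch above, a standalone sketch of the version check it adds: compare the running interpreter version against a recorded marker file and purge the pickled cache on mismatch. The path and the purge callback are illustrative assumptions, not cloud-init's actual Paths/Init objects.

    import os
    import sys

    VERSION_FILE = "/var/lib/cloud/data/python-version"   # illustrative path


    def purge_cache_on_python_version_change(purge_cache):
        current = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
        if os.path.exists(VERSION_FILE):
            with open(VERSION_FILE) as f:
                cached = f.read()
            if cached != current:
                # Pickles written by another interpreter version may be
                # unusable, so drop them and start over.
                purge_cache()
                with open(VERSION_FILE, "w") as f:
                    f.write(current)
        else:
            # No marker yet: record the current version; compatibility of any
            # pre-existing cache is unknown at this point.
            with open(VERSION_FILE, "w") as f:
                f.write(current)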
diff --git a/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch b/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
index a99e43f..d4ec623 100644
--- a/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
+++ b/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
@@ -1,14 +1,14 @@
-From e38ff212eb35943961b79f0d30cdceffc1bc0905 Mon Sep 17 00:00:00 2001
+From 7bd016008429f0a18393a070d88e669f3ed89caa Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Wed, 2 Mar 2022 10:18:02 +0100
+Date: Fri, 11 Feb 2022 14:37:46 +0100
 Subject: [PATCH] Fix IPv6 netmask format for sysconfig (#1215)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 49: Fix IPv6 netmask format for sysconfig (#1215)
-RH-Commit: [1/1] 7a97580791fc03f6ae878a699cf92f620f58a237
-RH-Bugzilla: 2060026
+RH-MergeRequest: 48: Fix IPv6 netmask format for sysconfig (#1215)
+RH-Commit: [1/1] 4c940bbcf85dba1fce9f4acb9fc7820c0d7777f6
+RH-Bugzilla: 2046540
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
-RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
 
 commit b97a30f0a05c1dea918c46ca9c05c869d15fe2d5
 Author: Harald <hjensas@redhat.com>
@@ -45,16 +45,6 @@ Date:   Tue Feb 8 15:49:00 2022 +0100
 
     LP: #1959148
 
-Conflicts (most related to different format style):
-        cloudinit/net/__init__.py
-        cloudinit/net/network_state.py
-        cloudinit/net/sysconfig.py
-        cloudinit/sources/DataSourceOpenNebula.py
-        cloudinit/sources/helpers/vmware/imc/config_nic.py
-        tests/unittests/net/test_init.py (file not backported)
-        tests/unittests/net/test_network_state.py (file not backported)
-        tests/unittests/test_net.py
-
 Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
 ---
  cloudinit/net/__init__.py                     |   7 +-
diff --git a/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch b/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch
new file mode 100644
index 0000000..889b8db
--- /dev/null
+++ b/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch
@@ -0,0 +1,705 @@
+From 04a4cc7b8da04ba4103118cf9d975d8e9548e0dc Mon Sep 17 00:00:00 2001
+From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+Date: Fri, 4 Mar 2022 11:23:22 +0100
+Subject: [PATCH 2/2] Fix MIME policy failure on python version upgrade (#934)
+
+RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857)
+RH-Commit: [2/2] 05fc8c52a39b5ad464ad146488703467e39d73b1
+RH-Bugzilla: 1935826
+RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+commit eacb0353803263934aa2ac827c37e461c87cb107
+Author: James Falcon <therealfalcon@gmail.com>
+Date:   Thu Jul 15 17:52:21 2021 -0500
+
+    Fix MIME policy failure on python version upgrade (#934)
+
+    Python 3.6 added a new `policy` attribute to `MIMEMultipart`.
+    MIMEMultipart may be part of the cached object pickle of a datasource.
+    Upgrading from an old version of python to 3.6+ will cause the
+    datasource to be invalid after pickle load.
+
+    This commit uses the upgrade framework to attempt to access the mime
+    message and fail early (thus discarding the cache) if we cannot.
+    Commit 78e89b03 should fix this issue more generally.
+
+Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+---
+ cloudinit/sources/__init__.py                 |  18 +
+ cloudinit/stages.py                           |   2 +
+ .../assets/trusty_with_mime.pkl               | 572 ++++++++++++++++++
+ .../modules/test_persistence.py               |  30 +
+ 4 files changed, 622 insertions(+)
+ create mode 100644 tests/integration_tests/assets/trusty_with_mime.pkl
+ create mode 100644 tests/integration_tests/modules/test_persistence.py
+
+diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
+index 7d74f8d9..338861e6 100644
+--- a/cloudinit/sources/__init__.py
++++ b/cloudinit/sources/__init__.py
+@@ -74,6 +74,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource',
+                                  _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+ 
+ 
++class DatasourceUnpickleUserDataError(Exception):
++    """Raised when userdata is unable to be unpickled due to python upgrades"""
++
++
+ class DataSourceNotFoundException(Exception):
+     pass
+ 
+@@ -227,6 +231,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
+             self.vendordata2 = None
+         if not hasattr(self, 'vendordata2_raw'):
+             self.vendordata2_raw = None
++        if hasattr(self, 'userdata') and self.userdata is not None:
++            # If userdata stores MIME data, on < python3.6 it will be
++            # missing the 'policy' attribute that exists on >=python3.6.
++            # Calling str() on the userdata will attempt to access this
++            # policy attribute. This will raise an exception, causing
++            # the pickle load to fail, so cloud-init will discard the cache
++            try:
++                str(self.userdata)
++            except AttributeError as e:
++                LOG.debug(
++                    "Unable to unpickle datasource: %s."
++                    " Ignoring current cache.", e
++                )
++                raise DatasourceUnpickleUserDataError() from e
+ 
+     def __str__(self):
+         return type_utils.obj_name(self)
+diff --git a/cloudinit/stages.py b/cloudinit/stages.py
+index 83e25dd1..e709a5cf 100644
+--- a/cloudinit/stages.py
++++ b/cloudinit/stages.py
+@@ -980,6 +980,8 @@ def _pkl_load(fname):
+         return None
+     try:
+         return pickle.loads(pickle_contents)
++    except sources.DatasourceUnpickleUserDataError:
++        return None
+     except Exception:
+         util.logexc(LOG, "Failed loading pickled blob from %s", fname)
+         return None
+diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl
+new file mode 100644
+index 00000000..a4089ecf
+--- /dev/null
++++ b/tests/integration_tests/assets/trusty_with_mime.pkl
+@@ -0,0 +1,572 @@
++ccopy_reg
++_reconstructor
++p1
++(ccloudinit.sources.DataSourceNoCloud
++DataSourceNoCloudNet
++p2
++c__builtin__
++object
++p3
++NtRp4
++(dp5
++S'paths'
++p6
++g1
++(ccloudinit.helpers
++Paths
++p7
++g3
++NtRp8
++(dp9
++S'lookups'
++p10
++(dp11
++S'cloud_config'
++p12
++S'cloud-config.txt'
++p13
++sS'userdata'
++p14
++S'user-data.txt.i'
++p15
++sS'vendordata'
++p16
++S'vendor-data.txt.i'
++p17
++sS'userdata_raw'
++p18
++S'user-data.txt'
++p19
++sS'boothooks'
++p20
++g20
++sS'scripts'
++p21
++g21
++sS'sem'
++p22
++g22
++sS'data'
++p23
++g23
++sS'vendor_scripts'
++p24
++S'scripts/vendor'
++p25
++sS'handlers'
++p26
++g26
++sS'obj_pkl'
++p27
++S'obj.pkl'
++p28
++sS'vendordata_raw'
++p29
++S'vendor-data.txt'
++p30
++sS'vendor_cloud_config'
++p31
++S'vendor-cloud-config.txt'
++p32
++ssS'template_tpl'
++p33
++S'/etc/cloud/templates/%s.tmpl'
++p34
++sS'cfgs'
++p35
++(dp36
++S'cloud_dir'
++p37
++S'/var/lib/cloud/'
++p38
++sS'templates_dir'
++p39
++S'/etc/cloud/templates/'
++p40
++sS'upstart_dir'
++p41
++S'/etc/init/'
++p42
++ssS'cloud_dir'
++p43
++g38
++sS'datasource'
++p44
++NsS'upstart_conf_d'
++p45
++g42
++sS'boot_finished'
++p46
++S'/var/lib/cloud/instance/boot-finished'
++p47
++sS'instance_link'
++p48
++S'/var/lib/cloud/instance'
++p49
++sS'seed_dir'
++p50
++S'/var/lib/cloud/seed'
++p51
++sbsS'supported_seed_starts'
++p52
++(S'http://'
++p53
++S'https://'
++p54
++S'ftp://'
++p55
++tp56
++sS'sys_cfg'
++p57
++(dp58
++S'output'
++p59
++(dp60
++S'all'
++p61
++S'| tee -a /var/log/cloud-init-output.log'
++p62
++ssS'users'
++p63
++(lp64
++S'default'
++p65
++asS'def_log_file'
++p66
++S'/var/log/cloud-init.log'
++p67
++sS'cloud_final_modules'
++p68
++(lp69
++S'rightscale_userdata'
++p70
++aS'scripts-vendor'
++p71
++aS'scripts-per-once'
++p72
++aS'scripts-per-boot'
++p73
++aS'scripts-per-instance'
++p74
++aS'scripts-user'
++p75
++aS'ssh-authkey-fingerprints'
++p76
++aS'keys-to-console'
++p77
++aS'phone-home'
++p78
++aS'final-message'
++p79
++aS'power-state-change'
++p80
++asS'disable_root'
++p81
++I01
++sS'syslog_fix_perms'
++p82
++S'syslog:adm'
++p83
++sS'log_cfgs'
++p84
++(lp85
++(lp86
++S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
++p87
++aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
++p88
++aa(lp89
++g87
++aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
++p90
++aasS'cloud_init_modules'
++p91
++(lp92
++S'migrator'
++p93
++aS'seed_random'
++p94
++aS'bootcmd'
++p95
++aS'write-files'
++p96
++aS'growpart'
++p97
++aS'resizefs'
++p98
++aS'set_hostname'
++p99
++aS'update_hostname'
++p100
++aS'update_etc_hosts'
++p101
++aS'ca-certs'
++p102
++aS'rsyslog'
++p103
++aS'users-groups'
++p104
++aS'ssh'
++p105
++asS'preserve_hostname'
++p106
++I00
++sS'_log'
++p107
++(lp108
++g87
++ag90
++ag88
++asS'datasource_list'
++p109
++(lp110
++S'NoCloud'
++p111
++aS'ConfigDrive'
++p112
++aS'OpenNebula'
++p113
++aS'Azure'
++p114
++aS'AltCloud'
++p115
++aS'OVF'
++p116
++aS'MAAS'
++p117
++aS'GCE'
++p118
++aS'OpenStack'
++p119
++aS'CloudSigma'
++p120
++aS'Ec2'
++p121
++aS'CloudStack'
++p122
++aS'SmartOS'
++p123
++aS'None'
++p124
++asS'vendor_data'
++p125
++(dp126
++S'prefix'
++p127
++(lp128
++sS'enabled'
++p129
++I01
++ssS'cloud_config_modules'
++p130
++(lp131
++S'emit_upstart'
++p132
++aS'disk_setup'
++p133
++aS'mounts'
++p134
++aS'ssh-import-id'
++p135
++aS'locale'
++p136
++aS'set-passwords'
++p137
++aS'grub-dpkg'
++p138
++aS'apt-pipelining'
++p139
++aS'apt-configure'
++p140
++aS'package-update-upgrade-install'
++p141
++aS'landscape'
++p142
++aS'timezone'
++p143
++aS'puppet'
++p144
++aS'chef'
++p145
++aS'salt-minion'
++p146
++aS'mcollective'
++p147
++aS'disable-ec2-metadata'
++p148
++aS'runcmd'
++p149
++aS'byobu'
++p150
++assg14
++(iemail.mime.multipart
++MIMEMultipart
++p151
++(dp152
++S'_headers'
++p153
++(lp154
++(S'Content-Type'
++p155
++S'multipart/mixed; boundary="===============4291038100093149247=="'
++tp156
++a(S'MIME-Version'
++p157
++S'1.0'
++p158
++tp159
++a(S'Number-Attachments'
++p160
++S'1'
++tp161
++asS'_payload'
++p162
++(lp163
++(iemail.mime.base
++MIMEBase
++p164
++(dp165
++g153
++(lp166
++(g157
++g158
++tp167
++a(S'Content-Type'
++p168
++S'text/x-not-multipart'
++tp169
++a(S'Content-Disposition'
++p170
++S'attachment; filename="part-001"'
++tp171
++asg162
++S''
++sS'_charset'
++p172
++NsS'_default_type'
++p173
++S'text/plain'
++p174
++sS'preamble'
++p175
++NsS'defects'
++p176
++(lp177
++sS'_unixfrom'
++p178
++NsS'epilogue'
++p179
++Nsbasg172
++Nsg173
++g174
++sg175
++Nsg176
++(lp180
++sg178
++Nsg179
++Nsbsg16
++S'#cloud-config\n{}\n\n'
++p181
++sg18
++S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n#   This is my final message!\n#   $version\n#   $timestamp\n#   $datasource\n#   $uptime\n# updates:\n#   network:\n#     when: [\'hotplug\']\n'
++p182
++sg29
++NsS'dsmode'
++p183
++S'net'
++p184
++sS'seed'
++p185
++S'/var/lib/cloud/seed/nocloud-net'
++p186
++sS'cmdline_id'
++p187
++S'ds=nocloud-net'
++p188
++sS'ud_proc'
++p189
++g1
++(ccloudinit.user_data
++UserDataProcessor
++p190
++g3
++NtRp191
++(dp192
++g6
++g8
++sS'ssl_details'
++p193
++(dp194
++sbsg50
++g186
++sS'ds_cfg'
++p195
++(dp196
++sS'distro'
++p197
++g1
++(ccloudinit.distros.ubuntu
++Distro
++p198
++g3
++NtRp199
++(dp200
++S'osfamily'
++p201
++S'debian'
++p202
++sS'_paths'
++p203
++g8
++sS'name'
++p204
++S'ubuntu'
++p205
++sS'_runner'
++p206
++g1
++(ccloudinit.helpers
++Runners
++p207
++g3
++NtRp208
++(dp209
++g6
++g8
++sS'sems'
++p210
++(dp211
++sbsS'_cfg'
++p212
++(dp213
++S'paths'
++p214
++(dp215
++g37
++g38
++sg39
++g40
++sg41
++g42
++ssS'default_user'
++p216
++(dp217
++S'shell'
++p218
++S'/bin/bash'
++p219
++sS'name'
++p220
++S'ubuntu'
++p221
++sS'sudo'
++p222
++(lp223
++S'ALL=(ALL) NOPASSWD:ALL'
++p224
++asS'lock_passwd'
++p225
++I01
++sS'gecos'
++p226
++S'Ubuntu'
++p227
++sS'groups'
++p228
++(lp229
++S'adm'
++p230
++aS'audio'
++p231
++aS'cdrom'
++p232
++aS'dialout'
++p233
++aS'dip'
++p234
++aS'floppy'
++p235
++aS'netdev'
++p236
++aS'plugdev'
++p237
++aS'sudo'
++p238
++aS'video'
++p239
++assS'package_mirrors'
++p240
++(lp241
++(dp242
++S'arches'
++p243
++(lp244
++S'i386'
++p245
++aS'amd64'
++p246
++asS'failsafe'
++p247
++(dp248
++S'security'
++p249
++S'http://security.ubuntu.com/ubuntu'
++p250
++sS'primary'
++p251
++S'http://archive.ubuntu.com/ubuntu'
++p252
++ssS'search'
++p253
++(dp254
++S'security'
++p255
++(lp256
++sS'primary'
++p257
++(lp258
++S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
++p259
++aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
++p260
++aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
++p261
++assa(dp262
++S'arches'
++p263
++(lp264
++S'armhf'
++p265
++aS'armel'
++p266
++aS'default'
++p267
++asS'failsafe'
++p268
++(dp269
++S'security'
++p270
++S'http://ports.ubuntu.com/ubuntu-ports'
++p271
++sS'primary'
++p272
++S'http://ports.ubuntu.com/ubuntu-ports'
++p273
++ssasS'ssh_svcname'
++p274
++S'ssh'
++p275
++ssbsS'metadata'
++p276
++(dp277
++g183
++g184
++sS'local-hostname'
++p278
++S'me'
++p279
++sS'instance-id'
++p280
++S'me'
++p281
++ssb.
+\ No newline at end of file
+diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
+new file mode 100644
+index 00000000..00fdeaea
+--- /dev/null
++++ b/tests/integration_tests/modules/test_persistence.py
+@@ -0,0 +1,30 @@
++# This file is part of cloud-init. See LICENSE file for license information.
++"""Test the behavior of loading/discarding pickle data"""
++from pathlib import Path
++
++import pytest
++
++from tests.integration_tests.instances import IntegrationInstance
++from tests.integration_tests.util import (
++    ASSETS_DIR,
++    verify_ordered_items_in_text,
++)
++
++
++PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
++TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl'
++
++
++@pytest.mark.lxd_container
++def test_log_message_on_missing_version_file(client: IntegrationInstance):
++    client.push_file(TEST_PICKLE, PICKLE_PATH)
++    client.restart()
++    assert client.execute('cloud-init status --wait').ok
++    log = client.read_from_file('/var/log/cloud-init.log')
++    verify_ordered_items_in_text([
++        "Unable to unpickle datasource: 'MIMEMultipart' object has no "
++        "attribute 'policy'. Ignoring current cache.",
++        'no cache found',
++        'Searching for local data source',
++        'SUCCESS: found local data from DataSourceNoCloud'
++    ], log)
+-- 
+2.31.1
+
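For reference, a minimal sketch of the unpickle-time probe this patch adds: touch the cached userdata while loading so an attribute missing after a Python upgrade (such as MIMEMultipart.policy) surfaces immediately and the cache is silently discarded. Class and function names here are illustrative, not cloud-init's exact API.

    import pickle


    class UnpickleUserDataError(Exception):
        """Cached userdata is unusable after a Python upgrade."""


    def _check_userdata(ds):
        userdata = getattr(ds, "userdata", None)
        if userdata is not None:
            try:
                str(userdata)       # forces access to attributes like .policy
            except AttributeError as e:
                raise UnpickleUserDataError() from e


    def load_cached_datasource(pickle_bytes):
        try:
            ds = pickle.loads(pickle_bytes)
            _check_userdata(ds)
            return ds
        except UnpickleUserDataError:
            return None             # behave as if no cache was found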
diff --git a/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch b/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
index 2e5349c..c47788f 100644
--- a/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
+++ b/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
@@ -1,12 +1,12 @@
-From 532a36edf0dea2b98835bd08e285bec9c50eb0f9 Mon Sep 17 00:00:00 2001
+From 0eeec94882779de76c08b1a7faf862e22f21f242 Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Fri, 14 Jan 2022 16:42:41 +0100
+Date: Fri, 14 Jan 2022 16:42:46 +0100
 Subject: [PATCH 5/6] Revert unnecesary lcase in ds-identify (#978)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 43: Datasource for VMware
-RH-Commit: [5/6] 95634e4b42e3abfb91182b090c312eef29c63e54
-RH-Bugzilla: 2040704
+RH-MergeRequest: 44: Datasource for VMware
+RH-Commit: [5/6] f7385c15cf17a9c4a2fa15b29afd1b8a96b24d1e
+RH-Bugzilla: 2026587
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
 
diff --git a/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch b/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch
index 492f1c6..07c44fe 100644
--- a/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch
+++ b/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch
@@ -1,12 +1,12 @@
-From cc79cb3958b943b755a9b11b3e87ce820058ccaa Mon Sep 17 00:00:00 2001
+From ded01bd47c65636e59dc332d06fb8acb982ec677 Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Fri, 14 Jan 2022 16:41:47 +0100
+Date: Fri, 14 Jan 2022 16:41:52 +0100
 Subject: [PATCH 4/6] Update dscheck_VMware's rpctool check (#970)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 43: Datasource for VMware
-RH-Commit: [4/6] 6f4d732c55c521869210d8aeedfa1150ea5a92f8
-RH-Bugzilla: 2040704
+RH-MergeRequest: 44: Datasource for VMware
+RH-Commit: [4/6] 509f68596f2d8f32027677f756b9d81e6a507ff1
+RH-Bugzilla: 2026587
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
 
diff --git a/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch b/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch
new file mode 100644
index 0000000..1ccfec9
--- /dev/null
+++ b/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch
@@ -0,0 +1,470 @@
+From 6e79106a09a0d142915da1fb48640575bb4bfe08 Mon Sep 17 00:00:00 2001
+From: Anh Vo <anhvo@microsoft.com>
+Date: Tue, 13 Apr 2021 17:39:39 -0400
+Subject: [PATCH 3/7] azure: Removing ability to invoke walinuxagent (#799)
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 45: Add support for userdata on Azure from IMDS
+RH-Commit: [3/7] f5e98665bf2093edeeccfcd95b47df2e44a40536
+RH-Bugzilla: 2023940
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+
+Invoking walinuxagent from within cloud-init is no longer
+supported/necessary
+---
+ cloudinit/sources/DataSourceAzure.py          | 137 ++++--------------
+ doc/rtd/topics/datasources/azure.rst          |  62 ++------
+ tests/unittests/test_datasource/test_azure.py |  97 -------------
+ 3 files changed, 35 insertions(+), 261 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index de1452ce..020b7006 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -381,53 +381,6 @@ class DataSourceAzure(sources.DataSource):
+                     util.logexc(LOG, "handling set_hostname failed")
+         return False
+ 
+-    @azure_ds_telemetry_reporter
+-    def get_metadata_from_agent(self):
+-        temp_hostname = self.metadata.get('local-hostname')
+-        agent_cmd = self.ds_cfg['agent_command']
+-        LOG.debug("Getting metadata via agent.  hostname=%s cmd=%s",
+-                  temp_hostname, agent_cmd)
+-
+-        self.bounce_network_with_azure_hostname()
+-
+-        try:
+-            invoke_agent(agent_cmd)
+-        except subp.ProcessExecutionError:
+-            # claim the datasource even if the command failed
+-            util.logexc(LOG, "agent command '%s' failed.",
+-                        self.ds_cfg['agent_command'])
+-
+-        ddir = self.ds_cfg['data_dir']
+-
+-        fp_files = []
+-        key_value = None
+-        for pk in self.cfg.get('_pubkeys', []):
+-            if pk.get('value', None):
+-                key_value = pk['value']
+-                LOG.debug("SSH authentication: using value from fabric")
+-            else:
+-                bname = str(pk['fingerprint'] + ".crt")
+-                fp_files += [os.path.join(ddir, bname)]
+-                LOG.debug("SSH authentication: "
+-                          "using fingerprint from fabric")
+-
+-        with events.ReportEventStack(
+-                name="waiting-for-ssh-public-key",
+-                description="wait for agents to retrieve SSH keys",
+-                parent=azure_ds_reporter):
+-            # wait very long for public SSH keys to arrive
+-            # https://bugs.launchpad.net/cloud-init/+bug/1717611
+-            missing = util.log_time(logfunc=LOG.debug,
+-                                    msg="waiting for SSH public key files",
+-                                    func=util.wait_for_files,
+-                                    args=(fp_files, 900))
+-            if len(missing):
+-                LOG.warning("Did not find files, but going on: %s", missing)
+-
+-        metadata = {}
+-        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
+-        return metadata
+-
+     def _get_subplatform(self):
+         """Return the subplatform metadata source details."""
+         if self.seed.startswith('/dev'):
+@@ -1354,35 +1307,32 @@ class DataSourceAzure(sources.DataSource):
+            On failure, returns False.
+         """
+ 
+-        if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
+-            self.bounce_network_with_azure_hostname()
++        self.bounce_network_with_azure_hostname()
+ 
+-            pubkey_info = None
+-            try:
+-                raise KeyError(
+-                    "Not using public SSH keys from IMDS"
+-                )
+-                # pylint:disable=unreachable
+-                public_keys = self.metadata['imds']['compute']['publicKeys']
+-                LOG.debug(
+-                    'Successfully retrieved %s key(s) from IMDS',
+-                    len(public_keys)
+-                    if public_keys is not None
+-                    else 0
+-                )
+-            except KeyError:
+-                LOG.debug(
+-                    'Unable to retrieve SSH keys from IMDS during '
+-                    'negotiation, falling back to OVF'
+-                )
+-                pubkey_info = self.cfg.get('_pubkeys', None)
+-
+-            metadata_func = partial(get_metadata_from_fabric,
+-                                    fallback_lease_file=self.
+-                                    dhclient_lease_file,
+-                                    pubkey_info=pubkey_info)
+-        else:
+-            metadata_func = self.get_metadata_from_agent
++        pubkey_info = None
++        try:
++            raise KeyError(
++                "Not using public SSH keys from IMDS"
++            )
++            # pylint:disable=unreachable
++            public_keys = self.metadata['imds']['compute']['publicKeys']
++            LOG.debug(
++                'Successfully retrieved %s key(s) from IMDS',
++                len(public_keys)
++                if public_keys is not None
++                else 0
++            )
++        except KeyError:
++            LOG.debug(
++                'Unable to retrieve SSH keys from IMDS during '
++                'negotiation, falling back to OVF'
++            )
++            pubkey_info = self.cfg.get('_pubkeys', None)
++
++        metadata_func = partial(get_metadata_from_fabric,
++                                fallback_lease_file=self.
++                                dhclient_lease_file,
++                                pubkey_info=pubkey_info)
+ 
+         LOG.debug("negotiating with fabric via agent command %s",
+                   self.ds_cfg['agent_command'])
+@@ -1617,33 +1567,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
+     return True
+ 
+ 
+-@azure_ds_telemetry_reporter
+-def crtfile_to_pubkey(fname, data=None):
+-    pipeline = ('openssl x509 -noout -pubkey < "$0" |'
+-                'ssh-keygen -i -m PKCS8 -f /dev/stdin')
+-    (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
+-                            capture=True, data=data)
+-    return out.rstrip()
+-
+-
+-@azure_ds_telemetry_reporter
+-def pubkeys_from_crt_files(flist):
+-    pubkeys = []
+-    errors = []
+-    for fname in flist:
+-        try:
+-            pubkeys.append(crtfile_to_pubkey(fname))
+-        except subp.ProcessExecutionError:
+-            errors.append(fname)
+-
+-    if errors:
+-        report_diagnostic_event(
+-            "failed to convert the crt files to pubkey: %s" % errors,
+-            logger_func=LOG.warning)
+-
+-    return pubkeys
+-
+-
+ @azure_ds_telemetry_reporter
+ def write_files(datadir, files, dirmode=None):
+ 
+@@ -1672,16 +1595,6 @@ def write_files(datadir, files, dirmode=None):
+         util.write_file(filename=fname, content=content, mode=0o600)
+ 
+ 
+-@azure_ds_telemetry_reporter
+-def invoke_agent(cmd):
+-    # this is a function itself to simplify patching it for test
+-    if cmd:
+-        LOG.debug("invoking agent: %s", cmd)
+-        subp.subp(cmd, shell=(not isinstance(cmd, list)))
+-    else:
+-        LOG.debug("not invoking agent")
+-
+-
+ def find_child(node, filter_func):
+     ret = []
+     if not node.hasChildNodes():
+diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
+index e04c3a33..ad9f2236 100644
+--- a/doc/rtd/topics/datasources/azure.rst
++++ b/doc/rtd/topics/datasources/azure.rst
+@@ -5,28 +5,6 @@ Azure
+ 
+ This datasource finds metadata and user-data from the Azure cloud platform.
+ 
+-walinuxagent
+-------------
+-walinuxagent has several functions within images.  For cloud-init
+-specifically, the relevant functionality it performs is to register the
+-instance with the Azure cloud platform at boot so networking will be
+-permitted.  For more information about the other functionality of
+-walinuxagent, see `Azure's documentation
+-<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
+-(Note, however, that only one of walinuxagent's provisioning and cloud-init
+-should be used to perform instance customisation.)
+-
+-If you are configuring walinuxagent yourself, you will want to ensure that you
+-have `Provisioning.UseCloudInit
+-<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
+-``y``.
+-
+-
+-Builtin Agent
+--------------
+-An alternative to using walinuxagent to register to the Azure cloud platform
+-is to use the ``__builtin__`` agent command.  This section contains more
+-background on what that code path does, and how to enable it.
+ 
+ The Azure cloud platform provides initial data to an instance via an attached
+ CD formatted in UDF.  That CD contains a 'ovf-env.xml' file that provides some
+@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
+ 'dhclient_hook' of cloud-init itself. This sub-command will write the client
+ information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
+ 
+-In order for cloud-init to leverage this method to find the endpoint, the
+-cloud.cfg file must contain:
+-
+-.. sourcecode:: yaml
+-
+-  datasource:
+-    Azure:
+-      set_hostname: False
+-      agent_command: __builtin__
+-
+ If those files are not available, the fallback is to check the leases file
+ for the endpoint server (again option 245).
+ 
+@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+ 
+ The settings that may be configured are:
+ 
+- * **agent_command**: Either __builtin__ (default) or a command to run to getcw
+-   metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
+-   provided command to obtain metadata.
+  * **apply_network_config**: Boolean set to True to use network configuration
+    described by Azure's IMDS endpoint instead of fallback network config of
+    dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
+@@ -121,7 +86,6 @@ An example configuration with the default values is provided below:
+ 
+   datasource:
+     Azure:
+-      agent_command: __builtin__
+       apply_network_config: true
+       data_dir: /var/lib/waagent
+       dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+@@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``)
+ If both ``UserData`` and ``CustomData`` are provided behavior is undefined on
+ which will be selected.
+ 
+-In the example below, user-data provided is 'this is my userdata', and the
+-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
+-That agent command will take affect as if it were specified in system config.
++In the example below, user-data provided is 'this is my userdata'
+ 
+ Example:
+ 
+@@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as
+ Whatever value the instance provides in its dhcp request will resolve in the
+ domain returned in the 'search' request.
+ 
+-The interesting issue is that a generic image will already have a hostname
+-configured.  The ubuntu cloud images have 'ubuntu' as the hostname of the
+-system, and the initial dhcp request on eth0 is not guaranteed to occur after
+-the datasource code has been run.  So, on first boot, that initial value will
+-be sent in the dhcp request and *that* value will resolve.
+-
+-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
+-dhcp request must be made with the new value.  Walinuxagent (in its current
+-version) handles this by polling the state of hostname and bouncing ('``ifdown
+-eth0; ifup eth0``' the network interface if it sees that a change has been
+-made.
++A generic image will already have a hostname configured.  The ubuntu
++cloud images have 'ubuntu' as the hostname of the system, and the
++initial dhcp request on eth0 is not guaranteed to occur after the
++datasource code has been run.  So, on first boot, that initial value
++will be sent in the dhcp request and *that* value will resolve.
+ 
+-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
+-method via '``hostname $HostName``', and then bouncing the interface.  This
++In order to make the ``HostName`` provided in the ovf-env.xml resolve,
++a dhcp request must be made with the new value. cloud-init handles
++this by setting the hostname in the DataSource's 'get_data' method via
++'``hostname $HostName``', and then bouncing the interface.  This
+ behavior can be configured or disabled in the datasource config.  See
+ 'Configuration' above.
+ 
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index dedebeb1..320fa857 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0
+         def dsdevs():
+             return data.get('dsdevs', [])
+ 
+-        def _invoke_agent(cmd):
+-            data['agent_invoked'] = cmd
+-
+         def _wait_for_files(flist, _maxwait=None, _naplen=None):
+             data['waited'] = flist
+             return []
+ 
+-        def _pubkeys_from_crt_files(flist):
+-            data['pubkey_files'] = flist
+-            return ["pubkey_from: %s" % f for f in flist]
+-
+         if data.get('ovfcontent') is not None:
+             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+                          {'ovf-env.xml': data['ovfcontent']})
+@@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0
+ 
+         self.apply_patches([
+             (dsaz, 'list_possible_azure_ds_devs', dsdevs),
+-            (dsaz, 'invoke_agent', _invoke_agent),
+-            (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
+             (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
+             (dsaz, 'get_hostname', mock.MagicMock()),
+             (dsaz, 'set_hostname', mock.MagicMock()),
+@@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0
+             ret = dsrc.get_data()
+             self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+             self.assertFalse(ret)
+-            self.assertNotIn('agent_invoked', data)
+             # Assert that for non viable platforms,
+             # there is no communication with the Azure datasource.
+             self.assertEqual(
+@@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0
+             ret = dsrc.get_data()
+             self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+             self.assertFalse(ret)
+-            self.assertNotIn('agent_invoked', data)
+             self.assertEqual(
+                 1,
+                 m_report_failure.call_count)
+@@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0
+                 1,
+                 m_crawl_metadata.call_count)
+             self.assertFalse(ret)
+-            self.assertNotIn('agent_invoked', data)
+ 
+     def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+         data = {}
+@@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0
+         self.assertTrue(os.path.isdir(self.waagent_d))
+         self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
+ 
+-    def test_user_cfg_set_agent_command_plain(self):
+-        # set dscfg in via plaintext
+-        # we must have friendly-to-xml formatted plaintext in yaml_cfg
+-        # not all plaintext is expected to work.
+-        yaml_cfg = "{agent_command: my_command}\n"
+-        cfg = yaml.safe_load(yaml_cfg)
+-        odata = {'HostName': "myhost", 'UserName': "myuser",
+-                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+-
+-        dsrc = self._get_ds(data)
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+-
+     @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+                 return_value=None)
+     def test_network_config_set_from_imds(self, m_driver):
+@@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0
+         dsrc.get_data()
+         self.assertEqual('eastus2', dsrc.region)
+ 
+-    def test_user_cfg_set_agent_command(self):
+-        # set dscfg in via base64 encoded yaml
+-        cfg = {'agent_command': "my_command"}
+-        odata = {'HostName': "myhost", 'UserName': "myuser",
+-                 'dscfg': {'text': b64e(yaml.dump(cfg)),
+-                           'encoding': 'base64'}}
+-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+-
+-        dsrc = self._get_ds(data)
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+-
+-    def test_sys_cfg_set_agent_command(self):
+-        sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
+-        data = {'ovfcontent': construct_valid_ovf_env(data={}),
+-                'sys_cfg': sys_cfg}
+-
+-        dsrc = self._get_ds(data)
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-        self.assertEqual(data['agent_invoked'], '_COMMAND')
+-
+     def test_sys_cfg_set_never_destroy_ntfs(self):
+         sys_cfg = {'datasource': {'Azure': {
+             'never_destroy_ntfs': 'user-supplied-value'}}}
+@@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0
+         self.assertTrue(ret)
+         self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
+ 
+-    def test_cfg_has_pubkeys_fingerprint(self):
+-        odata = {'HostName': "myhost", 'UserName': "myuser"}
+-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+-                                                      pubkeys=pubkeys)}
+-
+-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-        for mypk in mypklist:
+-            self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+-            self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
+-
+-    def test_cfg_has_pubkeys_value(self):
+-        # make sure that provided key is used over fingerprint
+-        odata = {'HostName': "myhost", 'UserName': "myuser"}
+-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
+-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+-                                                      pubkeys=pubkeys)}
+-
+-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-
+-        for mypk in mypklist:
+-            self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+-            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
+-
+-    def test_cfg_has_no_fingerprint_has_value(self):
+-        # test value is used when fingerprint not provided
+-        odata = {'HostName': "myhost", 'UserName': "myuser"}
+-        mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
+-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+-                                                      pubkeys=pubkeys)}
+-
+-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+-        ret = self._get_and_setup(dsrc)
+-        self.assertTrue(ret)
+-
+-        for mypk in mypklist:
+-            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
+-
+     def test_default_ephemeral_configs_ephemeral_exists(self):
+         # make sure the ephemeral configs are correct if disk present
+         odata = {}
+@@ -1919,8 +1824,6 @@ class TestAzureBounce(CiTestCase):
+     with_logs = True
+ 
+     def mock_out_azure_moving_parts(self):
+-        self.patches.enter_context(
+-            mock.patch.object(dsaz, 'invoke_agent'))
+         self.patches.enter_context(
+             mock.patch.object(dsaz.util, 'wait_for_files'))
+         self.patches.enter_context(
+-- 
+2.27.0
+
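A brief sketch of the single negotiation path left after this removal: the built-in fabric shim is always used, with public-key fingerprints taken from the OVF configuration instead of being fetched by an external agent. Function and variable names are illustrative stand-ins for the real shim.

    from functools import partial


    def get_metadata_from_fabric(fallback_lease_file=None, pubkey_info=None):
        # Stand-in for the real shim: report ready and return negotiated data.
        return {"public-keys": pubkey_info or []}


    def negotiate(cfg, dhclient_lease_file):
        pubkey_info = cfg.get("_pubkeys", None)     # keys provided via OVF
        metadata_func = partial(
            get_metadata_from_fabric,
            fallback_lease_file=dhclient_lease_file,
            pubkey_info=pubkey_info,
        )
        return metadata_func()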
diff --git a/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch b/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
index 86899cc..44ad400 100644
--- a/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
+++ b/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
@@ -1,14 +1,14 @@
-From 8dc357c036e393ae7d869d3074377f5447fa9b77 Mon Sep 17 00:00:00 2001
+From 478709d7c157a085e3b2fee432e24978a3485234 Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Tue, 26 Oct 2021 22:18:06 +0200
+Date: Wed, 20 Oct 2021 16:28:42 +0200
 Subject: [PATCH] cc_ssh.py: fix private key group owner and permissions
  (#1070)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 34: cc_ssh.py: fix private key group owner and permissions (#1070)
-RH-Commit: [1/1] 6dfd47416dd2cb7ed3822199c43cbd2fdada7aa1 (eesposit/cloud-init)
-RH-Bugzilla: 2017697
-RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+RH-MergeRequest: 32: cc_ssh.py: fix private key group owner and permissions (#1070)
+RH-Commit: [1/1] 0382c3f671ae0fa9cab23dfad1f636967b012148
+RH-Bugzilla: 2013644
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 
 commit ee296ced9c0a61b1484d850b807c601bcd670ec1
diff --git a/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch b/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
index 5cd0ae9..9ea95c1 100644
--- a/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
+++ b/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
@@ -1,13 +1,13 @@
-From 0a6a89e6b243e587daf8ce356fccb5d6a6acf089 Mon Sep 17 00:00:00 2001
+From ea83e72b335e652b080fda66a075c0d1322ed6dc Mon Sep 17 00:00:00 2001
 From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-Date: Tue, 7 Dec 2021 09:56:58 +0100
+Date: Tue, 7 Dec 2021 10:00:41 +0100
 Subject: [PATCH] cloudinit/net: handle two different routes for the same ip
  (#1124)
 
 RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
-RH-MergeRequest: 37: cloudinit/net: handle two different routes for the same ip (#1124)
-RH-Commit: [1/1] 9cd9c38606bfe2395d808a48ac986dce7624e147
-RH-Bugzilla: 2028756
+RH-MergeRequest: 39: cloudinit/net: handle two different routes for the same ip (#1124)
+RH-Commit: [1/1] 6810dc29ce786fbca96d2033386aa69c6ab65997
+RH-Bugzilla: 2028028
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
 
diff --git a/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch b/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
index caa98ad..f257a67 100644
--- a/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
+++ b/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
@@ -1,15 +1,15 @@
-From 3636c2284132dbcd1cc505fb9f81ab722f4f99f0 Mon Sep 17 00:00:00 2001
+From 005d0a98c69d154a00e9fd599c7fbe5aef73c933 Mon Sep 17 00:00:00 2001
 From: Amy Chen <xiachen@redhat.com>
-Date: Fri, 3 Dec 2021 14:38:16 +0800
+Date: Thu, 25 Nov 2021 18:30:48 +0800
 Subject: [PATCH] fix error on upgrade caused by new vendordata2 attributes
 
 RH-Author: xiachen <None>
-RH-MergeRequest: 36: fix error on upgrade caused by new vendordata2 attributes
-RH-Commit: [1/1] c16351924d4220a719380f12c2e8c03185f53c01
-RH-Bugzilla: 2028738
+RH-MergeRequest: 35: fix error on upgrade caused by new vendordata2 attributes
+RH-Commit: [1/1] 9e00a7744838afbbdc5eb14628b7f572beba9f19
+RH-Bugzilla: 2021538
 RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
-RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
 RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
 
 commit d132356cc361abef2d90d4073438f3ab759d5964
 Author: James Falcon <TheRealFalcon@users.noreply.github.com>
diff --git a/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch b/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch
new file mode 100644
index 0000000..13484d3
--- /dev/null
+++ b/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch
@@ -0,0 +1,85 @@
+From 7d4e16bfc1cefbdd4d1477480b02b1d6c1399e4d Mon Sep 17 00:00:00 2001
+From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+Date: Mon, 20 Sep 2021 12:16:36 +0200
+Subject: [PATCH] ssh_utils.py: ignore when sshd_config options are not
+ key/value pairs (#1007)
+
+RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+RH-MergeRequest: 31: ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007)
+RH-Commit: [1/1] 9007fb8a116e98036ff17df0168a76e9a5843671 (eesposit/cloud-init)
+RH-Bugzilla: 1862933
+RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+TESTED: by me
+BREW: 39832462
+
+commit 2ce857248162957a785af61c135ca8433fdbbcde
+Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+Date:   Wed Sep 8 02:08:36 2021 +0200
+
+    ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007)
+
+    As specified in #LP 1845552,
+    In cloudinit/ssh_util.py, in parse_ssh_config_lines(), we attempt to
+    parse each line of sshd_config. This function expects each line to
+    be one of the following forms:
+
+        \# comment
+        key value
+        key=value
+
+    However, options like DenyGroups and DenyUsers are specified to
+    *optionally* accept values in sshd_config.
+    Cloud-init should comply with this and skip the option if a value
+    is not provided.
+
+    Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+
+Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+---
+ cloudinit/ssh_util.py           | 8 +++++++-
+ tests/unittests/test_sshutil.py | 8 ++++++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
+index 9ccadf09..33679dcc 100644
+--- a/cloudinit/ssh_util.py
++++ b/cloudinit/ssh_util.py
+@@ -484,7 +484,13 @@ def parse_ssh_config_lines(lines):
+         try:
+             key, val = line.split(None, 1)
+         except ValueError:
+-            key, val = line.split('=', 1)
++            try:
++                key, val = line.split('=', 1)
++            except ValueError:
++                LOG.debug(
++                    "sshd_config: option \"%s\" has no key/value pair,"
++                    " skipping it", line)
++                continue
+         ret.append(SshdConfigLine(line, key, val))
+     return ret
+ 
+diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
+index a66788bf..08e20050 100644
+--- a/tests/unittests/test_sshutil.py
++++ b/tests/unittests/test_sshutil.py
+@@ -525,6 +525,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
+         self.assertEqual([self.pwauth], result)
+         self.check_line(lines[-1], self.pwauth, "no")
+ 
++    def test_option_without_value(self):
++        """Implementation only accepts key-value pairs."""
++        extended_exlines = self.exlines.copy()
++        denyusers_opt = "DenyUsers"
++        extended_exlines.append(denyusers_opt)
++        lines = ssh_util.parse_ssh_config_lines(list(extended_exlines))
++        self.assertNotIn(denyusers_opt, str(lines))
++
+     def test_single_option_updated(self):
+         """A single update should have change made and line updated."""
+         opt, val = ("UsePAM", "no")
+-- 
+2.27.0
+
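Editor's illustration (not part of the patch above): a minimal standalone sketch of the fallback parsing behaviour the commit message describes — try "key value", fall back to "key=value", and skip bare options such as DenyUsers instead of raising. The helper name parse_lines and the plain (key, value) tuples are assumptions for brevity; cloud-init's actual function is parse_ssh_config_lines and it returns SshdConfigLine objects, as shown in the hunk above.

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)


def parse_lines(lines):
    """Parse sshd_config-style lines into (key, value) tuples,
    skipping options that carry no value."""
    parsed = []
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # blank lines and comments carry no option
        try:
            key, val = line.split(None, 1)       # "key value" form
        except ValueError:
            try:
                key, val = line.split("=", 1)    # "key=value" form
            except ValueError:
                # Bare option such as "DenyUsers": skip it instead of
                # raising, mirroring the behaviour added by the patch.
                LOG.debug("skipping option without a value: %s", line)
                continue
        parsed.append((key, val))
    return parsed


if __name__ == "__main__":
    print(parse_lines(["# a comment",
                       "PasswordAuthentication no",
                       "DenyUsers",
                       "Port=22"]))
    # [('PasswordAuthentication', 'no'), ('Port', '22')]
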
diff --git a/SOURCES/test_version_change.pkl b/SOURCES/test_version_change.pkl
new file mode 100644
index 0000000..65ae93e
Binary files /dev/null and b/SOURCES/test_version_change.pkl differ
diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec
index 156ce5a..04b8907 100644
--- a/SPECS/cloud-init.spec
+++ b/SPECS/cloud-init.spec
@@ -6,7 +6,7 @@
 
 Name:           cloud-init
 Version:        21.1
-Release:        7%{?dist}.5
+Release:        15%{?dist}
 Summary:        Cloud instance init scripts
 
 Group:          System Environment/Base
@@ -14,6 +14,7 @@ License:        GPLv3
 URL:            http://launchpad.net/cloud-init
 Source0:        https://launchpad.net/cloud-init/trunk/%{version}/+download/%{name}-%{version}.tar.gz
 Source1:        cloud-init-tmpfiles.conf
+Source2:        test_version_change.pkl
 
 Patch0001: 0001-Add-initial-redhat-setup.patch
 Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch
@@ -34,26 +35,45 @@ Patch12: ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch
 Patch13: ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch
 # For bz#1995840 - [cloudinit]  Fix home permissions modified by ssh module
 Patch14: ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch
-# For bz#2017697 - cloud-init fails to set host key permissions correctly [rhel-8.5.0.z]
-Patch15: ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
-# For bz#2028738 - cloud-init.service fails to start after package update [rhel-8.5.0.z]
-Patch16: ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
-# For bz#2028756 - [RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP [rhel-8.5.0.z]
-Patch17: ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
-# For bz#2040690 - [RHEL8] [Azure] cloud-init fails to configure the system [rhel-8.5.0.z]
-#Patch18: ci-Add-gdisk-and-openssl-as-deps-to-fix-UEFI-Azure-init.patch
-# For bz#2040704 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z]
-Patch19: ci-Datasource-for-VMware-953.patch
-# For bz#2040704 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z]
-Patch20: ci-Change-netifaces-dependency-to-0.10.4-965.patch
-# For bz#2040704 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z]
-Patch21: ci-Update-dscheck_VMware-s-rpctool-check-970.patch
-# For bz#2040704 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z]
-Patch22: ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
-# For bz#2060026 - cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". [rhel-8.5.0.z]
-Patch23: ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
-# For bz#2040704 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z]
-#Patch23: ci-Add-netifaces-package-as-a-Requires-in-cloud-init.sp.patch
+# For bz#1862933 - cloud-init fails with ValueError: need more than 1 value to unpack[rhel-8]
+Patch15: ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch
+# For bz#2013644 - cloud-init fails to set host key permissions correctly
+Patch16: ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
+# For bz#2021538 - cloud-init.service fails to start after package update
+Patch17: ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
+# For bz#2028028 - [RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP
+Patch18: ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
+# For bz#2039697 - [RHEL8] [Azure] cloud-init fails to configure the system
+# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch20: ci-Datasource-for-VMware-953.patch
+# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch21: ci-Change-netifaces-dependency-to-0.10.4-965.patch
+# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch22: ci-Update-dscheck_VMware-s-rpctool-check-970.patch
+# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch23: ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch24: ci-Add-flexibility-to-IMDS-api-version-793.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch25: ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch26: ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch27: ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch28: ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch29: ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch
+# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata
+Patch30: ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch
+# For bz#2046540 - cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".
+Patch31: ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
+# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8.
+Patch32: ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch
+# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8.
+Patch33: ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch
+# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+
 
 BuildArch:      noarch
 
@@ -102,7 +122,7 @@ Requires:       shadow-utils
 Requires:       util-linux
 Requires:       xfsprogs
 Requires:       dhcp-client
-# https://bugzilla.redhat.com/show_bug.cgi?id=2040690
+# https://bugzilla.redhat.com/show_bug.cgi?id=2039697
 Requires:       gdisk
 Requires:       openssl
 Requires:       python3-netifaces
@@ -122,6 +142,8 @@ ssh keys and to let the user run various scripts.
 sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/env python3|' \
        -e 's|#!/usr/bin/python|#!/usr/bin/python3|' tools/* cloudinit/ssh_util.py
 
+cp -f %{SOURCE2} tests/integration_tests/assets/test_version_change.pkl
+
 %build
 %py3_build
 
@@ -249,37 +271,59 @@ fi
 %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
 
 %changelog
-* Thu Mar 03 2022 Jon Maloy <jmaloy@redhat.com> - 21.1-7.el8_5.5
-- ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch [bz#2060026]
-- Resolves: bz#2060026
-  (cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". [rhel-8.5.0.z])
-
-* Wed Jan 19 2022 Jon Maloy <jmaloy@redhat.com> - 21.1-7.el8_5.4
-- ci-Add-gdisk-and-openssl-as-deps-to-fix-UEFI-Azure-init.patch [bz#2040690]
-- ci-Datasource-for-VMware-953.patch [bz#2040704]
-- ci-Change-netifaces-dependency-to-0.10.4-965.patch [bz#2040704]
-- ci-Update-dscheck_VMware-s-rpctool-check-970.patch [bz#2040704]
-- ci-Revert-unnecesary-lcase-in-ds-identify-978.patch [bz#2040704]
-- ci-Add-netifaces-package-as-a-Requires-in-cloud-init.sp.patch [bz#2040704]
-- Resolves: bz#2040690
-  ([RHEL8] [Azure] cloud-init fails to configure the system [rhel-8.5.0.z])
-- Resolves: bz#2040704
-  ([cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' [rhel-8.5.0.z])
-
-* Wed Dec 08 2021 Jon Maloy <jmaloy@redhat.com> - 21.1-7.el8_5.3
-- ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch [bz#2028756]
-- Resolves: bz#2028756
-  ([RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP [rhel-8.5.0.z])
-
-* Mon Dec 06 2021 Jon Maloy <jmaloy@redhat.com> - 21.1-7.el8_5.2
-- ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch [bz#2028738]
-- Resolves: bz#2028738
-  (cloud-init.service fails to start after package update [rhel-8.5.0.z])
-
-* Tue Nov 02 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-7.el8_5.1
-- ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch [bz#2017697]
-- Resolves: bz#2017697
-  (cloud-init fails to set host key permissions correctly [rhel-8.5.0.z])
+* Fri Apr 01 2022 Camilla Conte <cconte@redhat.com> - 21.1-15
+- ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch [bz#1935826]
+- ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch [bz#1935826]
+- Resolves: bz#1935826
+  ([rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8.)
+
+* Fri Feb 25 2022 Jon Maloy <jmaloy@redhat.com> - 21.1-14
+- ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch [bz#2046540]
+- Resolves: bz#2046540
+  (cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".)
+
+* Tue Jan 25 2022 Jon Maloy <jmaloy@redhat.com> - 21.1-13
+- ci-Add-flexibility-to-IMDS-api-version-793.patch [bz#2023940]
+- ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch [bz#2023940]
+- ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch [bz#2023940]
+- ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch [bz#2023940]
+- ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch [bz#2023940]
+- ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch [bz#2023940]
+- ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch [bz#2023940]
+- Resolves: bz#2023940
+  ([RHEL-8] Support for provisioning Azure VM with userdata)
+
+* Wed Jan 19 2022 Jon Maloy <jmaloy@redhat.com> - 21.1-12
+- ci-Add-gdisk-and-openssl-as-deps-to-fix-UEFI-Azure-init.patch [bz#2039697]
+- ci-Datasource-for-VMware-953.patch [bz#2026587]
+- ci-Change-netifaces-dependency-to-0.10.4-965.patch [bz#2026587]
+- ci-Update-dscheck_VMware-s-rpctool-check-970.patch [bz#2026587]
+- ci-Revert-unnecesary-lcase-in-ds-identify-978.patch [bz#2026587]
+- ci-Add-netifaces-package-as-a-Requires-in-cloud-init.sp.patch [bz#2026587]
+- Resolves: bz#2039697
+  ([RHEL8] [Azure] cloud-init fails to configure the system)
+- Resolves: bz#2026587
+  ([cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo')
+
+* Wed Dec 08 2021 Jon Maloy <jmaloy@redhat.com> - 21.1-11
+- ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch [bz#2028028]
+- Resolves: bz#2028028
+  ([RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP)
+
+* Mon Dec 06 2021 Jon Maloy <jmaloy@redhat.com> - 21.1-10
+- ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch [bz#2021538]
+- Resolves: bz#2021538
+  (cloud-init.service fails to start after package update)
+
+* Mon Oct 25 2021 Jon Maloy <jmaloy@redhat.com> - 21.1-9
+- ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch [bz#2013644]
+- Resolves: bz#2013644
+  (cloud-init fails to set host key permissions correctly)
+
+* Thu Sep 23 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-8
+- ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch [bz#1862933]
+- Resolves: bz#1862933
+  (cloud-init fails with ValueError: need more than 1 value to unpack[rhel-8])
 
 * Fri Aug 27 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-7
 - ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch [bz#1995840]