diff --git a/.cloud-init.metadata b/.cloud-init.metadata
index 5f02606..f7516a9 100644
--- a/.cloud-init.metadata
+++ b/.cloud-init.metadata
@@ -1 +1 @@
-703864e79a15335d1e2552866b63d25f55f8a555 SOURCES/cloud-init-18.2.tar.gz
+a862d6618a4c56c79d3fb0e279f6c93d0f0141cd SOURCES/cloud-init-18.5.tar.gz
diff --git a/.gitignore b/.gitignore
index b4be453..e2ea71d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/cloud-init-18.2.tar.gz
+SOURCES/cloud-init-18.5.tar.gz
diff --git a/SOURCES/0001-Add-initial-redhat-setup.patch b/SOURCES/0001-Add-initial-redhat-setup.patch
index 0de3cdd..6cdf59c 100644
--- a/SOURCES/0001-Add-initial-redhat-setup.patch
+++ b/SOURCES/0001-Add-initial-redhat-setup.patch
@@ -1,37 +1,48 @@
-From 8ff55af097479b3d29519f15624aa6c69fda3bba Mon Sep 17 00:00:00 2001
+From bfdc177f6127043eac555d356403d9e1d5c52243 Mon Sep 17 00:00:00 2001
 From: Miroslav Rezanina <mrezanin@redhat.com>
 Date: Thu, 31 May 2018 16:45:23 +0200
 Subject: Add initial redhat setup
 
+Rebase notes (18.5):
+- added bash_completion file
+- added cloud-id file
+
+Merged patches (18.5):
+- 2d6b469 add power-state-change module to cloud_final_modules
+- 764159f Adding systemd mount options to wait for cloud-init
+- da4d99e Adding disk_setup to rhel/cloud.cfg
+- f5c6832 Enable cloud-init by default on vmware
+
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
 ---
  cloudinit/config/cc_chef.py           |   6 +-
  cloudinit/settings.py                 |   7 +-
  redhat/.gitignore                     |   1 +
- redhat/Makefile                       |  71 ++++++++
- redhat/Makefile.common                |  35 ++++
+ redhat/Makefile                       |  71 ++++++
+ redhat/Makefile.common                |  35 +++
  redhat/cloud-init-tmpfiles.conf       |   1 +
- redhat/cloud-init.spec.template       | 318 +++++++++++++++++++++++++++++++++
+ redhat/cloud-init.spec.template       | 352 ++++++++++++++++++++++++++
  redhat/rpmbuild/BUILD/.gitignore      |   3 +
  redhat/rpmbuild/RPMS/.gitignore       |   3 +
  redhat/rpmbuild/SOURCES/.gitignore    |   3 +
  redhat/rpmbuild/SPECS/.gitignore      |   3 +
  redhat/rpmbuild/SRPMS/.gitignore      |   3 +
- redhat/scripts/frh.py                 |  27 +++
- redhat/scripts/git-backport-diff      | 327 ++++++++++++++++++++++++++++++++++
- redhat/scripts/git-compile-check      | 215 ++++++++++++++++++++++
- redhat/scripts/process-patches.sh     |  73 ++++++++
+ redhat/scripts/frh.py                 |  27 ++
+ redhat/scripts/git-backport-diff      | 327 ++++++++++++++++++++++++
+ redhat/scripts/git-compile-check      | 215 ++++++++++++++++
+ redhat/scripts/process-patches.sh     |  73 ++++++
  redhat/scripts/tarball_checksum.sh    |   3 +
  rhel/README.rhel                      |   5 +
  rhel/cloud-init-tmpfiles.conf         |   1 +
- rhel/cloud.cfg                        |  66 +++++++
+ rhel/cloud.cfg                        |  69 +++++
  rhel/systemd/cloud-config.service     |  18 ++
- rhel/systemd/cloud-config.target      |  11 ++
+ rhel/systemd/cloud-config.target      |  11 +
  rhel/systemd/cloud-final.service      |  19 ++
- rhel/systemd/cloud-init-local.service |  31 ++++
- rhel/systemd/cloud-init.service       |  25 +++
- setup.py                              |  64 +------
- tools/read-version                    |  19 +-
- 27 files changed, 1274 insertions(+), 84 deletions(-)
+ rhel/systemd/cloud-init-local.service |  31 +++
+ rhel/systemd/cloud-init.service       |  25 ++
+ setup.py                              |  64 +----
+ tools/read-version                    |  25 +-
+ 27 files changed, 1311 insertions(+), 90 deletions(-)
  create mode 100644 redhat/.gitignore
  create mode 100644 redhat/Makefile
  create mode 100644 redhat/Makefile.common
@@ -57,7 +68,7 @@ Subject: Add initial redhat setup
  create mode 100644 rhel/systemd/cloud-init.service
 
 diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
-index 46abedd..fe7bda8 100644
+index 46abedd1..fe7bda8c 100644
 --- a/cloudinit/config/cc_chef.py
 +++ b/cloudinit/config/cc_chef.py
 @@ -33,7 +33,7 @@ file).
@@ -88,14 +99,14 @@ index 46abedd..fe7bda8 100644
  }
  CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
 diff --git a/cloudinit/settings.py b/cloudinit/settings.py
-index dde5749..a5a1eec 100644
+index b1ebaade..c5367687 100644
 --- a/cloudinit/settings.py
 +++ b/cloudinit/settings.py
-@@ -43,13 +43,16 @@ CFG_BUILTIN = {
+@@ -44,13 +44,16 @@ CFG_BUILTIN = {
      ],
      'def_log_file': '/var/log/cloud-init.log',
      'log_cfgs': [],
--    'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+-    'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
 +    'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'],
 +    'ssh_deletekeys': False,
 +    'ssh_genkeytypes': [],
@@ -112,7 +123,7 @@ index dde5749..a5a1eec 100644
      'vendor_data': {'enabled': True, 'prefix': []},
 diff --git a/rhel/README.rhel b/rhel/README.rhel
 new file mode 100644
-index 0000000..aa29630
+index 00000000..aa29630d
 --- /dev/null
 +++ b/rhel/README.rhel
 @@ -0,0 +1,5 @@
@@ -123,30 +134,32 @@ index 0000000..aa29630
 + - grub_dpkg
 diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf
 new file mode 100644
-index 0000000..0c6d2a3
+index 00000000..0c6d2a3b
 --- /dev/null
 +++ b/rhel/cloud-init-tmpfiles.conf
 @@ -0,0 +1 @@
 +d /run/cloud-init 0700 root root - -
 diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
 new file mode 100644
-index 0000000..986f241
+index 00000000..f0db3c12
 --- /dev/null
 +++ b/rhel/cloud.cfg
-@@ -0,0 +1,66 @@
+@@ -0,0 +1,69 @@
 +users:
 + - default
 +
 +disable_root: 1
 +ssh_pwauth:   0
 +
-+mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
++mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2']
 +resize_rootfs_tmp: /dev
 +ssh_deletekeys:   0
 +ssh_genkeytypes:  ~
 +syslog_fix_perms: ~
++disable_vmware_customization: false
 +
 +cloud_init_modules:
++ - disk_setup
 + - migrator
 + - bootcmd
 + - write-files
@@ -184,6 +197,7 @@ index 0000000..986f241
 + - keys-to-console
 + - phone-home
 + - final-message
++ - power-state-change
 +
 +system_info:
 +  default_user:
@@ -202,7 +216,7 @@ index 0000000..986f241
 +# vim:syntax=yaml
 diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service
 new file mode 100644
-index 0000000..12ca9df
+index 00000000..12ca9dfd
 --- /dev/null
 +++ b/rhel/systemd/cloud-config.service
 @@ -0,0 +1,18 @@
@@ -226,7 +240,7 @@ index 0000000..12ca9df
 +WantedBy=multi-user.target
 diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target
 new file mode 100644
-index 0000000..ae9b7d0
+index 00000000..ae9b7d02
 --- /dev/null
 +++ b/rhel/systemd/cloud-config.target
 @@ -0,0 +1,11 @@
@@ -243,7 +257,7 @@ index 0000000..ae9b7d0
 +After=cloud-init-local.service cloud-init.service
 diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service
 new file mode 100644
-index 0000000..32a83d8
+index 00000000..32a83d85
 --- /dev/null
 +++ b/rhel/systemd/cloud-final.service
 @@ -0,0 +1,19 @@
@@ -268,7 +282,7 @@ index 0000000..32a83d8
 +WantedBy=multi-user.target
 diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service
 new file mode 100644
-index 0000000..656eddb
+index 00000000..656eddb9
 --- /dev/null
 +++ b/rhel/systemd/cloud-init-local.service
 @@ -0,0 +1,31 @@
@@ -305,7 +319,7 @@ index 0000000..656eddb
 +WantedBy=multi-user.target
 diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service
 new file mode 100644
-index 0000000..68fc5f1
+index 00000000..68fc5f19
 --- /dev/null
 +++ b/rhel/systemd/cloud-init.service
 @@ -0,0 +1,25 @@
@@ -335,10 +349,10 @@ index 0000000..68fc5f1
 +[Install]
 +WantedBy=multi-user.target
 diff --git a/setup.py b/setup.py
-index bc3f52a..47cf842 100755
+index ea37efc3..06ae48a6 100755
 --- a/setup.py
 +++ b/setup.py
-@@ -125,11 +125,6 @@ INITSYS_FILES = {
+@@ -135,11 +135,6 @@ INITSYS_FILES = {
      'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
      'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
      'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)],
@@ -350,7 +364,7 @@ index bc3f52a..47cf842 100755
      'upstart': [f for f in glob('upstart/*') if is_f(f)],
  }
  INITSYS_ROOTS = {
-@@ -138,9 +133,6 @@ INITSYS_ROOTS = {
+@@ -148,9 +143,6 @@ INITSYS_ROOTS = {
      'sysvinit_deb': 'etc/init.d',
      'sysvinit_openrc': 'etc/init.d',
      'sysvinit_suse': 'etc/init.d',
@@ -360,7 +374,7 @@ index bc3f52a..47cf842 100755
      'upstart': 'etc/init/',
  }
  INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
-@@ -178,47 +170,6 @@ class MyEggInfo(egg_info):
+@@ -188,47 +180,6 @@ class MyEggInfo(egg_info):
          return ret
  
  
@@ -408,10 +422,10 @@ index bc3f52a..47cf842 100755
  if not in_virtualenv():
      USR = "/" + USR
      ETC = "/" + ETC
-@@ -228,11 +179,9 @@ if not in_virtualenv():
-         INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k]
+@@ -239,11 +190,9 @@ if not in_virtualenv():
  
  data_files = [
+     (ETC + '/bash_completion.d', ['bash_completion/cloud-init']),
 -    (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]),
      (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
      (ETC + '/cloud/templates', glob('templates/*')),
@@ -421,7 +435,7 @@ index bc3f52a..47cf842 100755
                                      'tools/write-ssh-key-fingerprints']),
      (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
      (USR + '/share/doc/cloud-init/examples',
-@@ -244,15 +193,8 @@ if os.uname()[0] != 'FreeBSD':
+@@ -255,15 +204,8 @@ if os.uname()[0] != 'FreeBSD':
      data_files.extend([
          (ETC + '/NetworkManager/dispatcher.d/',
           ['tools/hook-network-manager']),
@@ -438,7 +452,7 @@ index bc3f52a..47cf842 100755
  
  requirements = read_requires()
  
-@@ -267,8 +209,6 @@ setuptools.setup(
+@@ -278,8 +220,6 @@ setuptools.setup(
      scripts=['tools/cloud-init-per'],
      license='Dual-licensed under GPLv3 or Apache 2.0',
      data_files=data_files,
@@ -446,12 +460,12 @@ index bc3f52a..47cf842 100755
 -    cmdclass=cmdclass,
      entry_points={
          'console_scripts': [
-             'cloud-init = cloudinit.cmd.main:main'
+             'cloud-init = cloudinit.cmd.main:main',
 diff --git a/tools/read-version b/tools/read-version
-index 3ea9e66..d43cc8f 100755
+index e69c2ce0..d43cc8f0 100755
 --- a/tools/read-version
 +++ b/tools/read-version
-@@ -65,23 +65,8 @@ output_json = '--json' in sys.argv
+@@ -65,29 +65,8 @@ output_json = '--json' in sys.argv
  src_version = ci_version.version_string()
  version_long = None
  
@@ -466,6 +480,12 @@ index 3ea9e66..d43cc8f 100755
 -    if not version.startswith(src_version):
 -        sys.stderr.write("git describe version (%s) differs from "
 -                         "cloudinit.version (%s)\n" % (version, src_version))
+-        sys.stderr.write(
+-            "Please get the latest upstream tags.\n"
+-            "As an example, this can be done with the following:\n"
+-            "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
+-            "$ git fetch upstream --tags\n"
+-        )
 -        sys.exit(1)
 -
 -    version_long = tiny_p(cmd + ["--long"]).strip()
@@ -478,5 +498,5 @@ index 3ea9e66..d43cc8f 100755
  # version is X.Y.Z[+xxx.gHASH]
  # version_long is None or X.Y.Z-xxx-gHASH
 -- 
-1.8.3.1
+2.20.1
 
diff --git a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch
index 13e1baf..1dcf4bd 100644
--- a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch
+++ b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch
@@ -1,21 +1,21 @@
-From 48610bf1f404fc8c71e0091ad4b877e851e06642 Mon Sep 17 00:00:00 2001
+From 0bff7d73c49043b0820d0231c9a47539287f35e3 Mon Sep 17 00:00:00 2001
 From: Miroslav Rezanina <mrezanin@redhat.com>
 Date: Thu, 31 May 2018 19:37:55 +0200
 Subject: Do not write NM_CONTROLLED=no in generated interface config files
 
 X-downstream-only: true
 Signed-off-by: Ryan McCabe <rmccabe@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
 ---
- cloudinit/net/sysconfig.py                     |  1 -
- tests/unittests/test_distros/test_netconfig.py |  4 ----
- tests/unittests/test_net.py                    | 30 --------------------------
- 3 files changed, 35 deletions(-)
+ cloudinit/net/sysconfig.py  |  1 -
+ tests/unittests/test_net.py | 30 ------------------------------
+ 2 files changed, 31 deletions(-)
 
 diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
-index 39d89c4..cefb5c5 100644
+index 17293e1d..ae0554ef 100644
 --- a/cloudinit/net/sysconfig.py
 +++ b/cloudinit/net/sysconfig.py
-@@ -233,7 +233,6 @@ class Renderer(renderer.Renderer):
+@@ -250,7 +250,6 @@ class Renderer(renderer.Renderer):
      iface_defaults = tuple([
          ('ONBOOT', True),
          ('USERCTL', False),
@@ -23,47 +23,11 @@ index 39d89c4..cefb5c5 100644
          ('BOOTPROTO', 'none'),
      ])
  
-diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
-index 1c2e45f..2f69455 100644
---- a/tests/unittests/test_distros/test_netconfig.py
-+++ b/tests/unittests/test_distros/test_netconfig.py
-@@ -479,7 +479,6 @@ DEVICE=eth0
- GATEWAY=192.168.1.254
- IPADDR=192.168.1.5
- NETMASK=255.255.255.0
--NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
-@@ -496,7 +495,6 @@ USERCTL=no
- #
- BOOTPROTO=dhcp
- DEVICE=eth1
--NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
-@@ -630,7 +628,6 @@ DEVICE=eth0
- IPV6ADDR=2607:f0d0:1002:0011::2/64
- IPV6INIT=yes
- IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
--NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
-@@ -645,7 +642,6 @@ USERCTL=no
- #
- BOOTPROTO=dhcp
- DEVICE=eth1
--NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
-index c12a487..95318ed 100644
+index 195f261c..5f1aa3e7 100644
 --- a/tests/unittests/test_net.py
 +++ b/tests/unittests/test_net.py
-@@ -144,7 +144,6 @@ GATEWAY=172.19.3.254
+@@ -175,7 +175,6 @@ GATEWAY=172.19.3.254
  HWADDR=fa:16:3e:ed:9a:59
  IPADDR=172.19.1.34
  NETMASK=255.255.252.0
@@ -71,7 +35,7 @@ index c12a487..95318ed 100644
  ONBOOT=yes
  TYPE=Ethernet
  USERCTL=no
-@@ -212,7 +211,6 @@ IPADDR=172.19.1.34
+@@ -279,7 +278,6 @@ IPADDR=172.19.1.34
  IPADDR1=10.0.0.10
  NETMASK=255.255.252.0
  NETMASK1=255.255.255.0
@@ -79,7 +43,7 @@ index c12a487..95318ed 100644
  ONBOOT=yes
  TYPE=Ethernet
  USERCTL=no
-@@ -302,7 +300,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
+@@ -407,7 +405,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
  IPV6INIT=yes
  IPV6_DEFAULTGW=2001:DB8::1
  NETMASK=255.255.252.0
@@ -87,7 +51,7 @@ index c12a487..95318ed 100644
  ONBOOT=yes
  TYPE=Ethernet
  USERCTL=no
-@@ -417,7 +414,6 @@ NETWORK_CONFIGS = {
+@@ -523,7 +520,6 @@ NETWORK_CONFIGS = {
                  BOOTPROTO=none
                  DEVICE=eth1
                  HWADDR=cf:d6:af:48:e8:80
@@ -95,15 +59,15 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -432,7 +428,6 @@ NETWORK_CONFIGS = {
-                 HWADDR=c0:d6:9f:2c:e8:80
+@@ -539,7 +535,6 @@ NETWORK_CONFIGS = {
                  IPADDR=192.168.21.3
                  NETMASK=255.255.255.0
+                 METRIC=10000
 -                NM_CONTROLLED=no
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -544,7 +539,6 @@ NETWORK_CONFIGS = {
+@@ -652,7 +647,6 @@ NETWORK_CONFIGS = {
                  IPV6ADDR=2001:1::1/64
                  IPV6INIT=yes
                  NETMASK=255.255.255.0
@@ -111,7 +75,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no
-@@ -745,14 +739,12 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -894,14 +888,12 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  DHCPV6C=yes
                  IPV6INIT=yes
                  MACADDR=aa:bb:cc:dd:ee:ff
@@ -126,15 +90,15 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  PHYSDEV=bond0
                  TYPE=Ethernet
-@@ -768,7 +760,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-                 IPV6INIT=yes
+@@ -918,7 +910,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  IPV6_DEFAULTGW=2001:4800:78ff:1b::1
+                 MACADDR=bb:bb:bb:bb:bb:aa
                  NETMASK=255.255.255.0
 -                NM_CONTROLLED=no
                  ONBOOT=yes
                  PRIO=22
                  STP=no
-@@ -778,7 +769,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -928,7 +919,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BOOTPROTO=none
                  DEVICE=eth0
                  HWADDR=c0:d6:9f:2c:e8:80
@@ -142,7 +106,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -795,7 +785,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -945,7 +935,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  MTU=1500
                  NETMASK=255.255.255.0
                  NETMASK1=255.255.255.0
@@ -150,7 +114,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  PHYSDEV=eth0
                  TYPE=Ethernet
-@@ -806,7 +795,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -956,7 +945,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  DEVICE=eth1
                  HWADDR=aa:d6:9f:2c:e8:80
                  MASTER=bond0
@@ -158,7 +122,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  SLAVE=yes
                  TYPE=Ethernet
-@@ -816,7 +804,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -966,7 +954,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  DEVICE=eth2
                  HWADDR=c0:bb:9f:2c:e8:80
                  MASTER=bond0
@@ -166,7 +130,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  SLAVE=yes
                  TYPE=Ethernet
-@@ -826,7 +813,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -976,7 +963,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BRIDGE=br0
                  DEVICE=eth3
                  HWADDR=66:bb:9f:2c:e8:80
@@ -174,7 +138,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -835,7 +821,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -985,7 +971,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BRIDGE=br0
                  DEVICE=eth4
                  HWADDR=98:bb:9f:2c:e8:80
@@ -182,7 +146,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -843,7 +828,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -993,7 +978,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BOOTPROTO=dhcp
                  DEVICE=eth5
                  HWADDR=98:bb:9f:2c:e8:8a
@@ -190,15 +154,15 @@ index c12a487..95318ed 100644
                  ONBOOT=no
                  TYPE=Ethernet
                  USERCTL=no""")
-@@ -1125,7 +1109,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-         IPV6INIT=yes
+@@ -1356,7 +1340,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+         MTU=9000
          NETMASK=255.255.255.0
          NETMASK1=255.255.255.0
 -        NM_CONTROLLED=no
          ONBOOT=yes
          TYPE=Bond
          USERCTL=no
-@@ -1135,7 +1118,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1366,7 +1349,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
          DEVICE=bond0s0
          HWADDR=aa:bb:cc:dd:e8:00
          MASTER=bond0
@@ -206,7 +170,7 @@ index c12a487..95318ed 100644
          ONBOOT=yes
          SLAVE=yes
          TYPE=Ethernet
-@@ -1153,7 +1135,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1388,7 +1370,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
          DEVICE=bond0s1
          HWADDR=aa:bb:cc:dd:e8:01
          MASTER=bond0
@@ -214,7 +178,7 @@ index c12a487..95318ed 100644
          ONBOOT=yes
          SLAVE=yes
          TYPE=Ethernet
-@@ -1190,7 +1171,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1426,7 +1407,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BOOTPROTO=none
                  DEVICE=en0
                  HWADDR=aa:bb:cc:dd:e8:00
@@ -222,15 +186,15 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no"""),
-@@ -1206,7 +1186,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-                 IPV6_DEFAULTGW=2001:1::1
+@@ -1443,7 +1423,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+                 MTU=2222
                  NETMASK=255.255.255.0
                  NETMASK1=255.255.255.0
 -                NM_CONTROLLED=no
                  ONBOOT=yes
                  PHYSDEV=en0
                  TYPE=Ethernet
-@@ -1247,7 +1226,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1484,7 +1463,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  DEVICE=br0
                  IPADDR=192.168.2.2
                  NETMASK=255.255.255.0
@@ -238,7 +202,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  PRIO=22
                  STP=no
-@@ -1261,7 +1239,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1498,7 +1476,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  HWADDR=52:54:00:12:34:00
                  IPV6ADDR=2001:1::100/96
                  IPV6INIT=yes
@@ -246,7 +210,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no
-@@ -1273,7 +1250,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1510,7 +1487,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  HWADDR=52:54:00:12:34:01
                  IPV6ADDR=2001:1::101/96
                  IPV6INIT=yes
@@ -254,7 +218,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no
-@@ -1347,7 +1323,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1584,7 +1560,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  HWADDR=52:54:00:12:34:00
                  IPADDR=192.168.1.2
                  NETMASK=255.255.255.0
@@ -262,7 +226,7 @@ index c12a487..95318ed 100644
                  ONBOOT=no
                  TYPE=Ethernet
                  USERCTL=no
-@@ -1357,7 +1332,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1594,7 +1569,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  DEVICE=eth1
                  HWADDR=52:54:00:12:34:aa
                  MTU=1480
@@ -270,7 +234,7 @@ index c12a487..95318ed 100644
                  ONBOOT=yes
                  TYPE=Ethernet
                  USERCTL=no
-@@ -1366,7 +1340,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+@@ -1603,7 +1577,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
                  BOOTPROTO=none
                  DEVICE=eth2
                  HWADDR=52:54:00:12:34:ff
@@ -278,7 +242,7 @@ index c12a487..95318ed 100644
                  ONBOOT=no
                  TYPE=Ethernet
                  USERCTL=no
-@@ -1639,7 +1612,6 @@ class TestSysConfigRendering(CiTestCase):
+@@ -1969,7 +1942,6 @@ class TestRhelSysConfigRendering(CiTestCase):
  BOOTPROTO=dhcp
  DEVICE=eth1000
  HWADDR=07-1C-C6-75-A4-BE
@@ -286,7 +250,7 @@ index c12a487..95318ed 100644
  ONBOOT=yes
  TYPE=Ethernet
  USERCTL=no
-@@ -1759,7 +1731,6 @@ GATEWAY=10.0.2.2
+@@ -2090,7 +2062,6 @@ GATEWAY=10.0.2.2
  HWADDR=52:54:00:12:34:00
  IPADDR=10.0.2.15
  NETMASK=255.255.255.0
@@ -294,7 +258,7 @@ index c12a487..95318ed 100644
  ONBOOT=yes
  TYPE=Ethernet
  USERCTL=no
-@@ -1780,7 +1751,6 @@ USERCTL=no
+@@ -2111,7 +2082,6 @@ USERCTL=no
  #
  BOOTPROTO=dhcp
  DEVICE=eth0
@@ -303,5 +267,5 @@ index c12a487..95318ed 100644
  TYPE=Ethernet
  USERCTL=no
 -- 
-1.8.3.1
+2.20.1
 
diff --git a/SOURCES/0003-limit-permissions-on-def_log_file.patch b/SOURCES/0003-limit-permissions-on-def_log_file.patch
index 20d7e7b..1e63b06 100644
--- a/SOURCES/0003-limit-permissions-on-def_log_file.patch
+++ b/SOURCES/0003-limit-permissions-on-def_log_file.patch
@@ -1,4 +1,4 @@
-From a49f5cd665c3bdb6a40c95d561791b6bbce9f079 Mon Sep 17 00:00:00 2001
+From fa8f782f5dd24e81f7072bfc24c75340f0972af5 Mon Sep 17 00:00:00 2001
 From: Lars Kellogg-Stedman <lars@redhat.com>
 Date: Fri, 7 Apr 2017 18:50:54 -0400
 Subject: limit permissions on def_log_file
@@ -9,6 +9,7 @@ configurable via the def_log_file_mode option in cloud.cfg.
 LP: #1541196
 Resolves: rhbz#1424612
 X-approved-upstream: true
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
 ---
  cloudinit/settings.py         | 1 +
  cloudinit/stages.py           | 3 ++-
@@ -16,10 +17,10 @@ X-approved-upstream: true
  3 files changed, 7 insertions(+), 1 deletion(-)
 
 diff --git a/cloudinit/settings.py b/cloudinit/settings.py
-index a5a1eec..4efe6b6 100644
+index c5367687..d982a4d6 100644
 --- a/cloudinit/settings.py
 +++ b/cloudinit/settings.py
-@@ -42,6 +42,7 @@ CFG_BUILTIN = {
+@@ -43,6 +43,7 @@ CFG_BUILTIN = {
          'None',
      ],
      'def_log_file': '/var/log/cloud-init.log',
@@ -28,10 +29,10 @@ index a5a1eec..4efe6b6 100644
      'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'],
      'ssh_deletekeys': False,
 diff --git a/cloudinit/stages.py b/cloudinit/stages.py
-index bc4ebc8..40336e0 100644
+index 8a064124..4f15484d 100644
 --- a/cloudinit/stages.py
 +++ b/cloudinit/stages.py
-@@ -145,8 +145,9 @@ class Init(object):
+@@ -148,8 +148,9 @@ class Init(object):
      def _initialize_filesystem(self):
          util.ensure_dirs(self._initial_subdirs())
          log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
@@ -43,10 +44,10 @@ index bc4ebc8..40336e0 100644
              if not perms:
                  perms = {}
 diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
-index bd84c64..99e6fdd 100644
+index eb84dcf5..0e82b83e 100644
 --- a/doc/examples/cloud-config.txt
 +++ b/doc/examples/cloud-config.txt
-@@ -397,10 +397,14 @@ timezone: US/Eastern
+@@ -413,10 +413,14 @@ timezone: US/Eastern
  # if syslog_fix_perms is a list, it will iterate through and use the
  # first pair that does not raise error.
  #
@@ -62,5 +63,5 @@ index bd84c64..99e6fdd 100644
  
  # you can set passwords for a user or multiple users
 -- 
-1.8.3.1
+2.20.1
 
diff --git a/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch b/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch
new file mode 100644
index 0000000..bb78670
--- /dev/null
+++ b/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch
@@ -0,0 +1,64 @@
+From 8a8af21fc8fff984f2b4285e9993cfd50cad70c4 Mon Sep 17 00:00:00 2001
+From: Lars Kellogg-Stedman <lars@redhat.com>
+Date: Thu, 15 Jun 2017 12:20:39 -0400
+Subject: azure: ensure that networkmanager hook script runs
+
+The networkmanager hook script was failing to run due to the changes
+we made to resolve rhbz#1440831.  This corrects the regression by
+allowing the NM hook script to run regardless of whether or not
+cloud-init is "enabled".
+
+Resolves: rhbz#1460206
+X-downstream-only: true
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ tools/hook-dhclient        | 3 +--
+ tools/hook-network-manager | 3 +--
+ tools/hook-rhel.sh         | 3 +--
+ 3 files changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/tools/hook-dhclient b/tools/hook-dhclient
+index 02122f37..181cd51e 100755
+--- a/tools/hook-dhclient
++++ b/tools/hook-dhclient
+@@ -13,8 +13,7 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init is enabled and on azure
+-    [ -e /run/cloud-init/enabled ] || return 1
++    # only execute hooks if cloud-init is running on azure
+     is_azure
+ }
+ 
+diff --git a/tools/hook-network-manager b/tools/hook-network-manager
+index 67d9044a..1d52cad7 100755
+--- a/tools/hook-network-manager
++++ b/tools/hook-network-manager
+@@ -13,8 +13,7 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init is enabled and on azure
+-    [ -e /run/cloud-init/enabled ] || return 1
++    # only execute hooks if cloud-init is running on azure
+     is_azure
+ }
+ 
+diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
+index 513a5515..d75767e2 100755
+--- a/tools/hook-rhel.sh
++++ b/tools/hook-rhel.sh
+@@ -13,8 +13,7 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init is enabled and on azure
+-    [ -e /run/cloud-init/enabled ] || return 1
++    # only execute hooks if cloud-init is running on azure
+     is_azure
+ }
+ 
+-- 
+2.20.1
+
diff --git a/SOURCES/0005-add-power-state-change-module-to-cloud_final_modules.patch b/SOURCES/0005-add-power-state-change-module-to-cloud_final_modules.patch
deleted file mode 100644
index fd0605e..0000000
--- a/SOURCES/0005-add-power-state-change-module-to-cloud_final_modules.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 2d6b469b1aa915c219fa19012b1fc3c57b218cb3 Mon Sep 17 00:00:00 2001
-From: Miroslav Rezanina <mrezanin@redhat.com>
-Date: Thu, 31 May 2018 19:47:44 +0200
-Subject: add power-state-change module to cloud_final_modules
-
-Resolves: rhbz#1252477
-X-downstream-only: true
-Signed-off-by: Ryan McCabe <rmccabe@redhat.com>
----
- rhel/cloud.cfg | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
-index 986f241..8644872 100644
---- a/rhel/cloud.cfg
-+++ b/rhel/cloud.cfg
-@@ -48,6 +48,7 @@ cloud_final_modules:
-  - keys-to-console
-  - phone-home
-  - final-message
-+ - power-state-change
- 
- system_info:
-   default_user:
--- 
-1.8.3.1
-
diff --git a/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
new file mode 100644
index 0000000..f157e21
--- /dev/null
+++ b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
@@ -0,0 +1,45 @@
+From 471353b3c3bf5cba5cab4d1b203b1c259c709fde Mon Sep 17 00:00:00 2001
+From: Miroslav Rezanina <mrezanin@redhat.com>
+Date: Thu, 31 May 2018 20:00:32 +0200
+Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp
+
+Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies
+only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6.
+
+X-downstream-only: yes
+
+Resolves: rhbz#1519271
+Signed-off-by: Ryan McCabe <rmccabe@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/net/sysconfig.py  | 1 +
+ tests/unittests/test_net.py | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
+index ae0554ef..ec166cf1 100644
+--- a/cloudinit/net/sysconfig.py
++++ b/cloudinit/net/sysconfig.py
+@@ -310,6 +310,7 @@ class Renderer(renderer.Renderer):
+             if subnet_type == 'dhcp6':
+                 iface_cfg['IPV6INIT'] = True
+                 iface_cfg['DHCPV6C'] = True
++                iface_cfg['IPV6_AUTOCONF'] = False
+             elif subnet_type in ['dhcp4', 'dhcp']:
+                 iface_cfg['BOOTPROTO'] = 'dhcp'
+             elif subnet_type == 'static':
+diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
+index 5f1aa3e7..8bcafe08 100644
+--- a/tests/unittests/test_net.py
++++ b/tests/unittests/test_net.py
+@@ -886,6 +886,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+                 BOOTPROTO=none
+                 DEVICE=bond0
+                 DHCPV6C=yes
++                IPV6_AUTOCONF=no
+                 IPV6INIT=yes
+                 MACADDR=aa:bb:cc:dd:ee:ff
+                 ONBOOT=yes
+-- 
+2.20.1
+
diff --git a/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
new file mode 100644
index 0000000..0da5464
--- /dev/null
+++ b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
@@ -0,0 +1,57 @@
+From 21ea1cda0055416119edea44de95b5606f0b0e15 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Tue, 17 Apr 2018 13:07:54 +0200
+Subject: DataSourceAzure.py: use hostnamectl to set hostname
+
+RH-Author: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-id: <20180417130754.12918-3-vkuznets@redhat.com>
+Patchwork-id: 79659
+O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname
+Bugzilla: 1568717
+RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+The right way to set hostname in RHEL7 is:
+
+ $ hostnamectl set-hostname HOSTNAME
+
+DataSourceAzure, however, uses:
+ $ hostname HOSTNAME
+
+instead and this causes problems. We can't simply change
+'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used
+for both getting and setting the hostname.
+
+Long term, this should be fixed in a different way. Cloud-init
+has distro-specific hostname setting/getting (see
+cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched
+to use these.
+
+Resolves: rhbz#1434109
+
+X-downstream-only: yes
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index e076d5dc..7dbeb04c 100644
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -238,7 +238,7 @@ def get_hostname(hostname_command='hostname'):
+ 
+ 
+ def set_hostname(hostname, hostname_command='hostname'):
+-    util.subp([hostname_command, hostname])
++    util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ 
+ 
+ @contextlib.contextmanager
+-- 
+2.20.1
+
diff --git a/SOURCES/0006-azure-ensure-that-networkmanager-hook-script-runs.patch b/SOURCES/0006-azure-ensure-that-networkmanager-hook-script-runs.patch
deleted file mode 100644
index 51ed40f..0000000
--- a/SOURCES/0006-azure-ensure-that-networkmanager-hook-script-runs.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From c48497435e8195dbd87262c2f00e484e63fe3343 Mon Sep 17 00:00:00 2001
-From: Lars Kellogg-Stedman <lars@redhat.com>
-Date: Thu, 15 Jun 2017 12:20:39 -0400
-Subject: azure: ensure that networkmanager hook script runs
-
-The networkmanager hook script was failing to run due to the changes
-we made to resolve rhbz#1440831.  This corrects the regression by
-allowing the NM hook script to run regardless of whether or not
-cloud-init is "enabled".
-
-Resolves: rhbz#1460206
-X-downstream-only: true
----
- tools/hook-dhclient        | 3 +--
- tools/hook-network-manager | 3 +--
- tools/hook-rhel.sh         | 3 +--
- 3 files changed, 3 insertions(+), 6 deletions(-)
-
-diff --git a/tools/hook-dhclient b/tools/hook-dhclient
-index 02122f3..181cd51 100755
---- a/tools/hook-dhclient
-+++ b/tools/hook-dhclient
-@@ -13,8 +13,7 @@ is_azure() {
- }
- 
- is_enabled() {
--    # only execute hooks if cloud-init is enabled and on azure
--    [ -e /run/cloud-init/enabled ] || return 1
-+    # only execute hooks if cloud-init is running on azure
-     is_azure
- }
- 
-diff --git a/tools/hook-network-manager b/tools/hook-network-manager
-index 67d9044..1d52cad 100755
---- a/tools/hook-network-manager
-+++ b/tools/hook-network-manager
-@@ -13,8 +13,7 @@ is_azure() {
- }
- 
- is_enabled() {
--    # only execute hooks if cloud-init is enabled and on azure
--    [ -e /run/cloud-init/enabled ] || return 1
-+    # only execute hooks if cloud-init running on azure
-     is_azure
- }
- 
-diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
-index 513a551..d75767e 100755
---- a/tools/hook-rhel.sh
-+++ b/tools/hook-rhel.sh
-@@ -13,8 +13,7 @@ is_azure() {
- }
- 
- is_enabled() {
--    # only execute hooks if cloud-init is enabled and on azure
--    [ -e /run/cloud-init/enabled ] || return 1
-+    # only execute hooks if cloud-init is running on azure
-     is_azure
- }
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch b/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
new file mode 100644
index 0000000..33de1b6
--- /dev/null
+++ b/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
@@ -0,0 +1,50 @@
+From 6444df4c91c611c65bb292e75e2726f767edcf2b Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Thu, 26 Apr 2018 09:27:49 +0200
+Subject: sysconfig: Don't disable IPV6_AUTOCONF
+
+RH-Author: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-id: <20180426092749.7251-2-vkuznets@redhat.com>
+Patchwork-id: 79904
+O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 1/1] sysconfig: Don't disable IPV6_AUTOCONF
+Bugzilla: 1578702
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
+
+Downstream-only commit 118458a3fb ("sysconfig: Don't write BOOTPROTO=dhcp
+for ipv6 dhcp") did two things:
+1) Disabled BOOTPROTO='dhcp' for dhcp6 setups. This change seems to be
+   correct as BOOTPROTO is unrelated to IPv6. The change was since merged
+   upstream (commit a57928d3c314d9568712cd190cb1e721e14c108b).
+2) Explicitly disabled AUTOCONF and this broke many valid configurations
+   using it instead of DHCPV6C. Revert this part of the change. In case
+   DHCPV6C-only support is needed, something like a new 'dhcpv6c_only'
+   network type needs to be suggested upstream.
+
+X-downstream-only: yes
+
+Resolves: rhbz#1558854
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/net/sysconfig.py | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
+index ec166cf1..ae0554ef 100644
+--- a/cloudinit/net/sysconfig.py
++++ b/cloudinit/net/sysconfig.py
+@@ -310,7 +310,6 @@ class Renderer(renderer.Renderer):
+             if subnet_type == 'dhcp6':
+                 iface_cfg['IPV6INIT'] = True
+                 iface_cfg['DHCPV6C'] = True
+-                iface_cfg['IPV6_AUTOCONF'] = False
+             elif subnet_type in ['dhcp4', 'dhcp']:
+                 iface_cfg['BOOTPROTO'] = 'dhcp'
+             elif subnet_type == 'static':
+-- 
+2.20.1
+
diff --git a/SOURCES/0007-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0007-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
deleted file mode 100644
index 027abbe..0000000
--- a/SOURCES/0007-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 867056799c8dca5e8e7768df71fd8e7ff08d200d Mon Sep 17 00:00:00 2001
-From: Miroslav Rezanina <mrezanin@redhat.com>
-Date: Thu, 31 May 2018 20:00:32 +0200
-Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp
-
-Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies
-only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6.
-
-X-downstream-only: yes
-
-Resolves: rhbz#1519271
-Signed-off-by: Ryan McCabe <rmccabe@redhat.com>
----
- cloudinit/net/sysconfig.py  | 2 +-
- tests/unittests/test_net.py | 3 ++-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
-index cefb5c5..f870b0f 100644
---- a/cloudinit/net/sysconfig.py
-+++ b/cloudinit/net/sysconfig.py
-@@ -286,7 +286,7 @@ class Renderer(renderer.Renderer):
-             if subnet_type == 'dhcp6':
-                 iface_cfg['IPV6INIT'] = True
-                 iface_cfg['DHCPV6C'] = True
--                iface_cfg['BOOTPROTO'] = 'dhcp'
-+                iface_cfg['IPV6_AUTOCONF'] = False
-             elif subnet_type in ['dhcp4', 'dhcp']:
-                 iface_cfg['BOOTPROTO'] = 'dhcp'
-             elif subnet_type == 'static':
-diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
-index 95318ed..9cf41bc 100644
---- a/tests/unittests/test_net.py
-+++ b/tests/unittests/test_net.py
-@@ -734,9 +734,10 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-                                            """miimon=100"
-                 BONDING_SLAVE0=eth1
-                 BONDING_SLAVE1=eth2
--                BOOTPROTO=dhcp
-+                BOOTPROTO=none
-                 DEVICE=bond0
-                 DHCPV6C=yes
-+                IPV6_AUTOCONF=no
-                 IPV6INIT=yes
-                 MACADDR=aa:bb:cc:dd:ee:ff
-                 ONBOOT=yes
--- 
-1.8.3.1
-
diff --git a/SOURCES/0008-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0008-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
deleted file mode 100644
index ee23035..0000000
--- a/SOURCES/0008-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 004f265bace18a6e45d954c5d7bdfe344a19178e Mon Sep 17 00:00:00 2001
-From: Vitaly Kuznetsov <vkuznets@redhat.com>
-Date: Tue, 17 Apr 2018 13:07:54 +0200
-Subject: DataSourceAzure.py: use hostnamectl to set hostname
-
-RH-Author: Vitaly Kuznetsov <vkuznets@redhat.com>
-Message-id: <20180417130754.12918-3-vkuznets@redhat.com>
-Patchwork-id: 79659
-O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname
-Bugzilla: 1568717
-RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
-RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-
-The right way to set hostname in RHEL7 is:
-
- $ hostnamectl set-hostname HOSTNAME
-
-DataSourceAzure, however, uses:
- $ hostname HOSTSNAME
-
-instead and this causes problems. We can't simply change
-'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used
-for both getting and setting the hostname.
-
-Long term, this should be fixed in a different way. Cloud-init
-has distro-specific hostname setting/getting (see
-cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched
-to use these.
-
-Resolves: rhbz#1434109
-
-X-downstream-only: yes
-
-Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/sources/DataSourceAzure.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
-index 0ee622e..23b4d53 100644
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -228,7 +228,7 @@ def get_hostname(hostname_command='hostname'):
- 
- 
- def set_hostname(hostname, hostname_command='hostname'):
--    util.subp([hostname_command, hostname])
-+    util.subp(['hostnamectl', 'set-hostname', str(hostname)])
- 
- 
- @contextlib.contextmanager
--- 
-1.8.3.1
-
diff --git a/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch b/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch
new file mode 100644
index 0000000..be1644e
--- /dev/null
+++ b/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch
@@ -0,0 +1,217 @@
+From 86bd1e20fc802edfb920fa53bd611d469f83250b Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Fri, 18 Jan 2019 16:55:36 +0100
+Subject: net: Make sysconfig renderer compatible with Network Manager.
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190118165536.25963-1-otubo@redhat.com>
+Patchwork-id: 84052
+O-Subject: [RHEL-8.0 cloud-init PATCH] net: Make sysconfig renderer compatible with Network Manager.
+Bugzilla: 1602784
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1602784
+Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=19877292
+Tested by: upstream maintainers and me
+
+commit 3861102fcaf47a882516d8b6daab518308eb3086
+Author: Eduardo Otubo <otubo@redhat.com>
+Date:   Fri Jan 18 15:36:19 2019 +0000
+
+    net: Make sysconfig renderer compatible with Network Manager.
+
+    The 'sysconfig' renderer is activated if, and only if, there are ifup
+    and ifdown commands present in its search dictionary or the
+    network-scripts configuration files are found. This patch adds a check
+    for the Network-Manager configuration file as well.
+
+    This solution is based on the use of the plugin 'ifcfg-rh' present in
+    Network-Manager and is designed to support Fedora 29 or other
+    distributions that also replaced network-scripts with Network-Manager.
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/net/sysconfig.py  | 36 +++++++++++++++++++
+ tests/unittests/test_net.py | 71 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 107 insertions(+)
+
+diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
+index ae0554ef..dc1815d9 100644
+--- a/cloudinit/net/sysconfig.py
++++ b/cloudinit/net/sysconfig.py
+@@ -10,11 +10,14 @@ from cloudinit.distros.parsers import resolv_conf
+ from cloudinit import log as logging
+ from cloudinit import util
+ 
++from configobj import ConfigObj
++
+ from . import renderer
+ from .network_state import (
+     is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6)
+ 
+ LOG = logging.getLogger(__name__)
++NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
+ 
+ 
+ def _make_header(sep='#'):
+@@ -46,6 +49,24 @@ def _quote_value(value):
+         return value
+ 
+ 
++def enable_ifcfg_rh(path):
++    """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
++    config = ConfigObj(path)
++    if 'main' in config:
++        if 'plugins' in config['main']:
++            if 'ifcfg-rh' in config['main']['plugins']:
++                return
++        else:
++            config['main']['plugins'] = []
++
++        if isinstance(config['main']['plugins'], list):
++            config['main']['plugins'].append('ifcfg-rh')
++        else:
++            config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh']
++        config.write()
++        LOG.debug('Enabled ifcfg-rh NetworkManager plugins')
++
++
+ class ConfigMap(object):
+     """Sysconfig like dictionary object."""
+ 
+@@ -656,6 +677,8 @@ class Renderer(renderer.Renderer):
+             netrules_content = self._render_persistent_net(network_state)
+             netrules_path = util.target_path(target, self.netrules_path)
+             util.write_file(netrules_path, netrules_content, file_mode)
++        if available_nm(target=target):
++            enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
+ 
+         sysconfig_path = util.target_path(target, templates.get('control'))
+         # Distros configuring /etc/sysconfig/network as a file e.g. Centos
+@@ -670,6 +693,13 @@ class Renderer(renderer.Renderer):
+ 
+ 
+ def available(target=None):
++    sysconfig = available_sysconfig(target=target)
++    nm = available_nm(target=target)
++
++    return any([nm, sysconfig])
++
++
++def available_sysconfig(target=None):
+     expected = ['ifup', 'ifdown']
+     search = ['/sbin', '/usr/sbin']
+     for p in expected:
+@@ -685,4 +715,10 @@ def available(target=None):
+     return True
+ 
+ 
++def available_nm(target=None):
++    if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
++        return False
++    return True
++
++
+ # vi: ts=4 expandtab
+diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
+index 8bcafe08..526a30ed 100644
+--- a/tests/unittests/test_net.py
++++ b/tests/unittests/test_net.py
+@@ -22,6 +22,7 @@ import os
+ import textwrap
+ import yaml
+ 
++
+ DHCP_CONTENT_1 = """
+ DEVICE='eth0'
+ PROTO='dhcp'
+@@ -1854,6 +1855,7 @@ class TestRhelSysConfigRendering(CiTestCase):
+ 
+     with_logs = True
+ 
++    nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf"
+     scripts_dir = '/etc/sysconfig/network-scripts'
+     header = ('# Created by cloud-init on instance boot automatically, '
+               'do not edit.\n#\n')
+@@ -2497,6 +2499,75 @@ iface eth0 inet dhcp
+         self.assertEqual(
+             expected, dir2dict(tmp_dir)['/etc/network/interfaces'])
+ 
++    def test_check_ifcfg_rh(self):
++        """ifcfg-rh plugin is added NetworkManager.conf if conf present."""
++        render_dir = self.tmp_dir()
++        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
++        util.ensure_dir(os.path.dirname(nm_cfg))
++
++        # write a template nm.conf, note plugins is a list here
++        with open(nm_cfg, 'w') as fh:
++            fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n')
++        self.assertTrue(os.path.exists(nm_cfg))
++
++        # render and read
++        entry = NETWORK_CONFIGS['small']
++        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
++                                      dir=render_dir)
++        self._compare_files_to_expected(entry[self.expected_name], found)
++        self._assert_headers(found)
++
++        # check ifcfg-rh is in the 'plugins' list
++        config = sysconfig.ConfigObj(nm_cfg)
++        self.assertIn('ifcfg-rh', config['main']['plugins'])
++
++    def test_check_ifcfg_rh_plugins_string(self):
++        """ifcfg-rh plugin is append when plugins is a string."""
++        render_dir = self.tmp_path("render")
++        os.makedirs(render_dir)
++        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
++        util.ensure_dir(os.path.dirname(nm_cfg))
++
++        # write a template nm.conf, note plugins is a value here
++        util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n')
++
++        # render and read
++        entry = NETWORK_CONFIGS['small']
++        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
++                                      dir=render_dir)
++        self._compare_files_to_expected(entry[self.expected_name], found)
++        self._assert_headers(found)
++
++        # check raw content has plugin
++        nm_file_content = util.load_file(nm_cfg)
++        self.assertIn('ifcfg-rh', nm_file_content)
++
++        # check ifcfg-rh is in the 'plugins' list
++        config = sysconfig.ConfigObj(nm_cfg)
++        self.assertIn('ifcfg-rh', config['main']['plugins'])
++
++    def test_check_ifcfg_rh_plugins_no_plugins(self):
++        """enable_ifcfg_plugin creates plugins value if missing."""
++        render_dir = self.tmp_path("render")
++        os.makedirs(render_dir)
++        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
++        util.ensure_dir(os.path.dirname(nm_cfg))
++
++        # write a template nm.conf, note plugins is missing
++        util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n')
++        self.assertTrue(os.path.exists(nm_cfg))
++
++        # render and read
++        entry = NETWORK_CONFIGS['small']
++        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
++                                      dir=render_dir)
++        self._compare_files_to_expected(entry[self.expected_name], found)
++        self._assert_headers(found)
++
++        # check ifcfg-rh is in the 'plugins' list
++        config = sysconfig.ConfigObj(nm_cfg)
++        self.assertIn('ifcfg-rh', config['main']['plugins'])
++
+ 
+ class TestNetplanNetRendering(CiTestCase):
+ 
+-- 
+2.20.1
+
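The string-vs-list branching in enable_ifcfg_rh() above mirrors how configobj parses values: a 'plugins=foo,bar' line is returned as a Python list, while a bare 'plugins=foo' comes back as a plain string. A minimal standalone sketch of that behavior, assuming a NetworkManager.conf that already exists and has a [main] section (the file name and contents here are illustrative, not part of the patch):

    from configobj import ConfigObj

    # configobj parses 'plugins=foo,bar' to ['foo', 'bar'] and
    # 'plugins=foo' to the bare string 'foo'.
    config = ConfigObj('NetworkManager.conf')
    plugins = config['main'].get('plugins', [])
    if isinstance(plugins, list):
        plugins.append('ifcfg-rh')
    else:
        # promote the bare string to a list before appending
        plugins = [plugins, 'ifcfg-rh']
    config['main']['plugins'] = plugins
    config.write()  # rewrites NetworkManager.conf in place
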
diff --git a/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch b/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch
new file mode 100644
index 0000000..41ba44b
--- /dev/null
+++ b/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch
@@ -0,0 +1,296 @@
+From 2e070086275341dfceb6d5b1e12f06f22e7bbfcd Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 23 Jan 2019 12:30:21 +0100
+Subject: net: Wait for dhclient to daemonize before reading lease file
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190123123021.32708-1-otubo@redhat.com>
+Patchwork-id: 84095
+O-Subject: [RHEL-7.7 cloud-init PATCH] net: Wait for dhclient to daemonize before reading lease file
+Bugzilla: 1632967
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1632967
+Brew: https://bugzilla.redhat.com/show_bug.cgi?id=1632967
+Tested: Me and upstream
+
+commit fdadcb5fae51f4e6799314ab98e3aec56c79b17c
+Author: Jason Zions <jasonzio@microsoft.com>
+Date:   Tue Jan 15 21:37:17 2019 +0000
+
+    net: Wait for dhclient to daemonize before reading lease file
+
+    cloud-init uses dhclient to fetch the DHCP lease so it can extract
+    DHCP options.  dhclient creates the leasefile, then writes to it;
+    simply waiting for the leasefile to appear creates a race between
+    dhclient and cloud-init. Instead, wait for dhclient to be parented by
+    init. At that point, we know it has written to the leasefile, so it's
+    safe to copy the file and kill the process.
+
+    cloud-init creates a temporary directory in which to execute dhclient,
+    and deletes that directory after it has killed the process. If
+    cloud-init abandons waiting for dhclient to daemonize, it will still
+    attempt to delete the temporary directory, but will not report an
+    exception should that attempt fail.
+
+    LP: #1794399
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/net/dhcp.py              | 44 +++++++++++++++++++++---------
+ cloudinit/net/tests/test_dhcp.py   | 15 ++++++++--
+ cloudinit/temp_utils.py            |  4 +--
+ cloudinit/tests/test_temp_utils.py | 18 +++++++++++-
+ cloudinit/util.py                  | 16 ++++++++++-
+ tests/unittests/test_util.py       |  6 ++++
+ 6 files changed, 83 insertions(+), 20 deletions(-)
+
+diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
+index 0db991db..c98a97cd 100644
+--- a/cloudinit/net/dhcp.py
++++ b/cloudinit/net/dhcp.py
+@@ -9,6 +9,7 @@ import logging
+ import os
+ import re
+ import signal
++import time
+ 
+ from cloudinit.net import (
+     EphemeralIPv4Network, find_fallback_nic, get_devicelist,
+@@ -127,7 +128,9 @@ def maybe_perform_dhcp_discovery(nic=None):
+     if not dhclient_path:
+         LOG.debug('Skip dhclient configuration: No dhclient command found.')
+         return []
+-    with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir:
++    with temp_utils.tempdir(rmtree_ignore_errors=True,
++                            prefix='cloud-init-dhcp-',
++                            needs_exe=True) as tdir:
+         # Use /var/tmp because /run/cloud-init/tmp is mounted noexec
+         return dhcp_discovery(dhclient_path, nic, tdir)
+ 
+@@ -195,24 +198,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
+            '-pf', pid_file, interface, '-sf', '/bin/true']
+     util.subp(cmd, capture=True)
+ 
+-    # dhclient doesn't write a pid file until after it forks when it gets a
+-    # proper lease response. Since cleandir is a temp directory that gets
+-    # removed, we need to wait for that pidfile creation before the
+-    # cleandir is removed, otherwise we get FileNotFound errors.
++    # Wait for pid file and lease file to appear, and for the process
++    # named by the pid file to daemonize (have pid 1 as its parent). If we
++    # try to read the lease file before daemonization happens, we might try
++    # to read it before the dhclient has actually written it. We also have
++    # to wait until the dhclient has become a daemon so we can be sure to
++    # kill the correct process, thus freeing cleandir to be deleted back
++    # up the callstack.
+     missing = util.wait_for_files(
+         [pid_file, lease_file], maxwait=5, naplen=0.01)
+     if missing:
+         LOG.warning("dhclient did not produce expected files: %s",
+                     ', '.join(os.path.basename(f) for f in missing))
+         return []
+-    pid_content = util.load_file(pid_file).strip()
+-    try:
+-        pid = int(pid_content)
+-    except ValueError:
+-        LOG.debug(
+-            "pid file contains non-integer content '%s'", pid_content)
+-    else:
+-        os.kill(pid, signal.SIGKILL)
++
++    ppid = 'unknown'
++    for _ in range(0, 1000):
++        pid_content = util.load_file(pid_file).strip()
++        try:
++            pid = int(pid_content)
++        except ValueError:
++            pass
++        else:
++            ppid = util.get_proc_ppid(pid)
++            if ppid == 1:
++                LOG.debug('killing dhclient with pid=%s', pid)
++                os.kill(pid, signal.SIGKILL)
++                return parse_dhcp_lease_file(lease_file)
++        time.sleep(0.01)
++
++    LOG.error(
++        'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
++        pid_content, ppid, 0.01 * 1000
++    )
+     return parse_dhcp_lease_file(lease_file)
+ 
+ 
+diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
+index cd3e7328..79e8842f 100644
+--- a/cloudinit/net/tests/test_dhcp.py
++++ b/cloudinit/net/tests/test_dhcp.py
+@@ -145,16 +145,20 @@ class TestDHCPDiscoveryClean(CiTestCase):
+               'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
+             dhcp_discovery(dhclient_script, 'eth9', tmpdir))
+         self.assertIn(
+-            "pid file contains non-integer content ''", self.logs.getvalue())
++            "dhclient(pid=, parentpid=unknown) failed "
++            "to daemonize after 10.0 seconds",
++            self.logs.getvalue())
+         m_kill.assert_not_called()
+ 
++    @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+     @mock.patch('cloudinit.net.dhcp.os.kill')
+     @mock.patch('cloudinit.net.dhcp.util.wait_for_files')
+     @mock.patch('cloudinit.net.dhcp.util.subp')
+     def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
+                                                                   m_subp,
+                                                                   m_wait,
+-                                                                  m_kill):
++                                                                  m_kill,
++                                                                  m_getppid):
+         """dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+         tmpdir = self.tmp_dir()
+         dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+@@ -164,6 +168,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
+         pidfile = self.tmp_path('dhclient.pid', tmpdir)
+         leasefile = self.tmp_path('dhcp.leases', tmpdir)
+         m_wait.return_value = [pidfile]  # Return the missing pidfile wait for
++        m_getppid.return_value = 1  # Indicate that dhclient has daemonized
+         self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir))
+         self.assertEqual(
+             mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
+@@ -173,9 +178,10 @@ class TestDHCPDiscoveryClean(CiTestCase):
+             self.logs.getvalue())
+         m_kill.assert_not_called()
+ 
++    @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+     @mock.patch('cloudinit.net.dhcp.os.kill')
+     @mock.patch('cloudinit.net.dhcp.util.subp')
+-    def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill):
++    def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
+         """dhcp_discovery brings up the interface and runs dhclient.
+ 
+         It also returns the parsed dhcp.leases file generated in the sandbox.
+@@ -197,6 +203,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
+         pid_file = os.path.join(tmpdir, 'dhclient.pid')
+         my_pid = 1
+         write_file(pid_file, "%d\n" % my_pid)
++        m_getppid.return_value = 1  # Indicate that dhclient has daemonized
+ 
+         self.assertItemsEqual(
+             [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
+@@ -355,3 +362,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
+             self.assertEqual(fake_lease, lease)
+         # Ensure that dhcp discovery occurs
+         m_dhcp.called_once_with()
++
++# vi: ts=4 expandtab
+diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
+index c98a1b53..346276ec 100644
+--- a/cloudinit/temp_utils.py
++++ b/cloudinit/temp_utils.py
+@@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs):
+ 
+ 
+ @contextlib.contextmanager
+-def tempdir(**kwargs):
++def tempdir(rmtree_ignore_errors=False, **kwargs):
+     # This seems like it was only added in python 3.2
+     # Make it since its useful...
+     # See: http://bugs.python.org/file12970/tempdir.patch
+@@ -89,7 +89,7 @@ def tempdir(**kwargs):
+     try:
+         yield tdir
+     finally:
+-        shutil.rmtree(tdir)
++        shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors)
+ 
+ 
+ def mkdtemp(**kwargs):
+diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py
+index ffbb92cd..4a52ef89 100644
+--- a/cloudinit/tests/test_temp_utils.py
++++ b/cloudinit/tests/test_temp_utils.py
+@@ -2,8 +2,9 @@
+ 
+ """Tests for cloudinit.temp_utils"""
+ 
+-from cloudinit.temp_utils import mkdtemp, mkstemp
++from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
+ from cloudinit.tests.helpers import CiTestCase, wrap_and_call
++import os
+ 
+ 
+ class TestTempUtils(CiTestCase):
+@@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase):
+         self.assertEqual('/fake/return/path', retval)
+         self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+ 
++    def test_tempdir_error_suppression(self):
++        """test tempdir suppresses errors during directory removal."""
++
++        with self.assertRaises(OSError):
++            with tempdir(prefix='cloud-init-dhcp-') as tdir:
++                os.rmdir(tdir)
++                # As a result, the directory is already gone,
++                # so shutil.rmtree should raise OSError
++
++        with tempdir(rmtree_ignore_errors=True,
++                     prefix='cloud-init-dhcp-') as tdir:
++            os.rmdir(tdir)
++            # Since the directory is already gone, shutil.rmtree would raise
++            # OSError, but we suppress that
++
+ # vi: ts=4 expandtab
+diff --git a/cloudinit/util.py b/cloudinit/util.py
+index 7800f7bc..a84112a9 100644
+--- a/cloudinit/util.py
++++ b/cloudinit/util.py
+@@ -2861,7 +2861,6 @@ def mount_is_read_write(mount_point):
+     mount_opts = result[-1].split(',')
+     return mount_opts[0] == 'rw'
+ 
+-
+ def udevadm_settle(exists=None, timeout=None):
+     """Invoke udevadm settle with optional exists and timeout parameters"""
+     settle_cmd = ["udevadm", "settle"]
+@@ -2875,5 +2874,20 @@ def udevadm_settle(exists=None, timeout=None):
+ 
+     return subp(settle_cmd)
+ 
++def get_proc_ppid(pid):
++    """
++    Return the parent pid of a process.
++    """
++    ppid = 0
++    try:
++        contents = load_file("/proc/%s/stat" % pid, quiet=True)
++    except IOError as e:
++        LOG.warning('Failed to load /proc/%s/stat. %s', pid, e)
++    if contents:
++        parts = contents.split(" ", 4)
++        # man proc says
++        #  ppid %d     (4) The PID of the parent.
++        ppid = int(parts[3])
++    return ppid
+ 
+ # vi: ts=4 expandtab
+diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
+index 5a14479a..8aebcd62 100644
+--- a/tests/unittests/test_util.py
++++ b/tests/unittests/test_util.py
+@@ -1114,6 +1114,12 @@ class TestLoadShellContent(helpers.TestCase):
+                 'key3="val3 #tricky"',
+                 ''])))
+ 
++    def test_get_proc_ppid(self):
++        """get_proc_ppid returns correct parent pid value."""
++        my_pid = os.getpid()
++        my_ppid = os.getppid()
++        self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
++
+ 
+ class TestGetProcEnv(helpers.TestCase):
+     """test get_proc_env."""
+-- 
+2.20.1
+
diff --git a/SOURCES/0009-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch b/SOURCES/0009-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
deleted file mode 100644
index 6562738..0000000
--- a/SOURCES/0009-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 643e0ba0cf8b4a28e83c2a4db7d93ec423b24a02 Mon Sep 17 00:00:00 2001
-From: Vitaly Kuznetsov <vkuznets@redhat.com>
-Date: Thu, 26 Apr 2018 09:27:49 +0200
-Subject: sysconfig: Don't disable IPV6_AUTOCONF
-
-RH-Author: Vitaly Kuznetsov <vkuznets@redhat.com>
-Message-id: <20180426092749.7251-2-vkuznets@redhat.com>
-Patchwork-id: 79904
-O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 1/1] sysconfig: Don't disable IPV6_AUTOCONF
-Bugzilla: 1578702
-RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
-
-Downstream-only commit 118458a3fb ("sysconfig: Don't write BOOTPROTO=dhcp
-for ipv6 dhcp") did two things:
-1) Disabled BOOTPROTO='dhcp' for dhcp6 setups. This change seems to be
-   correct as BOOTPROTO is unrelated to IPv6. The change was since merged
-   upstream (commit a57928d3c314d9568712cd190cb1e721e14c108b).
-2) Explicitly disabled AUTOCONF and this broke many valid configurations
-   using it instead of DHCPV6C. Revert this part of the change. In case
-   DHCPV6C-only support is needed something like a new 'dhcpv6c_only'
-   network type needs to be suggested upstream.
-
-X-downstream-only: yes
-
-Resolves: rhbz#1558854
-
-Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/net/sysconfig.py | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
-index f870b0f..bd81832 100644
---- a/cloudinit/net/sysconfig.py
-+++ b/cloudinit/net/sysconfig.py
-@@ -286,7 +286,6 @@ class Renderer(renderer.Renderer):
-             if subnet_type == 'dhcp6':
-                 iface_cfg['IPV6INIT'] = True
-                 iface_cfg['DHCPV6C'] = True
--                iface_cfg['IPV6_AUTOCONF'] = False
-             elif subnet_type in ['dhcp4', 'dhcp']:
-                 iface_cfg['BOOTPROTO'] = 'dhcp'
-             elif subnet_type == 'static':
--- 
-1.8.3.1
-
diff --git a/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch b/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch
new file mode 100644
index 0000000..f356066
--- /dev/null
+++ b/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch
@@ -0,0 +1,90 @@
+From 8a3bf53398f312b46ed4f304df4c66d061e612c7 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Thu, 28 Feb 2019 12:38:36 +0100
+Subject: cloud-init-per: don't use dashes in sem names
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190228123836.17979-1-otubo@redhat.com>
+Patchwork-id: 84743
+O-Subject: [RHEL-7.7 cloud-init PATCH] This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676
+Bugzilla: 1664876
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+    It was found that when there is a dash in a cloud-init-per command
+    name and cloud-init-per is executed through cloud-init's bootcmd, e.g.:
+
+    bootcmd:
+     - cloud-init-per instance mycmd-bootcmd /usr/bin/mycmd
+
+    the command is executed on each boot. However, running the same
+    cloud-init-per command manually after boot doesn't reveal the issue. It
+    turns out the issue comes from the 'migrator' cloud-init module, which
+    renames all files in /var/lib/cloud/instance/sem/, replacing dashes with
+    underscores. As migrator runs before bootcmd, it renames
+
+    /var/lib/cloud/instance/sem/bootper.mycmd-bootcmd.instance
+    to
+    /var/lib/cloud/instance/sem/bootper.mycmd_bootcmd.instance
+
+    so cloud-init-per doesn't see it and thinks that the command was never
+    run before. On the next boot the sequence repeats.
+
+    There are multiple ways to resolve the issue. This patch takes the
+    following approach: 'canonicalize' sem names by replacing dashes with
+    underscores (this is consistent with post-'migrator' contents of
+    /var/lib/cloud/instance/sem/). We do, however, need to be careful: if
+    someone previously had a command with dashes and had the migrator module
+    enabled, we need to see the old sem file (or the command will run again,
+    which can be as bad as formatting a partition!), so we add a small
+    'migrator' part to the cloud-init-per script itself that checks for
+    legacy sem names.
+
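+    Illustration only, not part of this patch: the canonicalization plus
+    the legacy fallback amount to the following logic. A minimal Python
+    sketch, assuming sem files named bootper.NAME.FREQ under sem_dir:
+
+        import os
+
+        def sem_path(sem_dir, freq, name):
+            canonical = name.replace('-', '_')  # post-migrator form
+            new = os.path.join(sem_dir, 'bootper.%s.%s' % (canonical, freq))
+            legacy = os.path.join(sem_dir, 'bootper.%s.%s' % (name, freq))
+            # adopt a pre-migrator sem file, never clobber an existing one
+            if legacy != new and os.path.exists(legacy) \
+                    and not os.path.exists(new):
+                os.rename(legacy, new)
+            return new
+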
+    Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 9cf9d8cdd3a8fd7d4d425f7051122d0ac8af2bbd
+Author: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date:   Mon Feb 18 22:55:49 2019 +0000
+
+    This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676
+
+Resolves: rhbz#1664876
+X-downstream-only: false
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ tools/cloud-init-per | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/tools/cloud-init-per b/tools/cloud-init-per
+index 7d6754b6..eae3e93f 100755
+--- a/tools/cloud-init-per
++++ b/tools/cloud-init-per
+@@ -38,7 +38,7 @@ fi
+ [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; }
+ [ $# -ge 3 ] || { Usage 1>&2; exit 1; }
+ freq=$1
+-name=$2
++name=${2/-/_}
+ shift 2;
+ 
+ [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /"
+@@ -53,6 +53,12 @@ esac
+ [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" ||
+    fail "failed to make directory for ${sem}"
+ 
++# Rename legacy sem files with dashes in their names. Do not overwrite existing
++# sem files to prevent clobbering those which may have been created from calls
++# outside of cloud-init.
++sem_legacy="${sem/_/-}"
++[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem"
++
+ [ "$freq" != "always" -a -e "$sem" ] && exit 0
+ "$@"
+ ret=$?
+-- 
+2.20.1
+
diff --git a/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch b/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch
new file mode 100644
index 0000000..bb63c25
--- /dev/null
+++ b/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch
@@ -0,0 +1,572 @@
+From 8e168f17b0c138d589f7b3bea4a4b6fcc8e5e03f Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 6 Mar 2019 14:20:18 +0100
+Subject: azure: Filter list of ssh keys pulled from fabric
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190306142018.8902-1-otubo@redhat.com>
+Patchwork-id: 84807
+O-Subject: [RHEL-7.7 cloud-init PATCH] azure: Filter list of ssh keys pulled from fabric
+Bugzilla: 1684040
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+From: "Jason Zions (MSFT)" <jasonzio@microsoft.com>
+
+commit 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee
+Author: Jason Zions (MSFT) <jasonzio@microsoft.com>
+Date:   Fri Feb 22 13:26:31 2019 +0000
+
+    azure: Filter list of ssh keys pulled from fabric
+
+    The Azure data source is expected to expose a list of
+    ssh keys for the user-to-be-provisioned in the crawled
+    metadata. When configured to use the __builtin__ agent
+    this list is built by the WALinuxAgentShim. The shim
+    retrieves the full set of certificates and public keys
+    exposed to the VM from the wireserver, extracts any
+    ssh keys it can, and returns that list.
+
+    This fix reduces that list of ssh keys to just the
+    ones whose fingerprints appear in the "administrative
+    user" section of the ovf-env.xml file. The Azure
+    control plane exposes other ssh keys to the VM for
+    other reasons, but those should not be added to the
+    authorized_keys file for the provisioned user.
+
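+    Illustration only, not part of this patch: the selection step reduces
+    to a lookup of the ovf-env.xml fingerprints against the keys parsed
+    from the wireserver certificates. A minimal Python sketch:
+
+        def filter_pubkeys(keys_by_fingerprint, pubkey_info):
+            # keys_by_fingerprint: {fingerprint: ssh_key} from the certs
+            # pubkey_info: dicts with optional 'fingerprint' and 'value'
+            keys = []
+            for pubkey in pubkey_info:
+                if pubkey.get('value'):
+                    keys.append(pubkey['value'])  # literal key wins
+                elif pubkey.get('fingerprint') in keys_by_fingerprint:
+                    keys.append(keys_by_fingerprint[pubkey['fingerprint']])
+            return keys
+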
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py          |  13 +-
+ cloudinit/sources/helpers/azure.py            | 109 +++++++++----
+ .../azure/parse_certificates_fingerprints     |   4 +
+ tests/data/azure/parse_certificates_pem       | 152 ++++++++++++++++++
+ tests/data/azure/pubkey_extract_cert          |  13 ++
+ tests/data/azure/pubkey_extract_ssh_key       |   1 +
+ .../test_datasource/test_azure_helper.py      |  71 +++++++-
+ 7 files changed, 322 insertions(+), 41 deletions(-)
+ create mode 100644 tests/data/azure/parse_certificates_fingerprints
+ create mode 100644 tests/data/azure/parse_certificates_pem
+ create mode 100644 tests/data/azure/pubkey_extract_cert
+ create mode 100644 tests/data/azure/pubkey_extract_ssh_key
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 7dbeb04c..2062ca5d 100644
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource):
+         if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
+             self.bounce_network_with_azure_hostname()
+ 
++            pubkey_info = self.cfg.get('_pubkeys', None)
+             metadata_func = partial(get_metadata_from_fabric,
+                                     fallback_lease_file=self.
+-                                    dhclient_lease_file)
++                                    dhclient_lease_file,
++                                    pubkey_info=pubkey_info)
+         else:
+             metadata_func = self.get_metadata_from_agent
+ 
+@@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource):
+                 "Error communicating with Azure fabric; You may experience."
+                 "connectivity issues.", exc_info=True)
+             return False
++
+         util.del_file(REPORTED_READY_MARKER_FILE)
+         util.del_file(REPROVISION_MARKER_FILE)
+         return fabric_data
+@@ -909,13 +912,15 @@ def find_child(node, filter_func):
+ def load_azure_ovf_pubkeys(sshnode):
+     # This parses a 'SSH' node formatted like below, and returns
+     # an array of dicts.
+-    #  [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
+-    #    'path': 'where/to/go'}]
++    #  [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
++    #    'path': '/where/to/go'}]
+     #
+     # <SSH><PublicKeys>
+-    #   <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path>
++    #   <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path>
+     #   ...
+     # </PublicKeys></SSH>
++    # Under some circumstances, there may be a <Value> element along with the
++    # Fingerprint and Path. Pass those along if they appear.
+     results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
+     if len(results) == 0:
+         return []
+diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
+index e5696b1f..2829dd20 100644
+--- a/cloudinit/sources/helpers/azure.py
++++ b/cloudinit/sources/helpers/azure.py
+@@ -138,9 +138,36 @@ class OpenSSLManager(object):
+             self.certificate = certificate
+         LOG.debug('New certificate generated.')
+ 
+-    def parse_certificates(self, certificates_xml):
+-        tag = ElementTree.fromstring(certificates_xml).find(
+-            './/Data')
++    @staticmethod
++    def _run_x509_action(action, cert):
++        cmd = ['openssl', 'x509', '-noout', action]
++        result, _ = util.subp(cmd, data=cert)
++        return result
++
++    def _get_ssh_key_from_cert(self, certificate):
++        pub_key = self._run_x509_action('-pubkey', certificate)
++        keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
++        ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
++        return ssh_key
++
++    def _get_fingerprint_from_cert(self, certificate):
++        """openssl x509 formats fingerprints as so:
++        'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
++        B6:A8:BF:27:D4:73\n'
++
++        Azure control plane passes that fingerprint as so:
++        '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
++        """
++        raw_fp = self._run_x509_action('-fingerprint', certificate)
++        eq = raw_fp.find('=')
++        octets = raw_fp[eq+1:-1].split(':')
++        return ''.join(octets)
++
++    def _decrypt_certs_from_xml(self, certificates_xml):
++        """Decrypt the certificates XML document using the our private key;
++           return the list of certs and private keys contained in the doc.
++        """
++        tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+         certificates_content = tag.text
+         lines = [
+             b'MIME-Version: 1.0',
+@@ -151,32 +178,30 @@ class OpenSSLManager(object):
+             certificates_content.encode('utf-8'),
+         ]
+         with cd(self.tmpdir):
+-            with open('Certificates.p7m', 'wb') as f:
+-                f.write(b'\n'.join(lines))
+             out, _ = util.subp(
+-                'openssl cms -decrypt -in Certificates.p7m -inkey'
++                'openssl cms -decrypt -in /dev/stdin -inkey'
+                 ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+                 ' -password pass:'.format(**self.certificate_names),
+-                shell=True)
+-        private_keys, certificates = [], []
++                shell=True, data=b'\n'.join(lines))
++        return out
++
++    def parse_certificates(self, certificates_xml):
++        """Given the Certificates XML document, return a dictionary of
++           fingerprints and associated SSH keys derived from the certs."""
++        out = self._decrypt_certs_from_xml(certificates_xml)
+         current = []
++        keys = {}
+         for line in out.splitlines():
+             current.append(line)
+             if re.match(r'[-]+END .*?KEY[-]+$', line):
+-                private_keys.append('\n'.join(current))
++                # ignore private_keys
+                 current = []
+             elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+-                certificates.append('\n'.join(current))
++                certificate = '\n'.join(current)
++                ssh_key = self._get_ssh_key_from_cert(certificate)
++                fingerprint = self._get_fingerprint_from_cert(certificate)
++                keys[fingerprint] = ssh_key
+                 current = []
+-        keys = []
+-        for certificate in certificates:
+-            with cd(self.tmpdir):
+-                public_key, _ = util.subp(
+-                    'openssl x509 -noout -pubkey |'
+-                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+-                    data=certificate,
+-                    shell=True)
+-            keys.append(public_key)
+         return keys
+ 
+ 
+@@ -206,7 +231,6 @@ class WALinuxAgentShim(object):
+         self.dhcpoptions = dhcp_options
+         self._endpoint = None
+         self.openssl_manager = None
+-        self.values = {}
+         self.lease_file = fallback_lease_file
+ 
+     def clean_up(self):
+@@ -328,8 +352,9 @@ class WALinuxAgentShim(object):
+         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+         return endpoint_ip_address
+ 
+-    def register_with_azure_and_fetch_data(self):
+-        self.openssl_manager = OpenSSLManager()
++    def register_with_azure_and_fetch_data(self, pubkey_info=None):
++        if self.openssl_manager is None:
++            self.openssl_manager = OpenSSLManager()
+         http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+         LOG.info('Registering with Azure...')
+         attempts = 0
+@@ -347,16 +372,37 @@ class WALinuxAgentShim(object):
+             attempts += 1
+         LOG.debug('Successfully fetched GoalState XML.')
+         goal_state = GoalState(response.contents, http_client)
+-        public_keys = []
+-        if goal_state.certificates_xml is not None:
++        ssh_keys = []
++        if goal_state.certificates_xml is not None and pubkey_info is not None:
+             LOG.debug('Certificate XML found; parsing out public keys.')
+-            public_keys = self.openssl_manager.parse_certificates(
++            keys_by_fingerprint = self.openssl_manager.parse_certificates(
+                 goal_state.certificates_xml)
+-        data = {
+-            'public-keys': public_keys,
+-        }
++            ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
+         self._report_ready(goal_state, http_client)
+-        return data
++        return {'public-keys': ssh_keys}
++
++    def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
++        """cloud-init expects a straightforward array of keys to be dropped
++           into the user's authorized_keys file. Azure control plane exposes
++           multiple public keys to the VM via wireserver. Select just the
++           user's key(s) and return them, ignoring any other certs.
++        """
++        keys = []
++        for pubkey in pubkey_info:
++            if 'value' in pubkey and pubkey['value']:
++                keys.append(pubkey['value'])
++            elif 'fingerprint' in pubkey and pubkey['fingerprint']:
++                fingerprint = pubkey['fingerprint']
++                if fingerprint in keys_by_fingerprint:
++                    keys.append(keys_by_fingerprint[fingerprint])
++                else:
++                    LOG.warning("ovf-env.xml specified PublicKey fingerprint "
++                                "%s not found in goalstate XML", fingerprint)
++            else:
++                LOG.warning("ovf-env.xml specified PublicKey with neither "
++                            "value nor fingerprint: %s", pubkey)
++
++        return keys
+ 
+     def _report_ready(self, goal_state, http_client):
+         LOG.debug('Reporting ready to Azure fabric.')
+@@ -373,11 +419,12 @@ class WALinuxAgentShim(object):
+         LOG.info('Reported ready to Azure fabric.')
+ 
+ 
+-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None):
++def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
++                             pubkey_info=None):
+     shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
+                             dhcp_options=dhcp_opts)
+     try:
+-        return shim.register_with_azure_and_fetch_data()
++        return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
+     finally:
+         shim.clean_up()
+ 
+diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints
+new file mode 100644
+index 00000000..f7293c56
+--- /dev/null
++++ b/tests/data/azure/parse_certificates_fingerprints
+@@ -0,0 +1,4 @@
++ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1
++073E19D14D1C799224C6A0FD8DDAB6A8BF27D473
++4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E
++929130695289B450FE45DCD5F6EF0CDE69865867
+diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem
+new file mode 100644
+index 00000000..3521ea3a
+--- /dev/null
++++ b/tests/data/azure/parse_certificates_pem
+@@ -0,0 +1,152 @@
++Bag Attributes
++    localKeyID: 01 00 00 00
++    Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0
++Key Attributes
++    X509v3 Key Usage: 10
++-----BEGIN PRIVATE KEY-----
++MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP
++W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6
++61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz
++eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5
++7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ
++47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L
++Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT
++nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5
++lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn
++C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb
++EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG
++x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh
+++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU
++cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH
++gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X
++I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB
++lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8
++v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed
++Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId
++0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA
++nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe
++onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG
++WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2
++qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1
++1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt
++RyWd+p2lYvFkC/jORQtDMY4uW1o=
++-----END PRIVATE KEY-----
++Bag Attributes
++    localKeyID: 02 00 00 00
++    Microsoft CSP Name: Microsoft Strong Cryptographic Provider
++Key Attributes
++    X509v3 Key Usage: 10
++-----BEGIN PRIVATE KEY-----
++MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4
++FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd
++x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW
++dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC
++gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA
++N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua
++tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd
++0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn
++giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61
++LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci
++xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh
++2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u
++n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ
++WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+
++R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3
++Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx
++E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz
++MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9
++SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW
++EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9
++8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii
++qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU
++FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17
++dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz
++kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y
++R/fA67HXFSTT+OncdRpY1NOn
++-----END PRIVATE KEY-----
++Bag Attributes: <Empty Attributes>
++subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US
++issuer=/CN=Root Agency
++-----BEGIN CERTIFICATE-----
++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290
++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV
++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv
++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE
++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C
++k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN
++jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe
++eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/
++sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo
++OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT
++bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA=
++-----END CERTIFICATE-----
++Bag Attributes
++    localKeyID: 01 00 00 00
++subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com
++issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com
++-----BEGIN CERTIFICATE-----
++MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD
++VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES
++MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o
++Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0
++MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM
++CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m
++dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB
++FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
++CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg
++ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF
++hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI
++B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi
++quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1
++Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3
++pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw
++DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg
++kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX
++R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF
++im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e
++mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz
++Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP
++3g==
++-----END CERTIFICATE-----
++Bag Attributes
++    localKeyID: 02 00 00 00
++subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted
++issuer=/CN=Microsoft.ManagedIdentity
++-----BEGIN CERTIFICATE-----
++MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL
++BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy
++MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny
++aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz
++b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w
++dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB
++BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN
++2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee
++0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW
++2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw
++tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw
++Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P
++AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD
++VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB
++AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe
++7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b
++7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0
++jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38
++UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC
++pkSoWwF1QAnHn0eokR9E1rU=
++-----END CERTIFICATE-----
++Bag Attributes: <Empty Attributes>
++subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US
++issuer=/CN=Root Agency
++-----BEGIN CERTIFICATE-----
++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290
++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV
++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv
++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE
++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb
++Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi
++nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW
++vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+
++lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y
++WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7
++t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA=
++-----END CERTIFICATE-----
+diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert
+new file mode 100644
+index 00000000..ce9b852d
+--- /dev/null
++++ b/tests/data/azure/pubkey_extract_cert
+@@ -0,0 +1,13 @@
++-----BEGIN CERTIFICATE-----
++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290
++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV
++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv
++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE
++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb
++Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi
++nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW
++vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+
++lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y
++WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7
++t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA=
++-----END CERTIFICATE-----
+diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key
+new file mode 100644
+index 00000000..54d749ed
+--- /dev/null
++++ b/tests/data/azure/pubkey_extract_ssh_key
+@@ -0,0 +1 @@
++ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp
+diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
+index 26b2b93d..02556165 100644
+--- a/tests/unittests/test_datasource/test_azure_helper.py
++++ b/tests/unittests/test_datasource/test_azure_helper.py
+@@ -1,11 +1,13 @@
+ # This file is part of cloud-init. See LICENSE file for license information.
+ 
+ import os
++import unittest2
+ from textwrap import dedent
+ 
+ from cloudinit.sources.helpers import azure as azure_helper
+ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
+ 
++from cloudinit.util import load_file
+ from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+ 
+ GOAL_STATE_TEMPLATE = """\
+@@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase):
+         self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list)
+ 
+ 
++class TestOpenSSLManagerActions(CiTestCase):
++
++    def setUp(self):
++        super(TestOpenSSLManagerActions, self).setUp()
++
++        self.allowed_subp = True
++
++    def _data_file(self, name):
++        path = 'tests/data/azure'
++        return os.path.join(path, name)
++
++    @unittest2.skip("todo move to cloud_test")
++    def test_pubkey_extract(self):
++        cert = load_file(self._data_file('pubkey_extract_cert'))
++        good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
++        sslmgr = azure_helper.OpenSSLManager()
++        key = sslmgr._get_ssh_key_from_cert(cert)
++        self.assertEqual(good_key, key)
++
++        good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
++        fingerprint = sslmgr._get_fingerprint_from_cert(cert)
++        self.assertEqual(good_fingerprint, fingerprint)
++
++    @unittest2.skip("todo move to cloud_test")
++    @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
++    def test_parse_certificates(self, mock_decrypt_certs):
++        """Azure control plane puts private keys as well as certificates
++           into the Certificates XML object. Make sure only the public keys
++           from certs are extracted and that fingerprints are converted to
++           the form specified in the ovf-env.xml file.
++        """
++        cert_contents = load_file(self._data_file('parse_certificates_pem'))
++        fingerprints = load_file(self._data_file(
++            'parse_certificates_fingerprints')
++        ).splitlines()
++        mock_decrypt_certs.return_value = cert_contents
++        sslmgr = azure_helper.OpenSSLManager()
++        keys_by_fp = sslmgr.parse_certificates('')
++        for fp in keys_by_fp.keys():
++            self.assertIn(fp, fingerprints)
++        for fp in fingerprints:
++            self.assertIn(fp, keys_by_fp)
++
++
+ class TestWALinuxAgentShim(CiTestCase):
+ 
+     def setUp(self):
+@@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase):
+ 
+     def test_certificates_used_to_determine_public_keys(self):
+         shim = wa_shim()
+-        data = shim.register_with_azure_and_fetch_data()
++        """if register_with_azure_and_fetch_data() isn't passed some info about
++           the user's public keys, there's no point in even trying to parse
++           the certificates
++        """
++        mypk = [{'fingerprint': 'fp1', 'path': 'path1'},
++                {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}]
++        certs = {'fp1': 'expected-key',
++                 'fp2': 'should-not-be-found',
++                 'fp3': 'expected-no-value-key',
++                 }
++        sslmgr = self.OpenSSLManager.return_value
++        sslmgr.parse_certificates.return_value = certs
++        data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
+         self.assertEqual(
+             [mock.call(self.GoalState.return_value.certificates_xml)],
+-            self.OpenSSLManager.return_value.parse_certificates.call_args_list)
+-        self.assertEqual(
+-            self.OpenSSLManager.return_value.parse_certificates.return_value,
+-            data['public-keys'])
++            sslmgr.parse_certificates.call_args_list)
++        self.assertIn('expected-key', data['public-keys'])
++        self.assertIn('expected-no-value-key', data['public-keys'])
++        self.assertNotIn('should-not-be-found', data['public-keys'])
+ 
+     def test_absent_certificates_produces_empty_public_keys(self):
++        mypk = [{'fingerprint': 'fp1', 'path': 'path1'}]
+         self.GoalState.return_value.certificates_xml = None
+         shim = wa_shim()
+-        data = shim.register_with_azure_and_fetch_data()
++        data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
+         self.assertEqual([], data['public-keys'])
+ 
+     def test_correct_url_used_for_report_ready(self):
+-- 
+2.20.1
+
diff --git a/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch
new file mode 100644
index 0000000..2010bad
--- /dev/null
+++ b/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch
@@ -0,0 +1,66 @@
+From ffabcbbf0d4e990f04ab755dd87bb24e70c4fe78 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 20 Mar 2019 11:45:59 +0100
+Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190320114559.23708-1-otubo@redhat.com>
+Patchwork-id: 84937
+O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network
+Bugzilla: 1653131
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+The option NOZEROCONF=yes, which Overcloud instances require, is not
+included by default in /etc/sysconfig/network. This patch adds it and
+includes tests for the modifications.
+
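+Illustration only, not part of this patch: the renderer regenerates just
+the cloud-init portion of /etc/sysconfig/network and preserves any
+administrator-added lines above it, so the rendered file ends with
+something like:
+
+    # Created by cloud-init on instance boot automatically, do not edit.
+    #
+    NETWORKING=yes
+    NOZEROCONF=yes
+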
+X-downstream-only: yes
+Resolves: rhbz#1653131
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
+---
+ cloudinit/net/sysconfig.py  | 11 ++++++++++-
+ tests/unittests/test_net.py |  1 -
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
+index dc1815d9..52bb8483 100644
+--- a/cloudinit/net/sysconfig.py
++++ b/cloudinit/net/sysconfig.py
+@@ -684,7 +684,16 @@ class Renderer(renderer.Renderer):
+         # Distros configuring /etc/sysconfig/network as a file e.g. Centos
+         if sysconfig_path.endswith('network'):
+             util.ensure_dir(os.path.dirname(sysconfig_path))
+-            netcfg = [_make_header(), 'NETWORKING=yes']
++            netcfg = []
++            for line in util.load_file(sysconfig_path, quiet=True).split('\n'):
++                if 'cloud-init' in line:
++                    break
++                if not line.startswith(('NETWORKING=',
++                                        'IPV6_AUTOCONF=',
++                                        'NETWORKING_IPV6=')):
++                    netcfg.append(line)
++            # Now generate the cloud-init portion of sysconfig/network
++            netcfg.extend([_make_header(), 'NETWORKING=yes'])
+             if network_state.use_ipv6:
+                 netcfg.append('NETWORKING_IPV6=yes')
+                 netcfg.append('IPV6_AUTOCONF=no')
+diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
+index 526a30ed..012c43b5 100644
+--- a/tests/unittests/test_net.py
++++ b/tests/unittests/test_net.py
+@@ -887,7 +887,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+                 BOOTPROTO=none
+                 DEVICE=bond0
+                 DHCPV6C=yes
+-                IPV6_AUTOCONF=no
+                 IPV6INIT=yes
+                 MACADDR=aa:bb:cc:dd:ee:ff
+                 ONBOOT=yes
+-- 
+2.20.1
+
diff --git a/SOURCES/ci-Adding-disk_setup-to-rhel-cloud.cfg.patch b/SOURCES/ci-Adding-disk_setup-to-rhel-cloud.cfg.patch
deleted file mode 100644
index ebf7715..0000000
--- a/SOURCES/ci-Adding-disk_setup-to-rhel-cloud.cfg.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From da4d99e4d4c9b0a6992378009a402d510d99010d Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Fri, 5 Oct 2018 09:53:03 +0200
-Subject: [PATCH 4/4] Adding disk_setup to rhel/cloud.cfg
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20181005095303.20597-5-otubo@redhat.com>
-Patchwork-id: 82387
-O-Subject: [RHEL-8.0 cloud-init PATCH 4/4] Adding disk_setup to rhel/cloud.cfg
-Bugzilla: 1615599
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
-
-When Azure VM is de-allocated and started again its resource disk
-needs to be re-partitioned and a RHEL supported filesystem needs to be
-created on top. Include disk_setup module in the default RHEL config
-which does the job.
-
-X-downstream-only: yes
-Resolves: rhbz#1615599
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- rhel/cloud.cfg | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
-index bb6bc4d..4a73981 100644
---- a/rhel/cloud.cfg
-+++ b/rhel/cloud.cfg
-@@ -11,6 +11,7 @@ ssh_genkeytypes:  ~
- syslog_fix_perms: ~
- 
- cloud_init_modules:
-+ - disk_setup
-  - migrator
-  - bootcmd
-  - write-files
--- 
-1.8.3.1
-
diff --git a/SOURCES/ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch b/SOURCES/ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch
deleted file mode 100644
index 64d86d7..0000000
--- a/SOURCES/ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 764159f648256d3e00aa2e78d2734a6fc89db9ef Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Fri, 5 Oct 2018 09:53:00 +0200
-Subject: [PATCH 1/4] Adding systemd mount options to wait for cloud-init
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20181005095303.20597-2-otubo@redhat.com>
-Patchwork-id: 82384
-O-Subject: [RHEL-8.0 cloud-init PATCH 1/4] Adding systemd mount options to wait for cloud-init
-Bugzilla: 1615599
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
-
-This patch adds systemd mount options to wait for cloud-init. On Azure,
-cloud-init needs to format ephemeral disk before we are able to mount
-it.
-
-X-downstream-only: yes
-Resolves: rhbz#1615599
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- rhel/cloud.cfg | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
-index 8644872..bb6bc4d 100644
---- a/rhel/cloud.cfg
-+++ b/rhel/cloud.cfg
-@@ -4,7 +4,7 @@ users:
- disable_root: 1
- ssh_pwauth:   0
- 
--mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
-+mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2']
- resize_rootfs_tmp: /dev
- ssh_deletekeys:   0
- ssh_genkeytypes:  ~
--- 
-1.8.3.1
-
diff --git a/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch b/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch
new file mode 100644
index 0000000..945df1d
--- /dev/null
+++ b/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch
@@ -0,0 +1,405 @@
+From a758c579c707df04734efb4390bd4d45ff398eab Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 29 May 2019 13:41:47 +0200
+Subject: [PATCH 3/5] Azure: Changes to the Hyper-V KVP Reporter
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190529134149.842-4-otubo@redhat.com>
+Patchwork-id: 88266
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 3/5] Azure: Changes to the Hyper-V KVP Reporter
+Bugzilla: 1648375
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+From: Anh Vo <anhvo@microsoft.com>
+commit 86674f013dfcea3c075ab41373ffb475881066f6
+Author: Anh Vo <anhvo@microsoft.com>
+Date:   Mon Apr 29 20:22:16 2019 +0000
+
+    Azure: Changes to the Hyper-V KVP Reporter
+
+     + Truncate KVP Pool file to prevent stale entries from
+       being processed by the Hyper-V KVP reporter.
+     + Drop filtering of KVPs as it is no longer needed.
+     + Batch appending of existing KVP entries.
+
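+    Illustration only, not part of this patch: the truncation guard boils
+    down to clearing the pool file at most once, and only when its mtime
+    predates the current boot. A minimal Python sketch:
+
+        import os
+        import time
+
+        def truncate_stale_pool(kvp_file, uptime_seconds):
+            boot_time = time.time() - uptime_seconds
+            try:
+                if os.path.getmtime(kvp_file) < boot_time:
+                    # untouched since boot: drop stale KVPs from last boot
+                    open(kvp_file, 'w').close()
+            except (OSError, IOError):
+                pass  # KVP is best-effort telemetry, never fail boot on it
+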
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ cloudinit/reporting/handlers.py          | 117 +++++++++++++++----------------
+ tests/unittests/test_reporting_hyperv.py | 104 +++++++++++++--------------
+ 2 files changed, 106 insertions(+), 115 deletions(-)
+ mode change 100644 => 100755 cloudinit/reporting/handlers.py
+ mode change 100644 => 100755 tests/unittests/test_reporting_hyperv.py
+
+diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
+old mode 100644
+new mode 100755
+index 6d23558..10165ae
+--- a/cloudinit/reporting/handlers.py
++++ b/cloudinit/reporting/handlers.py
+@@ -5,7 +5,6 @@ import fcntl
+ import json
+ import six
+ import os
+-import re
+ import struct
+ import threading
+ import time
+@@ -14,6 +13,7 @@ from cloudinit import log as logging
+ from cloudinit.registry import DictRegistry
+ from cloudinit import (url_helper, util)
+ from datetime import datetime
++from six.moves.queue import Empty as QueueEmptyError
+ 
+ if six.PY2:
+     from multiprocessing.queues import JoinableQueue as JQueue
+@@ -129,24 +129,50 @@ class HyperVKvpReportingHandler(ReportingHandler):
+     DESC_IDX_KEY = 'msg_i'
+     JSON_SEPARATORS = (',', ':')
+     KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
++    _already_truncated_pool_file = False
+ 
+     def __init__(self,
+                  kvp_file_path=KVP_POOL_FILE_GUEST,
+                  event_types=None):
+         super(HyperVKvpReportingHandler, self).__init__()
+         self._kvp_file_path = kvp_file_path
++        HyperVKvpReportingHandler._truncate_guest_pool_file(
++            self._kvp_file_path)
++
+         self._event_types = event_types
+         self.q = JQueue()
+-        self.kvp_file = None
+         self.incarnation_no = self._get_incarnation_no()
+         self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+                                                   self.incarnation_no)
+-        self._current_offset = 0
+         self.publish_thread = threading.Thread(
+                 target=self._publish_event_routine)
+         self.publish_thread.daemon = True
+         self.publish_thread.start()
+ 
++    @classmethod
++    def _truncate_guest_pool_file(cls, kvp_file):
++        """
++        Truncate the pool file if it has not been truncated since boot.
++        This should be done exactly once for the file indicated by
++        KVP_POOL_FILE_GUEST constant above. This method takes a filename
++        so that we can use an arbitrary file during unit testing.
++        Since KVP is a best-effort telemetry channel we only attempt to
++        truncate the file once and only if the file has not been modified
++        since boot. Additional truncation can lead to loss of existing
++        KVPs.
++        """
++        if cls._already_truncated_pool_file:
++            return
++        boot_time = time.time() - float(util.uptime())
++        try:
++            if os.path.getmtime(kvp_file) < boot_time:
++                with open(kvp_file, "w"):
++                    pass
++        except (OSError, IOError) as e:
++            LOG.warning("failed to truncate kvp pool file, %s", e)
++        finally:
++            cls._already_truncated_pool_file = True
++
+     def _get_incarnation_no(self):
+         """
+         use the time passed as the incarnation number.
+@@ -162,20 +188,15 @@ class HyperVKvpReportingHandler(ReportingHandler):
+ 
+     def _iterate_kvps(self, offset):
+         """iterate the kvp file from the current offset."""
+-        try:
+-            with open(self._kvp_file_path, 'rb+') as f:
+-                self.kvp_file = f
+-                fcntl.flock(f, fcntl.LOCK_EX)
+-                f.seek(offset)
++        with open(self._kvp_file_path, 'rb') as f:
++            fcntl.flock(f, fcntl.LOCK_EX)
++            f.seek(offset)
++            record_data = f.read(self.HV_KVP_RECORD_SIZE)
++            while len(record_data) == self.HV_KVP_RECORD_SIZE:
++                kvp_item = self._decode_kvp_item(record_data)
++                yield kvp_item
+                 record_data = f.read(self.HV_KVP_RECORD_SIZE)
+-                while len(record_data) == self.HV_KVP_RECORD_SIZE:
+-                    self._current_offset += self.HV_KVP_RECORD_SIZE
+-                    kvp_item = self._decode_kvp_item(record_data)
+-                    yield kvp_item
+-                    record_data = f.read(self.HV_KVP_RECORD_SIZE)
+-                fcntl.flock(f, fcntl.LOCK_UN)
+-        finally:
+-            self.kvp_file = None
++            fcntl.flock(f, fcntl.LOCK_UN)
+ 
+     def _event_key(self, event):
+         """
+@@ -207,23 +228,13 @@ class HyperVKvpReportingHandler(ReportingHandler):
+ 
+         return {'key': k, 'value': v}
+ 
+-    def _update_kvp_item(self, record_data):
+-        if self.kvp_file is None:
+-            raise ReportException(
+-                "kvp file '{0}' not opened."
+-                .format(self._kvp_file_path))
+-        self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+-        self.kvp_file.write(record_data)
+-
+     def _append_kvp_item(self, record_data):
+-        with open(self._kvp_file_path, 'rb+') as f:
++        with open(self._kvp_file_path, 'ab') as f:
+             fcntl.flock(f, fcntl.LOCK_EX)
+-            # seek to end of the file
+-            f.seek(0, 2)
+-            f.write(record_data)
++            for data in record_data:
++                f.write(data)
+             f.flush()
+             fcntl.flock(f, fcntl.LOCK_UN)
+-            self._current_offset = f.tell()
+ 
+     def _break_down(self, key, meta_data, description):
+         del meta_data[self.MSG_KEY]
+@@ -279,40 +290,26 @@ class HyperVKvpReportingHandler(ReportingHandler):
+ 
+     def _publish_event_routine(self):
+         while True:
++            items_from_queue = 0
+             try:
+                 event = self.q.get(block=True)
+-                need_append = True
++                items_from_queue += 1
++                encoded_data = []
++                while event is not None:
++                    encoded_data += self._encode_event(event)
++                    try:
++                        # get all the rest of the events in the queue
++                        event = self.q.get(block=False)
++                        items_from_queue += 1
++                    except QueueEmptyError:
++                        event = None
+                 try:
+-                    if not os.path.exists(self._kvp_file_path):
+-                        LOG.warning(
+-                            "skip writing events %s to %s. file not present.",
+-                            event.as_string(),
+-                            self._kvp_file_path)
+-                    encoded_event = self._encode_event(event)
+-                    # for each encoded_event
+-                    for encoded_data in (encoded_event):
+-                        for kvp in self._iterate_kvps(self._current_offset):
+-                            match = (
+-                                re.match(
+-                                    r"^{0}\|(\d+)\|.+"
+-                                    .format(self.EVENT_PREFIX),
+-                                    kvp['key']
+-                                ))
+-                            if match:
+-                                match_groups = match.groups(0)
+-                                if int(match_groups[0]) < self.incarnation_no:
+-                                    need_append = False
+-                                    self._update_kvp_item(encoded_data)
+-                                    continue
+-                        if need_append:
+-                            self._append_kvp_item(encoded_data)
+-                except IOError as e:
+-                    LOG.warning(
+-                        "failed posting event to kvp: %s e:%s",
+-                        event.as_string(), e)
++                    self._append_kvp_item(encoded_data)
++                except (OSError, IOError) as e:
++                    LOG.warning("failed posting events to kvp, %s", e)
+                 finally:
+-                    self.q.task_done()
+-
++                    for _ in range(items_from_queue):
++                        self.q.task_done()
+            # when main process exits, q.get() will raise EOFError
+             # indicating we should exit this thread.
+             except EOFError:
+@@ -322,7 +319,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
+     # if the kvp pool already contains a chunk of data,
+     # so defer it to another thread.
+     def publish_event(self, event):
+-        if (not self._event_types or event.event_type in self._event_types):
++        if not self._event_types or event.event_type in self._event_types:
+             self.q.put(event)
+ 
+     def flush(self):
+diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
+old mode 100644
+new mode 100755
+index 2e64c6c..d01ed5b
+--- a/tests/unittests/test_reporting_hyperv.py
++++ b/tests/unittests/test_reporting_hyperv.py
+@@ -1,10 +1,12 @@
+ # This file is part of cloud-init. See LICENSE file for license information.
+ 
+ from cloudinit.reporting import events
+-from cloudinit.reporting import handlers
++from cloudinit.reporting.handlers import HyperVKvpReportingHandler
+ 
+ import json
+ import os
++import struct
++import time
+ 
+ from cloudinit import util
+ from cloudinit.tests.helpers import CiTestCase
+@@ -13,7 +15,7 @@ from cloudinit.tests.helpers import CiTestCase
+ class TestKvpEncoding(CiTestCase):
+     def test_encode_decode(self):
+         kvp = {'key': 'key1', 'value': 'value1'}
+-        kvp_reporting = handlers.HyperVKvpReportingHandler()
++        kvp_reporting = HyperVKvpReportingHandler()
+         data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+         self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
+         decoded_kvp = kvp_reporting._decode_kvp_item(data)
+@@ -26,57 +28,9 @@ class TextKvpReporter(CiTestCase):
+         self.tmp_file_path = self.tmp_path('kvp_pool_file')
+         util.ensure_file(self.tmp_file_path)
+ 
+-    def test_event_type_can_be_filtered(self):
+-        reporter = handlers.HyperVKvpReportingHandler(
+-            kvp_file_path=self.tmp_file_path,
+-            event_types=['foo', 'bar'])
+-
+-        reporter.publish_event(
+-            events.ReportingEvent('foo', 'name', 'description'))
+-        reporter.publish_event(
+-            events.ReportingEvent('some_other', 'name', 'description3'))
+-        reporter.q.join()
+-
+-        kvps = list(reporter._iterate_kvps(0))
+-        self.assertEqual(1, len(kvps))
+-
+-        reporter.publish_event(
+-            events.ReportingEvent('bar', 'name', 'description2'))
+-        reporter.q.join()
+-        kvps = list(reporter._iterate_kvps(0))
+-        self.assertEqual(2, len(kvps))
+-
+-        self.assertIn('foo', kvps[0]['key'])
+-        self.assertIn('bar', kvps[1]['key'])
+-        self.assertNotIn('some_other', kvps[0]['key'])
+-        self.assertNotIn('some_other', kvps[1]['key'])
+-
+-    def test_events_are_over_written(self):
+-        reporter = handlers.HyperVKvpReportingHandler(
+-            kvp_file_path=self.tmp_file_path)
+-
+-        self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+-
+-        reporter.publish_event(
+-            events.ReportingEvent('foo', 'name1', 'description'))
+-        reporter.publish_event(
+-            events.ReportingEvent('foo', 'name2', 'description'))
+-        reporter.q.join()
+-        self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+-
+-        reporter2 = handlers.HyperVKvpReportingHandler(
+-            kvp_file_path=self.tmp_file_path)
+-        reporter2.incarnation_no = reporter.incarnation_no + 1
+-        reporter2.publish_event(
+-            events.ReportingEvent('foo', 'name3', 'description'))
+-        reporter2.q.join()
+-
+-        self.assertEqual(2, len(list(reporter2._iterate_kvps(0))))
+-
+     def test_events_with_higher_incarnation_not_over_written(self):
+-        reporter = handlers.HyperVKvpReportingHandler(
++        reporter = HyperVKvpReportingHandler(
+             kvp_file_path=self.tmp_file_path)
+-
+         self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+ 
+         reporter.publish_event(
+@@ -86,7 +40,7 @@ class TextKvpReporter(CiTestCase):
+         reporter.q.join()
+         self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+ 
+-        reporter3 = handlers.HyperVKvpReportingHandler(
++        reporter3 = HyperVKvpReportingHandler(
+             kvp_file_path=self.tmp_file_path)
+         reporter3.incarnation_no = reporter.incarnation_no - 1
+         reporter3.publish_event(
+@@ -95,7 +49,7 @@ class TextKvpReporter(CiTestCase):
+         self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
+ 
+     def test_finish_event_result_is_logged(self):
+-        reporter = handlers.HyperVKvpReportingHandler(
++        reporter = HyperVKvpReportingHandler(
+             kvp_file_path=self.tmp_file_path)
+         reporter.publish_event(
+             events.FinishReportingEvent('name2', 'description1',
+@@ -105,7 +59,7 @@ class TextKvpReporter(CiTestCase):
+ 
+     def test_file_operation_issue(self):
+         os.remove(self.tmp_file_path)
+-        reporter = handlers.HyperVKvpReportingHandler(
++        reporter = HyperVKvpReportingHandler(
+             kvp_file_path=self.tmp_file_path)
+         reporter.publish_event(
+             events.FinishReportingEvent('name2', 'description1',
+@@ -113,7 +67,7 @@ class TextKvpReporter(CiTestCase):
+         reporter.q.join()
+ 
+     def test_event_very_long(self):
+-        reporter = handlers.HyperVKvpReportingHandler(
++        reporter = HyperVKvpReportingHandler(
+             kvp_file_path=self.tmp_file_path)
+         description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+         long_event = events.FinishReportingEvent(
+@@ -132,3 +86,43 @@ class TextKvpReporter(CiTestCase):
+             self.assertEqual(msg_slice['msg_i'], i)
+             full_description += msg_slice['msg']
+         self.assertEqual(description, full_description)
++
++    def test_not_truncate_kvp_file_modified_after_boot(self):
++        with open(self.tmp_file_path, "wb+") as f:
++            kvp = {'key': 'key1', 'value': 'value1'}
++            data = (struct.pack("%ds%ds" % (
++                    HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
++                    HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
++                    kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
++            f.write(data)
++        cur_time = time.time()
++        os.utime(self.tmp_file_path, (cur_time, cur_time))
++
++        # reset this because the unit test framework
++        # has already polluted the class variable
++        HyperVKvpReportingHandler._already_truncated_pool_file = False
++
++        reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
++        kvps = list(reporter._iterate_kvps(0))
++        self.assertEqual(1, len(kvps))
++
++    def test_truncate_stale_kvp_file(self):
++        with open(self.tmp_file_path, "wb+") as f:
++            kvp = {'key': 'key1', 'value': 'value1'}
++            data = (struct.pack("%ds%ds" % (
++                HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
++                HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
++                kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
++            f.write(data)
++
++        # set the time way back to make it look like
++        # we had an old kvp file
++        os.utime(self.tmp_file_path, (1000000, 1000000))
++
++        # reset this because the unit test framework
++        # has already polluted the class variable
++        HyperVKvpReportingHandler._already_truncated_pool_file = False
++
++        reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
++        kvps = list(reporter._iterate_kvps(0))
++        self.assertEqual(0, len(kvps))
+-- 
+1.8.3.1
+
diff --git a/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch b/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch
new file mode 100644
index 0000000..130ad9a
--- /dev/null
+++ b/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch
@@ -0,0 +1,156 @@
+From 7fb6356ab820b28af7479382c4f63c32e3d653be Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 29 May 2019 13:41:45 +0200
+Subject: [PATCH 1/5] Azure: Ensure platform random_seed is always serializable
+ as JSON.
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190529134149.842-2-otubo@redhat.com>
+Patchwork-id: 88272
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 1/5] Azure: Ensure platform random_seed is always serializable as JSON.
+Bugzilla: 1648375
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+From: "Jason Zions (MSFT)" <jasonzio@microsoft.com>
+commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf
+Author: Jason Zions (MSFT) <jasonzio@microsoft.com>
+Date:   Tue Mar 26 18:53:50 2019 +0000
+
+    Azure: Ensure platform random_seed is always serializable as JSON.
+
+    The Azure platform surfaces random bytes into /sys via Hyper-V.
+    Python 2.7 json.dump() raises an exception if asked to convert
+    a str with non-character content, and python 3.0 json.dump()
+    won't serialize a "bytes" value. As a result, c-i instance
+    data is often not written by Azure, making reboots slower (c-i
+    has to repeat work).
+
+    The random data is base64-encoded and then decoded into a string
+    (str or unicode depending on the version of Python in use). The
+    base64 string has just as many bits of entropy, so we're not
+    throwing away useful "information", but we can be certain
+    json.dump() will correctly serialize the bits.
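+
+    A minimal standalone sketch of the idea (the byte string below is
+    illustrative, not real OEM0 contents):
+
+        import base64
+        import json
+
+        raw = b'\x80\x97\x00C'  # seed bytes that are not valid UTF-8
+        seed = base64.b64encode(raw).decode()  # plain ASCII str
+        json.dumps({'seed': seed})  # now serializes on python 2 and 3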
+
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+
+Conflicts:
+    tests/unittests/test_datasource/test_azure.py
+    Skipped the commit edf052c as it removes support for python-2.6
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py          | 24 +++++++++++++++++++-----
+ tests/data/azure/non_unicode_random_string    |  1 +
+ tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++--
+ 3 files changed, 42 insertions(+), 7 deletions(-)
+ create mode 100644 tests/data/azure/non_unicode_random_string
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index 2062ca5..a768b2c 100644
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+ REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
+ AGENT_SEED_DIR = '/var/lib/waagent'
+ IMDS_URL = "http://169.254.169.254/metadata/"
++PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+ 
+ # List of static scripts and network config artifacts created by
+ # stock ubuntu supported images.
+@@ -195,6 +196,8 @@ if util.is_FreeBSD():
+         RESOURCE_DISK_PATH = "/dev/" + res_disk
+     else:
+         LOG.debug("resource disk is None")
++    # TODO Find where platform entropy data is surfaced
++    PLATFORM_ENTROPY_SOURCE = None
+ 
+ BUILTIN_DS_CONFIG = {
+     'agent_command': AGENT_START_BUILTIN,
+@@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev):
+     return False
+ 
+ 
+-def _get_random_seed():
++def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
+     """Return content random seed file if available, otherwise,
+        return None."""
+     # azure / hyper-v provides random data here
+-    # TODO. find the seed on FreeBSD platform
+     # now update ds_cfg to reflect contents passed in config
+-    if util.is_FreeBSD():
++    if source is None:
+         return None
+-    return util.load_file("/sys/firmware/acpi/tables/OEM0",
+-                          quiet=True, decode=False)
++    seed = util.load_file(source, quiet=True, decode=False)
++
++    # The seed generally contains non-Unicode characters. load_file puts
++    # them into a str (in python 2) or bytes (in python 3). In python 2,
++    # bad octets in a str cause util.json_dumps() to throw an exception. In
++    # python 3, bytes is a non-serializable type, and the handler load_file
++    # uses applies b64 encoding *again* to handle it. The simplest solution
++    # is to just b64encode the data and then decode it to a serializable
++    # string. Same number of bits of entropy, just with 25% more zeroes.
++    # There's no need to undo this base64-encoding when the random seed is
++    # actually used in cc_seed_random.py.
++    seed = base64.b64encode(seed).decode()
++
++    return seed
+ 
+ 
+ def list_possible_azure_ds_devs():
+diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string
+new file mode 100644
+index 0000000..b9ecefb
+--- /dev/null
++++ b/tests/data/azure/non_unicode_random_string
+@@ -0,0 +1 @@
++OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
+\ No newline at end of file
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index 417d86a..eacf225 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -7,11 +7,11 @@ from cloudinit.sources import (
+     UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
+ from cloudinit.util import (b64e, decode_binary, load_file, write_file,
+                             find_freebsd_part, get_path_dev_freebsd,
+-                            MountFailedError)
++                            MountFailedError, json_dumps, load_json)
+ from cloudinit.version import version_string as vs
+ from cloudinit.tests.helpers import (
+     HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
+-    ExitStack, PY26, SkipTest)
++    ExitStack, PY26, SkipTest, resourceLocation)
+ 
+ import crypt
+ import httpretty
+@@ -1924,4 +1924,24 @@ class TestWBIsPlatformViable(CiTestCase):
+             self.logs.getvalue())
+ 
+ 
++class TestRandomSeed(CiTestCase):
++    """Test proper handling of random_seed"""
++
++    def test_non_ascii_seed_is_serializable(self):
++        """Pass if a random string from the Azure infrastructure which
++        contains at least one non-Unicode character can be converted to/from
++        JSON without alteration and without throwing an exception.
++        """
++        path = resourceLocation("azure/non_unicode_random_string")
++        result = dsaz._get_random_seed(path)
++
++        obj = {'seed': result}
++        try:
++            serialized = json_dumps(obj)
++            deserialized = load_json(serialized)
++        except UnicodeDecodeError:
++            self.fail("Non-serializable random seed returned")
++
++        self.assertEqual(deserialized['seed'], result)
++
+ # vi: ts=4 expandtab
+-- 
+1.8.3.1
+
diff --git a/SOURCES/ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch b/SOURCES/ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch
deleted file mode 100644
index 263014a..0000000
--- a/SOURCES/ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch
+++ /dev/null
@@ -1,423 +0,0 @@
-From 767c4f590bd1ac6cd32c34be8cb813a2cbec08ad Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Fri, 5 Oct 2018 09:53:01 +0200
-Subject: [PATCH 2/4] Azure: Ignore NTFS mount errors when checking ephemeral
- drive
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20181005095303.20597-3-otubo@redhat.com>
-Patchwork-id: 82385
-O-Subject: [RHEL-8.0 cloud-init PATCH 2/4] Azure: Ignore NTFS mount errors when checking ephemeral drive
-Bugzilla: 1615599
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
-
-commit aa4eeb80839382117e1813e396dc53aa634fd7ba
-Author: Paul Meyer <paulmey@microsoft.com>
-Date:   Wed May 23 15:45:39 2018 -0400
-
-    Azure: Ignore NTFS mount errors when checking ephemeral drive
-
-    The Azure data source provides a method to check whether a NTFS partition
-    on the ephemeral disk is safe for reformatting to ext4. The method checks
-    to see if there are customer data files on the disk. However, mounting
-    the partition fails on systems that do not have the capability of
-    mounting NTFS. Note that in this case, it is also very unlikely that the
-    NTFS partition would have been used by the system (since it can't mount
-    it). The only case would be where an update to the system removed the
-    capability to mount NTFS, the likelihood of which is also very small.
-    This change allows the reformatting of the ephemeral disk to ext4 on
-    systems where mounting NTFS is not supported.
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/sources/DataSourceAzure.py          |  63 ++++++++++++----
- cloudinit/util.py                             |   5 +-
- tests/unittests/test_datasource/test_azure.py | 105 +++++++++++++++++++++-----
- 3 files changed, 138 insertions(+), 35 deletions(-)
-
-diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
-index 23b4d53..7e49455 100644
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -214,6 +214,7 @@ BUILTIN_CLOUD_CONFIG = {
- }
- 
- DS_CFG_PATH = ['datasource', DS_NAME]
-+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
- DEF_EPHEMERAL_LABEL = 'Temporary Storage'
- 
- # The redacted password fails to meet password complexity requirements
-@@ -400,14 +401,9 @@ class DataSourceAzure(sources.DataSource):
-         if found == ddir:
-             LOG.debug("using files cached in %s", ddir)
- 
--        # azure / hyper-v provides random data here
--        # TODO. find the seed on FreeBSD platform
--        # now update ds_cfg to reflect contents pass in config
--        if not util.is_FreeBSD():
--            seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
--                                  quiet=True, decode=False)
--            if seed:
--                self.metadata['random_seed'] = seed
-+        seed = _get_random_seed()
-+        if seed:
-+            self.metadata['random_seed'] = seed
- 
-         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
-         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
-@@ -537,7 +533,9 @@ class DataSourceAzure(sources.DataSource):
-         return fabric_data
- 
-     def activate(self, cfg, is_new_instance):
--        address_ephemeral_resize(is_new_instance=is_new_instance)
-+        address_ephemeral_resize(is_new_instance=is_new_instance,
-+                                 preserve_ntfs=self.ds_cfg.get(
-+                                     DS_CFG_KEY_PRESERVE_NTFS, False))
-         return
- 
-     @property
-@@ -581,17 +579,29 @@ def _has_ntfs_filesystem(devpath):
-     return os.path.realpath(devpath) in ntfs_devices
- 
- 
--def can_dev_be_reformatted(devpath):
--    """Determine if block device devpath is newly formatted ephemeral.
-+def can_dev_be_reformatted(devpath, preserve_ntfs):
-+    """Determine if the ephemeral drive at devpath should be reformatted.
- 
--    A newly formatted disk will:
-+    A fresh ephemeral disk is formatted by Azure and will:
-       a.) have a partition table (dos or gpt)
-       b.) have 1 partition that is ntfs formatted, or
-           have 2 partitions with the second partition ntfs formatted.
-           (larger instances with >2TB ephemeral disk have gpt, and will
-            have a microsoft reserved partition as part 1.  LP: #1686514)
-       c.) the ntfs partition will have no files other than possibly
--          'dataloss_warning_readme.txt'"""
-+          'dataloss_warning_readme.txt'
-+
-+    User can indicate that NTFS should never be destroyed by setting
-+    DS_CFG_KEY_PRESERVE_NTFS in dscfg.
-+    If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS
-+    to make sure cloud-init does not accidentally wipe their data.
-+    If cloud-init cannot mount the disk to check for data, destruction
-+    will be allowed, unless the dscfg key is set."""
-+    if preserve_ntfs:
-+        msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
-+               (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
-+        return False, msg
-+
-     if not os.path.exists(devpath):
-         return False, 'device %s does not exist' % devpath
- 
-@@ -624,18 +634,27 @@ def can_dev_be_reformatted(devpath):
-     bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
-             (cand_part, cand_path, devpath))
-     try:
--        file_count = util.mount_cb(cand_path, count_files)
-+        file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
-+                                   update_env_for_mount={'LANG': 'C'})
-     except util.MountFailedError as e:
-+        if "mount: unknown filesystem type 'ntfs'" in str(e):
-+            return True, (bmsg + ' but this system cannot mount NTFS,'
-+                          ' assuming there are no important files.'
-+                          ' Formatting allowed.')
-         return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
- 
-     if file_count != 0:
-+        LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
-+                    'to ensure that filesystem does not get wiped, set '
-+                    '%s.%s in config', '.'.join(DS_CFG_PATH),
-+                    DS_CFG_KEY_PRESERVE_NTFS)
-         return False, bmsg + ' but had %d files on it.' % file_count
- 
-     return True, bmsg + ' and had no important files. Safe for reformatting.'
- 
- 
- def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
--                             is_new_instance=False):
-+                             is_new_instance=False, preserve_ntfs=False):
-     # wait for ephemeral disk to come up
-     naplen = .2
-     missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
-@@ -651,7 +670,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
-     if is_new_instance:
-         result, msg = (True, "First instance boot.")
-     else:
--        result, msg = can_dev_be_reformatted(devpath)
-+        result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)
- 
-     LOG.debug("reformattable=%s: %s", result, msg)
-     if not result:
-@@ -965,6 +984,18 @@ def _check_freebsd_cdrom(cdrom_dev):
-     return False
- 
- 
-+def _get_random_seed():
-+    """Return content random seed file if available, otherwise,
-+       return None."""
-+    # azure / hyper-v provides random data here
-+    # TODO. find the seed on FreeBSD platform
-+    # now update ds_cfg to reflect contents pass in config
-+    if util.is_FreeBSD():
-+        return None
-+    return util.load_file("/sys/firmware/acpi/tables/OEM0",
-+                          quiet=True, decode=False)
-+
-+
- def list_possible_azure_ds_devs():
-     devlist = []
-     if util.is_FreeBSD():
-diff --git a/cloudinit/util.py b/cloudinit/util.py
-index 0ab2c48..c8e14ba 100644
---- a/cloudinit/util.py
-+++ b/cloudinit/util.py
-@@ -1608,7 +1608,8 @@ def mounts():
-     return mounted
- 
- 
--def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
-+def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
-+             update_env_for_mount=None):
-     """
-     Mount the device, call method 'callback' passing the directory
-     in which it was mounted, then unmount.  Return whatever 'callback'
-@@ -1670,7 +1671,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
-                         mountcmd.extend(['-t', mtype])
-                     mountcmd.append(device)
-                     mountcmd.append(tmpd)
--                    subp(mountcmd)
-+                    subp(mountcmd, update_env=update_env_for_mount)
-                     umount = tmpd  # This forces it to be unmounted (when set)
-                     mountpoint = tmpd
-                     break
-diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
-index 3e8b791..af2c93a 100644
---- a/tests/unittests/test_datasource/test_azure.py
-+++ b/tests/unittests/test_datasource/test_azure.py
-@@ -1,10 +1,10 @@
- # This file is part of cloud-init. See LICENSE file for license information.
- 
- from cloudinit import helpers
--from cloudinit.util import b64e, decode_binary, load_file, write_file
- from cloudinit.sources import DataSourceAzure as dsaz
--from cloudinit.util import find_freebsd_part
--from cloudinit.util import get_path_dev_freebsd
-+from cloudinit.util import (b64e, decode_binary, load_file, write_file,
-+                            find_freebsd_part, get_path_dev_freebsd,
-+                            MountFailedError)
- from cloudinit.version import version_string as vs
- from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock,
-                                      ExitStack, PY26, SkipTest)
-@@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase):
-         self.patches = ExitStack()
-         self.addCleanup(self.patches.close)
- 
-+        self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed'))
-+
-         super(TestAzureDataSource, self).setUp()
- 
-     def apply_patches(self, patches):
-@@ -335,6 +337,18 @@ fdescfs            /dev/fd          fdescfs rw              0 0
-         self.assertTrue(ret)
-         self.assertEqual(data['agent_invoked'], '_COMMAND')
- 
-+    def test_sys_cfg_set_never_destroy_ntfs(self):
-+        sys_cfg = {'datasource': {'Azure': {
-+            'never_destroy_ntfs': 'user-supplied-value'}}}
-+        data = {'ovfcontent': construct_valid_ovf_env(data={}),
-+                'sys_cfg': sys_cfg}
-+
-+        dsrc = self._get_ds(data)
-+        ret = self._get_and_setup(dsrc)
-+        self.assertTrue(ret)
-+        self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
-+                         'user-supplied-value')
-+
-     def test_username_used(self):
-         odata = {'HostName': "myhost", 'UserName': "myuser"}
-         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-@@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase):
-                               mock.MagicMock(return_value={})))
-         self.patches.enter_context(
-             mock.patch.object(dsaz.util, 'which', lambda x: True))
-+        self.patches.enter_context(
-+            mock.patch.object(dsaz, '_get_random_seed'))
- 
-         def _dmi_mocks(key):
-             if key == 'system-uuid':
-@@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase):
-             # return sorted by partition number
-             return sorted(ret, key=lambda d: d[0])
- 
--        def mount_cb(device, callback):
-+        def mount_cb(device, callback, mtype, update_env_for_mount):
-+            self.assertEqual('ntfs', mtype)
-+            self.assertEqual('C', update_env_for_mount.get('LANG'))
-             p = self.tmp_dir()
-             for f in bypath.get(device).get('files', []):
-                 write_file(os.path.join(p, f), content=f)
-@@ -988,14 +1006,16 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda2': {'num': 2},
-                     '/dev/sda3': {'num': 3},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("3 or more", msg.lower())
- 
-     def test_no_partitions_is_false(self):
-         """A disk with no partitions can not be formatted."""
-         self.patchup({'/dev/sda': {}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("not partitioned", msg.lower())
- 
-@@ -1007,7 +1027,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda1': {'num': 1},
-                     '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("not ntfs", msg.lower())
- 
-@@ -1020,7 +1041,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda2': {'num': 2, 'fs': 'ntfs',
-                                   'files': ['secret.txt']},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("files on it", msg.lower())
- 
-@@ -1032,7 +1054,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda1': {'num': 1},
-                     '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertTrue(value)
-         self.assertIn("safe for", msg.lower())
- 
-@@ -1043,7 +1066,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                 'partitions': {
-                     '/dev/sda1': {'num': 1, 'fs': 'zfs'},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("not ntfs", msg.lower())
- 
-@@ -1055,9 +1079,14 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda1': {'num': 1, 'fs': 'ntfs',
-                                   'files': ['file1.txt', 'file2.exe']},
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
--        self.assertFalse(value)
--        self.assertIn("files on it", msg.lower())
-+        with mock.patch.object(dsaz.LOG, 'warning') as warning:
-+            value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                     preserve_ntfs=False)
-+            wmsg = warning.call_args[0][0]
-+            self.assertIn("looks like you're using NTFS on the ephemeral disk",
-+                          wmsg)
-+            self.assertFalse(value)
-+            self.assertIn("files on it", msg.lower())
- 
-     def test_one_partition_ntfs_empty_is_true(self):
-         """1 mountable ntfs partition and no files can be formatted."""
-@@ -1066,7 +1095,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                 'partitions': {
-                     '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertTrue(value)
-         self.assertIn("safe for", msg.lower())
- 
-@@ -1078,7 +1108,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                     '/dev/sda1': {'num': 1, 'fs': 'ntfs',
-                                   'files': ['dataloss_warning_readme.txt']}
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=False)
-         self.assertTrue(value)
-         self.assertIn("safe for", msg.lower())
- 
-@@ -1093,7 +1124,8 @@ class TestCanDevBeReformatted(CiTestCase):
-                         'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
-                         'realpath': '/dev/sdb1'}
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted(epath)
-+        value, msg = dsaz.can_dev_be_reformatted(epath,
-+                                                 preserve_ntfs=False)
-         self.assertTrue(value)
-         self.assertIn("safe for", msg.lower())
- 
-@@ -1112,10 +1144,49 @@ class TestCanDevBeReformatted(CiTestCase):
-                     epath + '-part3': {'num': 3, 'fs': 'ext',
-                                        'realpath': '/dev/sdb3'}
-                 }}})
--        value, msg = dsaz.can_dev_be_reformatted(epath)
-+        value, msg = dsaz.can_dev_be_reformatted(epath,
-+                                                 preserve_ntfs=False)
-         self.assertFalse(value)
-         self.assertIn("3 or more", msg.lower())
- 
-+    def test_ntfs_mount_errors_true(self):
-+        """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
-+        self.patchup({
-+            '/dev/sda': {
-+                'partitions': {
-+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
-+                }}})
-+
-+        err = ("Unexpected error while running command.\n",
-+               "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ",
-+               "'/dev/sda1', '/fake-tmp/dir']\n"
-+               "Exit code: 32\n"
-+               "Reason: -\n"
-+               "Stdout: -\n"
-+               "Stderr: mount: unknown filesystem type 'ntfs'")
-+        self.m_mount_cb.side_effect = MountFailedError(
-+            'Failed mounting %s to %s due to: %s' %
-+            ('/dev/sda', '/fake-tmp/dir', err))
-+
-+        value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
-+                                                 preserve_ntfs=False)
-+        self.assertTrue(value)
-+        self.assertIn('cannot mount NTFS, assuming', msg)
-+
-+    def test_never_destroy_ntfs_config_false(self):
-+        """Normally formattable situation with never_destroy_ntfs set."""
-+        self.patchup({
-+            '/dev/sda': {
-+                'partitions': {
-+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs',
-+                                  'files': ['dataloss_warning_readme.txt']}
-+                }}})
-+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-+                                                 preserve_ntfs=True)
-+        self.assertFalse(value)
-+        self.assertIn("config says to never destroy NTFS "
-+                      "(datasource.Azure.never_destroy_ntfs)", msg)
-+
- 
- class TestAzureNetExists(CiTestCase):
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch b/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch
new file mode 100644
index 0000000..917559c
--- /dev/null
+++ b/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch
@@ -0,0 +1,102 @@
+From f28ece4ddb379b5e223c658200a9747b97a4701c Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 3 Jul 2019 13:06:49 +0200
+Subject: [PATCH] Azure: Return static fallback address as if failed to find
+ endpoint
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190703130649.14511-1-otubo@redhat.com>
+Patchwork-id: 89353
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCH] Azure: Return static fallback address as if failed to find endpoint
+Bugzilla: 1648375
+RH-Acked-by: Bandan Das <bsd@redhat.com>
+RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
+
+commit ade77012c8bbcd215b7e26065981194ce1b6a157
+Author: Jason Zions (MSFT) <jasonzio@microsoft.com>
+Date:   Fri May 10 18:38:55 2019 +0000
+
+    Azure: Return static fallback address as if failed to find endpoint
+
+    The Azure data source helper attempts to use information in the dhcp
+    lease to find the Wireserver endpoint (IP address). Under some unusual
+    circumstances, those attempts will fail. This change uses a static
+    address, known to be always correct in the Azure public and sovereign
+    clouds, when the helper fails to locate a valid dhcp lease. This
+    address is not guaranteed to be correct in Azure Stack environments;
+    it's still best to use the information from the lease whenever possible.
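+
+    As a rough sketch (decode_lease_value is a hypothetical helper, not
+    the shim's real API, which also handles other lease encodings), the
+    default value is four colon-separated hex octets that decode to the
+    well-known wireserver address:
+
+        def decode_lease_value(value):
+            # "a8:3f:81:10" -> "168.63.129.16"
+            return ".".join(str(int(octet, 16)) for octet in value.split(":"))
+
+        assert decode_lease_value("a8:3f:81:10") == "168.63.129.16"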
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ cloudinit/sources/helpers/azure.py                   | 14 +++++++++++---
+ tests/unittests/test_datasource/test_azure_helper.py |  9 +++++++--
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
+index d3af05e..82c4c8c 100755
+--- a/cloudinit/sources/helpers/azure.py
++++ b/cloudinit/sources/helpers/azure.py
+@@ -20,6 +20,9 @@ from cloudinit.reporting import events
+ 
+ LOG = logging.getLogger(__name__)
+ 
++# This endpoint matches the format as found in dhcp lease files, since this
++# value is applied if the endpoint can't be found within a lease file
++DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
+ 
+ azure_ds_reporter = events.ReportEventStack(
+     name="azure-ds",
+@@ -297,7 +300,12 @@ class WALinuxAgentShim(object):
+     @azure_ds_telemetry_reporter
+     def _get_value_from_leases_file(fallback_lease_file):
+         leases = []
+-        content = util.load_file(fallback_lease_file)
++        try:
++            content = util.load_file(fallback_lease_file)
++        except IOError as ex:
++            LOG.error("Failed to read %s: %s", fallback_lease_file, ex)
++            return None
++
+         LOG.debug("content is %s", content)
+         option_name = _get_dhcp_endpoint_option_name()
+         for line in content.splitlines():
+@@ -372,9 +380,9 @@ class WALinuxAgentShim(object):
+                           fallback_lease_file)
+                 value = WALinuxAgentShim._get_value_from_leases_file(
+                     fallback_lease_file)
+-
+         if value is None:
+-            raise ValueError('No endpoint found.')
++            LOG.warning("No lease found; using default endpoint")
++            value = DEFAULT_WIRESERVER_ENDPOINT
+ 
+         endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
+         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
+index 0255616..bd006ab 100644
+--- a/tests/unittests/test_datasource/test_azure_helper.py
++++ b/tests/unittests/test_datasource/test_azure_helper.py
+@@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase):
+         self.networkd_leases.return_value = None
+ 
+     def test_missing_file(self):
+-        self.assertRaises(ValueError, wa_shim.find_endpoint)
++        """wa_shim find_endpoint uses default endpoint if leasefile not found
++        """
++        self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+ 
+     def test_missing_special_azure_line(self):
++        """wa_shim find_endpoint uses default endpoint if leasefile is found
++        but does not contain DHCP Option 245 (whose value is the endpoint)
++        """
+         self.load_file.return_value = ''
+         self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+-        self.assertRaises(ValueError, wa_shim.find_endpoint)
++        self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+ 
+     @staticmethod
+     def _build_lease_content(encoded_address):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch b/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch
new file mode 100644
index 0000000..6a91571
--- /dev/null
+++ b/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch
@@ -0,0 +1,111 @@
+From daed9fe5c87c679fe2576be47fc195d44629b142 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 29 May 2019 13:41:48 +0200
+Subject: [PATCH 4/5] DataSourceAzure: Adjust timeout for polling IMDS
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190529134149.842-5-otubo@redhat.com>
+Patchwork-id: 88267
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 4/5] DataSourceAzure: Adjust timeout for polling IMDS
+Bugzilla: 1648375
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+From: Anh Vo <anhvo@microsoft.com>
+commit ab6621d849b24bb652243e88c79f6f3b446048d7
+Author: Anh Vo <anhvo@microsoft.com>
+Date:   Wed May 8 14:54:03 2019 +0000
+
+    DataSourceAzure: Adjust timeout for polling IMDS
+
+    If the IMDS primary server is not available, falling back to the
+    secondary server takes about 1s. The net result is that the
+    expected E2E time is slightly more than 1s. This change increases
+    the timeout to 2s to prevent the infinite loop of timeouts.
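+
+    A rough sketch of the polling shape (using requests as a stand-in
+    for cloudinit.url_helper.readurl): with a 1s timeout every attempt
+    expires before the ~1s fallback completes, so the retry loop below
+    would never terminate.
+
+        import requests
+
+        IMDS_TIMEOUT_IN_SECONDS = 2  # > ~1s primary-to-secondary fallback
+
+        def poll_imds(url):
+            while True:
+                try:
+                    return requests.get(
+                        url, headers={'Metadata': 'true'},
+                        timeout=IMDS_TIMEOUT_IN_SECONDS).content
+                except requests.RequestException:
+                    pass  # timed out or failed; retry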
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py          | 15 ++++++++++-----
+ tests/unittests/test_datasource/test_azure.py | 10 +++++++---
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+index c827816..5baf8da 100755
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+ REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
+ AGENT_SEED_DIR = '/var/lib/waagent'
++
++# In the event where the IMDS primary server is not
++# available, it takes 1s to fall back to the secondary one
++IMDS_TIMEOUT_IN_SECONDS = 2
+ IMDS_URL = "http://169.254.169.254/metadata/"
++
+ PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+ 
+ # List of static scripts and network config artifacts created by
+@@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource):
+                         return
+                     self._ephemeral_dhcp_ctx.clean_network()
+                 else:
+-                    return readurl(url, timeout=1, headers=headers,
+-                                   exception_cb=exc_cb, infinite=True,
+-                                   log_req_resp=False).contents
++                    return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS,
++                                   headers=headers, exception_cb=exc_cb,
++                                   infinite=True, log_req_resp=False).contents
+             except UrlError:
+                 # Teardown our EphemeralDHCPv4 context on failure as we retry
+                 self._ephemeral_dhcp_ctx.clean_network()
+@@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries):
+     headers = {"Metadata": "true"}
+     try:
+         response = readurl(
+-            url, timeout=1, headers=headers, retries=retries,
+-            exception_cb=retry_on_url_exc)
++            url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
++            retries=retries, exception_cb=retry_on_url_exc)
+     except Exception as e:
+         LOG.debug('Ignoring IMDS instance metadata: %s', e)
+         return {}
+diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
+index eacf225..bc8b42c 100644
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
+ 
+         m_readurl.assert_called_with(
+             self.network_md_url, exception_cb=mock.ANY,
+-            headers={'Metadata': 'true'}, retries=2, timeout=1)
++            headers={'Metadata': 'true'}, retries=2,
++            timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
+ 
+     @mock.patch('cloudinit.url_helper.time.sleep')
+     @mock.patch(MOCKPATH + 'net.is_up')
+@@ -1789,7 +1790,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
+                                     headers={'Metadata': 'true',
+                                              'User-Agent':
+                                              'Cloud-Init/%s' % vs()
+-                                             }, method='GET', timeout=1,
++                                             }, method='GET',
++                                    timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+                                     url=full_url)])
+         self.assertEqual(m_dhcp.call_count, 2)
+         m_net.assert_any_call(
+@@ -1826,7 +1828,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
+                                     headers={'Metadata': 'true',
+                                              'User-Agent':
+                                              'Cloud-Init/%s' % vs()},
+-                                    method='GET', timeout=1, url=full_url)])
++                                    method='GET',
++                                    timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
++                                    url=full_url)])
+         self.assertEqual(m_dhcp.call_count, 2)
+         m_net.assert_any_call(
+             broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
+-- 
+1.8.3.1
+
diff --git a/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch b/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch
new file mode 100644
index 0000000..1f461db
--- /dev/null
+++ b/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch
@@ -0,0 +1,642 @@
+From f6054bcebaca3fd731f4547ce0ad4ddf3bbcbd23 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 29 May 2019 13:41:46 +0200
+Subject: [PATCH 2/5] DatasourceAzure: add additional logging for azure
+ datasource
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190529134149.842-3-otubo@redhat.com>
+Patchwork-id: 88268
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 2/5] DatasourceAzure: add additional logging for azure datasource
+Bugzilla: 1648375
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+From: Anh Vo <anhvo@microsoft.com>
+commit 0d8c88393b51db6454491a379dcc2e691551217a
+Author: Anh Vo <anhvo@microsoft.com>
+Date:   Wed Apr 3 18:23:18 2019 +0000
+
+    DatasourceAzure: add additional logging for azure datasource
+
+    Create an Azure logging decorator and use additional ReportEventStack
+    context managers to provide additional logging details.
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++-------------
+ cloudinit/sources/helpers/azure.py   |  31 +++++
+ 2 files changed, 179 insertions(+), 83 deletions(-)
+ mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py
+ mode change 100644 => 100755 cloudinit/sources/helpers/azure.py
+
+diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
+old mode 100644
+new mode 100755
+index a768b2c..c827816
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -21,10 +21,14 @@ from cloudinit import net
+ from cloudinit.event import EventType
+ from cloudinit.net.dhcp import EphemeralDHCPv4
+ from cloudinit import sources
+-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
+ from cloudinit.sources.helpers import netlink
+ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
+ from cloudinit import util
++from cloudinit.reporting import events
++
++from cloudinit.sources.helpers.azure import (azure_ds_reporter,
++                                             azure_ds_telemetry_reporter,
++                                             get_metadata_from_fabric)
+ 
+ LOG = logging.getLogger(__name__)
+ 
+@@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'):
+     util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ 
+ 
++@azure_ds_telemetry_reporter
+ @contextlib.contextmanager
+ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+     """
+@@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource):
+         root = sources.DataSource.__str__(self)
+         return "%s [seed=%s]" % (root, self.seed)
+ 
++    @azure_ds_telemetry_reporter
+     def bounce_network_with_azure_hostname(self):
+         # When using cloud-init to provision, we have to set the hostname from
+         # the metadata and "bounce" the network to force DDNS to update via
+@@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource):
+                     util.logexc(LOG, "handling set_hostname failed")
+         return False
+ 
++    @azure_ds_telemetry_reporter
+     def get_metadata_from_agent(self):
+         temp_hostname = self.metadata.get('local-hostname')
+         agent_cmd = self.ds_cfg['agent_command']
+@@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource):
+                 LOG.debug("ssh authentication: "
+                           "using fingerprint from fabric")
+ 
+-        # wait very long for public SSH keys to arrive
+-        # https://bugs.launchpad.net/cloud-init/+bug/1717611
+-        missing = util.log_time(logfunc=LOG.debug,
+-                                msg="waiting for SSH public key files",
+-                                func=util.wait_for_files,
+-                                args=(fp_files, 900))
+-
+-        if len(missing):
+-            LOG.warning("Did not find files, but going on: %s", missing)
++        with events.ReportEventStack(
++                name="waiting-for-ssh-public-key",
++                description="wait for agents to retrieve ssh keys",
++                parent=azure_ds_reporter):
++            # wait very long for public SSH keys to arrive
++            # https://bugs.launchpad.net/cloud-init/+bug/1717611
++            missing = util.log_time(logfunc=LOG.debug,
++                                    msg="waiting for SSH public key files",
++                                    func=util.wait_for_files,
++                                    args=(fp_files, 900))
++            if len(missing):
++                LOG.warning("Did not find files, but going on: %s", missing)
+ 
+         metadata = {}
+         metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
+@@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource):
+             subplatform_type = 'seed-dir'
+         return '%s (%s)' % (subplatform_type, self.seed)
+ 
++    @azure_ds_telemetry_reporter
+     def crawl_metadata(self):
+         """Walk all instance metadata sources returning a dict on success.
+ 
+@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource):
+         super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
+         self._metadata_imds = sources.UNSET
+ 
++    @azure_ds_telemetry_reporter
+     def _get_data(self):
+         """Crawl and process datasource metadata caching metadata as attrs.
+ 
+@@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource):
+         # quickly (local check only) if self.instance_id is still valid
+         return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ 
++    @azure_ds_telemetry_reporter
+     def setup(self, is_new_instance):
+         if self._negotiated is False:
+             LOG.debug("negotiating for %s (new_instance=%s)",
+@@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource):
+                 if nl_sock:
+                     nl_sock.close()
+ 
++    @azure_ds_telemetry_reporter
+     def _report_ready(self, lease):
+         """Tells the fabric provisioning has completed """
+         try:
+@@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource):
+     def _reprovision(self):
+         """Initiate the reprovisioning workflow."""
+         contents = self._poll_imds()
+-        md, ud, cfg = read_azure_ovf(contents)
+-        return (md, ud, cfg, {'ovf-env.xml': contents})
+-
++        with events.ReportEventStack(
++                name="reprovisioning-read-azure-ovf",
++                description="read azure ovf during reprovisioning",
++                parent=azure_ds_reporter):
++            md, ud, cfg = read_azure_ovf(contents)
++            return (md, ud, cfg, {'ovf-env.xml': contents})
++
++    @azure_ds_telemetry_reporter
+     def _negotiate(self):
+         """Negotiate with fabric and return data from it.
+ 
+@@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource):
+         util.del_file(REPROVISION_MARKER_FILE)
+         return fabric_data
+ 
++    @azure_ds_telemetry_reporter
+     def activate(self, cfg, is_new_instance):
+         address_ephemeral_resize(is_new_instance=is_new_instance,
+                                  preserve_ntfs=self.ds_cfg.get(
+@@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16):
+     return []
+ 
+ 
++@azure_ds_telemetry_reporter
+ def _has_ntfs_filesystem(devpath):
+     ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
+     LOG.debug('ntfs_devices found = %s', ntfs_devices)
+     return os.path.realpath(devpath) in ntfs_devices
+ 
+ 
++@azure_ds_telemetry_reporter
+ def can_dev_be_reformatted(devpath, preserve_ntfs):
+     """Determine if the ephemeral drive at devpath should be reformatted.
+ 
+@@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
+                (cand_part, cand_path, devpath))
+         return False, msg
+ 
++    @azure_ds_telemetry_reporter
+     def count_files(mp):
+         ignored = set(['dataloss_warning_readme.txt'])
+         return len([f for f in os.listdir(mp) if f.lower() not in ignored])
+ 
+     bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
+             (cand_part, cand_path, devpath))
+-    try:
+-        file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+-                                   update_env_for_mount={'LANG': 'C'})
+-    except util.MountFailedError as e:
+-        if "unknown filesystem type 'ntfs'" in str(e):
+-            return True, (bmsg + ' but this system cannot mount NTFS,'
+-                          ' assuming there are no important files.'
+-                          ' Formatting allowed.')
+-        return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+-
+-    if file_count != 0:
+-        LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
+-                    'to ensure that filesystem does not get wiped, set '
+-                    '%s.%s in config', '.'.join(DS_CFG_PATH),
+-                    DS_CFG_KEY_PRESERVE_NTFS)
+-        return False, bmsg + ' but had %d files on it.' % file_count
++
++    with events.ReportEventStack(
++                name="mount-ntfs-and-count",
++                description="mount-ntfs-and-count",
++                parent=azure_ds_reporter) as evt:
++        try:
++            file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
++                                       update_env_for_mount={'LANG': 'C'})
++        except util.MountFailedError as e:
++            evt.description = "cannot mount ntfs"
++            if "unknown filesystem type 'ntfs'" in str(e):
++                return True, (bmsg + ' but this system cannot mount NTFS,'
++                              ' assuming there are no important files.'
++                              ' Formatting allowed.')
++            return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
++
++        if file_count != 0:
++            evt.description = "mounted and counted %d files" % file_count
++            LOG.warning("it looks like you're using NTFS on the ephemeral"
++                        " disk, to ensure that filesystem does not get wiped,"
++                        " set %s.%s in config", '.'.join(DS_CFG_PATH),
++                        DS_CFG_KEY_PRESERVE_NTFS)
++            return False, bmsg + ' but had %d files on it.' % file_count
+ 
+     return True, bmsg + ' and had no important files. Safe for reformatting.'
+ 
+ 
++@azure_ds_telemetry_reporter
+ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
+                              is_new_instance=False, preserve_ntfs=False):
+     # wait for ephemeral disk to come up
+     naplen = .2
+-    missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
+-                                  log_pre="Azure ephemeral disk: ")
+-
+-    if missing:
+-        LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
+-                    devpath, maxwait)
+-        return
++    with events.ReportEventStack(
++                name="wait-for-ephemeral-disk",
++                description="wait for ephemeral disk",
++                parent=azure_ds_reporter):
++        missing = util.wait_for_files([devpath],
++                                      maxwait=maxwait,
++                                      naplen=naplen,
++                                      log_pre="Azure ephemeral disk: ")
++
++        if missing:
++            LOG.warning("ephemeral device '%s' did"
++                        " not appear after %d seconds.",
++                        devpath, maxwait)
++            return
+ 
+     result = False
+     msg = None
+@@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
+     return
+ 
+ 
++@azure_ds_telemetry_reporter
+ def perform_hostname_bounce(hostname, cfg, prev_hostname):
+     # set the hostname to 'hostname' if it is not already set to that.
+     # then, if policy is not off, bounce the interface using command
+@@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
+     return True
+ 
+ 
++@azure_ds_telemetry_reporter
+ def crtfile_to_pubkey(fname, data=None):
+     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
+                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
+@@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None):
+     return out.rstrip()
+ 
+ 
++@azure_ds_telemetry_reporter
+ def pubkeys_from_crt_files(flist):
+     pubkeys = []
+     errors = []
+@@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist):
+     return pubkeys
+ 
+ 
++@azure_ds_telemetry_reporter
+ def write_files(datadir, files, dirmode=None):
+ 
+     def _redact_password(cnt, fname):
+@@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None):
+         util.write_file(filename=fname, content=content, mode=0o600)
+ 
+ 
++@azure_ds_telemetry_reporter
+ def invoke_agent(cmd):
+     # this is a function itself to simplify patching it for test
+     if cmd:
+@@ -912,6 +955,7 @@ def find_child(node, filter_func):
+     return ret
+ 
+ 
++@azure_ds_telemetry_reporter
+ def load_azure_ovf_pubkeys(sshnode):
+     # This parses a 'SSH' node formatted like below, and returns
+     # an array of dicts.
+@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode):
+     return found
+ 
+ 
++@azure_ds_telemetry_reporter
+ def read_azure_ovf(contents):
+     try:
+         dom = minidom.parseString(contents)
+@@ -1064,6 +1109,7 @@ def read_azure_ovf(contents):
+     return (md, ud, cfg)
+ 
+ 
++@azure_ds_telemetry_reporter
+ def _extract_preprovisioned_vm_setting(dom):
+     """Read the preprovision flag from the ovf. It should not
+        exist unless true."""
+@@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"):
+     return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+ 
+ 
++@azure_ds_telemetry_reporter
+ def _check_freebsd_cdrom(cdrom_dev):
+     """Return boolean indicating path to cdrom device has content."""
+     try:
+@@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev):
+     return False
+ 
+ 
++@azure_ds_telemetry_reporter
+ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
+     """Return content random seed file if available, otherwise,
+        return None."""
+@@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
+     return seed
+ 
+ 
++@azure_ds_telemetry_reporter
+ def list_possible_azure_ds_devs():
+     devlist = []
+     if util.is_FreeBSD():
+@@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs():
+     return devlist
+ 
+ 
++@azure_ds_telemetry_reporter
+ def load_azure_ds_dir(source_dir):
+     ovf_file = os.path.join(source_dir, "ovf-env.xml")
+ 
+@@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata):
+     @param: imds_metadata: Dict of content read from IMDS network service.
+     @return: Dictionary containing network version 2 standard configuration.
+     """
+-    if imds_metadata != sources.UNSET and imds_metadata:
+-        netconfig = {'version': 2, 'ethernets': {}}
+-        LOG.debug('Azure: generating network configuration from IMDS')
+-        network_metadata = imds_metadata['network']
+-        for idx, intf in enumerate(network_metadata['interface']):
+-            nicname = 'eth{idx}'.format(idx=idx)
+-            dev_config = {}
+-            for addr4 in intf['ipv4']['ipAddress']:
+-                privateIpv4 = addr4['privateIpAddress']
+-                if privateIpv4:
+-                    if dev_config.get('dhcp4', False):
+-                        # Append static address config for nic > 1
+-                        netPrefix = intf['ipv4']['subnet'][0].get(
+-                            'prefix', '24')
+-                        if not dev_config.get('addresses'):
+-                            dev_config['addresses'] = []
+-                        dev_config['addresses'].append(
+-                            '{ip}/{prefix}'.format(
+-                                ip=privateIpv4, prefix=netPrefix))
+-                    else:
+-                        dev_config['dhcp4'] = True
+-            for addr6 in intf['ipv6']['ipAddress']:
+-                privateIpv6 = addr6['privateIpAddress']
+-                if privateIpv6:
+-                    dev_config['dhcp6'] = True
+-                    break
+-            if dev_config:
+-                mac = ':'.join(re.findall(r'..', intf['macAddress']))
+-                dev_config.update(
+-                    {'match': {'macaddress': mac.lower()},
+-                     'set-name': nicname})
+-                netconfig['ethernets'][nicname] = dev_config
+-    else:
+-        blacklist = ['mlx4_core']
+-        LOG.debug('Azure: generating fallback configuration')
+-        # generate a network config, blacklist picking mlx4_core devs
+-        netconfig = net.generate_fallback_config(
+-            blacklist_drivers=blacklist, config_driver=True)
+-    return netconfig
++    with events.ReportEventStack(
++                name="parse_network_config",
++                description="",
++                parent=azure_ds_reporter) as evt:
++        if imds_metadata != sources.UNSET and imds_metadata:
++            netconfig = {'version': 2, 'ethernets': {}}
++            LOG.debug('Azure: generating network configuration from IMDS')
++            network_metadata = imds_metadata['network']
++            for idx, intf in enumerate(network_metadata['interface']):
++                nicname = 'eth{idx}'.format(idx=idx)
++                dev_config = {}
++                for addr4 in intf['ipv4']['ipAddress']:
++                    privateIpv4 = addr4['privateIpAddress']
++                    if privateIpv4:
++                        if dev_config.get('dhcp4', False):
++                            # Append static address config for nic > 1
++                            netPrefix = intf['ipv4']['subnet'][0].get(
++                                'prefix', '24')
++                            if not dev_config.get('addresses'):
++                                dev_config['addresses'] = []
++                            dev_config['addresses'].append(
++                                '{ip}/{prefix}'.format(
++                                    ip=privateIpv4, prefix=netPrefix))
++                        else:
++                            dev_config['dhcp4'] = True
++                for addr6 in intf['ipv6']['ipAddress']:
++                    privateIpv6 = addr6['privateIpAddress']
++                    if privateIpv6:
++                        dev_config['dhcp6'] = True
++                        break
++                if dev_config:
++                    mac = ':'.join(re.findall(r'..', intf['macAddress']))
++                    dev_config.update(
++                        {'match': {'macaddress': mac.lower()},
++                         'set-name': nicname})
++                    netconfig['ethernets'][nicname] = dev_config
++            evt.description = "network config from imds"
++        else:
++            blacklist = ['mlx4_core']
++            LOG.debug('Azure: generating fallback configuration')
++            # generate a network config, blacklist picking mlx4_core devs
++            netconfig = net.generate_fallback_config(
++                blacklist_drivers=blacklist, config_driver=True)
++            evt.description = "network config from fallback"
++        return netconfig
+ 
+ 
++@azure_ds_telemetry_reporter
+ def get_metadata_from_imds(fallback_nic, retries):
+     """Query Azure's network metadata service, returning a dictionary.
+ 
+@@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries):
+             return util.log_time(**kwargs)
+ 
+ 
++@azure_ds_telemetry_reporter
+ def _get_metadata_from_imds(retries):
+ 
+     url = IMDS_URL + "instance?api-version=2017-12-01"
+@@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries):
+     return {}
+ 
+ 
++@azure_ds_telemetry_reporter
+ def maybe_remove_ubuntu_network_config_scripts(paths=None):
+     """Remove Azure-specific ubuntu network config for non-primary nics.
+ 
+@@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
+ 
+ 
+ def _is_platform_viable(seed_dir):
+-    """Check platform environment to report if this datasource may run."""
+-    asset_tag = util.read_dmi_data('chassis-asset-tag')
+-    if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+-        return True
+-    LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+-    if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+-        return True
+-    return False
++    """Check platform environment to report if this datasource may run."""
++    with events.ReportEventStack(
++                name="check-platform-viability",
++                description="found azure asset tag",
++                parent=azure_ds_reporter) as evt:
++
++        asset_tag = util.read_dmi_data('chassis-asset-tag')
++        if asset_tag == AZURE_CHASSIS_ASSET_TAG:
++            return True
++        LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
++        evt.description = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
++        if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
++            return True
++        return False
+ 
+ 
+ class BrokenAzureDataSource(Exception):
+diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
+old mode 100644
+new mode 100755
+index 2829dd2..d3af05e
+--- a/cloudinit/sources/helpers/azure.py
++++ b/cloudinit/sources/helpers/azure.py
+@@ -16,10 +16,27 @@ from xml.etree import ElementTree
+ 
+ from cloudinit import url_helper
+ from cloudinit import util
++from cloudinit.reporting import events
+ 
+ LOG = logging.getLogger(__name__)
+ 
+ 
++azure_ds_reporter = events.ReportEventStack(
++    name="azure-ds",
++    description="initialize reporter for azure ds",
++    reporting_enabled=True)
++
++
++def azure_ds_telemetry_reporter(func):
++    def impl(*args, **kwargs):
++        with events.ReportEventStack(
++                name=func.__name__,
++                description=func.__name__,
++                parent=azure_ds_reporter):
++            return func(*args, **kwargs)
++    return impl
++
++
+ @contextmanager
+ def cd(newdir):
+     prevdir = os.getcwd()
+@@ -119,6 +136,7 @@ class OpenSSLManager(object):
+     def clean_up(self):
+         util.del_dir(self.tmpdir)
+ 
++    @azure_ds_telemetry_reporter
+     def generate_certificate(self):
+         LOG.debug('Generating certificate for communication with fabric...')
+         if self.certificate is not None:
+@@ -139,17 +157,20 @@ class OpenSSLManager(object):
+         LOG.debug('New certificate generated.')
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def _run_x509_action(action, cert):
+         cmd = ['openssl', 'x509', '-noout', action]
+         result, _ = util.subp(cmd, data=cert)
+         return result
+ 
++    @azure_ds_telemetry_reporter
+     def _get_ssh_key_from_cert(self, certificate):
+         pub_key = self._run_x509_action('-pubkey', certificate)
+         keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+         ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+         return ssh_key
+ 
++    @azure_ds_telemetry_reporter
+     def _get_fingerprint_from_cert(self, certificate):
+         """openssl x509 formats fingerprints as so:
+         'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
+@@ -163,6 +184,7 @@ class OpenSSLManager(object):
+         octets = raw_fp[eq+1:-1].split(':')
+         return ''.join(octets)
+ 
++    @azure_ds_telemetry_reporter
+     def _decrypt_certs_from_xml(self, certificates_xml):
+         """Decrypt the certificates XML document using the our private key;
+            return the list of certs and private keys contained in the doc.
+@@ -185,6 +207,7 @@ class OpenSSLManager(object):
+                 shell=True, data=b'\n'.join(lines))
+         return out
+ 
++    @azure_ds_telemetry_reporter
+     def parse_certificates(self, certificates_xml):
+         """Given the Certificates XML document, return a dictionary of
+            fingerprints and associated SSH keys derived from the certs."""
+@@ -265,11 +288,13 @@ class WALinuxAgentShim(object):
+         return socket.inet_ntoa(packed_bytes)
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def _networkd_get_value_from_leases(leases_d=None):
+         return dhcp.networkd_get_option_from_leases(
+             'OPTION_245', leases_d=leases_d)
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def _get_value_from_leases_file(fallback_lease_file):
+         leases = []
+         content = util.load_file(fallback_lease_file)
+@@ -287,6 +312,7 @@ class WALinuxAgentShim(object):
+             return leases[-1]
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def _load_dhclient_json():
+         dhcp_options = {}
+         hooks_dir = WALinuxAgentShim._get_hooks_dir()
+@@ -305,6 +331,7 @@ class WALinuxAgentShim(object):
+         return dhcp_options
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def _get_value_from_dhcpoptions(dhcp_options):
+         if dhcp_options is None:
+             return None
+@@ -318,6 +345,7 @@ class WALinuxAgentShim(object):
+         return _value
+ 
+     @staticmethod
++    @azure_ds_telemetry_reporter
+     def find_endpoint(fallback_lease_file=None, dhcp245=None):
+         value = None
+         if dhcp245 is not None:
+@@ -352,6 +380,7 @@ class WALinuxAgentShim(object):
+         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+         return endpoint_ip_address
+ 
++    @azure_ds_telemetry_reporter
+     def register_with_azure_and_fetch_data(self, pubkey_info=None):
+         if self.openssl_manager is None:
+             self.openssl_manager = OpenSSLManager()
+@@ -404,6 +433,7 @@ class WALinuxAgentShim(object):
+ 
+         return keys
+ 
++    @azure_ds_telemetry_reporter
+     def _report_ready(self, goal_state, http_client):
+         LOG.debug('Reporting ready to Azure fabric.')
+         document = self.REPORT_READY_XML_TEMPLATE.format(
+@@ -419,6 +449,7 @@ class WALinuxAgentShim(object):
+         LOG.info('Reported ready to Azure fabric.')
+ 
+ 
++@azure_ds_telemetry_reporter
+ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
+                              pubkey_info=None):
+     shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
+-- 
+1.8.3.1
+
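The decorator introduced above is the core of this patch: every annotated function or method is wrapped in a child ReportEventStack event nested under the module-level azure_ds_reporter, so entry and exit of each provisioning step show up in cloud-init's reporting stream. A minimal, self-contained sketch of that pattern follows; the ReportEventStack class below is a toy stand-in for cloudinit.reporting.events.ReportEventStack, not cloud-init's implementation:

    import functools


    class ReportEventStack(object):
        """Toy stand-in for cloudinit.reporting.events.ReportEventStack."""

        def __init__(self, name, description, parent=None,
                     reporting_enabled=True):
            self.name = name
            self.description = description
            self.parent = parent

        def __enter__(self):
            print('start %s: %s' % (self.name, self.description))
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            print('finish %s: %s' % (self.name, self.description))


    azure_ds_reporter = ReportEventStack(
        name="azure-ds", description="demo parent reporter")


    def azure_ds_telemetry_reporter(func):
        # Same shape as the decorator added by this patch: each call to
        # the wrapped function is enclosed in a child event named after it.
        @functools.wraps(func)
        def impl(*args, **kwargs):
            with ReportEventStack(name=func.__name__,
                                  description=func.__name__,
                                  parent=azure_ds_reporter):
                return func(*args, **kwargs)
        return impl


    @azure_ds_telemetry_reporter
    def crawl_metadata():
        return {'public-keys': []}


    crawl_metadata()  # prints a start/finish event pair around the call

Note that the decorator in the patch itself does not use functools.wraps, so wrapped functions lose their original __name__ and __doc__; wraps appears here only as a small optional improvement.
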
diff --git a/SOURCES/ci-Enable-cloud-init-by-default-on-vmware.patch b/SOURCES/ci-Enable-cloud-init-by-default-on-vmware.patch
deleted file mode 100644
index 00edc76..0000000
--- a/SOURCES/ci-Enable-cloud-init-by-default-on-vmware.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From f5c6832cede618d83c2a3844287922fa2874521d Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Mon, 17 Dec 2018 11:27:29 +0100
-Subject: [PATCH] Enable cloud-init by default on vmware
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20181217112729.16625-1-otubo@redhat.com>
-Patchwork-id: 83538
-O-Subject: [RHEL-8.0 cloud-init PATCH] Enable cloud-init by default on vmware
-Bugzilla: 1644335
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
-
-Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1644335
-Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=19536284
-Tested: By me
-
-According to BZ#1644335, the variable `disable_vmware_customization'
-should be set to `false' in order to enable cloud-init by default on
-VMware. This patch sets it accordingly.
-
-X-downstream-only: yes
-Resolves: rhbz#1644335
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- rhel/cloud.cfg | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
-index 4a73981d..f0db3c12 100644
---- a/rhel/cloud.cfg
-+++ b/rhel/cloud.cfg
-@@ -9,6 +9,7 @@ resize_rootfs_tmp: /dev
- ssh_deletekeys:   0
- ssh_genkeytypes:  ~
- syslog_fix_perms: ~
-+disable_vmware_customization: false
- 
- cloud_init_modules:
-  - disk_setup
--- 
-2.19.1
-
diff --git a/SOURCES/ci-Fix-string-missmatch-when-mounting-ntfs.patch b/SOURCES/ci-Fix-string-missmatch-when-mounting-ntfs.patch
deleted file mode 100644
index 1e6b8ba..0000000
--- a/SOURCES/ci-Fix-string-missmatch-when-mounting-ntfs.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From f502ab105bca61613a4ff83aa5ea373bc00bb4e9 Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Thu, 10 Jan 2019 10:03:14 +0100
-Subject: [PATCH] Fix string missmatch when mounting ntfs
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20190110100314.32713-1-otubo@redhat.com>
-Patchwork-id: 83943
-O-Subject: [RHEL-8.0 cloud-init PATCH] Fix string missmatch when mounting ntfs
-Bugzilla: 1664227
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
-
-This patch fixes a simple string missmatch when attempting to mount ntfs
-partitions.
-
-X-downstream-only: yes
-Resolves: rhbz#1664227
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/sources/DataSourceAzure.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
-index 46d57446..61d374a7 100644
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -646,7 +646,7 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
-         file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
-                                    update_env_for_mount={'LANG': 'C'})
-     except util.MountFailedError as e:
--        if "mount: unknown filesystem type 'ntfs'" in str(e):
-+        if "unknown filesystem type 'ntfs'" in str(e):
-             return True, (bmsg + ' but this system cannot mount NTFS,'
-                           ' assuming there are no important files.'
-                           ' Formatting allowed.')
--- 
-2.19.1
-
diff --git a/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch b/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch
new file mode 100644
index 0000000..601e555
--- /dev/null
+++ b/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch
@@ -0,0 +1,85 @@
+From c49b8b2ec4fcee5212440d996bc3dafa74992470 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Thu, 14 Mar 2019 15:01:34 +0100
+Subject: [PATCH] Revert: azure: ensure that networkmanager hook script runs
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190314150134.28636-1-otubo@redhat.com>
+Patchwork-id: 84868
+O-Subject: [RHEL-8.0.1 cloud-init PATCHv2] Revert: azure: ensure that networkmanager hook script runs
+Bugzilla: 1579237
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
+
+This patch reverts the commit:
+
+commit c48497435e8195dbd87262c2f00e484e63fe3343
+Author: Lars Kellogg-Stedman <lars@redhat.com>
+Date:   Thu Jun 15 12:20:39 2017 -0400
+
+    azure: ensure that networkmanager hook script runs
+
+    The networkmanager hook script was failing to run due to the changes
+    we made to resolve rhbz#1440831.  This corrects the regression by
+    allowing the NM hook script to run regardless of whether or not
+    cloud-init is "enabled".
+
+    Resolves: rhbz#1460206
+    X-downstream-only: true
+
+Resolves: rhbz:1579237
+X-downstream-only: yes
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ tools/hook-dhclient        | 3 ++-
+ tools/hook-network-manager | 3 ++-
+ tools/hook-rhel.sh         | 3 ++-
+ 3 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/tools/hook-dhclient b/tools/hook-dhclient
+index 181cd51..02122f3 100755
+--- a/tools/hook-dhclient
++++ b/tools/hook-dhclient
+@@ -13,7 +13,8 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init is running on azure
++    # only execute hooks if cloud-init is enabled and on azure
++    [ -e /run/cloud-init/enabled ] || return 1
+     is_azure
+ }
+ 
+diff --git a/tools/hook-network-manager b/tools/hook-network-manager
+index 1d52cad..67d9044 100755
+--- a/tools/hook-network-manager
++++ b/tools/hook-network-manager
+@@ -13,7 +13,8 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init running on azure
++    # only execute hooks if cloud-init is enabled and on azure
++    [ -e /run/cloud-init/enabled ] || return 1
+     is_azure
+ }
+ 
+diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
+index d75767e..513a551 100755
+--- a/tools/hook-rhel.sh
++++ b/tools/hook-rhel.sh
+@@ -13,7 +13,8 @@ is_azure() {
+ }
+ 
+ is_enabled() {
+-    # only execute hooks if cloud-init is running on azure
++    # only execute hooks if cloud-init is enabled and on azure
++    [ -e /run/cloud-init/enabled ] || return 1
+     is_azure
+ }
+ 
+-- 
+1.8.3.1
+
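The effect of the restored gate is easiest to see outside shell syntax. The sketch below mirrors the hook logic in Python; only the /run/cloud-init/enabled sentinel test is taken verbatim from the patch, and the sysfs path used for the Azure check is an assumption for illustration (the hooks detect Azure from DMI data by their own means):

    import os

    AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'


    def is_azure():
        # Illustrative check (assumption): read the DMI chassis asset tag
        # via sysfs and compare it with Azure's fixed tag.
        try:
            with open('/sys/class/dmi/id/chassis_asset_tag') as fp:
                return fp.read().strip() == AZURE_CHASSIS_ASSET_TAG
        except (IOError, OSError):
            return False


    def is_enabled():
        # Mirrors the restored gate: hooks run only when the
        # /run/cloud-init/enabled sentinel exists and the platform is Azure.
        return os.path.exists('/run/cloud-init/enabled') and is_azure()
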
diff --git a/SOURCES/ci-azure-Add-reported-ready-marker-file.patch b/SOURCES/ci-azure-Add-reported-ready-marker-file.patch
deleted file mode 100644
index fb83c6e..0000000
--- a/SOURCES/ci-azure-Add-reported-ready-marker-file.patch
+++ /dev/null
@@ -1,330 +0,0 @@
-From b03bfae6c032a2590e094e9bceeedd47525ca057 Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Fri, 5 Oct 2018 09:53:02 +0200
-Subject: [PATCH 3/4] azure: Add reported ready marker file.
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20181005095303.20597-4-otubo@redhat.com>
-Patchwork-id: 82386
-O-Subject: [RHEL-8.0 cloud-init PATCH 3/4] azure: Add reported ready marker file.
-Bugzilla: 1615599
-RH-Acked-by: Cathy Avery <cavery@redhat.com>
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
-
-commit aae494c39f4c6f625e7409ca262e657d085dd5d1
-Author: Joshua Chan <jocha@microsoft.com>
-Date:   Thu May 3 14:50:16 2018 -0600
-
-    azure: Add reported ready marker file.
-
-    This change is for Azure VM Preprovisioning. A bug was found when after
-    azure VMs report ready the first time, during the time when VM is polling
-    indefinitely for the new ovf-env.xml from Instance Metadata Service
-    (IMDS), if a reboot happens, we send another report ready signal to the
-    fabric, which deletes the reprovisioning data on the node.
-
-    This marker file is used to fix this issue so that we will only send a
-    report ready signal to the fabric when no marker file is present. Then,
-    create a marker file so that when a reboot does occur, we check if a
-    marker file has been created and decide whether we would like to send the
-    repot ready signal.
-
-    LP: #1765214
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/sources/DataSourceAzure.py          |  21 +++-
- tests/unittests/test_datasource/test_azure.py | 170 ++++++++++++++++++--------
- 2 files changed, 134 insertions(+), 57 deletions(-)
-
-diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
-index 7e49455..46d5744 100644
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
- # DMI chassis-asset-tag is set static for all azure instances
- AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
- REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
-+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
- IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
- 
- 
-@@ -439,11 +440,12 @@ class DataSourceAzure(sources.DataSource):
-             LOG.debug("negotiating already done for %s",
-                       self.get_instance_id())
- 
--    def _poll_imds(self, report_ready=True):
-+    def _poll_imds(self):
-         """Poll IMDS for the new provisioning data until we get a valid
-         response. Then return the returned JSON object."""
-         url = IMDS_URL + "?api-version=2017-04-02"
-         headers = {"Metadata": "true"}
-+        report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
-         LOG.debug("Start polling IMDS")
- 
-         def exc_cb(msg, exception):
-@@ -453,13 +455,17 @@ class DataSourceAzure(sources.DataSource):
-             # call DHCP and setup the ephemeral network to acquire the new IP.
-             return False
- 
--        need_report = report_ready
-         while True:
-             try:
-                 with EphemeralDHCPv4() as lease:
--                    if need_report:
-+                    if report_ready:
-+                        path = REPORTED_READY_MARKER_FILE
-+                        LOG.info(
-+                            "Creating a marker file to report ready: %s", path)
-+                        util.write_file(path, "{pid}: {time}\n".format(
-+                            pid=os.getpid(), time=time()))
-                         self._report_ready(lease=lease)
--                        need_report = False
-+                        report_ready = False
-                     return readurl(url, timeout=1, headers=headers,
-                                    exception_cb=exc_cb, infinite=True).contents
-             except UrlError:
-@@ -493,8 +499,10 @@ class DataSourceAzure(sources.DataSource):
-         if (cfg.get('PreprovisionedVm') is True or
-                 os.path.isfile(path)):
-             if not os.path.isfile(path):
--                LOG.info("Creating a marker file to poll imds")
--                util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
-+                LOG.info("Creating a marker file to poll imds: %s",
-+                         path)
-+                util.write_file(path, "{pid}: {time}\n".format(
-+                    pid=os.getpid(), time=time()))
-             return True
-         return False
- 
-@@ -529,6 +537,7 @@ class DataSourceAzure(sources.DataSource):
-                 "Error communicating with Azure fabric; You may experience."
-                 "connectivity issues.", exc_info=True)
-             return False
-+        util.del_file(REPORTED_READY_MARKER_FILE)
-         util.del_file(REPROVISION_MARKER_FILE)
-         return fabric_data
- 
-diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
-index af2c93a..ed810d2 100644
---- a/tests/unittests/test_datasource/test_azure.py
-+++ b/tests/unittests/test_datasource/test_azure.py
-@@ -1196,19 +1196,9 @@ class TestAzureNetExists(CiTestCase):
-         self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
- 
- 
--@mock.patch('cloudinit.sources.DataSourceAzure.util.subp')
--@mock.patch.object(dsaz, 'get_hostname')
--@mock.patch.object(dsaz, 'set_hostname')
--class TestAzureDataSourcePreprovisioning(CiTestCase):
--
--    def setUp(self):
--        super(TestAzureDataSourcePreprovisioning, self).setUp()
--        tmp = self.tmp_dir()
--        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
--        self.paths = helpers.Paths({'cloud_dir': tmp})
--        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-+class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
- 
--    def test_read_azure_ovf_with_true_flag(self, *args):
-+    def test_read_azure_ovf_with_true_flag(self):
-         """The read_azure_ovf method should set the PreprovisionedVM
-            cfg flag if the proper setting is present."""
-         content = construct_valid_ovf_env(
-@@ -1217,7 +1207,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
-         cfg = ret[2]
-         self.assertTrue(cfg['PreprovisionedVm'])
- 
--    def test_read_azure_ovf_with_false_flag(self, *args):
-+    def test_read_azure_ovf_with_false_flag(self):
-         """The read_azure_ovf method should set the PreprovisionedVM
-            cfg flag to false if the proper setting is false."""
-         content = construct_valid_ovf_env(
-@@ -1226,7 +1216,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
-         cfg = ret[2]
-         self.assertFalse(cfg['PreprovisionedVm'])
- 
--    def test_read_azure_ovf_without_flag(self, *args):
-+    def test_read_azure_ovf_without_flag(self):
-         """The read_azure_ovf method should not set the
-            PreprovisionedVM cfg flag."""
-         content = construct_valid_ovf_env()
-@@ -1234,12 +1224,121 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
-         cfg = ret[2]
-         self.assertFalse(cfg['PreprovisionedVm'])
- 
--    @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
--    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
--    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
--    @mock.patch('requests.Session.request')
-+
-+@mock.patch('os.path.isfile')
-+class TestPreprovisioningShouldReprovision(CiTestCase):
-+
-+    def setUp(self):
-+        super(TestPreprovisioningShouldReprovision, self).setUp()
-+        tmp = self.tmp_dir()
-+        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
-+        self.paths = helpers.Paths({'cloud_dir': tmp})
-+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-+
-+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
-+    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
-+        """The _should_reprovision method should return true with config
-+           flag present."""
-+        isfile.return_value = False
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        self.assertTrue(dsa._should_reprovision(
-+            (None, None, {'PreprovisionedVm': True}, None)))
-+
-+    def test__should_reprovision_with_file_existing(self, isfile):
-+        """The _should_reprovision method should return True if the sentinal
-+           exists."""
-+        isfile.return_value = True
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        self.assertTrue(dsa._should_reprovision(
-+            (None, None, {'preprovisionedvm': False}, None)))
-+
-+    def test__should_reprovision_returns_false(self, isfile):
-+        """The _should_reprovision method should return False
-+           if config and sentinal are not present."""
-+        isfile.return_value = False
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
-+
-+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
-+    def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
-+        """_reprovision will poll IMDS."""
-+        isfile.return_value = False
-+        hostname = "myhost"
-+        username = "myuser"
-+        odata = {'HostName': hostname, 'UserName': username}
-+        _poll_imds.return_value = construct_valid_ovf_env(data=odata)
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        dsa._reprovision()
-+        _poll_imds.assert_called_with()
-+
-+
-+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-+@mock.patch('requests.Session.request')
-+@mock.patch(
-+    'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
-+class TestPreprovisioningPollIMDS(CiTestCase):
-+
-+    def setUp(self):
-+        super(TestPreprovisioningPollIMDS, self).setUp()
-+        self.tmp = self.tmp_dir()
-+        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
-+        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-+
-+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
-+    def test_poll_imds_calls_report_ready(self, write_f, report_ready_func,
-+                                          fake_resp, m_dhcp, m_net):
-+        """The poll_imds will call report_ready after creating marker file."""
-+        report_marker = self.tmp_path('report_marker', self.tmp)
-+        lease = {
-+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-+            'unknown-245': '624c3620'}
-+        m_dhcp.return_value = [lease]
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        mock_path = (
-+            'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE')
-+        with mock.patch(mock_path, report_marker):
-+            dsa._poll_imds()
-+        self.assertEqual(report_ready_func.call_count, 1)
-+        report_ready_func.assert_called_with(lease=lease)
-+
-+    def test_poll_imds_report_ready_false(self, report_ready_func,
-+                                          fake_resp, m_dhcp, m_net):
-+        """The poll_imds should not call reporting ready
-+           when flag is false"""
-+        report_marker = self.tmp_path('report_marker', self.tmp)
-+        write_file(report_marker, content='dont run report_ready :)')
-+        m_dhcp.return_value = [{
-+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-+            'unknown-245': '624c3620'}]
-+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+        mock_path = (
-+            'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE')
-+        with mock.patch(mock_path, report_marker):
-+            dsa._poll_imds()
-+        self.assertEqual(report_ready_func.call_count, 0)
-+
-+
-+@mock.patch('cloudinit.sources.DataSourceAzure.util.subp')
-+@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
-+@mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
-+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-+@mock.patch('requests.Session.request')
-+class TestAzureDataSourcePreprovisioning(CiTestCase):
-+
-+    def setUp(self):
-+        super(TestAzureDataSourcePreprovisioning, self).setUp()
-+        tmp = self.tmp_dir()
-+        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
-+        self.paths = helpers.Paths({'cloud_dir': tmp})
-+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-+
-     def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net,
--                                       m_is_bsd, *args):
-+                                       m_is_bsd, write_f, subp):
-         """The _poll_imds method should return the ovf_env.xml."""
-         m_is_bsd.return_value = False
-         m_dhcp.return_value = [{
-@@ -1265,12 +1364,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
-             prefix_or_mask='255.255.255.0', router='192.168.2.1')
-         self.assertEqual(m_net.call_count, 1)
- 
--    @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
--    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
--    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
--    @mock.patch('requests.Session.request')
-     def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net,
--                                           m_is_bsd, *args):
-+                                           m_is_bsd, write_f, subp):
-         """The _reprovision method should call poll IMDS."""
-         m_is_bsd.return_value = False
-         m_dhcp.return_value = [{
-@@ -1302,32 +1397,5 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
-             prefix_or_mask='255.255.255.0', router='192.168.2.1')
-         self.assertEqual(m_net.call_count, 1)
- 
--    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
--    @mock.patch('os.path.isfile')
--    def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args):
--        """The _should_reprovision method should return true with config
--           flag present."""
--        isfile.return_value = False
--        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
--        self.assertTrue(dsa._should_reprovision(
--            (None, None, {'PreprovisionedVm': True}, None)))
--
--    @mock.patch('os.path.isfile')
--    def test__should_reprovision_with_file_existing(self, isfile, *args):
--        """The _should_reprovision method should return True if the sentinal
--           exists."""
--        isfile.return_value = True
--        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
--        self.assertTrue(dsa._should_reprovision(
--            (None, None, {'preprovisionedvm': False}, None)))
--
--    @mock.patch('os.path.isfile')
--    def test__should_reprovision_returns_false(self, isfile, *args):
--        """The _should_reprovision method should return False
--           if config and sentinal are not present."""
--        isfile.return_value = False
--        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
--        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
--
- 
- # vi: ts=4 expandtab
--- 
-1.8.3.1
-
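The marker-file pattern described in that commit message (removed here because it is already merged into the 18.5 base) is simple: report ready at most once per provisioning cycle, surviving reboots during the IMDS poll. A hypothetical helper sketching the idea; report_ready_once is not cloud-init code, and only REPORTED_READY_MARKER_FILE comes from the patch:

    import os
    import time

    REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"


    def report_ready_once(report_func, marker=REPORTED_READY_MARKER_FILE):
        # Hypothetical helper: write the marker before signalling the
        # fabric, so a reboot during the IMDS poll finds the marker and
        # does not re-report (re-reporting deletes the reprovision data).
        if os.path.isfile(marker):
            return False
        with open(marker, "w") as fp:
            fp.write("{pid}: {time}\n".format(
                pid=os.getpid(), time=time.time()))
        report_func()
        return True
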
diff --git a/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch b/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch
new file mode 100644
index 0000000..256331d
--- /dev/null
+++ b/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch
@@ -0,0 +1,129 @@
+From 73e300d896aa04ca4612efc4454c650d92bb10c0 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo <otubo@redhat.com>
+Date: Wed, 29 May 2019 13:41:49 +0200
+Subject: [PATCH 5/5] cc_mounts: check if mount -a on no-change fstab path
+
+RH-Author: Eduardo Otubo <otubo@redhat.com>
+Message-id: <20190529134149.842-6-otubo@redhat.com>
+Patchwork-id: 88269
+O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 5/5] cc_mounts: check if mount -a on no-change fstab path
+Bugzilla: 1648375
+RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+RH-Acked-by: Cathy Avery <cavery@redhat.com>
+
+From: "Jason Zions (MSFT)" <jasonzio@microsoft.com>
+commit acc25d8d7d603313059ac35b4253b504efc560a9
+Author: Jason Zions (MSFT) <jasonzio@microsoft.com>
+Date:   Wed May 8 22:47:07 2019 +0000
+
+    cc_mounts: check if mount -a on no-change fstab path
+
+    Under some circumstances, cc_disk_setup may reformat volumes which
+    already appear in /etc/fstab (e.g. Azure ephemeral drive is reformatted
+    from NTFS to ext4 after service-heal). Normally, cc_mounts only calls
+    mount -a if it altered /etc/fstab. With this change cc_mounts will read
+    /proc/mounts and verify if configured mounts are already mounted and if
+    not raise flag to request a mount -a.  This handles the case where no
+    changes to fstab occur but a mount -a is required due to change in
+    underlying device which prevented the .mount unit from running until
+    after disk was reformatted.
+
+    LP: #1825596
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
+---
+ cloudinit/config/cc_mounts.py                      | 11 ++++++++
+ .../unittests/test_handler/test_handler_mounts.py  | 30 +++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 1 deletion(-)
+
+diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
+index 339baba..123ffb8 100644
+--- a/cloudinit/config/cc_mounts.py
++++ b/cloudinit/config/cc_mounts.py
+@@ -439,6 +439,7 @@ def handle(_name, cfg, cloud, log, _args):
+ 
+     cc_lines = []
+     needswap = False
++    need_mount_all = False
+     dirs = []
+     for line in actlist:
+         # write 'comment' in the fs_mntops, entry,  claiming this
+@@ -449,11 +450,18 @@ def handle(_name, cfg, cloud, log, _args):
+             dirs.append(line[1])
+         cc_lines.append('\t'.join(line))
+ 
++    mount_points = [v['mountpoint'] for k, v in util.mounts().items()
++                    if 'mountpoint' in v]
+     for d in dirs:
+         try:
+             util.ensure_dir(d)
+         except Exception:
+             util.logexc(log, "Failed to make '%s' config-mount", d)
++        # dirs is the list of directories on which a volume should be
++        # mounted. If any of them does not already show up in the list of
++        # current mount points, we will definitely need to do mount -a.
++        if not need_mount_all and d not in mount_points:
++            need_mount_all = True
+ 
+     sadds = [WS.sub(" ", n) for n in cc_lines]
+     sdrops = [WS.sub(" ", n) for n in fstab_removed]
+@@ -473,6 +481,9 @@ def handle(_name, cfg, cloud, log, _args):
+         log.debug("No changes to /etc/fstab made.")
+     else:
+         log.debug("Changes to fstab: %s", sops)
++        need_mount_all = True
++
++    if need_mount_all:
+         activate_cmds.append(["mount", "-a"])
+         if uses_systemd:
+             activate_cmds.append(["systemctl", "daemon-reload"])
+diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
+index 8fea6c2..0fb160b 100644
+--- a/tests/unittests/test_handler/test_handler_mounts.py
++++ b/tests/unittests/test_handler/test_handler_mounts.py
+@@ -154,7 +154,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
+                        return_value=True)
+ 
+         self.add_patch('cloudinit.config.cc_mounts.util.subp',
+-                       'mock_util_subp')
++                       'm_util_subp')
++
++        self.add_patch('cloudinit.config.cc_mounts.util.mounts',
++                       'mock_util_mounts',
++                       return_value={
++                           '/dev/sda1': {'fstype': 'ext4',
++                                         'mountpoint': '/',
++                                         'opts': 'rw,relatime,discard'
++                                         }})
+ 
+         self.mock_cloud = mock.Mock()
+         self.mock_log = mock.Mock()
+@@ -230,4 +238,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
+             fstab_new_content = fd.read()
+             self.assertEqual(fstab_expected_content, fstab_new_content)
+ 
++    def test_no_change_fstab_sets_needs_mount_all(self):
++        '''verify mount -a runs when fstab is unchanged but a mount is missing'''
++        fstab_original_content = (
++            'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n'
++            'LABEL=UEFI /boot/efi vfat defaults 0 0\n'
++            '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
++        )
++        fstab_expected_content = fstab_original_content
++        cc = {'mounts': [
++                 ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]}
++        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
++            fd.write(fstab_original_content)
++        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
++            fstab_new_content = fd.read()
++            self.assertEqual(fstab_expected_content, fstab_new_content)
++        cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
++        self.m_util_subp.assert_has_calls([
++            mock.call(['mount', '-a']),
++            mock.call(['systemctl', 'daemon-reload'])])
++
+ # vi: ts=4 expandtab
+-- 
+1.8.3.1
+
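The decision rule this patch adds to cc_mounts.handle() can be read in isolation: run mount -a when fstab changed, or when any configured mount point is absent from the live mount table. A minimal sketch, assuming Linux's /proc/mounts format (device, mountpoint, fstype, options, freq, passno); current_mount_points and needs_mount_all are illustrative names standing in for util.mounts() and the inline logic above:

    def current_mount_points(proc_mounts="/proc/mounts"):
        # Collect the mountpoint field (second column) of each line.
        points = set()
        with open(proc_mounts) as fp:
            for line in fp:
                fields = line.split()
                if len(fields) >= 2:
                    points.add(fields[1])
        return points


    def needs_mount_all(configured_dirs, fstab_changed):
        # mount -a is needed if fstab changed, or if any configured
        # mount point is not currently mounted.
        mounted = current_mount_points()
        return fstab_changed or any(d not in mounted for d in configured_dirs)
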
diff --git a/SOURCES/ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch b/SOURCES/ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch
deleted file mode 100644
index 114366c..0000000
--- a/SOURCES/ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch
+++ /dev/null
@@ -1,216 +0,0 @@
-From 09b873ca69821ac2a3e306da0af0437b849d1dd8 Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo <otubo@redhat.com>
-Date: Fri, 18 Jan 2019 16:55:36 +0100
-Subject: [PATCH] net: Make sysconfig renderer compatible with Network Manager.
-
-RH-Author: Eduardo Otubo <otubo@redhat.com>
-Message-id: <20190118165536.25963-1-otubo@redhat.com>
-Patchwork-id: 84052
-O-Subject: [RHEL-8.0 cloud-init PATCH] net: Make sysconfig renderer compatible with Network Manager.
-Bugzilla: 1602784
-RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-RH-Acked-by: Mohammed Gamal <mgamal@redhat.com>
-
-Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1602784
-Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=19877292
-Tested by: upstream maintainers and me
-
-commit 3861102fcaf47a882516d8b6daab518308eb3086
-Author: Eduardo Otubo <otubo@redhat.com>
-Date:   Fri Jan 18 15:36:19 2019 +0000
-
-    net: Make sysconfig renderer compatible with Network Manager.
-
-    The 'sysconfig' renderer is activated if, and only if, there's ifup and
-    ifdown commands present in its search dictonary or the network-scripts
-    configuration files are found. This patch adds a check for Network-
-    Manager configuration file as well.
-
-    This solution is based on the use of the plugin 'ifcfg-rh' present in
-    Network-Manager and is designed to support Fedora 29 or other
-    distributions that also replaced network-scripts by Network-Manager.
-
-Signed-off-by: Eduardo Otubo <otubo@redhat.com>
-Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
----
- cloudinit/net/sysconfig.py  | 36 +++++++++++++++++++++++
- tests/unittests/test_net.py | 71 +++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 107 insertions(+)
-
-diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
-index bd81832..42291aa 100644
---- a/cloudinit/net/sysconfig.py
-+++ b/cloudinit/net/sysconfig.py
-@@ -10,11 +10,14 @@ from cloudinit.distros.parsers import resolv_conf
- from cloudinit import log as logging
- from cloudinit import util
- 
-+from configobj import ConfigObj
-+
- from . import renderer
- from .network_state import (
-     is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6)
- 
- LOG = logging.getLogger(__name__)
-+NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
- 
- 
- def _make_header(sep='#'):
-@@ -46,6 +49,24 @@ def _quote_value(value):
-         return value
- 
- 
-+def enable_ifcfg_rh(path):
-+    """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
-+    config = ConfigObj(path)
-+    if 'main' in config:
-+        if 'plugins' in config['main']:
-+            if 'ifcfg-rh' in config['main']['plugins']:
-+                return
-+        else:
-+            config['main']['plugins'] = []
-+
-+        if isinstance(config['main']['plugins'], list):
-+            config['main']['plugins'].append('ifcfg-rh')
-+        else:
-+            config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh']
-+        config.write()
-+        LOG.debug('Enabled ifcfg-rh NetworkManager plugins')
-+
-+
- class ConfigMap(object):
-     """Sysconfig like dictionary object."""
- 
-@@ -597,6 +618,8 @@ class Renderer(renderer.Renderer):
-             netrules_content = self._render_persistent_net(network_state)
-             netrules_path = util.target_path(target, self.netrules_path)
-             util.write_file(netrules_path, netrules_content, file_mode)
-+        if available_nm(target=target):
-+            enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
- 
-         # always write /etc/sysconfig/network configuration
-         sysconfig_path = util.target_path(target, "etc/sysconfig/network")
-@@ -608,6 +631,13 @@ class Renderer(renderer.Renderer):
- 
- 
- def available(target=None):
-+    sysconfig = available_sysconfig(target=target)
-+    nm = available_nm(target=target)
-+
-+    return any([nm, sysconfig])
-+
-+
-+def available_sysconfig(target=None):
-     expected = ['ifup', 'ifdown']
-     search = ['/sbin', '/usr/sbin']
-     for p in expected:
-@@ -623,4 +653,10 @@ def available(target=None):
-     return True
- 
- 
-+def available_nm(target=None):
-+    if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
-+        return False
-+    return True
-+
-+
- # vi: ts=4 expandtab
-diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
-index 9cf41bc..8e520f6 100644
---- a/tests/unittests/test_net.py
-+++ b/tests/unittests/test_net.py
-@@ -24,6 +24,7 @@ import os
- import textwrap
- import yaml
- 
-+
- DHCP_CONTENT_1 = """
- DEVICE='eth0'
- PROTO='dhcp'
-@@ -1542,6 +1543,7 @@ iface eth1 inet dhcp
- 
- class TestSysConfigRendering(CiTestCase):
- 
-+    nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf"
-     scripts_dir = '/etc/sysconfig/network-scripts'
-     header = ('# Created by cloud-init on instance boot automatically, '
-               'do not edit.\n#\n')
-@@ -1853,6 +1855,75 @@ iface eth0 inet dhcp
-         self.assertEqual(
-             expected, dir2dict(tmp_dir)['/etc/network/interfaces'])
- 
-+    def test_check_ifcfg_rh(self):
-+        """ifcfg-rh plugin is added NetworkManager.conf if conf present."""
-+        render_dir = self.tmp_dir()
-+        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
-+        util.ensure_dir(os.path.dirname(nm_cfg))
-+
-+        # write a template nm.conf, note plugins is a list here
-+        with open(nm_cfg, 'w') as fh:
-+            fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n')
-+        self.assertTrue(os.path.exists(nm_cfg))
-+
-+        # render and read
-+        entry = NETWORK_CONFIGS['small']
-+        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
-+                                      dir=render_dir)
-+        self._compare_files_to_expected(entry[self.expected_name], found)
-+        self._assert_headers(found)
-+
-+        # check ifcfg-rh is in the 'plugins' list
-+        config = sysconfig.ConfigObj(nm_cfg)
-+        self.assertIn('ifcfg-rh', config['main']['plugins'])
-+
-+    def test_check_ifcfg_rh_plugins_string(self):
-+        """ifcfg-rh plugin is append when plugins is a string."""
-+        render_dir = self.tmp_path("render")
-+        os.makedirs(render_dir)
-+        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
-+        util.ensure_dir(os.path.dirname(nm_cfg))
-+
-+        # write a template nm.conf, note plugins is a value here
-+        util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n')
-+
-+        # render and read
-+        entry = NETWORK_CONFIGS['small']
-+        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
-+                                      dir=render_dir)
-+        self._compare_files_to_expected(entry[self.expected_name], found)
-+        self._assert_headers(found)
-+
-+        # check raw content has plugin
-+        nm_file_content = util.load_file(nm_cfg)
-+        self.assertIn('ifcfg-rh', nm_file_content)
-+
-+        # check ifcfg-rh is in the 'plugins' list
-+        config = sysconfig.ConfigObj(nm_cfg)
-+        self.assertIn('ifcfg-rh', config['main']['plugins'])
-+
-+    def test_check_ifcfg_rh_plugins_no_plugins(self):
-+        """enable_ifcfg_plugin creates plugins value if missing."""
-+        render_dir = self.tmp_path("render")
-+        os.makedirs(render_dir)
-+        nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
-+        util.ensure_dir(os.path.dirname(nm_cfg))
-+
-+        # write a template nm.conf, note plugins is missing
-+        util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n')
-+        self.assertTrue(os.path.exists(nm_cfg))
-+
-+        # render and read
-+        entry = NETWORK_CONFIGS['small']
-+        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
-+                                      dir=render_dir)
-+        self._compare_files_to_expected(entry[self.expected_name], found)
-+        self._assert_headers(found)
-+
-+        # check ifcfg-rh is in the 'plugins' list
-+        config = sysconfig.ConfigObj(nm_cfg)
-+        self.assertIn('ifcfg-rh', config['main']['plugins'])
-+
- 
- class TestNetplanNetRendering(CiTestCase):
- 
--- 
-1.8.3.1
-
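For reference, the ConfigObj manipulation at the heart of the removed file (the change lives on as numbered Patch0008 in the spec below) normalizes the two forms NetworkManager.conf can take: ConfigObj parses plugins=foo as a string and plugins=foo,bar as a list. A minimal sketch under that assumption; enable_plugin is an illustrative name for the enable_ifcfg_rh logic:

    from configobj import ConfigObj  # third-party 'configobj' package


    def enable_plugin(path, plugin='ifcfg-rh'):
        config = ConfigObj(path)
        if 'main' not in config:
            return  # nothing to do without a [main] section
        plugins = config['main'].get('plugins', [])
        if not isinstance(plugins, list):
            plugins = [plugins]  # normalize the single-string form
        if plugin not in plugins:
            plugins.append(plugin)
            config['main']['plugins'] = plugins
            config.write()  # write back to the file it was loaded from
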
diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec
index 60bf175..c0568f2 100644
--- a/SPECS/cloud-init.spec
+++ b/SPECS/cloud-init.spec
@@ -5,8 +5,8 @@
 %global debug_package %{nil}
 
 Name:           cloud-init
-Version:        18.2
-Release:        6%{?dist}
+Version:        18.5
+Release:        1%{?dist}.4
 Summary:        Cloud instance init scripts
 
 Group:          System Environment/Base
@@ -18,25 +18,29 @@ Source1:        cloud-init-tmpfiles.conf
 Patch0001: 0001-Add-initial-redhat-setup.patch
 Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch
 Patch0003: 0003-limit-permissions-on-def_log_file.patch
-Patch0005: 0005-add-power-state-change-module-to-cloud_final_modules.patch
-Patch0006: 0006-azure-ensure-that-networkmanager-hook-script-runs.patch
-Patch0007: 0007-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
-Patch0008: 0008-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
-Patch0009: 0009-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
-# For bz#1615599 - [Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM
-Patch10: ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch
-# For bz#1615599 - [Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM
-Patch11: ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch
-# For bz#1615599 - [Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM
-Patch12: ci-azure-Add-reported-ready-marker-file.patch
-# For bz#1615599 - [Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM
-Patch13: ci-Adding-disk_setup-to-rhel-cloud.cfg.patch
-# For bz#1644335 - [ESXi][RHEL8.0]Enable cloud-init by default on VMware
-Patch14: ci-Enable-cloud-init-by-default-on-vmware.patch
-# For bz#1664227 - [Azure]String missmatch causes the /dev/sdb1 mounting failed after stop&start VM
-Patch15: ci-Fix-string-missmatch-when-mounting-ntfs.patch
-# For bz#1602784 - cloud-init: Sometimes image boots fingerprints is configured, there's a network device present but it's not configured
-Patch16: ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch
+Patch0004: 0004-azure-ensure-that-networkmanager-hook-script-runs.patch
+Patch0005: 0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch
+Patch0006: 0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch
+Patch0007: 0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch
+Patch0008: 0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch
+Patch0009: 0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch
+Patch0010: 0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch
+Patch0011: 0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch
+Patch0012: 0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch
+# For bz#1579237 - [WALA][cloud] cloud-init dhclient-hook script has some unexpected side-effects on Azure
+Patch13: ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure
+Patch14: ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure
+Patch15: ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure
+Patch16: ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure
+Patch17: ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure
+Patch18: ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch
+# For bz#1648375 - [Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure[8.0.1]
+Patch19: ci-Azure-Return-static-fallback-address-as-if-failed-to.patch
 
 BuildArch:      noarch
 
@@ -193,15 +197,46 @@ fi
 %{_libexecdir}/%{name}
 %{_bindir}/cloud-init*
 %doc %{_datadir}/doc/%{name}
-%dir /run/cloud-init
+%dir %verify(not mode) /run/cloud-init
 %dir /var/lib/cloud
 /etc/NetworkManager/dispatcher.d/cloud-init-azure-hook
 %{_udevrulesdir}/66-azure-ephemeral.rules
+%{_sysconfdir}/bash_completion.d/cloud-init
+%{_bindir}/cloud-id
 
 %dir %{_sysconfdir}/rsyslog.d
 %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
 
 %changelog
+* Fri Jul 12 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.5-1.el8.4
+- Fix for TPS tests [bz#1687563]
+- Resolves: bz#1687563
+  (cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 8.0.1])
+
+* Thu Jul 04 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.5-1.el8.3
+- ci-Azure-Return-static-fallback-address-as-if-failed-to.patch [bz#1648375]
+- Resolves: bz#1648375
+  ([Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure[8.0.1])
+
+* Mon Jun 03 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.5-1.el8.2
+- ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch [bz#1648375]
+- ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch [bz#1648375]
+- ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch [bz#1648375]
+- ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch [bz#1648375]
+- ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch [bz#1648375]
+- Resolves: bz#1648375
+  ([Azure] [RHEL 8] Cloud-init fixes to support fast provisioning for Azure)
+
+* Thu May 09 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.5-1.el8.1
+- ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch [bz#1579237]
+- Resolves: bz#1579237
+  ([WALA][cloud] cloud-init dhclient-hook script has some unexpected side-effects on Azure)
+
+* Wed Apr 10 2019 Danilo de Paula <ddepaula@redhat.com> - 18.5-1.el8
+- Rebase to cloud-init 18.5
+- Resolves: bz#1687563
+  (cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 8.0.1])
+
 * Wed Jan 23 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.2-6.el8
 - ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch [bz#1602784]
 - Resolves: bz#1602784
@@ -212,7 +247,7 @@ fi
 - Resolves: bz#1664227
   ([Azure]String missmatch causes the /dev/sdb1 mounting failed after stop&start VM)
 
-* Thu Jan 10 2019  <> - 18.2-4.el8
+* Thu Jan 10 2019 Miroslav Rezanina <mrezanin@redhat.com> - 18.2-4.el8
 - ci-Enable-cloud-init-by-default-on-vmware.patch [bz#1644335]
 - Resolves: bz#1644335
   ([ESXi][RHEL8.0]Enable cloud-init by default on VMware)