From c1c26ed526aec346303a8ca295cd8cf5718b2364 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jul 30 2019 03:40:16 +0000 Subject: import cloud-init-18.5-4.el8 --- diff --git a/.cloud-init.metadata b/.cloud-init.metadata new file mode 100644 index 0000000..f7516a9 --- /dev/null +++ b/.cloud-init.metadata @@ -0,0 +1 @@ +a862d6618a4c56c79d3fb0e279f6c93d0f0141cd SOURCES/cloud-init-18.5.tar.gz diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e2ea71d --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/cloud-init-18.5.tar.gz diff --git a/SOURCES/0001-Add-initial-redhat-setup.patch b/SOURCES/0001-Add-initial-redhat-setup.patch new file mode 100644 index 0000000..6cdf59c --- /dev/null +++ b/SOURCES/0001-Add-initial-redhat-setup.patch @@ -0,0 +1,502 @@ +From bfdc177f6127043eac555d356403d9e1d5c52243 Mon Sep 17 00:00:00 2001 +From: Miroslav Rezanina +Date: Thu, 31 May 2018 16:45:23 +0200 +Subject: Add initial redhat setup + +Rebase notes (18.5): +- added bash_completition file +- added cloud-id file + +Merged patches (18.5): +- 2d6b469 add power-state-change module to cloud_final_modules +- 764159f Adding systemd mount options to wait for cloud-init +- da4d99e Adding disk_setup to rhel/cloud.cfg +- f5c6832 Enable cloud-init by default on vmware + +Signed-off-by: Danilo C. L. de Paula +--- + cloudinit/config/cc_chef.py | 6 +- + cloudinit/settings.py | 7 +- + redhat/.gitignore | 1 + + redhat/Makefile | 71 ++++++ + redhat/Makefile.common | 35 +++ + redhat/cloud-init-tmpfiles.conf | 1 + + redhat/cloud-init.spec.template | 352 ++++++++++++++++++++++++++ + redhat/rpmbuild/BUILD/.gitignore | 3 + + redhat/rpmbuild/RPMS/.gitignore | 3 + + redhat/rpmbuild/SOURCES/.gitignore | 3 + + redhat/rpmbuild/SPECS/.gitignore | 3 + + redhat/rpmbuild/SRPMS/.gitignore | 3 + + redhat/scripts/frh.py | 27 ++ + redhat/scripts/git-backport-diff | 327 ++++++++++++++++++++++++ + redhat/scripts/git-compile-check | 215 ++++++++++++++++ + redhat/scripts/process-patches.sh | 73 ++++++ + redhat/scripts/tarball_checksum.sh | 3 + + rhel/README.rhel | 5 + + rhel/cloud-init-tmpfiles.conf | 1 + + rhel/cloud.cfg | 69 +++++ + rhel/systemd/cloud-config.service | 18 ++ + rhel/systemd/cloud-config.target | 11 + + rhel/systemd/cloud-final.service | 19 ++ + rhel/systemd/cloud-init-local.service | 31 +++ + rhel/systemd/cloud-init.service | 25 ++ + setup.py | 64 +---- + tools/read-version | 25 +- + 27 files changed, 1311 insertions(+), 90 deletions(-) + create mode 100644 redhat/.gitignore + create mode 100644 redhat/Makefile + create mode 100644 redhat/Makefile.common + create mode 100644 redhat/cloud-init-tmpfiles.conf + create mode 100644 redhat/cloud-init.spec.template + create mode 100644 redhat/rpmbuild/BUILD/.gitignore + create mode 100644 redhat/rpmbuild/RPMS/.gitignore + create mode 100644 redhat/rpmbuild/SOURCES/.gitignore + create mode 100644 redhat/rpmbuild/SPECS/.gitignore + create mode 100644 redhat/rpmbuild/SRPMS/.gitignore + create mode 100755 redhat/scripts/frh.py + create mode 100755 redhat/scripts/git-backport-diff + create mode 100755 redhat/scripts/git-compile-check + create mode 100755 redhat/scripts/process-patches.sh + create mode 100755 redhat/scripts/tarball_checksum.sh + create mode 100644 rhel/README.rhel + create mode 100644 rhel/cloud-init-tmpfiles.conf + create mode 100644 rhel/cloud.cfg + create mode 100644 rhel/systemd/cloud-config.service + create mode 100644 rhel/systemd/cloud-config.target + create mode 100644 rhel/systemd/cloud-final.service + create mode 100644 
rhel/systemd/cloud-init-local.service + create mode 100644 rhel/systemd/cloud-init.service + +diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py +index 46abedd1..fe7bda8c 100644 +--- a/cloudinit/config/cc_chef.py ++++ b/cloudinit/config/cc_chef.py +@@ -33,7 +33,7 @@ file). + + chef: + directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, +- /var/cache/chef, /var/backups/chef, /var/run/chef) ++ /var/cache/chef, /var/backups/chef, /run/chef) + validation_cert: (optional string to be written to file validation_key) + special value 'system' means set use existing file + validation_key: (optional the path for validation_cert. default +@@ -88,7 +88,7 @@ CHEF_DIRS = tuple([ + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', +- '/var/run/chef', ++ '/run/chef', + ]) + REQUIRED_CHEF_DIRS = tuple([ + '/etc/chef', +@@ -112,7 +112,7 @@ CHEF_RB_TPL_DEFAULTS = { + 'json_attribs': CHEF_FB_PATH, + 'file_cache_path': "/var/cache/chef", + 'file_backup_path': "/var/backups/chef", +- 'pid_file': "/var/run/chef/client.pid", ++ 'pid_file': "/run/chef/client.pid", + 'show_time': True, + } + CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index b1ebaade..c5367687 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -44,13 +44,16 @@ CFG_BUILTIN = { + ], + 'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], +- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'], ++ 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], ++ 'ssh_deletekeys': False, ++ 'ssh_genkeytypes': [], ++ 'syslog_fix_perms': [], + 'system_info': { + 'paths': { + 'cloud_dir': '/var/lib/cloud', + 'templates_dir': '/etc/cloud/templates/', + }, +- 'distro': 'ubuntu', ++ 'distro': 'rhel', + 'network': {'renderers': None}, + }, + 'vendor_data': {'enabled': True, 'prefix': []}, +diff --git a/rhel/README.rhel b/rhel/README.rhel +new file mode 100644 +index 00000000..aa29630d +--- /dev/null ++++ b/rhel/README.rhel +@@ -0,0 +1,5 @@ ++The following cloud-init modules are currently unsupported on this OS: ++ - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) ++ - byobu ('byobu_by_default' option) ++ - chef ++ - grub_dpkg +diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf +new file mode 100644 +index 00000000..0c6d2a3b +--- /dev/null ++++ b/rhel/cloud-init-tmpfiles.conf +@@ -0,0 +1 @@ ++d /run/cloud-init 0700 root root - - +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +new file mode 100644 +index 00000000..f0db3c12 +--- /dev/null ++++ b/rhel/cloud.cfg +@@ -0,0 +1,69 @@ ++users: ++ - default ++ ++disable_root: 1 ++ssh_pwauth: 0 ++ ++mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] ++resize_rootfs_tmp: /dev ++ssh_deletekeys: 0 ++ssh_genkeytypes: ~ ++syslog_fix_perms: ~ ++disable_vmware_customization: false ++ ++cloud_init_modules: ++ - disk_setup ++ - migrator ++ - bootcmd ++ - write-files ++ - growpart ++ - resizefs ++ - set_hostname ++ - update_hostname ++ - update_etc_hosts ++ - rsyslog ++ - users-groups ++ - ssh ++ ++cloud_config_modules: ++ - mounts ++ - locale ++ - set-passwords ++ - rh_subscription ++ - yum-add-repo ++ - package-update-upgrade-install ++ - timezone ++ - puppet ++ - chef ++ - salt-minion ++ - mcollective ++ - disable-ec2-metadata ++ - runcmd ++ ++cloud_final_modules: ++ - 
rightscale_userdata ++ - scripts-per-once ++ - scripts-per-boot ++ - scripts-per-instance ++ - scripts-user ++ - ssh-authkey-fingerprints ++ - keys-to-console ++ - phone-home ++ - final-message ++ - power-state-change ++ ++system_info: ++ default_user: ++ name: cloud-user ++ lock_passwd: true ++ gecos: Cloud User ++ groups: [wheel, adm, systemd-journal] ++ sudo: ["ALL=(ALL) NOPASSWD:ALL"] ++ shell: /bin/bash ++ distro: rhel ++ paths: ++ cloud_dir: /var/lib/cloud ++ templates_dir: /etc/cloud/templates ++ ssh_svcname: sshd ++ ++# vim:syntax=yaml +diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service +new file mode 100644 +index 00000000..12ca9dfd +--- /dev/null ++++ b/rhel/systemd/cloud-config.service +@@ -0,0 +1,18 @@ ++[Unit] ++Description=Apply the settings specified in cloud-config ++After=network-online.target cloud-config.target ++Wants=network-online.target cloud-config.target ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=config ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target +new file mode 100644 +index 00000000..ae9b7d02 +--- /dev/null ++++ b/rhel/systemd/cloud-config.target +@@ -0,0 +1,11 @@ ++# cloud-init normally emits a "cloud-config" upstart event to inform third ++# parties that cloud-config is available, which does us no good when we're ++# using systemd. cloud-config.target serves as this synchronization point ++# instead. Services that would "start on cloud-config" with upstart can ++# instead use "After=cloud-config.target" and "Wants=cloud-config.target" ++# as appropriate. 
++ ++[Unit] ++Description=Cloud-config availability ++Wants=cloud-init-local.service cloud-init.service ++After=cloud-init-local.service cloud-init.service +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +new file mode 100644 +index 00000000..32a83d85 +--- /dev/null ++++ b/rhel/systemd/cloud-final.service +@@ -0,0 +1,19 @@ ++[Unit] ++Description=Execute cloud user/final scripts ++After=network-online.target cloud-config.service rc-local.service ++Wants=network-online.target cloud-config.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=final ++RemainAfterExit=yes ++TimeoutSec=0 ++KillMode=process ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +new file mode 100644 +index 00000000..656eddb9 +--- /dev/null ++++ b/rhel/systemd/cloud-init-local.service +@@ -0,0 +1,31 @@ ++[Unit] ++Description=Initial cloud-init job (pre-networking) ++DefaultDependencies=no ++Wants=network-pre.target ++After=systemd-remount-fs.service ++Requires=dbus.socket ++After=dbus.socket ++Before=NetworkManager.service network.service ++Before=network-pre.target ++Before=shutdown.target ++Before=firewalld.target ++Conflicts=shutdown.target ++RequiresMountsFor=/var/lib/cloud ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStartPre=/bin/mkdir -p /run/cloud-init ++ExecStartPre=/sbin/restorecon /run/cloud-init ++ExecStartPre=/usr/bin/touch /run/cloud-init/enabled ++ExecStart=/usr/bin/cloud-init init --local ++ExecStart=/bin/touch /run/cloud-init/network-config-ready ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +new file mode 100644 +index 00000000..68fc5f19 +--- /dev/null ++++ b/rhel/systemd/cloud-init.service +@@ -0,0 +1,25 @@ ++[Unit] ++Description=Initial cloud-init job (metadata service crawler) ++Wants=cloud-init-local.service ++Wants=sshd-keygen.service ++Wants=sshd.service ++After=cloud-init-local.service ++After=NetworkManager.service network.service ++Before=network-online.target ++Before=sshd-keygen.service ++Before=sshd.service ++Before=systemd-user-sessions.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init init ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/setup.py b/setup.py +index ea37efc3..06ae48a6 100755 +--- a/setup.py ++++ b/setup.py +@@ -135,11 +135,6 @@ INITSYS_FILES = { + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], + 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], +- 'systemd': [render_tmpl(f) +- for f in (glob('systemd/*.tmpl') + +- glob('systemd/*.service') + +- glob('systemd/*.target')) if is_f(f)], +- 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + 'upstart': [f for 
f in glob('upstart/*') if is_f(f)], + } + INITSYS_ROOTS = { +@@ -148,9 +143,6 @@ INITSYS_ROOTS = { + 'sysvinit_deb': 'etc/init.d', + 'sysvinit_openrc': 'etc/init.d', + 'sysvinit_suse': 'etc/init.d', +- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), +- 'systemd.generators': pkg_config_read('systemd', +- 'systemdsystemgeneratordir'), + 'upstart': 'etc/init/', + } + INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) +@@ -188,47 +180,6 @@ class MyEggInfo(egg_info): + return ret + + +-# TODO: Is there a better way to do this?? +-class InitsysInstallData(install): +- init_system = None +- user_options = install.user_options + [ +- # This will magically show up in member variable 'init_sys' +- ('init-system=', None, +- ('init system(s) to configure (%s) [default: None]' % +- (", ".join(INITSYS_TYPES)))), +- ] +- +- def initialize_options(self): +- install.initialize_options(self) +- self.init_system = "" +- +- def finalize_options(self): +- install.finalize_options(self) +- +- if self.init_system and isinstance(self.init_system, str): +- self.init_system = self.init_system.split(",") +- +- if len(self.init_system) == 0: +- self.init_system = ['systemd'] +- +- bad = [f for f in self.init_system if f not in INITSYS_TYPES] +- if len(bad) != 0: +- raise DistutilsArgError( +- "Invalid --init-system: %s" % (','.join(bad))) +- +- for system in self.init_system: +- # add data files for anything that starts with '.' +- datakeys = [k for k in INITSYS_ROOTS +- if k.partition(".")[0] == system] +- for k in datakeys: +- if not INITSYS_FILES[k]: +- continue +- self.distribution.data_files.append( +- (INITSYS_ROOTS[k], INITSYS_FILES[k])) +- # Force that command to reinitalize (with new file list) +- self.distribution.reinitialize_command('install_data', True) +- +- + if not in_virtualenv(): + USR = "/" + USR + ETC = "/" + ETC +@@ -239,11 +190,9 @@ if not in_virtualenv(): + + data_files = [ + (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), +- (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), + (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), + (ETC + '/cloud/templates', glob('templates/*')), +- (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', +- 'tools/uncloud-init', ++ (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + 'tools/write-ssh-key-fingerprints']), + (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), + (USR + '/share/doc/cloud-init/examples', +@@ -255,15 +204,8 @@ if os.uname()[0] != 'FreeBSD': + data_files.extend([ + (ETC + '/NetworkManager/dispatcher.d/', + ['tools/hook-network-manager']), +- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), +- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) ++ ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]) + ]) +-# Use a subclass for install that handles +-# adding on the right init system configuration files +-cmdclass = { +- 'install': InitsysInstallData, +- 'egg_info': MyEggInfo, +-} + + requirements = read_requires() + +@@ -278,8 +220,6 @@ setuptools.setup( + scripts=['tools/cloud-init-per'], + license='Dual-licensed under GPLv3 or Apache 2.0', + data_files=data_files, +- install_requires=requirements, +- cmdclass=cmdclass, + entry_points={ + 'console_scripts': [ + 'cloud-init = cloudinit.cmd.main:main', +diff --git a/tools/read-version b/tools/read-version +index e69c2ce0..d43cc8f0 100755 +--- a/tools/read-version ++++ b/tools/read-version +@@ -65,29 +65,8 @@ output_json = '--json' in sys.argv + src_version = 
ci_version.version_string() + version_long = None + +-if is_gitdir(_tdir) and which("git"): +- flags = [] +- if use_tags: +- flags = ['--tags'] +- cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags +- +- version = tiny_p(cmd).strip() +- +- if not version.startswith(src_version): +- sys.stderr.write("git describe version (%s) differs from " +- "cloudinit.version (%s)\n" % (version, src_version)) +- sys.stderr.write( +- "Please get the latest upstream tags.\n" +- "As an example, this can be done with the following:\n" +- "$ git remote add upstream https://git.launchpad.net/cloud-init\n" +- "$ git fetch upstream --tags\n" +- ) +- sys.exit(1) +- +- version_long = tiny_p(cmd + ["--long"]).strip() +-else: +- version = src_version +- version_long = None ++version = src_version ++version_long = None + + # version is X.Y.Z[+xxx.gHASH] + # version_long is None or X.Y.Z-xxx-gHASH +-- +2.20.1 + diff --git a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch new file mode 100644 index 0000000..1dcf4bd --- /dev/null +++ b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch @@ -0,0 +1,271 @@ +From 0bff7d73c49043b0820d0231c9a47539287f35e3 Mon Sep 17 00:00:00 2001 +From: Miroslav Rezanina +Date: Thu, 31 May 2018 19:37:55 +0200 +Subject: Do not write NM_CONTROLLED=no in generated interface config files + +X-downstream-only: true +Signed-off-by: Ryan McCabe +Signed-off-by: Danilo C. L. de Paula +--- + cloudinit/net/sysconfig.py | 1 - + tests/unittests/test_net.py | 30 ------------------------------ + 2 files changed, 31 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 17293e1d..ae0554ef 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -250,7 +250,6 @@ class Renderer(renderer.Renderer): + iface_defaults = tuple([ + ('ONBOOT', True), + ('USERCTL', False), +- ('NM_CONTROLLED', False), + ('BOOTPROTO', 'none'), + ]) + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 195f261c..5f1aa3e7 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -175,7 +175,6 @@ GATEWAY=172.19.3.254 + HWADDR=fa:16:3e:ed:9a:59 + IPADDR=172.19.1.34 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -279,7 +278,6 @@ IPADDR=172.19.1.34 + IPADDR1=10.0.0.10 + NETMASK=255.255.252.0 + NETMASK1=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -407,7 +405,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" + IPV6INIT=yes + IPV6_DEFAULTGW=2001:DB8::1 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -523,7 +520,6 @@ NETWORK_CONFIGS = { + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -539,7 +535,6 @@ NETWORK_CONFIGS = { + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -652,7 +647,6 @@ NETWORK_CONFIGS = { + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -894,14 +888,12 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no"""), + 'ifcfg-bond0.200': textwrap.dedent("""\ + BOOTPROTO=dhcp + 
DEVICE=bond0.200 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=bond0 + TYPE=Ethernet +@@ -918,7 +910,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=no +@@ -928,7 +919,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -945,7 +935,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eth0 + TYPE=Ethernet +@@ -956,7 +945,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -966,7 +954,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -976,7 +963,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -985,7 +971,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -993,7 +978,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=dhcp + DEVICE=eth5 + HWADDR=98:bb:9f:2c:e8:8a +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no""") +@@ -1356,7 +1340,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no +@@ -1366,7 +1349,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1388,7 +1370,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1426,7 +1407,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1443,7 +1423,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=en0 + TYPE=Ethernet +@@ -1484,7 +1463,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=br0 + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=no +@@ -1498,7 +1476,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + HWADDR=52:54:00:12:34:00 + IPV6ADDR=2001:1::100/96 + IPV6INIT=yes +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1510,7 +1487,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + 
HWADDR=52:54:00:12:34:01 + IPV6ADDR=2001:1::101/96 + IPV6INIT=yes +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1584,7 +1560,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + HWADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -1594,7 +1569,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + DEVICE=eth1 + HWADDR=52:54:00:12:34:aa + MTU=1480 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1603,7 +1577,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=eth2 + HWADDR=52:54:00:12:34:ff +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -1969,7 +1942,6 @@ class TestRhelSysConfigRendering(CiTestCase): + BOOTPROTO=dhcp + DEVICE=eth1000 + HWADDR=07-1C-C6-75-A4-BE +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -2090,7 +2062,6 @@ GATEWAY=10.0.2.2 + HWADDR=52:54:00:12:34:00 + IPADDR=10.0.2.15 + NETMASK=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -2111,7 +2082,6 @@ USERCTL=no + # + BOOTPROTO=dhcp + DEVICE=eth0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +-- +2.20.1 + diff --git a/SOURCES/0003-limit-permissions-on-def_log_file.patch b/SOURCES/0003-limit-permissions-on-def_log_file.patch new file mode 100644 index 0000000..1e63b06 --- /dev/null +++ b/SOURCES/0003-limit-permissions-on-def_log_file.patch @@ -0,0 +1,67 @@ +From fa8f782f5dd24e81f7072bfc24c75340f0972af5 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Fri, 7 Apr 2017 18:50:54 -0400 +Subject: limit permissions on def_log_file + +This sets a default mode of 0600 on def_log_file, and makes this +configurable via the def_log_file_mode option in cloud.cfg. + +LP: #1541196 +Resolves: rhbz#1424612 +X-approved-upstream: true +Signed-off-by: Danilo C. L. de Paula +--- + cloudinit/settings.py | 1 + + cloudinit/stages.py | 3 ++- + doc/examples/cloud-config.txt | 4 ++++ + 3 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index c5367687..d982a4d6 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -43,6 +43,7 @@ CFG_BUILTIN = { + 'None', + ], + 'def_log_file': '/var/log/cloud-init.log', ++ 'def_log_file_mode': 0o600, + 'log_cfgs': [], + 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], + 'ssh_deletekeys': False, +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index 8a064124..4f15484d 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -148,8 +148,9 @@ class Init(object): + def _initialize_filesystem(self): + util.ensure_dirs(self._initial_subdirs()) + log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') ++ log_file_mode = util.get_cfg_option_int(self.cfg, 'def_log_file_mode') + if log_file: +- util.ensure_file(log_file) ++ util.ensure_file(log_file, mode=log_file_mode) + perms = self.cfg.get('syslog_fix_perms') + if not perms: + perms = {} +diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt +index eb84dcf5..0e82b83e 100644 +--- a/doc/examples/cloud-config.txt ++++ b/doc/examples/cloud-config.txt +@@ -413,10 +413,14 @@ timezone: US/Eastern + # if syslog_fix_perms is a list, it will iterate through and use the + # first pair that does not raise error. 
+ # ++# 'def_log_file' will be created with mode 'def_log_file_mode', which ++# is specified as a numeric value and defaults to 0600. ++# + # the default values are '/var/log/cloud-init.log' and 'syslog:adm' + # the value of 'def_log_file' should match what is configured in logging + # if either is empty, then no change of ownership will be done + def_log_file: /var/log/my-logging-file.log ++def_log_file_mode: 0600 + syslog_fix_perms: syslog:root + + # you can set passwords for a user or multiple users +-- +2.20.1 + diff --git a/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch b/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch new file mode 100644 index 0000000..bb78670 --- /dev/null +++ b/SOURCES/0004-azure-ensure-that-networkmanager-hook-script-runs.patch @@ -0,0 +1,64 @@ +From 8a8af21fc8fff984f2b4285e9993cfd50cad70c4 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 15 Jun 2017 12:20:39 -0400 +Subject: azure: ensure that networkmanager hook script runs + +The networkmanager hook script was failing to run due to the changes +we made to resolve rhbz#1440831. This corrects the regression by +allowing the NM hook script to run regardless of whether or not +cloud-init is "enabled". + +Resolves: rhbz#1460206 +X-downstream-only: true +Signed-off-by: Danilo C. L. de Paula +--- + tools/hook-dhclient | 3 +-- + tools/hook-network-manager | 3 +-- + tools/hook-rhel.sh | 3 +-- + 3 files changed, 3 insertions(+), 6 deletions(-) + +diff --git a/tools/hook-dhclient b/tools/hook-dhclient +index 02122f37..181cd51e 100755 +--- a/tools/hook-dhclient ++++ b/tools/hook-dhclient +@@ -13,8 +13,7 @@ is_azure() { + } + + is_enabled() { +- # only execute hooks if cloud-init is enabled and on azure +- [ -e /run/cloud-init/enabled ] || return 1 ++ # only execute hooks if cloud-init is running on azure + is_azure + } + +diff --git a/tools/hook-network-manager b/tools/hook-network-manager +index 67d9044a..1d52cad7 100755 +--- a/tools/hook-network-manager ++++ b/tools/hook-network-manager +@@ -13,8 +13,7 @@ is_azure() { + } + + is_enabled() { +- # only execute hooks if cloud-init is enabled and on azure +- [ -e /run/cloud-init/enabled ] || return 1 ++ # only execute hooks if cloud-init running on azure + is_azure + } + +diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh +index 513a5515..d75767e2 100755 +--- a/tools/hook-rhel.sh ++++ b/tools/hook-rhel.sh +@@ -13,8 +13,7 @@ is_azure() { + } + + is_enabled() { +- # only execute hooks if cloud-init is enabled and on azure +- [ -e /run/cloud-init/enabled ] || return 1 ++ # only execute hooks if cloud-init is running on azure + is_azure + } + +-- +2.20.1 + diff --git a/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch new file mode 100644 index 0000000..f157e21 --- /dev/null +++ b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch @@ -0,0 +1,45 @@ +From 471353b3c3bf5cba5cab4d1b203b1c259c709fde Mon Sep 17 00:00:00 2001 +From: Miroslav Rezanina +Date: Thu, 31 May 2018 20:00:32 +0200 +Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp + +Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies +only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6. + +X-downstream-only: yes + +Resolves: rhbz#1519271 +Signed-off-by: Ryan McCabe +Signed-off-by: Danilo C. L. 
de Paula +--- + cloudinit/net/sysconfig.py | 1 + + tests/unittests/test_net.py | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index ae0554ef..ec166cf1 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -310,6 +310,7 @@ class Renderer(renderer.Renderer): + if subnet_type == 'dhcp6': + iface_cfg['IPV6INIT'] = True + iface_cfg['DHCPV6C'] = True ++ iface_cfg['IPV6_AUTOCONF'] = False + elif subnet_type in ['dhcp4', 'dhcp']: + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type == 'static': +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 5f1aa3e7..8bcafe08 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -886,6 +886,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes ++ IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +2.20.1 + diff --git a/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch new file mode 100644 index 0000000..0da5464 --- /dev/null +++ b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch @@ -0,0 +1,57 @@ +From 21ea1cda0055416119edea44de95b5606f0b0e15 Mon Sep 17 00:00:00 2001 +From: Vitaly Kuznetsov +Date: Tue, 17 Apr 2018 13:07:54 +0200 +Subject: DataSourceAzure.py: use hostnamectl to set hostname + +RH-Author: Vitaly Kuznetsov +Message-id: <20180417130754.12918-3-vkuznets@redhat.com> +Patchwork-id: 79659 +O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname +Bugzilla: 1568717 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Cathy Avery + +The right way to set hostname in RHEL7 is: + + $ hostnamectl set-hostname HOSTNAME + +DataSourceAzure, however, uses: + $ hostname HOSTSNAME + +instead and this causes problems. We can't simply change +'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used +for both getting and setting the hostname. + +Long term, this should be fixed in a different way. Cloud-init +has distro-specific hostname setting/getting (see +cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched +to use these. + +Resolves: rhbz#1434109 + +X-downstream-only: yes + +Signed-off-by: Vitaly Kuznetsov +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. 
de Paula +--- + cloudinit/sources/DataSourceAzure.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index e076d5dc..7dbeb04c 100644 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -238,7 +238,7 @@ def get_hostname(hostname_command='hostname'): + + + def set_hostname(hostname, hostname_command='hostname'): +- util.subp([hostname_command, hostname]) ++ util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + + + @contextlib.contextmanager +-- +2.20.1 + diff --git a/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch b/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch new file mode 100644 index 0000000..33de1b6 --- /dev/null +++ b/SOURCES/0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch @@ -0,0 +1,50 @@ +From 6444df4c91c611c65bb292e75e2726f767edcf2b Mon Sep 17 00:00:00 2001 +From: Vitaly Kuznetsov +Date: Thu, 26 Apr 2018 09:27:49 +0200 +Subject: sysconfig: Don't disable IPV6_AUTOCONF + +RH-Author: Vitaly Kuznetsov +Message-id: <20180426092749.7251-2-vkuznets@redhat.com> +Patchwork-id: 79904 +O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 1/1] sysconfig: Don't disable IPV6_AUTOCONF +Bugzilla: 1578702 +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Cathy Avery +RH-Acked-by: Eduardo Otubo + +Downstream-only commit 118458a3fb ("sysconfig: Don't write BOOTPROTO=dhcp +for ipv6 dhcp") did two things: +1) Disabled BOOTPROTO='dhcp' for dhcp6 setups. This change seems to be + correct as BOOTPROTO is unrelated to IPv6. The change was since merged + upstream (commit a57928d3c314d9568712cd190cb1e721e14c108b). +2) Explicitly disabled AUTOCONF and this broke many valid configurations + using it instead of DHCPV6C. Revert this part of the change. In case + DHCPV6C-only support is needed something like a new 'dhcpv6c_only' + network type needs to be suggested upstream. + +X-downstream-only: yes + +Resolves: rhbz#1558854 + +Signed-off-by: Vitaly Kuznetsov +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. de Paula +--- + cloudinit/net/sysconfig.py | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index ec166cf1..ae0554ef 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -310,7 +310,6 @@ class Renderer(renderer.Renderer): + if subnet_type == 'dhcp6': + iface_cfg['IPV6INIT'] = True + iface_cfg['DHCPV6C'] = True +- iface_cfg['IPV6_AUTOCONF'] = False + elif subnet_type in ['dhcp4', 'dhcp']: + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type == 'static': +-- +2.20.1 + diff --git a/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch b/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch new file mode 100644 index 0000000..be1644e --- /dev/null +++ b/SOURCES/0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch @@ -0,0 +1,217 @@ +From 86bd1e20fc802edfb920fa53bd611d469f83250b Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 18 Jan 2019 16:55:36 +0100 +Subject: net: Make sysconfig renderer compatible with Network Manager. + +RH-Author: Eduardo Otubo +Message-id: <20190118165536.25963-1-otubo@redhat.com> +Patchwork-id: 84052 +O-Subject: [RHEL-8.0 cloud-init PATCH] net: Make sysconfig renderer compatible with Network Manager. 
+Bugzilla: 1602784 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohammed Gamal + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1602784 +Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=19877292 +Tested by: upstream maintainers and me + +commit 3861102fcaf47a882516d8b6daab518308eb3086 +Author: Eduardo Otubo +Date: Fri Jan 18 15:36:19 2019 +0000 + + net: Make sysconfig renderer compatible with Network Manager. + + The 'sysconfig' renderer is activated if, and only if, there's ifup and + ifdown commands present in its search dictonary or the network-scripts + configuration files are found. This patch adds a check for Network- + Manager configuration file as well. + + This solution is based on the use of the plugin 'ifcfg-rh' present in + Network-Manager and is designed to support Fedora 29 or other + distributions that also replaced network-scripts by Network-Manager. + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. de Paula +--- + cloudinit/net/sysconfig.py | 36 +++++++++++++++++++ + tests/unittests/test_net.py | 71 +++++++++++++++++++++++++++++++++++++ + 2 files changed, 107 insertions(+) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index ae0554ef..dc1815d9 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -10,11 +10,14 @@ from cloudinit.distros.parsers import resolv_conf + from cloudinit import log as logging + from cloudinit import util + ++from configobj import ConfigObj ++ + from . import renderer + from .network_state import ( + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) + + LOG = logging.getLogger(__name__) ++NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" + + + def _make_header(sep='#'): +@@ -46,6 +49,24 @@ def _quote_value(value): + return value + + ++def enable_ifcfg_rh(path): ++ """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" ++ config = ConfigObj(path) ++ if 'main' in config: ++ if 'plugins' in config['main']: ++ if 'ifcfg-rh' in config['main']['plugins']: ++ return ++ else: ++ config['main']['plugins'] = [] ++ ++ if isinstance(config['main']['plugins'], list): ++ config['main']['plugins'].append('ifcfg-rh') ++ else: ++ config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh'] ++ config.write() ++ LOG.debug('Enabled ifcfg-rh NetworkManager plugins') ++ ++ + class ConfigMap(object): + """Sysconfig like dictionary object.""" + +@@ -656,6 +677,8 @@ class Renderer(renderer.Renderer): + netrules_content = self._render_persistent_net(network_state) + netrules_path = util.target_path(target, self.netrules_path) + util.write_file(netrules_path, netrules_content, file_mode) ++ if available_nm(target=target): ++ enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) + + sysconfig_path = util.target_path(target, templates.get('control')) + # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos +@@ -670,6 +693,13 @@ class Renderer(renderer.Renderer): + + + def available(target=None): ++ sysconfig = available_sysconfig(target=target) ++ nm = available_nm(target=target) ++ ++ return any([nm, sysconfig]) ++ ++ ++def available_sysconfig(target=None): + expected = ['ifup', 'ifdown'] + search = ['/sbin', '/usr/sbin'] + for p in expected: +@@ -685,4 +715,10 @@ def available(target=None): + return True + + ++def available_nm(target=None): ++ if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): ++ return False ++ return True ++ ++ + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 8bcafe08..526a30ed 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -22,6 +22,7 @@ import os + import textwrap + import yaml + ++ + DHCP_CONTENT_1 = """ + DEVICE='eth0' + PROTO='dhcp' +@@ -1854,6 +1855,7 @@ class TestRhelSysConfigRendering(CiTestCase): + + with_logs = True + ++ nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" + scripts_dir = '/etc/sysconfig/network-scripts' + header = ('# Created by cloud-init on instance boot automatically, ' + 'do not edit.\n#\n') +@@ -2497,6 +2499,75 @@ iface eth0 inet dhcp + self.assertEqual( + expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + ++ def test_check_ifcfg_rh(self): ++ """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" ++ render_dir = self.tmp_dir() ++ nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is a list here ++ with open(nm_cfg, 'w') as fh: ++ fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n') ++ self.assertTrue(os.path.exists(nm_cfg)) ++ ++ # render and read ++ entry = NETWORK_CONFIGS['small'] ++ found = self._render_and_read(network_config=yaml.load(entry['yaml']), ++ dir=render_dir) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn('ifcfg-rh', config['main']['plugins']) ++ ++ def test_check_ifcfg_rh_plugins_string(self): ++ """ifcfg-rh plugin is append when plugins is a string.""" ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is a value here ++ util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') ++ ++ # render and read ++ entry = NETWORK_CONFIGS['small'] ++ found = self._render_and_read(network_config=yaml.load(entry['yaml']), ++ dir=render_dir) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check raw content has plugin ++ nm_file_content = util.load_file(nm_cfg) ++ self.assertIn('ifcfg-rh', nm_file_content) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn('ifcfg-rh', config['main']['plugins']) ++ ++ def test_check_ifcfg_rh_plugins_no_plugins(self): ++ """enable_ifcfg_plugin creates plugins value if missing.""" ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is missing ++ util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n') ++ self.assertTrue(os.path.exists(nm_cfg)) ++ ++ # 
render and read ++ entry = NETWORK_CONFIGS['small'] ++ found = self._render_and_read(network_config=yaml.load(entry['yaml']), ++ dir=render_dir) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn('ifcfg-rh', config['main']['plugins']) ++ + + class TestNetplanNetRendering(CiTestCase): + +-- +2.20.1 + diff --git a/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch b/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch new file mode 100644 index 0000000..41ba44b --- /dev/null +++ b/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch @@ -0,0 +1,296 @@ +From 2e070086275341dfceb6d5b1e12f06f22e7bbfcd Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 23 Jan 2019 12:30:21 +0100 +Subject: net: Wait for dhclient to daemonize before reading lease file + +RH-Author: Eduardo Otubo +Message-id: <20190123123021.32708-1-otubo@redhat.com> +Patchwork-id: 84095 +O-Subject: [RHEL-7.7 cloud-init PATCH] net: Wait for dhclient to daemonize before reading lease file +Bugzilla: 1632967 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Miroslav Rezanina + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1632967 +Brew: https://bugzilla.redhat.com/show_bug.cgi?id=1632967 +Tested: Me and upstream + +commit fdadcb5fae51f4e6799314ab98e3aec56c79b17c +Author: Jason Zions +Date: Tue Jan 15 21:37:17 2019 +0000 + + net: Wait for dhclient to daemonize before reading lease file + + cloud-init uses dhclient to fetch the DHCP lease so it can extract + DHCP options. dhclient creates the leasefile, then writes to it; + simply waiting for the leasefile to appear creates a race between + dhclient and cloud-init. Instead, wait for dhclient to be parented by + init. At that point, we know it has written to the leasefile, so it's + safe to copy the file and kill the process. + + cloud-init creates a temporary directory in which to execute dhclient, + and deletes that directory after it has killed the process. If + cloud-init abandons waiting for dhclient to daemonize, it will still + attempt to delete the temporary directory, but will not report an + exception should that attempt fail. + + LP: #1794399 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. 
de Paula +--- + cloudinit/net/dhcp.py | 44 +++++++++++++++++++++--------- + cloudinit/net/tests/test_dhcp.py | 15 ++++++++-- + cloudinit/temp_utils.py | 4 +-- + cloudinit/tests/test_temp_utils.py | 18 +++++++++++- + cloudinit/util.py | 16 ++++++++++- + tests/unittests/test_util.py | 6 ++++ + 6 files changed, 83 insertions(+), 20 deletions(-) + +diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py +index 0db991db..c98a97cd 100644 +--- a/cloudinit/net/dhcp.py ++++ b/cloudinit/net/dhcp.py +@@ -9,6 +9,7 @@ import logging + import os + import re + import signal ++import time + + from cloudinit.net import ( + EphemeralIPv4Network, find_fallback_nic, get_devicelist, +@@ -127,7 +128,9 @@ def maybe_perform_dhcp_discovery(nic=None): + if not dhclient_path: + LOG.debug('Skip dhclient configuration: No dhclient command found.') + return [] +- with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: ++ with temp_utils.tempdir(rmtree_ignore_errors=True, ++ prefix='cloud-init-dhcp-', ++ needs_exe=True) as tdir: + # Use /var/tmp because /run/cloud-init/tmp is mounted noexec + return dhcp_discovery(dhclient_path, nic, tdir) + +@@ -195,24 +198,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): + '-pf', pid_file, interface, '-sf', '/bin/true'] + util.subp(cmd, capture=True) + +- # dhclient doesn't write a pid file until after it forks when it gets a +- # proper lease response. Since cleandir is a temp directory that gets +- # removed, we need to wait for that pidfile creation before the +- # cleandir is removed, otherwise we get FileNotFound errors. ++ # Wait for pid file and lease file to appear, and for the process ++ # named by the pid file to daemonize (have pid 1 as its parent). If we ++ # try to read the lease file before daemonization happens, we might try ++ # to read it before the dhclient has actually written it. We also have ++ # to wait until the dhclient has become a daemon so we can be sure to ++ # kill the correct process, thus freeing cleandir to be deleted back ++ # up the callstack. 
+ missing = util.wait_for_files( + [pid_file, lease_file], maxwait=5, naplen=0.01) + if missing: + LOG.warning("dhclient did not produce expected files: %s", + ', '.join(os.path.basename(f) for f in missing)) + return [] +- pid_content = util.load_file(pid_file).strip() +- try: +- pid = int(pid_content) +- except ValueError: +- LOG.debug( +- "pid file contains non-integer content '%s'", pid_content) +- else: +- os.kill(pid, signal.SIGKILL) ++ ++ ppid = 'unknown' ++ for _ in range(0, 1000): ++ pid_content = util.load_file(pid_file).strip() ++ try: ++ pid = int(pid_content) ++ except ValueError: ++ pass ++ else: ++ ppid = util.get_proc_ppid(pid) ++ if ppid == 1: ++ LOG.debug('killing dhclient with pid=%s', pid) ++ os.kill(pid, signal.SIGKILL) ++ return parse_dhcp_lease_file(lease_file) ++ time.sleep(0.01) ++ ++ LOG.error( ++ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', ++ pid_content, ppid, 0.01 * 1000 ++ ) + return parse_dhcp_lease_file(lease_file) + + +diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py +index cd3e7328..79e8842f 100644 +--- a/cloudinit/net/tests/test_dhcp.py ++++ b/cloudinit/net/tests/test_dhcp.py +@@ -145,16 +145,20 @@ class TestDHCPDiscoveryClean(CiTestCase): + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + self.assertIn( +- "pid file contains non-integer content ''", self.logs.getvalue()) ++ "dhclient(pid=, parentpid=unknown) failed " ++ "to daemonize after 10.0 seconds", ++ self.logs.getvalue()) + m_kill.assert_not_called() + ++ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.util.wait_for_files') + @mock.patch('cloudinit.net.dhcp.util.subp') + def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, + m_subp, + m_wait, +- m_kill): ++ m_kill, ++ m_getppid): + """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') +@@ -164,6 +168,7 @@ class TestDHCPDiscoveryClean(CiTestCase): + pidfile = self.tmp_path('dhclient.pid', tmpdir) + leasefile = self.tmp_path('dhcp.leases', tmpdir) + m_wait.return_value = [pidfile] # Return the missing pidfile wait for ++ m_getppid.return_value = 1 # Indicate that dhclient has daemonized + self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + self.assertEqual( + mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), +@@ -173,9 +178,10 @@ class TestDHCPDiscoveryClean(CiTestCase): + self.logs.getvalue()) + m_kill.assert_not_called() + ++ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.util.subp') +- def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): ++ def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): + """dhcp_discovery brings up the interface and runs dhclient. + + It also returns the parsed dhcp.leases file generated in the sandbox. 
+@@ -197,6 +203,7 @@ class TestDHCPDiscoveryClean(CiTestCase): + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) ++ m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + self.assertItemsEqual( + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', +@@ -355,3 +362,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): + self.assertEqual(fake_lease, lease) + # Ensure that dhcp discovery occurs + m_dhcp.called_once_with() ++ ++# vi: ts=4 expandtab +diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py +index c98a1b53..346276ec 100644 +--- a/cloudinit/temp_utils.py ++++ b/cloudinit/temp_utils.py +@@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs): + + + @contextlib.contextmanager +-def tempdir(**kwargs): ++def tempdir(rmtree_ignore_errors=False, **kwargs): + # This seems like it was only added in python 3.2 + # Make it since its useful... + # See: http://bugs.python.org/file12970/tempdir.patch +@@ -89,7 +89,7 @@ def tempdir(**kwargs): + try: + yield tdir + finally: +- shutil.rmtree(tdir) ++ shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) + + + def mkdtemp(**kwargs): +diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py +index ffbb92cd..4a52ef89 100644 +--- a/cloudinit/tests/test_temp_utils.py ++++ b/cloudinit/tests/test_temp_utils.py +@@ -2,8 +2,9 @@ + + """Tests for cloudinit.temp_utils""" + +-from cloudinit.temp_utils import mkdtemp, mkstemp ++from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir + from cloudinit.tests.helpers import CiTestCase, wrap_and_call ++import os + + + class TestTempUtils(CiTestCase): +@@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase): + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + ++ def test_tempdir_error_suppression(self): ++ """test tempdir suppresses errors during directory removal.""" ++ ++ with self.assertRaises(OSError): ++ with tempdir(prefix='cloud-init-dhcp-') as tdir: ++ os.rmdir(tdir) ++ # As a result, the directory is already gone, ++ # so shutil.rmtree should raise OSError ++ ++ with tempdir(rmtree_ignore_errors=True, ++ prefix='cloud-init-dhcp-') as tdir: ++ os.rmdir(tdir) ++ # Since the directory is already gone, shutil.rmtree would raise ++ # OSError, but we suppress that ++ + # vi: ts=4 expandtab +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 7800f7bc..a84112a9 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -2861,7 +2861,6 @@ def mount_is_read_write(mount_point): + mount_opts = result[-1].split(',') + return mount_opts[0] == 'rw' + +- + def udevadm_settle(exists=None, timeout=None): + """Invoke udevadm settle with optional exists and timeout parameters""" + settle_cmd = ["udevadm", "settle"] +@@ -2875,5 +2874,20 @@ def udevadm_settle(exists=None, timeout=None): + + return subp(settle_cmd) + ++def get_proc_ppid(pid): ++ """ ++ Return the parent pid of a process. ++ """ ++ ppid = 0 ++ try: ++ contents = load_file("/proc/%s/stat" % pid, quiet=True) ++ except IOError as e: ++ LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) ++ if contents: ++ parts = contents.split(" ", 4) ++ # man proc says ++ # ppid %d (4) The PID of the parent. 
++ ppid = int(parts[3]) ++ return ppid + + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py +index 5a14479a..8aebcd62 100644 +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -1114,6 +1114,12 @@ class TestLoadShellContent(helpers.TestCase): + 'key3="val3 #tricky"', + '']))) + ++ def test_get_proc_ppid(self): ++ """get_proc_ppid returns correct parent pid value.""" ++ my_pid = os.getpid() ++ my_ppid = os.getppid() ++ self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) ++ + + class TestGetProcEnv(helpers.TestCase): + """test get_proc_env.""" +-- +2.20.1 + diff --git a/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch b/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch new file mode 100644 index 0000000..f356066 --- /dev/null +++ b/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch @@ -0,0 +1,90 @@ +From 8a3bf53398f312b46ed4f304df4c66d061e612c7 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Thu, 28 Feb 2019 12:38:36 +0100 +Subject: cloud-init-per: don't use dashes in sem names + +RH-Author: Eduardo Otubo +Message-id: <20190228123836.17979-1-otubo@redhat.com> +Patchwork-id: 84743 +O-Subject: [RHEL-7.7 cloud-init PATCH] This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 +Bugzilla: 1664876 +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +From: Vitaly Kuznetsov + + It was found that when there is a dash in cloud-init-per command + name and cloud-init-per is executed through cloud-init's bootcmd, e.g: + + bootcmd: + - cloud-init-per instance mycmd-bootcmd /usr/bin/mycmd + + the command is executed on each boot. However, running the same + cloud-init-per command manually after boot doesn't reveal the issue. Turns + out the issue comes from 'migrator' cloud-init module which renames all + files in /var/lib/cloud/instance/sem/ replacing dashes with underscores. As + migrator runs before bootcmd it renames + + /var/lib/cloud/instance/sem/bootper.mycmd-bootcmd.instance + to + /var/lib/cloud/instance/sem/bootper.mycmd_bootcmd.instance + + so cloud-init-per doesn't see it and thinks that the comment was never ran + before. On next boot the sequence repeats. + + There are multiple ways to resolve the issue. This patch takes the + following approach: 'canonicalize' sem names by replacing dashes with + underscores (this is consistent with post-'migrator' contents of + /var/lib/cloud/instance/sem/). We, however, need to be careful: in case + someone had a command with dashes before and he had migrator module enables + we need to see the old sem file (or the command will run again and this can + be as bad as formatting a partition!) so we add a small 'migrator' part to + cloud-init-per script itself checking for legacy sem names. + + Signed-off-by: Vitaly Kuznetsov + +commit 9cf9d8cdd3a8fd7d4d425f7051122d0ac8af2bbd +Author: Vitaly Kuznetsov +Date: Mon Feb 18 22:55:49 2019 +0000 + + This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 + +Resolves: rhbz#1664876 +X-downstream-only: false + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. 
de Paula +--- + tools/cloud-init-per | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/tools/cloud-init-per b/tools/cloud-init-per +index 7d6754b6..eae3e93f 100755 +--- a/tools/cloud-init-per ++++ b/tools/cloud-init-per +@@ -38,7 +38,7 @@ fi + [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } + [ $# -ge 3 ] || { Usage 1>&2; exit 1; } + freq=$1 +-name=$2 ++name=${2/-/_} + shift 2; + + [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" +@@ -53,6 +53,12 @@ esac + [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || + fail "failed to make directory for ${sem}" + ++# Rename legacy sem files with dashes in their names. Do not overwrite existing ++# sem files to prevent clobbering those which may have been created from calls ++# outside of cloud-init. ++sem_legacy="${sem/_/-}" ++[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" ++ + [ "$freq" != "always" -a -e "$sem" ] && exit 0 + "$@" + ret=$? +-- +2.20.1 + diff --git a/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch b/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch new file mode 100644 index 0000000..bb63c25 --- /dev/null +++ b/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch @@ -0,0 +1,572 @@ +From 8e168f17b0c138d589f7b3bea4a4b6fcc8e5e03f Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 6 Mar 2019 14:20:18 +0100 +Subject: azure: Filter list of ssh keys pulled from fabric + +RH-Author: Eduardo Otubo +Message-id: <20190306142018.8902-1-otubo@redhat.com> +Patchwork-id: 84807 +O-Subject: [RHEL-7.7 cloud-init PATCH] azure: Filter list of ssh keys pulled from fabric +Bugzilla: 1684040 +RH-Acked-by: Cathy Avery +RH-Acked-by: Vitaly Kuznetsov + +From: "Jason Zions (MSFT)" + +commit 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee +Author: Jason Zions (MSFT) +Date: Fri Feb 22 13:26:31 2019 +0000 + + azure: Filter list of ssh keys pulled from fabric + + The Azure data source is expected to expose a list of + ssh keys for the user-to-be-provisioned in the crawled + metadata. When configured to use the __builtin__ agent + this list is built by the WALinuxAgentShim. The shim + retrieves the full set of certificates and public keys + exposed to the VM from the wireserver, extracts any + ssh keys it can, and returns that list. + + This fix reduces that list of ssh keys to just the + ones whose fingerprints appear in the "administrative + user" section of the ovf-env.xml file. The Azure + control plane exposes other ssh keys to the VM for + other reasons, but those should not be added to the + authorized_keys file for the provisioned user. + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. 
de Paula +--- + cloudinit/sources/DataSourceAzure.py | 13 +- + cloudinit/sources/helpers/azure.py | 109 +++++++++---- + .../azure/parse_certificates_fingerprints | 4 + + tests/data/azure/parse_certificates_pem | 152 ++++++++++++++++++ + tests/data/azure/pubkey_extract_cert | 13 ++ + tests/data/azure/pubkey_extract_ssh_key | 1 + + .../test_datasource/test_azure_helper.py | 71 +++++++- + 7 files changed, 322 insertions(+), 41 deletions(-) + create mode 100644 tests/data/azure/parse_certificates_fingerprints + create mode 100644 tests/data/azure/parse_certificates_pem + create mode 100644 tests/data/azure/pubkey_extract_cert + create mode 100644 tests/data/azure/pubkey_extract_ssh_key + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 7dbeb04c..2062ca5d 100644 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): + if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: + self.bounce_network_with_azure_hostname() + ++ pubkey_info = self.cfg.get('_pubkeys', None) + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. +- dhclient_lease_file) ++ dhclient_lease_file, ++ pubkey_info=pubkey_info) + else: + metadata_func = self.get_metadata_from_agent + +@@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): + "Error communicating with Azure fabric; You may experience." + "connectivity issues.", exc_info=True) + return False ++ + util.del_file(REPORTED_READY_MARKER_FILE) + util.del_file(REPROVISION_MARKER_FILE) + return fabric_data +@@ -909,13 +912,15 @@ def find_child(node, filter_func): + def load_azure_ovf_pubkeys(sshnode): + # This parses a 'SSH' node formatted like below, and returns + # an array of dicts. +- # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', +- # 'path': 'where/to/go'}] ++ # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', ++ # 'path': '/where/to/go'}] + # + # +- # ABC/ABC ++ # ABC/x/y/z + # ... + # ++ # Under some circumstances, there may be a element along with the ++ # Fingerprint and Path. Pass those along if they appear. 
+ results = find_child(sshnode, lambda n: n.localName == "PublicKeys") + if len(results) == 0: + return [] +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +index e5696b1f..2829dd20 100644 +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -138,9 +138,36 @@ class OpenSSLManager(object): + self.certificate = certificate + LOG.debug('New certificate generated.') + +- def parse_certificates(self, certificates_xml): +- tag = ElementTree.fromstring(certificates_xml).find( +- './/Data') ++ @staticmethod ++ def _run_x509_action(action, cert): ++ cmd = ['openssl', 'x509', '-noout', action] ++ result, _ = util.subp(cmd, data=cert) ++ return result ++ ++ def _get_ssh_key_from_cert(self, certificate): ++ pub_key = self._run_x509_action('-pubkey', certificate) ++ keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ++ ssh_key, _ = util.subp(keygen_cmd, data=pub_key) ++ return ssh_key ++ ++ def _get_fingerprint_from_cert(self, certificate): ++ """openssl x509 formats fingerprints as so: ++ 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ ++ B6:A8:BF:27:D4:73\n' ++ ++ Azure control plane passes that fingerprint as so: ++ '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' ++ """ ++ raw_fp = self._run_x509_action('-fingerprint', certificate) ++ eq = raw_fp.find('=') ++ octets = raw_fp[eq+1:-1].split(':') ++ return ''.join(octets) ++ ++ def _decrypt_certs_from_xml(self, certificates_xml): ++ """Decrypt the certificates XML document using the our private key; ++ return the list of certs and private keys contained in the doc. ++ """ ++ tag = ElementTree.fromstring(certificates_xml).find('.//Data') + certificates_content = tag.text + lines = [ + b'MIME-Version: 1.0', +@@ -151,32 +178,30 @@ class OpenSSLManager(object): + certificates_content.encode('utf-8'), + ] + with cd(self.tmpdir): +- with open('Certificates.p7m', 'wb') as f: +- f.write(b'\n'.join(lines)) + out, _ = util.subp( +- 'openssl cms -decrypt -in Certificates.p7m -inkey' ++ 'openssl cms -decrypt -in /dev/stdin -inkey' + ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' + ' -password pass:'.format(**self.certificate_names), +- shell=True) +- private_keys, certificates = [], [] ++ shell=True, data=b'\n'.join(lines)) ++ return out ++ ++ def parse_certificates(self, certificates_xml): ++ """Given the Certificates XML document, return a dictionary of ++ fingerprints and associated SSH keys derived from the certs.""" ++ out = self._decrypt_certs_from_xml(certificates_xml) + current = [] ++ keys = {} + for line in out.splitlines(): + current.append(line) + if re.match(r'[-]+END .*?KEY[-]+$', line): +- private_keys.append('\n'.join(current)) ++ # ignore private_keys + current = [] + elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): +- certificates.append('\n'.join(current)) ++ certificate = '\n'.join(current) ++ ssh_key = self._get_ssh_key_from_cert(certificate) ++ fingerprint = self._get_fingerprint_from_cert(certificate) ++ keys[fingerprint] = ssh_key + current = [] +- keys = [] +- for certificate in certificates: +- with cd(self.tmpdir): +- public_key, _ = util.subp( +- 'openssl x509 -noout -pubkey |' +- 'ssh-keygen -i -m PKCS8 -f /dev/stdin', +- data=certificate, +- shell=True) +- keys.append(public_key) + return keys + + +@@ -206,7 +231,6 @@ class WALinuxAgentShim(object): + self.dhcpoptions = dhcp_options + self._endpoint = None + self.openssl_manager = None +- self.values = {} + self.lease_file = fallback_lease_file + + def clean_up(self): 
+@@ -328,8 +352,9 @@ class WALinuxAgentShim(object): + LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + return endpoint_ip_address + +- def register_with_azure_and_fetch_data(self): +- self.openssl_manager = OpenSSLManager() ++ def register_with_azure_and_fetch_data(self, pubkey_info=None): ++ if self.openssl_manager is None: ++ self.openssl_manager = OpenSSLManager() + http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) + LOG.info('Registering with Azure...') + attempts = 0 +@@ -347,16 +372,37 @@ class WALinuxAgentShim(object): + attempts += 1 + LOG.debug('Successfully fetched GoalState XML.') + goal_state = GoalState(response.contents, http_client) +- public_keys = [] +- if goal_state.certificates_xml is not None: ++ ssh_keys = [] ++ if goal_state.certificates_xml is not None and pubkey_info is not None: + LOG.debug('Certificate XML found; parsing out public keys.') +- public_keys = self.openssl_manager.parse_certificates( ++ keys_by_fingerprint = self.openssl_manager.parse_certificates( + goal_state.certificates_xml) +- data = { +- 'public-keys': public_keys, +- } ++ ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) + self._report_ready(goal_state, http_client) +- return data ++ return {'public-keys': ssh_keys} ++ ++ def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): ++ """cloud-init expects a straightforward array of keys to be dropped ++ into the user's authorized_keys file. Azure control plane exposes ++ multiple public keys to the VM via wireserver. Select just the ++ user's key(s) and return them, ignoring any other certs. ++ """ ++ keys = [] ++ for pubkey in pubkey_info: ++ if 'value' in pubkey and pubkey['value']: ++ keys.append(pubkey['value']) ++ elif 'fingerprint' in pubkey and pubkey['fingerprint']: ++ fingerprint = pubkey['fingerprint'] ++ if fingerprint in keys_by_fingerprint: ++ keys.append(keys_by_fingerprint[fingerprint]) ++ else: ++ LOG.warning("ovf-env.xml specified PublicKey fingerprint " ++ "%s not found in goalstate XML", fingerprint) ++ else: ++ LOG.warning("ovf-env.xml specified PublicKey with neither " ++ "value nor fingerprint: %s", pubkey) ++ ++ return keys + + def _report_ready(self, goal_state, http_client): + LOG.debug('Reporting ready to Azure fabric.') +@@ -373,11 +419,12 @@ class WALinuxAgentShim(object): + LOG.info('Reported ready to Azure fabric.') + + +-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): ++def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, ++ pubkey_info=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) + try: +- return shim.register_with_azure_and_fetch_data() ++ return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) + finally: + shim.clean_up() + +diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints +new file mode 100644 +index 00000000..f7293c56 +--- /dev/null ++++ b/tests/data/azure/parse_certificates_fingerprints +@@ -0,0 +1,4 @@ ++ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 ++073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 ++4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E ++929130695289B450FE45DCD5F6EF0CDE69865867 +diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem +new file mode 100644 +index 00000000..3521ea3a +--- /dev/null ++++ b/tests/data/azure/parse_certificates_pem +@@ -0,0 +1,152 @@ ++Bag Attributes ++ localKeyID: 01 00 00 00 ++ Microsoft CSP Name: Microsoft Enhanced 
Cryptographic Provider v1.0 ++Key Attributes ++ X509v3 Key Usage: 10 ++-----BEGIN PRIVATE KEY----- ++MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP ++W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 ++61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz ++eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 ++7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ ++47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L ++Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT ++nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 ++lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn ++C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb ++EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG ++x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh +++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU ++cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH ++gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X ++I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB ++lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 ++v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed ++Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId ++0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA ++nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe ++onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG ++WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 ++qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 ++1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt ++RyWd+p2lYvFkC/jORQtDMY4uW1o= ++-----END PRIVATE KEY----- ++Bag Attributes ++ localKeyID: 02 00 00 00 ++ Microsoft CSP Name: Microsoft Strong Cryptographic Provider ++Key Attributes ++ X509v3 Key Usage: 10 ++-----BEGIN PRIVATE KEY----- ++MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 ++FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd ++x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW ++dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC ++gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA ++N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua ++tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd ++0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn ++giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 ++LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci ++xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh ++2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u ++n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ ++WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ ++R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 ++Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx ++E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz ++MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 ++SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW ++EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 ++8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii ++qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU ++FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 
++dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz ++kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y ++R/fA67HXFSTT+OncdRpY1NOn ++-----END PRIVATE KEY----- ++Bag Attributes: ++subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US ++issuer=/CN=Root Agency ++-----BEGIN CERTIFICATE----- ++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 ++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV ++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv ++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE ++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C ++k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN ++jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe ++eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ ++sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo ++OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT ++bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= ++-----END CERTIFICATE----- ++Bag Attributes ++ localKeyID: 01 00 00 00 ++subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com ++issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com ++-----BEGIN CERTIFICATE----- ++MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD ++VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES ++MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o ++Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 ++MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM ++CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m ++dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB ++FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB ++CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg ++ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF ++hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI ++B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi ++quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 ++Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 ++pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw ++DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg ++kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX ++R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF ++im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e ++mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz ++Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP ++3g== ++-----END CERTIFICATE----- ++Bag Attributes ++ localKeyID: 02 00 00 00 ++subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted ++issuer=/CN=Microsoft.ManagedIdentity ++-----BEGIN CERTIFICATE----- ++MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL ++BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy ++MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny ++aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz ++b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w ++dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB ++BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN 
++2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee ++0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW ++2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw ++tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw ++Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P ++AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD ++VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB ++AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe ++7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b ++7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 ++jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 ++UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC ++pkSoWwF1QAnHn0eokR9E1rU= ++-----END CERTIFICATE----- ++Bag Attributes: ++subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US ++issuer=/CN=Root Agency ++-----BEGIN CERTIFICATE----- ++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 ++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV ++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv ++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE ++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb ++Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi ++nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW ++vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ ++lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y ++WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 ++t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= ++-----END CERTIFICATE----- +diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert +new file mode 100644 +index 00000000..ce9b852d +--- /dev/null ++++ b/tests/data/azure/pubkey_extract_cert +@@ -0,0 +1,13 @@ ++-----BEGIN CERTIFICATE----- ++MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 ++IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV ++BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv ++cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE ++BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb ++Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi ++nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW ++vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ ++lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y ++WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 ++t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= ++-----END CERTIFICATE----- +diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key +new file mode 100644 +index 00000000..54d749ed +--- /dev/null ++++ b/tests/data/azure/pubkey_extract_ssh_key +@@ -0,0 +1 @@ ++ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp +diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py +index 26b2b93d..02556165 100644 +--- a/tests/unittests/test_datasource/test_azure_helper.py 
++++ b/tests/unittests/test_datasource/test_azure_helper.py +@@ -1,11 +1,13 @@ + # This file is part of cloud-init. See LICENSE file for license information. + + import os ++import unittest2 + from textwrap import dedent + + from cloudinit.sources.helpers import azure as azure_helper + from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir + ++from cloudinit.util import load_file + from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim + + GOAL_STATE_TEMPLATE = """\ +@@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): + self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) + + ++class TestOpenSSLManagerActions(CiTestCase): ++ ++ def setUp(self): ++ super(TestOpenSSLManagerActions, self).setUp() ++ ++ self.allowed_subp = True ++ ++ def _data_file(self, name): ++ path = 'tests/data/azure' ++ return os.path.join(path, name) ++ ++ @unittest2.skip("todo move to cloud_test") ++ def test_pubkey_extract(self): ++ cert = load_file(self._data_file('pubkey_extract_cert')) ++ good_key = load_file(self._data_file('pubkey_extract_ssh_key')) ++ sslmgr = azure_helper.OpenSSLManager() ++ key = sslmgr._get_ssh_key_from_cert(cert) ++ self.assertEqual(good_key, key) ++ ++ good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' ++ fingerprint = sslmgr._get_fingerprint_from_cert(cert) ++ self.assertEqual(good_fingerprint, fingerprint) ++ ++ @unittest2.skip("todo move to cloud_test") ++ @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') ++ def test_parse_certificates(self, mock_decrypt_certs): ++ """Azure control plane puts private keys as well as certificates ++ into the Certificates XML object. Make sure only the public keys ++ from certs are extracted and that fingerprints are converted to ++ the form specified in the ovf-env.xml file. 
++ """ ++ cert_contents = load_file(self._data_file('parse_certificates_pem')) ++ fingerprints = load_file(self._data_file( ++ 'parse_certificates_fingerprints') ++ ).splitlines() ++ mock_decrypt_certs.return_value = cert_contents ++ sslmgr = azure_helper.OpenSSLManager() ++ keys_by_fp = sslmgr.parse_certificates('') ++ for fp in keys_by_fp.keys(): ++ self.assertIn(fp, fingerprints) ++ for fp in fingerprints: ++ self.assertIn(fp, keys_by_fp) ++ ++ + class TestWALinuxAgentShim(CiTestCase): + + def setUp(self): +@@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): + + def test_certificates_used_to_determine_public_keys(self): + shim = wa_shim() +- data = shim.register_with_azure_and_fetch_data() ++ """if register_with_azure_and_fetch_data() isn't passed some info about ++ the user's public keys, there's no point in even trying to parse ++ the certificates ++ """ ++ mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, ++ {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] ++ certs = {'fp1': 'expected-key', ++ 'fp2': 'should-not-be-found', ++ 'fp3': 'expected-no-value-key', ++ } ++ sslmgr = self.OpenSSLManager.return_value ++ sslmgr.parse_certificates.return_value = certs ++ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual( + [mock.call(self.GoalState.return_value.certificates_xml)], +- self.OpenSSLManager.return_value.parse_certificates.call_args_list) +- self.assertEqual( +- self.OpenSSLManager.return_value.parse_certificates.return_value, +- data['public-keys']) ++ sslmgr.parse_certificates.call_args_list) ++ self.assertIn('expected-key', data['public-keys']) ++ self.assertIn('expected-no-value-key', data['public-keys']) ++ self.assertNotIn('should-not-be-found', data['public-keys']) + + def test_absent_certificates_produces_empty_public_keys(self): ++ mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] + self.GoalState.return_value.certificates_xml = None + shim = wa_shim() +- data = shim.register_with_azure_and_fetch_data() ++ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual([], data['public-keys']) + + def test_correct_url_used_for_report_ready(self): +-- +2.20.1 + diff --git a/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch new file mode 100644 index 0000000..2010bad --- /dev/null +++ b/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch @@ -0,0 +1,66 @@ +From ffabcbbf0d4e990f04ab755dd87bb24e70c4fe78 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 20 Mar 2019 11:45:59 +0100 +Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network + +RH-Author: Eduardo Otubo +Message-id: <20190320114559.23708-1-otubo@redhat.com> +Patchwork-id: 84937 +O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network +Bugzilla: 1653131 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +The option NOZEROCONF=yes is not included by default in +/etc/sysconfig/network, which is required by Overcloud instances. The +patch also includes tests for the modifications. + +X-downstream-only: yes +Resolves: rhbz#1653131 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +Signed-off-by: Danilo C. L. 
de Paula +--- + cloudinit/net/sysconfig.py | 11 ++++++++++- + tests/unittests/test_net.py | 1 - + 2 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index dc1815d9..52bb8483 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -684,7 +684,16 @@ class Renderer(renderer.Renderer): + # Distros configuring /etc/sysconfig/network as a file e.g. Centos + if sysconfig_path.endswith('network'): + util.ensure_dir(os.path.dirname(sysconfig_path)) +- netcfg = [_make_header(), 'NETWORKING=yes'] ++ netcfg = [] ++ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): ++ if 'cloud-init' in line: ++ break ++ if not line.startswith(('NETWORKING=', ++ 'IPV6_AUTOCONF=', ++ 'NETWORKING_IPV6=')): ++ netcfg.append(line) ++ # Now generate the cloud-init portion of sysconfig/network ++ netcfg.extend([_make_header(), 'NETWORKING=yes']) + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 526a30ed..012c43b5 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -887,7 +887,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes +- IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +2.20.1 + diff --git a/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch b/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch new file mode 100644 index 0000000..7fe1d64 --- /dev/null +++ b/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch @@ -0,0 +1,405 @@ +From f919e65e4a462b385a6daa6b7cccc6af1358cbcf Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 29 May 2019 13:41:47 +0200 +Subject: [PATCH 3/5] Azure: Changes to the Hyper-V KVP Reporter +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eduardo Otubo +Message-id: <20190529134149.842-4-otubo@redhat.com> +Patchwork-id: 88266 +O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 3/5] Azure: Changes to the Hyper-V KVP Reporter +Bugzilla: 1691986 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +From: Anh Vo +commit 86674f013dfcea3c075ab41373ffb475881066f6 +Author: Anh Vo +Date: Mon Apr 29 20:22:16 2019 +0000 + + Azure: Changes to the Hyper-V KVP Reporter + +  + Truncate KVP Pool file to prevent stale entries from + being processed by the Hyper-V KVP reporter. +  + Drop filtering of KVPs as it is no longer needed. +  + Batch appending of existing KVP entries. 
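A minimal sketch of the "truncate stale entries" idea described above, assuming a
hypothetical truncate_if_stale() helper rather than the handler's actual method:

    import os
    import time

    def truncate_if_stale(path, uptime_seconds):
        """Empty the KVP pool file only if it was last written before this boot."""
        boot_time = time.time() - uptime_seconds
        try:
            if os.path.getmtime(path) < boot_time:
                # Entries written before boot belong to a previous boot; drop them once.
                open(path, "w").close()
        except OSError:
            # Best-effort telemetry: a missing or unreadable pool file is ignored.
            pass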
+ +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/reporting/handlers.py | 117 +++++++++++++++---------------- + tests/unittests/test_reporting_hyperv.py | 104 +++++++++++++-------------- + 2 files changed, 106 insertions(+), 115 deletions(-) + mode change 100644 => 100755 cloudinit/reporting/handlers.py + mode change 100644 => 100755 tests/unittests/test_reporting_hyperv.py + +diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py +old mode 100644 +new mode 100755 +index 6d23558..10165ae +--- a/cloudinit/reporting/handlers.py ++++ b/cloudinit/reporting/handlers.py +@@ -5,7 +5,6 @@ import fcntl + import json + import six + import os +-import re + import struct + import threading + import time +@@ -14,6 +13,7 @@ from cloudinit import log as logging + from cloudinit.registry import DictRegistry + from cloudinit import (url_helper, util) + from datetime import datetime ++from six.moves.queue import Empty as QueueEmptyError + + if six.PY2: + from multiprocessing.queues import JoinableQueue as JQueue +@@ -129,24 +129,50 @@ class HyperVKvpReportingHandler(ReportingHandler): + DESC_IDX_KEY = 'msg_i' + JSON_SEPARATORS = (',', ':') + KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' ++ _already_truncated_pool_file = False + + def __init__(self, + kvp_file_path=KVP_POOL_FILE_GUEST, + event_types=None): + super(HyperVKvpReportingHandler, self).__init__() + self._kvp_file_path = kvp_file_path ++ HyperVKvpReportingHandler._truncate_guest_pool_file( ++ self._kvp_file_path) ++ + self._event_types = event_types + self.q = JQueue() +- self.kvp_file = None + self.incarnation_no = self._get_incarnation_no() + self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, + self.incarnation_no) +- self._current_offset = 0 + self.publish_thread = threading.Thread( + target=self._publish_event_routine) + self.publish_thread.daemon = True + self.publish_thread.start() + ++ @classmethod ++ def _truncate_guest_pool_file(cls, kvp_file): ++ """ ++ Truncate the pool file if it has not been truncated since boot. ++ This should be done exactly once for the file indicated by ++ KVP_POOL_FILE_GUEST constant above. This method takes a filename ++ so that we can use an arbitrary file during unit testing. ++ Since KVP is a best-effort telemetry channel we only attempt to ++ truncate the file once and only if the file has not been modified ++ since boot. Additional truncation can lead to loss of existing ++ KVPs. ++ """ ++ if cls._already_truncated_pool_file: ++ return ++ boot_time = time.time() - float(util.uptime()) ++ try: ++ if os.path.getmtime(kvp_file) < boot_time: ++ with open(kvp_file, "w"): ++ pass ++ except (OSError, IOError) as e: ++ LOG.warning("failed to truncate kvp pool file, %s", e) ++ finally: ++ cls._already_truncated_pool_file = True ++ + def _get_incarnation_no(self): + """ + use the time passed as the incarnation number. 
+@@ -162,20 +188,15 @@ class HyperVKvpReportingHandler(ReportingHandler): + + def _iterate_kvps(self, offset): + """iterate the kvp file from the current offset.""" +- try: +- with open(self._kvp_file_path, 'rb+') as f: +- self.kvp_file = f +- fcntl.flock(f, fcntl.LOCK_EX) +- f.seek(offset) ++ with open(self._kvp_file_path, 'rb') as f: ++ fcntl.flock(f, fcntl.LOCK_EX) ++ f.seek(offset) ++ record_data = f.read(self.HV_KVP_RECORD_SIZE) ++ while len(record_data) == self.HV_KVP_RECORD_SIZE: ++ kvp_item = self._decode_kvp_item(record_data) ++ yield kvp_item + record_data = f.read(self.HV_KVP_RECORD_SIZE) +- while len(record_data) == self.HV_KVP_RECORD_SIZE: +- self._current_offset += self.HV_KVP_RECORD_SIZE +- kvp_item = self._decode_kvp_item(record_data) +- yield kvp_item +- record_data = f.read(self.HV_KVP_RECORD_SIZE) +- fcntl.flock(f, fcntl.LOCK_UN) +- finally: +- self.kvp_file = None ++ fcntl.flock(f, fcntl.LOCK_UN) + + def _event_key(self, event): + """ +@@ -207,23 +228,13 @@ class HyperVKvpReportingHandler(ReportingHandler): + + return {'key': k, 'value': v} + +- def _update_kvp_item(self, record_data): +- if self.kvp_file is None: +- raise ReportException( +- "kvp file '{0}' not opened." +- .format(self._kvp_file_path)) +- self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) +- self.kvp_file.write(record_data) +- + def _append_kvp_item(self, record_data): +- with open(self._kvp_file_path, 'rb+') as f: ++ with open(self._kvp_file_path, 'ab') as f: + fcntl.flock(f, fcntl.LOCK_EX) +- # seek to end of the file +- f.seek(0, 2) +- f.write(record_data) ++ for data in record_data: ++ f.write(data) + f.flush() + fcntl.flock(f, fcntl.LOCK_UN) +- self._current_offset = f.tell() + + def _break_down(self, key, meta_data, description): + del meta_data[self.MSG_KEY] +@@ -279,40 +290,26 @@ class HyperVKvpReportingHandler(ReportingHandler): + + def _publish_event_routine(self): + while True: ++ items_from_queue = 0 + try: + event = self.q.get(block=True) +- need_append = True ++ items_from_queue += 1 ++ encoded_data = [] ++ while event is not None: ++ encoded_data += self._encode_event(event) ++ try: ++ # get all the rest of the events in the queue ++ event = self.q.get(block=False) ++ items_from_queue += 1 ++ except QueueEmptyError: ++ event = None + try: +- if not os.path.exists(self._kvp_file_path): +- LOG.warning( +- "skip writing events %s to %s. file not present.", +- event.as_string(), +- self._kvp_file_path) +- encoded_event = self._encode_event(event) +- # for each encoded_event +- for encoded_data in (encoded_event): +- for kvp in self._iterate_kvps(self._current_offset): +- match = ( +- re.match( +- r"^{0}\|(\d+)\|.+" +- .format(self.EVENT_PREFIX), +- kvp['key'] +- )) +- if match: +- match_groups = match.groups(0) +- if int(match_groups[0]) < self.incarnation_no: +- need_append = False +- self._update_kvp_item(encoded_data) +- continue +- if need_append: +- self._append_kvp_item(encoded_data) +- except IOError as e: +- LOG.warning( +- "failed posting event to kvp: %s e:%s", +- event.as_string(), e) ++ self._append_kvp_item(encoded_data) ++ except (OSError, IOError) as e: ++ LOG.warning("failed posting events to kvp, %s", e) + finally: +- self.q.task_done() +- ++ for _ in range(items_from_queue): ++ self.q.task_done() + # when main process exits, q.get() will through EOFError + # indicating we should exit this thread. + except EOFError: +@@ -322,7 +319,7 @@ class HyperVKvpReportingHandler(ReportingHandler): + # if the kvp pool already contains a chunk of data, + # so defer it to another thread. 
+ def publish_event(self, event): +- if (not self._event_types or event.event_type in self._event_types): ++ if not self._event_types or event.event_type in self._event_types: + self.q.put(event) + + def flush(self): +diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py +old mode 100644 +new mode 100755 +index 2e64c6c..d01ed5b +--- a/tests/unittests/test_reporting_hyperv.py ++++ b/tests/unittests/test_reporting_hyperv.py +@@ -1,10 +1,12 @@ + # This file is part of cloud-init. See LICENSE file for license information. + + from cloudinit.reporting import events +-from cloudinit.reporting import handlers ++from cloudinit.reporting.handlers import HyperVKvpReportingHandler + + import json + import os ++import struct ++import time + + from cloudinit import util + from cloudinit.tests.helpers import CiTestCase +@@ -13,7 +15,7 @@ from cloudinit.tests.helpers import CiTestCase + class TestKvpEncoding(CiTestCase): + def test_encode_decode(self): + kvp = {'key': 'key1', 'value': 'value1'} +- kvp_reporting = handlers.HyperVKvpReportingHandler() ++ kvp_reporting = HyperVKvpReportingHandler() + data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) + self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) + decoded_kvp = kvp_reporting._decode_kvp_item(data) +@@ -26,57 +28,9 @@ class TextKvpReporter(CiTestCase): + self.tmp_file_path = self.tmp_path('kvp_pool_file') + util.ensure_file(self.tmp_file_path) + +- def test_event_type_can_be_filtered(self): +- reporter = handlers.HyperVKvpReportingHandler( +- kvp_file_path=self.tmp_file_path, +- event_types=['foo', 'bar']) +- +- reporter.publish_event( +- events.ReportingEvent('foo', 'name', 'description')) +- reporter.publish_event( +- events.ReportingEvent('some_other', 'name', 'description3')) +- reporter.q.join() +- +- kvps = list(reporter._iterate_kvps(0)) +- self.assertEqual(1, len(kvps)) +- +- reporter.publish_event( +- events.ReportingEvent('bar', 'name', 'description2')) +- reporter.q.join() +- kvps = list(reporter._iterate_kvps(0)) +- self.assertEqual(2, len(kvps)) +- +- self.assertIn('foo', kvps[0]['key']) +- self.assertIn('bar', kvps[1]['key']) +- self.assertNotIn('some_other', kvps[0]['key']) +- self.assertNotIn('some_other', kvps[1]['key']) +- +- def test_events_are_over_written(self): +- reporter = handlers.HyperVKvpReportingHandler( +- kvp_file_path=self.tmp_file_path) +- +- self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) +- +- reporter.publish_event( +- events.ReportingEvent('foo', 'name1', 'description')) +- reporter.publish_event( +- events.ReportingEvent('foo', 'name2', 'description')) +- reporter.q.join() +- self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) +- +- reporter2 = handlers.HyperVKvpReportingHandler( +- kvp_file_path=self.tmp_file_path) +- reporter2.incarnation_no = reporter.incarnation_no + 1 +- reporter2.publish_event( +- events.ReportingEvent('foo', 'name3', 'description')) +- reporter2.q.join() +- +- self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) +- + def test_events_with_higher_incarnation_not_over_written(self): +- reporter = handlers.HyperVKvpReportingHandler( ++ reporter = HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) +- + self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + + reporter.publish_event( +@@ -86,7 +40,7 @@ class TextKvpReporter(CiTestCase): + reporter.q.join() + self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + +- reporter3 = handlers.HyperVKvpReportingHandler( ++ reporter3 = 
HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter3.incarnation_no = reporter.incarnation_no - 1 + reporter3.publish_event( +@@ -95,7 +49,7 @@ class TextKvpReporter(CiTestCase): + self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) + + def test_finish_event_result_is_logged(self): +- reporter = handlers.HyperVKvpReportingHandler( ++ reporter = HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter.publish_event( + events.FinishReportingEvent('name2', 'description1', +@@ -105,7 +59,7 @@ class TextKvpReporter(CiTestCase): + + def test_file_operation_issue(self): + os.remove(self.tmp_file_path) +- reporter = handlers.HyperVKvpReportingHandler( ++ reporter = HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter.publish_event( + events.FinishReportingEvent('name2', 'description1', +@@ -113,7 +67,7 @@ class TextKvpReporter(CiTestCase): + reporter.q.join() + + def test_event_very_long(self): +- reporter = handlers.HyperVKvpReportingHandler( ++ reporter = HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE + long_event = events.FinishReportingEvent( +@@ -132,3 +86,43 @@ class TextKvpReporter(CiTestCase): + self.assertEqual(msg_slice['msg_i'], i) + full_description += msg_slice['msg'] + self.assertEqual(description, full_description) ++ ++ def test_not_truncate_kvp_file_modified_after_boot(self): ++ with open(self.tmp_file_path, "wb+") as f: ++ kvp = {'key': 'key1', 'value': 'value1'} ++ data = (struct.pack("%ds%ds" % ( ++ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, ++ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), ++ kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) ++ f.write(data) ++ cur_time = time.time() ++ os.utime(self.tmp_file_path, (cur_time, cur_time)) ++ ++ # reset this because the unit test framework ++ # has already polluted the class variable ++ HyperVKvpReportingHandler._already_truncated_pool_file = False ++ ++ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) ++ kvps = list(reporter._iterate_kvps(0)) ++ self.assertEqual(1, len(kvps)) ++ ++ def test_truncate_stale_kvp_file(self): ++ with open(self.tmp_file_path, "wb+") as f: ++ kvp = {'key': 'key1', 'value': 'value1'} ++ data = (struct.pack("%ds%ds" % ( ++ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, ++ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), ++ kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) ++ f.write(data) ++ ++ # set the time ways back to make it look like ++ # we had an old kvp file ++ os.utime(self.tmp_file_path, (1000000, 1000000)) ++ ++ # reset this because the unit test framework ++ # has already polluted the class variable ++ HyperVKvpReportingHandler._already_truncated_pool_file = False ++ ++ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) ++ kvps = list(reporter._iterate_kvps(0)) ++ self.assertEqual(0, len(kvps)) +-- +1.8.3.1 + diff --git a/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch b/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch new file mode 100644 index 0000000..b77e755 --- /dev/null +++ b/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch @@ -0,0 +1,156 @@ +From ae9b545cef4a68dfb9f9356dd27e43ff71ec26aa Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 29 May 2019 13:41:45 +0200 +Subject: [PATCH 1/5] Azure: Ensure platform random_seed is always serializable + as JSON. 
+ +RH-Author: Eduardo Otubo +Message-id: <20190529134149.842-2-otubo@redhat.com> +Patchwork-id: 88272 +O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 1/5] Azure: Ensure platform random_seed is always serializable as JSON. +Bugzilla: 1691986 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +From: "Jason Zions (MSFT)" +commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf +Author: Jason Zions (MSFT) +Date: Tue Mar 26 18:53:50 2019 +0000 + + Azure: Ensure platform random_seed is always serializable as JSON. + + The Azure platform surfaces random bytes into /sys via Hyper-V. + Python 2.7 json.dump() raises an exception if asked to convert + a str with non-character content, and python 3.0 json.dump() + won't serialize a "bytes" value. As a result, c-i instance + data is often not written by Azure, making reboots slower (c-i + has to repeat work). + + The random data is base64-encoded and then decoded into a string + (str or unicode depending on the version of Python in use). The + base64 string has just as many bits of entropy, so we're not + throwing away useful "information", but we can be certain + json.dump() will correctly serialize the bits. + +Signed-off-by: Miroslav Rezanina + +Conflicts: + tests/unittests/test_datasource/test_azure.py + Skipped the commit edf052c as it removes support for python-2.6 + +Signed-off-by: Eduardo Otubo +--- + cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- + tests/data/azure/non_unicode_random_string | 1 + + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- + 3 files changed, 42 insertions(+), 7 deletions(-) + create mode 100644 tests/data/azure/non_unicode_random_string + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 2062ca5..a768b2c 100644 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" + REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" + AGENT_SEED_DIR = '/var/lib/waagent' + IMDS_URL = "http://169.254.169.254/metadata/" ++PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" + + # List of static scripts and network config artifacts created by + # stock ubuntu suported images. +@@ -195,6 +196,8 @@ if util.is_FreeBSD(): + RESOURCE_DISK_PATH = "/dev/" + res_disk + else: + LOG.debug("resource disk is None") ++ # TODO Find where platform entropy data is surfaced ++ PLATFORM_ENTROPY_SOURCE = None + + BUILTIN_DS_CONFIG = { + 'agent_command': AGENT_START_BUILTIN, +@@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): + return False + + +-def _get_random_seed(): ++def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): + """Return content random seed file if available, otherwise, + return None.""" + # azure / hyper-v provides random data here +- # TODO. find the seed on FreeBSD platform + # now update ds_cfg to reflect contents pass in config +- if util.is_FreeBSD(): ++ if source is None: + return None +- return util.load_file("/sys/firmware/acpi/tables/OEM0", +- quiet=True, decode=False) ++ seed = util.load_file(source, quiet=True, decode=False) ++ ++ # The seed generally contains non-Unicode characters. load_file puts ++ # them into a str (in python 2) or bytes (in python 3). In python 2, ++ # bad octets in a str cause util.json_dumps() to throw an exception. In ++ # python 3, bytes is a non-serializable type, and the handler load_file ++ # uses applies b64 encoding *again* to handle it. 
The simplest solution ++ # is to just b64encode the data and then decode it to a serializable ++ # string. Same number of bits of entropy, just with 25% more zeroes. ++ # There's no need to undo this base64-encoding when the random seed is ++ # actually used in cc_seed_random.py. ++ seed = base64.b64encode(seed).decode() ++ ++ return seed + + + def list_possible_azure_ds_devs(): +diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string +new file mode 100644 +index 0000000..b9ecefb +--- /dev/null ++++ b/tests/data/azure/non_unicode_random_string +@@ -0,0 +1 @@ ++OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ +\ No newline at end of file +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index 417d86a..eacf225 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -7,11 +7,11 @@ from cloudinit.sources import ( + UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) + from cloudinit.util import (b64e, decode_binary, load_file, write_file, + find_freebsd_part, get_path_dev_freebsd, +- MountFailedError) ++ MountFailedError, json_dumps, load_json) + from cloudinit.version import version_string as vs + from cloudinit.tests.helpers import ( + HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, +- ExitStack, PY26, SkipTest) ++ ExitStack, PY26, SkipTest, resourceLocation) + + import crypt + import httpretty +@@ -1924,4 +1924,24 @@ class TestWBIsPlatformViable(CiTestCase): + self.logs.getvalue()) + + ++class TestRandomSeed(CiTestCase): ++ """Test proper handling of random_seed""" ++ ++ def test_non_ascii_seed_is_serializable(self): ++ """Pass if a random string from the Azure infrastructure which ++ contains at least one non-Unicode character can be converted to/from ++ JSON without alteration and without throwing an exception. 
++ """ ++ path = resourceLocation("azure/non_unicode_random_string") ++ result = dsaz._get_random_seed(path) ++ ++ obj = {'seed': result} ++ try: ++ serialized = json_dumps(obj) ++ deserialized = load_json(serialized) ++ except UnicodeDecodeError: ++ self.fail("Non-serializable random seed returned") ++ ++ self.assertEqual(deserialized['seed'], result) ++ + # vi: ts=4 expandtab +-- +1.8.3.1 + diff --git a/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch b/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch new file mode 100644 index 0000000..afa3324 --- /dev/null +++ b/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch @@ -0,0 +1,111 @@ +From b2500e258b930479cef36f514fcf9581ba68c976 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 29 May 2019 13:41:48 +0200 +Subject: [PATCH 4/5] DataSourceAzure: Adjust timeout for polling IMDS + +RH-Author: Eduardo Otubo +Message-id: <20190529134149.842-5-otubo@redhat.com> +Patchwork-id: 88267 +O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 4/5] DataSourceAzure: Adjust timeout for polling IMDS +Bugzilla: 1691986 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +From: Anh Vo +commit ab6621d849b24bb652243e88c79f6f3b446048d7 +Author: Anh Vo +Date: Wed May 8 14:54:03 2019 +0000 + + DataSourceAzure: Adjust timeout for polling IMDS + + If the IMDS primary server is not available, falling back to the + secondary server takes about 1s. The net result is that the + expected E2E time is slightly more than 1s. This change increases + the timeout to 2s to prevent the infinite loop of timeouts. + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- + tests/unittests/test_datasource/test_azure.py | 10 +++++++--- + 2 files changed, 17 insertions(+), 8 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index c827816..5baf8da 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' + REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" + REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" + AGENT_SEED_DIR = '/var/lib/waagent' ++ ++# In the event where the IMDS primary server is not ++# available, it takes 1s to fallback to the secondary one ++IMDS_TIMEOUT_IN_SECONDS = 2 + IMDS_URL = "http://169.254.169.254/metadata/" ++ + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" + + # List of static scripts and network config artifacts created by +@@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): + return + self._ephemeral_dhcp_ctx.clean_network() + else: +- return readurl(url, timeout=1, headers=headers, +- exception_cb=exc_cb, infinite=True, +- log_req_resp=False).contents ++ return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, ++ headers=headers, exception_cb=exc_cb, ++ infinite=True, log_req_resp=False).contents + except UrlError: + # Teardown our EphemeralDHCPv4 context on failure as we retry + self._ephemeral_dhcp_ctx.clean_network() +@@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): + headers = {"Metadata": "true"} + try: + response = readurl( +- url, timeout=1, headers=headers, retries=retries, +- exception_cb=retry_on_url_exc) ++ url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, ++ retries=retries, exception_cb=retry_on_url_exc) + except Exception as e: + LOG.debug('Ignoring IMDS instance metadata: %s', 
e) + return {} +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index eacf225..bc8b42c 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, +- headers={'Metadata': 'true'}, retries=2, timeout=1) ++ headers={'Metadata': 'true'}, retries=2, ++ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up') +@@ -1789,7 +1790,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() +- }, method='GET', timeout=1, ++ }, method='GET', ++ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( +@@ -1826,7 +1828,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, +- method='GET', timeout=1, url=full_url)]) ++ method='GET', ++ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, ++ url=full_url)]) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', +-- +1.8.3.1 + diff --git a/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch b/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch new file mode 100644 index 0000000..ea58c58 --- /dev/null +++ b/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch @@ -0,0 +1,642 @@ +From 4e4e73df5ce7a819f48bed0ae6d2f0b1fb7e243b Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 29 May 2019 13:41:46 +0200 +Subject: [PATCH 2/5] DatasourceAzure: add additional logging for azure + datasource + +RH-Author: Eduardo Otubo +Message-id: <20190529134149.842-3-otubo@redhat.com> +Patchwork-id: 88268 +O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 2/5] DatasourceAzure: add additional logging for azure datasource +Bugzilla: 1691986 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +From: Anh Vo +commit 0d8c88393b51db6454491a379dcc2e691551217a +Author: Anh Vo +Date: Wed Apr 3 18:23:18 2019 +0000 + + DatasourceAzure: add additional logging for azure datasource + + Create an Azure logging decorator and use additional ReportEventStack + context managers to provide additional logging details. 
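A minimal sketch of the decorator-plus-context-manager pattern described above,
assuming simplified ReportStack and report_step stand-ins rather than cloud-init's
real events.ReportEventStack and azure_ds_telemetry_reporter helpers:

    import functools
    import logging

    LOG = logging.getLogger(__name__)

    class ReportStack:
        """Log entry and exit of a named provisioning step."""

        def __init__(self, name, description=""):
            self.name = name
            self.description = description

        def __enter__(self):
            LOG.debug("start: %s %s", self.name, self.description)
            return self

        def __exit__(self, exc_type, exc, tb):
            LOG.debug("finish: %s (success=%s)", self.name, exc_type is None)
            return False  # never swallow exceptions from the wrapped step

    def report_step(func):
        """Wrap a datasource step in a ReportStack named after the function."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with ReportStack(name=func.__name__):
                return func(*args, **kwargs)
        return wrapper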
+ +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- + cloudinit/sources/helpers/azure.py | 31 +++++ + 2 files changed, 179 insertions(+), 83 deletions(-) + mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py + mode change 100644 => 100755 cloudinit/sources/helpers/azure.py + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +old mode 100644 +new mode 100755 +index a768b2c..c827816 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -21,10 +21,14 @@ from cloudinit import net + from cloudinit.event import EventType + from cloudinit.net.dhcp import EphemeralDHCPv4 + from cloudinit import sources +-from cloudinit.sources.helpers.azure import get_metadata_from_fabric + from cloudinit.sources.helpers import netlink + from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc + from cloudinit import util ++from cloudinit.reporting import events ++ ++from cloudinit.sources.helpers.azure import (azure_ds_reporter, ++ azure_ds_telemetry_reporter, ++ get_metadata_from_fabric) + + LOG = logging.getLogger(__name__) + +@@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): + util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + + ++@azure_ds_telemetry_reporter + @contextlib.contextmanager + def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): + """ +@@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + ++ @azure_ds_telemetry_reporter + def bounce_network_with_azure_hostname(self): + # When using cloud-init to provision, we have to set the hostname from + # the metadata and "bounce" the network to force DDNS to update via +@@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): + util.logexc(LOG, "handling set_hostname failed") + return False + ++ @azure_ds_telemetry_reporter + def get_metadata_from_agent(self): + temp_hostname = self.metadata.get('local-hostname') + agent_cmd = self.ds_cfg['agent_command'] +@@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): + LOG.debug("ssh authentication: " + "using fingerprint from fabirc") + +- # wait very long for public SSH keys to arrive +- # https://bugs.launchpad.net/cloud-init/+bug/1717611 +- missing = util.log_time(logfunc=LOG.debug, +- msg="waiting for SSH public key files", +- func=util.wait_for_files, +- args=(fp_files, 900)) +- +- if len(missing): +- LOG.warning("Did not find files, but going on: %s", missing) ++ with events.ReportEventStack( ++ name="waiting-for-ssh-public-key", ++ description="wait for agents to retrieve ssh keys", ++ parent=azure_ds_reporter): ++ # wait very long for public SSH keys to arrive ++ # https://bugs.launchpad.net/cloud-init/+bug/1717611 ++ missing = util.log_time(logfunc=LOG.debug, ++ msg="waiting for SSH public key files", ++ func=util.wait_for_files, ++ args=(fp_files, 900)) ++ if len(missing): ++ LOG.warning("Did not find files, but going on: %s", missing) + + metadata = {} + metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) +@@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) + ++ @azure_ds_telemetry_reporter + def crawl_metadata(self): + """Walk all instance metadata sources returning a dict on success. 
+ +@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): + super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) + self._metadata_imds = sources.UNSET + ++ @azure_ds_telemetry_reporter + def _get_data(self): + """Crawl and process datasource metadata caching metadata as attrs. + +@@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + ++ @azure_ds_telemetry_reporter + def setup(self, is_new_instance): + if self._negotiated is False: + LOG.debug("negotiating for %s (new_instance=%s)", +@@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): + if nl_sock: + nl_sock.close() + ++ @azure_ds_telemetry_reporter + def _report_ready(self, lease): + """Tells the fabric provisioning has completed """ + try: +@@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): + def _reprovision(self): + """Initiate the reprovisioning workflow.""" + contents = self._poll_imds() +- md, ud, cfg = read_azure_ovf(contents) +- return (md, ud, cfg, {'ovf-env.xml': contents}) +- ++ with events.ReportEventStack( ++ name="reprovisioning-read-azure-ovf", ++ description="read azure ovf during reprovisioning", ++ parent=azure_ds_reporter): ++ md, ud, cfg = read_azure_ovf(contents) ++ return (md, ud, cfg, {'ovf-env.xml': contents}) ++ ++ @azure_ds_telemetry_reporter + def _negotiate(self): + """Negotiate with fabric and return data from it. + +@@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): + util.del_file(REPROVISION_MARKER_FILE) + return fabric_data + ++ @azure_ds_telemetry_reporter + def activate(self, cfg, is_new_instance): + address_ephemeral_resize(is_new_instance=is_new_instance, + preserve_ntfs=self.ds_cfg.get( +@@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): + return [] + + ++@azure_ds_telemetry_reporter + def _has_ntfs_filesystem(devpath): + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) + LOG.debug('ntfs_devices found = %s', ntfs_devices) + return os.path.realpath(devpath) in ntfs_devices + + ++@azure_ds_telemetry_reporter + def can_dev_be_reformatted(devpath, preserve_ntfs): + """Determine if the ephemeral drive at devpath should be reformatted. + +@@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): + (cand_part, cand_path, devpath)) + return False, msg + ++ @azure_ds_telemetry_reporter + def count_files(mp): + ignored = set(['dataloss_warning_readme.txt']) + return len([f for f in os.listdir(mp) if f.lower() not in ignored]) + + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % + (cand_part, cand_path, devpath)) +- try: +- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", +- update_env_for_mount={'LANG': 'C'}) +- except util.MountFailedError as e: +- if "unknown filesystem type 'ntfs'" in str(e): +- return True, (bmsg + ' but this system cannot mount NTFS,' +- ' assuming there are no important files.' +- ' Formatting allowed.') +- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) +- +- if file_count != 0: +- LOG.warning("it looks like you're using NTFS on the ephemeral disk, " +- 'to ensure that filesystem does not get wiped, set ' +- '%s.%s in config', '.'.join(DS_CFG_PATH), +- DS_CFG_KEY_PRESERVE_NTFS) +- return False, bmsg + ' but had %d files on it.' 
% file_count ++ ++ with events.ReportEventStack( ++ name="mount-ntfs-and-count", ++ description="mount-ntfs-and-count", ++ parent=azure_ds_reporter) as evt: ++ try: ++ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", ++ update_env_for_mount={'LANG': 'C'}) ++ except util.MountFailedError as e: ++ evt.description = "cannot mount ntfs" ++ if "unknown filesystem type 'ntfs'" in str(e): ++ return True, (bmsg + ' but this system cannot mount NTFS,' ++ ' assuming there are no important files.' ++ ' Formatting allowed.') ++ return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) ++ ++ if file_count != 0: ++ evt.description = "mounted and counted %d files" % file_count ++ LOG.warning("it looks like you're using NTFS on the ephemeral" ++ " disk, to ensure that filesystem does not get wiped," ++ " set %s.%s in config", '.'.join(DS_CFG_PATH), ++ DS_CFG_KEY_PRESERVE_NTFS) ++ return False, bmsg + ' but had %d files on it.' % file_count + + return True, bmsg + ' and had no important files. Safe for reformatting.' + + ++@azure_ds_telemetry_reporter + def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, + is_new_instance=False, preserve_ntfs=False): + # wait for ephemeral disk to come up + naplen = .2 +- missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, +- log_pre="Azure ephemeral disk: ") +- +- if missing: +- LOG.warning("ephemeral device '%s' did not appear after %d seconds.", +- devpath, maxwait) +- return ++ with events.ReportEventStack( ++ name="wait-for-ephemeral-disk", ++ description="wait for ephemeral disk", ++ parent=azure_ds_reporter): ++ missing = util.wait_for_files([devpath], ++ maxwait=maxwait, ++ naplen=naplen, ++ log_pre="Azure ephemeral disk: ") ++ ++ if missing: ++ LOG.warning("ephemeral device '%s' did" ++ " not appear after %d seconds.", ++ devpath, maxwait) ++ return + + result = False + msg = None +@@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, + return + + ++@azure_ds_telemetry_reporter + def perform_hostname_bounce(hostname, cfg, prev_hostname): + # set the hostname to 'hostname' if it is not already set to that. + # then, if policy is not off, bounce the interface using command +@@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): + return True + + ++@azure_ds_telemetry_reporter + def crtfile_to_pubkey(fname, data=None): + pipeline = ('openssl x509 -noout -pubkey < "$0" |' + 'ssh-keygen -i -m PKCS8 -f /dev/stdin') +@@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): + return out.rstrip() + + ++@azure_ds_telemetry_reporter + def pubkeys_from_crt_files(flist): + pubkeys = [] + errors = [] +@@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): + return pubkeys + + ++@azure_ds_telemetry_reporter + def write_files(datadir, files, dirmode=None): + + def _redact_password(cnt, fname): +@@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): + util.write_file(filename=fname, content=content, mode=0o600) + + ++@azure_ds_telemetry_reporter + def invoke_agent(cmd): + # this is a function itself to simplify patching it for test + if cmd: +@@ -912,6 +955,7 @@ def find_child(node, filter_func): + return ret + + ++@azure_ds_telemetry_reporter + def load_azure_ovf_pubkeys(sshnode): + # This parses a 'SSH' node formatted like below, and returns + # an array of dicts. 
+@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): + return found + + ++@azure_ds_telemetry_reporter + def read_azure_ovf(contents): + try: + dom = minidom.parseString(contents) +@@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): + return (md, ud, cfg) + + ++@azure_ds_telemetry_reporter + def _extract_preprovisioned_vm_setting(dom): + """Read the preprovision flag from the ovf. It should not + exist unless true.""" +@@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): + return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) + + ++@azure_ds_telemetry_reporter + def _check_freebsd_cdrom(cdrom_dev): + """Return boolean indicating path to cdrom device has content.""" + try: +@@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): + return False + + ++@azure_ds_telemetry_reporter + def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): + """Return content random seed file if available, otherwise, + return None.""" +@@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): + return seed + + ++@azure_ds_telemetry_reporter + def list_possible_azure_ds_devs(): + devlist = [] + if util.is_FreeBSD(): +@@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): + return devlist + + ++@azure_ds_telemetry_reporter + def load_azure_ds_dir(source_dir): + ovf_file = os.path.join(source_dir, "ovf-env.xml") + +@@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): + @param: imds_metadata: Dict of content read from IMDS network service. + @return: Dictionary containing network version 2 standard configuration. + """ +- if imds_metadata != sources.UNSET and imds_metadata: +- netconfig = {'version': 2, 'ethernets': {}} +- LOG.debug('Azure: generating network configuration from IMDS') +- network_metadata = imds_metadata['network'] +- for idx, intf in enumerate(network_metadata['interface']): +- nicname = 'eth{idx}'.format(idx=idx) +- dev_config = {} +- for addr4 in intf['ipv4']['ipAddress']: +- privateIpv4 = addr4['privateIpAddress'] +- if privateIpv4: +- if dev_config.get('dhcp4', False): +- # Append static address config for nic > 1 +- netPrefix = intf['ipv4']['subnet'][0].get( +- 'prefix', '24') +- if not dev_config.get('addresses'): +- dev_config['addresses'] = [] +- dev_config['addresses'].append( +- '{ip}/{prefix}'.format( +- ip=privateIpv4, prefix=netPrefix)) +- else: +- dev_config['dhcp4'] = True +- for addr6 in intf['ipv6']['ipAddress']: +- privateIpv6 = addr6['privateIpAddress'] +- if privateIpv6: +- dev_config['dhcp6'] = True +- break +- if dev_config: +- mac = ':'.join(re.findall(r'..', intf['macAddress'])) +- dev_config.update( +- {'match': {'macaddress': mac.lower()}, +- 'set-name': nicname}) +- netconfig['ethernets'][nicname] = dev_config +- else: +- blacklist = ['mlx4_core'] +- LOG.debug('Azure: generating fallback configuration') +- # generate a network config, blacklist picking mlx4_core devs +- netconfig = net.generate_fallback_config( +- blacklist_drivers=blacklist, config_driver=True) +- return netconfig ++ with events.ReportEventStack( ++ name="parse_network_config", ++ description="", ++ parent=azure_ds_reporter) as evt: ++ if imds_metadata != sources.UNSET and imds_metadata: ++ netconfig = {'version': 2, 'ethernets': {}} ++ LOG.debug('Azure: generating network configuration from IMDS') ++ network_metadata = imds_metadata['network'] ++ for idx, intf in enumerate(network_metadata['interface']): ++ nicname = 'eth{idx}'.format(idx=idx) ++ dev_config = {} ++ for addr4 in intf['ipv4']['ipAddress']: ++ privateIpv4 = 
addr4['privateIpAddress'] ++ if privateIpv4: ++ if dev_config.get('dhcp4', False): ++ # Append static address config for nic > 1 ++ netPrefix = intf['ipv4']['subnet'][0].get( ++ 'prefix', '24') ++ if not dev_config.get('addresses'): ++ dev_config['addresses'] = [] ++ dev_config['addresses'].append( ++ '{ip}/{prefix}'.format( ++ ip=privateIpv4, prefix=netPrefix)) ++ else: ++ dev_config['dhcp4'] = True ++ for addr6 in intf['ipv6']['ipAddress']: ++ privateIpv6 = addr6['privateIpAddress'] ++ if privateIpv6: ++ dev_config['dhcp6'] = True ++ break ++ if dev_config: ++ mac = ':'.join(re.findall(r'..', intf['macAddress'])) ++ dev_config.update( ++ {'match': {'macaddress': mac.lower()}, ++ 'set-name': nicname}) ++ netconfig['ethernets'][nicname] = dev_config ++ evt.description = "network config from imds" ++ else: ++ blacklist = ['mlx4_core'] ++ LOG.debug('Azure: generating fallback configuration') ++ # generate a network config, blacklist picking mlx4_core devs ++ netconfig = net.generate_fallback_config( ++ blacklist_drivers=blacklist, config_driver=True) ++ evt.description = "network config from fallback" ++ return netconfig + + ++@azure_ds_telemetry_reporter + def get_metadata_from_imds(fallback_nic, retries): + """Query Azure's network metadata service, returning a dictionary. + +@@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): + return util.log_time(**kwargs) + + ++@azure_ds_telemetry_reporter + def _get_metadata_from_imds(retries): + + url = IMDS_URL + "instance?api-version=2017-12-01" +@@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): + return {} + + ++@azure_ds_telemetry_reporter + def maybe_remove_ubuntu_network_config_scripts(paths=None): + """Remove Azure-specific ubuntu network config for non-primary nics. + +@@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): + + + def _is_platform_viable(seed_dir): +- """Check platform environment to report if this datasource may run.""" +- asset_tag = util.read_dmi_data('chassis-asset-tag') +- if asset_tag == AZURE_CHASSIS_ASSET_TAG: +- return True +- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) +- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): +- return True +- return False ++ with events.ReportEventStack( ++ name="check-platform-viability", ++ description="found azure asset tag", ++ parent=azure_ds_reporter) as evt: ++ ++ """Check platform environment to report if this datasource may run.""" ++ asset_tag = util.read_dmi_data('chassis-asset-tag') ++ if asset_tag == AZURE_CHASSIS_ASSET_TAG: ++ return True ++ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) ++ evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag ++ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): ++ return True ++ return False + + + class BrokenAzureDataSource(Exception): +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +old mode 100644 +new mode 100755 +index 2829dd2..d3af05e +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -16,10 +16,27 @@ from xml.etree import ElementTree + + from cloudinit import url_helper + from cloudinit import util ++from cloudinit.reporting import events + + LOG = logging.getLogger(__name__) + + ++azure_ds_reporter = events.ReportEventStack( ++ name="azure-ds", ++ description="initialize reporter for azure ds", ++ reporting_enabled=True) ++ ++ ++def azure_ds_telemetry_reporter(func): ++ def impl(*args, **kwargs): ++ with events.ReportEventStack( 
++ name=func.__name__, ++ description=func.__name__, ++ parent=azure_ds_reporter): ++ return func(*args, **kwargs) ++ return impl ++ ++ + @contextmanager + def cd(newdir): + prevdir = os.getcwd() +@@ -119,6 +136,7 @@ class OpenSSLManager(object): + def clean_up(self): + util.del_dir(self.tmpdir) + ++ @azure_ds_telemetry_reporter + def generate_certificate(self): + LOG.debug('Generating certificate for communication with fabric...') + if self.certificate is not None: +@@ -139,17 +157,20 @@ class OpenSSLManager(object): + LOG.debug('New certificate generated.') + + @staticmethod ++ @azure_ds_telemetry_reporter + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + ++ @azure_ds_telemetry_reporter + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + ++ @azure_ds_telemetry_reporter + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ +@@ -163,6 +184,7 @@ class OpenSSLManager(object): + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + ++ @azure_ds_telemetry_reporter + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. +@@ -185,6 +207,7 @@ class OpenSSLManager(object): + shell=True, data=b'\n'.join(lines)) + return out + ++ @azure_ds_telemetry_reporter + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" +@@ -265,11 +288,13 @@ class WALinuxAgentShim(object): + return socket.inet_ntoa(packed_bytes) + + @staticmethod ++ @azure_ds_telemetry_reporter + def _networkd_get_value_from_leases(leases_d=None): + return dhcp.networkd_get_option_from_leases( + 'OPTION_245', leases_d=leases_d) + + @staticmethod ++ @azure_ds_telemetry_reporter + def _get_value_from_leases_file(fallback_lease_file): + leases = [] + content = util.load_file(fallback_lease_file) +@@ -287,6 +312,7 @@ class WALinuxAgentShim(object): + return leases[-1] + + @staticmethod ++ @azure_ds_telemetry_reporter + def _load_dhclient_json(): + dhcp_options = {} + hooks_dir = WALinuxAgentShim._get_hooks_dir() +@@ -305,6 +331,7 @@ class WALinuxAgentShim(object): + return dhcp_options + + @staticmethod ++ @azure_ds_telemetry_reporter + def _get_value_from_dhcpoptions(dhcp_options): + if dhcp_options is None: + return None +@@ -318,6 +345,7 @@ class WALinuxAgentShim(object): + return _value + + @staticmethod ++ @azure_ds_telemetry_reporter + def find_endpoint(fallback_lease_file=None, dhcp245=None): + value = None + if dhcp245 is not None: +@@ -352,6 +380,7 @@ class WALinuxAgentShim(object): + LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + return endpoint_ip_address + ++ @azure_ds_telemetry_reporter + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() +@@ -404,6 +433,7 @@ class WALinuxAgentShim(object): + + return keys + ++ @azure_ds_telemetry_reporter + def _report_ready(self, goal_state, http_client): + LOG.debug('Reporting ready to Azure fabric.') + document = 
self.REPORT_READY_XML_TEMPLATE.format( +@@ -419,6 +449,7 @@ class WALinuxAgentShim(object): + LOG.info('Reported ready to Azure fabric.') + + ++@azure_ds_telemetry_reporter + def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, +-- +1.8.3.1 + diff --git a/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch b/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch new file mode 100644 index 0000000..d0f1fe7 --- /dev/null +++ b/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch @@ -0,0 +1,129 @@ +From 2604984fc44dde89dac847de9f95011713d448ff Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 29 May 2019 13:41:49 +0200 +Subject: [PATCH 5/5] cc_mounts: check if mount -a on no-change fstab path + +RH-Author: Eduardo Otubo +Message-id: <20190529134149.842-6-otubo@redhat.com> +Patchwork-id: 88269 +O-Subject: [RHEL-8.0.1/RHEL-8.1.0 cloud-init PATCHv2 5/5] cc_mounts: check if mount -a on no-change fstab path +Bugzilla: 1691986 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Cathy Avery + +From: "Jason Zions (MSFT)" +commit acc25d8d7d603313059ac35b4253b504efc560a9 +Author: Jason Zions (MSFT) +Date: Wed May 8 22:47:07 2019 +0000 + + cc_mounts: check if mount -a on no-change fstab path + + Under some circumstances, cc_disk_setup may reformat volumes which + already appear in /etc/fstab (e.g. Azure ephemeral drive is reformatted + from NTFS to ext4 after service-heal). Normally, cc_mounts only calls + mount -a if it altered /etc/fstab. With this change cc_mounts will read + /proc/mounts and verify if configured mounts are already mounted and if + not raise flag to request a mount -a. This handles the case where no + changes to fstab occur but a mount -a is required due to change in + underlying device which prevented the .mount unit from running until + after disk was reformatted. + + LP: #1825596 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/config/cc_mounts.py | 11 ++++++++ + .../unittests/test_handler/test_handler_mounts.py | 30 +++++++++++++++++++++- + 2 files changed, 40 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py +index 339baba..123ffb8 100644 +--- a/cloudinit/config/cc_mounts.py ++++ b/cloudinit/config/cc_mounts.py +@@ -439,6 +439,7 @@ def handle(_name, cfg, cloud, log, _args): + + cc_lines = [] + needswap = False ++ need_mount_all = False + dirs = [] + for line in actlist: + # write 'comment' in the fs_mntops, entry, claiming this +@@ -449,11 +450,18 @@ def handle(_name, cfg, cloud, log, _args): + dirs.append(line[1]) + cc_lines.append('\t'.join(line)) + ++ mount_points = [v['mountpoint'] for k, v in util.mounts().items() ++ if 'mountpoint' in v] + for d in dirs: + try: + util.ensure_dir(d) + except Exception: + util.logexc(log, "Failed to make '%s' config-mount", d) ++ # dirs is list of directories on which a volume should be mounted. ++ # If any of them does not already show up in the list of current ++ # mount points, we will definitely need to do mount -a. 
++ if not need_mount_all and d not in mount_points: ++ need_mount_all = True + + sadds = [WS.sub(" ", n) for n in cc_lines] + sdrops = [WS.sub(" ", n) for n in fstab_removed] +@@ -473,6 +481,9 @@ def handle(_name, cfg, cloud, log, _args): + log.debug("No changes to /etc/fstab made.") + else: + log.debug("Changes to fstab: %s", sops) ++ need_mount_all = True ++ ++ if need_mount_all: + activate_cmds.append(["mount", "-a"]) + if uses_systemd: + activate_cmds.append(["systemctl", "daemon-reload"]) +diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py +index 8fea6c2..0fb160b 100644 +--- a/tests/unittests/test_handler/test_handler_mounts.py ++++ b/tests/unittests/test_handler/test_handler_mounts.py +@@ -154,7 +154,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + return_value=True) + + self.add_patch('cloudinit.config.cc_mounts.util.subp', +- 'mock_util_subp') ++ 'm_util_subp') ++ ++ self.add_patch('cloudinit.config.cc_mounts.util.mounts', ++ 'mock_util_mounts', ++ return_value={ ++ '/dev/sda1': {'fstype': 'ext4', ++ 'mountpoint': '/', ++ 'opts': 'rw,relatime,discard' ++ }}) + + self.mock_cloud = mock.Mock() + self.mock_log = mock.Mock() +@@ -230,4 +238,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + ++ def test_no_change_fstab_sets_needs_mount_all(self): ++ '''verify unchanged fstab entries are mounted if not call mount -a''' ++ fstab_original_content = ( ++ 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' ++ 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' ++ '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' ++ ) ++ fstab_expected_content = fstab_original_content ++ cc = {'mounts': [ ++ ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} ++ with open(cc_mounts.FSTAB_PATH, 'w') as fd: ++ fd.write(fstab_original_content) ++ with open(cc_mounts.FSTAB_PATH, 'r') as fd: ++ fstab_new_content = fd.read() ++ self.assertEqual(fstab_expected_content, fstab_new_content) ++ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) ++ self.m_util_subp.assert_has_calls([ ++ mock.call(['mount', '-a']), ++ mock.call(['systemctl', 'daemon-reload'])]) ++ + # vi: ts=4 expandtab +-- +1.8.3.1 + diff --git a/SOURCES/cloud-init-tmpfiles.conf b/SOURCES/cloud-init-tmpfiles.conf new file mode 100644 index 0000000..0c6d2a3 --- /dev/null +++ b/SOURCES/cloud-init-tmpfiles.conf @@ -0,0 +1 @@ +d /run/cloud-init 0700 root root - - diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec new file mode 100644 index 0000000..56ed546 --- /dev/null +++ b/SPECS/cloud-init.spec @@ -0,0 +1,397 @@ +%{!?license: %global license %%doc} + +# The only reason we are archful is because dmidecode is ExclusiveArch +# https://bugzilla.redhat.com/show_bug.cgi?id=1067089 +%global debug_package %{nil} + +Name: cloud-init +Version: 18.5 +Release: 4%{?dist} +Summary: Cloud instance init scripts + +Group: System Environment/Base +License: GPLv3 +URL: http://launchpad.net/cloud-init +Source0: https://launchpad.net/cloud-init/trunk/%{version}/+download/%{name}-%{version}.tar.gz +Source1: cloud-init-tmpfiles.conf + +Patch0001: 0001-Add-initial-redhat-setup.patch +Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch +Patch0003: 0003-limit-permissions-on-def_log_file.patch +Patch0004: 0004-azure-ensure-that-networkmanager-hook-script-runs.patch +Patch0005: 0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch 
+Patch0006: 0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch +Patch0007: 0007-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch +Patch0008: 0008-net-Make-sysconfig-renderer-compatible-with-Network-.patch +Patch0009: 0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch +Patch0010: 0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch +Patch0011: 0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch +Patch0012: 0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +# For bz#1691986 - [Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure +Patch13: ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch +# For bz#1691986 - [Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure +Patch14: ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch +# For bz#1691986 - [Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure +Patch15: ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch +# For bz#1691986 - [Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure +Patch16: ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch +# For bz#1691986 - [Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure +Patch17: ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch + +BuildArch: noarch + +BuildRequires: pkgconfig(systemd) +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: systemd + +# For tests +BuildRequires: iproute +BuildRequires: python3-configobj +# # https://bugzilla.redhat.com/show_bug.cgi?id=1417029 +BuildRequires: python3-httpretty >= 0.8.14-2 +BuildRequires: python3-jinja2 +BuildRequires: python3-jsonpatch +BuildRequires: python3-jsonschema +BuildRequires: python3-mock +BuildRequires: python3-nose +BuildRequires: python3-oauthlib +BuildRequires: python3-prettytable +BuildRequires: python3-pyserial +BuildRequires: python3-PyYAML +BuildRequires: python3-requests +BuildRequires: python3-six +BuildRequires: python3-unittest2 +# dnf is needed to make cc_ntp unit tests work +# https://bugs.launchpad.net/cloud-init/+bug/1721573 +BuildRequires: /usr/bin/dnf + +Requires: e2fsprogs +Requires: iproute +Requires: libselinux-python3 +Requires: net-tools +Requires: policycoreutils-python3 +Requires: procps +Requires: python3-configobj +Requires: python3-jinja2 +Requires: python3-jsonpatch +Requires: python3-jsonschema +Requires: python3-oauthlib +Requires: python3-prettytable +Requires: python3-pyserial +Requires: python3-PyYAML +Requires: python3-requests +Requires: python3-six +Requires: shadow-utils +Requires: util-linux +Requires: xfsprogs + +%{?systemd_requires} + +%description +Cloud-init is a set of init scripts for cloud instances. Cloud instances +need special scripts to run during initialization to retrieve and install +ssh keys and to let the user run various scripts. + + +%prep +%autosetup -p1 + +# Change shebangs +sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/env python3|' \ + -e 's|#!/usr/bin/python|#!/usr/bin/python3|' tools/* cloudinit/ssh_util.py + +%build +%py3_build + + +%install +%py3_install -- + +python3 tools/render-cloudcfg --variant fedora > $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg + +mkdir -p $RPM_BUILD_ROOT/var/lib/cloud + +# /run/cloud-init needs a tmpfiles.d entry +mkdir -p $RPM_BUILD_ROOT/run/cloud-init +mkdir -p $RPM_BUILD_ROOT/%{_tmpfilesdir} +cp -p %{SOURCE1} $RPM_BUILD_ROOT/%{_tmpfilesdir}/%{name}.conf + +# We supply our own config file since our software differs from Ubuntu's. 
+cp -p rhel/cloud.cfg $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg + +mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d +cp -p tools/21-cloudinit.conf $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +# Make installed NetworkManager hook name less generic +mv $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/hook-network-manager \ + $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook + +# Install our own systemd units (rhbz#1440831) +mkdir -p $RPM_BUILD_ROOT%{_unitdir} +cp rhel/systemd/* $RPM_BUILD_ROOT%{_unitdir}/ + + +%clean +rm -rf $RPM_BUILD_ROOT + + +%post +if [ $1 -eq 1 ] ; then + # Initial installation + # Enabled by default per "runs once then goes away" exception + /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : +elif [ $1 -eq 2 ]; then + # Upgrade. If the upgrade is from a version older than 0.7.9-8, + # there will be stale systemd config + /bin/systemctl is-enabled cloud-config.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-config.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-final.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-final.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-init.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-init-local.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init-local.service >/dev/null 2>&1 || : +fi + +%preun +if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : + # One-shot services -> no need to stop +fi + +%postun +%systemd_postun + + +%files +%license LICENSE +%doc ChangeLog rhel/README.rhel +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%doc %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir %{_sysconfdir}/cloud/templates +%config(noreplace) %{_sysconfdir}/cloud/templates/* +%{_unitdir}/cloud-config.service +%{_unitdir}/cloud-config.target +%{_unitdir}/cloud-final.service +%{_unitdir}/cloud-init-local.service +%{_unitdir}/cloud-init.service +%{_tmpfilesdir}/%{name}.conf +%{python3_sitelib}/* +%{_libexecdir}/%{name} +%{_bindir}/cloud-init* +%doc %{_datadir}/doc/%{name} +%dir /run/cloud-init +%dir /var/lib/cloud +/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook +%{_udevrulesdir}/66-azure-ephemeral.rules +%{_sysconfdir}/bash_completion.d/cloud-init +%{_bindir}/cloud-id + +%dir %{_sysconfdir}/rsyslog.d +%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +%changelog +* Mon Jun 03 2019 Miroslav Rezanina - 18.5-4.el8 +- ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch [bz#1691986] +- ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch [bz#1691986] +- ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch [bz#1691986] +- ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch [bz#1691986] +- ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch [bz#1691986] +- Resolves: bz#1691986 + ([Azure] [RHEL 8.1] 
Cloud-init fixes to support fast provisioning for Azure)
+
+* Tue Apr 16 2019 Danilo Cesar Lemes de Paula - 18.5-3.el8
+- ci-Adding-gating-tests-for-Azure-ESXi-and-AWS.patch [bz#1682786]
+- Resolves: bz#1682786
+ (cloud-init changes blocked until gating tests are added)
+
+* Wed Apr 10 2019 Danilo C. L. de Paula - 18.5-2
+- Adding gating.yaml file
+- Resolves: rhbz#1682786
+ (cloud-init changes blocked until gating tests are added)
+
+* Wed Apr 10 2019 Danilo de Paula - 18.2-6.el8
+- ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch [bz#1602784]
+- Resolves: bz#1602784
+ (cloud-init: Sometimes image boots fingerprints is configured, there's a network device present but it's not configured)
+
+* Fri Jan 18 2019 Miroslav Rezanina - 18.2-5.el8
+- ci-Fix-string-missmatch-when-mounting-ntfs.patch [bz#1664227]
+- Resolves: bz#1664227
+ ([Azure]String missmatch causes the /dev/sdb1 mounting failed after stop&start VM)
+
+* Thu Jan 10 2019 Miroslav Rezanina - 18.2-4.el8
+- ci-Enable-cloud-init-by-default-on-vmware.patch [bz#1644335]
+- Resolves: bz#1644335
+ ([ESXi][RHEL8.0]Enable cloud-init by default on VMware)
+
+* Wed Nov 28 2018 Miroslav Rezanina - 18.2-3.el8
+- ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch [bz#1615599]
+- ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch [bz#1615599]
+- ci-azure-Add-reported-ready-marker-file.patch [bz#1615599]
+- ci-Adding-disk_setup-to-rhel-cloud.cfg.patch [bz#1615599]
+- Resolves: bz#1615599
+ ([Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM)
+
+* Tue Nov 06 2018 Miroslav Rezanina - 18.2-2.el7
+- Revert "remove 'tee' command from logging configuration" [bz#1626117]
+- Resolves: rhbz#1626117
+ (cloud-init-0.7.9-9 doesn't feed cloud-init-output.log)
+
+* Fri Jun 29 2018 Miroslav Rezanina - 18.2-1.el7
+- Rebase to 18.2 [bz#1515909]
+ Resolves: rhbz#1515909
+
+* Tue Feb 13 2018 Ryan McCabe 0.7.9-24
+- Set DHCP_HOSTNAME on Azure to allow for the hostname to be
+ published correctly when bouncing the network.
+ Resolves: rhbz#1434109
+
+* Mon Jan 15 2018 Ryan McCabe 0.7.9-23
+- Fix a bug that caused cloud-init to fail as a result of trying
+ to rename bonds.
+ Resolves: rhbz#1512247
+
+* Mon Jan 15 2018 Ryan McCabe 0.7.9-22
+- Apply patch from -21
+ Resolves: rhbz#1489270
+
+* Mon Jan 15 2018 Ryan McCabe 0.7.9-21
+- sysconfig: Fix a potential traceback introduced in the
+ 0.7.9-17 build
+ Resolves: rhbz#1489270
+
+* Sun Dec 17 2017 Ryan McCabe 0.7.9-20
+- sysconfig: Correct rendering for dhcp on ipv6
+ Resolves: rhbz#1519271
+
+* Thu Nov 30 2017 Ryan McCabe 0.7.9-19
+- sysconfig: Fix rendering of default gateway for ipv6
+ Resolves: rhbz#1492726
+
+* Fri Nov 24 2017 Ryan McCabe 0.7.9-18
+- Start the cloud-init init local service after the dbus socket is created
+ so that the hostnamectl command works.
+ Resolves: rhbz#1450521
+
+* Tue Nov 21 2017 Ryan McCabe 0.7.9-17
+- Correctly render DNS and DOMAIN for sysconfig
+ Resolves: rhbz#1489270
+
+* Mon Nov 20 2017 Ryan McCabe 0.7.9-16
+- Disable NetworkManager management of resolv.conf if nameservers
+ are specified by configuration.
+ Resolves: rhbz#1454491
+
+* Mon Nov 13 2017 Ryan McCabe 0.7.9-15
+- Fix a null reference error in the rh_subscription module
+ Resolves: rhbz#1498974
+
+* Mon Nov 13 2017 Ryan McCabe 0.7.9-14
+- Include gateway if it's included in subnet configuration
+ Resolves: rhbz#1492726
+
+* Sun Nov 12 2017 Ryan McCabe 0.7.9-13
+- Do proper cleanup of systemd units when upgrading from versions
+ 0.7.9-3 through 0.7.9-8.
+ Resolves: rhbz#1465730 + +* Thu Nov 09 2017 Ryan McCabe 0.7.9-12 +- Prevent Azure NM and dhclient hooks from running when cloud-init is + disabled (rhbz#1474226) + +* Tue Oct 31 2017 Ryan McCabe 0.7.9-11 +- Fix rendering of multiple static IPs per interface file + Resolves: rhbz#bz1497954 + +* Tue Sep 26 2017 Ryan McCabe 0.7.9-10 +- AliCloud: Add support for the Alibaba Cloud datasource (rhbz#1482547) + +* Thu Jun 22 2017 Lars Kellogg-Stedman 0.7.9-9 +- RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. (rhbz#1438082) +- azure: ensure that networkmanager hook script runs (rhbz#1440831 rhbz#1460206) +- Fix ipv6 subnet detection (rhbz#1438082) + +* Tue May 23 2017 Lars Kellogg-Stedman 0.7.9-8 +- Update patches + +* Mon May 22 2017 Lars Kellogg-Stedman 0.7.9-7 +- Add missing sysconfig unit test data (rhbz#1438082) +- Fix dual stack IPv4/IPv6 configuration for RHEL (rhbz#1438082) +- sysconfig: Raise ValueError when multiple default gateways are present. (rhbz#1438082) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- Do not write NM_CONTROLLED=no in generated interface config files (rhbz#1385172) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-6 +- add power-state-change module to cloud_final_modules (rhbz#1252477) +- remove 'tee' command from logging configuration (rhbz#1424612) +- limit permissions on def_log_file (rhbz#1424612) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- OpenStack: add 'dvs' to the list of physical link types. (rhbz#1442783) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-5 +- systemd: replace generator with unit conditionals (rhbz#1440831) + +* Thu Apr 13 2017 Charalampos Stratakis 0.7.9-4 +- Import to RHEL 7 +Resolves: rhbz#1427280 + +* Tue Mar 07 2017 Lars Kellogg-Stedman 0.7.9-3 +- fixes for network config generation +- avoid dependency cycle at boot (rhbz#1420946) + +* Tue Jan 17 2017 Lars Kellogg-Stedman 0.7.9-2 +- use timeout from datasource config in openstack get_data (rhbz#1408589) + +* Thu Dec 01 2016 Lars Kellogg-Stedman - 0.7.9-1 +- Rebased on upstream 0.7.9. +- Remove dependency on run-parts + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-8 +- make rh_subscription plugin do nothing in the absence of a valid + configuration [RH:1295953] +- move rh_subscription module to cloud_config stage + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-7 +- correct permissions on /etc/ssh/sshd_config [RH:1296191] + +* Thu Sep 03 2015 Lars Kellogg-Stedman - 0.7.6-6 +- rebuild for ppc64le + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-5 +- bump revision for new build + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-4 +- ensure rh_subscription plugin is enabled by default + +* Wed Apr 29 2015 Lars Kellogg-Stedman - 0.7.6-3 +- added dependency on python-jinja2 [RH:1215913] +- added rhn_subscription plugin [RH:1227393] +- require pyserial to support smartos data source [RH:1226187] + +* Fri Jan 16 2015 Lars Kellogg-Stedman - 0.7.6-2 +- Rebased RHEL version to Fedora rawhide +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1246485 +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1411829 + +* Fri Nov 14 2014 Colin Walters - 0.7.6-1 +- New upstream version [RH:974327] +- Drop python-cheetah dependency (same as above bug)
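
The Azure patch imported above threads nearly every datasource helper through a telemetry decorator. The sketch below shows that pattern in isolation, for readers who do not want to trace it through the diff hunks. It is only an illustration: the ReportEventStack class here is a simplified stand-in for cloudinit.reporting.events.ReportEventStack (it mimics the context-manager shape, not real event emission), crawl_metadata is a hypothetical placeholder rather than the real DataSourceAzure method, and functools.wraps is an addition for tidiness that the original decorator does not use.

import functools
import logging

LOG = logging.getLogger(__name__)


class ReportEventStack:
    # Simplified stand-in for cloudinit.reporting.events.ReportEventStack:
    # it only reproduces the context-manager behaviour the patch relies on,
    # it does not emit real reporting events.
    def __init__(self, name, description, parent=None, reporting_enabled=True):
        self.name = name
        self.description = description
        self.parent = parent

    def __enter__(self):
        LOG.debug("start: %s (%s)", self.name, self.description)
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        LOG.debug("finish: %s (%s)", self.name, self.description)
        return False


# Mirrors azure_ds_reporter from cloudinit/sources/helpers/azure.py above.
azure_ds_reporter = ReportEventStack(
    name="azure-ds",
    description="initialize reporter for azure ds",
    reporting_enabled=True)


def azure_ds_telemetry_reporter(func):
    # Same shape as the decorator added by the patch: each decorated call
    # runs inside a child ReportEventStack named after the function.
    @functools.wraps(func)
    def impl(*args, **kwargs):
        with ReportEventStack(name=func.__name__,
                              description=func.__name__,
                              parent=azure_ds_reporter):
            return func(*args, **kwargs)
    return impl


@azure_ds_telemetry_reporter
def crawl_metadata():
    # Hypothetical placeholder for DataSourceAzure.crawl_metadata().
    return {'local-hostname': 'example'}


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    crawl_metadata()

In the real patch, a few call sites additionally update evt.description inside the with-block (for example "mount-ntfs-and-count" and "parse_network_config") so the recorded event describes the outcome, not just the attempt.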