diff --git a/.cloud-init.metadata b/.cloud-init.metadata index f7516a9..9b7ca96 100644 --- a/.cloud-init.metadata +++ b/.cloud-init.metadata @@ -1 +1 @@ -a862d6618a4c56c79d3fb0e279f6c93d0f0141cd SOURCES/cloud-init-18.5.tar.gz +5f4de38850f9691dc9789bd4db4be512c9717d7b SOURCES/cloud-init-19.4.tar.gz diff --git a/.gitignore b/.gitignore index e2ea71d..cc9fcc1 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/cloud-init-18.5.tar.gz +SOURCES/cloud-init-19.4.tar.gz diff --git a/SOURCES/0001-Add-initial-redhat-setup.patch b/SOURCES/0001-Add-initial-redhat-setup.patch index c0171dd..a35f5fd 100644 --- a/SOURCES/0001-Add-initial-redhat-setup.patch +++ b/SOURCES/0001-Add-initial-redhat-setup.patch @@ -1,4 +1,4 @@ -From ecbba8bd696010d6d9aea96e02cf40dfb82f00dd Mon Sep 17 00:00:00 2001 +From fa04378748e046997fdb47c50def77fac0b89692 Mon Sep 17 00:00:00 2001 From: Miroslav Rezanina Date: Thu, 31 May 2018 16:45:23 +0200 Subject: Add initial redhat setup @@ -20,28 +20,28 @@ Merged patches (18.5): redhat/Makefile | 71 +++++++ redhat/Makefile.common | 35 ++++ redhat/cloud-init-tmpfiles.conf | 1 + - redhat/cloud-init.spec.template | 335 ++++++++++++++++++++++++++++++++++ + redhat/cloud-init.spec.template | 369 ++++++++++++++++++++++++++++++++++ redhat/rpmbuild/BUILD/.gitignore | 3 + redhat/rpmbuild/RPMS/.gitignore | 3 + redhat/rpmbuild/SOURCES/.gitignore | 3 + redhat/rpmbuild/SPECS/.gitignore | 3 + redhat/rpmbuild/SRPMS/.gitignore | 3 + redhat/scripts/frh.py | 27 +++ - redhat/scripts/git-backport-diff | 327 +++++++++++++++++++++++++++++++++ - redhat/scripts/git-compile-check | 215 ++++++++++++++++++++++ - redhat/scripts/process-patches.sh | 73 ++++++++ + redhat/scripts/git-backport-diff | 327 ++++++++++++++++++++++++++++++ + redhat/scripts/git-compile-check | 215 ++++++++++++++++++++ + redhat/scripts/process-patches.sh | 73 +++++++ redhat/scripts/tarball_checksum.sh | 3 + rhel/README.rhel | 5 + rhel/cloud-init-tmpfiles.conf | 1 + rhel/cloud.cfg | 69 +++++++ rhel/systemd/cloud-config.service | 18 ++ - rhel/systemd/cloud-config.target | 11 ++ + rhel/systemd/cloud-config.target | 11 + rhel/systemd/cloud-final.service | 19 ++ - rhel/systemd/cloud-init-local.service | 31 ++++ + rhel/systemd/cloud-init-local.service | 31 +++ rhel/systemd/cloud-init.service | 25 +++ - setup.py | 64 +------ - tools/read-version | 25 +-- - 28 files changed, 1295 insertions(+), 90 deletions(-) + setup.py | 70 +------ + tools/read-version | 28 +-- + 28 files changed, 1330 insertions(+), 98 deletions(-) create mode 100644 redhat/.gitignore create mode 100644 redhat/Makefile create mode 100644 redhat/Makefile.common @@ -67,7 +67,7 @@ Merged patches (18.5): create mode 100644 rhel/systemd/cloud-init.service diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py -index 46abedd..fe7bda8 100644 +index 0ad6b7f..e4408a4 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -33,7 +33,7 @@ file). @@ -79,7 +79,7 @@ index 46abedd..fe7bda8 100644 validation_cert: (optional string to be written to file validation_key) special value 'system' means set use existing file validation_key: (optional the path for validation_cert. 
default -@@ -88,7 +88,7 @@ CHEF_DIRS = tuple([ +@@ -89,7 +89,7 @@ CHEF_DIRS = tuple([ '/var/lib/chef', '/var/cache/chef', '/var/backups/chef', @@ -88,20 +88,20 @@ index 46abedd..fe7bda8 100644 ]) REQUIRED_CHEF_DIRS = tuple([ '/etc/chef', -@@ -112,7 +112,7 @@ CHEF_RB_TPL_DEFAULTS = { +@@ -113,7 +113,7 @@ CHEF_RB_TPL_DEFAULTS = { 'json_attribs': CHEF_FB_PATH, 'file_cache_path': "/var/cache/chef", 'file_backup_path': "/var/backups/chef", - 'pid_file': "/var/run/chef/client.pid", + 'pid_file': "/run/chef/client.pid", 'show_time': True, + 'encrypted_data_bag_secret': None, } - CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index b1ebaad..c536768 100644 +index ca4ffa8..3a04a58 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py -@@ -44,13 +44,16 @@ CFG_BUILTIN = { +@@ -46,13 +46,16 @@ CFG_BUILTIN = { ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], @@ -348,22 +348,25 @@ index 0000000..68fc5f1 +[Install] +WantedBy=multi-user.target diff --git a/setup.py b/setup.py -index ea37efc..06ae48a 100755 +index 01a67b9..b2ac9bb 100755 --- a/setup.py +++ b/setup.py -@@ -135,11 +135,6 @@ INITSYS_FILES = { +@@ -139,14 +139,6 @@ INITSYS_FILES = { 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], - 'systemd': [render_tmpl(f) - for f in (glob('systemd/*.tmpl') + - glob('systemd/*.service') + -- glob('systemd/*.target')) if is_f(f)], -- 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], +- glob('systemd/*.target')) +- if (is_f(f) and not is_generator(f))], +- 'systemd.generators': [ +- render_tmpl(f, mode=0o755) +- for f in glob('systemd/*') if is_f(f) and is_generator(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { -@@ -148,9 +143,6 @@ INITSYS_ROOTS = { +@@ -155,9 +147,6 @@ INITSYS_ROOTS = { 'sysvinit_deb': 'etc/init.d', 'sysvinit_openrc': 'etc/init.d', 'sysvinit_suse': 'etc/init.d', @@ -373,7 +376,7 @@ index ea37efc..06ae48a 100755 'upstart': 'etc/init/', } INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) -@@ -188,47 +180,6 @@ class MyEggInfo(egg_info): +@@ -208,47 +197,6 @@ class MyEggInfo(egg_info): return ret @@ -421,20 +424,24 @@ index ea37efc..06ae48a 100755 if not in_virtualenv(): USR = "/" + USR ETC = "/" + ETC -@@ -239,11 +190,9 @@ if not in_virtualenv(): +@@ -258,14 +206,11 @@ if not in_virtualenv(): + INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] data_files = [ - (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), - (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), ++ (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), - (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', - 'tools/uncloud-init', + (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), +- (USR + '/share/bash-completion/completions', +- ['bash_completion/cloud-init']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', -@@ -255,15 +204,8 @@ if os.uname()[0] != 'FreeBSD': + [f for f in glob('doc/examples/*') if is_f(f)]), +@@ -276,15 +221,8 @@ if os.uname()[0] != 'FreeBSD': data_files.extend([ (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), @@ -451,7 +458,7 @@ index 
ea37efc..06ae48a 100755 requirements = read_requires() -@@ -278,8 +220,6 @@ setuptools.setup( +@@ -299,8 +237,6 @@ setuptools.setup( scripts=['tools/cloud-init-per'], license='Dual-licensed under GPLv3 or Apache 2.0', data_files=data_files, @@ -461,10 +468,10 @@ index ea37efc..06ae48a 100755 'console_scripts': [ 'cloud-init = cloudinit.cmd.main:main', diff --git a/tools/read-version b/tools/read-version -index e69c2ce..d43cc8f 100755 +index 6dca659..d43cc8f 100755 --- a/tools/read-version +++ b/tools/read-version -@@ -65,29 +65,8 @@ output_json = '--json' in sys.argv +@@ -65,32 +65,8 @@ output_json = '--json' in sys.argv src_version = ci_version.version_string() version_long = None @@ -474,9 +481,12 @@ index e69c2ce..d43cc8f 100755 - flags = ['--tags'] - cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags - -- version = tiny_p(cmd).strip() +- try: +- version = tiny_p(cmd).strip() +- except RuntimeError: +- version = None - -- if not version.startswith(src_version): +- if version is None or not version.startswith(src_version): - sys.stderr.write("git describe version (%s) differs from " - "cloudinit.version (%s)\n" % (version, src_version)) - sys.stderr.write( diff --git a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch index 1711add..ffe505e 100644 --- a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch +++ b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch @@ -1,4 +1,4 @@ -From af85b22b99e3b784e982e4be11b2a39d79316265 Mon Sep 17 00:00:00 2001 +From e6c32d9e6c01e46a91699954b88543cce4d325bb Mon Sep 17 00:00:00 2001 From: Miroslav Rezanina Date: Thu, 31 May 2018 19:37:55 +0200 Subject: Do not write NM_CONTROLLED=no in generated interface config files @@ -11,260 +11,261 @@ Signed-off-by: Ryan McCabe 2 files changed, 31 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index 17293e1..ae0554e 100644 +index 310cdf0..8bd7e88 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py -@@ -250,7 +250,6 @@ class Renderer(renderer.Renderer): +@@ -272,7 +272,6 @@ class Renderer(renderer.Renderer): iface_defaults = tuple([ ('ONBOOT', True), ('USERCTL', False), - ('NM_CONTROLLED', False), ('BOOTPROTO', 'none'), + ('STARTMODE', 'auto'), ]) - diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 195f261..5f1aa3e 100644 +index 01119e0..a931a3e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py -@@ -175,7 +175,6 @@ GATEWAY=172.19.3.254 +@@ -530,7 +530,6 @@ GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 NETMASK=255.255.252.0 -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -279,7 +278,6 @@ IPADDR=172.19.1.34 +@@ -636,7 +635,6 @@ IPADDR=172.19.1.34 IPADDR1=10.0.0.10 NETMASK=255.255.252.0 NETMASK1=255.255.255.0 -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -407,7 +405,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +@@ -772,7 +770,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -523,7 +520,6 @@ NETWORK_CONFIGS = { +@@ -889,7 +886,6 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=eth1 HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -539,7 +535,6 @@ NETWORK_CONFIGS = { 
+@@ -907,7 +903,6 @@ NETWORK_CONFIGS = { IPADDR=192.168.21.3 NETMASK=255.255.255.0 METRIC=10000 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -652,7 +647,6 @@ NETWORK_CONFIGS = { +@@ -1022,7 +1017,6 @@ NETWORK_CONFIGS = { IPV6ADDR=2001:1::1/64 IPV6INIT=yes NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -894,14 +888,12 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -1491,7 +1485,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DHCPV6C=yes IPV6INIT=yes MACADDR=aa:bb:cc:dd:ee:ff - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond - USERCTL=no"""), - 'ifcfg-bond0.200': textwrap.dedent("""\ +@@ -1500,7 +1493,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=dhcp DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 - TYPE=Ethernet -@@ -918,7 +910,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -1519,7 +1511,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true IPV6_DEFAULTGW=2001:4800:78ff:1b::1 MACADDR=bb:bb:bb:bb:bb:aa NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes PRIO=22 - STP=no -@@ -928,7 +919,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -1530,7 +1521,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=none DEVICE=eth0 HWADDR=c0:d6:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -945,7 +935,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -1548,7 +1538,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true MTU=1500 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 - TYPE=Ethernet -@@ -956,7 +945,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -1560,7 +1549,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth1 HWADDR=aa:d6:9f:2c:e8:80 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto SLAVE=yes - TYPE=Ethernet -@@ -966,7 +954,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -1571,7 +1559,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth2 HWADDR=c0:bb:9f:2c:e8:80 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto SLAVE=yes - TYPE=Ethernet -@@ -976,7 +963,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -1582,7 +1569,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth3 HWADDR=66:bb:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -985,7 +971,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -1592,7 +1578,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth4 HWADDR=98:bb:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -993,7 +978,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - BOOTPROTO=dhcp +@@ -1602,7 +1587,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a - NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet - USERCTL=no""") 
-@@ -1356,7 +1340,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2088,7 +2072,6 @@ iface bond0 inet6 static MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond - USERCTL=no -@@ -1366,7 +1349,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2099,7 +2082,6 @@ iface bond0 inet6 static DEVICE=bond0s0 HWADDR=aa:bb:cc:dd:e8:00 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes SLAVE=yes - TYPE=Ethernet -@@ -1388,7 +1370,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -2122,7 +2104,6 @@ iface bond0 inet6 static DEVICE=bond0s1 HWADDR=aa:bb:cc:dd:e8:01 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes SLAVE=yes - TYPE=Ethernet -@@ -1426,7 +1407,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -2161,7 +2142,6 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=en0 HWADDR=aa:bb:cc:dd:e8:00 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no"""), -@@ -1443,7 +1423,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2180,7 +2160,6 @@ iface bond0 inet6 static MTU=2222 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=en0 - TYPE=Ethernet -@@ -1484,7 +1463,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true + STARTMODE=auto +@@ -2222,7 +2201,6 @@ iface bond0 inet6 static DEVICE=br0 IPADDR=192.168.2.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes PRIO=22 - STP=no -@@ -1498,7 +1476,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - HWADDR=52:54:00:12:34:00 + STARTMODE=auto +@@ -2238,7 +2216,6 @@ iface bond0 inet6 static + IPADDR6=2001:1::100/96 IPV6ADDR=2001:1::100/96 IPV6INIT=yes - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -1510,7 +1487,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - HWADDR=52:54:00:12:34:01 +@@ -2252,7 +2229,6 @@ iface bond0 inet6 static + IPADDR6=2001:1::101/96 IPV6ADDR=2001:1::101/96 IPV6INIT=yes - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -1584,7 +1560,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2327,7 +2303,6 @@ iface bond0 inet6 static HWADDR=52:54:00:12:34:00 IPADDR=192.168.1.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet - USERCTL=no -@@ -1594,7 +1569,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2338,7 +2313,6 @@ iface bond0 inet6 static DEVICE=eth1 HWADDR=52:54:00:12:34:aa MTU=1480 - NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -1603,7 +1577,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +@@ -2348,7 +2322,6 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=eth2 HWADDR=52:54:00:12:34:ff - NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet - USERCTL=no -@@ -1969,7 +1942,6 @@ class TestRhelSysConfigRendering(CiTestCase): +@@ -2766,7 +2739,6 @@ class TestRhelSysConfigRendering(CiTestCase): BOOTPROTO=dhcp DEVICE=eth1000 - HWADDR=07-1C-C6-75-A4-BE + HWADDR=07-1c-c6-75-a4-be -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -2090,7 +2062,6 @@ GATEWAY=10.0.2.2 +@@ -2888,7 +2860,6 @@ GATEWAY=10.0.2.2 HWADDR=52:54:00:12:34:00 IPADDR=10.0.2.15 
NETMASK=255.255.255.0 -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -@@ -2111,7 +2082,6 @@ USERCTL=no +@@ -2961,7 +2932,6 @@ USERCTL=no # BOOTPROTO=dhcp DEVICE=eth0 -NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet - USERCTL=no -- 1.8.3.1 diff --git a/SOURCES/0003-limit-permissions-on-def_log_file.patch b/SOURCES/0003-limit-permissions-on-def_log_file.patch index dbddd99..3a3610c 100644 --- a/SOURCES/0003-limit-permissions-on-def_log_file.patch +++ b/SOURCES/0003-limit-permissions-on-def_log_file.patch @@ -1,4 +1,4 @@ -From 0101e1677a1ae5ba328657e0b060277707de3913 Mon Sep 17 00:00:00 2001 +From ca723f1846bb8e2880ac264b28bdf7ecd14c9e70 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 7 Apr 2017 18:50:54 -0400 Subject: limit permissions on def_log_file @@ -16,10 +16,10 @@ X-approved-upstream: true 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index c536768..d982a4d 100644 +index 3a04a58..439eee0 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py -@@ -43,6 +43,7 @@ CFG_BUILTIN = { +@@ -45,6 +45,7 @@ CFG_BUILTIN = { 'None', ], 'def_log_file': '/var/log/cloud-init.log', @@ -28,10 +28,10 @@ index c536768..d982a4d 100644 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], 'ssh_deletekeys': False, diff --git a/cloudinit/stages.py b/cloudinit/stages.py -index 8a06412..4f15484 100644 +index 71f3a49..68b83af 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py -@@ -148,8 +148,9 @@ class Init(object): +@@ -149,8 +149,9 @@ class Init(object): def _initialize_filesystem(self): util.ensure_dirs(self._initial_subdirs()) log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') diff --git a/SOURCES/0004-remove-tee-command-from-logging-configuration.patch b/SOURCES/0004-remove-tee-command-from-logging-configuration.patch index 8064382..820011f 100644 --- a/SOURCES/0004-remove-tee-command-from-logging-configuration.patch +++ b/SOURCES/0004-remove-tee-command-from-logging-configuration.patch @@ -1,4 +1,4 @@ -From 933f0331cfe293a34412eb06c59d8d3a06cdd9f9 Mon Sep 17 00:00:00 2001 +From f762ed6ffe869cfbf3d527eb9e65a3745dd3c890 Mon Sep 17 00:00:00 2001 From: Miroslav Rezanina Date: Thu, 31 May 2018 19:46:53 +0200 Subject: remove 'tee' command from logging configuration diff --git a/SOURCES/0005-azure-ensure-that-networkmanager-hook-script-runs.patch b/SOURCES/0005-azure-ensure-that-networkmanager-hook-script-runs.patch deleted file mode 100644 index 995d365..0000000 --- a/SOURCES/0005-azure-ensure-that-networkmanager-hook-script-runs.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 2ea5ccd14def513ced62a03a4b2ac14f58f28d32 Mon Sep 17 00:00:00 2001 -From: Lars Kellogg-Stedman -Date: Thu, 15 Jun 2017 12:20:39 -0400 -Subject: azure: ensure that networkmanager hook script runs - -The networkmanager hook script was failing to run due to the changes -we made to resolve rhbz#1440831. This corrects the regression by -allowing the NM hook script to run regardless of whether or not -cloud-init is "enabled". 
- -Resolves: rhbz#1460206 -X-downstream-only: true ---- - tools/hook-dhclient | 3 +-- - tools/hook-network-manager | 3 +-- - tools/hook-rhel.sh | 3 +-- - 3 files changed, 3 insertions(+), 6 deletions(-) - -diff --git a/tools/hook-dhclient b/tools/hook-dhclient -index 02122f3..181cd51 100755 ---- a/tools/hook-dhclient -+++ b/tools/hook-dhclient -@@ -13,8 +13,7 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init is enabled and on azure -- [ -e /run/cloud-init/enabled ] || return 1 -+ # only execute hooks if cloud-init is running on azure - is_azure - } - -diff --git a/tools/hook-network-manager b/tools/hook-network-manager -index 67d9044..1d52cad 100755 ---- a/tools/hook-network-manager -+++ b/tools/hook-network-manager -@@ -13,8 +13,7 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init is enabled and on azure -- [ -e /run/cloud-init/enabled ] || return 1 -+ # only execute hooks if cloud-init running on azure - is_azure - } - -diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh -index 513a551..d75767e 100755 ---- a/tools/hook-rhel.sh -+++ b/tools/hook-rhel.sh -@@ -13,8 +13,7 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init is enabled and on azure -- [ -e /run/cloud-init/enabled ] || return 1 -+ # only execute hooks if cloud-init is running on azure - is_azure - } - --- -1.8.3.1 - diff --git a/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch new file mode 100644 index 0000000..a0e4dc5 --- /dev/null +++ b/SOURCES/0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch @@ -0,0 +1,34 @@ +From a48707b6ecd359a2bb058b1278584185a98a8445 Mon Sep 17 00:00:00 2001 +From: Miroslav Rezanina +Date: Thu, 31 May 2018 20:00:32 +0200 +Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp + +Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies +only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6. 
+ +X-downstream-only: yes + +Resolves: rhbz#1519271 +Signed-off-by: Ryan McCabe + +Merged patches (19.4): +- 1da1ea1 sysconfig: Don't disable IPV6_AUTOCONF +--- + tests/unittests/test_net.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index a931a3e..1306a0f 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -1483,6 +1483,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes ++ IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +1.8.3.1 + diff --git a/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch new file mode 100644 index 0000000..e132de7 --- /dev/null +++ b/SOURCES/0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch @@ -0,0 +1,56 @@ +From de1ca08b5a7b6006aad24bbe23a6a837bdb77274 Mon Sep 17 00:00:00 2001 +From: Vitaly Kuznetsov +Date: Tue, 17 Apr 2018 13:07:54 +0200 +Subject: DataSourceAzure.py: use hostnamectl to set hostname + +RH-Author: Vitaly Kuznetsov +Message-id: <20180417130754.12918-3-vkuznets@redhat.com> +Patchwork-id: 79659 +O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname +Bugzilla: 1568717 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Cathy Avery + +The right way to set hostname in RHEL7 is: + + $ hostnamectl set-hostname HOSTNAME + +DataSourceAzure, however, uses: + $ hostname HOSTSNAME + +instead and this causes problems. We can't simply change +'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used +for both getting and setting the hostname. + +Long term, this should be fixed in a different way. Cloud-init +has distro-specific hostname setting/getting (see +cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched +to use these. + +Resolves: rhbz#1434109 + +X-downstream-only: yes + +Signed-off-by: Vitaly Kuznetsov +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceAzure.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 24f448c..6fb889c 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -256,7 +256,7 @@ def get_hostname(hostname_command='hostname'): + + + def set_hostname(hostname, hostname_command='hostname'): +- util.subp([hostname_command, hostname]) ++ util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + + + @azure_ds_telemetry_reporter +-- +1.8.3.1 + diff --git a/SOURCES/0006-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0006-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch deleted file mode 100644 index c809af5..0000000 --- a/SOURCES/0006-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 928518481bf8963f25ffe3b7a48c87864a5d7331 Mon Sep 17 00:00:00 2001 -From: Miroslav Rezanina -Date: Thu, 31 May 2018 20:00:32 +0200 -Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp - -Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies -only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6. 
- -X-downstream-only: yes - -Resolves: rhbz#1519271 -Signed-off-by: Ryan McCabe ---- - cloudinit/net/sysconfig.py | 1 + - tests/unittests/test_net.py | 1 + - 2 files changed, 2 insertions(+) - -diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index ae0554e..ec166cf 100644 ---- a/cloudinit/net/sysconfig.py -+++ b/cloudinit/net/sysconfig.py -@@ -310,6 +310,7 @@ class Renderer(renderer.Renderer): - if subnet_type == 'dhcp6': - iface_cfg['IPV6INIT'] = True - iface_cfg['DHCPV6C'] = True -+ iface_cfg['IPV6_AUTOCONF'] = False - elif subnet_type in ['dhcp4', 'dhcp']: - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 5f1aa3e..8bcafe0 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -886,6 +886,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes -+ IPV6_AUTOCONF=no - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - ONBOOT=yes --- -1.8.3.1 - diff --git a/SOURCES/0007-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0007-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch deleted file mode 100644 index 98d69d3..0000000 --- a/SOURCES/0007-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 0d66b31973a4934b2019ff7df37087e2f427f6fd Mon Sep 17 00:00:00 2001 -From: Vitaly Kuznetsov -Date: Tue, 17 Apr 2018 13:07:54 +0200 -Subject: DataSourceAzure.py: use hostnamectl to set hostname - -RH-Author: Vitaly Kuznetsov -Message-id: <20180417130754.12918-3-vkuznets@redhat.com> -Patchwork-id: 79659 -O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname -Bugzilla: 1568717 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Cathy Avery - -The right way to set hostname in RHEL7 is: - - $ hostnamectl set-hostname HOSTNAME - -DataSourceAzure, however, uses: - $ hostname HOSTSNAME - -instead and this causes problems. We can't simply change -'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used -for both getting and setting the hostname. - -Long term, this should be fixed in a different way. Cloud-init -has distro-specific hostname setting/getting (see -cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched -to use these. 
- -Resolves: rhbz#1434109 - -X-downstream-only: yes - -Signed-off-by: Vitaly Kuznetsov -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index e076d5d..7dbeb04 100644 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -238,7 +238,7 @@ def get_hostname(hostname_command='hostname'): - - - def set_hostname(hostname, hostname_command='hostname'): -- util.subp([hostname_command, hostname]) -+ util.subp(['hostnamectl', 'set-hostname', str(hostname)]) - - - @contextlib.contextmanager --- -1.8.3.1 - diff --git a/SOURCES/0007-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0007-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch new file mode 100644 index 0000000..88e7d0d --- /dev/null +++ b/SOURCES/0007-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch @@ -0,0 +1,65 @@ +From 9694c02529a8c1c41fd3ea003b750fd519f24420 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 20 Mar 2019 11:45:59 +0100 +Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network + +RH-Author: Eduardo Otubo +Message-id: <20190320114559.23708-1-otubo@redhat.com> +Patchwork-id: 84937 +O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network +Bugzilla: 1653131 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +The option NOZEROCONF=yes is not included by default in +/etc/sysconfig/network, which is required by Overcloud instances. The +patch also includes tests for the modifications. + +X-downstream-only: yes +Resolves: rhbz#1653131 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/net/sysconfig.py | 11 ++++++++++- + tests/unittests/test_net.py | 1 - + 2 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 8bd7e88..810b283 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -754,7 +754,16 @@ class Renderer(renderer.Renderer): + # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos + if sysconfig_path.endswith('network'): + util.ensure_dir(os.path.dirname(sysconfig_path)) +- netcfg = [_make_header(), 'NETWORKING=yes'] ++ netcfg = [] ++ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): ++ if 'cloud-init' in line: ++ break ++ if not line.startswith(('NETWORKING=', ++ 'IPV6_AUTOCONF=', ++ 'NETWORKING_IPV6=')): ++ netcfg.append(line) ++ # Now generate the cloud-init portion of sysconfig/network ++ netcfg.extend([_make_header(), 'NETWORKING=yes']) + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 1306a0f..a931a3e 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -1483,7 +1483,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes +- IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +1.8.3.1 + diff --git a/SOURCES/0008-Fix-for-network-configuration-not-persisting-after-r.patch b/SOURCES/0008-Fix-for-network-configuration-not-persisting-after-r.patch new file mode 100644 index 0000000..cb80582 --- /dev/null +++ b/SOURCES/0008-Fix-for-network-configuration-not-persisting-after-r.patch @@ -0,0 +1,125 @@ +From 3bf572e6716815a6c901fa210d940780d58869d5 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 6 Sep 2019 12:12:11 +0200 +Subject: Fix for network configuration not persisting after reboot + +RH-Author: Eduardo Otubo +Message-id: <20190906121211.23172-1-otubo@redhat.com> +Patchwork-id: 90300 +O-Subject: [RHEL-7.8/RHEL-8.1.0 cloud-init PATCH] Fix for network configuration not persisting after reboot +Bugzilla: 1593010 +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Miroslav Rezanina + +The reasons the configuration does not persist after reboot includes +different aspects and they're all fixed on this patch: + + 1) The rpm package doesn't include the systemd-generator and +ds-identify. The systemd-generator is called early in the boot process +that calls ds-identify to check if there's any Data Source available in +the current boot. In the current use case, the Data Source is removed +from the VM on the second boot, this means cloud-init should disable +itself in order to keep the configuration it did in the first boot. + + 2) Even after adding those scripts, cloud-init was still being +executed and the configurations were being lost. The reason for this is +that the cloud-init systemd units had a wrong dependency + + WantedBy: multi-user.target + + Which would start them every time no matter the return of +ds-identify. The fix is to replace the dependency by the systemd unit to +cloud-init.target, which is the main cloud-init target enabled - or in +this case, disabled by ds-identify. The file cloud-init.target was also +missing on rpm package. + +After adding both scripts, the main cloud-init systemd target and +adjusting the systemd dependencies the configuration persists after +reboots and shutdowns. 
+ +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina + +Rebase notes (19.4): +- Using cloud-init-generator.tmpl for cloud-init-generator (upstream change) +- Hack fix in ds-identify handling in cloud-init-generator +--- + redhat/cloud-init.spec.template | 16 ++++++++++++++++ + rhel/systemd/cloud-config.service | 2 +- + rhel/systemd/cloud-final.service | 2 +- + rhel/systemd/cloud-init-local.service | 2 +- + rhel/systemd/cloud-init.service | 2 +- + rhel/systemd/cloud-init.target | 7 +++++++ + systemd/cloud-init-generator.tmpl | 2 +- + 7 files changed, 28 insertions(+), 5 deletions(-) + create mode 100644 rhel/systemd/cloud-init.target + +diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service +index 12ca9df..f3dcd4b 100644 +--- a/rhel/systemd/cloud-config.service ++++ b/rhel/systemd/cloud-config.service +@@ -15,4 +15,4 @@ TimeoutSec=0 + StandardOutput=journal+console + + [Install] +-WantedBy=multi-user.target ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +index 32a83d8..739b7e3 100644 +--- a/rhel/systemd/cloud-final.service ++++ b/rhel/systemd/cloud-final.service +@@ -16,4 +16,4 @@ KillMode=process + StandardOutput=journal+console + + [Install] +-WantedBy=multi-user.target ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +index 656eddb..8f9f6c9 100644 +--- a/rhel/systemd/cloud-init-local.service ++++ b/rhel/systemd/cloud-init-local.service +@@ -28,4 +28,4 @@ TimeoutSec=0 + StandardOutput=journal+console + + [Install] +-WantedBy=multi-user.target ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +index 68fc5f1..d0023a0 100644 +--- a/rhel/systemd/cloud-init.service ++++ b/rhel/systemd/cloud-init.service +@@ -22,4 +22,4 @@ TimeoutSec=0 + StandardOutput=journal+console + + [Install] +-WantedBy=multi-user.target ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.target b/rhel/systemd/cloud-init.target +new file mode 100644 +index 0000000..083c3b6 +--- /dev/null ++++ b/rhel/systemd/cloud-init.target +@@ -0,0 +1,7 @@ ++# cloud-init target is enabled by cloud-init-generator ++# To disable it you can either: ++# a.) boot with kernel cmdline of 'cloud-init=disabled' ++# b.) 
touch a file /etc/cloud/cloud-init.disabled ++[Unit] ++Description=Cloud-init target ++After=multi-user.target +diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl +index 45efa24..ac8becc 100755 +--- a/systemd/cloud-init-generator.tmpl ++++ b/systemd/cloud-init-generator.tmpl +@@ -83,7 +83,7 @@ default() { + + check_for_datasource() { + local ds_rc="" +-{% if variant in ["redhat", "fedora", "centos"] %} ++{% if variant in ["fedora", "centos"] %} + local dsidentify="/usr/libexec/cloud-init/ds-identify" + {% else %} + local dsidentify="/usr/lib/cloud-init/ds-identify" +-- +1.8.3.1 + diff --git a/SOURCES/0008-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch b/SOURCES/0008-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch deleted file mode 100644 index 19f1edc..0000000 --- a/SOURCES/0008-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 1da1ea11647a60339d063e2d613e692d4bfd79bd Mon Sep 17 00:00:00 2001 -From: Vitaly Kuznetsov -Date: Thu, 26 Apr 2018 09:27:49 +0200 -Subject: sysconfig: Don't disable IPV6_AUTOCONF - -RH-Author: Vitaly Kuznetsov -Message-id: <20180426092749.7251-2-vkuznets@redhat.com> -Patchwork-id: 79904 -O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 1/1] sysconfig: Don't disable IPV6_AUTOCONF -Bugzilla: 1578702 -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Cathy Avery -RH-Acked-by: Eduardo Otubo - -Downstream-only commit 118458a3fb ("sysconfig: Don't write BOOTPROTO=dhcp -for ipv6 dhcp") did two things: -1) Disabled BOOTPROTO='dhcp' for dhcp6 setups. This change seems to be - correct as BOOTPROTO is unrelated to IPv6. The change was since merged - upstream (commit a57928d3c314d9568712cd190cb1e721e14c108b). -2) Explicitly disabled AUTOCONF and this broke many valid configurations - using it instead of DHCPV6C. Revert this part of the change. In case - DHCPV6C-only support is needed something like a new 'dhcpv6c_only' - network type needs to be suggested upstream. 
- -X-downstream-only: yes - -Resolves: rhbz#1558854 - -Signed-off-by: Vitaly Kuznetsov -Signed-off-by: Miroslav Rezanina ---- - cloudinit/net/sysconfig.py | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index ec166cf..ae0554e 100644 ---- a/cloudinit/net/sysconfig.py -+++ b/cloudinit/net/sysconfig.py -@@ -310,7 +310,6 @@ class Renderer(renderer.Renderer): - if subnet_type == 'dhcp6': - iface_cfg['IPV6INIT'] = True - iface_cfg['DHCPV6C'] = True -- iface_cfg['IPV6_AUTOCONF'] = False - elif subnet_type in ['dhcp4', 'dhcp']: - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': --- -1.8.3.1 - diff --git a/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch b/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch deleted file mode 100644 index 6b1b887..0000000 --- a/SOURCES/0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch +++ /dev/null @@ -1,295 +0,0 @@ -From c1c6d0b586e5556a37b9b813afbb4e6a24921adf Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 23 Jan 2019 12:30:21 +0100 -Subject: net: Wait for dhclient to daemonize before reading lease file - -RH-Author: Eduardo Otubo -Message-id: <20190123123021.32708-1-otubo@redhat.com> -Patchwork-id: 84095 -O-Subject: [RHEL-7.7 cloud-init PATCH] net: Wait for dhclient to daemonize before reading lease file -Bugzilla: 1632967 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Miroslav Rezanina - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1632967 -Brew: https://bugzilla.redhat.com/show_bug.cgi?id=1632967 -Tested: Me and upstream - -commit fdadcb5fae51f4e6799314ab98e3aec56c79b17c -Author: Jason Zions -Date: Tue Jan 15 21:37:17 2019 +0000 - - net: Wait for dhclient to daemonize before reading lease file - - cloud-init uses dhclient to fetch the DHCP lease so it can extract - DHCP options. dhclient creates the leasefile, then writes to it; - simply waiting for the leasefile to appear creates a race between - dhclient and cloud-init. Instead, wait for dhclient to be parented by - init. At that point, we know it has written to the leasefile, so it's - safe to copy the file and kill the process. - - cloud-init creates a temporary directory in which to execute dhclient, - and deletes that directory after it has killed the process. If - cloud-init abandons waiting for dhclient to daemonize, it will still - attempt to delete the temporary directory, but will not report an - exception should that attempt fail. 
- - LP: #1794399 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/net/dhcp.py | 44 +++++++++++++++++++++++++++----------- - cloudinit/net/tests/test_dhcp.py | 15 ++++++++++--- - cloudinit/temp_utils.py | 4 ++-- - cloudinit/tests/test_temp_utils.py | 18 +++++++++++++++- - cloudinit/util.py | 16 +++++++++++++- - tests/unittests/test_util.py | 6 ++++++ - 6 files changed, 83 insertions(+), 20 deletions(-) - -diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py -index 0db991d..c98a97c 100644 ---- a/cloudinit/net/dhcp.py -+++ b/cloudinit/net/dhcp.py -@@ -9,6 +9,7 @@ import logging - import os - import re - import signal -+import time - - from cloudinit.net import ( - EphemeralIPv4Network, find_fallback_nic, get_devicelist, -@@ -127,7 +128,9 @@ def maybe_perform_dhcp_discovery(nic=None): - if not dhclient_path: - LOG.debug('Skip dhclient configuration: No dhclient command found.') - return [] -- with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: -+ with temp_utils.tempdir(rmtree_ignore_errors=True, -+ prefix='cloud-init-dhcp-', -+ needs_exe=True) as tdir: - # Use /var/tmp because /run/cloud-init/tmp is mounted noexec - return dhcp_discovery(dhclient_path, nic, tdir) - -@@ -195,24 +198,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): - '-pf', pid_file, interface, '-sf', '/bin/true'] - util.subp(cmd, capture=True) - -- # dhclient doesn't write a pid file until after it forks when it gets a -- # proper lease response. Since cleandir is a temp directory that gets -- # removed, we need to wait for that pidfile creation before the -- # cleandir is removed, otherwise we get FileNotFound errors. -+ # Wait for pid file and lease file to appear, and for the process -+ # named by the pid file to daemonize (have pid 1 as its parent). If we -+ # try to read the lease file before daemonization happens, we might try -+ # to read it before the dhclient has actually written it. We also have -+ # to wait until the dhclient has become a daemon so we can be sure to -+ # kill the correct process, thus freeing cleandir to be deleted back -+ # up the callstack. 
- missing = util.wait_for_files( - [pid_file, lease_file], maxwait=5, naplen=0.01) - if missing: - LOG.warning("dhclient did not produce expected files: %s", - ', '.join(os.path.basename(f) for f in missing)) - return [] -- pid_content = util.load_file(pid_file).strip() -- try: -- pid = int(pid_content) -- except ValueError: -- LOG.debug( -- "pid file contains non-integer content '%s'", pid_content) -- else: -- os.kill(pid, signal.SIGKILL) -+ -+ ppid = 'unknown' -+ for _ in range(0, 1000): -+ pid_content = util.load_file(pid_file).strip() -+ try: -+ pid = int(pid_content) -+ except ValueError: -+ pass -+ else: -+ ppid = util.get_proc_ppid(pid) -+ if ppid == 1: -+ LOG.debug('killing dhclient with pid=%s', pid) -+ os.kill(pid, signal.SIGKILL) -+ return parse_dhcp_lease_file(lease_file) -+ time.sleep(0.01) -+ -+ LOG.error( -+ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', -+ pid_content, ppid, 0.01 * 1000 -+ ) - return parse_dhcp_lease_file(lease_file) - - -diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py -index cd3e732..79e8842 100644 ---- a/cloudinit/net/tests/test_dhcp.py -+++ b/cloudinit/net/tests/test_dhcp.py -@@ -145,16 +145,20 @@ class TestDHCPDiscoveryClean(CiTestCase): - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) - self.assertIn( -- "pid file contains non-integer content ''", self.logs.getvalue()) -+ "dhclient(pid=, parentpid=unknown) failed " -+ "to daemonize after 10.0 seconds", -+ self.logs.getvalue()) - m_kill.assert_not_called() - -+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.wait_for_files') - @mock.patch('cloudinit.net.dhcp.util.subp') - def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, - m_subp, - m_wait, -- m_kill): -+ m_kill, -+ m_getppid): - """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" - tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') -@@ -164,6 +168,7 @@ class TestDHCPDiscoveryClean(CiTestCase): - pidfile = self.tmp_path('dhclient.pid', tmpdir) - leasefile = self.tmp_path('dhcp.leases', tmpdir) - m_wait.return_value = [pidfile] # Return the missing pidfile wait for -+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized - self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) - self.assertEqual( - mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), -@@ -173,9 +178,10 @@ class TestDHCPDiscoveryClean(CiTestCase): - self.logs.getvalue()) - m_kill.assert_not_called() - -+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.subp') -- def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): -+ def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): - """dhcp_discovery brings up the interface and runs dhclient. - - It also returns the parsed dhcp.leases file generated in the sandbox. 
-@@ -197,6 +203,7 @@ class TestDHCPDiscoveryClean(CiTestCase): - pid_file = os.path.join(tmpdir, 'dhclient.pid') - my_pid = 1 - write_file(pid_file, "%d\n" % my_pid) -+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized - - self.assertItemsEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', -@@ -355,3 +362,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): - self.assertEqual(fake_lease, lease) - # Ensure that dhcp discovery occurs - m_dhcp.called_once_with() -+ -+# vi: ts=4 expandtab -diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py -index c98a1b5..346276e 100644 ---- a/cloudinit/temp_utils.py -+++ b/cloudinit/temp_utils.py -@@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs): - - - @contextlib.contextmanager --def tempdir(**kwargs): -+def tempdir(rmtree_ignore_errors=False, **kwargs): - # This seems like it was only added in python 3.2 - # Make it since its useful... - # See: http://bugs.python.org/file12970/tempdir.patch -@@ -89,7 +89,7 @@ def tempdir(**kwargs): - try: - yield tdir - finally: -- shutil.rmtree(tdir) -+ shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) - - - def mkdtemp(**kwargs): -diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py -index ffbb92c..4a52ef8 100644 ---- a/cloudinit/tests/test_temp_utils.py -+++ b/cloudinit/tests/test_temp_utils.py -@@ -2,8 +2,9 @@ - - """Tests for cloudinit.temp_utils""" - --from cloudinit.temp_utils import mkdtemp, mkstemp -+from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir - from cloudinit.tests.helpers import CiTestCase, wrap_and_call -+import os - - - class TestTempUtils(CiTestCase): -@@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase): - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) - -+ def test_tempdir_error_suppression(self): -+ """test tempdir suppresses errors during directory removal.""" -+ -+ with self.assertRaises(OSError): -+ with tempdir(prefix='cloud-init-dhcp-') as tdir: -+ os.rmdir(tdir) -+ # As a result, the directory is already gone, -+ # so shutil.rmtree should raise OSError -+ -+ with tempdir(rmtree_ignore_errors=True, -+ prefix='cloud-init-dhcp-') as tdir: -+ os.rmdir(tdir) -+ # Since the directory is already gone, shutil.rmtree would raise -+ # OSError, but we suppress that -+ - # vi: ts=4 expandtab -diff --git a/cloudinit/util.py b/cloudinit/util.py -index 7800f7b..a84112a 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -2861,7 +2861,6 @@ def mount_is_read_write(mount_point): - mount_opts = result[-1].split(',') - return mount_opts[0] == 'rw' - -- - def udevadm_settle(exists=None, timeout=None): - """Invoke udevadm settle with optional exists and timeout parameters""" - settle_cmd = ["udevadm", "settle"] -@@ -2875,5 +2874,20 @@ def udevadm_settle(exists=None, timeout=None): - - return subp(settle_cmd) - -+def get_proc_ppid(pid): -+ """ -+ Return the parent pid of a process. -+ """ -+ ppid = 0 -+ try: -+ contents = load_file("/proc/%s/stat" % pid, quiet=True) -+ except IOError as e: -+ LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) -+ if contents: -+ parts = contents.split(" ", 4) -+ # man proc says -+ # ppid %d (4) The PID of the parent. 
-+ ppid = int(parts[3]) -+ return ppid - - # vi: ts=4 expandtab -diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py -index 5a14479..8aebcd6 100644 ---- a/tests/unittests/test_util.py -+++ b/tests/unittests/test_util.py -@@ -1114,6 +1114,12 @@ class TestLoadShellContent(helpers.TestCase): - 'key3="val3 #tricky"', - '']))) - -+ def test_get_proc_ppid(self): -+ """get_proc_ppid returns correct parent pid value.""" -+ my_pid = os.getpid() -+ my_ppid = os.getppid() -+ self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) -+ - - class TestGetProcEnv(helpers.TestCase): - """test get_proc_env.""" --- -1.8.3.1 - diff --git a/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch b/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch deleted file mode 100644 index b3a7f4a..0000000 --- a/SOURCES/0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch +++ /dev/null @@ -1,89 +0,0 @@ -From f8d348243bd32fe3c3f0b55c2a216e95d44c5abd Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Thu, 28 Feb 2019 12:38:36 +0100 -Subject: cloud-init-per: don't use dashes in sem names - -RH-Author: Eduardo Otubo -Message-id: <20190228123836.17979-1-otubo@redhat.com> -Patchwork-id: 84743 -O-Subject: [RHEL-7.7 cloud-init PATCH] This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 -Bugzilla: 1664876 -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -From: Vitaly Kuznetsov - - It was found that when there is a dash in cloud-init-per command - name and cloud-init-per is executed through cloud-init's bootcmd, e.g: - - bootcmd: - - cloud-init-per instance mycmd-bootcmd /usr/bin/mycmd - - the command is executed on each boot. However, running the same - cloud-init-per command manually after boot doesn't reveal the issue. Turns - out the issue comes from 'migrator' cloud-init module which renames all - files in /var/lib/cloud/instance/sem/ replacing dashes with underscores. As - migrator runs before bootcmd it renames - - /var/lib/cloud/instance/sem/bootper.mycmd-bootcmd.instance - to - /var/lib/cloud/instance/sem/bootper.mycmd_bootcmd.instance - - so cloud-init-per doesn't see it and thinks that the comment was never ran - before. On next boot the sequence repeats. - - There are multiple ways to resolve the issue. This patch takes the - following approach: 'canonicalize' sem names by replacing dashes with - underscores (this is consistent with post-'migrator' contents of - /var/lib/cloud/instance/sem/). We, however, need to be careful: in case - someone had a command with dashes before and he had migrator module enables - we need to see the old sem file (or the command will run again and this can - be as bad as formatting a partition!) so we add a small 'migrator' part to - cloud-init-per script itself checking for legacy sem names. 
- - Signed-off-by: Vitaly Kuznetsov - -commit 9cf9d8cdd3a8fd7d4d425f7051122d0ac8af2bbd -Author: Vitaly Kuznetsov -Date: Mon Feb 18 22:55:49 2019 +0000 - - This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 - -Resolves: rhbz#1664876 -X-downstream-only: false - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - tools/cloud-init-per | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/tools/cloud-init-per b/tools/cloud-init-per -index 7d6754b..eae3e93 100755 ---- a/tools/cloud-init-per -+++ b/tools/cloud-init-per -@@ -38,7 +38,7 @@ fi - [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } - [ $# -ge 3 ] || { Usage 1>&2; exit 1; } - freq=$1 --name=$2 -+name=${2/-/_} - shift 2; - - [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" -@@ -53,6 +53,12 @@ esac - [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || - fail "failed to make directory for ${sem}" - -+# Rename legacy sem files with dashes in their names. Do not overwrite existing -+# sem files to prevent clobbering those which may have been created from calls -+# outside of cloud-init. -+sem_legacy="${sem/_/-}" -+[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" -+ - [ "$freq" != "always" -a -e "$sem" ] && exit 0 - "$@" - ret=$? --- -1.8.3.1 - diff --git a/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch b/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch deleted file mode 100644 index e105056..0000000 --- a/SOURCES/0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch +++ /dev/null @@ -1,571 +0,0 @@ -From fc47793e3fedeafdb19e3c3adfbb9c2be82b64c0 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 6 Mar 2019 14:20:18 +0100 -Subject: azure: Filter list of ssh keys pulled from fabric - -RH-Author: Eduardo Otubo -Message-id: <20190306142018.8902-1-otubo@redhat.com> -Patchwork-id: 84807 -O-Subject: [RHEL-7.7 cloud-init PATCH] azure: Filter list of ssh keys pulled from fabric -Bugzilla: 1684040 -RH-Acked-by: Cathy Avery -RH-Acked-by: Vitaly Kuznetsov - -From: "Jason Zions (MSFT)" - -commit 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee -Author: Jason Zions (MSFT) -Date: Fri Feb 22 13:26:31 2019 +0000 - - azure: Filter list of ssh keys pulled from fabric - - The Azure data source is expected to expose a list of - ssh keys for the user-to-be-provisioned in the crawled - metadata. When configured to use the __builtin__ agent - this list is built by the WALinuxAgentShim. The shim - retrieves the full set of certificates and public keys - exposed to the VM from the wireserver, extracts any - ssh keys it can, and returns that list. - - This fix reduces that list of ssh keys to just the - ones whose fingerprints appear in the "administrative - user" section of the ovf-env.xml file. The Azure - control plane exposes other ssh keys to the VM for - other reasons, but those should not be added to the - authorized_keys file for the provisioned user. 
- -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 13 +- - cloudinit/sources/helpers/azure.py | 109 ++++++++++----- - tests/data/azure/parse_certificates_fingerprints | 4 + - tests/data/azure/parse_certificates_pem | 152 +++++++++++++++++++++ - tests/data/azure/pubkey_extract_cert | 13 ++ - tests/data/azure/pubkey_extract_ssh_key | 1 + - .../unittests/test_datasource/test_azure_helper.py | 71 +++++++++- - 7 files changed, 322 insertions(+), 41 deletions(-) - create mode 100644 tests/data/azure/parse_certificates_fingerprints - create mode 100644 tests/data/azure/parse_certificates_pem - create mode 100644 tests/data/azure/pubkey_extract_cert - create mode 100644 tests/data/azure/pubkey_extract_ssh_key - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 7dbeb04..2062ca5 100644 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): - if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: - self.bounce_network_with_azure_hostname() - -+ pubkey_info = self.cfg.get('_pubkeys', None) - metadata_func = partial(get_metadata_from_fabric, - fallback_lease_file=self. -- dhclient_lease_file) -+ dhclient_lease_file, -+ pubkey_info=pubkey_info) - else: - metadata_func = self.get_metadata_from_agent - -@@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): - "Error communicating with Azure fabric; You may experience." - "connectivity issues.", exc_info=True) - return False -+ - util.del_file(REPORTED_READY_MARKER_FILE) - util.del_file(REPROVISION_MARKER_FILE) - return fabric_data -@@ -909,13 +912,15 @@ def find_child(node, filter_func): - def load_azure_ovf_pubkeys(sshnode): - # This parses a 'SSH' node formatted like below, and returns - # an array of dicts. -- # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', -- # 'path': 'where/to/go'}] -+ # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', -+ # 'path': '/where/to/go'}] - # - # -- # ABC/ABC -+ # ABC/x/y/z - # ... - # -+ # Under some circumstances, there may be a element along with the -+ # Fingerprint and Path. Pass those along if they appear. 
- results = find_child(sshnode, lambda n: n.localName == "PublicKeys") - if len(results) == 0: - return [] -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index e5696b1..2829dd2 100644 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -138,9 +138,36 @@ class OpenSSLManager(object): - self.certificate = certificate - LOG.debug('New certificate generated.') - -- def parse_certificates(self, certificates_xml): -- tag = ElementTree.fromstring(certificates_xml).find( -- './/Data') -+ @staticmethod -+ def _run_x509_action(action, cert): -+ cmd = ['openssl', 'x509', '-noout', action] -+ result, _ = util.subp(cmd, data=cert) -+ return result -+ -+ def _get_ssh_key_from_cert(self, certificate): -+ pub_key = self._run_x509_action('-pubkey', certificate) -+ keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] -+ ssh_key, _ = util.subp(keygen_cmd, data=pub_key) -+ return ssh_key -+ -+ def _get_fingerprint_from_cert(self, certificate): -+ """openssl x509 formats fingerprints as so: -+ 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ -+ B6:A8:BF:27:D4:73\n' -+ -+ Azure control plane passes that fingerprint as so: -+ '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' -+ """ -+ raw_fp = self._run_x509_action('-fingerprint', certificate) -+ eq = raw_fp.find('=') -+ octets = raw_fp[eq+1:-1].split(':') -+ return ''.join(octets) -+ -+ def _decrypt_certs_from_xml(self, certificates_xml): -+ """Decrypt the certificates XML document using the our private key; -+ return the list of certs and private keys contained in the doc. -+ """ -+ tag = ElementTree.fromstring(certificates_xml).find('.//Data') - certificates_content = tag.text - lines = [ - b'MIME-Version: 1.0', -@@ -151,32 +178,30 @@ class OpenSSLManager(object): - certificates_content.encode('utf-8'), - ] - with cd(self.tmpdir): -- with open('Certificates.p7m', 'wb') as f: -- f.write(b'\n'.join(lines)) - out, _ = util.subp( -- 'openssl cms -decrypt -in Certificates.p7m -inkey' -+ 'openssl cms -decrypt -in /dev/stdin -inkey' - ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' - ' -password pass:'.format(**self.certificate_names), -- shell=True) -- private_keys, certificates = [], [] -+ shell=True, data=b'\n'.join(lines)) -+ return out -+ -+ def parse_certificates(self, certificates_xml): -+ """Given the Certificates XML document, return a dictionary of -+ fingerprints and associated SSH keys derived from the certs.""" -+ out = self._decrypt_certs_from_xml(certificates_xml) - current = [] -+ keys = {} - for line in out.splitlines(): - current.append(line) - if re.match(r'[-]+END .*?KEY[-]+$', line): -- private_keys.append('\n'.join(current)) -+ # ignore private_keys - current = [] - elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): -- certificates.append('\n'.join(current)) -+ certificate = '\n'.join(current) -+ ssh_key = self._get_ssh_key_from_cert(certificate) -+ fingerprint = self._get_fingerprint_from_cert(certificate) -+ keys[fingerprint] = ssh_key - current = [] -- keys = [] -- for certificate in certificates: -- with cd(self.tmpdir): -- public_key, _ = util.subp( -- 'openssl x509 -noout -pubkey |' -- 'ssh-keygen -i -m PKCS8 -f /dev/stdin', -- data=certificate, -- shell=True) -- keys.append(public_key) - return keys - - -@@ -206,7 +231,6 @@ class WALinuxAgentShim(object): - self.dhcpoptions = dhcp_options - self._endpoint = None - self.openssl_manager = None -- self.values = {} - self.lease_file = fallback_lease_file - - def clean_up(self): 
-@@ -328,8 +352,9 @@ class WALinuxAgentShim(object): - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) - return endpoint_ip_address - -- def register_with_azure_and_fetch_data(self): -- self.openssl_manager = OpenSSLManager() -+ def register_with_azure_and_fetch_data(self, pubkey_info=None): -+ if self.openssl_manager is None: -+ self.openssl_manager = OpenSSLManager() - http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) - LOG.info('Registering with Azure...') - attempts = 0 -@@ -347,16 +372,37 @@ class WALinuxAgentShim(object): - attempts += 1 - LOG.debug('Successfully fetched GoalState XML.') - goal_state = GoalState(response.contents, http_client) -- public_keys = [] -- if goal_state.certificates_xml is not None: -+ ssh_keys = [] -+ if goal_state.certificates_xml is not None and pubkey_info is not None: - LOG.debug('Certificate XML found; parsing out public keys.') -- public_keys = self.openssl_manager.parse_certificates( -+ keys_by_fingerprint = self.openssl_manager.parse_certificates( - goal_state.certificates_xml) -- data = { -- 'public-keys': public_keys, -- } -+ ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) - self._report_ready(goal_state, http_client) -- return data -+ return {'public-keys': ssh_keys} -+ -+ def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): -+ """cloud-init expects a straightforward array of keys to be dropped -+ into the user's authorized_keys file. Azure control plane exposes -+ multiple public keys to the VM via wireserver. Select just the -+ user's key(s) and return them, ignoring any other certs. -+ """ -+ keys = [] -+ for pubkey in pubkey_info: -+ if 'value' in pubkey and pubkey['value']: -+ keys.append(pubkey['value']) -+ elif 'fingerprint' in pubkey and pubkey['fingerprint']: -+ fingerprint = pubkey['fingerprint'] -+ if fingerprint in keys_by_fingerprint: -+ keys.append(keys_by_fingerprint[fingerprint]) -+ else: -+ LOG.warning("ovf-env.xml specified PublicKey fingerprint " -+ "%s not found in goalstate XML", fingerprint) -+ else: -+ LOG.warning("ovf-env.xml specified PublicKey with neither " -+ "value nor fingerprint: %s", pubkey) -+ -+ return keys - - def _report_ready(self, goal_state, http_client): - LOG.debug('Reporting ready to Azure fabric.') -@@ -373,11 +419,12 @@ class WALinuxAgentShim(object): - LOG.info('Reported ready to Azure fabric.') - - --def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): -+def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, -+ pubkey_info=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, - dhcp_options=dhcp_opts) - try: -- return shim.register_with_azure_and_fetch_data() -+ return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) - finally: - shim.clean_up() - -diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints -new file mode 100644 -index 0000000..f7293c5 ---- /dev/null -+++ b/tests/data/azure/parse_certificates_fingerprints -@@ -0,0 +1,4 @@ -+ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 -+073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 -+4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E -+929130695289B450FE45DCD5F6EF0CDE69865867 -diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem -new file mode 100644 -index 0000000..3521ea3 ---- /dev/null -+++ b/tests/data/azure/parse_certificates_pem -@@ -0,0 +1,152 @@ -+Bag Attributes -+ localKeyID: 01 00 00 00 -+ Microsoft CSP Name: Microsoft Enhanced 
Cryptographic Provider v1.0 -+Key Attributes -+ X509v3 Key Usage: 10 -+-----BEGIN PRIVATE KEY----- -+MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP -+W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 -+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz -+eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 -+7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ -+47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L -+Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT -+nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 -+lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn -+C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb -+EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG -+x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh -++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU -+cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH -+gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X -+I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB -+lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 -+v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed -+Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId -+0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA -+nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe -+onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG -+WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 -+qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 -+1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt -+RyWd+p2lYvFkC/jORQtDMY4uW1o= -+-----END PRIVATE KEY----- -+Bag Attributes -+ localKeyID: 02 00 00 00 -+ Microsoft CSP Name: Microsoft Strong Cryptographic Provider -+Key Attributes -+ X509v3 Key Usage: 10 -+-----BEGIN PRIVATE KEY----- -+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 -+FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd -+x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW -+dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC -+gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA -+N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua -+tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd -+0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn -+giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 -+LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci -+xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh -+2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u -+n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ -+WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ -+R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 -+Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx -+E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz -+MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 -+SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW -+EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 -+8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii -+qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU -+FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 
-+dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz -+kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y -+R/fA67HXFSTT+OncdRpY1NOn -+-----END PRIVATE KEY----- -+Bag Attributes: -+subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US -+issuer=/CN=Root Agency -+-----BEGIN CERTIFICATE----- -+MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 -+IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV -+BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv -+cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE -+BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C -+k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN -+jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe -+eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ -+sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo -+OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT -+bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= -+-----END CERTIFICATE----- -+Bag Attributes -+ localKeyID: 01 00 00 00 -+subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com -+issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com -+-----BEGIN CERTIFICATE----- -+MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD -+VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES -+MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o -+Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 -+MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM -+CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m -+dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB -+FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -+CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg -+ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF -+hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI -+B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi -+quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 -+Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 -+pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw -+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg -+kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX -+R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF -+im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e -+mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz -+Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP -+3g== -+-----END CERTIFICATE----- -+Bag Attributes -+ localKeyID: 02 00 00 00 -+subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted -+issuer=/CN=Microsoft.ManagedIdentity -+-----BEGIN CERTIFICATE----- -+MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL -+BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy -+MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny -+aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz -+b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w -+dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB -+BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN 
-+2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee -+0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW -+2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw -+tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw -+Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P -+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD -+VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB -+AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe -+7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b -+7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 -+jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 -+UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC -+pkSoWwF1QAnHn0eokR9E1rU= -+-----END CERTIFICATE----- -+Bag Attributes: -+subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US -+issuer=/CN=Root Agency -+-----BEGIN CERTIFICATE----- -+MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 -+IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV -+BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv -+cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE -+BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb -+Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi -+nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW -+vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ -+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y -+WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 -+t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= -+-----END CERTIFICATE----- -diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert -new file mode 100644 -index 0000000..ce9b852 ---- /dev/null -+++ b/tests/data/azure/pubkey_extract_cert -@@ -0,0 +1,13 @@ -+-----BEGIN CERTIFICATE----- -+MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 -+IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV -+BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv -+cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE -+BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb -+Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi -+nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW -+vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ -+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y -+WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 -+t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= -+-----END CERTIFICATE----- -diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key -new file mode 100644 -index 0000000..54d749e ---- /dev/null -+++ b/tests/data/azure/pubkey_extract_ssh_key -@@ -0,0 +1 @@ -+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index 26b2b93..0255616 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ 
b/tests/unittests/test_datasource/test_azure_helper.py -@@ -1,11 +1,13 @@ - # This file is part of cloud-init. See LICENSE file for license information. - - import os -+import unittest2 - from textwrap import dedent - - from cloudinit.sources.helpers import azure as azure_helper - from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir - -+from cloudinit.util import load_file - from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim - - GOAL_STATE_TEMPLATE = """\ -@@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): - self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) - - -+class TestOpenSSLManagerActions(CiTestCase): -+ -+ def setUp(self): -+ super(TestOpenSSLManagerActions, self).setUp() -+ -+ self.allowed_subp = True -+ -+ def _data_file(self, name): -+ path = 'tests/data/azure' -+ return os.path.join(path, name) -+ -+ @unittest2.skip("todo move to cloud_test") -+ def test_pubkey_extract(self): -+ cert = load_file(self._data_file('pubkey_extract_cert')) -+ good_key = load_file(self._data_file('pubkey_extract_ssh_key')) -+ sslmgr = azure_helper.OpenSSLManager() -+ key = sslmgr._get_ssh_key_from_cert(cert) -+ self.assertEqual(good_key, key) -+ -+ good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' -+ fingerprint = sslmgr._get_fingerprint_from_cert(cert) -+ self.assertEqual(good_fingerprint, fingerprint) -+ -+ @unittest2.skip("todo move to cloud_test") -+ @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') -+ def test_parse_certificates(self, mock_decrypt_certs): -+ """Azure control plane puts private keys as well as certificates -+ into the Certificates XML object. Make sure only the public keys -+ from certs are extracted and that fingerprints are converted to -+ the form specified in the ovf-env.xml file. 
-+ """ -+ cert_contents = load_file(self._data_file('parse_certificates_pem')) -+ fingerprints = load_file(self._data_file( -+ 'parse_certificates_fingerprints') -+ ).splitlines() -+ mock_decrypt_certs.return_value = cert_contents -+ sslmgr = azure_helper.OpenSSLManager() -+ keys_by_fp = sslmgr.parse_certificates('') -+ for fp in keys_by_fp.keys(): -+ self.assertIn(fp, fingerprints) -+ for fp in fingerprints: -+ self.assertIn(fp, keys_by_fp) -+ -+ - class TestWALinuxAgentShim(CiTestCase): - - def setUp(self): -@@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): - - def test_certificates_used_to_determine_public_keys(self): - shim = wa_shim() -- data = shim.register_with_azure_and_fetch_data() -+ """if register_with_azure_and_fetch_data() isn't passed some info about -+ the user's public keys, there's no point in even trying to parse -+ the certificates -+ """ -+ mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, -+ {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] -+ certs = {'fp1': 'expected-key', -+ 'fp2': 'should-not-be-found', -+ 'fp3': 'expected-no-value-key', -+ } -+ sslmgr = self.OpenSSLManager.return_value -+ sslmgr.parse_certificates.return_value = certs -+ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual( - [mock.call(self.GoalState.return_value.certificates_xml)], -- self.OpenSSLManager.return_value.parse_certificates.call_args_list) -- self.assertEqual( -- self.OpenSSLManager.return_value.parse_certificates.return_value, -- data['public-keys']) -+ sslmgr.parse_certificates.call_args_list) -+ self.assertIn('expected-key', data['public-keys']) -+ self.assertIn('expected-no-value-key', data['public-keys']) -+ self.assertNotIn('should-not-be-found', data['public-keys']) - - def test_absent_certificates_produces_empty_public_keys(self): -+ mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] - self.GoalState.return_value.certificates_xml = None - shim = wa_shim() -- data = shim.register_with_azure_and_fetch_data() -+ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual([], data['public-keys']) - - def test_correct_url_used_for_report_ready(self): --- -1.8.3.1 - diff --git a/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch deleted file mode 100644 index f3c3252..0000000 --- a/SOURCES/0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 320cbee1530952d31e9e0a3047ba292e0d6d16fc Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 20 Mar 2019 11:45:59 +0100 -Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network - -RH-Author: Eduardo Otubo -Message-id: <20190320114559.23708-1-otubo@redhat.com> -Patchwork-id: 84937 -O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network -Bugzilla: 1653131 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -The option NOZEROCONF=yes is not included by default in -/etc/sysconfig/network, which is required by Overcloud instances. The -patch also includes tests for the modifications. 
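
A hedged sketch of the merge behaviour the sysconfig renderer hunk below implements: keep admin-authored lines, drop any keys cloud-init previously managed, then append the cloud-init block (including NOZEROCONF=yes, per this patch's subject). The function name and header comment are illustrative, not the renderer's real API.

    MANAGED_PREFIXES = ('NETWORKING=', 'IPV6_AUTOCONF=', 'NETWORKING_IPV6=')

    def merge_sysconfig_network(existing_text, use_ipv6=False):
        kept = []
        for line in existing_text.split('\n'):
            if 'cloud-init' in line:
                break  # everything below the marker was written by cloud-init
            if not line.startswith(MANAGED_PREFIXES):
                kept.append(line)
        kept += ['# Created by cloud-init', 'NETWORKING=yes']
        if use_ipv6:
            kept += ['NETWORKING_IPV6=yes', 'IPV6_AUTOCONF=no']
        kept.append('NOZEROCONF=yes')
        return '\n'.join(kept)

    # Pre-existing admin settings survive; stale managed keys do not.
    print(merge_sysconfig_network('HOSTNAME=node1\nNETWORKING=no\n'))
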
- -X-downstream-only: yes -Resolves: rhbz#1653131 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/net/sysconfig.py | 11 ++++++++++- - tests/unittests/test_net.py | 1 - - 2 files changed, 10 insertions(+), 2 deletions(-) - -diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index ae0554e..d94c1f5 100644 ---- a/cloudinit/net/sysconfig.py -+++ b/cloudinit/net/sysconfig.py -@@ -661,7 +661,16 @@ class Renderer(renderer.Renderer): - # Distros configuring /etc/sysconfig/network as a file e.g. Centos - if sysconfig_path.endswith('network'): - util.ensure_dir(os.path.dirname(sysconfig_path)) -- netcfg = [_make_header(), 'NETWORKING=yes'] -+ netcfg = [] -+ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): -+ if 'cloud-init' in line: -+ break -+ if not line.startswith(('NETWORKING=', -+ 'IPV6_AUTOCONF=', -+ 'NETWORKING_IPV6=')): -+ netcfg.append(line) -+ # Now generate the cloud-init portion of sysconfig/network -+ netcfg.extend([_make_header(), 'NETWORKING=yes']) - if network_state.use_ipv6: - netcfg.append('NETWORKING_IPV6=yes') - netcfg.append('IPV6_AUTOCONF=no') -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 8bcafe0..5f1aa3e 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -886,7 +886,6 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes -- IPV6_AUTOCONF=no - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - ONBOOT=yes --- -1.8.3.1 - diff --git a/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch b/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch deleted file mode 100644 index 1e5d0ff..0000000 --- a/SOURCES/ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch +++ /dev/null @@ -1,473 +0,0 @@ -From 60991b1241a5efb585df889d4343007e501fd70c Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 5 May 2020 08:08:15 +0200 -Subject: [PATCH 2/5] Add support for publishing host keys to GCE guest - attributes - -RH-Author: Eduardo Otubo -Message-id: <20200504085238.25884-3-otubo@redhat.com> -Patchwork-id: 96243 -O-Subject: [RHEL-7.8.z cloud-init PATCH 2/5] Add support for publishing host keys to GCE guest attributes -Bugzilla: 1827207 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -commit 155847209e6a3ed5face91a133d8488a703f3f93 -Author: Rick Wright -Date: Fri Aug 9 17:11:05 2019 +0000 - - Add support for publishing host keys to GCE guest attributes - - This adds an empty publish_host_keys() method to the default datasource - that is called by cc_ssh.py. This feature can be controlled by the - 'ssh_publish_hostkeys' config option. It is enabled by default but can - be disabled by setting 'enabled' to false. Also, a blacklist of key - types is supported. - - In addition, this change implements ssh_publish_hostkeys() for the GCE - datasource, attempting to write the hostkeys to the instance's guest - attributes. Using these hostkeys for ssh connections is currently - supported by the alpha version of Google's 'gcloud' command-line tool. - - (On Google Compute Engine, this feature will be enabled by setting the - 'enable-guest-attributes' metadata key to 'true' for the - project/instance that you would like to use this feature for. 
When - connecting to the instance for the first time using 'gcloud compute ssh' - the hostkeys will be read from the guest attributes for the instance and - written to the user's local known_hosts file for Google Compute Engine - instances.) - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/config/cc_ssh.py | 55 +++++++++ - cloudinit/config/tests/test_ssh.py | 166 ++++++++++++++++++++++++++++ - cloudinit/sources/DataSourceGCE.py | 22 +++- - cloudinit/sources/__init__.py | 10 ++ - cloudinit/url_helper.py | 9 +- - tests/unittests/test_datasource/test_gce.py | 18 +++ - 6 files changed, 274 insertions(+), 6 deletions(-) - -diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py -index f8f7cb3..53f6939 100755 ---- a/cloudinit/config/cc_ssh.py -+++ b/cloudinit/config/cc_ssh.py -@@ -91,6 +91,9 @@ public keys. - ssh_authorized_keys: - - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... -+ ssh_publish_hostkeys: -+ enabled: (Defaults to true) -+ blacklist: (Defaults to [dsa]) - """ - - import glob -@@ -104,6 +107,10 @@ from cloudinit import util - - GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] - KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' -+PUBLISH_HOST_KEYS = True -+# Don't publish the dsa hostkey by default since OpenSSH recommends not using -+# it. -+HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] - - CONFIG_KEY_TO_FILE = {} - PRIV_TO_PUB = {} -@@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args): - util.logexc(log, "Failed generating key type %s to " - "file %s", keytype, keyfile) - -+ if "ssh_publish_hostkeys" in cfg: -+ host_key_blacklist = util.get_cfg_option_list( -+ cfg["ssh_publish_hostkeys"], "blacklist", -+ HOST_KEY_PUBLISH_BLACKLIST) -+ publish_hostkeys = util.get_cfg_option_bool( -+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) -+ else: -+ host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST -+ publish_hostkeys = PUBLISH_HOST_KEYS -+ -+ if publish_hostkeys: -+ hostkeys = get_public_host_keys(blacklist=host_key_blacklist) -+ try: -+ cloud.datasource.publish_host_keys(hostkeys) -+ except Exception as e: -+ util.logexc(log, "Publishing host keys failed!") -+ - try: - (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) - (user, _user_config) = ug_util.extract_default(users) -@@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): - - ssh_util.setup_user_keys(keys, 'root', options=key_prefix) - -+ -+def get_public_host_keys(blacklist=None): -+ """Read host keys from /etc/ssh/*.pub files and return them as a list. -+ -+ @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] -+ @returns: List of keys, each formatted as a two-element tuple. -+ e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] -+ """ -+ public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) -+ key_list = [] -+ blacklist_files = [] -+ if blacklist: -+ # Convert blacklist to filenames: -+ # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' -+ blacklist_files = [public_key_file_tmpl % (key_type,) -+ for key_type in blacklist] -+ # Get list of public key files and filter out blacklisted files. -+ file_list = [hostfile for hostfile -+ in glob.glob(public_key_file_tmpl % ('*',)) -+ if hostfile not in blacklist_files] -+ -+ # Read host key files, retrieve first two fields as a tuple and -+ # append that tuple to key_list. 
-+ for file_name in file_list: -+ file_contents = util.load_file(file_name) -+ key_data = file_contents.split() -+ if key_data and len(key_data) > 1: -+ key_list.append(tuple(key_data[:2])) -+ return key_list -+ -+ - # vi: ts=4 expandtab -diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py -index c8a4271..e778984 100644 ---- a/cloudinit/config/tests/test_ssh.py -+++ b/cloudinit/config/tests/test_ssh.py -@@ -1,5 +1,6 @@ - # This file is part of cloud-init. See LICENSE file for license information. - -+import os.path - - from cloudinit.config import cc_ssh - from cloudinit import ssh_util -@@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh." - class TestHandleSsh(CiTestCase): - """Test cc_ssh handling of ssh config.""" - -+ def _publish_hostkey_test_setup(self): -+ self.test_hostkeys = { -+ 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), -+ 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), -+ 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), -+ 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), -+ } -+ self.test_hostkey_files = [] -+ hostkey_tmpdir = self.tmp_dir() -+ for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: -+ key_data = self.test_hostkeys[key_type] -+ filename = 'ssh_host_%s_key.pub' % key_type -+ filepath = os.path.join(hostkey_tmpdir, filename) -+ self.test_hostkey_files.append(filepath) -+ with open(filepath, 'w') as f: -+ f.write(' '.join(key_data)) -+ -+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') -+ - def test_apply_credentials_with_user(self, m_setup_keys): - """Apply keys for the given user and root.""" - keys = ["key1"] -@@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase): - # Mock os.path.exits to True to short-circuit the key writing logic - m_path_exists.return_value = True - m_nug.return_value = ([], {}) -+ cc_ssh.PUBLISH_HOST_KEYS = False - cloud = self.tmp_cloud( - distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) -@@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase): - self.assertEqual([mock.call(set(keys), user), - mock.call(set(keys), "root", options="")], - m_setup_keys.call_args_list) -+ -+ @mock.patch(MODPATH + "glob.glob") -+ @mock.patch(MODPATH + "ug_util.normalize_users_groups") -+ @mock.patch(MODPATH + "os.path.exists") -+ def test_handle_publish_hostkeys_default( -+ self, m_path_exists, m_nug, m_glob, m_setup_keys): -+ """Test handle with various configs for ssh_publish_hostkeys.""" -+ self._publish_hostkey_test_setup() -+ cc_ssh.PUBLISH_HOST_KEYS = True -+ keys = ["key1"] -+ user = "clouduser" -+ # Return no matching keys for first glob, test keys for second. 
-+ m_glob.side_effect = iter([ -+ [], -+ self.test_hostkey_files, -+ ]) -+ # Mock os.path.exits to True to short-circuit the key writing logic -+ m_path_exists.return_value = True -+ m_nug.return_value = ({user: {"default": user}}, {}) -+ cloud = self.tmp_cloud( -+ distro='ubuntu', metadata={'public-keys': keys}) -+ cloud.datasource.publish_host_keys = mock.Mock() -+ -+ cfg = {} -+ expected_call = [self.test_hostkeys[key_type] for key_type -+ in ['ecdsa', 'ed25519', 'rsa']] -+ cc_ssh.handle("name", cfg, cloud, None, None) -+ self.assertEqual([mock.call(expected_call)], -+ cloud.datasource.publish_host_keys.call_args_list) -+ -+ @mock.patch(MODPATH + "glob.glob") -+ @mock.patch(MODPATH + "ug_util.normalize_users_groups") -+ @mock.patch(MODPATH + "os.path.exists") -+ def test_handle_publish_hostkeys_config_enable( -+ self, m_path_exists, m_nug, m_glob, m_setup_keys): -+ """Test handle with various configs for ssh_publish_hostkeys.""" -+ self._publish_hostkey_test_setup() -+ cc_ssh.PUBLISH_HOST_KEYS = False -+ keys = ["key1"] -+ user = "clouduser" -+ # Return no matching keys for first glob, test keys for second. -+ m_glob.side_effect = iter([ -+ [], -+ self.test_hostkey_files, -+ ]) -+ # Mock os.path.exits to True to short-circuit the key writing logic -+ m_path_exists.return_value = True -+ m_nug.return_value = ({user: {"default": user}}, {}) -+ cloud = self.tmp_cloud( -+ distro='ubuntu', metadata={'public-keys': keys}) -+ cloud.datasource.publish_host_keys = mock.Mock() -+ -+ cfg = {'ssh_publish_hostkeys': {'enabled': True}} -+ expected_call = [self.test_hostkeys[key_type] for key_type -+ in ['ecdsa', 'ed25519', 'rsa']] -+ cc_ssh.handle("name", cfg, cloud, None, None) -+ self.assertEqual([mock.call(expected_call)], -+ cloud.datasource.publish_host_keys.call_args_list) -+ -+ @mock.patch(MODPATH + "glob.glob") -+ @mock.patch(MODPATH + "ug_util.normalize_users_groups") -+ @mock.patch(MODPATH + "os.path.exists") -+ def test_handle_publish_hostkeys_config_disable( -+ self, m_path_exists, m_nug, m_glob, m_setup_keys): -+ """Test handle with various configs for ssh_publish_hostkeys.""" -+ self._publish_hostkey_test_setup() -+ cc_ssh.PUBLISH_HOST_KEYS = True -+ keys = ["key1"] -+ user = "clouduser" -+ # Return no matching keys for first glob, test keys for second. -+ m_glob.side_effect = iter([ -+ [], -+ self.test_hostkey_files, -+ ]) -+ # Mock os.path.exits to True to short-circuit the key writing logic -+ m_path_exists.return_value = True -+ m_nug.return_value = ({user: {"default": user}}, {}) -+ cloud = self.tmp_cloud( -+ distro='ubuntu', metadata={'public-keys': keys}) -+ cloud.datasource.publish_host_keys = mock.Mock() -+ -+ cfg = {'ssh_publish_hostkeys': {'enabled': False}} -+ cc_ssh.handle("name", cfg, cloud, None, None) -+ self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) -+ cloud.datasource.publish_host_keys.assert_not_called() -+ -+ @mock.patch(MODPATH + "glob.glob") -+ @mock.patch(MODPATH + "ug_util.normalize_users_groups") -+ @mock.patch(MODPATH + "os.path.exists") -+ def test_handle_publish_hostkeys_config_blacklist( -+ self, m_path_exists, m_nug, m_glob, m_setup_keys): -+ """Test handle with various configs for ssh_publish_hostkeys.""" -+ self._publish_hostkey_test_setup() -+ cc_ssh.PUBLISH_HOST_KEYS = True -+ keys = ["key1"] -+ user = "clouduser" -+ # Return no matching keys for first glob, test keys for second. 
-+ m_glob.side_effect = iter([ -+ [], -+ self.test_hostkey_files, -+ ]) -+ # Mock os.path.exits to True to short-circuit the key writing logic -+ m_path_exists.return_value = True -+ m_nug.return_value = ({user: {"default": user}}, {}) -+ cloud = self.tmp_cloud( -+ distro='ubuntu', metadata={'public-keys': keys}) -+ cloud.datasource.publish_host_keys = mock.Mock() -+ -+ cfg = {'ssh_publish_hostkeys': {'enabled': True, -+ 'blacklist': ['dsa', 'rsa']}} -+ expected_call = [self.test_hostkeys[key_type] for key_type -+ in ['ecdsa', 'ed25519']] -+ cc_ssh.handle("name", cfg, cloud, None, None) -+ self.assertEqual([mock.call(expected_call)], -+ cloud.datasource.publish_host_keys.call_args_list) -+ -+ @mock.patch(MODPATH + "glob.glob") -+ @mock.patch(MODPATH + "ug_util.normalize_users_groups") -+ @mock.patch(MODPATH + "os.path.exists") -+ def test_handle_publish_hostkeys_empty_blacklist( -+ self, m_path_exists, m_nug, m_glob, m_setup_keys): -+ """Test handle with various configs for ssh_publish_hostkeys.""" -+ self._publish_hostkey_test_setup() -+ cc_ssh.PUBLISH_HOST_KEYS = True -+ keys = ["key1"] -+ user = "clouduser" -+ # Return no matching keys for first glob, test keys for second. -+ m_glob.side_effect = iter([ -+ [], -+ self.test_hostkey_files, -+ ]) -+ # Mock os.path.exits to True to short-circuit the key writing logic -+ m_path_exists.return_value = True -+ m_nug.return_value = ({user: {"default": user}}, {}) -+ cloud = self.tmp_cloud( -+ distro='ubuntu', metadata={'public-keys': keys}) -+ cloud.datasource.publish_host_keys = mock.Mock() -+ -+ cfg = {'ssh_publish_hostkeys': {'enabled': True, -+ 'blacklist': []}} -+ expected_call = [self.test_hostkeys[key_type] for key_type -+ in ['dsa', 'ecdsa', 'ed25519', 'rsa']] -+ cc_ssh.handle("name", cfg, cloud, None, None) -+ self.assertEqual([mock.call(expected_call)], -+ cloud.datasource.publish_host_keys.call_args_list) -diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py -index d816262..6cbfbba 100644 ---- a/cloudinit/sources/DataSourceGCE.py -+++ b/cloudinit/sources/DataSourceGCE.py -@@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__) - MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' - BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} - REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') -+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' -+ 'v1/instance/guest-attributes') -+HOSTKEY_NAMESPACE = 'hostkeys' -+HEADERS = {'Metadata-Flavor': 'Google'} - - - class GoogleMetadataFetcher(object): -- headers = {'Metadata-Flavor': 'Google'} - - def __init__(self, metadata_address): - self.metadata_address = metadata_address -@@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object): - url = self.metadata_address + path - if is_recursive: - url += '/?recursive=True' -- resp = url_helper.readurl(url=url, headers=self.headers) -+ resp = url_helper.readurl(url=url, headers=HEADERS) - except url_helper.UrlError as exc: - msg = "url %s raised exception %s" - LOG.debug(msg, path, exc) -@@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource): - public_keys_data = self.metadata['public-keys-data'] - return _parse_public_keys(public_keys_data, self.default_user) - -+ def publish_host_keys(self, hostkeys): -+ for key in hostkeys: -+ _write_host_key_to_guest_attributes(*key) -+ - def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): - # GCE has long FDQN's and has asked for short hostnames. 
- return self.metadata['local-hostname'].split('.')[0] -@@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource): - return self.availability_zone.rsplit('-', 1)[0] - - -+def _write_host_key_to_guest_attributes(key_type, key_value): -+ url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) -+ key_value = key_value.encode('utf-8') -+ resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, -+ request_method='PUT', check_status=False) -+ if resp.ok(): -+ LOG.debug('Wrote %s host key to guest attributes.', key_type) -+ else: -+ LOG.debug('Unable to write %s host key to guest attributes.', key_type) -+ -+ - def _has_expired(public_key): - # Check whether an SSH key is expired. Public key input is a single SSH - # public key in the GCE specific key format documented here: -diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py -index e6966b3..425e703 100644 ---- a/cloudinit/sources/__init__.py -+++ b/cloudinit/sources/__init__.py -@@ -474,6 +474,16 @@ class DataSource(object): - def get_public_ssh_keys(self): - return normalize_pubkey_data(self.metadata.get('public-keys')) - -+ def publish_host_keys(self, hostkeys): -+ """Publish the public SSH host keys (found in /etc/ssh/*.pub). -+ -+ @param hostkeys: List of host key tuples (key_type, key_value), -+ where key_type is the first field in the public key file -+ (e.g. 'ssh-rsa') and key_value is the key itself -+ (e.g. 'AAAAB3NzaC1y...'). -+ """ -+ pass -+ - def _remap_device(self, short_name): - # LP: #611137 - # the metadata service may believe that devices are named 'sda' -diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py -index 396d69a..1b0721b 100644 ---- a/cloudinit/url_helper.py -+++ b/cloudinit/url_helper.py -@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details): - def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None, -- session=None, infinite=False, log_req_resp=True): -+ session=None, infinite=False, log_req_resp=True, -+ request_method=None): - url = _cleanurl(url) - req_args = { - 'url': url, - } - req_args.update(_get_ssl_args(url, ssl_details)) - req_args['allow_redirects'] = allow_redirects -- req_args['method'] = 'GET' -+ if not request_method: -+ request_method = 'POST' if data else 'GET' -+ req_args['method'] = request_method - if timeout is not None: - req_args['timeout'] = max(float(timeout), 0) -- if data: -- req_args['method'] = 'POST' - # It doesn't seem like config - # was added in older library versions (or newer ones either), thus we - # need to manually do the retries if it wasn't... 
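
The request-method selection added to readurl() in the hunk above reduces to a few lines; a standalone restatement of just that logic: an explicit method (here, the PUT used for guest attributes) wins, otherwise POST when a body is present and GET when it is not.

    def pick_method(data=None, request_method=None):
        # Mirrors the readurl() change: explicit method > POST-if-data > GET.
        return request_method or ('POST' if data else 'GET')

    assert pick_method() == 'GET'
    assert pick_method(data=b'hostkey') == 'POST'
    assert pick_method(data=b'hostkey', request_method='PUT') == 'PUT'
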
-diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py -index 41176c6..67744d3 100644 ---- a/tests/unittests/test_datasource/test_gce.py -+++ b/tests/unittests/test_datasource/test_gce.py -@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = { - HEADERS = {'Metadata-Flavor': 'Google'} - MD_URL_RE = re.compile( - r'http://metadata.google.internal/computeMetadata/v1/.*') -+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' -+ 'v1/instance/guest-attributes/hostkeys/') - - - def _set_mock_metadata(gce_meta=None): -@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): - public_key_data, default_user='default') - self.assertEqual(sorted(found), sorted(expected)) - -+ @mock.patch("cloudinit.url_helper.readurl") -+ def test_publish_host_keys(self, m_readurl): -+ hostkeys = [('ssh-rsa', 'asdfasdf'), -+ ('ssh-ed25519', 'qwerqwer')] -+ readurl_expected_calls = [ -+ mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, -+ request_method='PUT', -+ url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), -+ mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, -+ request_method='PUT', -+ url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), -+ ] -+ self.ds.publish_host_keys(hostkeys) -+ m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) -+ -+ - # vi: ts=4 expandtab --- -1.8.3.1 - diff --git a/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch b/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch deleted file mode 100644 index ca9ae1d..0000000 --- a/SOURCES/ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch +++ /dev/null @@ -1,411 +0,0 @@ -From a4b4a11f904d7f70b53c7959e489d7aab72a9fa4 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 15 May 2019 12:15:27 +0200 -Subject: [PATCH 3/5] Azure: Changes to the Hyper-V KVP Reporter -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -RH-Author: Eduardo Otubo -Message-id: <20190515121529.11191-4-otubo@redhat.com> -Patchwork-id: 87885 -O-Subject: [rhel-7 cloud-init PATCHv2 3/5] Azure: Changes to the Hyper-V KVP Reporter -Bugzilla: 1687565 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -From: Anh Vo - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: 86674f01 -BREW: 21696239 - -commit 86674f013dfcea3c075ab41373ffb475881066f6 -Author: Anh Vo -Date: Mon Apr 29 20:22:16 2019 +0000 - - Azure: Changes to the Hyper-V KVP Reporter - -  + Truncate KVP Pool file to prevent stale entries from - being processed by the Hyper-V KVP reporter. -  + Drop filtering of KVPs as it is no longer needed. -  + Batch appending of existing KVP entries. 
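
For background on the wire format this patch manipulates: the Hyper-V KVP pool file holds fixed-size records, a 512-byte key field followed by a 2048-byte value field (the sizes match the handler's HV_KVP_EXCHANGE_MAX_* constants). A toy encoder/decoder, assuming those sizes; everything else here is illustrative.

    import struct

    KEY_SIZE, VALUE_SIZE = 512, 2048
    RECORD = struct.Struct('%ds%ds' % (KEY_SIZE, VALUE_SIZE))

    def encode_kvp(key, value):
        # struct's 's' format null-pads each field to its fixed width.
        return RECORD.pack(key.encode('utf-8'), value.encode('utf-8'))

    def decode_kvp(record):
        k, v = RECORD.unpack(record)
        return (k.rstrip(b'\x00').decode('utf-8'),
                v.rstrip(b'\x00').decode('utf-8'))

    rec = encode_kvp('CLOUD_INIT|0|boot', '{"msg": "ok"}')
    assert len(rec) == RECORD.size == 2560
    assert decode_kvp(rec) == ('CLOUD_INIT|0|boot', '{"msg": "ok"}')
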
- -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/reporting/handlers.py | 117 +++++++++++++++---------------- - tests/unittests/test_reporting_hyperv.py | 104 +++++++++++++-------------- - 2 files changed, 106 insertions(+), 115 deletions(-) - mode change 100644 => 100755 cloudinit/reporting/handlers.py - mode change 100644 => 100755 tests/unittests/test_reporting_hyperv.py - -diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py -old mode 100644 -new mode 100755 -index 6d23558..10165ae ---- a/cloudinit/reporting/handlers.py -+++ b/cloudinit/reporting/handlers.py -@@ -5,7 +5,6 @@ import fcntl - import json - import six - import os --import re - import struct - import threading - import time -@@ -14,6 +13,7 @@ from cloudinit import log as logging - from cloudinit.registry import DictRegistry - from cloudinit import (url_helper, util) - from datetime import datetime -+from six.moves.queue import Empty as QueueEmptyError - - if six.PY2: - from multiprocessing.queues import JoinableQueue as JQueue -@@ -129,24 +129,50 @@ class HyperVKvpReportingHandler(ReportingHandler): - DESC_IDX_KEY = 'msg_i' - JSON_SEPARATORS = (',', ':') - KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' -+ _already_truncated_pool_file = False - - def __init__(self, - kvp_file_path=KVP_POOL_FILE_GUEST, - event_types=None): - super(HyperVKvpReportingHandler, self).__init__() - self._kvp_file_path = kvp_file_path -+ HyperVKvpReportingHandler._truncate_guest_pool_file( -+ self._kvp_file_path) -+ - self._event_types = event_types - self.q = JQueue() -- self.kvp_file = None - self.incarnation_no = self._get_incarnation_no() - self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, - self.incarnation_no) -- self._current_offset = 0 - self.publish_thread = threading.Thread( - target=self._publish_event_routine) - self.publish_thread.daemon = True - self.publish_thread.start() - -+ @classmethod -+ def _truncate_guest_pool_file(cls, kvp_file): -+ """ -+ Truncate the pool file if it has not been truncated since boot. -+ This should be done exactly once for the file indicated by -+ KVP_POOL_FILE_GUEST constant above. This method takes a filename -+ so that we can use an arbitrary file during unit testing. -+ Since KVP is a best-effort telemetry channel we only attempt to -+ truncate the file once and only if the file has not been modified -+ since boot. Additional truncation can lead to loss of existing -+ KVPs. -+ """ -+ if cls._already_truncated_pool_file: -+ return -+ boot_time = time.time() - float(util.uptime()) -+ try: -+ if os.path.getmtime(kvp_file) < boot_time: -+ with open(kvp_file, "w"): -+ pass -+ except (OSError, IOError) as e: -+ LOG.warning("failed to truncate kvp pool file, %s", e) -+ finally: -+ cls._already_truncated_pool_file = True -+ - def _get_incarnation_no(self): - """ - use the time passed as the incarnation number. 
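
The truncate-once guard added in the hunk above amounts to comparing the pool file's mtime against boot time. A standalone sketch of that check (Linux-only, and only an approximation of cloud-init's util.uptime()):

    import os
    import time

    def read_uptime():
        with open('/proc/uptime') as f:  # seconds since boot
            return float(f.read().split()[0])

    def truncate_if_stale(kvp_file):
        boot_time = time.time() - read_uptime()
        try:
            if os.path.getmtime(kvp_file) < boot_time:
                open(kvp_file, 'w').close()  # drop pre-boot KVP records
        except OSError:
            pass  # best-effort telemetry: a missing file is fine
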
-@@ -162,20 +188,15 @@ class HyperVKvpReportingHandler(ReportingHandler): - - def _iterate_kvps(self, offset): - """iterate the kvp file from the current offset.""" -- try: -- with open(self._kvp_file_path, 'rb+') as f: -- self.kvp_file = f -- fcntl.flock(f, fcntl.LOCK_EX) -- f.seek(offset) -+ with open(self._kvp_file_path, 'rb') as f: -+ fcntl.flock(f, fcntl.LOCK_EX) -+ f.seek(offset) -+ record_data = f.read(self.HV_KVP_RECORD_SIZE) -+ while len(record_data) == self.HV_KVP_RECORD_SIZE: -+ kvp_item = self._decode_kvp_item(record_data) -+ yield kvp_item - record_data = f.read(self.HV_KVP_RECORD_SIZE) -- while len(record_data) == self.HV_KVP_RECORD_SIZE: -- self._current_offset += self.HV_KVP_RECORD_SIZE -- kvp_item = self._decode_kvp_item(record_data) -- yield kvp_item -- record_data = f.read(self.HV_KVP_RECORD_SIZE) -- fcntl.flock(f, fcntl.LOCK_UN) -- finally: -- self.kvp_file = None -+ fcntl.flock(f, fcntl.LOCK_UN) - - def _event_key(self, event): - """ -@@ -207,23 +228,13 @@ class HyperVKvpReportingHandler(ReportingHandler): - - return {'key': k, 'value': v} - -- def _update_kvp_item(self, record_data): -- if self.kvp_file is None: -- raise ReportException( -- "kvp file '{0}' not opened." -- .format(self._kvp_file_path)) -- self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) -- self.kvp_file.write(record_data) -- - def _append_kvp_item(self, record_data): -- with open(self._kvp_file_path, 'rb+') as f: -+ with open(self._kvp_file_path, 'ab') as f: - fcntl.flock(f, fcntl.LOCK_EX) -- # seek to end of the file -- f.seek(0, 2) -- f.write(record_data) -+ for data in record_data: -+ f.write(data) - f.flush() - fcntl.flock(f, fcntl.LOCK_UN) -- self._current_offset = f.tell() - - def _break_down(self, key, meta_data, description): - del meta_data[self.MSG_KEY] -@@ -279,40 +290,26 @@ class HyperVKvpReportingHandler(ReportingHandler): - - def _publish_event_routine(self): - while True: -+ items_from_queue = 0 - try: - event = self.q.get(block=True) -- need_append = True -+ items_from_queue += 1 -+ encoded_data = [] -+ while event is not None: -+ encoded_data += self._encode_event(event) -+ try: -+ # get all the rest of the events in the queue -+ event = self.q.get(block=False) -+ items_from_queue += 1 -+ except QueueEmptyError: -+ event = None - try: -- if not os.path.exists(self._kvp_file_path): -- LOG.warning( -- "skip writing events %s to %s. file not present.", -- event.as_string(), -- self._kvp_file_path) -- encoded_event = self._encode_event(event) -- # for each encoded_event -- for encoded_data in (encoded_event): -- for kvp in self._iterate_kvps(self._current_offset): -- match = ( -- re.match( -- r"^{0}\|(\d+)\|.+" -- .format(self.EVENT_PREFIX), -- kvp['key'] -- )) -- if match: -- match_groups = match.groups(0) -- if int(match_groups[0]) < self.incarnation_no: -- need_append = False -- self._update_kvp_item(encoded_data) -- continue -- if need_append: -- self._append_kvp_item(encoded_data) -- except IOError as e: -- LOG.warning( -- "failed posting event to kvp: %s e:%s", -- event.as_string(), e) -+ self._append_kvp_item(encoded_data) -+ except (OSError, IOError) as e: -+ LOG.warning("failed posting events to kvp, %s", e) - finally: -- self.q.task_done() -- -+ for _ in range(items_from_queue): -+ self.q.task_done() - # when main process exits, q.get() will through EOFError - # indicating we should exit this thread. - except EOFError: -@@ -322,7 +319,7 @@ class HyperVKvpReportingHandler(ReportingHandler): - # if the kvp pool already contains a chunk of data, - # so defer it to another thread. 
- def publish_event(self, event): -- if (not self._event_types or event.event_type in self._event_types): -+ if not self._event_types or event.event_type in self._event_types: - self.q.put(event) - - def flush(self): -diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py -old mode 100644 -new mode 100755 -index 2e64c6c..d01ed5b ---- a/tests/unittests/test_reporting_hyperv.py -+++ b/tests/unittests/test_reporting_hyperv.py -@@ -1,10 +1,12 @@ - # This file is part of cloud-init. See LICENSE file for license information. - - from cloudinit.reporting import events --from cloudinit.reporting import handlers -+from cloudinit.reporting.handlers import HyperVKvpReportingHandler - - import json - import os -+import struct -+import time - - from cloudinit import util - from cloudinit.tests.helpers import CiTestCase -@@ -13,7 +15,7 @@ from cloudinit.tests.helpers import CiTestCase - class TestKvpEncoding(CiTestCase): - def test_encode_decode(self): - kvp = {'key': 'key1', 'value': 'value1'} -- kvp_reporting = handlers.HyperVKvpReportingHandler() -+ kvp_reporting = HyperVKvpReportingHandler() - data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) - self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) - decoded_kvp = kvp_reporting._decode_kvp_item(data) -@@ -26,57 +28,9 @@ class TextKvpReporter(CiTestCase): - self.tmp_file_path = self.tmp_path('kvp_pool_file') - util.ensure_file(self.tmp_file_path) - -- def test_event_type_can_be_filtered(self): -- reporter = handlers.HyperVKvpReportingHandler( -- kvp_file_path=self.tmp_file_path, -- event_types=['foo', 'bar']) -- -- reporter.publish_event( -- events.ReportingEvent('foo', 'name', 'description')) -- reporter.publish_event( -- events.ReportingEvent('some_other', 'name', 'description3')) -- reporter.q.join() -- -- kvps = list(reporter._iterate_kvps(0)) -- self.assertEqual(1, len(kvps)) -- -- reporter.publish_event( -- events.ReportingEvent('bar', 'name', 'description2')) -- reporter.q.join() -- kvps = list(reporter._iterate_kvps(0)) -- self.assertEqual(2, len(kvps)) -- -- self.assertIn('foo', kvps[0]['key']) -- self.assertIn('bar', kvps[1]['key']) -- self.assertNotIn('some_other', kvps[0]['key']) -- self.assertNotIn('some_other', kvps[1]['key']) -- -- def test_events_are_over_written(self): -- reporter = handlers.HyperVKvpReportingHandler( -- kvp_file_path=self.tmp_file_path) -- -- self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) -- -- reporter.publish_event( -- events.ReportingEvent('foo', 'name1', 'description')) -- reporter.publish_event( -- events.ReportingEvent('foo', 'name2', 'description')) -- reporter.q.join() -- self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) -- -- reporter2 = handlers.HyperVKvpReportingHandler( -- kvp_file_path=self.tmp_file_path) -- reporter2.incarnation_no = reporter.incarnation_no + 1 -- reporter2.publish_event( -- events.ReportingEvent('foo', 'name3', 'description')) -- reporter2.q.join() -- -- self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) -- - def test_events_with_higher_incarnation_not_over_written(self): -- reporter = handlers.HyperVKvpReportingHandler( -+ reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) -- - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) - - reporter.publish_event( -@@ -86,7 +40,7 @@ class TextKvpReporter(CiTestCase): - reporter.q.join() - self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - -- reporter3 = handlers.HyperVKvpReportingHandler( -+ reporter3 = 
HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter3.incarnation_no = reporter.incarnation_no - 1 - reporter3.publish_event( -@@ -95,7 +49,7 @@ class TextKvpReporter(CiTestCase): - self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) - - def test_finish_event_result_is_logged(self): -- reporter = handlers.HyperVKvpReportingHandler( -+ reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter.publish_event( - events.FinishReportingEvent('name2', 'description1', -@@ -105,7 +59,7 @@ class TextKvpReporter(CiTestCase): - - def test_file_operation_issue(self): - os.remove(self.tmp_file_path) -- reporter = handlers.HyperVKvpReportingHandler( -+ reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter.publish_event( - events.FinishReportingEvent('name2', 'description1', -@@ -113,7 +67,7 @@ class TextKvpReporter(CiTestCase): - reporter.q.join() - - def test_event_very_long(self): -- reporter = handlers.HyperVKvpReportingHandler( -+ reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE - long_event = events.FinishReportingEvent( -@@ -132,3 +86,43 @@ class TextKvpReporter(CiTestCase): - self.assertEqual(msg_slice['msg_i'], i) - full_description += msg_slice['msg'] - self.assertEqual(description, full_description) -+ -+ def test_not_truncate_kvp_file_modified_after_boot(self): -+ with open(self.tmp_file_path, "wb+") as f: -+ kvp = {'key': 'key1', 'value': 'value1'} -+ data = (struct.pack("%ds%ds" % ( -+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, -+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), -+ kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) -+ f.write(data) -+ cur_time = time.time() -+ os.utime(self.tmp_file_path, (cur_time, cur_time)) -+ -+ # reset this because the unit test framework -+ # has already polluted the class variable -+ HyperVKvpReportingHandler._already_truncated_pool_file = False -+ -+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) -+ kvps = list(reporter._iterate_kvps(0)) -+ self.assertEqual(1, len(kvps)) -+ -+ def test_truncate_stale_kvp_file(self): -+ with open(self.tmp_file_path, "wb+") as f: -+ kvp = {'key': 'key1', 'value': 'value1'} -+ data = (struct.pack("%ds%ds" % ( -+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, -+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), -+ kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) -+ f.write(data) -+ -+ # set the time ways back to make it look like -+ # we had an old kvp file -+ os.utime(self.tmp_file_path, (1000000, 1000000)) -+ -+ # reset this because the unit test framework -+ # has already polluted the class variable -+ HyperVKvpReportingHandler._already_truncated_pool_file = False -+ -+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) -+ kvps = list(reporter._iterate_kvps(0)) -+ self.assertEqual(0, len(kvps)) --- -1.8.3.1 - diff --git a/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch b/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch deleted file mode 100644 index 97fc503..0000000 --- a/SOURCES/ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 2428320b2157a0fcc0f35bea12584286ebd02aab Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 15 May 2019 12:15:25 +0200 -Subject: [PATCH 1/5] Azure: Ensure platform random_seed is always serializable - as JSON. 
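
The essence of the fix described in the patch below: the raw bytes read from the Hyper-V entropy source are not JSON-serializable in Python 3 (and may be an invalid str in Python 2), so the seed is base64-encoded into a plain string before being stored. A minimal sketch, with a stand-in byte string:

    import base64
    import json

    raw_seed = b'\x97\x00\xc3\xb4{V\xf4X'       # stand-in for /sys OEM0 bytes
    seed = base64.b64encode(raw_seed).decode()   # ASCII str, same entropy
    print(json.dumps({'seed': seed}))            # now serializes cleanly
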
- -RH-Author: Eduardo Otubo -Message-id: <20190515121529.11191-2-otubo@redhat.com> -Patchwork-id: 87881 -O-Subject: [rhel-7 cloud-init PATCHv2 1/5] Azure: Ensure platform random_seed is always serializable as JSON. -Bugzilla: 1687565 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -From: "Jason Zions (MSFT)" - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: 0dc3a77f -BREW: 21696239 - -commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf -Author: Jason Zions (MSFT) -Date: Tue Mar 26 18:53:50 2019 +0000 - - Azure: Ensure platform random_seed is always serializable as JSON. - - The Azure platform surfaces random bytes into /sys via Hyper-V. - Python 2.7 json.dump() raises an exception if asked to convert - a str with non-character content, and python 3.0 json.dump() - won't serialize a "bytes" value. As a result, c-i instance - data is often not written by Azure, making reboots slower (c-i - has to repeat work). - - The random data is base64-encoded and then decoded into a string - (str or unicode depending on the version of Python in use). The - base64 string has just as many bits of entropy, so we're not - throwing away useful "information", but we can be certain - json.dump() will correctly serialize the bits. - -Signed-off-by: Miroslav Rezanina - -Conflicts: - tests/unittests/test_datasource/test_azure.py - Skipped the commit edf052c as it removes support for python-2.6 - -Signed-off-by: Eduardo Otubo ---- - cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- - tests/data/azure/non_unicode_random_string | 1 + - tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- - 3 files changed, 42 insertions(+), 7 deletions(-) - create mode 100644 tests/data/azure/non_unicode_random_string - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 2062ca5..a768b2c 100644 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" - REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" - AGENT_SEED_DIR = '/var/lib/waagent' - IMDS_URL = "http://169.254.169.254/metadata/" -+PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" - - # List of static scripts and network config artifacts created by - # stock ubuntu suported images. -@@ -195,6 +196,8 @@ if util.is_FreeBSD(): - RESOURCE_DISK_PATH = "/dev/" + res_disk - else: - LOG.debug("resource disk is None") -+ # TODO Find where platform entropy data is surfaced -+ PLATFORM_ENTROPY_SOURCE = None - - BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START_BUILTIN, -@@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): - return False - - --def _get_random_seed(): -+def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): - """Return content random seed file if available, otherwise, - return None.""" - # azure / hyper-v provides random data here -- # TODO. find the seed on FreeBSD platform - # now update ds_cfg to reflect contents pass in config -- if util.is_FreeBSD(): -+ if source is None: - return None -- return util.load_file("/sys/firmware/acpi/tables/OEM0", -- quiet=True, decode=False) -+ seed = util.load_file(source, quiet=True, decode=False) -+ -+ # The seed generally contains non-Unicode characters. load_file puts -+ # them into a str (in python 2) or bytes (in python 3). In python 2, -+ # bad octets in a str cause util.json_dumps() to throw an exception. 
In -+ # python 3, bytes is a non-serializable type, and the handler load_file -+ # uses applies b64 encoding *again* to handle it. The simplest solution -+ # is to just b64encode the data and then decode it to a serializable -+ # string. Same number of bits of entropy, just with 25% more zeroes. -+ # There's no need to undo this base64-encoding when the random seed is -+ # actually used in cc_seed_random.py. -+ seed = base64.b64encode(seed).decode() -+ -+ return seed - - - def list_possible_azure_ds_devs(): -diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string -new file mode 100644 -index 0000000..b9ecefb ---- /dev/null -+++ b/tests/data/azure/non_unicode_random_string -@@ -0,0 +1 @@ -+OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ -\ No newline at end of file -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index 417d86a..eacf225 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -7,11 +7,11 @@ from cloudinit.sources import ( - UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) - from cloudinit.util import (b64e, decode_binary, load_file, write_file, - find_freebsd_part, get_path_dev_freebsd, -- MountFailedError) -+ MountFailedError, json_dumps, load_json) - from cloudinit.version import version_string as vs - from cloudinit.tests.helpers import ( - HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, -- ExitStack, PY26, SkipTest) -+ ExitStack, PY26, SkipTest, resourceLocation) - - import crypt - import httpretty -@@ -1924,4 +1924,24 @@ class TestWBIsPlatformViable(CiTestCase): - self.logs.getvalue()) - - -+class TestRandomSeed(CiTestCase): -+ """Test proper handling of random_seed""" -+ -+ def test_non_ascii_seed_is_serializable(self): -+ """Pass if a random string from the Azure infrastructure which -+ contains at least one non-Unicode character can be converted to/from -+ JSON without alteration and without throwing an exception. 
-+ """ -+ path = resourceLocation("azure/non_unicode_random_string") -+ result = dsaz._get_random_seed(path) -+ -+ obj = {'seed': result} -+ try: -+ serialized = json_dumps(obj) -+ deserialized = load_json(serialized) -+ except UnicodeDecodeError: -+ self.fail("Non-serializable random seed returned") -+ -+ self.assertEqual(deserialized['seed'], result) -+ - # vi: ts=4 expandtab --- -1.8.3.1 - diff --git a/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch b/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch deleted file mode 100644 index 32d3734..0000000 --- a/SOURCES/ci-Azure-Return-static-fallback-address-as-if-failed-to.patch +++ /dev/null @@ -1,107 +0,0 @@ -From f54ebeac5b95c7481718e09c4598a86bc1a8dcfb Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 3 Jul 2019 13:14:53 +0200 -Subject: [PATCH] Azure: Return static fallback address as if failed to find - endpoint - -RH-Author: Eduardo Otubo -Message-id: <20190703131453.15811-1-otubo@redhat.com> -Patchwork-id: 89354 -O-Subject: [RHEL-7.8 cloud-init PATCH] Azure: Return static fallback address as if failed to find endpoint -Bugzilla: 1726701 -RH-Acked-by: Bandan Das -RH-Acked-by: Mohammed Gamal - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: baa478546d8cac98a706010699d64f8c2f70b5bf -BREW: 22476988 - -commit aefb0f1c281740ef307116509057770062d61375 -Author: Jason Zions (MSFT) -Date: Fri May 10 18:38:55 2019 +0000 - - Azure: Return static fallback address as if failed to find endpoint - - The Azure data source helper attempts to use information in the dhcp - lease to find the Wireserver endpoint (IP address). Under some unusual - circumstances, those attempts will fail. This change uses a static - address, known to be always correct in the Azure public and sovereign - clouds, when the helper fails to locate a valid dhcp lease. This - address is not guaranteed to be correct in Azure Stack environments; - it's still best to use the information from the lease whenever possible. 
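
The static fallback is stored in the same colon-separated hex form found in
dhcp lease files, so the shim's existing WALinuxAgentShim.get_ip_from_lease_value
path decodes it into Azure's well-known wireserver address unchanged. A minimal
sketch of that decoding, using only values taken from this patch:

    import socket

    lease_value = "a8:3f:81:10"  # DEFAULT_WIRESERVER_ENDPOINT
    packed = bytes(int(octet, 16) for octet in lease_value.split(":"))
    print(socket.inet_ntoa(packed))  # -> 168.63.129.16, as the tests expect
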
- -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/helpers/azure.py | 14 +++++++++++--- - tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- - 2 files changed, 18 insertions(+), 5 deletions(-) - -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index d3af05e..82c4c8c 100755 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -20,6 +20,9 @@ from cloudinit.reporting import events - - LOG = logging.getLogger(__name__) - -+# This endpoint matches the format as found in dhcp lease files, since this -+# value is applied if the endpoint can't be found within a lease file -+DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" - - azure_ds_reporter = events.ReportEventStack( - name="azure-ds", -@@ -297,7 +300,12 @@ class WALinuxAgentShim(object): - @azure_ds_telemetry_reporter - def _get_value_from_leases_file(fallback_lease_file): - leases = [] -- content = util.load_file(fallback_lease_file) -+ try: -+ content = util.load_file(fallback_lease_file) -+ except IOError as ex: -+ LOG.error("Failed to read %s: %s", fallback_lease_file, ex) -+ return None -+ - LOG.debug("content is %s", content) - option_name = _get_dhcp_endpoint_option_name() - for line in content.splitlines(): -@@ -372,9 +380,9 @@ class WALinuxAgentShim(object): - fallback_lease_file) - value = WALinuxAgentShim._get_value_from_leases_file( - fallback_lease_file) -- - if value is None: -- raise ValueError('No endpoint found.') -+ LOG.warning("No lease found; using default endpoint") -+ value = DEFAULT_WIRESERVER_ENDPOINT - - endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index 0255616..bd006ab 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ b/tests/unittests/test_datasource/test_azure_helper.py -@@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): - self.networkd_leases.return_value = None - - def test_missing_file(self): -- self.assertRaises(ValueError, wa_shim.find_endpoint) -+ """wa_shim find_endpoint uses default endpoint if leasefile not found -+ """ -+ self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - def test_missing_special_azure_line(self): -+ """wa_shim find_endpoint uses default endpoint if leasefile is found -+ but does not contain DHCP Option 245 (whose value is the endpoint) -+ """ - self.load_file.return_value = '' - self.dhcp_options.return_value = {'eth0': {'key': 'value'}} -- self.assertRaises(ValueError, wa_shim.find_endpoint) -+ self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - @staticmethod - def _build_lease_content(encoded_address): --- -1.8.3.1 - diff --git a/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch b/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch deleted file mode 100644 index 1f22cd1..0000000 --- a/SOURCES/ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch +++ /dev/null @@ -1,117 +0,0 @@ -From 635dcbf5a4a5b060ebf417c66789c59ebb28c39f Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 15 May 2019 12:15:28 +0200 -Subject: [PATCH 4/5] DataSourceAzure: Adjust timeout for polling IMDS - -RH-Author: Eduardo Otubo -Message-id: <20190515121529.11191-5-otubo@redhat.com> -Patchwork-id: 87883 -O-Subject: [rhel-7 cloud-init PATCHv2 4/5] DataSourceAzure: Adjust timeout for polling IMDS 
-Bugzilla: 1687565 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -From: Anh Vo - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: ab6621d8 -BREW: 21696239 - -commit ab6621d849b24bb652243e88c79f6f3b446048d7 -Author: Anh Vo -Date: Wed May 8 14:54:03 2019 +0000 - - DataSourceAzure: Adjust timeout for polling IMDS - - If the IMDS primary server is not available, falling back to the - secondary server takes about 1s. The net result is that the - expected E2E time is slightly more than 1s. This change increases - the timeout to 2s to prevent the infinite loop of timeouts. - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- - tests/unittests/test_datasource/test_azure.py | 10 +++++++--- - 2 files changed, 17 insertions(+), 8 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index c827816..5baf8da 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' - REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" - REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" - AGENT_SEED_DIR = '/var/lib/waagent' -+ -+# In the event where the IMDS primary server is not -+# available, it takes 1s to fallback to the secondary one -+IMDS_TIMEOUT_IN_SECONDS = 2 - IMDS_URL = "http://169.254.169.254/metadata/" -+ - PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" - - # List of static scripts and network config artifacts created by -@@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): - return - self._ephemeral_dhcp_ctx.clean_network() - else: -- return readurl(url, timeout=1, headers=headers, -- exception_cb=exc_cb, infinite=True, -- log_req_resp=False).contents -+ return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, -+ headers=headers, exception_cb=exc_cb, -+ infinite=True, log_req_resp=False).contents - except UrlError: - # Teardown our EphemeralDHCPv4 context on failure as we retry - self._ephemeral_dhcp_ctx.clean_network() -@@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): - headers = {"Metadata": "true"} - try: - response = readurl( -- url, timeout=1, headers=headers, retries=retries, -- exception_cb=retry_on_url_exc) -+ url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, -+ retries=retries, exception_cb=retry_on_url_exc) - except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) - return {} -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index eacf225..bc8b42c 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - - m_readurl.assert_called_with( - self.network_md_url, exception_cb=mock.ANY, -- headers={'Metadata': 'true'}, retries=2, timeout=1) -+ headers={'Metadata': 'true'}, retries=2, -+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) - - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up') -@@ -1789,7 +1790,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' % vs() -- }, method='GET', timeout=1, -+ }, method='GET', -+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, - url=full_url)]) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( -@@ -1826,7 +1828,9 @@ class 
TestAzureDataSourcePreprovisioning(CiTestCase): - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' % vs()}, -- method='GET', timeout=1, url=full_url)]) -+ method='GET', -+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, -+ url=full_url)]) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', --- -1.8.3.1 - diff --git a/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch b/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch deleted file mode 100644 index 177d22e..0000000 --- a/SOURCES/ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch +++ /dev/null @@ -1,648 +0,0 @@ -From 7765776d538e61639d1ea920919211f780b75d13 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 15 May 2019 12:15:26 +0200 -Subject: [PATCH 2/5] DatasourceAzure: add additional logging for azure - datasource - -RH-Author: Eduardo Otubo -Message-id: <20190515121529.11191-3-otubo@redhat.com> -Patchwork-id: 87882 -O-Subject: [rhel-7 cloud-init PATCHv2 2/5] DatasourceAzure: add additional logging for azure datasource -Bugzilla: 1687565 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -From: Anh Vo - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: 0d8c8839 -BREW: 21696239 - -commit 0d8c88393b51db6454491a379dcc2e691551217a -Author: Anh Vo -Date: Wed Apr 3 18:23:18 2019 +0000 - - DatasourceAzure: add additional logging for azure datasource - - Create an Azure logging decorator and use additional ReportEventStack - context managers to provide additional logging details. - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- - cloudinit/sources/helpers/azure.py | 31 +++++ - 2 files changed, 179 insertions(+), 83 deletions(-) - mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py - mode change 100644 => 100755 cloudinit/sources/helpers/azure.py - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -old mode 100644 -new mode 100755 -index a768b2c..c827816 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -21,10 +21,14 @@ from cloudinit import net - from cloudinit.event import EventType - from cloudinit.net.dhcp import EphemeralDHCPv4 - from cloudinit import sources --from cloudinit.sources.helpers.azure import get_metadata_from_fabric - from cloudinit.sources.helpers import netlink - from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc - from cloudinit import util -+from cloudinit.reporting import events -+ -+from cloudinit.sources.helpers.azure import (azure_ds_reporter, -+ azure_ds_telemetry_reporter, -+ get_metadata_from_fabric) - - LOG = logging.getLogger(__name__) - -@@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): - util.subp(['hostnamectl', 'set-hostname', str(hostname)]) - - -+@azure_ds_telemetry_reporter - @contextlib.contextmanager - def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): - """ -@@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): - root = sources.DataSource.__str__(self) - return "%s [seed=%s]" % (root, self.seed) - -+ @azure_ds_telemetry_reporter - def bounce_network_with_azure_hostname(self): - # When using cloud-init to provision, we have to set the hostname from - # the metadata and "bounce" the network to force DDNS to update via -@@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): - 
util.logexc(LOG, "handling set_hostname failed") - return False - -+ @azure_ds_telemetry_reporter - def get_metadata_from_agent(self): - temp_hostname = self.metadata.get('local-hostname') - agent_cmd = self.ds_cfg['agent_command'] -@@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): - LOG.debug("ssh authentication: " - "using fingerprint from fabirc") - -- # wait very long for public SSH keys to arrive -- # https://bugs.launchpad.net/cloud-init/+bug/1717611 -- missing = util.log_time(logfunc=LOG.debug, -- msg="waiting for SSH public key files", -- func=util.wait_for_files, -- args=(fp_files, 900)) -- -- if len(missing): -- LOG.warning("Did not find files, but going on: %s", missing) -+ with events.ReportEventStack( -+ name="waiting-for-ssh-public-key", -+ description="wait for agents to retrieve ssh keys", -+ parent=azure_ds_reporter): -+ # wait very long for public SSH keys to arrive -+ # https://bugs.launchpad.net/cloud-init/+bug/1717611 -+ missing = util.log_time(logfunc=LOG.debug, -+ msg="waiting for SSH public key files", -+ func=util.wait_for_files, -+ args=(fp_files, 900)) -+ if len(missing): -+ LOG.warning("Did not find files, but going on: %s", missing) - - metadata = {} - metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) -@@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): - subplatform_type = 'seed-dir' - return '%s (%s)' % (subplatform_type, self.seed) - -+ @azure_ds_telemetry_reporter - def crawl_metadata(self): - """Walk all instance metadata sources returning a dict on success. - -@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): - super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) - self._metadata_imds = sources.UNSET - -+ @azure_ds_telemetry_reporter - def _get_data(self): - """Crawl and process datasource metadata caching metadata as attrs. - -@@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): - # quickly (local check only) if self.instance_id is still valid - return sources.instance_id_matches_system_uuid(self.get_instance_id()) - -+ @azure_ds_telemetry_reporter - def setup(self, is_new_instance): - if self._negotiated is False: - LOG.debug("negotiating for %s (new_instance=%s)", -@@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): - if nl_sock: - nl_sock.close() - -+ @azure_ds_telemetry_reporter - def _report_ready(self, lease): - """Tells the fabric provisioning has completed """ - try: -@@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): - def _reprovision(self): - """Initiate the reprovisioning workflow.""" - contents = self._poll_imds() -- md, ud, cfg = read_azure_ovf(contents) -- return (md, ud, cfg, {'ovf-env.xml': contents}) -- -+ with events.ReportEventStack( -+ name="reprovisioning-read-azure-ovf", -+ description="read azure ovf during reprovisioning", -+ parent=azure_ds_reporter): -+ md, ud, cfg = read_azure_ovf(contents) -+ return (md, ud, cfg, {'ovf-env.xml': contents}) -+ -+ @azure_ds_telemetry_reporter - def _negotiate(self): - """Negotiate with fabric and return data from it. 
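
The decorator this patch applies throughout the datasource has one simple
shape: wrap the call in a ReportEventStack so entry and exit are emitted as
telemetry events without touching the function body. A self-contained
restatement of that pattern, mirroring the azure_ds_telemetry_reporter helper
added to cloudinit/sources/helpers/azure.py further down in this patch:

    from cloudinit.reporting import events

    azure_ds_reporter = events.ReportEventStack(
        name="azure-ds",
        description="initialize reporter for azure ds",
        reporting_enabled=True)

    def azure_ds_telemetry_reporter(func):
        def impl(*args, **kwargs):
            # each decorated call is bracketed by a child event stack
            with events.ReportEventStack(
                    name=func.__name__,
                    description=func.__name__,
                    parent=azure_ds_reporter):
                return func(*args, **kwargs)
        return impl

    @azure_ds_telemetry_reporter
    def crawl_metadata():
        ...  # unchanged body; telemetry brackets the call automatically
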
- -@@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): - util.del_file(REPROVISION_MARKER_FILE) - return fabric_data - -+ @azure_ds_telemetry_reporter - def activate(self, cfg, is_new_instance): - address_ephemeral_resize(is_new_instance=is_new_instance, - preserve_ntfs=self.ds_cfg.get( -@@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): - return [] - - -+@azure_ds_telemetry_reporter - def _has_ntfs_filesystem(devpath): - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) - LOG.debug('ntfs_devices found = %s', ntfs_devices) - return os.path.realpath(devpath) in ntfs_devices - - -+@azure_ds_telemetry_reporter - def can_dev_be_reformatted(devpath, preserve_ntfs): - """Determine if the ephemeral drive at devpath should be reformatted. - -@@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): - (cand_part, cand_path, devpath)) - return False, msg - -+ @azure_ds_telemetry_reporter - def count_files(mp): - ignored = set(['dataloss_warning_readme.txt']) - return len([f for f in os.listdir(mp) if f.lower() not in ignored]) - - bmsg = ('partition %s (%s) on device %s was ntfs formatted' % - (cand_part, cand_path, devpath)) -- try: -- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", -- update_env_for_mount={'LANG': 'C'}) -- except util.MountFailedError as e: -- if "unknown filesystem type 'ntfs'" in str(e): -- return True, (bmsg + ' but this system cannot mount NTFS,' -- ' assuming there are no important files.' -- ' Formatting allowed.') -- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) -- -- if file_count != 0: -- LOG.warning("it looks like you're using NTFS on the ephemeral disk, " -- 'to ensure that filesystem does not get wiped, set ' -- '%s.%s in config', '.'.join(DS_CFG_PATH), -- DS_CFG_KEY_PRESERVE_NTFS) -- return False, bmsg + ' but had %d files on it.' % file_count -+ -+ with events.ReportEventStack( -+ name="mount-ntfs-and-count", -+ description="mount-ntfs-and-count", -+ parent=azure_ds_reporter) as evt: -+ try: -+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", -+ update_env_for_mount={'LANG': 'C'}) -+ except util.MountFailedError as e: -+ evt.description = "cannot mount ntfs" -+ if "unknown filesystem type 'ntfs'" in str(e): -+ return True, (bmsg + ' but this system cannot mount NTFS,' -+ ' assuming there are no important files.' -+ ' Formatting allowed.') -+ return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) -+ -+ if file_count != 0: -+ evt.description = "mounted and counted %d files" % file_count -+ LOG.warning("it looks like you're using NTFS on the ephemeral" -+ " disk, to ensure that filesystem does not get wiped," -+ " set %s.%s in config", '.'.join(DS_CFG_PATH), -+ DS_CFG_KEY_PRESERVE_NTFS) -+ return False, bmsg + ' but had %d files on it.' % file_count - - return True, bmsg + ' and had no important files. Safe for reformatting.' 
- - -+@azure_ds_telemetry_reporter - def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, - is_new_instance=False, preserve_ntfs=False): - # wait for ephemeral disk to come up - naplen = .2 -- missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, -- log_pre="Azure ephemeral disk: ") -- -- if missing: -- LOG.warning("ephemeral device '%s' did not appear after %d seconds.", -- devpath, maxwait) -- return -+ with events.ReportEventStack( -+ name="wait-for-ephemeral-disk", -+ description="wait for ephemeral disk", -+ parent=azure_ds_reporter): -+ missing = util.wait_for_files([devpath], -+ maxwait=maxwait, -+ naplen=naplen, -+ log_pre="Azure ephemeral disk: ") -+ -+ if missing: -+ LOG.warning("ephemeral device '%s' did" -+ " not appear after %d seconds.", -+ devpath, maxwait) -+ return - - result = False - msg = None -@@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, - return - - -+@azure_ds_telemetry_reporter - def perform_hostname_bounce(hostname, cfg, prev_hostname): - # set the hostname to 'hostname' if it is not already set to that. - # then, if policy is not off, bounce the interface using command -@@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): - return True - - -+@azure_ds_telemetry_reporter - def crtfile_to_pubkey(fname, data=None): - pipeline = ('openssl x509 -noout -pubkey < "$0" |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin') -@@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): - return out.rstrip() - - -+@azure_ds_telemetry_reporter - def pubkeys_from_crt_files(flist): - pubkeys = [] - errors = [] -@@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): - return pubkeys - - -+@azure_ds_telemetry_reporter - def write_files(datadir, files, dirmode=None): - - def _redact_password(cnt, fname): -@@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): - util.write_file(filename=fname, content=content, mode=0o600) - - -+@azure_ds_telemetry_reporter - def invoke_agent(cmd): - # this is a function itself to simplify patching it for test - if cmd: -@@ -912,6 +955,7 @@ def find_child(node, filter_func): - return ret - - -+@azure_ds_telemetry_reporter - def load_azure_ovf_pubkeys(sshnode): - # This parses a 'SSH' node formatted like below, and returns - # an array of dicts. -@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): - return found - - -+@azure_ds_telemetry_reporter - def read_azure_ovf(contents): - try: - dom = minidom.parseString(contents) -@@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): - return (md, ud, cfg) - - -+@azure_ds_telemetry_reporter - def _extract_preprovisioned_vm_setting(dom): - """Read the preprovision flag from the ovf. 
It should not - exist unless true.""" -@@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): - return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) - - -+@azure_ds_telemetry_reporter - def _check_freebsd_cdrom(cdrom_dev): - """Return boolean indicating path to cdrom device has content.""" - try: -@@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): - return False - - -+@azure_ds_telemetry_reporter - def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): - """Return content random seed file if available, otherwise, - return None.""" -@@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): - return seed - - -+@azure_ds_telemetry_reporter - def list_possible_azure_ds_devs(): - devlist = [] - if util.is_FreeBSD(): -@@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): - return devlist - - -+@azure_ds_telemetry_reporter - def load_azure_ds_dir(source_dir): - ovf_file = os.path.join(source_dir, "ovf-env.xml") - -@@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): - @param: imds_metadata: Dict of content read from IMDS network service. - @return: Dictionary containing network version 2 standard configuration. - """ -- if imds_metadata != sources.UNSET and imds_metadata: -- netconfig = {'version': 2, 'ethernets': {}} -- LOG.debug('Azure: generating network configuration from IMDS') -- network_metadata = imds_metadata['network'] -- for idx, intf in enumerate(network_metadata['interface']): -- nicname = 'eth{idx}'.format(idx=idx) -- dev_config = {} -- for addr4 in intf['ipv4']['ipAddress']: -- privateIpv4 = addr4['privateIpAddress'] -- if privateIpv4: -- if dev_config.get('dhcp4', False): -- # Append static address config for nic > 1 -- netPrefix = intf['ipv4']['subnet'][0].get( -- 'prefix', '24') -- if not dev_config.get('addresses'): -- dev_config['addresses'] = [] -- dev_config['addresses'].append( -- '{ip}/{prefix}'.format( -- ip=privateIpv4, prefix=netPrefix)) -- else: -- dev_config['dhcp4'] = True -- for addr6 in intf['ipv6']['ipAddress']: -- privateIpv6 = addr6['privateIpAddress'] -- if privateIpv6: -- dev_config['dhcp6'] = True -- break -- if dev_config: -- mac = ':'.join(re.findall(r'..', intf['macAddress'])) -- dev_config.update( -- {'match': {'macaddress': mac.lower()}, -- 'set-name': nicname}) -- netconfig['ethernets'][nicname] = dev_config -- else: -- blacklist = ['mlx4_core'] -- LOG.debug('Azure: generating fallback configuration') -- # generate a network config, blacklist picking mlx4_core devs -- netconfig = net.generate_fallback_config( -- blacklist_drivers=blacklist, config_driver=True) -- return netconfig -+ with events.ReportEventStack( -+ name="parse_network_config", -+ description="", -+ parent=azure_ds_reporter) as evt: -+ if imds_metadata != sources.UNSET and imds_metadata: -+ netconfig = {'version': 2, 'ethernets': {}} -+ LOG.debug('Azure: generating network configuration from IMDS') -+ network_metadata = imds_metadata['network'] -+ for idx, intf in enumerate(network_metadata['interface']): -+ nicname = 'eth{idx}'.format(idx=idx) -+ dev_config = {} -+ for addr4 in intf['ipv4']['ipAddress']: -+ privateIpv4 = addr4['privateIpAddress'] -+ if privateIpv4: -+ if dev_config.get('dhcp4', False): -+ # Append static address config for nic > 1 -+ netPrefix = intf['ipv4']['subnet'][0].get( -+ 'prefix', '24') -+ if not dev_config.get('addresses'): -+ dev_config['addresses'] = [] -+ dev_config['addresses'].append( -+ '{ip}/{prefix}'.format( -+ ip=privateIpv4, prefix=netPrefix)) -+ else: -+ dev_config['dhcp4'] = True -+ for 
addr6 in intf['ipv6']['ipAddress']: -+ privateIpv6 = addr6['privateIpAddress'] -+ if privateIpv6: -+ dev_config['dhcp6'] = True -+ break -+ if dev_config: -+ mac = ':'.join(re.findall(r'..', intf['macAddress'])) -+ dev_config.update( -+ {'match': {'macaddress': mac.lower()}, -+ 'set-name': nicname}) -+ netconfig['ethernets'][nicname] = dev_config -+ evt.description = "network config from imds" -+ else: -+ blacklist = ['mlx4_core'] -+ LOG.debug('Azure: generating fallback configuration') -+ # generate a network config, blacklist picking mlx4_core devs -+ netconfig = net.generate_fallback_config( -+ blacklist_drivers=blacklist, config_driver=True) -+ evt.description = "network config from fallback" -+ return netconfig - - -+@azure_ds_telemetry_reporter - def get_metadata_from_imds(fallback_nic, retries): - """Query Azure's network metadata service, returning a dictionary. - -@@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): - return util.log_time(**kwargs) - - -+@azure_ds_telemetry_reporter - def _get_metadata_from_imds(retries): - - url = IMDS_URL + "instance?api-version=2017-12-01" -@@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): - return {} - - -+@azure_ds_telemetry_reporter - def maybe_remove_ubuntu_network_config_scripts(paths=None): - """Remove Azure-specific ubuntu network config for non-primary nics. - -@@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): - - - def _is_platform_viable(seed_dir): -- """Check platform environment to report if this datasource may run.""" -- asset_tag = util.read_dmi_data('chassis-asset-tag') -- if asset_tag == AZURE_CHASSIS_ASSET_TAG: -- return True -- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) -- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): -- return True -- return False -+ with events.ReportEventStack( -+ name="check-platform-viability", -+ description="found azure asset tag", -+ parent=azure_ds_reporter) as evt: -+ -+ """Check platform environment to report if this datasource may run.""" -+ asset_tag = util.read_dmi_data('chassis-asset-tag') -+ if asset_tag == AZURE_CHASSIS_ASSET_TAG: -+ return True -+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) -+ evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag -+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): -+ return True -+ return False - - - class BrokenAzureDataSource(Exception): -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -old mode 100644 -new mode 100755 -index 2829dd2..d3af05e ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -16,10 +16,27 @@ from xml.etree import ElementTree - - from cloudinit import url_helper - from cloudinit import util -+from cloudinit.reporting import events - - LOG = logging.getLogger(__name__) - - -+azure_ds_reporter = events.ReportEventStack( -+ name="azure-ds", -+ description="initialize reporter for azure ds", -+ reporting_enabled=True) -+ -+ -+def azure_ds_telemetry_reporter(func): -+ def impl(*args, **kwargs): -+ with events.ReportEventStack( -+ name=func.__name__, -+ description=func.__name__, -+ parent=azure_ds_reporter): -+ return func(*args, **kwargs) -+ return impl -+ -+ - @contextmanager - def cd(newdir): - prevdir = os.getcwd() -@@ -119,6 +136,7 @@ class OpenSSLManager(object): - def clean_up(self): - util.del_dir(self.tmpdir) - -+ @azure_ds_telemetry_reporter - def generate_certificate(self): - LOG.debug('Generating certificate for 
communication with fabric...') - if self.certificate is not None: -@@ -139,17 +157,20 @@ class OpenSSLManager(object): - LOG.debug('New certificate generated.') - - @staticmethod -+ @azure_ds_telemetry_reporter - def _run_x509_action(action, cert): - cmd = ['openssl', 'x509', '-noout', action] - result, _ = util.subp(cmd, data=cert) - return result - -+ @azure_ds_telemetry_reporter - def _get_ssh_key_from_cert(self, certificate): - pub_key = self._run_x509_action('-pubkey', certificate) - keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] - ssh_key, _ = util.subp(keygen_cmd, data=pub_key) - return ssh_key - -+ @azure_ds_telemetry_reporter - def _get_fingerprint_from_cert(self, certificate): - """openssl x509 formats fingerprints as so: - 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ -@@ -163,6 +184,7 @@ class OpenSSLManager(object): - octets = raw_fp[eq+1:-1].split(':') - return ''.join(octets) - -+ @azure_ds_telemetry_reporter - def _decrypt_certs_from_xml(self, certificates_xml): - """Decrypt the certificates XML document using the our private key; - return the list of certs and private keys contained in the doc. -@@ -185,6 +207,7 @@ class OpenSSLManager(object): - shell=True, data=b'\n'.join(lines)) - return out - -+ @azure_ds_telemetry_reporter - def parse_certificates(self, certificates_xml): - """Given the Certificates XML document, return a dictionary of - fingerprints and associated SSH keys derived from the certs.""" -@@ -265,11 +288,13 @@ class WALinuxAgentShim(object): - return socket.inet_ntoa(packed_bytes) - - @staticmethod -+ @azure_ds_telemetry_reporter - def _networkd_get_value_from_leases(leases_d=None): - return dhcp.networkd_get_option_from_leases( - 'OPTION_245', leases_d=leases_d) - - @staticmethod -+ @azure_ds_telemetry_reporter - def _get_value_from_leases_file(fallback_lease_file): - leases = [] - content = util.load_file(fallback_lease_file) -@@ -287,6 +312,7 @@ class WALinuxAgentShim(object): - return leases[-1] - - @staticmethod -+ @azure_ds_telemetry_reporter - def _load_dhclient_json(): - dhcp_options = {} - hooks_dir = WALinuxAgentShim._get_hooks_dir() -@@ -305,6 +331,7 @@ class WALinuxAgentShim(object): - return dhcp_options - - @staticmethod -+ @azure_ds_telemetry_reporter - def _get_value_from_dhcpoptions(dhcp_options): - if dhcp_options is None: - return None -@@ -318,6 +345,7 @@ class WALinuxAgentShim(object): - return _value - - @staticmethod -+ @azure_ds_telemetry_reporter - def find_endpoint(fallback_lease_file=None, dhcp245=None): - value = None - if dhcp245 is not None: -@@ -352,6 +380,7 @@ class WALinuxAgentShim(object): - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) - return endpoint_ip_address - -+ @azure_ds_telemetry_reporter - def register_with_azure_and_fetch_data(self, pubkey_info=None): - if self.openssl_manager is None: - self.openssl_manager = OpenSSLManager() -@@ -404,6 +433,7 @@ class WALinuxAgentShim(object): - - return keys - -+ @azure_ds_telemetry_reporter - def _report_ready(self, goal_state, http_client): - LOG.debug('Reporting ready to Azure fabric.') - document = self.REPORT_READY_XML_TEMPLATE.format( -@@ -419,6 +449,7 @@ class WALinuxAgentShim(object): - LOG.info('Reported ready to Azure fabric.') - - -+@azure_ds_telemetry_reporter - def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, - pubkey_info=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, --- -1.8.3.1 - diff --git 
a/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch b/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch index e7cb1da..6dca90b 100644 --- a/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch +++ b/SOURCES/ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch @@ -1,15 +1,14 @@ -From e78ae6d16009263a8dfcd91ea8ce8fc08a077529 Mon Sep 17 00:00:00 2001 +From 5bdb6bc091a0270912974583a7dabe94f5b8a1ef Mon Sep 17 00:00:00 2001 From: Eduardo Otubo -Date: Tue, 28 Apr 2020 08:22:03 +0200 -Subject: [PATCH 1/3] Do not use fallocate in swap file creation on xfs. (#70) +Date: Wed, 18 Mar 2020 14:11:23 +0100 +Subject: [PATCH] Do not use fallocate in swap file creation on xfs. (#70) -RH-Author: Eduardo Otubo -Message-id: <20200422130428.7663-2-otubo@redhat.com> -Patchwork-id: 96031 -O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 1/3] Do not use fallocate in swap file creation on xfs. (#70) -Bugzilla: 1801094 +Message-id: <20200318141123.30265-1-otubo@redhat.com> +Patchwork-id: 94377 +O-Subject: [RHEL-7.9 cloud-init PATCH] Do not use fallocate in swap file creation on xfs. (#70) +Bugzilla: 1772505 +RH-Acked-by: Miroslav Rezanina RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Cathy Avery commit 6603706eec1c39d9d591c8ffa0ef7171b74d84d6 Author: Eduardo Otubo @@ -32,7 +31,7 @@ Date: Thu Jan 23 17:41:48 2020 +0100 LP: #1781781 -Signed-off-bt: Eduardo Otubo +Signed-off-by: Eduardo Otubo Signed-off-by: Miroslav Rezanina --- cloudinit/config/cc_mounts.py | 67 ++++++++++++++++------ @@ -40,7 +39,7 @@ Signed-off-by: Miroslav Rezanina 2 files changed, 62 insertions(+), 17 deletions(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py -index 123ffb8..6884ddf 100644 +index c741c74..4293844 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -223,13 +223,58 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): @@ -123,10 +122,10 @@ index 123ffb8..6884ddf 100644 - util.ensure_dir(tdir) - util.log_time(LOG.debug, msg, func=util.subp, - args=[['sh', '-c', -- ('rm -f "$1" && umask 0066 && ' -- '{ fallocate -l "${2}M" "$1" || ' -- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' -- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), +- ('rm -f "$1" && umask 0066 && ' +- '{ fallocate -l "${2}M" "$1" || ' +- 'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' +- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), - 'setup_swap', fname, mbsize]]) - - except Exception as e: diff --git a/SOURCES/ci-Enable-ssh_deletekeys-by-default.patch b/SOURCES/ci-Enable-ssh_deletekeys-by-default.patch new file mode 100644 index 0000000..28d53bb --- /dev/null +++ b/SOURCES/ci-Enable-ssh_deletekeys-by-default.patch @@ -0,0 +1,40 @@ +From 5bc897b13aa3090770af618f0cfa49f958436515 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 17 Mar 2020 09:17:05 +0100 +Subject: [PATCH 5/5] Enable ssh_deletekeys by default + +Message-id: <20200317091705.15715-1-otubo@redhat.com> +Patchwork-id: 94365 +O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Enable ssh_deletekeys by default +Bugzilla: 1574338 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohammed Gamal + +The configuration option ssh_deletekeys will trigger the generation +of new ssh keys for every new instance deployed. 
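
For context on what the flip below implies: with ssh_deletekeys enabled,
cloud-init removes the host keys left behind by the image before generating
fresh ones, so instances cloned from one image stop sharing host keys. A
rough, simplified sketch of the effect (the actual logic lives in cloud-init's
cc_ssh module; the glob pattern here is an illustrative assumption):

    import glob
    import os

    # Illustrative only: stale host keys go away on first boot of a new
    # instance; regeneration then follows the ssh_genkeytypes setting.
    for key_file in glob.glob("/etc/ssh/ssh_host_*key*"):
        os.remove(key_file)
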
+ +x-downstream-only: yes +resolves: rhbz#1814152 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + rhel/cloud.cfg | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 82e8bf6..9ecba21 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -6,7 +6,7 @@ ssh_pwauth: 0 + + mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] + resize_rootfs_tmp: /dev +-ssh_deletekeys: 0 ++ssh_deletekeys: 1 + ssh_genkeytypes: ~ + syslog_fix_perms: ~ + disable_vmware_customization: false +-- +1.8.3.1 + diff --git a/SOURCES/ci-Fix-for-network-configuration-not-persisting-after-r.patch b/SOURCES/ci-Fix-for-network-configuration-not-persisting-after-r.patch deleted file mode 100644 index 8e4a8e3..0000000 --- a/SOURCES/ci-Fix-for-network-configuration-not-persisting-after-r.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 9969cf3eaa23398816d140b319b3277465aa4bb8 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 6 Sep 2019 12:12:11 +0200 -Subject: [PATCH] Fix for network configuration not persisting after reboot - -RH-Author: Eduardo Otubo -Message-id: <20190906121211.23172-1-otubo@redhat.com> -Patchwork-id: 90300 -O-Subject: [RHEL-7.8/RHEL-8.1.0 cloud-init PATCH] Fix for network configuration not persisting after reboot -Bugzilla: 1593010 -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Miroslav Rezanina - -The reasons the configuration does not persist after reboot includes -different aspects and they're all fixed on this patch: - - 1) The rpm package doesn't include the systemd-generator and -ds-identify. The systemd-generator is called early in the boot process -that calls ds-identify to check if there's any Data Source available in -the current boot. In the current use case, the Data Source is removed -from the VM on the second boot, this means cloud-init should disable -itself in order to keep the configuration it did in the first boot. - - 2) Even after adding those scripts, cloud-init was still being -executed and the configurations were being lost. The reason for this is -that the cloud-init systemd units had a wrong dependency - - WantedBy: multi-user.target - - Which would start them every time no matter the return of -ds-identify. The fix is to replace the dependency by the systemd unit to -cloud-init.target, which is the main cloud-init target enabled - or in -this case, disabled by ds-identify. The file cloud-init.target was also -missing on rpm package. - -After adding both scripts, the main cloud-init systemd target and -adjusting the systemd dependencies the configuration persists after -reboots and shutdowns. 
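
The mechanism described above is small enough to sketch: the generator runs
early in boot, asks ds-identify whether any datasource is present, and only
on success enables cloud-init.target, the target all four cloud-init services
now hang off via WantedBy. A schematic sketch in Python pseudocode (the
shipped generator is a shell script, and the exact paths below are
illustrative assumptions):

    import os
    import subprocess

    def generate(early_dir):
        # ds-identify exits zero when a usable datasource is found
        found = subprocess.call(["/usr/libexec/cloud-init/ds-identify"]) == 0
        if found:
            wants = os.path.join(early_dir, "multi-user.target.wants")
            os.makedirs(wants, exist_ok=True)
            os.symlink("/usr/lib/systemd/system/cloud-init.target",
                       os.path.join(wants, "cloud-init.target"))
        # otherwise cloud-init.target stays disabled and no unit starts
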
- -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - redhat/cloud-init.spec.template | 15 +++++++++++++++ - rhel/systemd/cloud-config.service | 2 +- - rhel/systemd/cloud-final.service | 2 +- - rhel/systemd/cloud-init-local.service | 2 +- - rhel/systemd/cloud-init.service | 2 +- - rhel/systemd/cloud-init.target | 7 +++++++ - 6 files changed, 26 insertions(+), 4 deletions(-) - create mode 100644 rhel/systemd/cloud-init.target - -diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service -index 12ca9df..f3dcd4b 100644 ---- a/rhel/systemd/cloud-config.service -+++ b/rhel/systemd/cloud-config.service -@@ -15,4 +15,4 @@ TimeoutSec=0 - StandardOutput=journal+console - - [Install] --WantedBy=multi-user.target -+WantedBy=cloud-init.target -diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service -index 32a83d8..739b7e3 100644 ---- a/rhel/systemd/cloud-final.service -+++ b/rhel/systemd/cloud-final.service -@@ -16,4 +16,4 @@ KillMode=process - StandardOutput=journal+console - - [Install] --WantedBy=multi-user.target -+WantedBy=cloud-init.target -diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service -index 656eddb..8f9f6c9 100644 ---- a/rhel/systemd/cloud-init-local.service -+++ b/rhel/systemd/cloud-init-local.service -@@ -28,4 +28,4 @@ TimeoutSec=0 - StandardOutput=journal+console - - [Install] --WantedBy=multi-user.target -+WantedBy=cloud-init.target -diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service -index 68fc5f1..d0023a0 100644 ---- a/rhel/systemd/cloud-init.service -+++ b/rhel/systemd/cloud-init.service -@@ -22,4 +22,4 @@ TimeoutSec=0 - StandardOutput=journal+console - - [Install] --WantedBy=multi-user.target -+WantedBy=cloud-init.target -diff --git a/rhel/systemd/cloud-init.target b/rhel/systemd/cloud-init.target -new file mode 100644 -index 0000000..083c3b6 ---- /dev/null -+++ b/rhel/systemd/cloud-init.target -@@ -0,0 +1,7 @@ -+# cloud-init target is enabled by cloud-init-generator -+# To disable it you can either: -+# a.) boot with kernel cmdline of 'cloud-init=disabled' -+# b.) 
touch a file /etc/cloud/cloud-init.disabled -+[Unit] -+Description=Cloud-init target -+After=multi-user.target --- -1.8.3.1 - diff --git a/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch b/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch deleted file mode 100644 index 85bda76..0000000 --- a/SOURCES/ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch +++ /dev/null @@ -1,672 +0,0 @@ -From ccae8d2ac218366c529aac03b29c46400843d4a0 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 5 May 2020 08:08:09 +0200 -Subject: [PATCH 1/5] New data source for the Exoscale.com cloud platform - -RH-Author: Eduardo Otubo -Message-id: <20200504085238.25884-2-otubo@redhat.com> -Patchwork-id: 96244 -O-Subject: [RHEL-7.8.z cloud-init PATCH 1/5] New data source for the Exoscale.com cloud platform -Bugzilla: 1827207 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -commit 4dfed67d0e82970f8717d0b524c593962698ca4f -Author: Chris Glass -Date: Thu Aug 8 17:09:57 2019 +0000 - - New data source for the Exoscale.com cloud platform - - - dsidentify switches to the new Exoscale datasource on matching DMI name - - New Exoscale datasource added - - Signed-off-by: Mathieu Corbin - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/apport.py | 1 + - cloudinit/settings.py | 1 + - cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ - doc/rtd/topics/datasources.rst | 1 + - doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ - tests/unittests/test_datasource/test_common.py | 2 + - tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ - tools/ds-identify | 7 +- - 8 files changed, 540 insertions(+), 1 deletion(-) - create mode 100644 cloudinit/sources/DataSourceExoscale.py - create mode 100644 doc/rtd/topics/datasources/exoscale.rst - create mode 100644 tests/unittests/test_datasource/test_exoscale.py - -diff --git a/cloudinit/apport.py b/cloudinit/apport.py -index 22cb7fd..003ff1f 100644 ---- a/cloudinit/apport.py -+++ b/cloudinit/apport.py -@@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ - 'CloudStack', - 'DigitalOcean', - 'GCE - Google Compute Engine', -+ 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', -diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index d982a4d..229b420 100644 ---- a/cloudinit/settings.py -+++ b/cloudinit/settings.py -@@ -39,6 +39,7 @@ CFG_BUILTIN = { - 'Hetzner', - 'IBMCloud', - 'Oracle', -+ 'Exoscale', - # At the end to act as a 'catch' when none of the above work... - 'None', - ], -diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py -new file mode 100644 -index 0000000..52e7f6f ---- /dev/null -+++ b/cloudinit/sources/DataSourceExoscale.py -@@ -0,0 +1,258 @@ -+# Author: Mathieu Corbin -+# Author: Christopher Glass -+# -+# This file is part of cloud-init. See LICENSE file for license information. 
-+ -+from cloudinit import ec2_utils as ec2 -+from cloudinit import log as logging -+from cloudinit import sources -+from cloudinit import url_helper -+from cloudinit import util -+ -+LOG = logging.getLogger(__name__) -+ -+METADATA_URL = "http://169.254.169.254" -+API_VERSION = "1.0" -+PASSWORD_SERVER_PORT = 8080 -+ -+URL_TIMEOUT = 10 -+URL_RETRIES = 6 -+ -+EXOSCALE_DMI_NAME = "Exoscale" -+ -+BUILTIN_DS_CONFIG = { -+ # We run the set password config module on every boot in order to enable -+ # resetting the instance's password via the exoscale console (and a -+ # subsequent instance reboot). -+ 'cloud_config_modules': [["set-passwords", "always"]] -+} -+ -+ -+class DataSourceExoscale(sources.DataSource): -+ -+ dsname = 'Exoscale' -+ -+ def __init__(self, sys_cfg, distro, paths): -+ super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) -+ LOG.debug("Initializing the Exoscale datasource") -+ -+ self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) -+ self.api_version = self.ds_cfg.get('api_version', API_VERSION) -+ self.password_server_port = int( -+ self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) -+ self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) -+ self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) -+ -+ self.extra_config = BUILTIN_DS_CONFIG -+ -+ def wait_for_metadata_service(self): -+ """Wait for the metadata service to be reachable.""" -+ -+ metadata_url = "{}/{}/meta-data/instance-id".format( -+ self.metadata_url, self.api_version) -+ -+ url = url_helper.wait_for_url( -+ urls=[metadata_url], -+ max_wait=self.url_max_wait, -+ timeout=self.url_timeout, -+ status_cb=LOG.critical) -+ -+ return bool(url) -+ -+ def crawl_metadata(self): -+ """ -+ Crawl the metadata service when available. -+ -+ @returns: Dictionary of crawled metadata content. -+ """ -+ metadata_ready = util.log_time( -+ logfunc=LOG.info, -+ msg='waiting for the metadata service', -+ func=self.wait_for_metadata_service) -+ -+ if not metadata_ready: -+ return {} -+ -+ return read_metadata(self.metadata_url, self.api_version, -+ self.password_server_port, self.url_timeout, -+ self.url_retries) -+ -+ def _get_data(self): -+ """Fetch the user data, the metadata and the VM password -+ from the metadata service. -+ -+ Please refer to the datasource documentation for details on how the -+ metadata server and password server are crawled. -+ """ -+ if not self._is_platform_viable(): -+ return False -+ -+ data = util.log_time( -+ logfunc=LOG.debug, -+ msg='Crawl of metadata service', -+ func=self.crawl_metadata) -+ -+ if not data: -+ return False -+ -+ self.userdata_raw = data['user-data'] -+ self.metadata = data['meta-data'] -+ password = data.get('password') -+ -+ password_config = {} -+ if password: -+ # Since we have a password, let's make sure we are allowed to use -+ # it by allowing ssh_pwauth. -+ # The password module's default behavior is to leave the -+ # configuration as-is in this regard, so that means it will either -+ # leave the password always disabled if no password is ever set, or -+ # leave the password login enabled if we set it once. 
-+ password_config = { -+ 'ssh_pwauth': True, -+ 'password': password, -+ 'chpasswd': { -+ 'expire': False, -+ }, -+ } -+ -+ # builtin extra_config overrides password_config -+ self.extra_config = util.mergemanydict( -+ [self.extra_config, password_config]) -+ -+ return True -+ -+ def get_config_obj(self): -+ return self.extra_config -+ -+ def _is_platform_viable(self): -+ return util.read_dmi_data('system-product-name').startswith( -+ EXOSCALE_DMI_NAME) -+ -+ -+# Used to match classes to dependencies -+datasources = [ -+ (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -+] -+ -+ -+# Return a list of data sources that match this set of dependencies -+def get_datasource_list(depends): -+ return sources.list_from_depends(depends, datasources) -+ -+ -+def get_password(metadata_url=METADATA_URL, -+ api_version=API_VERSION, -+ password_server_port=PASSWORD_SERVER_PORT, -+ url_timeout=URL_TIMEOUT, -+ url_retries=URL_RETRIES): -+ """Obtain the VM's password if set. -+ -+ Once fetched the password is marked saved. Future calls to this method may -+ return empty string or 'saved_password'.""" -+ password_url = "{}:{}/{}/".format(metadata_url, password_server_port, -+ api_version) -+ response = url_helper.read_file_or_url( -+ password_url, -+ ssl_details=None, -+ headers={"DomU_Request": "send_my_password"}, -+ timeout=url_timeout, -+ retries=url_retries) -+ password = response.contents.decode('utf-8') -+ # the password is empty or already saved -+ # Note: the original metadata server would answer an additional -+ # 'bad_request' status, but the Exoscale implementation does not. -+ if password in ['', 'saved_password']: -+ return None -+ # save the password -+ url_helper.read_file_or_url( -+ password_url, -+ ssl_details=None, -+ headers={"DomU_Request": "saved_password"}, -+ timeout=url_timeout, -+ retries=url_retries) -+ return password -+ -+ -+def read_metadata(metadata_url=METADATA_URL, -+ api_version=API_VERSION, -+ password_server_port=PASSWORD_SERVER_PORT, -+ url_timeout=URL_TIMEOUT, -+ url_retries=URL_RETRIES): -+ """Query the metadata server and return the retrieved data.""" -+ crawled_metadata = {} -+ crawled_metadata['_metadata_api_version'] = api_version -+ try: -+ crawled_metadata['user-data'] = ec2.get_instance_userdata( -+ api_version, -+ metadata_url, -+ timeout=url_timeout, -+ retries=url_retries) -+ crawled_metadata['meta-data'] = ec2.get_instance_metadata( -+ api_version, -+ metadata_url, -+ timeout=url_timeout, -+ retries=url_retries) -+ except Exception as e: -+ util.logexc(LOG, "failed reading from metadata url %s (%s)", -+ metadata_url, e) -+ return {} -+ -+ try: -+ crawled_metadata['password'] = get_password( -+ api_version=api_version, -+ metadata_url=metadata_url, -+ password_server_port=password_server_port, -+ url_retries=url_retries, -+ url_timeout=url_timeout) -+ except Exception as e: -+ util.logexc(LOG, "failed to read from password server url %s:%s (%s)", -+ metadata_url, password_server_port, e) -+ -+ return crawled_metadata -+ -+ -+if __name__ == "__main__": -+ import argparse -+ -+ parser = argparse.ArgumentParser(description='Query Exoscale Metadata') -+ parser.add_argument( -+ "--endpoint", -+ metavar="URL", -+ help="The url of the metadata service.", -+ default=METADATA_URL) -+ parser.add_argument( -+ "--version", -+ metavar="VERSION", -+ help="The version of the metadata endpoint to query.", -+ default=API_VERSION) -+ parser.add_argument( -+ "--retries", -+ metavar="NUM", -+ type=int, -+ help="The number of retries querying the endpoint.", 
-+ default=URL_RETRIES) -+ parser.add_argument( -+ "--timeout", -+ metavar="NUM", -+ type=int, -+ help="The time in seconds to wait before timing out.", -+ default=URL_TIMEOUT) -+ parser.add_argument( -+ "--password-port", -+ metavar="PORT", -+ type=int, -+ help="The port on which the password endpoint listens", -+ default=PASSWORD_SERVER_PORT) -+ -+ args = parser.parse_args() -+ -+ data = read_metadata( -+ metadata_url=args.endpoint, -+ api_version=args.version, -+ password_server_port=args.password_port, -+ url_timeout=args.timeout, -+ url_retries=args.retries) -+ -+ print(util.json_dumps(data)) -+ -+# vi: ts=4 expandtab -diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst -index e34f145..fcfd91a 100644 ---- a/doc/rtd/topics/datasources.rst -+++ b/doc/rtd/topics/datasources.rst -@@ -96,6 +96,7 @@ Follow for more information. - datasources/configdrive.rst - datasources/digitalocean.rst - datasources/ec2.rst -+ datasources/exoscale.rst - datasources/maas.rst - datasources/nocloud.rst - datasources/opennebula.rst -diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst -new file mode 100644 -index 0000000..27aec9c ---- /dev/null -+++ b/doc/rtd/topics/datasources/exoscale.rst -@@ -0,0 +1,68 @@ -+.. _datasource_exoscale: -+ -+Exoscale -+======== -+ -+This datasource supports reading from the metadata server used on the -+`Exoscale platform `_. -+ -+Use of the Exoscale datasource is recommended to benefit from new features of -+the Exoscale platform. -+ -+The datasource relies on the availability of a compatible metadata server -+(``http://169.254.169.254`` is used by default) and its companion password -+server, reachable at the same address (by default on port 8080). -+ -+Crawling of metadata -+-------------------- -+ -+The metadata service and password server are crawled slightly differently: -+ -+ * The "metadata service" is crawled every boot. -+ * The password server is also crawled every boot (the Exoscale datasource -+ forces the password module to run with "frequency always"). -+ -+In the password server case, the following rules apply in order to enable the -+"restore instance password" functionality: -+ -+ * If a password is returned by the password server, it is then marked "saved" -+ by the cloud-init datasource. Subsequent boots will skip setting the password -+ (the password server will return "saved_password"). -+ * When the instance password is reset (via the Exoscale UI), the password -+ server will return the non-empty password at next boot, therefore causing -+ cloud-init to reset the instance's password. -+ -+Configuration -+------------- -+ -+Users of this datasource are discouraged from changing the default settings -+unless instructed to by Exoscale support. -+ -+The following settings are available and can be set for the datasource in system -+configuration (in `/etc/cloud/cloud.cfg.d/`). -+ -+The settings available are: -+ -+ * **metadata_url**: The URL for the metadata service (defaults to -+ ``http://169.254.169.254``) -+ * **api_version**: The API version path on which to query the instance metadata -+ (defaults to ``1.0``) -+ * **password_server_port**: The port (on the metadata server) on which the -+ password server listens (defaults to ``8080``). -+ * **timeout**: the timeout value provided to urlopen for each individual http -+ request. 
(defaults to ``10``) -+ * **retries**: The number of retries that should be done for an http request -+ (defaults to ``6``) -+ -+ -+An example configuration with the default values is provided below: -+ -+.. sourcecode:: yaml -+ -+ datasource: -+ Exoscale: -+ metadata_url: "http://169.254.169.254" -+ api_version: "1.0" -+ password_server_port: 8080 -+ timeout: 10 -+ retries: 6 -diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py -index 6b01a4e..24b0fac 100644 ---- a/tests/unittests/test_datasource/test_common.py -+++ b/tests/unittests/test_datasource/test_common.py -@@ -13,6 +13,7 @@ from cloudinit.sources import ( - DataSourceConfigDrive as ConfigDrive, - DataSourceDigitalOcean as DigitalOcean, - DataSourceEc2 as Ec2, -+ DataSourceExoscale as Exoscale, - DataSourceGCE as GCE, - DataSourceHetzner as Hetzner, - DataSourceIBMCloud as IBMCloud, -@@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ - CloudStack.DataSourceCloudStack, - DSNone.DataSourceNone, - Ec2.DataSourceEc2, -+ Exoscale.DataSourceExoscale, - GCE.DataSourceGCE, - MAAS.DataSourceMAAS, - NoCloud.DataSourceNoCloudNet, -diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py -new file mode 100644 -index 0000000..350c330 ---- /dev/null -+++ b/tests/unittests/test_datasource/test_exoscale.py -@@ -0,0 +1,203 @@ -+# Author: Mathieu Corbin -+# Author: Christopher Glass -+# -+# This file is part of cloud-init. See LICENSE file for license information. -+from cloudinit import helpers -+from cloudinit.sources.DataSourceExoscale import ( -+ API_VERSION, -+ DataSourceExoscale, -+ METADATA_URL, -+ get_password, -+ PASSWORD_SERVER_PORT, -+ read_metadata) -+from cloudinit.tests.helpers import HttprettyTestCase, mock -+ -+import httpretty -+import requests -+ -+ -+TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, -+ PASSWORD_SERVER_PORT, -+ API_VERSION) -+ -+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, -+ API_VERSION) -+ -+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, -+ API_VERSION) -+ -+ -+@httpretty.activate -+class TestDatasourceExoscale(HttprettyTestCase): -+ -+ def setUp(self): -+ super(TestDatasourceExoscale, self).setUp() -+ self.tmp = self.tmp_dir() -+ self.password_url = TEST_PASSWORD_URL -+ self.metadata_url = TEST_METADATA_URL -+ self.userdata_url = TEST_USERDATA_URL -+ -+ def test_password_saved(self): -+ """The password is not set when it is not found -+ in the metadata service.""" -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body="saved_password") -+ self.assertFalse(get_password()) -+ -+ def test_password_empty(self): -+ """No password is set if the metadata service returns -+ an empty string.""" -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body="") -+ self.assertFalse(get_password()) -+ -+ def test_password(self): -+ """The password is set to what is found in the metadata -+ service.""" -+ expected_password = "p@ssw0rd" -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body=expected_password) -+ password = get_password() -+ self.assertEqual(expected_password, password) -+ -+ def test_get_data(self): -+ """The datasource conforms to expected behavior when supplied -+ full test data.""" -+ path = helpers.Paths({'run_dir': self.tmp}) -+ ds = DataSourceExoscale({}, None, path) -+ ds._is_platform_viable = lambda: True -+ expected_password = "p@ssw0rd" -+ expected_id = "12345" -+ expected_hostname = "myname" -+ expected_userdata = "#cloud-config" 
-+ httpretty.register_uri(httpretty.GET, -+ self.userdata_url, -+ body=expected_userdata) -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body=expected_password) -+ httpretty.register_uri(httpretty.GET, -+ self.metadata_url, -+ body="instance-id\nlocal-hostname") -+ httpretty.register_uri(httpretty.GET, -+ "{}local-hostname".format(self.metadata_url), -+ body=expected_hostname) -+ httpretty.register_uri(httpretty.GET, -+ "{}instance-id".format(self.metadata_url), -+ body=expected_id) -+ self.assertTrue(ds._get_data()) -+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") -+ self.assertEqual(ds.metadata, {"instance-id": expected_id, -+ "local-hostname": expected_hostname}) -+ self.assertEqual(ds.get_config_obj(), -+ {'ssh_pwauth': True, -+ 'password': expected_password, -+ 'cloud_config_modules': [ -+ ["set-passwords", "always"]], -+ 'chpasswd': { -+ 'expire': False, -+ }}) -+ -+ def test_get_data_saved_password(self): -+ """The datasource conforms to expected behavior when saved_password is -+ returned by the password server.""" -+ path = helpers.Paths({'run_dir': self.tmp}) -+ ds = DataSourceExoscale({}, None, path) -+ ds._is_platform_viable = lambda: True -+ expected_answer = "saved_password" -+ expected_id = "12345" -+ expected_hostname = "myname" -+ expected_userdata = "#cloud-config" -+ httpretty.register_uri(httpretty.GET, -+ self.userdata_url, -+ body=expected_userdata) -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body=expected_answer) -+ httpretty.register_uri(httpretty.GET, -+ self.metadata_url, -+ body="instance-id\nlocal-hostname") -+ httpretty.register_uri(httpretty.GET, -+ "{}local-hostname".format(self.metadata_url), -+ body=expected_hostname) -+ httpretty.register_uri(httpretty.GET, -+ "{}instance-id".format(self.metadata_url), -+ body=expected_id) -+ self.assertTrue(ds._get_data()) -+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") -+ self.assertEqual(ds.metadata, {"instance-id": expected_id, -+ "local-hostname": expected_hostname}) -+ self.assertEqual(ds.get_config_obj(), -+ {'cloud_config_modules': [ -+ ["set-passwords", "always"]]}) -+ -+ def test_get_data_no_password(self): -+ """The datasource conforms to expected behavior when no password is -+ returned by the password server.""" -+ path = helpers.Paths({'run_dir': self.tmp}) -+ ds = DataSourceExoscale({}, None, path) -+ ds._is_platform_viable = lambda: True -+ expected_answer = "" -+ expected_id = "12345" -+ expected_hostname = "myname" -+ expected_userdata = "#cloud-config" -+ httpretty.register_uri(httpretty.GET, -+ self.userdata_url, -+ body=expected_userdata) -+ httpretty.register_uri(httpretty.GET, -+ self.password_url, -+ body=expected_answer) -+ httpretty.register_uri(httpretty.GET, -+ self.metadata_url, -+ body="instance-id\nlocal-hostname") -+ httpretty.register_uri(httpretty.GET, -+ "{}local-hostname".format(self.metadata_url), -+ body=expected_hostname) -+ httpretty.register_uri(httpretty.GET, -+ "{}instance-id".format(self.metadata_url), -+ body=expected_id) -+ self.assertTrue(ds._get_data()) -+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") -+ self.assertEqual(ds.metadata, {"instance-id": expected_id, -+ "local-hostname": expected_hostname}) -+ self.assertEqual(ds.get_config_obj(), -+ {'cloud_config_modules': [ -+ ["set-passwords", "always"]]}) -+ -+ @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') -+ def test_read_metadata_when_password_server_unreachable(self, m_password): -+ """The 
read_metadata function returns partial results in case the -+ password server (only) is unreachable.""" -+ expected_id = "12345" -+ expected_hostname = "myname" -+ expected_userdata = "#cloud-config" -+ -+ m_password.side_effect = requests.Timeout('Fake Connection Timeout') -+ httpretty.register_uri(httpretty.GET, -+ self.userdata_url, -+ body=expected_userdata) -+ httpretty.register_uri(httpretty.GET, -+ self.metadata_url, -+ body="instance-id\nlocal-hostname") -+ httpretty.register_uri(httpretty.GET, -+ "{}local-hostname".format(self.metadata_url), -+ body=expected_hostname) -+ httpretty.register_uri(httpretty.GET, -+ "{}instance-id".format(self.metadata_url), -+ body=expected_id) -+ -+ result = read_metadata() -+ -+ self.assertIsNone(result.get("password")) -+ self.assertEqual(result.get("user-data").decode("utf-8"), -+ expected_userdata) -+ -+ def test_non_viable_platform(self): -+ """The datasource fails fast when the platform is not viable.""" -+ path = helpers.Paths({'run_dir': self.tmp}) -+ ds = DataSourceExoscale({}, None, path) -+ ds._is_platform_viable = lambda: False -+ self.assertFalse(ds._get_data()) -diff --git a/tools/ds-identify b/tools/ds-identify -index 1acfeeb..6c89b06 100755 ---- a/tools/ds-identify -+++ b/tools/ds-identify -@@ -124,7 +124,7 @@ DI_DSNAME="" - # be searched if there is no setting found in config. - DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ - CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ --OVF SmartOS Scaleway Hetzner IBMCloud Oracle" -+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" - DI_DSLIST="" - DI_MODE="" - DI_ON_FOUND="" -@@ -553,6 +553,11 @@ dscheck_CloudStack() { - return $DS_NOT_FOUND - } - -+dscheck_Exoscale() { -+ dmi_product_name_matches "Exoscale*" && return $DS_FOUND -+ return $DS_NOT_FOUND -+} -+ - dscheck_CloudSigma() { - # http://paste.ubuntu.com/23624795/ - dmi_product_name_matches "CloudSigma" && return $DS_FOUND --- -1.8.3.1 - diff --git a/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network-v2.patch b/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network-v2.patch new file mode 100644 index 0000000..e4b8e0a --- /dev/null +++ b/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network-v2.patch @@ -0,0 +1,49 @@ +From 291dbf3d63d8b591a9255853858e2540238a8ff0 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Mon, 27 Apr 2020 10:52:52 +0200 +Subject: [PATCH 1/2] Remove race condition between cloud-init and + NetworkManager + +RH-Author: Eduardo Otubo +Message-id: <20200327121911.17699-1-otubo@redhat.com> +Patchwork-id: 94453 +O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCHv2] Remove race condition between cloud-init and NetworkManager +Bugzilla: 1748015 +RH-Acked-by: Cathy Avery +RH-Acked-by: Miroslav Rezanina +RH-Acked-by: Vitaly Kuznetsov + +cloud-init service is set to start before NetworkManager service starts, +but this does not avoid a race condition between them. NetworkManager +starts before cloud-init can write `dns=none' to the file: +/etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager +doesn't read the configuration and erases all resolv.conf values upon +shutdown. On the next reboot neither cloud-init or NetworkManager will +write anything to resolv.conf, leaving it blank. + +This patch introduces a NM reload (try-reload-or-restart) at the end of cloud-init +start up so it won't erase resolv.conf upon first shutdown. 
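For context, the ordering this patch depends on can be sketched outside systemd (illustrative Python only; the helper names are hypothetical and not part of the patch):

    # sketch: cloud-init must write the NM drop-in, then make a running
    # NetworkManager re-read it, or NM keeps managing resolv.conf
    import subprocess

    NM_DROPIN = "/etc/NetworkManager/conf.d/99-cloud-init.conf"

    def write_dns_none():
        # 'dns=none' tells NetworkManager to leave resolv.conf alone
        with open(NM_DROPIN, "w") as f:
            f.write("[main]\ndns=none\n")

    def nudge_networkmanager():
        # NM reads conf.d only at startup or on reload; if it won the race
        # and started first, it must be poked after the drop-in is written
        subprocess.call(
            ["systemctl", "try-reload-or-restart", "NetworkManager.service"])

    write_dns_none()
    nudge_networkmanager()

This mirrors the ExecStartPost line added to cloud-final.service below, which performs the same nudge at the end of boot.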
+ +x-downstream-only: yes + +Signed-off-by: Eduardo Otubo otubo@redhat.com +--- + rhel/systemd/cloud-final.service | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +index f303483..8510520 100644 +--- a/rhel/systemd/cloud-final.service ++++ b/rhel/systemd/cloud-final.service +@@ -12,7 +12,7 @@ RemainAfterExit=yes + TimeoutSec=0 + KillMode=process + ExecStartPost=/bin/echo "try restart NetworkManager.service" +-ExecStartPost=/usr/bin/systemctl try-restart NetworkManager.service ++ExecStartPost=/usr/bin/systemctl try-reload-or-restart NetworkManager.service + + # Output needs to appear in instance console output + StandardOutput=journal+console +-- +1.8.3.1 + diff --git a/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network.patch b/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network.patch new file mode 100644 index 0000000..0351fca --- /dev/null +++ b/SOURCES/ci-Remove-race-condition-between-cloud-init-and-Network.patch @@ -0,0 +1,49 @@ +From 3b847118d283245c809172250b90a698372d7cdb Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Mon, 2 Mar 2020 10:46:35 +0100 +Subject: [PATCH 2/5] Remove race condition between cloud-init and + NetworkManager + +Message-id: <20200302104635.11648-1-otubo@redhat.com> +Patchwork-id: 94098 +O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Remove race condition between cloud-init and NetworkManager +Bugzilla: 1748015 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal + +cloud-init service is set to start before NetworkManager service starts, +but this does not avoid a race condition between them. NetworkManager +starts before cloud-init can write `dns=none' to the file: +/etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager +doesn't read the configuration and erases all resolv.conf values upon +shutdown. On the next reboot neither cloud-init or NetworkManager will +write anything to resolv.conf, leaving it blank. + +This patch introduces a NM reload (try-restart) at the end of cloud-init +start up so it won't erase resolv.conf upon first shutdown. 
+
+x-downstream-only: yes
+resolves: rhbz#1748015, rhbz#1807797 and rhbz#1804780
+
+Signed-off-by: Eduardo Otubo <otubo@redhat.com>
+Signed-off-by: Miroslav Rezanina
+---
+ rhel/systemd/cloud-final.service | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service
+index 739b7e3..f303483 100644
+--- a/rhel/systemd/cloud-final.service
++++ b/rhel/systemd/cloud-final.service
+@@ -11,6 +11,8 @@ ExecStart=/usr/bin/cloud-init modules --mode=final
+ RemainAfterExit=yes
+ TimeoutSec=0
+ KillMode=process
++ExecStartPost=/bin/echo "try restart NetworkManager.service"
++ExecStartPost=/usr/bin/systemctl try-restart NetworkManager.service
+
+ # Output needs to appear in instance console output
+ StandardOutput=journal+console
+--
+1.8.3.1
+
diff --git a/SOURCES/ci-Removing-cloud-user-from-wheel.patch b/SOURCES/ci-Removing-cloud-user-from-wheel.patch
new file mode 100644
index 0000000..140e121
--- /dev/null
+++ b/SOURCES/ci-Removing-cloud-user-from-wheel.patch
@@ -0,0 +1,43 @@
+From 2c0a8c2df07ad186e19cc9bde31b407c83d9ff40 Mon Sep 17 00:00:00 2001
+From: Eduardo Otubo
+Date: Thu, 23 Jan 2020 11:13:17 +0100
+Subject: [PATCH 1/5] Removing cloud-user from wheel
+
+Message-id: <20200123111317.15542-1-otubo@redhat.com>
+Patchwork-id: 93442
+O-Subject: [RHEL-7.9/RHEL-8 cloud-init PATCH] Removing cloud-user from wheel
+Bugzilla: 1549638
+RH-Acked-by: Vitaly Kuznetsov
+RH-Acked-by: Stefan Hajnoczi
+
+Including cloud-user in both sudoers and the wheel group makes the
+command `sudo -v' ask for a password. Besides this bogus behavior, it
+is unnecessary to have the user in both. Removing the user from the
+`wheel' group solves the issue.
+
+X-downstream-only: yes
+Resolves: rhbz#1549638
+Resolves: rhbz#1785648
+
+Signed-off-by: Eduardo Otubo
+Signed-off-by: Miroslav Rezanina
+---
+ rhel/cloud.cfg | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
+index f0db3c1..82e8bf6 100644
+--- a/rhel/cloud.cfg
++++ b/rhel/cloud.cfg
+@@ -57,7 +57,7 @@ system_info:
+     name: cloud-user
+     lock_passwd: true
+     gecos: Cloud User
+-    groups: [wheel, adm, systemd-journal]
++    groups: [adm, systemd-journal]
+     sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+     shell: /bin/bash
+   distro: rhel
+--
+1.8.3.1
+
diff --git a/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch b/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch
deleted file mode 100644
index 98d0289..0000000
--- a/SOURCES/ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From 49b0d2baf79199b3c9c0ce85cb0a7ac27e18cd3d Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo
-Date: Tue, 21 May 2019 13:42:00 +0200
-Subject: [PATCH] Revert: azure: ensure that networkmanager hook script runs
-
-RH-Author: Eduardo Otubo
-Message-id: <20190521134200.24783-1-otubo@redhat.com>
-Patchwork-id: 88133
-O-Subject: [rhel-7.7 cloud-init PATCH] Revert: azure: ensure that networkmanager hook script runs
-Bugzilla: 1707725
-RH-Acked-by: Mohammed Gamal
-RH-Acked-by: Vitaly Kuznetsov
-
-This patch reverts the commit:
-
-commit c48497435e8195dbd87262c2f00e484e63fe3343
-Author: Lars Kellogg-Stedman
-Date:   Thu Jun 15 12:20:39 2017 -0400
-
-    azure: ensure that networkmanager hook script runs
-
-    The networkmanager hook script was failing to run due to the changes
-    we made to resolve rhbz#1440831. This corrects the regression by
-    allowing the NM hook script to run regardless of whether or not
-    cloud-init is "enabled".
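The gate this revert restores is easier to see outside the diff; a sketch of it in Python (the real hooks are shell scripts, and the DMI asset tag value is Azure's well-known chassis tag, also used in the tests later in this series):

    # sketch: run the hook only if cloud-init is enabled AND the platform
    # is Azure; the revert below re-adds the 'enabled' file check that the
    # upstream commit had dropped
    import os

    AZURE_CHASSIS_TAG = '7783-7084-3265-9085-8269-3286-77'

    def is_azure():
        try:
            with open('/sys/class/dmi/id/chassis_asset_tag') as f:
                return f.read().strip() == AZURE_CHASSIS_TAG
        except IOError:
            return False

    def is_enabled():
        # without this check the hook runs on Azure regardless of whether
        # cloud-init itself is enabled
        return os.path.exists('/run/cloud-init/enabled') and is_azure()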
- - Resolves: rhbz#1460206 - X-downstream-only: true - -Resolves: rhbz:1707725 -X-downstream-only: yes - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - tools/hook-dhclient | 3 ++- - tools/hook-network-manager | 3 ++- - tools/hook-rhel.sh | 3 ++- - 3 files changed, 6 insertions(+), 3 deletions(-) - -diff --git a/tools/hook-dhclient b/tools/hook-dhclient -index 181cd51..02122f3 100755 ---- a/tools/hook-dhclient -+++ b/tools/hook-dhclient -@@ -13,7 +13,8 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init is running on azure -+ # only execute hooks if cloud-init is enabled and on azure -+ [ -e /run/cloud-init/enabled ] || return 1 - is_azure - } - -diff --git a/tools/hook-network-manager b/tools/hook-network-manager -index 1d52cad..67d9044 100755 ---- a/tools/hook-network-manager -+++ b/tools/hook-network-manager -@@ -13,7 +13,8 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init running on azure -+ # only execute hooks if cloud-init is enabled and on azure -+ [ -e /run/cloud-init/enabled ] || return 1 - is_azure - } - -diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh -index d75767e..513a551 100755 ---- a/tools/hook-rhel.sh -+++ b/tools/hook-rhel.sh -@@ -13,7 +13,8 @@ is_azure() { - } - - is_enabled() { -- # only execute hooks if cloud-init is running on azure -+ # only execute hooks if cloud-init is enabled and on azure -+ [ -e /run/cloud-init/enabled ] || return 1 - is_azure - } - --- -1.8.3.1 - diff --git a/SOURCES/ci-Use-reload-or-try-restart-instead-of-try-reload-or-r.patch b/SOURCES/ci-Use-reload-or-try-restart-instead-of-try-reload-or-r.patch new file mode 100644 index 0000000..e5177b1 --- /dev/null +++ b/SOURCES/ci-Use-reload-or-try-restart-instead-of-try-reload-or-r.patch @@ -0,0 +1,43 @@ +From 43026829fcd2154f3a6d94eb512e7fd956fde424 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Mon, 11 May 2020 09:24:12 +0200 +Subject: [PATCH 1/2] Use reload-or-try-restart instead of + try-reload-or-restart + +RH-Author: Eduardo Otubo +Message-id: <20200504091743.28013-1-otubo@redhat.com> +Patchwork-id: 96249 +O-Subject: [RHEL-7.9 cloud-init PATCH] Use reload-or-try-restart instead of try-reload-or-restart +Bugzilla: 1748015 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohammed Gamal + +The verb `try-reload-or-restart' is available only on systemd >= 229 and +RHEL-7.9 uses systemd = 219. Also this doesn't happen on RHEL-8.* which +uses 239. 
+ +x-downstream-only: yes +Resolves: rhbz#1748015 + +Signed-off-by: Eduardo Otubo +--- + rhel/systemd/cloud-final.service | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +index 8510520..621d4f8 100644 +--- a/rhel/systemd/cloud-final.service ++++ b/rhel/systemd/cloud-final.service +@@ -12,7 +12,8 @@ RemainAfterExit=yes + TimeoutSec=0 + KillMode=process + ExecStartPost=/bin/echo "try restart NetworkManager.service" +-ExecStartPost=/usr/bin/systemctl try-reload-or-restart NetworkManager.service ++# TODO: try-reload-or-restart is available only on systemd >= 229 ++ExecStartPost=/usr/bin/systemctl reload-or-try-restart NetworkManager.service + + # Output needs to appear in instance console output + StandardOutput=journal+console +-- +1.8.3.1 + diff --git a/SOURCES/ci-azure-avoid.patch b/SOURCES/ci-azure-avoid.patch deleted file mode 100644 index 96d78ab..0000000 --- a/SOURCES/ci-azure-avoid.patch +++ /dev/null @@ -1,213 +0,0 @@ -From 3b1b95b667a767c0e0711215c7b620cde016bcd7 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 10 Mar 2020 16:04:18 +0100 -Subject: [PATCH] azure: avoid - -Message-id: <20200310160418.887-1-otubo@redhat.com> -Patchwork-id: 94221 -O-Subject: [RHEL-8.1.0/RHEL-7.8.z/RHEL-7.7.z cloud-init PATCH] azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) -Bugzilla: 1810064 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -commit 129b1c4ea250619bd7caed7aaffacc796b0139f2 -Author: AOhassan <37305877+AOhassan@users.noreply.github.com> -Date: Thu Dec 12 13:51:42 2019 -0800 - - azure: avoid re-running cloud-init when instance-id is byte-swapped (#84) - - Azure stores the instance ID with an incorrect byte ordering for the - first three hyphen delimited parts. This results in invalid - is_new_instance checks forcing Azure datasource to recrawl the metadata - service. - - When persisting instance-id from the metadata service, swap the - instance-id string byte order such that it is consistent with - that returned by dmi information. Check whether the instance-id - string is a byte-swapped match when determining correctly whether - the Azure platform instance-id has actually changed. 
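A worked example makes the swap concrete (a sketch mirroring the is_byte_swapped() helper added further down in this patch; the UUIDs are the ones its tests use):

    # sketch: Azure's DMI UUID byte-swaps the first three hyphenated
    # groups relative to the fabric's instance-id (endianness mismatch)
    import textwrap

    def swap_bytestring(s):
        # reverse the two-character byte pairs within one group
        dd = textwrap.wrap(s, 2)
        dd.reverse()
        return ''.join(dd)

    dmi_uuid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
    parts = dmi_uuid.split('-')
    print('-'.join([swap_bytestring(p) for p in parts[:3]] + parts[3:]))
    # -> D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8, i.e. the same instance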
- -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 16 ++++++++++--- - cloudinit/sources/helpers/azure.py | 27 ++++++++++++++++++++++ - tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++--- - .../unittests/test_datasource/test_azure_helper.py | 19 +++++++++++++++ - 4 files changed, 80 insertions(+), 6 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 5baf8da..66bbe5e 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -28,7 +28,8 @@ from cloudinit.reporting import events - - from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, -- get_metadata_from_fabric) -+ get_metadata_from_fabric, -+ is_byte_swapped) - - LOG = logging.getLogger(__name__) - -@@ -458,8 +459,7 @@ class DataSourceAzure(sources.DataSource): - seed = _get_random_seed() - if seed: - crawled_data['metadata']['random_seed'] = seed -- crawled_data['metadata']['instance-id'] = util.read_dmi_data( -- 'system-uuid') -+ crawled_data['metadata']['instance-id'] = self._iid() - - if perform_reprovision: - LOG.info("Reporting ready to Azure after getting ReprovisionData") -@@ -530,6 +530,16 @@ class DataSourceAzure(sources.DataSource): - # quickly (local check only) if self.instance_id is still valid - return sources.instance_id_matches_system_uuid(self.get_instance_id()) - -+ def _iid(self, previous=None): -+ prev_iid_path = os.path.join( -+ self.paths.get_cpath('data'), 'instance-id') -+ iid = util.read_dmi_data('system-uuid') -+ if os.path.exists(prev_iid_path): -+ previous = util.load_file(prev_iid_path).strip() -+ if is_byte_swapped(previous, iid): -+ return previous -+ return iid -+ - @azure_ds_telemetry_reporter - def setup(self, is_new_instance): - if self._negotiated is False: -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index 82c4c8c..c2a57cc 100755 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -7,6 +7,7 @@ import re - import socket - import struct - import time -+import textwrap - - from cloudinit.net import dhcp - from cloudinit import stages -@@ -40,6 +41,32 @@ def azure_ds_telemetry_reporter(func): - return impl - - -+def is_byte_swapped(previous_id, current_id): -+ """ -+ Azure stores the instance ID with an incorrect byte ordering for the -+ first parts. This corrects the byte order such that it is consistent with -+ that returned by the metadata service. 
-+ """ -+ if previous_id == current_id: -+ return False -+ -+ def swap_bytestring(s, width=2): -+ dd = [byte for byte in textwrap.wrap(s, 2)] -+ dd.reverse() -+ return ''.join(dd) -+ -+ parts = current_id.split('-') -+ swapped_id = '-'.join([ -+ swap_bytestring(parts[0]), -+ swap_bytestring(parts[1]), -+ swap_bytestring(parts[2]), -+ parts[3], -+ parts[4] -+ ]) -+ -+ return previous_id == swapped_id -+ -+ - @contextmanager - def cd(newdir): - prevdir = os.getcwd() -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index bc8b42c..1fb0565 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -314,7 +314,7 @@ scbus-1 on xpt0 bus 0 - 'public-keys': [], - }) - -- self.instance_id = 'test-instance-id' -+ self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - - def _dmi_mocks(key): - if key == 'system-uuid': -@@ -511,7 +511,7 @@ fdescfs /dev/fd fdescfs rw 0 0 - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, -- 'instance-id': 'test-instance-id', -+ 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', - 'local-hostname': u'myhost', - 'random_seed': 'wild'} - -@@ -881,6 +881,24 @@ fdescfs /dev/fd fdescfs rw 0 0 - self.assertTrue(ret) - self.assertEqual('value', dsrc.metadata['test']) - -+ def test_instance_id_endianness(self): -+ """Return the previous iid when dmi uuid is the byteswapped iid.""" -+ ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) -+ # byte-swapped previous -+ write_file( -+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), -+ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') -+ ds.get_data() -+ self.assertEqual( -+ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) -+ # not byte-swapped previous -+ write_file( -+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), -+ '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') -+ ds.get_data() -+ self.assertEqual( -+ 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) -+ - def test_instance_id_from_dmidecode_used(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.get_data() -@@ -1080,7 +1098,7 @@ class TestAzureBounce(CiTestCase): - - def _dmi_mocks(key): - if key == 'system-uuid': -- return 'test-instance-id' -+ return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index bd006ab..7ad5cc1 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ b/tests/unittests/test_datasource/test_azure_helper.py -@@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): - goal_state = self._get_goal_state(instance_id=instance_id) - self.assertEqual(instance_id, goal_state.instance_id) - -+ def test_instance_id_byte_swap(self): -+ """Return true when previous_iid is byteswapped current_iid""" -+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" -+ current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" -+ self.assertTrue( -+ azure_helper.is_byte_swapped(previous_iid, current_iid)) -+ -+ def test_instance_id_no_byte_swap_same_instance_id(self): -+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" -+ current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" -+ self.assertFalse( -+ azure_helper.is_byte_swapped(previous_iid, current_iid)) -+ -+ def 
test_instance_id_no_byte_swap_diff_instance_id(self): -+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" -+ current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" -+ self.assertFalse( -+ azure_helper.is_byte_swapped(previous_iid, current_iid)) -+ - def test_certificates_xml_parsed_and_fetched_correctly(self): - http_client = mock.MagicMock() - certificates_url = 'TestCertificatesUrl' --- -1.8.3.1 - diff --git a/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch b/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch deleted file mode 100644 index 4fd4c55..0000000 --- a/SOURCES/ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch +++ /dev/null @@ -1,135 +0,0 @@ -From e3f04e297ce950ce0d183ca87a434ec932ae6a86 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 15 May 2019 12:15:29 +0200 -Subject: [PATCH 5/5] cc_mounts: check if mount -a on no-change fstab path - -RH-Author: Eduardo Otubo -Message-id: <20190515121529.11191-6-otubo@redhat.com> -Patchwork-id: 87886 -O-Subject: [rhel-7 cloud-init PATCHv2 5/5] cc_mounts: check if mount -a on no-change fstab path -Bugzilla: 1687565 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -From: "Jason Zions (MSFT)" - -BZ: 1687565 -BRANCH: rhel7/master-18.5 -UPSTREAM: acc25d8d -BREW: 21696239 - -commit acc25d8d7d603313059ac35b4253b504efc560a9 -Author: Jason Zions (MSFT) -Date: Wed May 8 22:47:07 2019 +0000 - - cc_mounts: check if mount -a on no-change fstab path - - Under some circumstances, cc_disk_setup may reformat volumes which - already appear in /etc/fstab (e.g. Azure ephemeral drive is reformatted - from NTFS to ext4 after service-heal). Normally, cc_mounts only calls - mount -a if it altered /etc/fstab. With this change cc_mounts will read - /proc/mounts and verify if configured mounts are already mounted and if - not raise flag to request a mount -a. This handles the case where no - changes to fstab occur but a mount -a is required due to change in - underlying device which prevented the .mount unit from running until - after disk was reformatted. - - LP: #1825596 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/config/cc_mounts.py | 11 ++++++++ - .../unittests/test_handler/test_handler_mounts.py | 30 +++++++++++++++++++++- - 2 files changed, 40 insertions(+), 1 deletion(-) - -diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py -index 339baba..123ffb8 100644 ---- a/cloudinit/config/cc_mounts.py -+++ b/cloudinit/config/cc_mounts.py -@@ -439,6 +439,7 @@ def handle(_name, cfg, cloud, log, _args): - - cc_lines = [] - needswap = False -+ need_mount_all = False - dirs = [] - for line in actlist: - # write 'comment' in the fs_mntops, entry, claiming this -@@ -449,11 +450,18 @@ def handle(_name, cfg, cloud, log, _args): - dirs.append(line[1]) - cc_lines.append('\t'.join(line)) - -+ mount_points = [v['mountpoint'] for k, v in util.mounts().items() -+ if 'mountpoint' in v] - for d in dirs: - try: - util.ensure_dir(d) - except Exception: - util.logexc(log, "Failed to make '%s' config-mount", d) -+ # dirs is list of directories on which a volume should be mounted. -+ # If any of them does not already show up in the list of current -+ # mount points, we will definitely need to do mount -a. 
-+ if not need_mount_all and d not in mount_points: -+ need_mount_all = True - - sadds = [WS.sub(" ", n) for n in cc_lines] - sdrops = [WS.sub(" ", n) for n in fstab_removed] -@@ -473,6 +481,9 @@ def handle(_name, cfg, cloud, log, _args): - log.debug("No changes to /etc/fstab made.") - else: - log.debug("Changes to fstab: %s", sops) -+ need_mount_all = True -+ -+ if need_mount_all: - activate_cmds.append(["mount", "-a"]) - if uses_systemd: - activate_cmds.append(["systemctl", "daemon-reload"]) -diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py -index 8fea6c2..0fb160b 100644 ---- a/tests/unittests/test_handler/test_handler_mounts.py -+++ b/tests/unittests/test_handler/test_handler_mounts.py -@@ -154,7 +154,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): - return_value=True) - - self.add_patch('cloudinit.config.cc_mounts.util.subp', -- 'mock_util_subp') -+ 'm_util_subp') -+ -+ self.add_patch('cloudinit.config.cc_mounts.util.mounts', -+ 'mock_util_mounts', -+ return_value={ -+ '/dev/sda1': {'fstype': 'ext4', -+ 'mountpoint': '/', -+ 'opts': 'rw,relatime,discard' -+ }}) - - self.mock_cloud = mock.Mock() - self.mock_log = mock.Mock() -@@ -230,4 +238,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - -+ def test_no_change_fstab_sets_needs_mount_all(self): -+ '''verify unchanged fstab entries are mounted if not call mount -a''' -+ fstab_original_content = ( -+ 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' -+ 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' -+ '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' -+ ) -+ fstab_expected_content = fstab_original_content -+ cc = {'mounts': [ -+ ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} -+ with open(cc_mounts.FSTAB_PATH, 'w') as fd: -+ fd.write(fstab_original_content) -+ with open(cc_mounts.FSTAB_PATH, 'r') as fd: -+ fstab_new_content = fd.read() -+ self.assertEqual(fstab_expected_content, fstab_new_content) -+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) -+ self.m_util_subp.assert_has_calls([ -+ mock.call(['mount', '-a']), -+ mock.call(['systemctl', 'daemon-reload'])]) -+ - # vi: ts=4 expandtab --- -1.8.3.1 - diff --git a/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch b/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch index d201e02..f0ed307 100644 --- a/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch +++ b/SOURCES/ci-cc_mounts-fix-incorrect-format-specifiers-316.patch @@ -1,15 +1,24 @@ -From 00b8210223ce3af97109df5cdb85b8e40541dd33 Mon Sep 17 00:00:00 2001 +From 20b0ac8b92ef134dbf8446c79da54011ddc5be2e Mon Sep 17 00:00:00 2001 From: Eduardo Otubo -Date: Tue, 28 Apr 2020 08:22:07 +0200 -Subject: [PATCH 3/3] cc_mounts: fix incorrect format specifiers (#316) +Date: Mon, 27 Apr 2020 10:53:07 +0200 +Subject: [PATCH 2/2] cc_mounts: fix incorrect format specifiers (#316) RH-Author: Eduardo Otubo -Message-id: <20200422130428.7663-4-otubo@redhat.com> -Patchwork-id: 96034 -O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 3/3] cc_mounts: fix incorrect format specifiers (#316) -Bugzilla: 1801094 +Message-id: <20200421081604.8658-1-otubo@redhat.com> +Patchwork-id: 96012 +O-Subject: [RHEL-7.9 cloud-init PATCH] cc_mounts: fix incorrect format specifiers (#316) +Bugzilla: 1772505 RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal + +BZ: 1772505 +BRANCH: 
rhel79/master-19.4 +BREW: 28082908 + +Conflicts: Not exactly a conflict, but removed optional notations +"variable: type" and "-> type" from function header create_swapfile() as +it is only available on Python >= 3.5 and this patch is for RHEL-7.9 +only (Python 2.*). The rest of the cherry-pick was clean. commit 9d7b35ce23aaf8741dd49b16e359c96591be3c76 Author: Daniel Watkins @@ -28,7 +37,7 @@ Signed-off-by: Miroslav Rezanina create mode 100644 cloudinit/config/tests/test_mounts.py diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py -index 811781f..7c4e104 100644 +index 0573026..7e8d63e 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -226,17 +226,17 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): diff --git a/SOURCES/ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch b/SOURCES/ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch new file mode 100644 index 0000000..4b30698 --- /dev/null +++ b/SOURCES/ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch @@ -0,0 +1,44 @@ +From 5123a7d9558a7d3bffd72f48554a1026ddacf624 Mon Sep 17 00:00:00 2001 +From: Jon Maloy +Date: Thu, 19 Mar 2020 07:03:22 +0100 +Subject: [PATCH 3/5] cc_set_password: increase random pwlength from 9 to 20 + (#189) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Message-id: <20200312182427.7304-2-jmaloy@redhat.com> +Patchwork-id: 94251 +O-Subject: [RHEL-7.9 cloud-init 1/1] cc_set_password: increase random pwlength from 9 to 20 (#189) +Bugzilla: 1812170 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Philippe Mathieu-Daudé + +From: Ryan Harper + +Increasing the bits of security from 52 to 115. + +LP: #1860795 +(cherry picked from commit 42788bf24a1a0a5421a2d00a7f59b59e38ba1a14) +Signed-off-by: Jon Maloy +--- + cloudinit/config/cc_set_passwords.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py +index c3c5b0f..0742234 100755 +--- a/cloudinit/config/cc_set_passwords.py ++++ b/cloudinit/config/cc_set_passwords.py +@@ -236,7 +236,7 @@ def handle(_name, cfg, cloud, log, args): + raise errors[-1] + + +-def rand_user_password(pwlen=9): ++def rand_user_password(pwlen=20): + return util.rand_str(pwlen, select_from=PW_SET) + + +-- +1.8.3.1 + diff --git a/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch b/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch deleted file mode 100644 index 5d49fbb..0000000 --- a/SOURCES/ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 2b7bcfb3cfb6ac668627b26c83a2d60a29a75392 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 14 Apr 2020 14:21:35 +0200 -Subject: [PATCH] cmd:main.py: Fix missing 'modules-init' key in modes dict - -RH-Author: Eduardo Otubo -Message-id: <20200414104642.19930-1-otubo@redhat.com> -Patchwork-id: 94672 -O-Subject: [RHEL-7.8.z cloud-init PATCH] cmd:main.py: Fix missing 'modules-init' key in modes dict -Bugzilla: 1802173 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Cathy Avery - -commit bdd9c0ac9bcd68ec1ac3b2038dad0ba3dbd83341 -Author: Antonio Romito -Date: Tue Apr 9 14:54:23 2019 +0000 - - cmd:main.py: Fix missing 'modules-init' key in modes dict - - Cloud-init's main.py will fail when presented with a new - stage name 'modules-init' if upgrading an older cloud-init. 
-    Fix this by initializing unknown stage names before accessing.
-
-    LP: #1815109
-
-Signed-off-by: Eduardo Otubo
-Signed-off-by: Miroslav Rezanina
----
- cloudinit/cmd/main.py | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
-diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
-index 933c019..a5446da 100644
---- a/cloudinit/cmd/main.py
-+++ b/cloudinit/cmd/main.py
-@@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None):
-         'start': None,
-         'finished': None,
-     }
-+
-     if status is None:
-         status = {'v1': {}}
--        for m in modes:
--            status['v1'][m] = nullstatus.copy()
-         status['v1']['datasource'] = None
--    elif mode not in status['v1']:
--        status['v1'][mode] = nullstatus.copy()
-+
-+    for m in modes:
-+        if m not in status['v1']:
-+            status['v1'][m] = nullstatus.copy()
-
-     v1 = status['v1']
-     v1['stage'] = mode
---
-1.8.3.1
-
diff --git a/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch b/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch
deleted file mode 100644
index 0213fad..0000000
--- a/SOURCES/ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch
+++ /dev/null
@@ -1,594 +0,0 @@
-From 68b3718124b63fdf0c077452b559f0fccb01200d Mon Sep 17 00:00:00 2001
-From: Eduardo Otubo
-Date: Tue, 5 May 2020 08:08:32 +0200
-Subject: [PATCH 5/5] ec2: Add support for AWS IMDS v2 (session-oriented) (#55)
-
-RH-Author: Eduardo Otubo
-Message-id: <20200504085238.25884-6-otubo@redhat.com>
-Patchwork-id: 96245
-O-Subject: [RHEL-7.8.z cloud-init PATCH 5/5] ec2: Add support for AWS IMDS v2 (session-oriented) (#55)
-Bugzilla: 1827207
-RH-Acked-by: Cathy Avery
-RH-Acked-by: Mohammed Gamal
-RH-Acked-by: Vitaly Kuznetsov
-
-commit 4bc399e0cd0b7e9177f948aecd49f6b8323ff30b
-Author: Ryan Harper
-Date:   Fri Nov 22 21:05:44 2019 -0600
-
-    ec2: Add support for AWS IMDS v2 (session-oriented) (#55)
-
-    * ec2: Add support for AWS IMDS v2 (session-oriented)
-
-    AWS now supports a new version of fetching Instance Metadata[1].
-
-    Update cloud-init's ec2 utility functions and update ec2 derived
-    datasources accordingly. For DataSourceEc2 (versus ec2-look-alikes)
-    cloud-init will issue the PUT request to obtain an API token for
-    the maximum lifetime and then all subsequent interactions with the
-    IMDS will include the token in the header.
-
-    If the API token endpoint is unreachable on the Ec2 platform, log a
-    warning and fall back to using IMDS v1, which does not use
-    session tokens when communicating with the Instance metadata
-    service.
-
-    We handle read errors, typically seen if the IMDS is beyond one
-    network hop (IMDSv2 responses have a ttl=1), by setting the api token
-    to a disabled value and then using IMDSv1 paths.
-
-    To support token-based headers, ec2_utils functions were updated
-    to support custom headers_cb and exception_cb callback functions
-    so Ec2 could store or refresh API tokens in the event of a token
-    becoming stale.
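The session flow described above reduces to two HTTP calls; a minimal sketch using the requests library rather than cloud-init's own url_helper (the header names are the ones this patch series defines):

    # sketch: IMDSv2 -- PUT to get a session token, then GET with the token
    import requests

    IMDS = "http://169.254.169.254"

    token = requests.put(
        IMDS + "/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
        timeout=1).text

    instance_id = requests.get(
        IMDS + "/latest/meta-data/instance-id",
        headers={"X-aws-ec2-metadata-token": token},
        timeout=1).text
    # a 401 on the GET means the token went stale: repeat the PUT and retry;
    # a read timeout on the PUT means IMDSv2 is unreachable, fall back to v1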
- - [1] https://docs.aws.amazon.com/AWSEC2/latest/ \ - UserGuide/ec2-instance-metadata.html \ - #instance-metadata-v2-how-it-works - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/ec2_utils.py | 37 +++-- - cloudinit/sources/DataSourceCloudStack.py | 2 +- - cloudinit/sources/DataSourceEc2.py | 166 ++++++++++++++++++--- - cloudinit/sources/DataSourceExoscale.py | 2 +- - cloudinit/sources/DataSourceMAAS.py | 2 +- - cloudinit/sources/DataSourceOpenStack.py | 2 +- - cloudinit/url_helper.py | 15 +- - tests/unittests/test_datasource/test_cloudstack.py | 21 ++- - tests/unittests/test_datasource/test_ec2.py | 6 +- - 9 files changed, 201 insertions(+), 52 deletions(-) - -diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py -index 3b7b17f..57708c1 100644 ---- a/cloudinit/ec2_utils.py -+++ b/cloudinit/ec2_utils.py -@@ -134,25 +134,28 @@ class MetadataMaterializer(object): - return joined - - --def _skip_retry_on_codes(status_codes, _request_args, cause): -+def skip_retry_on_codes(status_codes, _request_args, cause): - """Returns False if cause.code is in status_codes.""" - return cause.code not in status_codes - - - def get_instance_userdata(api_version='latest', - metadata_address='http://169.254.169.254', -- ssl_details=None, timeout=5, retries=5): -+ ssl_details=None, timeout=5, retries=5, -+ headers_cb=None, exception_cb=None): - ud_url = url_helper.combine_url(metadata_address, api_version) - ud_url = url_helper.combine_url(ud_url, 'user-data') - user_data = '' - try: -- # It is ok for userdata to not exist (thats why we are stopping if -- # NOT_FOUND occurs) and just in that case returning an empty string. -- exception_cb = functools.partial(_skip_retry_on_codes, -- SKIP_USERDATA_CODES) -+ if not exception_cb: -+ # It is ok for userdata to not exist (thats why we are stopping if -+ # NOT_FOUND occurs) and just in that case returning an empty -+ # string. -+ exception_cb = functools.partial(skip_retry_on_codes, -+ SKIP_USERDATA_CODES) - response = url_helper.read_file_or_url( - ud_url, ssl_details=ssl_details, timeout=timeout, -- retries=retries, exception_cb=exception_cb) -+ retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) - user_data = response.contents - except url_helper.UrlError as e: - if e.code not in SKIP_USERDATA_CODES: -@@ -165,11 +168,13 @@ def get_instance_userdata(api_version='latest', - def _get_instance_metadata(tree, api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, -- leaf_decoder=None): -+ leaf_decoder=None, headers_cb=None, -+ exception_cb=None): - md_url = url_helper.combine_url(metadata_address, api_version, tree) - caller = functools.partial( - url_helper.read_file_or_url, ssl_details=ssl_details, -- timeout=timeout, retries=retries) -+ timeout=timeout, retries=retries, headers_cb=headers_cb, -+ exception_cb=exception_cb) - - def mcaller(url): - return caller(url).contents -@@ -191,22 +196,28 @@ def _get_instance_metadata(tree, api_version='latest', - def get_instance_metadata(api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, -- leaf_decoder=None): -+ leaf_decoder=None, headers_cb=None, -+ exception_cb=None): - # Note, 'meta-data' explicitly has trailing /. 
- # this is required for CloudStack (LP: #1356855) - return _get_instance_metadata(tree='meta-data/', api_version=api_version, - metadata_address=metadata_address, - ssl_details=ssl_details, timeout=timeout, -- retries=retries, leaf_decoder=leaf_decoder) -+ retries=retries, leaf_decoder=leaf_decoder, -+ headers_cb=headers_cb, -+ exception_cb=exception_cb) - - - def get_instance_identity(api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, -- leaf_decoder=None): -+ leaf_decoder=None, headers_cb=None, -+ exception_cb=None): - return _get_instance_metadata(tree='dynamic/instance-identity', - api_version=api_version, - metadata_address=metadata_address, - ssl_details=ssl_details, timeout=timeout, -- retries=retries, leaf_decoder=leaf_decoder) -+ retries=retries, leaf_decoder=leaf_decoder, -+ headers_cb=headers_cb, -+ exception_cb=exception_cb) - # vi: ts=4 expandtab -diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py -index d4b758f..6bd2efe 100644 ---- a/cloudinit/sources/DataSourceCloudStack.py -+++ b/cloudinit/sources/DataSourceCloudStack.py -@@ -93,7 +93,7 @@ class DataSourceCloudStack(sources.DataSource): - urls = [uhelp.combine_url(self.metadata_address, - 'latest/meta-data/instance-id')] - start_time = time.time() -- url = uhelp.wait_for_url( -+ url, _response = uhelp.wait_for_url( - urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) - -diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py -index 9ccf2cd..fbe8f3f 100644 ---- a/cloudinit/sources/DataSourceEc2.py -+++ b/cloudinit/sources/DataSourceEc2.py -@@ -27,6 +27,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) - STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") - STRICT_ID_DEFAULT = "warn" - -+API_TOKEN_ROUTE = 'latest/api/token' -+API_TOKEN_DISABLED = '_ec2_disable_api_token' -+AWS_TOKEN_TTL_SECONDS = '21600' -+ - - class CloudNames(object): - ALIYUN = "aliyun" -@@ -59,6 +63,7 @@ class DataSourceEc2(sources.DataSource): - url_max_wait = 120 - url_timeout = 50 - -+ _api_token = None # API token for accessing the metadata service - _network_config = sources.UNSET # Used to cache calculated network cfg v1 - - # Whether we want to get network configuration from the metadata service. -@@ -132,11 +137,12 @@ class DataSourceEc2(sources.DataSource): - min_metadata_version. - """ - # Assumes metadata service is already up -+ url_tmpl = '{0}/{1}/meta-data/instance-id' -+ headers = self._get_headers() - for api_ver in self.extended_metadata_versions: -- url = '{0}/{1}/meta-data/instance-id'.format( -- self.metadata_address, api_ver) -+ url = url_tmpl.format(self.metadata_address, api_ver) - try: -- resp = uhelp.readurl(url=url) -+ resp = uhelp.readurl(url=url, headers=headers) - except uhelp.UrlError as e: - LOG.debug('url %s raised exception %s', url, e) - else: -@@ -156,12 +162,39 @@ class DataSourceEc2(sources.DataSource): - # setup self.identity. So we need to do that now. 
- api_version = self.get_metadata_api_version() - self.identity = ec2.get_instance_identity( -- api_version, self.metadata_address).get('document', {}) -+ api_version, self.metadata_address, -+ headers_cb=self._get_headers, -+ exception_cb=self._refresh_stale_aws_token_cb).get( -+ 'document', {}) - return self.identity.get( - 'instanceId', self.metadata['instance-id']) - else: - return self.metadata['instance-id'] - -+ def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): -+ if self.cloud_name != CloudNames.AWS: -+ return -+ -+ urls = [] -+ url2base = {} -+ url_path = API_TOKEN_ROUTE -+ request_method = 'PUT' -+ for url in mdurls: -+ cur = '{0}/{1}'.format(url, url_path) -+ urls.append(cur) -+ url2base[cur] = url -+ -+ # use the self._status_cb to check for Read errors, which means -+ # we can't reach the API token URL, so we should disable IMDSv2 -+ LOG.debug('Fetching Ec2 IMDSv2 API Token') -+ url, response = uhelp.wait_for_url( -+ urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, -+ headers_cb=self._get_headers, request_method=request_method) -+ -+ if url and response: -+ self._api_token = response -+ return url2base[url] -+ - def wait_for_metadata_service(self): - mcfg = self.ds_cfg - -@@ -183,27 +216,39 @@ class DataSourceEc2(sources.DataSource): - LOG.warning("Empty metadata url list! using default list") - mdurls = self.metadata_urls - -- urls = [] -- url2base = {} -- for url in mdurls: -- cur = '{0}/{1}/meta-data/instance-id'.format( -- url, self.min_metadata_version) -- urls.append(cur) -- url2base[cur] = url -- -- start_time = time.time() -- url = uhelp.wait_for_url( -- urls=urls, max_wait=url_params.max_wait_seconds, -- timeout=url_params.timeout_seconds, status_cb=LOG.warn) -- -- if url: -- self.metadata_address = url2base[url] -+ # try the api token path first -+ metadata_address = self._maybe_fetch_api_token(mdurls) -+ if not metadata_address: -+ if self._api_token == API_TOKEN_DISABLED: -+ LOG.warning('Retrying with IMDSv1') -+ # if we can't get a token, use instance-id path -+ urls = [] -+ url2base = {} -+ url_path = '{ver}/meta-data/instance-id'.format( -+ ver=self.min_metadata_version) -+ request_method = 'GET' -+ for url in mdurls: -+ cur = '{0}/{1}'.format(url, url_path) -+ urls.append(cur) -+ url2base[cur] = url -+ -+ start_time = time.time() -+ url, _ = uhelp.wait_for_url( -+ urls=urls, max_wait=url_params.max_wait_seconds, -+ timeout=url_params.timeout_seconds, status_cb=LOG.warning, -+ headers_cb=self._get_headers, request_method=request_method) -+ -+ if url: -+ metadata_address = url2base[url] -+ -+ if metadata_address: -+ self.metadata_address = metadata_address - LOG.debug("Using metadata source: '%s'", self.metadata_address) - else: - LOG.critical("Giving up on md from %s after %s seconds", - urls, int(time.time() - start_time)) - -- return bool(url) -+ return bool(metadata_address) - - def device_name_to_device(self, name): - # Consult metadata service, that has -@@ -349,14 +394,22 @@ class DataSourceEc2(sources.DataSource): - return {} - api_version = self.get_metadata_api_version() - crawled_metadata = {} -+ if self.cloud_name == CloudNames.AWS: -+ exc_cb = self._refresh_stale_aws_token_cb -+ exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb -+ else: -+ exc_cb = exc_cb_ud = None - try: - crawled_metadata['user-data'] = ec2.get_instance_userdata( -- api_version, self.metadata_address) -+ api_version, self.metadata_address, -+ headers_cb=self._get_headers, exception_cb=exc_cb_ud) - crawled_metadata['meta-data'] = 
ec2.get_instance_metadata( -- api_version, self.metadata_address) -+ api_version, self.metadata_address, -+ headers_cb=self._get_headers, exception_cb=exc_cb) - if self.cloud_name == CloudNames.AWS: - identity = ec2.get_instance_identity( -- api_version, self.metadata_address) -+ api_version, self.metadata_address, -+ headers_cb=self._get_headers, exception_cb=exc_cb) - crawled_metadata['dynamic'] = {'instance-identity': identity} - except Exception: - util.logexc( -@@ -366,6 +419,73 @@ class DataSourceEc2(sources.DataSource): - crawled_metadata['_metadata_api_version'] = api_version - return crawled_metadata - -+ def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): -+ """Request new metadata API token. -+ @param seconds: The lifetime of the token in seconds -+ -+ @return: The API token or None if unavailable. -+ """ -+ if self.cloud_name != CloudNames.AWS: -+ return None -+ LOG.debug("Refreshing Ec2 metadata API token") -+ request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} -+ token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) -+ try: -+ response = uhelp.readurl( -+ token_url, headers=request_header, request_method="PUT") -+ except uhelp.UrlError as e: -+ LOG.warning( -+ 'Unable to get API token: %s raised exception %s', -+ token_url, e) -+ return None -+ return response.contents -+ -+ def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): -+ """Callback will not retry on SKIP_USERDATA_CODES or if no token -+ is available.""" -+ retry = ec2.skip_retry_on_codes( -+ ec2.SKIP_USERDATA_CODES, msg, exception) -+ if not retry: -+ return False # False raises exception -+ return self._refresh_stale_aws_token_cb(msg, exception) -+ -+ def _refresh_stale_aws_token_cb(self, msg, exception): -+ """Exception handler for Ec2 to refresh token if token is stale.""" -+ if isinstance(exception, uhelp.UrlError) and exception.code == 401: -+ # With _api_token as None, _get_headers will _refresh_api_token. -+ LOG.debug("Clearing cached Ec2 API token due to expiry") -+ self._api_token = None -+ return True # always retry -+ -+ def _status_cb(self, msg, exc=None): -+ LOG.warning(msg) -+ if 'Read timed out' in msg: -+ LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') -+ self._api_token = API_TOKEN_DISABLED -+ -+ def _get_headers(self, url=''): -+ """Return a dict of headers for accessing a url. -+ -+ If _api_token is unset on AWS, attempt to refresh the token via a PUT -+ and then return the updated token header. -+ """ -+ if self.cloud_name != CloudNames.AWS or (self._api_token == -+ API_TOKEN_DISABLED): -+ return {} -+ # Request a 6 hour token if URL is API_TOKEN_ROUTE -+ request_token_header = { -+ 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} -+ if API_TOKEN_ROUTE in url: -+ return request_token_header -+ if not self._api_token: -+ # If we don't yet have an API token, get one via a PUT against -+ # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due -+ # to an invalid or expired token -+ self._api_token = self._refresh_api_token() -+ if not self._api_token: -+ return {} -+ return {'X-aws-ec2-metadata-token': self._api_token} -+ - - class DataSourceEc2Local(DataSourceEc2): - """Datasource run at init-local which sets up network to query metadata. 
-diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py -index 4616daa..d59aefd 100644 ---- a/cloudinit/sources/DataSourceExoscale.py -+++ b/cloudinit/sources/DataSourceExoscale.py -@@ -61,7 +61,7 @@ class DataSourceExoscale(sources.DataSource): - metadata_url = "{}/{}/meta-data/instance-id".format( - self.metadata_url, self.api_version) - -- url = url_helper.wait_for_url( -+ url, _response = url_helper.wait_for_url( - urls=[metadata_url], - max_wait=self.url_max_wait, - timeout=self.url_timeout, -diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py -index 61aa6d7..517913a 100644 ---- a/cloudinit/sources/DataSourceMAAS.py -+++ b/cloudinit/sources/DataSourceMAAS.py -@@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource): - url = url[:-1] - check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) - urls = [check_url] -- url = self.oauth_helper.wait_for_url( -+ url, _response = self.oauth_helper.wait_for_url( - urls=urls, max_wait=max_wait, timeout=timeout) - - if url: -diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py -index 4a01524..7a5e71b 100644 ---- a/cloudinit/sources/DataSourceOpenStack.py -+++ b/cloudinit/sources/DataSourceOpenStack.py -@@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): - - url_params = self.get_url_params() - start_time = time.time() -- avail_url = url_helper.wait_for_url( -+ avail_url, _response = url_helper.wait_for_url( - urls=md_urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds) - if avail_url: -diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py -index 1b0721b..a951b8b 100644 ---- a/cloudinit/url_helper.py -+++ b/cloudinit/url_helper.py -@@ -101,7 +101,7 @@ def read_file_or_url(url, timeout=5, retries=10, - raise UrlError(cause=e, code=code, headers=None, url=url) - return FileResponse(file_path, contents=contents) - else: -- return readurl(url, timeout=timeout, retries=retries, headers=headers, -+ return readurl(url, timeout=timeout, retries=retries, - headers_cb=headers_cb, data=data, - sec_between=sec_between, ssl_details=ssl_details, - exception_cb=exception_cb) -@@ -310,7 +310,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - - def wait_for_url(urls, max_wait=None, timeout=None, - status_cb=None, headers_cb=None, sleep_time=1, -- exception_cb=None, sleep_time_cb=None): -+ exception_cb=None, sleep_time_cb=None, request_method=None): - """ - urls: a list of urls to try - max_wait: roughly the maximum time to wait before giving up -@@ -325,6 +325,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, - 'exception', the exception that occurred. - sleep_time_cb: call method with 2 arguments (response, loop_n) that - generates the next sleep time. -+ request_method: indicate the type of HTTP request, GET, PUT, or POST -+ returns: tuple of (url, response contents), on failure, (False, None) - - the idea of this routine is to wait for the EC2 metdata service to - come up. 
On both Eucalyptus and EC2 we have seen the case where -@@ -381,8 +383,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, - else: - headers = {} - -- response = readurl(url, headers=headers, timeout=timeout, -- check_status=False) -+ response = readurl( -+ url, headers=headers, timeout=timeout, -+ check_status=False, request_method=request_method) - if not response.contents: - reason = "empty response [%s]" % (response.code) - url_exc = UrlError(ValueError(reason), code=response.code, -@@ -392,7 +395,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, - url_exc = UrlError(ValueError(reason), code=response.code, - headers=response.headers, url=url) - else: -- return url -+ return url, response.contents - except UrlError as e: - reason = "request error [%s]" % e - url_exc = e -@@ -421,7 +424,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, - sleep_time) - time.sleep(sleep_time) - -- return False -+ return False, None - - - class OauthUrlHelper(object): -diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py -index d6d2d6b..83c2f75 100644 ---- a/tests/unittests/test_datasource/test_cloudstack.py -+++ b/tests/unittests/test_datasource/test_cloudstack.py -@@ -10,6 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock - import os - import time - -+MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' -+DS_PATH = MOD_PATH + '.DataSourceCloudStack' -+ - - class TestCloudStackPasswordFetching(CiTestCase): - -@@ -17,7 +20,7 @@ class TestCloudStackPasswordFetching(CiTestCase): - super(TestCloudStackPasswordFetching, self).setUp() - self.patches = ExitStack() - self.addCleanup(self.patches.close) -- mod_name = 'cloudinit.sources.DataSourceCloudStack' -+ mod_name = MOD_PATH - self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) - self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) - default_gw = "192.201.20.0" -@@ -56,7 +59,9 @@ class TestCloudStackPasswordFetching(CiTestCase): - ds.get_data() - self.assertEqual({}, ds.get_config_obj()) - -- def test_password_sets_password(self): -+ @mock.patch(DS_PATH + '.wait_for_metadata_service') -+ def test_password_sets_password(self, m_wait): -+ m_wait.return_value = True - password = 'SekritSquirrel' - self._set_password_server_response(password) - ds = DataSourceCloudStack( -@@ -64,7 +69,9 @@ class TestCloudStackPasswordFetching(CiTestCase): - ds.get_data() - self.assertEqual(password, ds.get_config_obj()['password']) - -- def test_bad_request_doesnt_stop_ds_from_working(self): -+ @mock.patch(DS_PATH + '.wait_for_metadata_service') -+ def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): -+ m_wait.return_value = True - self._set_password_server_response('bad_request') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) -@@ -79,7 +86,9 @@ class TestCloudStackPasswordFetching(CiTestCase): - request_types.append(arg.split()[1]) - self.assertEqual(expected_request_types, request_types) - -- def test_valid_response_means_password_marked_as_saved(self): -+ @mock.patch(DS_PATH + '.wait_for_metadata_service') -+ def test_valid_response_means_password_marked_as_saved(self, m_wait): -+ m_wait.return_value = True - password = 'SekritSquirrel' - subp = self._set_password_server_response(password) - ds = DataSourceCloudStack( -@@ -92,7 +101,9 @@ class TestCloudStackPasswordFetching(CiTestCase): - subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack( - {}, None, 
helpers.Paths({'run_dir': self.tmp})) -- ds.get_data() -+ with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: -+ m_wait.return_value = True -+ ds.get_data() - self.assertRequestTypesSent(subp, ['send_my_password']) - - def test_password_not_saved_if_empty(self): -diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py -index 1a5956d..5c5c787 100644 ---- a/tests/unittests/test_datasource/test_ec2.py -+++ b/tests/unittests/test_datasource/test_ec2.py -@@ -191,7 +191,9 @@ def register_mock_metaserver(base_url, data): - register(base_url, 'not found', status=404) - - def myreg(*argc, **kwargs): -- return httpretty.register_uri(httpretty.GET, *argc, **kwargs) -+ url = argc[0] -+ method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET -+ return httpretty.register_uri(method, *argc, **kwargs) - - register_helper(myreg, base_url, data) - -@@ -237,6 +239,8 @@ class TestEc2(test_helpers.HttprettyTestCase): - if md: - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) -+ token_url = self.data_url('latest', data_item='api/token') -+ register_mock_metaserver(token_url, 'API-TOKEN') - for version in all_versions: - metadata_url = self.data_url(version) + '/' - if version == md_version: --- -1.8.3.1 - diff --git a/SOURCES/ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch b/SOURCES/ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch new file mode 100644 index 0000000..55dfdab --- /dev/null +++ b/SOURCES/ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch @@ -0,0 +1,350 @@ +From 0bd7f6b5f393a88b45ced71f1645705b651de9f2 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Mon, 11 May 2020 09:24:29 +0200 +Subject: [PATCH 2/2] ec2: Do not log IMDSv2 token values, instead use REDACTED + (#219) + +RH-Author: Eduardo Otubo +Message-id: <20200505082940.18316-1-otubo@redhat.com> +Patchwork-id: 96264 +O-Subject: [RHEL-7.9/RHEL-8.3 cloud-init PATCH] ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) +Bugzilla: 1821999 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +Note: There's no RHEL-8.3/cloud-init-19.4 branch yet, but it should be +queued to be applied on top of it when it's created. + +commit 87cd040ed8fe7195cbb357ed3bbf53cd2a81436c +Author: Ryan Harper +Date: Wed Feb 19 15:01:09 2020 -0600 + + ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) + + Instead of logging the token values used log the headers and replace the actual + values with the string 'REDACTED'. This allows users to examine cloud-init.log + and see that the IMDSv2 token header is being used but avoids leaving the value + used in the log file itself. 
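The redaction itself is a copy-and-filter over the header dict before it reaches the logger; a minimal sketch (cloud-init's real helper lives in url_helper.py, and the token value below is fabricated):

    # sketch: replace sensitive header values with REDACTED prior to logging
    REDACT_SENSITIVE = 'REDACTED'

    def redact_headers(headers, redact_list):
        copied = dict(headers)
        for key in redact_list:
            if key in copied:
                copied[key] = REDACT_SENSITIVE
        return copied

    hdrs = {'X-aws-ec2-metadata-token': 'AQAEAFakeToken'}
    print(redact_headers(hdrs, ['X-aws-ec2-metadata-token']))
    # -> {'X-aws-ec2-metadata-token': 'REDACTED'}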
+ + LP: #1863943 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/ec2_utils.py | 12 ++++++++-- + cloudinit/sources/DataSourceEc2.py | 35 +++++++++++++++++++---------- + cloudinit/url_helper.py | 27 ++++++++++++++++------ + tests/unittests/test_datasource/test_ec2.py | 17 ++++++++++++++ + 4 files changed, 70 insertions(+), 21 deletions(-) + +diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py +index 57708c1..34acfe8 100644 +--- a/cloudinit/ec2_utils.py ++++ b/cloudinit/ec2_utils.py +@@ -142,7 +142,8 @@ def skip_retry_on_codes(status_codes, _request_args, cause): + def get_instance_userdata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, +- headers_cb=None, exception_cb=None): ++ headers_cb=None, headers_redact=None, ++ exception_cb=None): + ud_url = url_helper.combine_url(metadata_address, api_version) + ud_url = url_helper.combine_url(ud_url, 'user-data') + user_data = '' +@@ -155,7 +156,8 @@ def get_instance_userdata(api_version='latest', + SKIP_USERDATA_CODES) + response = url_helper.read_file_or_url( + ud_url, ssl_details=ssl_details, timeout=timeout, +- retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) ++ retries=retries, exception_cb=exception_cb, headers_cb=headers_cb, ++ headers_redact=headers_redact) + user_data = response.contents + except url_helper.UrlError as e: + if e.code not in SKIP_USERDATA_CODES: +@@ -169,11 +171,13 @@ def _get_instance_metadata(tree, api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None, headers_cb=None, ++ headers_redact=None, + exception_cb=None): + md_url = url_helper.combine_url(metadata_address, api_version, tree) + caller = functools.partial( + url_helper.read_file_or_url, ssl_details=ssl_details, + timeout=timeout, retries=retries, headers_cb=headers_cb, ++ headers_redact=headers_redact, + exception_cb=exception_cb) + + def mcaller(url): +@@ -197,6 +201,7 @@ def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None, headers_cb=None, ++ headers_redact=None, + exception_cb=None): + # Note, 'meta-data' explicitly has trailing /. 
+ # this is required for CloudStack (LP: #1356855) +@@ -204,6 +209,7 @@ def get_instance_metadata(api_version='latest', + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder, ++ headers_redact=headers_redact, + headers_cb=headers_cb, + exception_cb=exception_cb) + +@@ -212,12 +218,14 @@ def get_instance_identity(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None, headers_cb=None, ++ headers_redact=None, + exception_cb=None): + return _get_instance_metadata(tree='dynamic/instance-identity', + api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder, ++ headers_redact=headers_redact, + headers_cb=headers_cb, + exception_cb=exception_cb) + # vi: ts=4 expandtab +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index b9f346a..0f2bfef 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -31,6 +31,9 @@ STRICT_ID_DEFAULT = "warn" + API_TOKEN_ROUTE = 'latest/api/token' + API_TOKEN_DISABLED = '_ec2_disable_api_token' + AWS_TOKEN_TTL_SECONDS = '21600' ++AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token' ++AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds' ++AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER] + + + class CloudNames(object): +@@ -158,7 +161,8 @@ class DataSourceEc2(sources.DataSource): + for api_ver in self.extended_metadata_versions: + url = url_tmpl.format(self.metadata_address, api_ver) + try: +- resp = uhelp.readurl(url=url, headers=headers) ++ resp = uhelp.readurl(url=url, headers=headers, ++ headers_redact=AWS_TOKEN_REDACT) + except uhelp.UrlError as e: + LOG.debug('url %s raised exception %s', url, e) + else: +@@ -180,6 +184,7 @@ class DataSourceEc2(sources.DataSource): + self.identity = ec2.get_instance_identity( + api_version, self.metadata_address, + headers_cb=self._get_headers, ++ headers_redact=AWS_TOKEN_REDACT, + exception_cb=self._refresh_stale_aws_token_cb).get( + 'document', {}) + return self.identity.get( +@@ -205,7 +210,8 @@ class DataSourceEc2(sources.DataSource): + LOG.debug('Fetching Ec2 IMDSv2 API Token') + url, response = uhelp.wait_for_url( + urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, +- headers_cb=self._get_headers, request_method=request_method) ++ headers_cb=self._get_headers, request_method=request_method, ++ headers_redact=AWS_TOKEN_REDACT) + + if url and response: + self._api_token = response +@@ -252,7 +258,8 @@ class DataSourceEc2(sources.DataSource): + url, _ = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warning, +- headers_cb=self._get_headers, request_method=request_method) ++ headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers, ++ request_method=request_method) + + if url: + metadata_address = url2base[url] +@@ -420,6 +427,7 @@ class DataSourceEc2(sources.DataSource): + if not self.wait_for_metadata_service(): + return {} + api_version = self.get_metadata_api_version() ++ redact = AWS_TOKEN_REDACT + crawled_metadata = {} + if self.cloud_name == CloudNames.AWS: + exc_cb = self._refresh_stale_aws_token_cb +@@ -429,14 +437,17 @@ class DataSourceEc2(sources.DataSource): + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, self.metadata_address, +- headers_cb=self._get_headers, 
exception_cb=exc_cb_ud) ++ headers_cb=self._get_headers, headers_redact=redact, ++ exception_cb=exc_cb_ud) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, self.metadata_address, +- headers_cb=self._get_headers, exception_cb=exc_cb) ++ headers_cb=self._get_headers, headers_redact=redact, ++ exception_cb=exc_cb) + if self.cloud_name == CloudNames.AWS: + identity = ec2.get_instance_identity( + api_version, self.metadata_address, +- headers_cb=self._get_headers, exception_cb=exc_cb) ++ headers_cb=self._get_headers, headers_redact=redact, ++ exception_cb=exc_cb) + crawled_metadata['dynamic'] = {'instance-identity': identity} + except Exception: + util.logexc( +@@ -455,11 +466,12 @@ class DataSourceEc2(sources.DataSource): + if self.cloud_name != CloudNames.AWS: + return None + LOG.debug("Refreshing Ec2 metadata API token") +- request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} ++ request_header = {AWS_TOKEN_REQ_HEADER: seconds} + token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) + try: +- response = uhelp.readurl( +- token_url, headers=request_header, request_method="PUT") ++ response = uhelp.readurl(token_url, headers=request_header, ++ headers_redact=AWS_TOKEN_REDACT, ++ request_method="PUT") + except uhelp.UrlError as e: + LOG.warning( + 'Unable to get API token: %s raised exception %s', +@@ -500,8 +512,7 @@ class DataSourceEc2(sources.DataSource): + API_TOKEN_DISABLED): + return {} + # Request a 6 hour token if URL is API_TOKEN_ROUTE +- request_token_header = { +- 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} ++ request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} + if API_TOKEN_ROUTE in url: + return request_token_header + if not self._api_token: +@@ -511,7 +522,7 @@ class DataSourceEc2(sources.DataSource): + self._api_token = self._refresh_api_token() + if not self._api_token: + return {} +- return {'X-aws-ec2-metadata-token': self._api_token} ++ return {AWS_TOKEN_PUT_HEADER: self._api_token} + + + class DataSourceEc2Local(DataSourceEc2): +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index 1496a47..3e7de9f 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -8,6 +8,7 @@ + # + # This file is part of cloud-init. See LICENSE file for license information. 
+ ++import copy + import json + import os + import requests +@@ -41,6 +42,7 @@ else: + SSL_ENABLED = False + CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) + _REQ_VER = None ++REDACTED = 'REDACTED' + try: + from distutils.version import LooseVersion + import pkg_resources +@@ -199,9 +201,9 @@ def _get_ssl_args(url, ssl_details): + + + def readurl(url, data=None, timeout=None, retries=0, sec_between=1, +- headers=None, headers_cb=None, ssl_details=None, +- check_status=True, allow_redirects=True, exception_cb=None, +- session=None, infinite=False, log_req_resp=True, ++ headers=None, headers_cb=None, headers_redact=None, ++ ssl_details=None, check_status=True, allow_redirects=True, ++ exception_cb=None, session=None, infinite=False, log_req_resp=True, + request_method=None): + """Wrapper around requests.Session to read the url and retry if necessary + +@@ -217,6 +219,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + :param headers: Optional dict of headers to send during request + :param headers_cb: Optional callable returning a dict of values to send as + headers during request ++ :param headers_redact: Optional list of header names to redact from the log + :param ssl_details: Optional dict providing key_file, ca_certs, and + cert_file keys for use on in ssl connections. + :param check_status: Optional boolean set True to raise when HTTPError +@@ -243,6 +246,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + req_args['method'] = request_method + if timeout is not None: + req_args['timeout'] = max(float(timeout), 0) ++ if headers_redact is None: ++ headers_redact = [] + # It doesn't seem like config + # was added in older library versions (or newer ones either), thus we + # need to manually do the retries if it wasn't... +@@ -287,6 +292,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + if k == 'data': + continue + filtered_req_args[k] = v ++ if k == 'headers': ++ for hkey, _hval in v.items(): ++ if hkey in headers_redact: ++ filtered_req_args[k][hkey] = ( ++ copy.deepcopy(req_args[k][hkey])) ++ filtered_req_args[k][hkey] = REDACTED + try: + + if log_req_resp: +@@ -339,8 +350,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + return None # Should throw before this... + + +-def wait_for_url(urls, max_wait=None, timeout=None, +- status_cb=None, headers_cb=None, sleep_time=1, ++def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, ++ headers_cb=None, headers_redact=None, sleep_time=1, + exception_cb=None, sleep_time_cb=None, request_method=None): + """ + urls: a list of urls to try +@@ -352,6 +363,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, + status_cb: call method with string message when a url is not available + headers_cb: call method with single argument of url to get headers + for request. ++ headers_redact: a list of header names to redact from the log + exception_cb: call method with 2 arguments 'msg' (per status_cb) and + 'exception', the exception that occurred. 
+ sleep_time_cb: call method with 2 arguments (response, loop_n) that +@@ -415,8 +427,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, + headers = {} + + response = readurl( +- url, headers=headers, timeout=timeout, +- check_status=False, request_method=request_method) ++ url, headers=headers, headers_redact=headers_redact, ++ timeout=timeout, check_status=False, ++ request_method=request_method) + if not response.contents: + reason = "empty response [%s]" % (response.code) + url_exc = UrlError(ValueError(reason), code=response.code, +diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py +index 34a089f..bd5bd4c 100644 +--- a/tests/unittests/test_datasource/test_ec2.py ++++ b/tests/unittests/test_datasource/test_ec2.py +@@ -429,6 +429,23 @@ class TestEc2(test_helpers.HttprettyTestCase): + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + ++ def test_aws_token_redacted(self): ++ """Verify that aws tokens are redacted when logged.""" ++ ds = self._setup_ds( ++ platform_data=self.valid_platform_data, ++ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, ++ md={'md': DEFAULT_METADATA}) ++ self.assertTrue(ds.get_data()) ++ all_logs = self.logs.getvalue().splitlines() ++ REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" ++ REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" ++ logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] ++ logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] ++ logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] ++ self.assertEqual(1, len(logs_with_redacted_ttl)) ++ self.assertEqual(79, len(logs_with_redacted)) ++ self.assertEqual(0, len(logs_with_token)) ++ + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_valid_platform_with_strict_true(self, m_dhcp): + """Valid platform data should return true with strict_id true.""" +-- +1.8.3.1 + diff --git a/SOURCES/ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch b/SOURCES/ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch new file mode 100644 index 0000000..9064f70 --- /dev/null +++ b/SOURCES/ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch @@ -0,0 +1,126 @@ +From 2a79d9ca066648feaa29e16e0ab6c2607907352e Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Wed, 20 May 2020 12:44:07 +0200 +Subject: [PATCH] ec2: only redact token request headers in logs, avoid + altering request (#230) + +RH-Author: Eduardo Otubo +Message-id: <20200519110500.21088-1-otubo@redhat.com> +Patchwork-id: 96614 +O-Subject: [RHEL-7.9 cloud-init PATCH] ec2: only redact token request headers in logs, avoid altering request (#230) +Bugzilla: 1821999 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +commit fa1abfec27050a4fb71cad950a17e42f9b43b478 +Author: Chad Smith +Date: Tue Mar 3 15:23:33 2020 -0700 + + ec2: only redact token request headers in logs, avoid altering request (#230) + + Our header redact logic was redacting both logged request headers and + the actual source request. This results in DataSourceEc2 sending the + invalid header "X-aws-ec2-metadata-token-ttl-seconds: REDACTED" which + gets an HTTP status response of 400. + + Cloud-init retries this failed token request for 2 minutes before + falling back to IMDSv1. 
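
A short sketch of the underlying Python pitfall, using plain dicts with illustrative names: binding the headers dict to a second name and assigning REDACTED through it mutates the dict the request will send, while deep-copying before redacting, as this commit does, leaves the outbound request intact.

    import copy

    headers = {'X-aws-ec2-metadata-token-ttl-seconds': '21600'}

    # Buggy pattern: 'filtered' is the same object, so the live request
    # ends up carrying the literal value 'REDACTED' and IMDS returns 400.
    filtered = headers
    filtered['X-aws-ec2-metadata-token-ttl-seconds'] = 'REDACTED'
    assert headers['X-aws-ec2-metadata-token-ttl-seconds'] == 'REDACTED'

    # Fixed pattern: deep-copy when a sensitive header is present, then
    # redact the copy; the original headers dict is left untouched.
    headers = {'X-aws-ec2-metadata-token-ttl-seconds': '21600'}
    filtered = copy.deepcopy(headers)
    filtered['X-aws-ec2-metadata-token-ttl-seconds'] = 'REDACTED'
    assert headers['X-aws-ec2-metadata-token-ttl-seconds'] == '21600'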
+ + LP: #1865882 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/tests/test_url_helper.py | 34 +++++++++++++++++++++++++++++++++- + cloudinit/url_helper.py | 15 ++++++++------- + 2 files changed, 41 insertions(+), 8 deletions(-) + +diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py +index 1674120..29b3937 100644 +--- a/cloudinit/tests/test_url_helper.py ++++ b/cloudinit/tests/test_url_helper.py +@@ -1,7 +1,8 @@ + # This file is part of cloud-init. See LICENSE file for license information. + + from cloudinit.url_helper import ( +- NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) ++ NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, ++ retry_on_url_exc) + from cloudinit.tests.helpers import CiTestCase, mock, skipIf + from cloudinit import util + from cloudinit import version +@@ -50,6 +51,9 @@ class TestOAuthHeaders(CiTestCase): + + + class TestReadFileOrUrl(CiTestCase): ++ ++ with_logs = True ++ + def test_read_file_or_url_str_from_file(self): + """Test that str(result.contents) on file is text version of contents. + It should not be "b'data'", but just "'data'" """ +@@ -71,6 +75,34 @@ class TestReadFileOrUrl(CiTestCase): + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + ++ @httpretty.activate ++ def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self): ++ """Headers are redacted from logs but unredacted in requests.""" ++ url = 'http://hostname/path' ++ headers = {'sensitive': 'sekret', 'server': 'blah'} ++ httpretty.register_uri(httpretty.GET, url) ++ ++ read_file_or_url(url, headers=headers, headers_redact=['sensitive']) ++ logs = self.logs.getvalue() ++ for k in headers.keys(): ++ self.assertEqual(headers[k], httpretty.last_request().headers[k]) ++ self.assertIn(REDACTED, logs) ++ self.assertNotIn('sekret', logs) ++ ++ @httpretty.activate ++ def test_read_file_or_url_str_from_url_redacts_noheaders(self): ++ """When no headers_redact, header values are in logs and requests.""" ++ url = 'http://hostname/path' ++ headers = {'sensitive': 'sekret', 'server': 'blah'} ++ httpretty.register_uri(httpretty.GET, url) ++ ++ read_file_or_url(url, headers=headers) ++ for k in headers.keys(): ++ self.assertEqual(headers[k], httpretty.last_request().headers[k]) ++ logs = self.logs.getvalue() ++ self.assertNotIn(REDACTED, logs) ++ self.assertIn('sekret', logs) ++ + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index 3e7de9f..e6188ea 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -291,13 +291,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, + for (k, v) in req_args.items(): + if k == 'data': + continue +- filtered_req_args[k] = v +- if k == 'headers': +- for hkey, _hval in v.items(): +- if hkey in headers_redact: +- filtered_req_args[k][hkey] = ( +- copy.deepcopy(req_args[k][hkey])) +- filtered_req_args[k][hkey] = REDACTED ++ if k == 'headers' and headers_redact: ++ matched_headers = [k for k in headers_redact if v.get(k)] ++ if matched_headers: ++ filtered_req_args[k] = copy.deepcopy(v) ++ for key in matched_headers: ++ filtered_req_args[k][key] = REDACTED ++ else: ++ filtered_req_args[k] = v + try: + + if log_req_resp: +-- +1.8.3.1 + diff --git a/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch 
b/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch deleted file mode 100644 index f22ad0e..0000000 --- a/SOURCES/ci-exoscale-Increase-url_max_wait-to-120s.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 4e539790e57452b24aa6851452201c0f2a87c464 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 5 May 2020 08:08:21 +0200 -Subject: [PATCH 4/5] exoscale: Increase url_max_wait to 120s. - -RH-Author: Eduardo Otubo -Message-id: <20200504085238.25884-5-otubo@redhat.com> -Patchwork-id: 96247 -O-Subject: [RHEL-7.8.z cloud-init PATCH 4/5] exoscale: Increase url_max_wait to 120s. -Bugzilla: 1827207 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -commit 3e2482e8aa6630ca9bc115dc1f82d44d3fde1681 -Author: Chris Glass -Date: Thu Oct 24 17:32:58 2019 +0000 - - exoscale: Increase url_max_wait to 120s. - - The exoscale datasource defines a shorter timeout than the default (10) - but did not override url_max_wait, resulting in a single attempt being - made to wait for the metadata service. - - In some rare cases, a race condition means the route to the metadata - service is not set within 10 seconds, and more attempts should be made. - - This sets the url_max_wait for the datasource to 120. - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceExoscale.py | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py -index fdfb4ed..4616daa 100644 ---- a/cloudinit/sources/DataSourceExoscale.py -+++ b/cloudinit/sources/DataSourceExoscale.py -@@ -26,6 +26,8 @@ class DataSourceExoscale(sources.DataSource): - - dsname = 'Exoscale' - -+ url_max_wait = 120 -+ - def __init__(self, sys_cfg, distro, paths): - super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) - LOG.debug("Initializing the Exoscale datasource") --- -1.8.3.1 - diff --git a/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch b/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch deleted file mode 100644 index f7d9adc..0000000 --- a/SOURCES/ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch +++ /dev/null @@ -1,152 +0,0 @@ -From bbe1338c356cb5bbc1196b7f4ba620f95d2b5fd1 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Tue, 5 May 2020 08:08:18 +0200 -Subject: [PATCH 3/5] exoscale: fix sysconfig cloud_config_modules overrides - -RH-Author: Eduardo Otubo -Message-id: <20200504085238.25884-4-otubo@redhat.com> -Patchwork-id: 96246 -O-Subject: [RHEL-7.8.z cloud-init PATCH 3/5] exoscale: fix sysconfig cloud_config_modules overrides -Bugzilla: 1827207 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -commit d1b022217a652c7a84d5430c9e571987864d3982 -Author: Chad Smith -Date: Wed Aug 28 00:58:16 2019 +0000 - - exoscale: fix sysconfig cloud_config_modules overrides - - Make sure Exoscale supplements or overrides existing system config - setting cloud_config_modules instead of replacing it with a one item - list set-passords - - LP: #1841454 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceExoscale.py | 26 ++++++++++++++++-------- - tests/unittests/test_datasource/test_exoscale.py | 24 ++++++++++++++-------- - 2 files changed, 33 insertions(+), 17 deletions(-) - -diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py -index 52e7f6f..fdfb4ed 100644 ---- a/cloudinit/sources/DataSourceExoscale.py -+++ 
b/cloudinit/sources/DataSourceExoscale.py -@@ -6,6 +6,7 @@ - from cloudinit import ec2_utils as ec2 - from cloudinit import log as logging - from cloudinit import sources -+from cloudinit import helpers - from cloudinit import url_helper - from cloudinit import util - -@@ -20,13 +21,6 @@ URL_RETRIES = 6 - - EXOSCALE_DMI_NAME = "Exoscale" - --BUILTIN_DS_CONFIG = { -- # We run the set password config module on every boot in order to enable -- # resetting the instance's password via the exoscale console (and a -- # subsequent instance reboot). -- 'cloud_config_modules': [["set-passwords", "always"]] --} -- - - class DataSourceExoscale(sources.DataSource): - -@@ -42,8 +36,22 @@ class DataSourceExoscale(sources.DataSource): - self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) - self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) - self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) -- -- self.extra_config = BUILTIN_DS_CONFIG -+ self.extra_config = {} -+ -+ def activate(self, cfg, is_new_instance): -+ """Adjust set-passwords module to run 'always' during each boot""" -+ # We run the set password config module on every boot in order to -+ # enable resetting the instance's password via the exoscale console -+ # (and a subsequent instance reboot). -+ # Exoscale password server only provides set-passwords user-data if -+ # a user has triggered a password reset. So calling that password -+ # service generally results in no additional cloud-config. -+ # TODO(Create util functions for overriding merged sys_cfg module freq) -+ mod = 'set_passwords' -+ sem_path = self.paths.get_ipath_cur('sem') -+ sem_helper = helpers.FileSemaphores(sem_path) -+ if sem_helper.clear('config_' + mod, None): -+ LOG.debug('Overriding module set-passwords with frequency always') - - def wait_for_metadata_service(self): - """Wait for the metadata service to be reachable.""" -diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py -index 350c330..f006119 100644 ---- a/tests/unittests/test_datasource/test_exoscale.py -+++ b/tests/unittests/test_datasource/test_exoscale.py -@@ -11,8 +11,10 @@ from cloudinit.sources.DataSourceExoscale import ( - PASSWORD_SERVER_PORT, - read_metadata) - from cloudinit.tests.helpers import HttprettyTestCase, mock -+from cloudinit import util - - import httpretty -+import os - import requests - - -@@ -63,6 +65,18 @@ class TestDatasourceExoscale(HttprettyTestCase): - password = get_password() - self.assertEqual(expected_password, password) - -+ def test_activate_removes_set_passwords_semaphore(self): -+ """Allow set_passwords to run every boot by removing the semaphore.""" -+ path = helpers.Paths({'cloud_dir': self.tmp}) -+ sem_dir = self.tmp_path('instance/sem', dir=self.tmp) -+ util.ensure_dir(sem_dir) -+ sem_file = os.path.join(sem_dir, 'config_set_passwords') -+ with open(sem_file, 'w') as stream: -+ stream.write('') -+ ds = DataSourceExoscale({}, None, path) -+ ds.activate(None, None) -+ self.assertFalse(os.path.exists(sem_file)) -+ - def test_get_data(self): - """The datasource conforms to expected behavior when supplied - full test data.""" -@@ -95,8 +109,6 @@ class TestDatasourceExoscale(HttprettyTestCase): - self.assertEqual(ds.get_config_obj(), - {'ssh_pwauth': True, - 'password': expected_password, -- 'cloud_config_modules': [ -- ["set-passwords", "always"]], - 'chpasswd': { - 'expire': False, - }}) -@@ -130,9 +142,7 @@ class TestDatasourceExoscale(HttprettyTestCase): - 
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) -- self.assertEqual(ds.get_config_obj(), -- {'cloud_config_modules': [ -- ["set-passwords", "always"]]}) -+ self.assertEqual(ds.get_config_obj(), {}) - - def test_get_data_no_password(self): - """The datasource conforms to expected behavior when no password is -@@ -163,9 +173,7 @@ class TestDatasourceExoscale(HttprettyTestCase): - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) -- self.assertEqual(ds.get_config_obj(), -- {'cloud_config_modules': [ -- ["set-passwords", "always"]]}) -+ self.assertEqual(ds.get_config_obj(), {}) - - @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') - def test_read_metadata_when_password_server_unreachable(self, m_password): --- -1.8.3.1 - diff --git a/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch b/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch index b55ac63..530225d 100644 --- a/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch +++ b/SOURCES/ci-swap-file-size-being-used-before-checked-if-str-315.patch @@ -1,21 +1,21 @@ -From 6f8623c570247903e3cba925676677e44a99c69c Mon Sep 17 00:00:00 2001 +From cda350cffa9f04e0ba4fb787217c715a7c7fb777 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo -Date: Tue, 28 Apr 2020 08:22:05 +0200 -Subject: [PATCH 2/3] swap file "size" being used before checked if str (#315) +Date: Wed, 15 Apr 2020 09:40:11 +0200 +Subject: [PATCH] swap file "size" being used before checked if str (#315) RH-Author: Eduardo Otubo -Message-id: <20200422130428.7663-3-otubo@redhat.com> -Patchwork-id: 96033 -O-Subject: [RHEL-7.7.z/RHEL-7.8.z cloud-init PATCH 2/3] swap file "size" being used before checked if str (#315) -Bugzilla: 1801094 +Message-id: <20200414163333.5424-1-otubo@redhat.com> +Patchwork-id: 94678 +O-Subject: [RHEL-7.9 cloud-init PATCH] swap file "size" being used before checked if str (#315) +Bugzilla: 1772505 +RH-Acked-by: Miroslav Rezanina RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Cathy Avery commit 46cf23c28812d3e3ba0c570defd9a05628af5556 Author: Eduardo Otubo -Date: Tue Apr 14 18:16:25 2020 +0200 +Date: Tue Apr 14 17:45:14 2020 +0200 - swap file "size" being used before checked if str (#315) + swap file "size" being used before checked if str Swap file size variable was being used before checked if it's set to str "auto". 
If set to "auto", it will break with: @@ -23,7 +23,6 @@ Date: Tue Apr 14 18:16:25 2020 +0200 failed to setup swap: unsupported operand type(s) for /: 'str' and 'int' Signed-off-by: Eduardo Otubo - RHBZ: 1772505 Signed-off-by: Eduardo Otubo Signed-off-by: Miroslav Rezanina @@ -32,7 +31,7 @@ Signed-off-by: Miroslav Rezanina 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py -index 6884ddf..811781f 100644 +index 4293844..0573026 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -274,7 +274,6 @@ def setup_swapfile(fname, size=None, maxsize=None): diff --git a/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch b/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch deleted file mode 100644 index 4f08062..0000000 --- a/SOURCES/ci-url_helper-read_file_or_url-should-pass-headers-para.patch +++ /dev/null @@ -1,326 +0,0 @@ -From f9fcf18105845fbb933925ae7b0a2f1033f75127 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 20 May 2020 10:11:14 +0200 -Subject: [PATCH] url_helper: read_file_or_url should pass headers param into - readurl (#66) - -RH-Author: Eduardo Otubo -Message-id: <20200519105653.20249-1-otubo@redhat.com> -Patchwork-id: 96613 -O-Subject: [RHEL-7.8.z cloud-init PATCH] url_helper: read_file_or_url should pass headers param into readurl (#66) -Bugzilla: 1832177 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -commit f69d33a723b805fec3ee70c3a6127c8cadcb02d8 -Author: Chad Smith -Date: Mon Dec 2 16:24:18 2019 -0700 - - url_helper: read_file_or_url should pass headers param into readurl (#66) - - Headers param was accidentally omitted and no longer passed through to - readurl due to a previous commit. - - To avoid this omission of params in the future, drop positional param - definitions from read_file_or_url and pass all kwargs through to readurl - when we are not operating on a file. - - In util:read_seeded, correct the case where invalid positional param - file_retries was being passed into read_file_or_url. - - Also drop duplicated file:// prefix addition from read_seeded because - read_file_or_url does that work anyway. 
- - LP: #1854084 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/helpers/azure.py | 6 ++- - cloudinit/tests/test_url_helper.py | 52 ++++++++++++++++++++++ - cloudinit/url_helper.py | 47 +++++++++++++++---- - cloudinit/user_data.py | 2 +- - cloudinit/util.py | 15 ++----- - .../unittests/test_datasource/test_azure_helper.py | 18 +++++--- - 6 files changed, 112 insertions(+), 28 deletions(-) - -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index c2a57cc..b99c484 100755 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -103,14 +103,16 @@ class AzureEndpointHttpClient(object): - if secure: - headers = self.headers.copy() - headers.update(self.extra_secure_headers) -- return url_helper.read_file_or_url(url, headers=headers) -+ return url_helper.read_file_or_url(url, headers=headers, timeout=5, -+ retries=10) - - def post(self, url, data=None, extra_headers=None): - headers = self.headers - if extra_headers is not None: - headers = self.headers.copy() - headers.update(extra_headers) -- return url_helper.read_file_or_url(url, data=data, headers=headers) -+ return url_helper.read_file_or_url(url, data=data, headers=headers, -+ timeout=5, retries=10) - - - class GoalState(object): -diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py -index aa9f3ec..e883ddc 100644 ---- a/cloudinit/tests/test_url_helper.py -+++ b/cloudinit/tests/test_url_helper.py -@@ -4,6 +4,7 @@ from cloudinit.url_helper import ( - NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) - from cloudinit.tests.helpers import CiTestCase, mock, skipIf - from cloudinit import util -+from cloudinit import version - - import httpretty - import requests -@@ -17,6 +18,9 @@ except ImportError: - _missing_oauthlib_dep = True - - -+M_PATH = 'cloudinit.url_helper.' 
-+ -+ - class TestOAuthHeaders(CiTestCase): - - def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): -@@ -67,6 +71,54 @@ class TestReadFileOrUrl(CiTestCase): - self.assertEqual(result.contents, data) - self.assertEqual(str(result), data.decode('utf-8')) - -+ @mock.patch(M_PATH + 'readurl') -+ def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): -+ """read_file_or_url passes all params through to readurl.""" -+ url = 'http://hostname/path' -+ response = 'This is my url content\n' -+ m_readurl.return_value = response -+ params = {'url': url, 'timeout': 1, 'retries': 2, -+ 'headers': {'somehdr': 'val'}, -+ 'data': 'data', 'sec_between': 1, -+ 'ssl_details': {'cert_file': '/path/cert.pem'}, -+ 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} -+ self.assertEqual(response, read_file_or_url(**params)) -+ params.pop('url') # url is passed in as a positional arg -+ self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) -+ -+ def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): -+ """Readurl param defaults used when unspecified by read_file_or_url -+ -+ Param defaults tested are as follows: -+ retries: 0, additional headers None beyond default, method: GET, -+ data: None, check_status: True and allow_redirects: True -+ """ -+ url = 'http://hostname/path' -+ -+ m_response = mock.MagicMock() -+ -+ class FakeSession(requests.Session): -+ def request(cls, **kwargs): -+ self.assertEqual( -+ {'url': url, 'allow_redirects': True, 'method': 'GET', -+ 'headers': { -+ 'User-Agent': 'Cloud-Init/%s' % ( -+ version.version_string())}}, -+ kwargs) -+ return m_response -+ -+ with mock.patch(M_PATH + 'requests.Session') as m_session: -+ error = requests.exceptions.HTTPError('broke') -+ m_session.side_effect = [error, FakeSession()] -+ # assert no retries and check_status == True -+ with self.assertRaises(UrlError) as context_manager: -+ response = read_file_or_url(url) -+ self.assertEqual('broke', str(context_manager.exception)) -+ # assert default headers, method, url and allow_redirects True -+ # Success on 2nd call with FakeSession -+ response = read_file_or_url(url) -+ self.assertEqual(m_response, response._response) -+ - - class TestRetryOnUrlExc(CiTestCase): - -diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py -index a951b8b..beb6873 100644 ---- a/cloudinit/url_helper.py -+++ b/cloudinit/url_helper.py -@@ -81,14 +81,19 @@ def combine_url(base, *add_ons): - return url - - --def read_file_or_url(url, timeout=5, retries=10, -- headers=None, data=None, sec_between=1, ssl_details=None, -- headers_cb=None, exception_cb=None): -+def read_file_or_url(url, **kwargs): -+ """Wrapper function around readurl to allow passing a file path as url. -+ -+ When url is not a local file path, passthrough any kwargs to readurl. -+ -+ In the case of parameter passthrough to readurl, default values for some -+ parameters. See: call-signature of readurl in this module for param docs. 
-+ """ - url = url.lstrip() - if url.startswith("/"): - url = "file://%s" % url - if url.lower().startswith("file://"): -- if data: -+ if kwargs.get("data"): - LOG.warning("Unable to post data to file resource %s", url) - file_path = url[len("file://"):] - try: -@@ -101,10 +106,7 @@ def read_file_or_url(url, timeout=5, retries=10, - raise UrlError(cause=e, code=code, headers=None, url=url) - return FileResponse(file_path, contents=contents) - else: -- return readurl(url, timeout=timeout, retries=retries, -- headers_cb=headers_cb, data=data, -- sec_between=sec_between, ssl_details=ssl_details, -- exception_cb=exception_cb) -+ return readurl(url, **kwargs) - - - # Made to have same accessors as UrlResponse so that the -@@ -201,6 +203,35 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - check_status=True, allow_redirects=True, exception_cb=None, - session=None, infinite=False, log_req_resp=True, - request_method=None): -+ """Wrapper around requests.Session to read the url and retry if necessary -+ -+ :param url: Mandatory url to request. -+ :param data: Optional form data to post the URL. Will set request_method -+ to 'POST' if present. -+ :param timeout: Timeout in seconds to wait for a response -+ :param retries: Number of times to retry on exception if exception_cb is -+ None or exception_cb returns True for the exception caught. Default is -+ to fail with 0 retries on exception. -+ :param sec_between: Default 1: amount of seconds passed to time.sleep -+ between retries. None or -1 means don't sleep. -+ :param headers: Optional dict of headers to send during request -+ :param headers_cb: Optional callable returning a dict of values to send as -+ headers during request -+ :param ssl_details: Optional dict providing key_file, ca_certs, and -+ cert_file keys for use on in ssl connections. -+ :param check_status: Optional boolean set True to raise when HTTPError -+ occurs. Default: True. -+ :param allow_redirects: Optional boolean passed straight to Session.request -+ as 'allow_redirects'. Default: True. -+ :param exception_cb: Optional callable which accepts the params -+ msg and exception and returns a boolean True if retries are permitted. -+ :param session: Optional exiting requests.Session instance to reuse. -+ :param infinite: Bool, set True to retry indefinitely. Default: False. -+ :param log_req_resp: Set False to turn off verbose debug messages. -+ :param request_method: String passed as 'method' to Session.request. -+ Typically GET, or POST. Default: POST if data is provided, GET -+ otherwise. -+ """ - url = _cleanurl(url) - req_args = { - 'url': url, -diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py -index ed83d2d..15af1da 100644 ---- a/cloudinit/user_data.py -+++ b/cloudinit/user_data.py -@@ -224,7 +224,7 @@ class UserDataProcessor(object): - content = util.load_file(include_once_fn) - else: - try: -- resp = read_file_or_url(include_url, -+ resp = read_file_or_url(include_url, timeout=5, retries=10, - ssl_details=self.ssl_details) - if include_once_on and resp.ok(): - util.write_file(include_once_fn, resp.contents, -diff --git a/cloudinit/util.py b/cloudinit/util.py -index 2c9ac66..db9a229 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -966,13 +966,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): - - - def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): -- if base.startswith("/"): -- base = "file://%s" % base -- -- # default retries for file is 0. 
for network is 10 -- if base.startswith("file://"): -- retries = file_retries -- - if base.find("%s") >= 0: - ud_url = base % ("user-data" + ext) - md_url = base % ("meta-data" + ext) -@@ -980,14 +973,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): - ud_url = "%s%s%s" % (base, "user-data", ext) - md_url = "%s%s%s" % (base, "meta-data", ext) - -- md_resp = url_helper.read_file_or_url(md_url, timeout, retries, -- file_retries) -+ md_resp = url_helper.read_file_or_url(md_url, timeout=timeout, -+ retries=retries) - md = None - if md_resp.ok(): - md = load_yaml(decode_binary(md_resp.contents), default={}) - -- ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, -- file_retries) -+ ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout, -+ retries=retries) - ud = None - if ud_resp.ok(): - ud = ud_resp.contents -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index 7ad5cc1..007df09 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ b/tests/unittests/test_datasource/test_azure_helper.py -@@ -231,8 +231,10 @@ class TestAzureEndpointHttpClient(CiTestCase): - response = client.get(url, secure=False) - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) -- self.assertEqual(mock.call(url, headers=self.regular_headers), -- self.read_file_or_url.call_args) -+ self.assertEqual( -+ mock.call(url, headers=self.regular_headers, retries=10, -+ timeout=5), -+ self.read_file_or_url.call_args) - - def test_secure_get(self): - url = 'MyTestUrl' -@@ -246,8 +248,10 @@ class TestAzureEndpointHttpClient(CiTestCase): - response = client.get(url, secure=True) - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) -- self.assertEqual(mock.call(url, headers=expected_headers), -- self.read_file_or_url.call_args) -+ self.assertEqual( -+ mock.call(url, headers=expected_headers, retries=10, -+ timeout=5), -+ self.read_file_or_url.call_args) - - def test_post(self): - data = mock.MagicMock() -@@ -257,7 +261,8 @@ class TestAzureEndpointHttpClient(CiTestCase): - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual( -- mock.call(url, data=data, headers=self.regular_headers), -+ mock.call(url, data=data, headers=self.regular_headers, retries=10, -+ timeout=5), - self.read_file_or_url.call_args) - - def test_post_with_extra_headers(self): -@@ -269,7 +274,8 @@ class TestAzureEndpointHttpClient(CiTestCase): - expected_headers = self.regular_headers.copy() - expected_headers.update(extra_headers) - self.assertEqual( -- mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), -+ mock.call(mock.ANY, data=mock.ANY, headers=expected_headers, -+ retries=10, timeout=5), - self.read_file_or_url.call_args) - - --- -1.8.3.1 - diff --git a/SOURCES/ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch b/SOURCES/ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch deleted file mode 100644 index a3919e4..0000000 --- a/SOURCES/ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch +++ /dev/null @@ -1,144 +0,0 @@ -From 5a3cd50df652e4a70f85ccc712dc11bf9726adda Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Wed, 16 Oct 2019 12:10:24 +0200 -Subject: [PATCH] util: json.dumps on python 2.7 will handle UnicodeDecodeError - on binary - -RH-Author: Eduardo Otubo 
-Message-id: <20191016121024.23694-1-otubo@redhat.com> -Patchwork-id: 91812 -O-Subject: [RHEL-7.8/RHEL-8.1.0 cloud-init PATCH] util: json.dumps on python 2.7 will handle UnicodeDecodeError on binary -Bugzilla: 1744526 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohammed Gamal - -commit 067516d7bc917e4921b9f1424b7a64e92cae0ad2 -Author: Chad Smith -Date: Fri Sep 27 20:46:00 2019 +0000 - - util: json.dumps on python 2.7 will handle UnicodeDecodeError on binary - - Since python 2.7 doesn't handle UnicodeDecodeErrors with the default - handler - - LP: #1801364 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/tests/test_init.py | 12 +++++------- - cloudinit/tests/test_util.py | 20 ++++++++++++++++++++ - cloudinit/util.py | 27 +++++++++++++++++++++++++-- - 3 files changed, 50 insertions(+), 9 deletions(-) - -diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py -index 6378e98..9698261 100644 ---- a/cloudinit/sources/tests/test_init.py -+++ b/cloudinit/sources/tests/test_init.py -@@ -457,19 +457,17 @@ class TestDataSource(CiTestCase): - instance_json['ds']['meta_data']) - - @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") -- def test_non_utf8_encoding_logs_warning(self): -- """When non-utf-8 values exist in py2 instance-data is not written.""" -+ def test_non_utf8_encoding_gets_b64encoded(self): -+ """When non-utf-8 values exist in py2 instance-data is b64encoded.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) -- self.assertFalse(os.path.exists(json_file)) -- self.assertIn( -- "WARNING: Error persisting instance-data.json: 'utf8' codec can't" -- " decode byte 0xaa in position 2: invalid start byte", -- self.logs.getvalue()) -+ instance_json = util.load_json(util.load_file(json_file)) -+ key21_value = instance_json['ds']['meta_data']['key2']['key2.1'] -+ self.assertEqual('ci-b64:' + util.b64e(b'ab\xaadef'), key21_value) - - def test_get_hostname_subclass_support(self): - """Validate get_hostname signature on all subclasses of DataSource.""" -diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py -index e3d2dba..f4f95e9 100644 ---- a/cloudinit/tests/test_util.py -+++ b/cloudinit/tests/test_util.py -@@ -2,7 +2,9 @@ - - """Tests for cloudinit.util""" - -+import base64 - import logging -+import json - import platform - - import cloudinit.util as util -@@ -528,6 +530,24 @@ class TestGetLinuxDistro(CiTestCase): - self.assertEqual(('foo', '1.1', 'aarch64'), dist) - - -+class TestJsonDumps(CiTestCase): -+ def test_is_str(self): -+ """json_dumps should return a string.""" -+ self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) -+ -+ def test_utf8(self): -+ smiley = '\\ud83d\\ude03' -+ self.assertEqual( -+ {'smiley': smiley}, -+ json.loads(util.json_dumps({'smiley': smiley}))) -+ -+ def test_non_utf8(self): -+ blob = b'\xba\x03Qx-#y\xea' -+ self.assertEqual( -+ {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, -+ json.loads(util.json_dumps({'blob': blob}))) -+ -+ - @mock.patch('os.path.exists') - class TestIsLXD(CiTestCase): - -diff --git a/cloudinit/util.py b/cloudinit/util.py -index a84112a..2c9ac66 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -1590,10 +1590,33 @@ def json_serialize_default(_obj): - return 
'Warning: redacted unserializable type {0}'.format(type(_obj)) - - -+def json_preserialize_binary(data): -+ """Preserialize any discovered binary values to avoid json.dumps issues. -+ -+ Used only on python 2.7 where default type handling is not honored for -+ failure to encode binary data. LP: #1801364. -+ TODO(Drop this function when py2.7 support is dropped from cloud-init) -+ """ -+ data = obj_copy.deepcopy(data) -+ for key, value in data.items(): -+ if isinstance(value, (dict)): -+ data[key] = json_preserialize_binary(value) -+ if isinstance(value, bytes): -+ data[key] = 'ci-b64:{0}'.format(b64e(value)) -+ return data -+ -+ - def json_dumps(data): - """Return data in nicely formatted json.""" -- return json.dumps(data, indent=1, sort_keys=True, -- separators=(',', ': '), default=json_serialize_default) -+ try: -+ return json.dumps( -+ data, indent=1, sort_keys=True, separators=(',', ': '), -+ default=json_serialize_default) -+ except UnicodeDecodeError: -+ if sys.version_info[:2] == (2, 7): -+ data = json_preserialize_binary(data) -+ return json.dumps(data) -+ raise - - - def yaml_dumps(obj, explicit_start=True, explicit_end=True): --- -1.8.3.1 - diff --git a/SOURCES/ci-utils-use-SystemRandom-when-generating-random-passwo.patch b/SOURCES/ci-utils-use-SystemRandom-when-generating-random-passwo.patch new file mode 100644 index 0000000..4837d1e --- /dev/null +++ b/SOURCES/ci-utils-use-SystemRandom-when-generating-random-passwo.patch @@ -0,0 +1,45 @@ +From cdb5f116c3a43ff2a5943cdadd0562ec03054a5b Mon Sep 17 00:00:00 2001 +From: jmaloy +Date: Fri, 13 Mar 2020 18:55:18 +0100 +Subject: [PATCH 4/5] utils: use SystemRandom when generating random password. + (#204) + +Message-id: <20200313185518.18544-2-jmaloy@redhat.com> +Patchwork-id: 94296 +O-Subject: [RHEL-7.9 cloud-init PATCH 1/1] utils: use SystemRandom when generating random password. (#204) +Bugzilla: 1812173 +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Eduardo Otubo + +From: Dimitri John Ledkov + +As noticed by Seth Arnold, non-deterministic SystemRandom should be +used when creating security sensitive random strings. 
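
The change itself is one line; a runnable sketch of the patched rand_str, where the difference is the randomness source (os.urandom via SystemRandom rather than the seeded Mersenne Twister behind bare random.choice):

    import random
    import string

    def rand_str(strlen=32, select_from=None):
        r = random.SystemRandom()  # CSPRNG, backed by os.urandom
        if not select_from:
            select_from = string.ascii_letters + string.digits
        return "".join(r.choice(select_from) for _ in range(strlen))

    print(rand_str(20))  # non-deterministic, suitable for passwords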
+ +(cherry picked from commit 3e2f7356effc9e9cccc5ae945846279804eedc46) +Signed-off-by: Jon Maloy +Signed-off-by: Miroslav Rezanina +--- + cloudinit/util.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 9d9d5c7..5d51ba8 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -401,9 +401,10 @@ def translate_bool(val, addons=None): + + + def rand_str(strlen=32, select_from=None): ++ r = random.SystemRandom() + if not select_from: + select_from = string.ascii_letters + string.digits +- return "".join([random.choice(select_from) for _x in range(0, strlen)]) ++ return "".join([r.choice(select_from) for _x in range(0, strlen)]) + + + def rand_dict_key(dictionary, postfix=None): +-- +1.8.3.1 + diff --git a/SOURCES/cloud-init-centos-user.patch b/SOURCES/cloud-init-centos-user.patch deleted file mode 100644 index 3ebaa88..0000000 --- a/SOURCES/cloud-init-centos-user.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -uNrp cloud-init-18.2.orig/rhel/cloud.cfg cloud-init-18.2/rhel/cloud.cfg ---- cloud-init-18.2.orig/rhel/cloud.cfg 2018-11-04 15:38:13.763701007 +0000 -+++ cloud-init-18.2/rhel/cloud.cfg 2018-11-04 15:41:06.934576619 +0000 -@@ -52,7 +52,7 @@ cloud_final_modules: - - system_info: - default_user: -- name: cloud-user -+ name: centos - lock_passwd: true - gecos: Cloud User - groups: [wheel, adm, systemd-journal] diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec index 544add5..34d4040 100644 --- a/SPECS/cloud-init.spec +++ b/SPECS/cloud-init.spec @@ -1,3 +1,9 @@ +%if %{rhel} >= 8 +%global __python %{__python3} +%else +%global __python %{__python2} +%endif + %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?license: %global license %%doc} @@ -6,8 +12,8 @@ %global debug_package %{nil} Name: cloud-init -Version: 18.5 -Release: 6%{?dist}.5 +Version: 19.4 +Release: 7%{?dist} Summary: Cloud instance init scripts Group: System Environment/Base @@ -20,57 +26,34 @@ Patch0001: 0001-Add-initial-redhat-setup.patch Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch Patch0003: 0003-limit-permissions-on-def_log_file.patch Patch0004: 0004-remove-tee-command-from-logging-configuration.patch -Patch0005: 0005-azure-ensure-that-networkmanager-hook-script-runs.patch -Patch0006: 0006-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch -Patch0007: 0007-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch -Patch0008: 0008-sysconfig-Don-t-disable-IPV6_AUTOCONF.patch -Patch0009: 0009-net-Wait-for-dhclient-to-daemonize-before-reading-le.patch -Patch0010: 0010-cloud-init-per-don-t-use-dashes-in-sem-names.patch -Patch0011: 0011-azure-Filter-list-of-ssh-keys-pulled-from-fabric.patch -Patch0012: 0012-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch -# For bz#1687565 - cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 7] -Patch13: ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch -# For bz#1687565 - cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 7] -Patch14: ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch -# For bz#1687565 - cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 7] -Patch15: ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch -# For bz#1687565 - cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 7] -Patch16: ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch -# For bz#1687565 - cloud-init 18.5 rebase for fast provisioning on Azure [RHEL 7] 
-Patch17: ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch -# For bz#1707725 - [WALA][cloud] cloud-init dhclient-hook script has some unexpected side-effects on Azure -Patch18: ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch -# For bz#1726701 - [Azure] [RHEL 7.8] Cloud-init fixes to support fast provisioning for Azure -Patch19: ci-Azure-Return-static-fallback-address-as-if-failed-to.patch -# For bz#1593010 - [cloud-init][RHVM]cloud-init network configuration does not persist reboot [RHEL 7.8] -Patch20: ci-Fix-for-network-configuration-not-persisting-after-r.patch -# For bz#1744526 - [cloud-init][OpenStack] cloud-init can't persist instance-data.json -Patch21: ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch -# For bz#1810064 - cloud-init Azure byte swap (hyperV Gen2 Only) [rhel-7.8.z] -Patch22: ci-azure-avoid.patch -# For bz#1802173 - [cloud-init][rhel-7.8.z]cloud-init cloud-final.service fail with KeyError: 'modules-init' after upgrade to version 18.2-1.el7_6.1 in RHV -Patch23: ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch -# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] -Patch24: ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch -# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] -Patch25: ci-swap-file-size-being-used-before-checked-if-str-315.patch -# For bz#1801094 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z] -Patch26: ci-cc_mounts-fix-incorrect-format-specifiers-316.patch -# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] -Patch27: ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch -# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] -Patch28: ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch -# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] -Patch29: ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch -# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] -Patch30: ci-exoscale-Increase-url_max_wait-to-120s.patch -# For bz#1827207 - Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z] -Patch31: ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch -# For bz#1832177 - [Azure] cloud-init provisioning failed in Azure [rhel-7.8.z] -Patch32: ci-url_helper-read_file_or_url-should-pass-headers-para.patch - -Patch9999: cloud-init-centos-user.patch - +Patch0005: 0005-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch +Patch0006: 0006-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch +Patch0007: 0007-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +Patch0008: 0008-Fix-for-network-configuration-not-persisting-after-r.patch +# For bz#1549638 - [RHEL7]cloud-user added to wheel group and sudoers.d causes 'sudo -v' prompts for passphrase +Patch9: ci-Removing-cloud-user-from-wheel.patch +# For bz#1748015 - [cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok) +Patch10: ci-Remove-race-condition-between-cloud-init-and-Network.patch +# For bz#1812170 - CVE-2020-8632 cloud-init: Too short random password length in cc_set_password in config/cc_set_passwords.py [rhel-7] +Patch11: ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch +# For bz#1812173 - CVE-2020-8631 
cloud-init: Use of random.choice when generating random password [rhel-7] +Patch12: ci-utils-use-SystemRandom-when-generating-random-passwo.patch +# For bz#1574338 - CVE-2018-10896 cloud-init: SSH host keys are not regenerated for the new instances [rhel-7] +Patch13: ci-Enable-ssh_deletekeys-by-default.patch +# For bz#1772505 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init +Patch14: ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch +# For bz#1772505 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init +Patch15: ci-swap-file-size-being-used-before-checked-if-str-315.patch +# For bz#1748015 - [cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok) +Patch16: ci-Remove-race-condition-between-cloud-init-and-Network-v2.patch +# For bz#1772505 - [RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init +Patch17: ci-cc_mounts-fix-incorrect-format-specifiers-316.patch +# For bz#1748015 - [cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok) +Patch18: ci-Use-reload-or-try-restart-instead-of-try-reload-or-r.patch +# For bz#1821999 - [RHEL7.9] Do not log IMDSv2 token values into cloud-init.log +Patch19: ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch +# For bz#1821999 - [RHEL7.9] Do not log IMDSv2 token values into cloud-init.log +Patch20: ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch # Deal with noarch -> arch # https://bugzilla.redhat.com/show_bug.cgi?id=1067089 @@ -152,7 +135,8 @@ mkdir -p $RPM_BUILD_ROOT%{_unitdir} cp rhel/systemd/* $RPM_BUILD_ROOT%{_unitdir}/ [ ! -d $RPM_BUILD_ROOT/usr/lib/systemd/system-generators ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/systemd/system-generators -cp -p systemd/cloud-init-generator $RPM_BUILD_ROOT/usr/lib/systemd/system-generators +cp -p systemd/cloud-init-generator.tmpl $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator +sed -i '1d' $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator [ ! 
 [ ! -d $RPM_BUILD_ROOT/usr/lib/%{name} ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/%{name}
 cp -p tools/ds-identify $RPM_BUILD_ROOT/usr/lib/%{name}/ds-identify
@@ -240,36 +224,58 @@ fi
 %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
 %changelog
-* Wed May 20 2020 Miroslav Rezanina - 18.5-6.el7_8.5
-- ci-url_helper-read_file_or_url-should-pass-headers-para.patch [bz#1832177]
-- Resolves: bz#1832177
-  ([Azure] cloud-init provisioning failed in Azure [rhel-7.8.z])
-
-* Tue May 05 2020 Miroslav Rezanina - 18.5-6.el7_8.4
-- ci-New-data-source-for-the-Exoscale.com-cloud-platform.patch [bz#1827207]
-- ci-Add-support-for-publishing-host-keys-to-GCE-guest-at.patch [bz#1827207]
-- ci-exoscale-fix-sysconfig-cloud_config_modules-override.patch [bz#1827207]
-- ci-exoscale-Increase-url_max_wait-to-120s.patch [bz#1827207]
-- ci-ec2-Add-support-for-AWS-IMDS-v2-session-oriented-55.patch [bz#1827207]
-- Resolves: bz#1827207
-  (Support for AWS IMDS v2 (available in cloud-init 19.4) [rhel-7.8.z])
-
-* Tue Apr 28 2020 Miroslav Rezanina - 18.5-6.el7_8.3
-- ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch [bz#1801094]
-- ci-swap-file-size-being-used-before-checked-if-str-315.patch [bz#1801094]
-- ci-cc_mounts-fix-incorrect-format-specifiers-316.patch [bz#1801094]
-- Resolves: bz#1801094
-  ([RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init [rhel-7.8.z])
-
-* Tue Apr 14 2020 Miroslav Rezanina - 18.5-6.el7_8.2
-- ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch [bz#1802173]
-- Resolves: bz#1802173
-  ([cloud-init][rhel-7.8.z]cloud-init cloud-final.service fail with KeyError: 'modules-init' after upgrade to version 18.2-1.el7_6.1 in RHV)
-
-* Mon Mar 30 2020 Miroslav Rezanina - 18.5-6.el7_8.1
-- ci-azure-avoid.patch [bz#1810064]
-- Resolves: bz#1810064
-  (cloud-init Azure byte swap (hyperV Gen2 Only) [rhel-7.8.z])
+* Wed May 20 2020 Miroslav Rezanina - 19.4-7.el7
+- ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch [bz#1821999]
+- Resolves: bz#1821999
+  ([RHEL7.9] Do not log IMDSv2 token values into cloud-init.log)
+
+* Mon May 11 2020 Miroslav Rezanina - 19.4-6.el7
+- ci-Use-reload-or-try-restart-instead-of-try-reload-or-r.patch [bz#1748015]
+- ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch [bz#1821999]
+- Resolves: bz#1748015
+  ([cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok))
+- Resolves: bz#1821999
+  ([RHEL7.9] Do not log IMDSv2 token values into cloud-init.log)
+
+* Mon Apr 27 2020 Miroslav Rezanina - 19.4-5.el7
+- ci-Remove-race-condition-between-cloud-init-and-Network-v2.patch [bz#1748015]
+- ci-cc_mounts-fix-incorrect-format-specifiers-316.patch [bz#1772505]
+- Resolves: bz#1748015
+  ([cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok))
+- Resolves: bz#1772505
+  ([RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init)
+
+* Wed Apr 15 2020 Miroslav Rezanina - 19.4-4.el7
+- ci-swap-file-size-being-used-before-checked-if-str-315.patch [bz#1772505]
+- Resolves: bz#1772505
+  ([RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init)
+
+* Mon Mar 30 2020 Miroslav Rezanina - 19.4-3.el7
+- ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch [bz#1772505]
+- Resolves: bz#1772505
+  ([RHEL7] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init)
+
+* Thu Mar 19 2020 Miroslav Rezanina - 19.4-2.el7
+- ci-Removing-cloud-user-from-wheel.patch [bz#1549638]
+- ci-Remove-race-condition-between-cloud-init-and-Network.patch [bz#1748015]
+- ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch [bz#1812170]
+- ci-utils-use-SystemRandom-when-generating-random-passwo.patch [bz#1812173]
+- ci-Enable-ssh_deletekeys-by-default.patch [bz#1574338]
+- Resolves: bz#1549638
+  ([RHEL7]cloud-user added to wheel group and sudoers.d causes 'sudo -v' prompts for passphrase)
+- Resolves: bz#1574338
+  (CVE-2018-10896 cloud-init: SSH host keys are not regenerated for the new instances [rhel-7])
+- Resolves: bz#1748015
+  ([cloud-init][RHEL7] /etc/resolv.conf lose config after reboot (initial instance is ok))
+- Resolves: bz#1812170
+  (CVE-2020-8632 cloud-init: Too short random password length in cc_set_password in config/cc_set_passwords.py [rhel-7])
+- Resolves: bz#1812173
+  (CVE-2020-8631 cloud-init: Use of random.choice when generating random password [rhel-7])
+
+* Tue Mar 17 2020 Miroslav Rezanina - 19.4-1.el7
+- Rebase to 19.4 [bz#1803094]
+- Resolves: bz#1803094
+  ([RHEL-7.9] cloud-init rebase to 19.4)

 * Thu Oct 24 2019 Miroslav Rezanina - 18.5-6.el7
 - ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch [bz#1744526]