diff --git a/.cloud-init.metadata b/.cloud-init.metadata index 6803c51..0356978 100644 --- a/.cloud-init.metadata +++ b/.cloud-init.metadata @@ -1 +1 @@ -2ae378aa2ae23b34b0ff123623ba5e2fbdc4928d SOURCES/cloud-init-21.1.tar.gz +830185bb5ce87ad86e4d1c0c62329bb255ec1648 SOURCES/cloud-init-22.1.tar.gz diff --git a/.gitignore b/.gitignore index 103bcf7..bf19bdd 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/cloud-init-21.1.tar.gz +SOURCES/cloud-init-22.1.tar.gz diff --git a/SOURCES/0001-Add-initial-redhat-setup.patch b/SOURCES/0001-Add-initial-redhat-setup.patch index b67fcae..d93c32c 100644 --- a/SOURCES/0001-Add-initial-redhat-setup.patch +++ b/SOURCES/0001-Add-initial-redhat-setup.patch @@ -1,8 +1,34 @@ -From 074cb9b011623849cfa95c1d7cc813bb28f03ff0 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 7 May 2021 13:36:03 +0200 +From 5e1e568d7085fd4443b4e3ccc492f5e31747e270 Mon Sep 17 00:00:00 2001 +From: Amy Chen +Date: Wed, 20 Apr 2022 10:59:48 +0800 Subject: Add initial redhat setup +Merged patches (22.1) +- d18029bf Add netifaces package as a Requires in cloud-init.spec.template +- 31adf961 Add gdisk and openssl as deps to fix UEFI / Azure initialization +- f4a2905d Add dhcp-client as a dependency +- 290e14cc cloud-init.spec.template: update %systemd_postun parameter +- 9be4ae9b (tag: cloud-init-21.1-1.el8) Update to cloud-init-21.1-1.el8 + +Conflicts: +cloudinit/config/cc_chef.py Using double quotes instead of single quotes + +cloudinit/settings.py +- Using rhel settings +- Using double quotes instead of single quotes + +setup.py +- Following the changes of 21.1 rebase +- Using double quotes instead of single quotes + +redhat/cloud-init.spec.template +- Add the drop-in to the right cloud-init.spec used by our package builder, which is downstream-only part of the bz 2002492 fix. 
+ +redhat/Makefile.common +- Backport the build handling fixes from patch "Update to cloud-init-21.1-1.el8" + +Signed-off-by: Amy Chen + Merged patches (21.1): - 915d30ad Change gating file to correct rhel version - 311f318d Removing net-tools dependency @@ -43,36 +69,36 @@ setup.py: Signed-off-by: Eduardo Otubo --- .gitignore | 1 + - cloudinit/config/cc_chef.py | 67 +++- + cloudinit/config/cc_chef.py | 65 ++- cloudinit/settings.py | 7 +- redhat/.gitignore | 1 + - redhat/Makefile | 71 ++++ + redhat/Makefile | 71 +++ redhat/Makefile.common | 37 ++ redhat/cloud-init-tmpfiles.conf | 1 + - redhat/cloud-init.spec.template | 530 ++++++++++++++++++++++++++ + redhat/cloud-init.spec.template | 696 ++++++++++++++++++++++++++ redhat/gating.yaml | 8 + redhat/rpmbuild/BUILD/.gitignore | 3 + redhat/rpmbuild/RPMS/.gitignore | 3 + redhat/rpmbuild/SOURCES/.gitignore | 3 + redhat/rpmbuild/SPECS/.gitignore | 3 + redhat/rpmbuild/SRPMS/.gitignore | 3 + - redhat/scripts/frh.py | 27 ++ - redhat/scripts/git-backport-diff | 327 ++++++++++++++++ - redhat/scripts/git-compile-check | 215 +++++++++++ - redhat/scripts/process-patches.sh | 77 ++++ + redhat/scripts/frh.py | 25 + + redhat/scripts/git-backport-diff | 327 ++++++++++++ + redhat/scripts/git-compile-check | 215 ++++++++ + redhat/scripts/process-patches.sh | 92 ++++ redhat/scripts/tarball_checksum.sh | 3 + rhel/README.rhel | 5 + rhel/cloud-init-tmpfiles.conf | 1 + - rhel/cloud.cfg | 69 ++++ + rhel/cloud.cfg | 69 +++ rhel/systemd/cloud-config.service | 18 + rhel/systemd/cloud-config.target | 11 + - rhel/systemd/cloud-final.service | 24 ++ + rhel/systemd/cloud-final.service | 24 + rhel/systemd/cloud-init-local.service | 31 ++ - rhel/systemd/cloud-init.service | 25 ++ + rhel/systemd/cloud-init.service | 25 + rhel/systemd/cloud-init.target | 7 + - setup.py | 23 +- + setup.py | 28 +- tools/read-version | 28 +- - 30 files changed, 1579 insertions(+), 50 deletions(-) + 30 files changed, 1756 insertions(+), 55 deletions(-) create mode 100644 redhat/.gitignore create mode 100644 redhat/Makefile create mode 100644 redhat/Makefile.common @@ -100,7 +126,7 @@ Signed-off-by: Eduardo Otubo create mode 100644 rhel/systemd/cloud-init.target diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py -index aaf71366..97ef649a 100644 +index fdb3a6e3..d028c548 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -6,7 +6,70 @@ @@ -175,38 +201,29 @@ index aaf71366..97ef649a 100644 import itertools import json -@@ -31,7 +94,7 @@ CHEF_DIRS = tuple([ - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', -- '/var/run/chef', -+ '/run/chef', - ]) - REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index 91e1bfe7..e690c0fd 100644 +index ecc1403b..39650a5b 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py -@@ -47,13 +47,16 @@ CFG_BUILTIN = { +@@ -50,13 +50,16 @@ CFG_BUILTIN = { ], - 'def_log_file': '/var/log/cloud-init.log', - 'log_cfgs': [], -- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'], -+ 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], -+ 'ssh_deletekeys': False, -+ 'ssh_genkeytypes': [], -+ 'syslog_fix_perms': [], - 'system_info': { - 'paths': { - 'cloud_dir': '/var/lib/cloud', - 'templates_dir': '/etc/cloud/templates/', + "def_log_file": "/var/log/cloud-init.log", + "log_cfgs": [], +- "syslog_fix_perms": ["syslog:adm", "root:adm", "root:wheel", "root:root"], ++ "mount_default_fields": [None, None, "auto", 
"defaults,nofail", "0", "2"], ++ "ssh_deletekeys": False, ++ "ssh_genkeytypes": [], ++ "syslog_fix_perms": [], + "system_info": { + "paths": { + "cloud_dir": "/var/lib/cloud", + "templates_dir": "/etc/cloud/templates/", }, -- 'distro': 'ubuntu', -+ 'distro': 'rhel', - 'network': {'renderers': None}, +- "distro": "ubuntu", ++ "distro": "rhel", + "network": {"renderers": None}, }, - 'vendor_data': {'enabled': True, 'prefix': []}, + "vendor_data": {"enabled": True, "prefix": []}, diff --git a/rhel/README.rhel b/rhel/README.rhel new file mode 100644 index 00000000..aa29630d @@ -453,70 +470,78 @@ index 00000000..083c3b6f +Description=Cloud-init target +After=multi-user.target diff --git a/setup.py b/setup.py -index cbacf48e..d5cd01a4 100755 +index a9132d2c..3c377eaa 100755 --- a/setup.py +++ b/setup.py -@@ -125,14 +125,6 @@ INITSYS_FILES = { - 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], - 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], - 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], -- 'systemd': [render_tmpl(f) -- for f in (glob('systemd/*.tmpl') + -- glob('systemd/*.service') + -- glob('systemd/*.target')) -- if (is_f(f) and not is_generator(f))], -- 'systemd.generators': [ +@@ -139,21 +139,6 @@ INITSYS_FILES = { + "sysvinit_deb": [f for f in glob("sysvinit/debian/*") if is_f(f)], + "sysvinit_openrc": [f for f in glob("sysvinit/gentoo/*") if is_f(f)], + "sysvinit_suse": [f for f in glob("sysvinit/suse/*") if is_f(f)], +- "systemd": [ +- render_tmpl(f) +- for f in ( +- glob("systemd/*.tmpl") +- + glob("systemd/*.service") +- + glob("systemd/*.socket") +- + glob("systemd/*.target") +- ) +- if (is_f(f) and not is_generator(f)) +- ], +- "systemd.generators": [ - render_tmpl(f, mode=0o755) -- for f in glob('systemd/*') if is_f(f) and is_generator(f)], - 'upstart': [f for f in glob('upstart/*') if is_f(f)], +- for f in glob("systemd/*") +- if is_f(f) and is_generator(f) +- ], + "upstart": [f for f in glob("upstart/*") if is_f(f)], } INITSYS_ROOTS = { -@@ -142,9 +134,6 @@ INITSYS_ROOTS = { - 'sysvinit_deb': 'etc/init.d', - 'sysvinit_openrc': 'etc/init.d', - 'sysvinit_suse': 'etc/init.d', -- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), -- 'systemd.generators': pkg_config_read('systemd', -- 'systemdsystemgeneratordir'), - 'upstart': 'etc/init/', +@@ -163,10 +148,6 @@ INITSYS_ROOTS = { + "sysvinit_deb": "etc/init.d", + "sysvinit_openrc": "etc/init.d", + "sysvinit_suse": "etc/init.d", +- "systemd": pkg_config_read("systemd", "systemdsystemunitdir"), +- "systemd.generators": pkg_config_read( +- "systemd", "systemdsystemgeneratordir" +- ), + "upstart": "etc/init/", } INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) -@@ -245,14 +234,11 @@ if not in_virtualenv(): - INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] - - data_files = [ -- (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), -+ (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), - (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), - (ETC + '/cloud/templates', glob('templates/*')), -- (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', -- 'tools/uncloud-init', -+ (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', - 'tools/write-ssh-key-fingerprints']), -- (USR + '/share/bash-completion/completions', -- ['bash_completion/cloud-init']), - (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), - (USR + '/share/doc/cloud-init/examples', - [f for f in glob('doc/examples/*') if is_f(f)]), -@@ -263,8 +249,7 
@@ if not platform.system().endswith('BSD'): - data_files.extend([ - (ETC + '/NetworkManager/dispatcher.d/', - ['tools/hook-network-manager']), -- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), -- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) -+ ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]) - ]) - # Use a subclass for install that handles - # adding on the right init system configuration files -@@ -286,8 +271,6 @@ setuptools.setup( - scripts=['tools/cloud-init-per'], - license='Dual-licensed under GPLv3 or Apache 2.0', +@@ -281,15 +262,13 @@ data_files = [ + ( + USR_LIB_EXEC + "/cloud-init", + [ +- "tools/ds-identify", + "tools/hook-hotplug", + "tools/uncloud-init", + "tools/write-ssh-key-fingerprints", + ], + ), + ( +- USR + "/share/bash-completion/completions", +- ["bash_completion/cloud-init"], ++ ETC + "/bash_completion.d", ["bash_completion/cloud-init"], + ), + (USR + "/share/doc/cloud-init", [f for f in glob("doc/*") if is_f(f)]), + ( +@@ -308,8 +287,7 @@ if not platform.system().endswith("BSD"): + ETC + "/NetworkManager/dispatcher.d/", + ["tools/hook-network-manager"], + ), +- (ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]), +- (LIB + "/udev/rules.d", [f for f in glob("udev/*.rules")]), ++ ("/usr/lib/udev/rules.d", [f for f in glob("udev/*.rules")]), + ( + ETC + "/systemd/system/sshd-keygen@.service.d/", + ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"], +@@ -339,8 +317,6 @@ setuptools.setup( + scripts=["tools/cloud-init-per"], + license="Dual-licensed under GPLv3 or Apache 2.0", data_files=data_files, - install_requires=requirements, - cmdclass=cmdclass, entry_points={ - 'console_scripts': [ - 'cloud-init = cloudinit.cmd.main:main', + "console_scripts": [ + "cloud-init = cloudinit.cmd.main:main", diff --git a/tools/read-version b/tools/read-version index 02c90643..79755f78 100755 --- a/tools/read-version @@ -557,5 +582,5 @@ index 02c90643..79755f78 100755 # version is X.Y.Z[+xxx.gHASH] # version_long is None or X.Y.Z-xxx-gHASH -- -2.27.0 +2.31.1 diff --git a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch index 3dc704f..60c0a2a 100644 --- a/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch +++ b/SOURCES/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch @@ -1,4 +1,4 @@ -From 472c2b5d4342b6ab6ce1584dc39bed0e6c1ca2e7 Mon Sep 17 00:00:00 2001 +From e0dc628ac553072891fa6607dc91b652efd99be2 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Fri, 7 May 2021 13:36:06 +0200 Subject: Do not write NM_CONTROLLED=no in generated interface config files @@ -12,28 +12,27 @@ X-downstream-only: true Signed-off-by: Eduardo Otubo Signed-off-by: Ryan McCabe --- - cloudinit/net/sysconfig.py | 2 +- + cloudinit/net/sysconfig.py | 1 - tests/unittests/test_net.py | 28 ---------------------------- - 2 files changed, 1 insertion(+), 29 deletions(-) + 2 files changed, 29 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index 99a4bae4..3d276666 100644 +index ba85c4f6..e06ddee7 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py -@@ -289,7 +289,7 @@ class Renderer(renderer.Renderer): - # details about this) - - iface_defaults = { -- 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False, -+ 'rhel': {'ONBOOT': True, 'USERCTL': False, - 'BOOTPROTO': 'none'}, - 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'}, - } +@@ -336,7 +336,6 @@ class 
Renderer(renderer.Renderer): + "rhel": { + "ONBOOT": True, + "USERCTL": False, +- "NM_CONTROLLED": False, + "BOOTPROTO": "none", + }, + "suse": {"BOOTPROTO": "static", "STARTMODE": "auto"}, diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 38d934d4..c67b5fcc 100644 +index 47e4ba00..591241b3 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py -@@ -535,7 +535,6 @@ GATEWAY=172.19.3.254 +@@ -579,7 +579,6 @@ GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 NETMASK=255.255.252.0 @@ -41,7 +40,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -633,7 +632,6 @@ IPADDR=172.19.1.34 +@@ -712,7 +711,6 @@ IPADDR=172.19.1.34 IPADDR1=10.0.0.10 NETMASK=255.255.252.0 NETMASK1=255.255.255.0 @@ -49,7 +48,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -756,7 +754,6 @@ IPV6_AUTOCONF=no +@@ -874,7 +872,6 @@ IPV6_AUTOCONF=no IPV6_DEFAULTGW=2001:DB8::1 IPV6_FORCE_ACCEPT_RA=no NETMASK=255.255.252.0 @@ -57,23 +56,23 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -884,7 +881,6 @@ NETWORK_CONFIGS = { +@@ -1053,7 +1050,6 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=eth1 HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -901,7 +897,6 @@ NETWORK_CONFIGS = { + USERCTL=no""" +@@ -1072,7 +1068,6 @@ NETWORK_CONFIGS = { IPADDR=192.168.21.3 NETMASK=255.255.255.0 METRIC=10000 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -1032,7 +1027,6 @@ NETWORK_CONFIGS = { + USERCTL=no""" +@@ -1244,7 +1239,6 @@ NETWORK_CONFIGS = { IPV6_AUTOCONF=no IPV6_FORCE_ACCEPT_RA=no NETMASK=255.255.255.0 @@ -81,15 +80,15 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -1737,7 +1731,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2093,7 +2087,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DHCPV6C=yes IPV6INIT=yes MACADDR=aa:bb:cc:dd:ee:ff - NM_CONTROLLED=no ONBOOT=yes TYPE=Bond - USERCTL=no"""), -@@ -1745,7 +1738,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" +@@ -2103,7 +2096,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=dhcp DEVICE=bond0.200 DHCLIENT_SET_DEFAULT_ROUTE=no @@ -97,7 +96,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes PHYSDEV=bond0 USERCTL=no -@@ -1763,7 +1755,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2123,7 +2115,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true IPV6_DEFAULTGW=2001:4800:78ff:1b::1 MACADDR=bb:bb:bb:bb:bb:aa NETMASK=255.255.255.0 @@ -105,15 +104,15 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes PRIO=22 STP=no -@@ -1773,7 +1764,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2135,7 +2126,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=none DEVICE=eth0 HWADDR=c0:d6:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -1790,7 +1780,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" +@@ -2154,7 +2144,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true MTU=1500 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 @@ -121,7 +120,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes PHYSDEV=eth0 USERCTL=no -@@ -1800,7 +1789,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2166,7 +2155,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth1 HWADDR=aa:d6:9f:2c:e8:80 
MASTER=bond0 @@ -129,7 +128,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes SLAVE=yes TYPE=Ethernet -@@ -1810,7 +1798,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2178,7 +2166,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth2 HWADDR=c0:bb:9f:2c:e8:80 MASTER=bond0 @@ -137,31 +136,31 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes SLAVE=yes TYPE=Ethernet -@@ -1820,7 +1807,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +@@ -2190,7 +2177,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth3 HWADDR=66:bb:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -1829,7 +1815,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" +@@ -2201,7 +2187,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth4 HWADDR=98:bb:9f:2c:e8:80 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -1838,7 +1823,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" +@@ -2212,7 +2197,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth5 DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a - NM_CONTROLLED=no ONBOOT=no TYPE=Ethernet - USERCTL=no"""), -@@ -2294,7 +2278,6 @@ iface bond0 inet6 static + USERCTL=no""" +@@ -2689,7 +2673,6 @@ iface bond0 inet6 static MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 @@ -169,7 +168,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Bond USERCTL=no -@@ -2304,7 +2287,6 @@ iface bond0 inet6 static +@@ -2701,7 +2684,6 @@ iface bond0 inet6 static DEVICE=bond0s0 HWADDR=aa:bb:cc:dd:e8:00 MASTER=bond0 @@ -177,7 +176,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes SLAVE=yes TYPE=Ethernet -@@ -2326,7 +2308,6 @@ iface bond0 inet6 static +@@ -2729,7 +2711,6 @@ iface bond0 inet6 static DEVICE=bond0s1 HWADDR=aa:bb:cc:dd:e8:01 MASTER=bond0 @@ -185,15 +184,15 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes SLAVE=yes TYPE=Ethernet -@@ -2383,7 +2364,6 @@ iface bond0 inet6 static +@@ -2794,7 +2775,6 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=en0 HWADDR=aa:bb:cc:dd:e8:00 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), -@@ -2402,7 +2382,6 @@ iface bond0 inet6 static + USERCTL=no""" +@@ -2815,7 +2795,6 @@ iface bond0 inet6 static MTU=2222 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 @@ -201,7 +200,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes PHYSDEV=en0 USERCTL=no -@@ -2467,7 +2446,6 @@ iface bond0 inet6 static +@@ -2890,7 +2869,6 @@ iface bond0 inet6 static DEVICE=br0 IPADDR=192.168.2.2 NETMASK=255.255.255.0 @@ -209,7 +208,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes PRIO=22 STP=no -@@ -2591,7 +2569,6 @@ iface bond0 inet6 static +@@ -3032,7 +3010,6 @@ iface bond0 inet6 static HWADDR=52:54:00:12:34:00 IPADDR=192.168.1.2 NETMASK=255.255.255.0 @@ -217,7 +216,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=no TYPE=Ethernet USERCTL=no -@@ -2601,7 +2578,6 @@ iface bond0 inet6 static +@@ -3044,7 +3021,6 @@ iface bond0 inet6 static DEVICE=eth1 HWADDR=52:54:00:12:34:aa MTU=1480 @@ -225,7 +224,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -2610,7 +2586,6 @@ iface bond0 inet6 static +@@ -3055,7 +3031,6 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=eth2 HWADDR=52:54:00:12:34:ff @@ -233,7 +232,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=no TYPE=Ethernet USERCTL=no -@@ -3027,7 +3002,6 @@ class TestRhelSysConfigRendering(CiTestCase): +@@ -3628,7 +3603,6 @@ class 
TestRhelSysConfigRendering(CiTestCase): BOOTPROTO=dhcp DEVICE=eth1000 HWADDR=07-1c-c6-75-a4-be @@ -241,7 +240,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -3148,7 +3122,6 @@ GATEWAY=10.0.2.2 +@@ -3840,7 +3814,6 @@ GATEWAY=10.0.2.2 HWADDR=52:54:00:12:34:00 IPADDR=10.0.2.15 NETMASK=255.255.255.0 @@ -249,7 +248,7 @@ index 38d934d4..c67b5fcc 100644 ONBOOT=yes TYPE=Ethernet USERCTL=no -@@ -3218,7 +3191,6 @@ USERCTL=no +@@ -3910,7 +3883,6 @@ USERCTL=no # BOOTPROTO=dhcp DEVICE=eth0 @@ -258,5 +257,5 @@ index 38d934d4..c67b5fcc 100644 TYPE=Ethernet USERCTL=no -- -2.27.0 +2.31.1 diff --git a/SOURCES/0003-limit-permissions-on-def_log_file.patch b/SOURCES/0003-limit-permissions-on-def_log_file.patch index 941adaf..6f58247 100644 --- a/SOURCES/0003-limit-permissions-on-def_log_file.patch +++ b/SOURCES/0003-limit-permissions-on-def_log_file.patch @@ -1,4 +1,4 @@ -From 6134624f10ef56534e37624adc12f11b09910591 Mon Sep 17 00:00:00 2001 +From cb7b35ca10c82c9725c3527e3ec5fb8cb7c61bc0 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Fri, 7 May 2021 13:36:08 +0200 Subject: limit permissions on def_log_file @@ -22,31 +22,31 @@ Signed-off-by: Eduardo Otubo 3 files changed, 6 insertions(+) diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index e690c0fd..43a1490c 100644 +index 39650a5b..3c2145e9 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py -@@ -46,6 +46,7 @@ CFG_BUILTIN = { - 'None', +@@ -49,6 +49,7 @@ CFG_BUILTIN = { + "None", ], - 'def_log_file': '/var/log/cloud-init.log', -+ 'def_log_file_mode': 0o600, - 'log_cfgs': [], - 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], - 'ssh_deletekeys': False, + "def_log_file": "/var/log/cloud-init.log", ++ "def_log_file_mode": 0o600, + "log_cfgs": [], + "mount_default_fields": [None, None, "auto", "defaults,nofail", "0", "2"], + "ssh_deletekeys": False, diff --git a/cloudinit/stages.py b/cloudinit/stages.py -index 3ef4491c..83e25dd1 100644 +index 3f17294b..61db1dbd 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py -@@ -147,6 +147,7 @@ class Init(object): +@@ -205,6 +205,7 @@ class Init(object): def _initialize_filesystem(self): util.ensure_dirs(self._initial_subdirs()) - log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') -+ log_file_mode = util.get_cfg_option_int(self.cfg, 'def_log_file_mode') + log_file = util.get_cfg_option_str(self.cfg, "def_log_file") ++ log_file_mode = util.get_cfg_option_int(self.cfg, "def_log_file_mode") if log_file: - util.ensure_file(log_file, preserve_mode=True) - perms = self.cfg.get('syslog_fix_perms') + util.ensure_file(log_file, mode=0o640, preserve_mode=True) + perms = self.cfg.get("syslog_fix_perms") diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt -index de9a0f87..bb33ad45 100644 +index a2b4a3fa..0ccf3147 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -414,10 +414,14 @@ timezone: US/Eastern @@ -65,5 +65,5 @@ index de9a0f87..bb33ad45 100644 # you can set passwords for a user or multiple users -- -2.27.0 +2.31.1 diff --git a/SOURCES/0004-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0004-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch new file mode 100644 index 0000000..5c5a144 --- /dev/null +++ b/SOURCES/0004-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch @@ -0,0 +1,52 @@ +From ffa647e83efd4293bd027e9e390274aad8a12d94 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:13 +0200 +Subject: include 'NOZEROCONF=yes' in 
/etc/sysconfig/network + +RH-Author: Eduardo Otubo +Message-id: <20190320114559.23708-1-otubo@redhat.com> +Patchwork-id: 84937 +O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network +Bugzilla: 1653131 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +The option NOZEROCONF=yes is not included by default in +/etc/sysconfig/network, which is required by Overcloud instances. The +patch also includes tests for the modifications. + +X-downstream-only: yes +Resolves: rhbz#1653131 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/net/sysconfig.py | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index e06ddee7..362e8d19 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -1038,7 +1038,16 @@ class Renderer(renderer.Renderer): + # Distros configuring /etc/sysconfig/network as a file e.g. Centos + if sysconfig_path.endswith("network"): + util.ensure_dir(os.path.dirname(sysconfig_path)) +- netcfg = [_make_header(), "NETWORKING=yes"] ++ netcfg = [] ++ for line in util.load_file(sysconfig_path, quiet=True).split("\n"): ++ if "cloud-init" in line: ++ break ++ if not line.startswith(("NETWORKING=", ++ "IPV6_AUTOCONF=", ++ "NETWORKING_IPV6=")): ++ netcfg.append(line) ++ # Now generate the cloud-init portion of sysconfig/network ++ netcfg.extend([_make_header(), "NETWORKING=yes"]) + if network_state.use_ipv6: + netcfg.append("NETWORKING_IPV6=yes") + netcfg.append("IPV6_AUTOCONF=no") +-- +2.31.1 + diff --git a/SOURCES/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/SOURCES/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch deleted file mode 100644 index 4d5a0d2..0000000 --- a/SOURCES/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 699d37a6ff3e343e214943794aac09e4156c2b2b Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 7 May 2021 13:36:10 +0200 -Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp - -Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies -only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6. 
- -X-downstream-only: yes - -Resolves: rhbz#1519271 -Signed-off-by: Ryan McCabe - -Merged patches (19.4): -- 6444df4 sysconfig: Don't disable IPV6_AUTOCONF - -Signed-off-by: Eduardo Otubo ---- - tests/unittests/test_net.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index c67b5fcc..4ea0e597 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -1729,6 +1729,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes -+ IPV6_AUTOCONF=no - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - ONBOOT=yes --- -2.27.0 - diff --git a/SOURCES/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/SOURCES/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch deleted file mode 100644 index 100d3a2..0000000 --- a/SOURCES/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch +++ /dev/null @@ -1,57 +0,0 @@ -From ccc75c1be3ae08d813193071c798fc905b5c03e5 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 7 May 2021 13:36:12 +0200 -Subject: DataSourceAzure.py: use hostnamectl to set hostname - -RH-Author: Vitaly Kuznetsov -Message-id: <20180417130754.12918-3-vkuznets@redhat.com> -Patchwork-id: 79659 -O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname -Bugzilla: 1568717 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Cathy Avery - -The right way to set hostname in RHEL7 is: - - $ hostnamectl set-hostname HOSTNAME - -DataSourceAzure, however, uses: - $ hostname HOSTSNAME - -instead and this causes problems. We can't simply change -'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used -for both getting and setting the hostname. - -Long term, this should be fixed in a different way. Cloud-init -has distro-specific hostname setting/getting (see -cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched -to use these. 
- -Resolves: rhbz#1434109 - -X-downstream-only: yes - -Signed-off-by: Eduardo Otubo -Signed-off-by: Vitaly Kuznetsov -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index cee630f7..553b5a7e 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -296,7 +296,7 @@ def get_hostname(hostname_command='hostname'): - - - def set_hostname(hostname, hostname_command='hostname'): -- subp.subp([hostname_command, hostname]) -+ util.subp(['hostnamectl', 'set-hostname', str(hostname)]) - - - @azure_ds_telemetry_reporter --- -2.27.0 - diff --git a/SOURCES/0005-Remove-race-condition-between-cloud-init-and-Network.patch b/SOURCES/0005-Remove-race-condition-between-cloud-init-and-Network.patch new file mode 100644 index 0000000..478e5ab --- /dev/null +++ b/SOURCES/0005-Remove-race-condition-between-cloud-init-and-Network.patch @@ -0,0 +1,148 @@ +From 386f0a82bfdfd62e506bf4251c17263260d3250a Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:14 +0200 +Subject: Remove race condition between cloud-init and NetworkManager + +Message-id: <20200302104635.11648-1-otubo@redhat.com> +Patchwork-id: 94098 +O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Remove race condition between cloud-init and NetworkManager +Bugzilla: 1807797 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal + +BZ: 1748015 +BRANCH: rhel7/master-18.5 +BREW: 26924611 + +BZ: 1807797 +BRANCH: rhel820/master-18.5 +BREW: 26924957 + +cloud-init service is set to start before NetworkManager service starts, +but this does not avoid a race condition between them. NetworkManager +starts before cloud-init can write `dns=none' to the file: +/etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager +doesn't read the configuration and erases all resolv.conf values upon +shutdown. On the next reboot neither cloud-init or NetworkManager will +write anything to resolv.conf, leaving it blank. + +This patch introduces a NM reload (try-restart) at the end of cloud-init +start up so it won't erase resolv.conf upon first shutdown. + +x-downstream-only: yes +resolves: rhbz#1748015, rhbz#1807797 and rhbz#1804780 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina + +This commit is a squash and also includes the folloowing commits: + +commit 316a17b7c02a87fa9b2981535be0b20d165adc46 +Author: Eduardo Otubo +Date: Mon Jun 1 11:58:06 2020 +0200 + + Make cloud-init.service execute after network is up + + RH-Author: Eduardo Otubo + Message-id: <20200526090804.2047-1-otubo@redhat.com> + Patchwork-id: 96809 + O-Subject: [RHEL-8.2.1 cloud-init PATCH] Make cloud-init.service execute after network is up + Bugzilla: 1803928 + RH-Acked-by: Vitaly Kuznetsov + RH-Acked-by: Miroslav Rezanina + + cloud-init.service needs to wait until network is fully up before + continuing executing and configuring its service. 
+ + Signed-off-by: Eduardo Otubo + + x-downstream-only: yes + Resolves: rhbz#1831646 + Signed-off-by: Miroslav Rezanina + +commit 0422ba0e773d1a8257a3f2bf3db05f3bc7917eb7 +Author: Eduardo Otubo +Date: Thu May 28 08:44:08 2020 +0200 + + Remove race condition between cloud-init and NetworkManager + + RH-Author: Eduardo Otubo + Message-id: <20200327121911.17699-1-otubo@redhat.com> + Patchwork-id: 94453 + O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCHv2] Remove race condition between cloud-init and NetworkManager + Bugzilla: 1840648 + RH-Acked-by: Vitaly Kuznetsov + RH-Acked-by: Miroslav Rezanina + RH-Acked-by: Cathy Avery + + cloud-init service is set to start before NetworkManager service starts, + but this does not avoid a race condition between them. NetworkManager + starts before cloud-init can write `dns=none' to the file: + /etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager + doesn't read the configuration and erases all resolv.conf values upon + shutdown. On the next reboot neither cloud-init or NetworkManager will + write anything to resolv.conf, leaving it blank. + + This patch introduces a NM reload (try-reload-or-restart) at the end of cloud-init + start up so it won't erase resolv.conf upon first shutdown. + + x-downstream-only: yes + + Signed-off-by: Eduardo Otubo otubo@redhat.com + Signed-off-by: Miroslav Rezanina + +commit e0b48a936433faea7f56dbc29dda35acf7d375f7 +Author: Eduardo Otubo +Date: Thu May 28 08:44:06 2020 +0200 + + Enable ssh_deletekeys by default + + RH-Author: Eduardo Otubo + Message-id: <20200317091705.15715-1-otubo@redhat.com> + Patchwork-id: 94365 + O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Enable ssh_deletekeys by default + Bugzilla: 1814152 + RH-Acked-by: Mohammed Gamal + RH-Acked-by: Vitaly Kuznetsov + + The configuration option ssh_deletekeys will trigger the generation + of new ssh keys for every new instance deployed. 
+ + x-downstream-only: yes + resolves: rhbz#1814152 + + Signed-off-by: Eduardo Otubo + Signed-off-by: Miroslav Rezanina +--- + rhel/cloud.cfg | 2 +- + rhel/systemd/cloud-init.service | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 82e8bf62..9ecba215 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -6,7 +6,7 @@ ssh_pwauth: 0 + + mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] + resize_rootfs_tmp: /dev +-ssh_deletekeys: 0 ++ssh_deletekeys: 1 + ssh_genkeytypes: ~ + syslog_fix_perms: ~ + disable_vmware_customization: false +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +index d0023a05..0b3d796d 100644 +--- a/rhel/systemd/cloud-init.service ++++ b/rhel/systemd/cloud-init.service +@@ -5,6 +5,7 @@ Wants=sshd-keygen.service + Wants=sshd.service + After=cloud-init-local.service + After=NetworkManager.service network.service ++After=NetworkManager-wait-online.service + Before=network-online.target + Before=sshd-keygen.service + Before=sshd.service +-- +2.31.1 + diff --git a/SOURCES/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/SOURCES/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch deleted file mode 100644 index 6276255..0000000 --- a/SOURCES/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +++ /dev/null @@ -1,65 +0,0 @@ -From dfea0490b899804761fbd7aa23822783d7c36ec5 Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 7 May 2021 13:36:13 +0200 -Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network - -RH-Author: Eduardo Otubo -Message-id: <20190320114559.23708-1-otubo@redhat.com> -Patchwork-id: 84937 -O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network -Bugzilla: 1653131 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal -RH-Acked-by: Vitaly Kuznetsov - -The option NOZEROCONF=yes is not included by default in -/etc/sysconfig/network, which is required by Overcloud instances. The -patch also includes tests for the modifications. - -X-downstream-only: yes -Resolves: rhbz#1653131 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina ---- - cloudinit/net/sysconfig.py | 11 ++++++++++- - tests/unittests/test_net.py | 1 - - 2 files changed, 10 insertions(+), 2 deletions(-) - -diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index 3d276666..d5440998 100644 ---- a/cloudinit/net/sysconfig.py -+++ b/cloudinit/net/sysconfig.py -@@ -925,7 +925,16 @@ class Renderer(renderer.Renderer): - # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos - if sysconfig_path.endswith('network'): - util.ensure_dir(os.path.dirname(sysconfig_path)) -- netcfg = [_make_header(), 'NETWORKING=yes'] -+ netcfg = [] -+ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): -+ if 'cloud-init' in line: -+ break -+ if not line.startswith(('NETWORKING=', -+ 'IPV6_AUTOCONF=', -+ 'NETWORKING_IPV6=')): -+ netcfg.append(line) -+ # Now generate the cloud-init portion of sysconfig/network -+ netcfg.extend([_make_header(), 'NETWORKING=yes']) - if network_state.use_ipv6: - netcfg.append('NETWORKING_IPV6=yes') - netcfg.append('IPV6_AUTOCONF=no') -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 4ea0e597..c67b5fcc 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -1729,7 +1729,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes -- IPV6_AUTOCONF=no - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - ONBOOT=yes --- -2.27.0 - diff --git a/SOURCES/0006-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch b/SOURCES/0006-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch new file mode 100644 index 0000000..e596836 --- /dev/null +++ b/SOURCES/0006-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch @@ -0,0 +1,65 @@ +From b545a0cbabe8924d048b7172b30e7aad59ed32d5 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Thu, 20 May 2021 08:53:55 +0200 +Subject: rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and set in + cloud.cfg + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 10: rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and set in cloud.cfg +RH-Commit: [1/1] 6da989423b9b6e017afbac2f1af3649b0487310f +RH-Bugzilla: 1957532 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Cathy Avery +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +Currently genkeytypes in cloud.cfg is set to None, so together with +ssh_deletekeys=1 cloudinit on first boot it will just delete the existing +keys and not generate new ones. + +Just removing that property in cloud.cfg is not enough, because +settings.py provides another empty default value that will be used +instead, resulting to no key generated even when the property is not defined. + +Removing genkeytypes also in settings.py will default to GENERATE_KEY_NAMES, +but since we want only 'rsa', 'ecdsa' and 'ed25519', add back genkeytypes in +cloud.cfg with the above defaults. + +Also remove ssh_deletekeys in settings.py as we always need +to 1 (and it also defaults to 1). 
+ +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/settings.py | 2 -- + rhel/cloud.cfg | 2 +- + 2 files changed, 1 insertion(+), 3 deletions(-) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 3c2145e9..71672e10 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -52,8 +52,6 @@ CFG_BUILTIN = { + "def_log_file_mode": 0o600, + "log_cfgs": [], + "mount_default_fields": [None, None, "auto", "defaults,nofail", "0", "2"], +- "ssh_deletekeys": False, +- "ssh_genkeytypes": [], + "syslog_fix_perms": [], + "system_info": { + "paths": { +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 9ecba215..cbee197a 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -7,7 +7,7 @@ ssh_pwauth: 0 + mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] + resize_rootfs_tmp: /dev + ssh_deletekeys: 1 +-ssh_genkeytypes: ~ ++ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519'] + syslog_fix_perms: ~ + disable_vmware_customization: false + +-- +2.31.1 + diff --git a/SOURCES/0007-Remove-race-condition-between-cloud-init-and-Network.patch b/SOURCES/0007-Remove-race-condition-between-cloud-init-and-Network.patch deleted file mode 100644 index 9c9e4cc..0000000 --- a/SOURCES/0007-Remove-race-condition-between-cloud-init-and-Network.patch +++ /dev/null @@ -1,148 +0,0 @@ -From 24894dcf45a307f44e29dc5d5b2d864b75fd982c Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Fri, 7 May 2021 13:36:14 +0200 -Subject: Remove race condition between cloud-init and NetworkManager - -Message-id: <20200302104635.11648-1-otubo@redhat.com> -Patchwork-id: 94098 -O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Remove race condition between cloud-init and NetworkManager -Bugzilla: 1807797 -RH-Acked-by: Cathy Avery -RH-Acked-by: Mohammed Gamal - -BZ: 1748015 -BRANCH: rhel7/master-18.5 -BREW: 26924611 - -BZ: 1807797 -BRANCH: rhel820/master-18.5 -BREW: 26924957 - -cloud-init service is set to start before NetworkManager service starts, -but this does not avoid a race condition between them. NetworkManager -starts before cloud-init can write `dns=none' to the file: -/etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager -doesn't read the configuration and erases all resolv.conf values upon -shutdown. On the next reboot neither cloud-init or NetworkManager will -write anything to resolv.conf, leaving it blank. - -This patch introduces a NM reload (try-restart) at the end of cloud-init -start up so it won't erase resolv.conf upon first shutdown. - -x-downstream-only: yes -resolves: rhbz#1748015, rhbz#1807797 and rhbz#1804780 - -Signed-off-by: Eduardo Otubo -Signed-off-by: Miroslav Rezanina - -This commit is a squash and also includes the folloowing commits: - -commit 316a17b7c02a87fa9b2981535be0b20d165adc46 -Author: Eduardo Otubo -Date: Mon Jun 1 11:58:06 2020 +0200 - - Make cloud-init.service execute after network is up - - RH-Author: Eduardo Otubo - Message-id: <20200526090804.2047-1-otubo@redhat.com> - Patchwork-id: 96809 - O-Subject: [RHEL-8.2.1 cloud-init PATCH] Make cloud-init.service execute after network is up - Bugzilla: 1803928 - RH-Acked-by: Vitaly Kuznetsov - RH-Acked-by: Miroslav Rezanina - - cloud-init.service needs to wait until network is fully up before - continuing executing and configuring its service. 
- - Signed-off-by: Eduardo Otubo - - x-downstream-only: yes - Resolves: rhbz#1831646 - Signed-off-by: Miroslav Rezanina - -commit 0422ba0e773d1a8257a3f2bf3db05f3bc7917eb7 -Author: Eduardo Otubo -Date: Thu May 28 08:44:08 2020 +0200 - - Remove race condition between cloud-init and NetworkManager - - RH-Author: Eduardo Otubo - Message-id: <20200327121911.17699-1-otubo@redhat.com> - Patchwork-id: 94453 - O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCHv2] Remove race condition between cloud-init and NetworkManager - Bugzilla: 1840648 - RH-Acked-by: Vitaly Kuznetsov - RH-Acked-by: Miroslav Rezanina - RH-Acked-by: Cathy Avery - - cloud-init service is set to start before NetworkManager service starts, - but this does not avoid a race condition between them. NetworkManager - starts before cloud-init can write `dns=none' to the file: - /etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager - doesn't read the configuration and erases all resolv.conf values upon - shutdown. On the next reboot neither cloud-init or NetworkManager will - write anything to resolv.conf, leaving it blank. - - This patch introduces a NM reload (try-reload-or-restart) at the end of cloud-init - start up so it won't erase resolv.conf upon first shutdown. - - x-downstream-only: yes - - Signed-off-by: Eduardo Otubo otubo@redhat.com - Signed-off-by: Miroslav Rezanina - -commit e0b48a936433faea7f56dbc29dda35acf7d375f7 -Author: Eduardo Otubo -Date: Thu May 28 08:44:06 2020 +0200 - - Enable ssh_deletekeys by default - - RH-Author: Eduardo Otubo - Message-id: <20200317091705.15715-1-otubo@redhat.com> - Patchwork-id: 94365 - O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Enable ssh_deletekeys by default - Bugzilla: 1814152 - RH-Acked-by: Mohammed Gamal - RH-Acked-by: Vitaly Kuznetsov - - The configuration option ssh_deletekeys will trigger the generation - of new ssh keys for every new instance deployed. 
- - x-downstream-only: yes - resolves: rhbz#1814152 - - Signed-off-by: Eduardo Otubo - Signed-off-by: Miroslav Rezanina ---- - rhel/cloud.cfg | 2 +- - rhel/systemd/cloud-init.service | 1 + - 2 files changed, 2 insertions(+), 1 deletion(-) - -diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg -index 82e8bf62..9ecba215 100644 ---- a/rhel/cloud.cfg -+++ b/rhel/cloud.cfg -@@ -6,7 +6,7 @@ ssh_pwauth: 0 - - mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] - resize_rootfs_tmp: /dev --ssh_deletekeys: 0 -+ssh_deletekeys: 1 - ssh_genkeytypes: ~ - syslog_fix_perms: ~ - disable_vmware_customization: false -diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service -index d0023a05..0b3d796d 100644 ---- a/rhel/systemd/cloud-init.service -+++ b/rhel/systemd/cloud-init.service -@@ -5,6 +5,7 @@ Wants=sshd-keygen.service - Wants=sshd.service - After=cloud-init-local.service - After=NetworkManager.service network.service -+After=NetworkManager-wait-online.service - Before=network-online.target - Before=sshd-keygen.service - Before=sshd.service --- -2.27.0 - diff --git a/SOURCES/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch b/SOURCES/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch deleted file mode 100644 index 38f08cc..0000000 --- a/SOURCES/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch +++ /dev/null @@ -1,496 +0,0 @@ -From b48dda73da94782d7ab0c455fa382d3a5ef3c419 Mon Sep 17 00:00:00 2001 -From: Daniel Watkins -Date: Mon, 8 Mar 2021 12:50:57 -0500 -Subject: net: exclude OVS internal interfaces in get_interfaces (#829) - -`get_interfaces` is used to in two ways, broadly: firstly, to determine -the available interfaces when converting cloud network configuration -formats to cloud-init's network configuration formats; and, secondly, to -ensure that any interfaces which are specified in network configuration -are (a) available, and (b) named correctly. The first of these is -unaffected by this commit, as no clouds support Open vSwitch -configuration in their network configuration formats. - -For the second, we check that MAC addresses of physical devices are -unique. In some OVS configurations, there are OVS-created devices which -have duplicate MAC addresses, either with each other or with physical -devices. As these interfaces are created by OVS, we can be confident -that (a) they will be available when appropriate, and (b) that OVS will -name them correctly. As such, this commit excludes any OVS-internal -interfaces from the set of interfaces returned by `get_interfaces`. - -LP: #1912844 ---- - cloudinit/net/__init__.py | 62 +++++++++ - cloudinit/net/tests/test_init.py | 119 ++++++++++++++++++ - .../sources/helpers/tests/test_openstack.py | 5 + - cloudinit/sources/tests/test_oracle.py | 4 + - .../integration_tests/bugs/test_lp1912844.py | 103 +++++++++++++++ - .../test_datasource/test_configdrive.py | 8 ++ - tests/unittests/test_net.py | 20 +++ - 7 files changed, 321 insertions(+) - create mode 100644 tests/integration_tests/bugs/test_lp1912844.py - -diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py -index de65e7af..385b7bcc 100644 ---- a/cloudinit/net/__init__.py -+++ b/cloudinit/net/__init__.py -@@ -6,6 +6,7 @@ - # This file is part of cloud-init. See LICENSE file for license information. 
- - import errno -+import functools - import ipaddress - import logging - import os -@@ -19,6 +20,19 @@ from cloudinit.url_helper import UrlError, readurl - LOG = logging.getLogger(__name__) - SYS_CLASS_NET = "/sys/class/net/" - DEFAULT_PRIMARY_INTERFACE = 'eth0' -+OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [ -+ "ovs-vsctl", -+ "--format", -+ "csv", -+ "--no-headings", -+ "--timeout", -+ "10", -+ "--columns", -+ "name", -+ "find", -+ "interface", -+ "type=internal", -+] - - - def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): -@@ -133,6 +147,52 @@ def master_is_openvswitch(devname): - return os.path.exists(ovs_path) - - -+@functools.lru_cache(maxsize=None) -+def openvswitch_is_installed() -> bool: -+ """Return a bool indicating if Open vSwitch is installed in the system.""" -+ ret = bool(subp.which("ovs-vsctl")) -+ if not ret: -+ LOG.debug( -+ "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces" -+ ) -+ return ret -+ -+ -+@functools.lru_cache(maxsize=None) -+def get_ovs_internal_interfaces() -> list: -+ """Return a list of the names of OVS internal interfaces on the system. -+ -+ These will all be strings, and are used to exclude OVS-specific interface -+ from cloud-init's network configuration handling. -+ """ -+ try: -+ out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD) -+ except subp.ProcessExecutionError as exc: -+ if "database connection failed" in exc.stderr: -+ LOG.info( -+ "Open vSwitch is not yet up; no interfaces will be detected as" -+ " OVS-internal" -+ ) -+ return [] -+ raise -+ else: -+ return out.splitlines() -+ -+ -+def is_openvswitch_internal_interface(devname: str) -> bool: -+ """Returns True if this is an OVS internal interface. -+ -+ If OVS is not installed or not yet running, this will return False. -+ """ -+ if not openvswitch_is_installed(): -+ return False -+ ovs_bridges = get_ovs_internal_interfaces() -+ if devname in ovs_bridges: -+ LOG.debug("Detected %s as an OVS interface", devname) -+ return True -+ return False -+ -+ - def is_netfailover(devname, driver=None): - """ netfailover driver uses 3 nics, master, primary and standby. - this returns True if the device is either the primary or standby -@@ -884,6 +944,8 @@ def get_interfaces(blacklist_drivers=None) -> list: - # skip nics that have no mac (00:00....) - if name != 'lo' and mac == zero_mac[:len(mac)]: - continue -+ if is_openvswitch_internal_interface(name): -+ continue - # skip nics that have drivers blacklisted - driver = device_driver(name) - if driver in blacklist_drivers: -diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py -index 0535387a..946f8ee2 100644 ---- a/cloudinit/net/tests/test_init.py -+++ b/cloudinit/net/tests/test_init.py -@@ -391,6 +391,10 @@ class TestGetDeviceList(CiTestCase): - self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False), -+) - class TestGetInterfaceMAC(CiTestCase): - - def setUp(self): -@@ -1224,6 +1228,121 @@ class TestNetFailOver(CiTestCase): - self.assertFalse(net.is_netfailover(devname, driver)) - - -+class TestOpenvswitchIsInstalled: -+ """Test cloudinit.net.openvswitch_is_installed. -+ -+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test -+ despite the ``lru_cache`` decorator on the unit under test. 
-+ """ -+ -+ @pytest.fixture(autouse=True) -+ def clear_lru_cache(self): -+ net.openvswitch_is_installed.cache_clear() -+ -+ @pytest.mark.parametrize( -+ "expected,which_return", [(True, "/some/path"), (False, None)] -+ ) -+ @mock.patch("cloudinit.net.subp.which") -+ def test_mirrors_which_result(self, m_which, expected, which_return): -+ m_which.return_value = which_return -+ assert expected == net.openvswitch_is_installed() -+ -+ @mock.patch("cloudinit.net.subp.which") -+ def test_only_calls_which_once(self, m_which): -+ net.openvswitch_is_installed() -+ net.openvswitch_is_installed() -+ assert 1 == m_which.call_count -+ -+ -+@mock.patch("cloudinit.net.subp.subp", return_value=("", "")) -+class TestGetOVSInternalInterfaces: -+ """Test cloudinit.net.get_ovs_internal_interfaces. -+ -+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test -+ despite the ``lru_cache`` decorator on the unit under test. -+ """ -+ @pytest.fixture(autouse=True) -+ def clear_lru_cache(self): -+ net.get_ovs_internal_interfaces.cache_clear() -+ -+ def test_command_used(self, m_subp): -+ """Test we use the correct command when we call subp""" -+ net.get_ovs_internal_interfaces() -+ -+ assert [ -+ mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD) -+ ] == m_subp.call_args_list -+ -+ def test_subp_contents_split_and_returned(self, m_subp): -+ """Test that the command output is appropriately mangled.""" -+ stdout = "iface1\niface2\niface3\n" -+ m_subp.return_value = (stdout, "") -+ -+ assert [ -+ "iface1", -+ "iface2", -+ "iface3", -+ ] == net.get_ovs_internal_interfaces() -+ -+ def test_database_connection_error_handled_gracefully(self, m_subp): -+ """Test that the error indicating OVS is down is handled gracefully.""" -+ m_subp.side_effect = ProcessExecutionError( -+ stderr="database connection failed" -+ ) -+ -+ assert [] == net.get_ovs_internal_interfaces() -+ -+ def test_other_errors_raised(self, m_subp): -+ """Test that only database connection errors are handled.""" -+ m_subp.side_effect = ProcessExecutionError() -+ -+ with pytest.raises(ProcessExecutionError): -+ net.get_ovs_internal_interfaces() -+ -+ def test_only_runs_once(self, m_subp): -+ """Test that we cache the value.""" -+ net.get_ovs_internal_interfaces() -+ net.get_ovs_internal_interfaces() -+ -+ assert 1 == m_subp.call_count -+ -+ -+@mock.patch("cloudinit.net.get_ovs_internal_interfaces") -+@mock.patch("cloudinit.net.openvswitch_is_installed") -+class TestIsOpenVSwitchInternalInterface: -+ def test_false_if_ovs_not_installed( -+ self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces -+ ): -+ """Test that OVS' absence returns False.""" -+ m_openvswitch_is_installed.return_value = False -+ -+ assert not net.is_openvswitch_internal_interface("devname") -+ -+ @pytest.mark.parametrize( -+ "detected_interfaces,devname,expected_return", -+ [ -+ ([], "devname", False), -+ (["notdevname"], "devname", False), -+ (["devname"], "devname", True), -+ (["some", "other", "devices", "and", "ours"], "ours", True), -+ ], -+ ) -+ def test_return_value_based_on_detected_interfaces( -+ self, -+ m_openvswitch_is_installed, -+ m_get_ovs_internal_interfaces, -+ detected_interfaces, -+ devname, -+ expected_return, -+ ): -+ """Test that the detected interfaces are used correctly.""" -+ m_openvswitch_is_installed.return_value = True -+ m_get_ovs_internal_interfaces.return_value = detected_interfaces -+ assert expected_return == net.is_openvswitch_internal_interface( -+ devname -+ ) -+ -+ - class TestIsIpAddress: - """Tests for net.is_ip_address. 
- -diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py -index 2bde1e3f..95fb9743 100644 ---- a/cloudinit/sources/helpers/tests/test_openstack.py -+++ b/cloudinit/sources/helpers/tests/test_openstack.py -@@ -1,10 +1,15 @@ - # This file is part of cloud-init. See LICENSE file for license information. - # ./cloudinit/sources/helpers/tests/test_openstack.py -+from unittest import mock - - from cloudinit.sources.helpers import openstack - from cloudinit.tests import helpers as test_helpers - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestConvertNetJson(test_helpers.CiTestCase): - - def test_phy_types(self): -diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py -index a7bbdfd9..dcf33b9b 100644 ---- a/cloudinit/sources/tests/test_oracle.py -+++ b/cloudinit/sources/tests/test_oracle.py -@@ -173,6 +173,10 @@ class TestIsPlatformViable(test_helpers.CiTestCase): - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestNetworkConfigFromOpcImds: - def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): - oracle_ds._vnics_data = [{}] -diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py -new file mode 100644 -index 00000000..efafae50 ---- /dev/null -+++ b/tests/integration_tests/bugs/test_lp1912844.py -@@ -0,0 +1,103 @@ -+"""Integration test for LP: #1912844 -+ -+cloud-init should ignore OVS-internal interfaces when performing its own -+interface determination: these interfaces are handled fully by OVS, so -+cloud-init should never need to touch them. -+ -+This test is a semi-synthetic reproducer for the bug. It uses a similar -+network configuration, tweaked slightly to DHCP in a way that will succeed even -+on "failed" boots. The exact bug doesn't reproduce with the NoCloud -+datasource, because it runs at init-local time (whereas the MAAS datasource, -+from the report, runs only at init (network) time): this means that the -+networking code runs before OVS creates its interfaces (which happens after -+init-local but, of course, before networking is up), and so doesn't generate -+the traceback that they cause. We work around this by calling -+``get_interfaces_by_mac` directly in the test code. -+""" -+import pytest -+ -+from tests.integration_tests import random_mac_address -+ -+MAC_ADDRESS = random_mac_address() -+ -+NETWORK_CONFIG = """\ -+bonds: -+ bond0: -+ interfaces: -+ - enp5s0 -+ macaddress: {0} -+ mtu: 1500 -+bridges: -+ ovs-br: -+ interfaces: -+ - bond0 -+ macaddress: {0} -+ mtu: 1500 -+ openvswitch: {{}} -+ dhcp4: true -+ethernets: -+ enp5s0: -+ mtu: 1500 -+ set-name: enp5s0 -+ match: -+ macaddress: {0} -+version: 2 -+vlans: -+ ovs-br.100: -+ id: 100 -+ link: ovs-br -+ mtu: 1500 -+ ovs-br.200: -+ id: 200 -+ link: ovs-br -+ mtu: 1500 -+""".format(MAC_ADDRESS) -+ -+ -+SETUP_USER_DATA = """\ -+#cloud-config -+packages: -+- openvswitch-switch -+""" -+ -+ -+@pytest.fixture -+def ovs_enabled_session_cloud(session_cloud): -+ """A session_cloud wrapper, to use an OVS-enabled image for tests. -+ -+ This implementation is complicated by wanting to use ``session_cloud``s -+ snapshot cleanup/retention logic, to avoid having to reimplement that here. 
-+ """ -+ old_snapshot_id = session_cloud.snapshot_id -+ with session_cloud.launch( -+ user_data=SETUP_USER_DATA, -+ ) as instance: -+ instance.instance.clean() -+ session_cloud.snapshot_id = instance.snapshot() -+ -+ yield session_cloud -+ -+ try: -+ session_cloud.delete_snapshot() -+ finally: -+ session_cloud.snapshot_id = old_snapshot_id -+ -+ -+@pytest.mark.lxd_vm -+def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud): -+ """Launch our OVS-enabled image and confirm the bug doesn't reproduce.""" -+ launch_kwargs = { -+ "config_dict": { -+ "user.network-config": NETWORK_CONFIG, -+ "volatile.eth0.hwaddr": MAC_ADDRESS, -+ }, -+ } -+ with ovs_enabled_session_cloud.launch( -+ launch_kwargs=launch_kwargs, -+ ) as client: -+ result = client.execute( -+ "python3 -c" -+ "'from cloudinit.net import get_interfaces_by_mac;" -+ "get_interfaces_by_mac()'" -+ ) -+ assert result.ok -diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py -index 6f830cc6..2e2b7847 100644 ---- a/tests/unittests/test_datasource/test_configdrive.py -+++ b/tests/unittests/test_datasource/test_configdrive.py -@@ -494,6 +494,10 @@ class TestConfigDriveDataSource(CiTestCase): - self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestNetJson(CiTestCase): - def setUp(self): - super(TestNetJson, self).setUp() -@@ -654,6 +658,10 @@ class TestNetJson(CiTestCase): - self.assertEqual(out_data, conv_data) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestConvertNetworkData(CiTestCase): - - with_logs = True -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index c67b5fcc..14d3462f 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -2908,6 +2908,10 @@ iface eth1 inet dhcp - self.assertEqual(0, mock_settle.call_count) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestRhelSysConfigRendering(CiTestCase): - - with_logs = True -@@ -3592,6 +3596,10 @@ USERCTL=no - expected, self._render_and_read(network_config=v2data)) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestOpenSuseSysConfigRendering(CiTestCase): - - with_logs = True -@@ -5009,6 +5017,10 @@ class TestNetRenderers(CiTestCase): - self.assertTrue(result) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestGetInterfaces(CiTestCase): - _data = {'bonds': ['bond1'], - 'bridges': ['bridge1'], -@@ -5158,6 +5170,10 @@ class TestInterfaceHasOwnMac(CiTestCase): - self.assertFalse(interface_has_own_mac("eth0")) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestGetInterfacesByMac(CiTestCase): - _data = {'bonds': ['bond1'], - 'bridges': ['bridge1'], -@@ -5314,6 +5330,10 @@ class TestInterfacesSorting(CiTestCase): - ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) - - -+@mock.patch( -+ "cloudinit.net.is_openvswitch_internal_interface", -+ mock.Mock(return_value=False) -+) - class TestGetIBHwaddrsByInterface(CiTestCase): - - _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' --- -2.27.0 - diff --git 
a/SOURCES/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch b/SOURCES/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch deleted file mode 100644 index 0d474bc..0000000 --- a/SOURCES/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch +++ /dev/null @@ -1,87 +0,0 @@ -From bec5fb60ffae3d1137c7261e5571c2751c5dda25 Mon Sep 17 00:00:00 2001 -From: James Falcon -Date: Mon, 8 Mar 2021 14:09:47 -0600 -Subject: Fix requiring device-number on EC2 derivatives (#836) - -#342 (70dbccbb) introduced the ability to determine route-metrics based on -the `device-number` provided by the EC2 IMDS. Not all datasources that -subclass EC2 will have this attribute, so allow the old behavior if -`device-number` is not present. - -LP: #1917875 ---- - cloudinit/sources/DataSourceEc2.py | 3 +- - .../unittests/test_datasource/test_aliyun.py | 30 +++++++++++++++++++ - 2 files changed, 32 insertions(+), 1 deletion(-) - -diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py -index 1930a509..a2105dc7 100644 ---- a/cloudinit/sources/DataSourceEc2.py -+++ b/cloudinit/sources/DataSourceEc2.py -@@ -765,13 +765,14 @@ def convert_ec2_metadata_network_config( - netcfg['ethernets'][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses -+ nic_idx = 0 - for mac, nic_name in sorted(macs_to_nics.items()): - nic_metadata = macs_metadata.get(mac) - if not nic_metadata: - continue # Not a physical nic represented in metadata - # device-number is zero-indexed, we want it 1-indexed for the - # multiplication on the following line -- nic_idx = int(nic_metadata['device-number']) + 1 -+ nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1 - dhcp_override = {'route-metric': nic_idx * 100} - dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, - 'dhcp6': False, -diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py -index eb2828d5..cab1ac2b 100644 ---- a/tests/unittests/test_datasource/test_aliyun.py -+++ b/tests/unittests/test_datasource/test_aliyun.py -@@ -7,6 +7,7 @@ from unittest import mock - - from cloudinit import helpers - from cloudinit.sources import DataSourceAliYun as ay -+from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config - from cloudinit.tests import helpers as test_helpers - - DEFAULT_METADATA = { -@@ -183,6 +184,35 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']['openssh-key']) - -+ def test_route_metric_calculated_without_device_number(self): -+ """Test that route-metric code works without `device-number` -+ -+ `device-number` is part of EC2 metadata, but not supported on aliyun. -+ Attempting to access it will raise a KeyError. 
-+ -+ LP: #1917875 -+ """ -+ netcfg = convert_ec2_metadata_network_config( -+ {"interfaces": {"macs": { -+ "06:17:04:d7:26:09": { -+ "interface-id": "eni-e44ef49e", -+ }, -+ "06:17:04:d7:26:08": { -+ "interface-id": "eni-e44ef49f", -+ } -+ }}}, -+ macs_to_nics={ -+ '06:17:04:d7:26:09': 'eth0', -+ '06:17:04:d7:26:08': 'eth1', -+ } -+ ) -+ -+ met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] -+ met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] -+ -+ # route-metric numbers should be 100 apart -+ assert 100 == abs(met0 - met1) -+ - - class TestIsAliYun(test_helpers.CiTestCase): - ALIYUN_PRODUCT = 'Alibaba Cloud ECS' --- -2.27.0 - diff --git a/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch b/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch deleted file mode 100644 index 9dd373f..0000000 --- a/SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch +++ /dev/null @@ -1,295 +0,0 @@ -From 2a2a5cdec0de0b96d503f9357c1641043574f90a Mon Sep 17 00:00:00 2001 -From: Thomas Stringer -Date: Wed, 3 Mar 2021 11:07:43 -0500 -Subject: [PATCH 1/7] Add flexibility to IMDS api-version (#793) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [1/7] 9aa42581c4ff175fb6f8f4a78d94cac9c9971062 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -Add flexibility to IMDS api-version by having both a desired IMDS -api-version and a minimum api-version. The desired api-version will -be used first, and if that fails it will fall back to the minimum -api-version. ---- - cloudinit/sources/DataSourceAzure.py | 113 ++++++++++++++---- - tests/unittests/test_datasource/test_azure.py | 42 ++++++- - 2 files changed, 129 insertions(+), 26 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 553b5a7e..de1452ce 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent' - # In the event where the IMDS primary server is not - # available, it takes 1s to fallback to the secondary one - IMDS_TIMEOUT_IN_SECONDS = 2 --IMDS_URL = "http://169.254.169.254/metadata/" --IMDS_VER = "2019-06-01" --IMDS_VER_PARAM = "api-version={}".format(IMDS_VER) -+IMDS_URL = "http://169.254.169.254/metadata" -+IMDS_VER_MIN = "2019-06-01" -+IMDS_VER_WANT = "2020-09-01" - - - class metadata_type(Enum): -- compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM) -- network = "{}instance/network?{}".format(IMDS_URL, -- IMDS_VER_PARAM) -- reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL, -- IMDS_VER_PARAM) -+ compute = "{}/instance".format(IMDS_URL) -+ network = "{}/instance/network".format(IMDS_URL) -+ reprovisiondata = "{}/reprovisiondata".format(IMDS_URL) - - - PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" -@@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource): - self.update_events['network'].add(EventType.BOOT) - self._ephemeral_dhcp_ctx = None - -+ self.failed_desired_api_version = False -+ - def __str__(self): - root = sources.DataSource.__str__(self) - return "%s [seed=%s]" % (root, self.seed) -@@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource): - self._wait_for_all_nics_ready() - ret = self._reprovision() - -- imds_md = get_metadata_from_imds( -- self.fallback_interface, retries=10) -+ imds_md = self.get_imds_data_with_api_fallback( -+ self.fallback_interface, -+ retries=10 -+ ) - (md, userdata_raw, cfg, 
files) = ret - self.seed = cdev - crawled_data.update({ -@@ -652,6 +654,57 @@ class DataSourceAzure(sources.DataSource): - self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700) - return True - -+ @azure_ds_telemetry_reporter -+ def get_imds_data_with_api_fallback( -+ self, -+ fallback_nic, -+ retries, -+ md_type=metadata_type.compute): -+ """ -+ Wrapper for get_metadata_from_imds so that we can have flexibility -+ in which IMDS api-version we use. If a particular instance of IMDS -+ does not have the api version that is desired, we want to make -+ this fault tolerant and fall back to a good known minimum api -+ version. -+ """ -+ -+ if not self.failed_desired_api_version: -+ for _ in range(retries): -+ try: -+ LOG.info( -+ "Attempting IMDS api-version: %s", -+ IMDS_VER_WANT -+ ) -+ return get_metadata_from_imds( -+ fallback_nic=fallback_nic, -+ retries=0, -+ md_type=md_type, -+ api_version=IMDS_VER_WANT -+ ) -+ except UrlError as err: -+ LOG.info( -+ "UrlError with IMDS api-version: %s", -+ IMDS_VER_WANT -+ ) -+ if err.code == 400: -+ log_msg = "Fall back to IMDS api-version: {}".format( -+ IMDS_VER_MIN -+ ) -+ report_diagnostic_event( -+ log_msg, -+ logger_func=LOG.info -+ ) -+ self.failed_desired_api_version = True -+ break -+ -+ LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN) -+ return get_metadata_from_imds( -+ fallback_nic=fallback_nic, -+ retries=retries, -+ md_type=md_type, -+ api_version=IMDS_VER_MIN -+ ) -+ - def device_name_to_device(self, name): - return self.ds_cfg['disk_aliases'].get(name) - -@@ -880,10 +933,11 @@ class DataSourceAzure(sources.DataSource): - # primary nic is being attached first helps here. Otherwise each nic - # could add several seconds of delay. - try: -- imds_md = get_metadata_from_imds( -+ imds_md = self.get_imds_data_with_api_fallback( - ifname, - 5, -- metadata_type.network) -+ metadata_type.network -+ ) - except Exception as e: - LOG.warning( - "Failed to get network metadata using nic %s. Attempt to " -@@ -1017,7 +1071,10 @@ class DataSourceAzure(sources.DataSource): - def _poll_imds(self): - """Poll IMDS for the new provisioning data until we get a valid - response. Then return the returned JSON object.""" -- url = metadata_type.reprovisiondata.value -+ url = "{}?api-version={}".format( -+ metadata_type.reprovisiondata.value, -+ IMDS_VER_MIN -+ ) - headers = {"Metadata": "true"} - nl_sock = None - report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) -@@ -2059,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict: - @azure_ds_telemetry_reporter - def get_metadata_from_imds(fallback_nic, - retries, -- md_type=metadata_type.compute): -+ md_type=metadata_type.compute, -+ api_version=IMDS_VER_MIN): - """Query Azure's instance metadata service, returning a dictionary. - - If network is not up, setup ephemeral dhcp on fallback_nic to talk to the -@@ -2069,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic, - @param fallback_nic: String. The name of the nic which requires active - network in order to query IMDS. - @param retries: The number of retries of the IMDS_URL. -+ @param md_type: Metadata type for IMDS request. -+ @param api_version: IMDS api-version to use in the request. - - @return: A dict of instance metadata containing compute and network - info. 
- """ - kwargs = {'logfunc': LOG.debug, - 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', -- 'func': _get_metadata_from_imds, 'args': (retries, md_type,)} -+ 'func': _get_metadata_from_imds, -+ 'args': (retries, md_type, api_version,)} - if net.is_up(fallback_nic): - return util.log_time(**kwargs) - else: -@@ -2091,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic, - - - @azure_ds_telemetry_reporter --def _get_metadata_from_imds(retries, md_type=metadata_type.compute): -- -- url = md_type.value -+def _get_metadata_from_imds( -+ retries, -+ md_type=metadata_type.compute, -+ api_version=IMDS_VER_MIN): -+ url = "{}?api-version={}".format(md_type.value, api_version) - headers = {"Metadata": "true"} - try: - response = readurl( - url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, - retries=retries, exception_cb=retry_on_url_exc) - except Exception as e: -- report_diagnostic_event( -- 'Ignoring IMDS instance metadata. ' -- 'Get metadata from IMDS failed: %s' % e, -- logger_func=LOG.warning) -- return {} -+ # pylint:disable=no-member -+ if isinstance(e, UrlError) and e.code == 400: -+ raise -+ else: -+ report_diagnostic_event( -+ 'Ignoring IMDS instance metadata. ' -+ 'Get metadata from IMDS failed: %s' % e, -+ logger_func=LOG.warning) -+ return {} - try: - from json.decoder import JSONDecodeError - json_decode_error = JSONDecodeError -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index f597c723..dedebeb1 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -408,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - - def setUp(self): - super(TestGetMetadataFromIMDS, self).setUp() -- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01" -+ self.network_md_url = "{}/instance?api-version=2019-06-01".format( -+ dsaz.IMDS_URL -+ ) - - @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) -@@ -518,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - """Return empty dict when IMDS network metadata is absent.""" - httpretty.register_uri( - httpretty.GET, -- dsaz.IMDS_URL + 'instance?api-version=2017-12-01', -+ dsaz.IMDS_URL + '/instance?api-version=2017-12-01', - body={}, status=404) - - m_net_is_up.return_value = True # skips dhcp -@@ -1877,6 +1879,40 @@ scbus-1 on xpt0 bus 0 - ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key2']) - -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_imds_api_version_wanted_nonexistent( -+ self, -+ m_get_metadata_from_imds): -+ def get_metadata_from_imds_side_eff(*args, **kwargs): -+ if kwargs['api_version'] == dsaz.IMDS_VER_WANT: -+ raise url_helper.UrlError("No IMDS version", code=400) -+ return NETWORK_METADATA -+ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ self.assertIsNotNone(dsrc.metadata) -+ self.assertTrue(dsrc.failed_desired_api_version) -+ -+ @mock.patch( -+ MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) -+ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': 
"myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ self.assertIsNotNone(dsrc.metadata) -+ self.assertFalse(dsrc.failed_desired_api_version) -+ - - class TestAzureBounce(CiTestCase): - -@@ -2657,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase): - @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up') - @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') - @mock.patch('cloudinit.sources.net.find_fallback_nic') -- @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - @mock.patch('os.path.isfile') --- -2.27.0 - diff --git a/SOURCES/ci-Add-native-NetworkManager-support-1224.patch b/SOURCES/ci-Add-native-NetworkManager-support-1224.patch new file mode 100644 index 0000000..aad448a --- /dev/null +++ b/SOURCES/ci-Add-native-NetworkManager-support-1224.patch @@ -0,0 +1,2300 @@ +From 0d93e53fd05c44b62e3456b7580c9de8135e6b5a Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 2 May 2022 14:21:24 +0200 +Subject: [PATCH 1/4] Add native NetworkManager support (#1224) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 57: Add native NetworkManager support (#1224) +RH-Commit: [1/2] 56b9ed40840a4930c421c2749e8aa385097bef93 +RH-Bugzilla: 2059872 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Jon Maloy +RH-Acked-by: Eduardo Otubo + +commit feda344e6cf9d37b09bc13cf333a717d1654c26c +Author: Lubomir Rintel +Date: Fri Feb 25 23:33:20 2022 +0100 + + Add native NetworkManager support (#1224) + + Fedora currently relies on sysconfig/ifcfg renderer. This is not too great, + because Fedora (also RHEL since version 8) dropped support for the legacy + network service that uses ifcfg files long ago. + + In turn, Fedora ended up patching cloud-init downstream to utilize + NetworkManager's ifcfg compatibility mode [1]. This seems to have worked + for a while, nevertheless the NetworkManager's ifcfg backend is reaching + the end of its useful life too [2]. + + [1] https://src.fedoraproject.org/rpms/cloud-init/blob/rawhide/f/cloud-init-21.3-nm-controlled.patch + [2] https://fedoraproject.org/wiki/Changes/NoIfcfgFiles + + Let's not mangle things downstream and make vanilla cloud-init work great + on Fedora instead. + + This also means that the sysconfig compatibility with + Network Manager was removed. + + Firstly, this relies upon the fact that you can get ifcfg support by adding + it to NetworkManager.conf. That is not guaranteed and certainly will not + be case in future. + + Secondly, cloud-init always generates configuration with + NM_CONTROLLED=no, so the generated ifcfg files are no good for + NetworkManager. Fedora patches around this by just removing those lines + in their cloud-init package. 
+ +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/cmd/devel/net_convert.py | 14 +- + cloudinit/net/activators.py | 25 +- + cloudinit/net/network_manager.py | 377 +++++++ + cloudinit/net/renderers.py | 3 + + cloudinit/net/sysconfig.py | 37 +- + tests/unittests/test_net.py | 1270 +++++++++++++++++++++--- + tests/unittests/test_net_activators.py | 93 +- + 7 files changed, 1625 insertions(+), 194 deletions(-) + create mode 100644 cloudinit/net/network_manager.py + +diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py +index 18b1e7ff..647fe07b 100755 +--- a/cloudinit/cmd/devel/net_convert.py ++++ b/cloudinit/cmd/devel/net_convert.py +@@ -7,7 +7,14 @@ import os + import sys + + from cloudinit import distros, log, safeyaml +-from cloudinit.net import eni, netplan, network_state, networkd, sysconfig ++from cloudinit.net import ( ++ eni, ++ netplan, ++ network_manager, ++ network_state, ++ networkd, ++ sysconfig, ++) + from cloudinit.sources import DataSourceAzure as azure + from cloudinit.sources import DataSourceOVF as ovf + from cloudinit.sources.helpers import openstack +@@ -74,7 +81,7 @@ def get_parser(parser=None): + parser.add_argument( + "-O", + "--output-kind", +- choices=["eni", "netplan", "networkd", "sysconfig"], ++ choices=["eni", "netplan", "networkd", "sysconfig", "network-manager"], + required=True, + help="The network config format to emit", + ) +@@ -148,6 +155,9 @@ def handle_args(name, args): + elif args.output_kind == "sysconfig": + r_cls = sysconfig.Renderer + config = distro.renderer_configs.get("sysconfig") ++ elif args.output_kind == "network-manager": ++ r_cls = network_manager.Renderer ++ config = distro.renderer_configs.get("network-manager") + else: + raise RuntimeError("Invalid output_kind") + +diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py +index e80c26df..edbc0c06 100644 +--- a/cloudinit/net/activators.py ++++ b/cloudinit/net/activators.py +@@ -1,15 +1,14 @@ + # This file is part of cloud-init. See LICENSE file for license information. + import logging +-import os + from abc import ABC, abstractmethod + from typing import Iterable, List, Type + + from cloudinit import subp, util + from cloudinit.net.eni import available as eni_available + from cloudinit.net.netplan import available as netplan_available ++from cloudinit.net.network_manager import available as nm_available + from cloudinit.net.network_state import NetworkState + from cloudinit.net.networkd import available as networkd_available +-from cloudinit.net.sysconfig import NM_CFG_FILE + + LOG = logging.getLogger(__name__) + +@@ -124,20 +123,24 @@ class IfUpDownActivator(NetworkActivator): + class NetworkManagerActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: +- """Return true if network manager can be used on this system.""" +- config_present = os.path.isfile( +- subp.target_path(target, path=NM_CFG_FILE) +- ) +- nmcli_present = subp.which("nmcli", target=target) +- return config_present and bool(nmcli_present) ++ """Return true if NetworkManager can be used on this system.""" ++ return nm_available(target=target) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: +- """Bring up interface using nmcli. ++ """Bring up connection using nmcli. 
+ + Return True is successful, otherwise return False + """ +- cmd = ["nmcli", "connection", "up", "ifname", device_name] ++ from cloudinit.net.network_manager import conn_filename ++ ++ filename = conn_filename(device_name) ++ cmd = ["nmcli", "connection", "load", filename] ++ if _alter_interface(cmd, device_name): ++ cmd = ["nmcli", "connection", "up", "filename", filename] ++ else: ++ _alter_interface(["nmcli", "connection", "reload"], device_name) ++ cmd = ["nmcli", "connection", "up", "ifname", device_name] + return _alter_interface(cmd, device_name) + + @staticmethod +@@ -146,7 +149,7 @@ class NetworkManagerActivator(NetworkActivator): + + Return True is successful, otherwise return False + """ +- cmd = ["nmcli", "connection", "down", device_name] ++ cmd = ["nmcli", "device", "disconnect", device_name] + return _alter_interface(cmd, device_name) + + +diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py +new file mode 100644 +index 00000000..79b0fe0b +--- /dev/null ++++ b/cloudinit/net/network_manager.py +@@ -0,0 +1,377 @@ ++# Copyright 2022 Red Hat, Inc. ++# ++# Author: Lubomir Rintel ++# Fixes and suggestions contributed by James Falcon, Neal Gompa, ++# Zbigniew Jędrzejewski-Szmek and Emanuele Giuseppe Esposito. ++# ++# This file is part of cloud-init. See LICENSE file for license information. ++ ++import configparser ++import io ++import itertools ++import os ++import uuid ++ ++from cloudinit import log as logging ++from cloudinit import subp, util ++ ++from . import renderer ++from .network_state import is_ipv6_addr, subnet_is_ipv6 ++ ++NM_RUN_DIR = "/etc/NetworkManager" ++NM_LIB_DIR = "/usr/lib/NetworkManager" ++LOG = logging.getLogger(__name__) ++ ++ ++class NMConnection: ++ """Represents a NetworkManager connection profile.""" ++ ++ def __init__(self, con_id): ++ """ ++ Initializes the connection with some very basic properties, ++ notably the UUID so that the connection can be referred to. ++ """ ++ ++ # Chosen by fair dice roll ++ CI_NM_UUID = uuid.UUID("a3924cb8-09e0-43e9-890b-77972a800108") ++ ++ self.config = configparser.ConfigParser() ++ # Identity option name mapping, to achieve case sensitivity ++ self.config.optionxform = str ++ ++ self.config["connection"] = { ++ "id": f"cloud-init {con_id}", ++ "uuid": str(uuid.uuid5(CI_NM_UUID, con_id)), ++ } ++ ++ # This is not actually used anywhere, but may be useful in future ++ self.config["user"] = { ++ "org.freedesktop.NetworkManager.origin": "cloud-init" ++ } ++ ++ def _set_default(self, section, option, value): ++ """ ++ Sets a property unless it's already set, ensuring the section ++ exists. ++ """ ++ ++ if not self.config.has_section(section): ++ self.config[section] = {} ++ if not self.config.has_option(section, option): ++ self.config[section][option] = value ++ ++ def _set_ip_method(self, family, subnet_type): ++ """ ++ Ensures there's appropriate [ipv4]/[ipv6] for given family ++ appropriate for given configuration type ++ """ ++ ++ method_map = { ++ "static": "manual", ++ "dhcp6": "dhcp", ++ "ipv6_slaac": "auto", ++ "ipv6_dhcpv6-stateless": "auto", ++ "ipv6_dhcpv6-stateful": "auto", ++ "dhcp4": "auto", ++ "dhcp": "auto", ++ } ++ ++ # Ensure we got an [ipvX] section ++ self._set_default(family, "method", "disabled") ++ ++ try: ++ method = method_map[subnet_type] ++ except KeyError: ++ # What else can we do ++ method = "auto" ++ self.config[family]["may-fail"] = "true" ++ ++ # Make sure we don't "downgrade" the method in case ++ # we got conflicting subnets (e.g. 
static along with dhcp) ++ if self.config[family]["method"] == "dhcp": ++ return ++ if self.config[family]["method"] == "auto" and method == "manual": ++ return ++ ++ self.config[family]["method"] = method ++ self._set_default(family, "may-fail", "false") ++ if family == "ipv6": ++ self._set_default(family, "addr-gen-mode", "stable-privacy") ++ ++ def _add_numbered(self, section, key_prefix, value): ++ """ ++ Adds a numbered property, such as address or route, ensuring ++ the appropriate value gets used for . ++ """ ++ ++ for index in itertools.count(1): ++ key = f"{key_prefix}{index}" ++ if not self.config.has_option(section, key): ++ self.config[section][key] = value ++ break ++ ++ def _add_address(self, family, subnet): ++ """ ++ Adds an ipv[46]address property. ++ """ ++ ++ value = subnet["address"] + "/" + str(subnet["prefix"]) ++ self._add_numbered(family, "address", value) ++ ++ def _add_route(self, family, route): ++ """ ++ Adds a ipv[46].route property. ++ """ ++ ++ value = route["network"] + "/" + str(route["prefix"]) ++ if "gateway" in route: ++ value = value + "," + route["gateway"] ++ self._add_numbered(family, "route", value) ++ ++ def _add_nameserver(self, dns): ++ """ ++ Extends the ipv[46].dns property with a name server. ++ """ ++ ++ # FIXME: the subnet contains IPv4 and IPv6 name server mixed ++ # together. We might be getting an IPv6 name server while ++ # we're dealing with an IPv4 subnet. Sort this out by figuring ++ # out the correct family and making sure a valid section exist. ++ family = "ipv6" if is_ipv6_addr(dns) else "ipv4" ++ self._set_default(family, "method", "disabled") ++ ++ self._set_default(family, "dns", "") ++ self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" ++ ++ def _add_dns_search(self, family, dns_search): ++ """ ++ Extends the ipv[46].dns-search property with a name server. ++ """ ++ ++ self._set_default(family, "dns-search", "") ++ self.config[family]["dns-search"] = ( ++ self.config[family]["dns-search"] + ";".join(dns_search) + ";" ++ ) ++ ++ def con_uuid(self): ++ """ ++ Returns the connection UUID ++ """ ++ return self.config["connection"]["uuid"] ++ ++ def valid(self): ++ """ ++ Can this be serialized into a meaningful connection profile? ++ """ ++ return self.config.has_option("connection", "type") ++ ++ @staticmethod ++ def mac_addr(addr): ++ """ ++ Sanitize a MAC address. ++ """ ++ return addr.replace("-", ":").upper() ++ ++ def render_interface(self, iface, renderer): ++ """ ++ Integrate information from network state interface information ++ into the connection. Most of the work is done here. 
++ """ ++ ++ # Initialize type & connectivity ++ _type_map = { ++ "physical": "ethernet", ++ "vlan": "vlan", ++ "bond": "bond", ++ "bridge": "bridge", ++ "infiniband": "infiniband", ++ "loopback": None, ++ } ++ ++ if_type = _type_map[iface["type"]] ++ if if_type is None: ++ return ++ if "bond-master" in iface: ++ slave_type = "bond" ++ else: ++ slave_type = None ++ ++ self.config["connection"]["type"] = if_type ++ if slave_type is not None: ++ self.config["connection"]["slave-type"] = slave_type ++ self.config["connection"]["master"] = renderer.con_ref( ++ iface[slave_type + "-master"] ++ ) ++ ++ # Add type specific-section ++ self.config[if_type] = {} ++ ++ # These are the interface properties that map nicely ++ # to NetworkManager properties ++ _prop_map = { ++ "bond": { ++ "mode": "bond-mode", ++ "miimon": "bond_miimon", ++ "xmit_hash_policy": "bond-xmit-hash-policy", ++ "num_grat_arp": "bond-num-grat-arp", ++ "downdelay": "bond-downdelay", ++ "updelay": "bond-updelay", ++ "fail_over_mac": "bond-fail-over-mac", ++ "primary_reselect": "bond-primary-reselect", ++ "primary": "bond-primary", ++ }, ++ "bridge": { ++ "stp": "bridge_stp", ++ "priority": "bridge_bridgeprio", ++ }, ++ "vlan": { ++ "id": "vlan_id", ++ }, ++ "ethernet": {}, ++ "infiniband": {}, ++ } ++ ++ device_mtu = iface["mtu"] ++ ipv4_mtu = None ++ ++ # Deal with Layer 3 configuration ++ for subnet in iface["subnets"]: ++ family = "ipv6" if subnet_is_ipv6(subnet) else "ipv4" ++ ++ self._set_ip_method(family, subnet["type"]) ++ if "address" in subnet: ++ self._add_address(family, subnet) ++ if "gateway" in subnet: ++ self.config[family]["gateway"] = subnet["gateway"] ++ for route in subnet["routes"]: ++ self._add_route(family, route) ++ if "dns_nameservers" in subnet: ++ for nameserver in subnet["dns_nameservers"]: ++ self._add_nameserver(nameserver) ++ if "dns_search" in subnet: ++ self._add_dns_search(family, subnet["dns_search"]) ++ if family == "ipv4" and "mtu" in subnet: ++ ipv4_mtu = subnet["mtu"] ++ ++ if ipv4_mtu is None: ++ ipv4_mtu = device_mtu ++ if not ipv4_mtu == device_mtu: ++ LOG.warning( ++ "Network config: ignoring %s device-level mtu:%s" ++ " because ipv4 subnet-level mtu:%s provided.", ++ iface["name"], ++ device_mtu, ++ ipv4_mtu, ++ ) ++ ++ # Parse type-specific properties ++ for nm_prop, key in _prop_map[if_type].items(): ++ if key not in iface: ++ continue ++ if iface[key] is None: ++ continue ++ if isinstance(iface[key], bool): ++ self.config[if_type][nm_prop] = ( ++ "true" if iface[key] else "false" ++ ) ++ else: ++ self.config[if_type][nm_prop] = str(iface[key]) ++ ++ # These ones need special treatment ++ if if_type == "ethernet": ++ if iface["wakeonlan"] is True: ++ # NM_SETTING_WIRED_WAKE_ON_LAN_MAGIC ++ self.config["ethernet"]["wake-on-lan"] = str(0x40) ++ if ipv4_mtu is not None: ++ self.config["ethernet"]["mtu"] = str(ipv4_mtu) ++ if iface["mac_address"] is not None: ++ self.config["ethernet"]["mac-address"] = self.mac_addr( ++ iface["mac_address"] ++ ) ++ if if_type == "vlan" and "vlan-raw-device" in iface: ++ self.config["vlan"]["parent"] = renderer.con_ref( ++ iface["vlan-raw-device"] ++ ) ++ if if_type == "bridge": ++ # Bridge is ass-backwards compared to bond ++ for port in iface["bridge_ports"]: ++ port = renderer.get_conn(port) ++ port._set_default("connection", "slave-type", "bridge") ++ port._set_default("connection", "master", self.con_uuid()) ++ if iface["mac_address"] is not None: ++ self.config["bridge"]["mac-address"] = self.mac_addr( ++ iface["mac_address"] ++ ) ++ if if_type == 
"infiniband" and ipv4_mtu is not None: ++ self.config["infiniband"]["transport-mode"] = "datagram" ++ self.config["infiniband"]["mtu"] = str(ipv4_mtu) ++ if iface["mac_address"] is not None: ++ self.config["infiniband"]["mac-address"] = self.mac_addr( ++ iface["mac_address"] ++ ) ++ ++ # Finish up ++ if if_type == "bridge" or not self.config.has_option( ++ if_type, "mac-address" ++ ): ++ self.config["connection"]["interface-name"] = iface["name"] ++ ++ def dump(self): ++ """ ++ Stringify. ++ """ ++ ++ buf = io.StringIO() ++ self.config.write(buf, space_around_delimiters=False) ++ header = "# Generated by cloud-init. Changes will be lost.\n\n" ++ return header + buf.getvalue() ++ ++ ++class Renderer(renderer.Renderer): ++ """Renders network information in a NetworkManager keyfile format.""" ++ ++ def __init__(self, config=None): ++ self.connections = {} ++ ++ def get_conn(self, con_id): ++ return self.connections[con_id] ++ ++ def con_ref(self, con_id): ++ if con_id in self.connections: ++ return self.connections[con_id].con_uuid() ++ else: ++ # Well, what can we do... ++ return con_id ++ ++ def render_network_state(self, network_state, templates=None, target=None): ++ # First pass makes sure there's NMConnections for all known ++ # interfaces that have UUIDs that can be linked to from related ++ # interfaces ++ for iface in network_state.iter_interfaces(): ++ self.connections[iface["name"]] = NMConnection(iface["name"]) ++ ++ # Now render the actual interface configuration ++ for iface in network_state.iter_interfaces(): ++ conn = self.connections[iface["name"]] ++ conn.render_interface(iface, self) ++ ++ # And finally write the files ++ for con_id, conn in self.connections.items(): ++ if not conn.valid(): ++ continue ++ name = conn_filename(con_id, target) ++ util.write_file(name, conn.dump(), 0o600) ++ ++ ++def conn_filename(con_id, target=None): ++ target_con_dir = subp.target_path(target, NM_RUN_DIR) ++ con_file = f"cloud-init-{con_id}.nmconnection" ++ return f"{target_con_dir}/system-connections/{con_file}" ++ ++ ++def available(target=None): ++ target_nm_dir = subp.target_path(target, NM_LIB_DIR) ++ return os.path.exists(target_nm_dir) ++ ++ ++# vi: ts=4 expandtab +diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py +index c755f04c..7edc34b5 100644 +--- a/cloudinit/net/renderers.py ++++ b/cloudinit/net/renderers.py +@@ -8,6 +8,7 @@ from . 
import ( + freebsd, + netbsd, + netplan, ++ network_manager, + networkd, + openbsd, + renderer, +@@ -19,6 +20,7 @@ NAME_TO_RENDERER = { + "freebsd": freebsd, + "netbsd": netbsd, + "netplan": netplan, ++ "network-manager": network_manager, + "networkd": networkd, + "openbsd": openbsd, + "sysconfig": sysconfig, +@@ -28,6 +30,7 @@ DEFAULT_PRIORITY = [ + "eni", + "sysconfig", + "netplan", ++ "network-manager", + "freebsd", + "netbsd", + "openbsd", +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 362e8d19..c3b0c795 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -5,8 +5,6 @@ import io + import os + import re + +-from configobj import ConfigObj +- + from cloudinit import log as logging + from cloudinit import subp, util + from cloudinit.distros.parsers import networkmanager_conf, resolv_conf +@@ -66,24 +64,6 @@ def _quote_value(value): + return value + + +-def enable_ifcfg_rh(path): +- """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" +- config = ConfigObj(path) +- if "main" in config: +- if "plugins" in config["main"]: +- if "ifcfg-rh" in config["main"]["plugins"]: +- return +- else: +- config["main"]["plugins"] = [] +- +- if isinstance(config["main"]["plugins"], list): +- config["main"]["plugins"].append("ifcfg-rh") +- else: +- config["main"]["plugins"] = [config["main"]["plugins"], "ifcfg-rh"] +- config.write() +- LOG.debug("Enabled ifcfg-rh NetworkManager plugins") +- +- + class ConfigMap(object): + """Sysconfig like dictionary object.""" + +@@ -1031,8 +1011,6 @@ class Renderer(renderer.Renderer): + netrules_content = self._render_persistent_net(network_state) + netrules_path = subp.target_path(target, self.netrules_path) + util.write_file(netrules_path, netrules_content, file_mode) +- if available_nm(target=target): +- enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE)) + + sysconfig_path = subp.target_path(target, templates.get("control")) + # Distros configuring /etc/sysconfig/network as a file e.g. Centos +@@ -1071,14 +1049,9 @@ def _supported_vlan_names(rdev, vid): + + + def available(target=None): +- sysconfig = available_sysconfig(target=target) +- nm = available_nm(target=target) +- return util.system_info()["variant"] in KNOWN_DISTROS and any( +- [nm, sysconfig] +- ) +- ++ if not util.system_info()["variant"] in KNOWN_DISTROS: ++ return False + +-def available_sysconfig(target=None): + expected = ["ifup", "ifdown"] + search = ["/sbin", "/usr/sbin"] + for p in expected: +@@ -1095,10 +1068,4 @@ def available_sysconfig(target=None): + return False + + +-def available_nm(target=None): +- if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)): +- return False +- return True +- +- + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 591241b3..ef21ad76 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -21,6 +21,7 @@ from cloudinit.net import ( + interface_has_own_mac, + natural_sort_key, + netplan, ++ network_manager, + network_state, + networkd, + renderers, +@@ -611,6 +612,37 @@ dns = none + ), + ), + ], ++ "expected_network_manager": [ ++ ( ++ "".join( ++ [ ++ "etc/NetworkManager/system-connections", ++ "/cloud-init-eth0.nmconnection", ++ ] ++ ), ++ """ ++# Generated by cloud-init. Changes will be lost. 
++ ++[connection] ++id=cloud-init eth0 ++uuid=1dd9a779-d327-56e1-8454-c65e2556c12c ++type=ethernet ++ ++[user] ++org.freedesktop.NetworkManager.origin=cloud-init ++ ++[ethernet] ++mac-address=FA:16:3E:ED:9A:59 ++ ++[ipv4] ++method=manual ++may-fail=false ++address1=172.19.1.34/22 ++route1=0.0.0.0/0,172.19.3.254 ++ ++""".lstrip(), ++ ), ++ ], + }, + { + "in_data": { +@@ -1073,6 +1105,50 @@ NETWORK_CONFIGS = { + USERCTL=no""" + ), + }, ++ "expected_network_manager": { ++ "cloud-init-eth1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth1 ++ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=CF:D6:AF:48:E8:80 ++ ++ """ ++ ), ++ "cloud-init-eth99.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth99 ++ uuid=b1b88000-1f03-5360-8377-1a2205efffb4 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=C0:D6:9F:2C:E8:80 ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ address1=192.168.21.3/24 ++ route1=0.0.0.0/0,65.61.151.37 ++ dns=8.8.8.8;8.8.4.4; ++ dns-search=barley.maas;sach.maas; ++ ++ """ ++ ), ++ }, + "yaml": textwrap.dedent( + """ + version: 1 +@@ -1145,6 +1221,34 @@ NETWORK_CONFIGS = { + STARTMODE=auto""" + ) + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ [ipv6] ++ method=dhcp ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ """ ++ ), ++ }, + "yaml": textwrap.dedent( + """\ + version: 1 +@@ -1247,6 +1351,37 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mtu=9000 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.14.2/24 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::1/64 ++ ++ """ ++ ), ++ }, + }, + "v6_and_v4": { + "expected_sysconfig_opensuse": { +@@ -1257,6 +1392,34 @@ NETWORK_CONFIGS = { + STARTMODE=auto""" + ) + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv6] ++ method=dhcp ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ }, + "yaml": textwrap.dedent( + """\ + version: 1 +@@ -1330,6 +1493,30 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv6] ++ method=dhcp ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ """ ++ ), ++ }, + }, + "dhcpv6_accept_ra": { + "expected_eni": textwrap.dedent( +@@ -1537,6 +1724,30 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv6] ++ method=auto ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ """ ++ ), ++ }, + }, + "static6": { + "yaml": textwrap.dedent( +@@ -1625,6 +1836,30 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv6] ++ method=auto ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ """ ++ ), ++ }, + }, + "dhcpv6_stateful": { + "expected_eni": textwrap.dedent( +@@ -1724,6 +1959,29 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 +@@ -1777,6 +2035,30 @@ NETWORK_CONFIGS = { + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-iface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init iface0 ++ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 ++ type=ethernet ++ interface-name=iface0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ wake-on-lan=64 ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 +@@ -2215,6 +2497,254 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" + ), + }, ++ "expected_network_manager": { ++ "cloud-init-eth3.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth3 ++ uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 ++ type=ethernet ++ slave-type=bridge ++ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=66:BB:9F:2C:E8:80 ++ ++ """ ++ ), ++ "cloud-init-eth5.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init eth5 ++ uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=98:BB:9F:2C:E8:8A ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ "cloud-init-ib0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init ib0 ++ uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b ++ type=infiniband ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [infiniband] ++ transport-mode=datagram ++ mtu=9000 ++ mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.200.7/24 ++ ++ """ ++ ), ++ "cloud-init-bond0.200.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init bond0.200 ++ uuid=88984a9c-ff22-5233-9267-86315e0acaa7 ++ type=vlan ++ interface-name=bond0.200 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [vlan] ++ id=200 ++ parent=54317911-f840-516b-a10d-82cb4c1f075c ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ "cloud-init-eth0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth0 ++ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=C0:D6:9F:2C:E8:80 ++ ++ """ ++ ), ++ "cloud-init-eth4.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth4 ++ uuid=e27e4959-fb50-5580-b9a4-2073554627b9 ++ type=ethernet ++ slave-type=bridge ++ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=98:BB:9F:2C:E8:80 ++ ++ """ ++ ), ++ "cloud-init-eth1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth1 ++ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 ++ type=ethernet ++ slave-type=bond ++ master=54317911-f840-516b-a10d-82cb4c1f075c ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=AA:D6:9F:2C:E8:80 ++ ++ """ ++ ), ++ "cloud-init-br0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init br0 ++ uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ type=bridge ++ interface-name=br0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [bridge] ++ stp=false ++ priority=22 ++ mac-address=BB:BB:BB:BB:BB:AA ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.14.2/24 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::1/64 ++ route1=::/0,2001:4800:78ff:1b::1 ++ ++ """ ++ ), ++ "cloud-init-eth0.101.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init eth0.101 ++ uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf ++ type=vlan ++ interface-name=eth0.101 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [vlan] ++ id=101 ++ parent=1dd9a779-d327-56e1-8454-c65e2556c12c ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.0.2/24 ++ gateway=192.168.0.1 ++ dns=192.168.0.10;10.23.23.134; ++ dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; ++ address2=192.168.2.10/24 ++ ++ """ ++ ), ++ "cloud-init-bond0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init bond0 ++ uuid=54317911-f840-516b-a10d-82cb4c1f075c ++ type=bond ++ interface-name=bond0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [bond] ++ mode=active-backup ++ miimon=100 ++ xmit_hash_policy=layer3+4 ++ ++ [ipv6] ++ method=dhcp ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ ++ """ ++ ), ++ "cloud-init-eth2.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth2 ++ uuid=5559a242-3421-5fdd-896e-9cb8313d5804 ++ type=ethernet ++ slave-type=bond ++ master=54317911-f840-516b-a10d-82cb4c1f075c ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=C0:BB:9F:2C:E8:80 ++ ++ """ ++ ), ++ }, + "yaml": textwrap.dedent( + """ + version: 1 +@@ -2403,10 +2933,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + - type: static + address: 2001:1::1/92 + routes: +- - gateway: 2001:67c:1562:1 ++ - gateway: 2001:67c:1562::1 + network: 2001:67c:1 + netmask: "ffff:ffff::" +- - gateway: 3001:67c:1562:1 ++ - gateway: 3001:67c:15::1 + network: 3001:67c:1 + netmask: "ffff:ffff::" + metric: 10000 +@@ -2451,10 +2981,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c:1/32 +- via: 2001:67c:1562:1 ++ via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c:1/32 +- via: 3001:67c:1562:1 ++ via: 3001:67c:15::1 + """ + ), + "expected_eni": textwrap.dedent( +@@ -2514,11 +3044,11 @@ iface bond0 inet static + # control-alias bond0 + iface bond0 inet6 static + address 2001:1::1/92 +- post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true +- pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true +- post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ ++ post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562::1 || true ++ pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562::1 || true ++ post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:15::1 metric 10000 \ + || true +- pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ ++ pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:15::1 metric 10000 \ + || true + """ + ), +@@ -2561,8 +3091,8 @@ iface bond0 inet6 static + - to: 2001:67c:1562:8007::1/64 + via: 2001:67c:1562:8007::aac:40b2 + - metric: 10000 +- to: 3001:67c:1562:8007::1/64 +- via: 3001:67c:1562:8007::aac:40b2 ++ to: 3001:67c:15:8007::1/64 ++ via: 3001:67c:15:8007::aac:40b2 + """ + ), + "expected_netplan-v2": textwrap.dedent( +@@ -2594,8 +3124,8 @@ iface bond0 inet6 static + - to: 2001:67c:1562:8007::1/64 + via: 2001:67c:1562:8007::aac:40b2 + - metric: 10000 +- to: 3001:67c:1562:8007::1/64 +- via: 3001:67c:1562:8007::aac:40b2 ++ to: 3001:67c:15:8007::1/64 ++ via: 3001:67c:15:8007::aac:40b2 + ethernets: + eth0: + match: +@@ -2694,8 +3224,8 @@ iface bond0 
inet6 static + """\ + # Created by cloud-init on instance boot automatically, do not edit. + # +- 2001:67c:1/32 via 2001:67c:1562:1 dev bond0 +- 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0 ++ 2001:67c:1/32 via 2001:67c:1562::1 dev bond0 ++ 3001:67c:1/32 via 3001:67c:15::1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( +@@ -2718,6 +3248,88 @@ iface bond0 inet6 static + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-bond0s0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init bond0s0 ++ uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e ++ type=ethernet ++ slave-type=bond ++ master=54317911-f840-516b-a10d-82cb4c1f075c ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=AA:BB:CC:DD:E8:00 ++ ++ """ ++ ), ++ "cloud-init-bond0s1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init bond0s1 ++ uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 ++ type=ethernet ++ slave-type=bond ++ master=54317911-f840-516b-a10d-82cb4c1f075c ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=AA:BB:CC:DD:E8:01 ++ ++ """ ++ ), ++ "cloud-init-bond0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init bond0 ++ uuid=54317911-f840-516b-a10d-82cb4c1f075c ++ type=bond ++ interface-name=bond0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [bond] ++ mode=active-backup ++ miimon=100 ++ xmit_hash_policy=layer3+4 ++ num_grat_arp=5 ++ downdelay=10 ++ updelay=20 ++ fail_over_mac=active ++ primary_reselect=always ++ primary=bond0s0 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.0.2/24 ++ gateway=192.168.0.1 ++ route1=10.1.3.0/24,192.168.0.3 ++ address2=192.168.1.2/24 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::1/92 ++ route1=2001:67c:1/32,2001:67c:1562::1 ++ route2=3001:67c:1/32,3001:67c:15::1 ++ ++ """ ++ ), ++ }, + }, + "vlan": { + "yaml": textwrap.dedent( +@@ -2801,6 +3413,58 @@ iface bond0 inet6 static + VLAN=yes""" + ), + }, ++ "expected_network_manager": { ++ "cloud-init-en0.99.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init en0.99 ++ uuid=f594e2ed-f107-51df-b225-1dc530a5356b ++ type=vlan ++ interface-name=en0.99 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [vlan] ++ id=99 ++ parent=e0ca478b-8d84-52ab-8fae-628482c629b5 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.2.2/24 ++ address2=192.168.1.2/24 ++ gateway=192.168.1.1 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::bbbb/96 ++ route1=::/0,2001:1::1 ++ ++ """ ++ ), ++ "cloud-init-en0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init en0 ++ uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=AA:BB:CC:DD:E8:00 ++ ++ """ ++ ), ++ }, + }, + "bridge": { + "yaml": textwrap.dedent( +@@ -2909,6 +3573,82 @@ iface bond0 inet6 static + """ + ), + }, ++ "expected_network_manager": { ++ "cloud-init-br0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init br0 ++ uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ type=bridge ++ interface-name=br0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [bridge] ++ stp=false ++ priority=22 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.2.2/24 ++ ++ """ ++ ), ++ "cloud-init-eth0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth0 ++ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c ++ type=ethernet ++ slave-type=bridge ++ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=52:54:00:12:34:00 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::100/96 ++ ++ """ ++ ), ++ "cloud-init-eth1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth1 ++ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 ++ type=ethernet ++ slave-type=bridge ++ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=52:54:00:12:34:01 ++ ++ [ipv6] ++ method=manual ++ may-fail=false ++ addr-gen-mode=stable-privacy ++ address1=2001:1::101/96 ++ ++ """ ++ ), ++ }, + }, + "manual": { + "yaml": textwrap.dedent( +@@ -3037,28 +3777,95 @@ iface bond0 inet6 static + """ + ), + }, +- }, +-} ++ "expected_network_manager": { ++ "cloud-init-eth0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. + ++ [connection] ++ id=cloud-init eth0 ++ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c ++ type=ethernet + +-CONFIG_V1_EXPLICIT_LOOPBACK = { +- "version": 1, +- "config": [ +- { +- "name": "eth0", +- "type": "physical", +- "subnets": [{"control": "auto", "type": "dhcp"}], +- }, +- { +- "name": "lo", +- "type": "loopback", +- "subnets": [{"control": "auto", "type": "loopback"}], +- }, +- ], +-} ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init + ++ [ethernet] ++ mac-address=52:54:00:12:34:00 + +-CONFIG_V1_SIMPLE_SUBNET = { ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=192.168.1.2/24 ++ ++ """ ++ ), ++ "cloud-init-eth1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init eth1 ++ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mtu=1480 ++ mac-address=52:54:00:12:34:AA ++ ++ [ipv4] ++ method=auto ++ may-fail=true ++ ++ """ ++ ), ++ "cloud-init-eth2.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init eth2 ++ uuid=5559a242-3421-5fdd-896e-9cb8313d5804 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=52:54:00:12:34:FF ++ ++ [ipv4] ++ method=auto ++ may-fail=true ++ ++ """ ++ ), ++ }, ++ }, ++} ++ ++ ++CONFIG_V1_EXPLICIT_LOOPBACK = { ++ "version": 1, ++ "config": [ ++ { ++ "name": "eth0", ++ "type": "physical", ++ "subnets": [{"control": "auto", "type": "dhcp"}], ++ }, ++ { ++ "name": "lo", ++ "type": "loopback", ++ "subnets": [{"control": "auto", "type": "loopback"}], ++ }, ++ ], ++} ++ ++ ++CONFIG_V1_SIMPLE_SUBNET = { + "version": 1, + "config": [ + { +@@ -3497,7 +4304,6 @@ class TestRhelSysConfigRendering(CiTestCase): + + with_logs = True + +- nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" + scripts_dir = "/etc/sysconfig/network-scripts" + header = ( + "# Created by cloud-init on instance boot automatically, " +@@ -4072,78 +4878,6 @@ USERCTL=no + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + +- def test_check_ifcfg_rh(self): +- """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" +- render_dir = self.tmp_dir() +- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) +- util.ensure_dir(os.path.dirname(nm_cfg)) +- +- # write a template nm.conf, note plugins is a list here +- with open(nm_cfg, "w") as fh: +- fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n") +- self.assertTrue(os.path.exists(nm_cfg)) +- +- # render and read +- entry = NETWORK_CONFIGS["small"] +- found = self._render_and_read( +- network_config=yaml.load(entry["yaml"]), dir=render_dir +- ) +- self._compare_files_to_expected(entry[self.expected_name], found) +- self._assert_headers(found) +- +- # check ifcfg-rh is in the 'plugins' list +- config = sysconfig.ConfigObj(nm_cfg) +- self.assertIn("ifcfg-rh", config["main"]["plugins"]) +- +- def test_check_ifcfg_rh_plugins_string(self): +- """ifcfg-rh plugin is append when plugins is a string.""" +- render_dir = self.tmp_path("render") +- os.makedirs(render_dir) +- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) +- util.ensure_dir(os.path.dirname(nm_cfg)) +- +- # write a template nm.conf, note plugins is a value here +- util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n") +- +- # render and read +- entry = NETWORK_CONFIGS["small"] +- found = self._render_and_read( +- network_config=yaml.load(entry["yaml"]), dir=render_dir +- ) +- self._compare_files_to_expected(entry[self.expected_name], found) +- self._assert_headers(found) +- +- # check raw content has plugin +- nm_file_content = util.load_file(nm_cfg) +- self.assertIn("ifcfg-rh", nm_file_content) +- +- # check ifcfg-rh is in the 'plugins' list +- config = sysconfig.ConfigObj(nm_cfg) +- self.assertIn("ifcfg-rh", config["main"]["plugins"]) +- +- def test_check_ifcfg_rh_plugins_no_plugins(self): +- """enable_ifcfg_plugin creates plugins value if missing.""" +- render_dir = self.tmp_path("render") +- os.makedirs(render_dir) +- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) +- util.ensure_dir(os.path.dirname(nm_cfg)) +- +- # write a template nm.conf, note plugins is missing +- util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n") +- self.assertTrue(os.path.exists(nm_cfg)) +- +- # render and read +- entry = NETWORK_CONFIGS["small"] +- found = self._render_and_read( +- network_config=yaml.load(entry["yaml"]), dir=render_dir +- ) +- self._compare_files_to_expected(entry[self.expected_name], 
found) +- self._assert_headers(found) +- +- # check ifcfg-rh is in the 'plugins' list +- config = sysconfig.ConfigObj(nm_cfg) +- self.assertIn("ifcfg-rh", config["main"]["plugins"]) +- + def test_netplan_dhcp_false_disable_dhcp_in_state(self): + """netplan config with dhcp[46]: False should not add dhcp in state""" + net_config = yaml.load(NETPLAN_DHCP_FALSE) +@@ -4699,6 +5433,281 @@ STARTMODE=auto + self._assert_headers(found) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False), ++) ++class TestNetworkManagerRendering(CiTestCase): ++ ++ with_logs = True ++ ++ scripts_dir = "/etc/NetworkManager/system-connections" ++ ++ expected_name = "expected_network_manager" ++ ++ def _get_renderer(self): ++ return network_manager.Renderer() ++ ++ def _render_and_read(self, network_config=None, state=None, dir=None): ++ if dir is None: ++ dir = self.tmp_dir() ++ ++ if network_config: ++ ns = network_state.parse_net_config_data(network_config) ++ elif state: ++ ns = state ++ else: ++ raise ValueError("Expected data or state, got neither") ++ ++ renderer = self._get_renderer() ++ renderer.render_network_state(ns, target=dir) ++ return dir2dict(dir) ++ ++ def _compare_files_to_expected(self, expected, found): ++ orig_maxdiff = self.maxDiff ++ expected_d = dict( ++ (os.path.join(self.scripts_dir, k), v) for k, v in expected.items() ++ ) ++ ++ try: ++ self.maxDiff = None ++ self.assertEqual(expected_d, found) ++ finally: ++ self.maxDiff = orig_maxdiff ++ ++ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") ++ @mock.patch("cloudinit.net.sys_dev_path") ++ @mock.patch("cloudinit.net.read_sys_net") ++ @mock.patch("cloudinit.net.get_devicelist") ++ def test_default_generation( ++ self, ++ mock_get_devicelist, ++ mock_read_sys_net, ++ mock_sys_dev_path, ++ m_get_cmdline, ++ ): ++ tmp_dir = self.tmp_dir() ++ _setup_test( ++ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path ++ ) ++ ++ network_cfg = net.generate_fallback_config() ++ ns = network_state.parse_net_config_data( ++ network_cfg, skip_broken=False ++ ) ++ ++ render_dir = os.path.join(tmp_dir, "render") ++ os.makedirs(render_dir) ++ ++ renderer = self._get_renderer() ++ renderer.render_network_state(ns, target=render_dir) ++ ++ found = dir2dict(render_dir) ++ self._compare_files_to_expected( ++ { ++ "cloud-init-eth1000.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
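For orientation, the _render_and_read helper above boils down to: parse a config into a NetworkState, hand it to the keyfile renderer, and read back whatever landed under the target directory. A minimal standalone sketch of that flow, assuming the renderer introduced by this patch set is importable as cloudinit.net.network_manager (the same name the test module uses); the config values are invented:

import os
import tempfile

from cloudinit.net import network_manager, network_state

# A deliberately tiny v1 config; values are invented for illustration.
V1_CFG = {
    "version": 1,
    "config": [
        {
            "name": "eth0",
            "type": "physical",
            "mac_address": "52:54:00:12:34:00",
            "subnets": [{"type": "dhcp"}],
        }
    ],
}

ns = network_state.parse_net_config_data(V1_CFG)
target = tempfile.mkdtemp()
network_manager.Renderer().render_network_state(ns, target=target)

# Keyfiles are written under <target>/etc/NetworkManager/system-connections/.
conn_dir = os.path.join(target, "etc/NetworkManager/system-connections")
print(os.listdir(conn_dir))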
++ ++ [connection] ++ id=cloud-init eth1000 ++ uuid=8c517500-0c95-5308-9c8a-3092eebc44eb ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=07:1C:C6:75:A4:BE ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ }, ++ found, ++ ) ++ ++ def test_openstack_rendering_samples(self): ++ for os_sample in OS_SAMPLES: ++ render_dir = self.tmp_dir() ++ ex_input = os_sample["in_data"] ++ ex_mac_addrs = os_sample["in_macs"] ++ network_cfg = openstack.convert_net_json( ++ ex_input, known_macs=ex_mac_addrs ++ ) ++ ns = network_state.parse_net_config_data( ++ network_cfg, skip_broken=False ++ ) ++ renderer = self._get_renderer() ++ # render a multiple times to simulate reboots ++ renderer.render_network_state(ns, target=render_dir) ++ renderer.render_network_state(ns, target=render_dir) ++ renderer.render_network_state(ns, target=render_dir) ++ for fn, expected_content in os_sample.get(self.expected_name, []): ++ with open(os.path.join(render_dir, fn)) as fh: ++ self.assertEqual(expected_content, fh.read()) ++ ++ def test_network_config_v1_samples(self): ++ ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ renderer = self._get_renderer() ++ renderer.render_network_state(ns, target=render_dir) ++ found = dir2dict(render_dir) ++ self._compare_files_to_expected( ++ { ++ "cloud-init-interface0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ ++ [connection] ++ id=cloud-init interface0 ++ uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 ++ type=ethernet ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ mac-address=52:54:00:12:34:00 ++ ++ [ipv4] ++ method=manual ++ may-fail=false ++ address1=10.0.2.15/24 ++ gateway=10.0.2.2 ++ ++ """ ++ ), ++ }, ++ found, ++ ) ++ ++ def test_config_with_explicit_loopback(self): ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) ++ renderer = self._get_renderer() ++ renderer.render_network_state(ns, target=render_dir) ++ found = dir2dict(render_dir) ++ self._compare_files_to_expected( ++ { ++ "cloud-init-eth0.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. 
++ ++ [connection] ++ id=cloud-init eth0 ++ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c ++ type=ethernet ++ interface-name=eth0 ++ ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ ++ [ethernet] ++ ++ [ipv4] ++ method=auto ++ may-fail=false ++ ++ """ ++ ), ++ }, ++ found, ++ ) ++ ++ def test_bond_config(self): ++ entry = NETWORK_CONFIGS["bond"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_vlan_config(self): ++ entry = NETWORK_CONFIGS["vlan"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_bridge_config(self): ++ entry = NETWORK_CONFIGS["bridge"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_manual_config(self): ++ entry = NETWORK_CONFIGS["manual"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_all_config(self): ++ entry = NETWORK_CONFIGS["all"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self.assertNotIn( ++ "WARNING: Network config: ignoring eth0.101 device-level mtu", ++ self.logs.getvalue(), ++ ) ++ ++ def test_small_config(self): ++ entry = NETWORK_CONFIGS["small"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_v4_and_v6_static_config(self): ++ entry = NETWORK_CONFIGS["v4_and_v6_static"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ expected_msg = ( ++ "WARNING: Network config: ignoring iface0 device-level mtu:8999" ++ " because ipv4 subnet-level mtu:9000 provided." 
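The expected .nmconnection files above are plain INI-style keyfiles, so they can be spot-checked with nothing more than the standard library. A rough sketch, assuming Python's configparser is close enough to NetworkManager's own GKeyFile parser for files this simple; the path is hypothetical:

import configparser

def load_keyfile(path):
    """Parse a .nmconnection keyfile into {section: {key: value}}."""
    parser = configparser.ConfigParser()
    parser.optionxform = str  # keep key case, e.g. "mac-address"
    parser.read(path)
    return {s: dict(parser.items(s)) for s in parser.sections()}

cfg = load_keyfile(
    "/etc/NetworkManager/system-connections/cloud-init-eth0.nmconnection"
)
print(cfg["connection"]["type"], cfg.get("ipv4", {}).get("method"))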
++ ) ++ self.assertIn(expected_msg, self.logs.getvalue()) ++ ++ def test_dhcpv6_only_config(self): ++ entry = NETWORK_CONFIGS["dhcpv6_only"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_simple_render_ipv6_slaac(self): ++ entry = NETWORK_CONFIGS["ipv6_slaac"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_dhcpv6_stateless_config(self): ++ entry = NETWORK_CONFIGS["dhcpv6_stateless"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_wakeonlan_disabled_config_v2(self): ++ entry = NETWORK_CONFIGS["wakeonlan_disabled"] ++ found = self._render_and_read( ++ network_config=yaml.load(entry["yaml_v2"]) ++ ) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_wakeonlan_enabled_config_v2(self): ++ entry = NETWORK_CONFIGS["wakeonlan_enabled"] ++ found = self._render_and_read( ++ network_config=yaml.load(entry["yaml_v2"]) ++ ) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_render_v4_and_v6(self): ++ entry = NETWORK_CONFIGS["v4_and_v6"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ def test_render_v6_and_v4(self): ++ entry = NETWORK_CONFIGS["v6_and_v4"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ ++ ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False), ++) + class TestEniNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") + @mock.patch("cloudinit.net.sys_dev_path") +@@ -6136,9 +7145,9 @@ class TestNetworkdRoundTrip(CiTestCase): + + class TestRenderersSelect: + @pytest.mark.parametrize( +- "renderer_selected,netplan,eni,nm,scfg,sys,networkd", ++ "renderer_selected,netplan,eni,sys,network_manager,networkd", + ( +- # -netplan -ifupdown -nm -scfg -sys raises error ++ # -netplan -ifupdown -sys -network-manager -networkd raises error + ( + net.RendererNotFoundError, + False, +@@ -6146,52 +7155,51 @@ class TestRenderersSelect: + False, + False, + False, +- False, + ), +- # -netplan +ifupdown -nm -scfg -sys selects eni +- ("eni", False, True, False, False, False, False), +- # +netplan +ifupdown -nm -scfg -sys selects eni +- ("eni", True, True, False, False, False, False), +- # +netplan -ifupdown -nm -scfg -sys selects netplan +- ("netplan", True, False, False, False, False, False), +- # Ubuntu with Network-Manager installed +- # +netplan -ifupdown +nm -scfg -sys selects netplan +- ("netplan", True, False, True, False, False, False), +- # Centos/OpenSuse with Network-Manager installed selects sysconfig +- # -netplan -ifupdown +nm -scfg +sys selects netplan +- ("sysconfig", False, False, True, False, True, False), +- # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd +- ("networkd", False, False, False, False, False, True), ++ # -netplan +ifupdown -sys -nm -networkd selects eni ++ ("eni", False, True, False, False, False), ++ # +netplan +ifupdown -sys -nm -networkd selects eni ++ ("eni", True, True, False, False, False), ++ # +netplan -ifupdown -sys -nm -networkd selects netplan ++ ("netplan", True, False, 
False, False, False), ++ # +netplan -ifupdown -sys -nm -networkd selects netplan ++ ("netplan", True, False, False, False, False), ++ # -netplan -ifupdown +sys -nm -networkd selects sysconfig ++ ("sysconfig", False, False, True, False, False), ++ # -netplan -ifupdown +sys +nm -networkd selects sysconfig ++ ("sysconfig", False, False, True, True, False), ++ # -netplan -ifupdown -sys +nm -networkd selects nm ++ ("network-manager", False, False, False, True, False), ++ # -netplan -ifupdown -sys +nm +networkd selects nm ++ ("network-manager", False, False, False, True, True), ++ # -netplan -ifupdown -sys -nm +networkd selects networkd ++ ("networkd", False, False, False, False, True), + ), + ) + @mock.patch("cloudinit.net.renderers.networkd.available") ++ @mock.patch("cloudinit.net.renderers.network_manager.available") + @mock.patch("cloudinit.net.renderers.netplan.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available") +- @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") +- @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_valid_renderer_from_defaults_depending_on_availability( + self, + m_eni_avail, +- m_nm_avail, +- m_scfg_avail, + m_sys_avail, + m_netplan_avail, ++ m_network_manager_avail, + m_networkd_avail, + renderer_selected, + netplan, + eni, +- nm, +- scfg, + sys, ++ network_manager, + networkd, + ): + """Assert proper renderer per DEFAULT_PRIORITY given availability.""" + m_eni_avail.return_value = eni # ifupdown pkg presence +- m_nm_avail.return_value = nm # network-manager presence +- m_scfg_avail.return_value = scfg # sysconfig presence + m_sys_avail.return_value = sys # sysconfig/ifup/down presence + m_netplan_avail.return_value = netplan # netplan presence ++ m_network_manager_avail.return_value = network_manager # NM presence + m_networkd_avail.return_value = networkd # networkd presence + if isinstance(renderer_selected, str): + (renderer_name, _rnd_class) = renderers.select( +@@ -6249,7 +7257,7 @@ class TestNetRenderers(CiTestCase): + priority=["sysconfig", "eni"], + ) + +- @mock.patch("cloudinit.net.sysconfig.available_sysconfig") ++ @mock.patch("cloudinit.net.sysconfig.available") + @mock.patch("cloudinit.util.system_info") + def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail): + m_avail.return_value = True +diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py +index 3c29e2f7..4525c49c 100644 +--- a/tests/unittests/test_net_activators.py ++++ b/tests/unittests/test_net_activators.py +@@ -41,18 +41,20 @@ NETPLAN_CALL_LIST = [ + + @pytest.fixture + def available_mocks(): +- mocks = namedtuple("Mocks", "m_which, m_file") ++ mocks = namedtuple("Mocks", "m_which, m_file, m_exists") + with patch("cloudinit.subp.which", return_value=True) as m_which: + with patch("os.path.isfile", return_value=True) as m_file: +- yield mocks(m_which, m_file) ++ with patch("os.path.exists", return_value=True) as m_exists: ++ yield mocks(m_which, m_file, m_exists) + + + @pytest.fixture + def unavailable_mocks(): +- mocks = namedtuple("Mocks", "m_which, m_file") ++ mocks = namedtuple("Mocks", "m_which, m_file, m_exists") + with patch("cloudinit.subp.which", return_value=False) as m_which: + with patch("os.path.isfile", return_value=False) as m_file: +- yield mocks(m_which, m_file) ++ with patch("os.path.exists", return_value=False) as m_exists: ++ yield mocks(m_which, m_file, m_exists) + + + class TestSearchAndSelect: +@@ 
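The parametrized cases above encode a first-match walk over a renderer priority list. A toy restatement of that truth table; the order below is only what the cases imply (eni before netplan, sysconfig before network-manager before networkd), the relative order of netplan and sysconfig is not pinned down by them, and the authoritative ordering is DEFAULT_PRIORITY in cloudinit.net.renderers:

PRIORITY = ["eni", "sysconfig", "netplan", "network-manager", "networkd"]

def select_renderer(available):
    # Return the first renderer whose availability flag is set.
    for name in PRIORITY:
        if available.get(name):
            return name
    raise RuntimeError("No available network renderers found")

assert select_renderer({"eni": True, "netplan": True}) == "eni"
assert select_renderer({"sysconfig": True, "network-manager": True}) == "sysconfig"
assert select_renderer({"network-manager": True, "networkd": True}) == "network-manager"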
-113,10 +115,6 @@ NETPLAN_AVAILABLE_CALLS = [ + (("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}), + ] + +-NETWORK_MANAGER_AVAILABLE_CALLS = [ +- (("nmcli",), {"target": None}), +-] +- + NETWORKD_AVAILABLE_CALLS = [ + (("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}), + (("systemctl",), {"search": ["/usr/sbin", "/bin"], "target": None}), +@@ -128,7 +126,6 @@ NETWORKD_AVAILABLE_CALLS = [ + [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS), +- (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), + (NetworkdActivator, NETWORKD_AVAILABLE_CALLS), + ], + ) +@@ -144,8 +141,72 @@ IF_UP_DOWN_BRING_UP_CALL_LIST = [ + ] + + NETWORK_MANAGER_BRING_UP_CALL_LIST = [ +- ((["nmcli", "connection", "up", "ifname", "eth0"],), {}), +- ((["nmcli", "connection", "up", "ifname", "eth1"],), {}), ++ ( ++ ( ++ [ ++ "nmcli", ++ "connection", ++ "load", ++ "".join( ++ [ ++ "/etc/NetworkManager/system-connections", ++ "/cloud-init-eth0.nmconnection", ++ ] ++ ), ++ ], ++ ), ++ {}, ++ ), ++ ( ++ ( ++ [ ++ "nmcli", ++ "connection", ++ "up", ++ "filename", ++ "".join( ++ [ ++ "/etc/NetworkManager/system-connections", ++ "/cloud-init-eth0.nmconnection", ++ ] ++ ), ++ ], ++ ), ++ {}, ++ ), ++ ( ++ ( ++ [ ++ "nmcli", ++ "connection", ++ "load", ++ "".join( ++ [ ++ "/etc/NetworkManager/system-connections", ++ "/cloud-init-eth1.nmconnection", ++ ] ++ ), ++ ], ++ ), ++ {}, ++ ), ++ ( ++ ( ++ [ ++ "nmcli", ++ "connection", ++ "up", ++ "filename", ++ "".join( ++ [ ++ "/etc/NetworkManager/system-connections", ++ "/cloud-init-eth1.nmconnection", ++ ] ++ ), ++ ], ++ ), ++ {}, ++ ), + ] + + NETWORKD_BRING_UP_CALL_LIST = [ +@@ -169,9 +230,11 @@ class TestActivatorsBringUp: + def test_bring_up_interface( + self, m_subp, activator, expected_call_list, available_mocks + ): ++ index = 0 + activator.bring_up_interface("eth0") +- assert len(m_subp.call_args_list) == 1 +- assert m_subp.call_args_list[0] == expected_call_list[0] ++ for call in m_subp.call_args_list: ++ assert call == expected_call_list[index] ++ index += 1 + + @patch("cloudinit.subp.subp", return_value=("", "")) + def test_bring_up_interfaces( +@@ -208,8 +271,8 @@ IF_UP_DOWN_BRING_DOWN_CALL_LIST = [ + ] + + NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [ +- ((["nmcli", "connection", "down", "eth0"],), {}), +- ((["nmcli", "connection", "down", "eth1"],), {}), ++ ((["nmcli", "device", "disconnect", "eth0"],), {}), ++ ((["nmcli", "device", "disconnect", "eth1"],), {}), + ] + + NETWORKD_BRING_DOWN_CALL_LIST = [ +-- +2.35.3 + diff --git a/SOURCES/ci-Add-r-n-check-for-SSH-keys-in-Azure-889.patch b/SOURCES/ci-Add-r-n-check-for-SSH-keys-in-Azure-889.patch deleted file mode 100644 index 154b62e..0000000 --- a/SOURCES/ci-Add-r-n-check-for-SSH-keys-in-Azure-889.patch +++ /dev/null @@ -1,62 +0,0 @@ -From f73d2460e5ad205a1cd2d74a73c2d1308265d9f9 Mon Sep 17 00:00:00 2001 -From: Miroslav Rezanina -Date: Wed, 18 May 2022 05:23:48 -0400 -Subject: [PATCH] Add \r\n check for SSH keys in Azure (#889) - -RH-Author: Miroslav Rezanina -RH-MergeRequest: 64: Properly handle \r\n in SSH keys in Azure -RH-Commit: [1/1] c0868258fd63f6c531acd8da81e0494a8412d5ea (mrezanin/src_rhel_cloud-init) -RH-Bugzilla: 2088028 -RH-Acked-by: xiachen -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Emanuele Giuseppe Esposito - -See https://bugs.launchpad.net/cloud-init/+bug/1910835 - -(cherry picked from commit f17f78fa9d28e62793a5f2c7109fc29eeffb0c89) -Signed-off-by: Miroslav Rezanina ---- - cloudinit/sources/DataSourceAzure.py | 3 +++ - 
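The updated bring-up call list above amounts to two nmcli invocations per interface: load the generated keyfile, then activate the connection by filename. A small sketch that reproduces the command pairs the test expects; the helper name is made up for illustration:

NM_CONN_DIR = "/etc/NetworkManager/system-connections"

def nmcli_bring_up_cmds(ifname):
    keyfile = "%s/cloud-init-%s.nmconnection" % (NM_CONN_DIR, ifname)
    return [
        ["nmcli", "connection", "load", keyfile],
        ["nmcli", "connection", "up", "filename", keyfile],
    ]

for cmd in nmcli_bring_up_cmds("eth0"):
    print(" ".join(cmd))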
tests/unittests/test_datasource/test_azure.py | 12 ++++++++++++ - 2 files changed, 15 insertions(+) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index a66f023d..247284ad 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -1551,6 +1551,9 @@ def _key_is_openssh_formatted(key): - """ - Validate whether or not the key is OpenSSH-formatted. - """ -+ # See https://bugs.launchpad.net/cloud-init/+bug/1910835 -+ if '\r\n' in key.strip(): -+ return False - - parser = ssh_util.AuthKeyLineParser() - try: -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index f8433690..742d1faa 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -1764,6 +1764,18 @@ scbus-1 on xpt0 bus 0 - self.assertEqual(ssh_keys, ["ssh-rsa key1"]) - self.assertEqual(m_parse_certificates.call_count, 0) - -+ def test_key_without_crlf_valid(self): -+ test_key = 'ssh-rsa somerandomkeystuff some comment' -+ assert True is dsaz._key_is_openssh_formatted(test_key) -+ -+ def test_key_with_crlf_invalid(self): -+ test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' -+ assert False is dsaz._key_is_openssh_formatted(test_key) -+ -+ def test_key_endswith_crlf_valid(self): -+ test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' -+ assert True is dsaz._key_is_openssh_formatted(test_key) -+ - @mock.patch( - 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') - @mock.patch(MOCKPATH + 'get_metadata_from_imds') --- -2.31.1 - diff --git a/SOURCES/ci-Align-rhel-custom-files-with-upstream-1431.patch b/SOURCES/ci-Align-rhel-custom-files-with-upstream-1431.patch new file mode 100644 index 0000000..7346183 --- /dev/null +++ b/SOURCES/ci-Align-rhel-custom-files-with-upstream-1431.patch @@ -0,0 +1,257 @@ +From 5c99ba05086b1ec83ce7e0c64edb4add4b47d923 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Thu, 19 May 2022 11:14:39 +0200 +Subject: [PATCH 3/4] Align rhel custom files with upstream (#1431) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 65: Align rhel custom files with upstream (#1431) +RH-Commit: [1/2] 5d9067175688b1006472a477b0916b81c73d5e07 +RH-Bugzilla: 2082071 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov + +commit 9624758f91b61f4711e8d7b5c83075b5d23e0c43 +Author: Emanuele Giuseppe Esposito +Date: Wed May 18 15:18:04 2022 +0200 + + Align rhel custom files with upstream (#1431) + + So far RHEL had its own custom .service and cloud.cfg files, + that diverged from upstream. We always replaced the generated files + with the ones we had. + + This caused only confusion and made it harder to rebase and backport + patches targeting these files. + At the same time, we are going to delete our custom downstream-only files + and use the ones generated by .tmpl. + + The mapping is: + config/cloud.cfg.tmpl -> rhel/cloud.cfg + systemd/* -> rhel/systemd/* + + Such rhel-specific files are open and available in the Centos repo: + https://gitlab.com/redhat/centos-stream/src/cloud-init + + With this commit, we are also introducing modules in cloud.cfg that + were not in the default rhel cfg file, even though they should already + have been there with previous rebases and releases. + Anyways such modules support rhel as distro, and + therefore should cause no harm. 
+ + Signed-off-by: Emanuele Giuseppe Esposito + + RHBZ: 2082071 + +Signed-off-by: Emanuele Giuseppe Esposito +--- + config/cloud.cfg.tmpl | 23 +++++++++++++++++++++++ + systemd/cloud-config.service.tmpl | 4 ++++ + systemd/cloud-final.service.tmpl | 13 +++++++++++++ + systemd/cloud-init-local.service.tmpl | 22 +++++++++++++++++++++- + systemd/cloud-init.service.tmpl | 6 +++++- + tests/unittests/test_render_cloudcfg.py | 1 + + 6 files changed, 67 insertions(+), 2 deletions(-) + +diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl +index 86beee3c..f4d2fd14 100644 +--- a/config/cloud.cfg.tmpl ++++ b/config/cloud.cfg.tmpl +@@ -34,7 +34,11 @@ disable_root: true + + {% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux", + "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} ++{% if variant == "rhel" %} ++mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2'] ++{% else %} + mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] ++{% endif %} + {% if variant == "amazon" %} + resize_rootfs: noblock + {% endif %} +@@ -66,6 +70,14 @@ network: + config: disabled + {% endif %} + ++{% if variant == "rhel" %} ++# Default redhat settings: ++ssh_deletekeys: true ++ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519'] ++syslog_fix_perms: ~ ++disable_vmware_customization: false ++{% endif %} ++ + # The modules that run in the 'init' stage + cloud_init_modules: + - migrator +@@ -107,10 +119,15 @@ cloud_config_modules: + {% endif %} + {% if variant not in ["photon"] %} + - ssh-import-id ++{% if variant not in ["rhel"] %} + - keyboard ++{% endif %} + - locale + {% endif %} + - set-passwords ++{% if variant in ["rhel"] %} ++ - rh_subscription ++{% endif %} + {% if variant in ["rhel", "fedora", "photon"] %} + {% if variant not in ["photon"] %} + - spacewalk +@@ -239,6 +256,10 @@ system_info: + name: ec2-user + lock_passwd: True + gecos: EC2 Default User ++{% elif variant == "rhel" %} ++ name: cloud-user ++ lock_passwd: true ++ gecos: Cloud User + {% else %} + name: {{ variant }} + lock_passwd: True +@@ -254,6 +275,8 @@ system_info: + groups: [adm, sudo] + {% elif variant == "arch" %} + groups: [wheel, users] ++{% elif variant == "rhel" %} ++ groups: [adm, systemd-journal] + {% else %} + groups: [wheel, adm, systemd-journal] + {% endif %} +diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl +index 9d928ca2..d5568a6e 100644 +--- a/systemd/cloud-config.service.tmpl ++++ b/systemd/cloud-config.service.tmpl +@@ -4,6 +4,10 @@ Description=Apply the settings specified in cloud-config + After=network-online.target cloud-config.target + After=snapd.seeded.service + Wants=network-online.target cloud-config.target ++{% if variant == "rhel" %} ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++{% endif %} + + [Service] + Type=oneshot +diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl +index 8207b18c..85f423ac 100644 +--- a/systemd/cloud-final.service.tmpl ++++ b/systemd/cloud-final.service.tmpl +@@ -7,6 +7,10 @@ After=multi-user.target + Before=apt-daily.service + {% endif %} + Wants=network-online.target cloud-config.service ++{% if variant == "rhel" %} ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++{% endif %} + + + [Service] +@@ -15,7 +19,16 @@ ExecStart=/usr/bin/cloud-init modules --mode=final + RemainAfterExit=yes + TimeoutSec=0 + 
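The .tmpl hunks above are ordinary Jinja conditionals keyed on the distro variant, so the rhel branch can be previewed with a plain jinja2 render; cloud-init's own build tooling (exercised by test_render_cloudcfg.py in the diffstat above) does the real rendering:

import jinja2

# Snippet lifted from the cloud.cfg.tmpl hunk above.
SNIPPET = """\
{% if variant == "rhel" %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
{% else %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% endif %}
"""

print(jinja2.Template(SNIPPET).render(variant="rhel"))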
KillMode=process ++{% if variant == "rhel" %} ++# Restart NetworkManager if it is present and running. ++ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ ++ out=$(systemctl show --property=SubState $u) || exit; \ ++ [ "$out" = "SubState=running" ] || exit 0; \ ++ systemctl reload-or-try-restart $u' ++{% else %} + TasksMax=infinity ++{% endif %} ++ + + # Output needs to appear in instance console output + StandardOutput=journal+console +diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl +index 7166f640..a6b82650 100644 +--- a/systemd/cloud-init-local.service.tmpl ++++ b/systemd/cloud-init-local.service.tmpl +@@ -1,23 +1,43 @@ + ## template:jinja + [Unit] + Description=Initial cloud-init job (pre-networking) +-{% if variant in ["ubuntu", "unknown", "debian"] %} ++{% if variant in ["ubuntu", "unknown", "debian", "rhel" ] %} + DefaultDependencies=no + {% endif %} + Wants=network-pre.target + After=hv_kvp_daemon.service + After=systemd-remount-fs.service ++{% if variant == "rhel" %} ++Requires=dbus.socket ++After=dbus.socket ++{% endif %} + Before=NetworkManager.service ++{% if variant == "rhel" %} ++Before=network.service ++{% endif %} + Before=network-pre.target + Before=shutdown.target ++{% if variant == "rhel" %} ++Before=firewalld.target ++Conflicts=shutdown.target ++{% endif %} + {% if variant in ["ubuntu", "unknown", "debian"] %} + Before=sysinit.target + Conflicts=shutdown.target + {% endif %} + RequiresMountsFor=/var/lib/cloud ++{% if variant == "rhel" %} ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++{% endif %} + + [Service] + Type=oneshot ++{% if variant == "rhel" %} ++ExecStartPre=/bin/mkdir -p /run/cloud-init ++ExecStartPre=/sbin/restorecon /run/cloud-init ++ExecStartPre=/usr/bin/touch /run/cloud-init/enabled ++{% endif %} + ExecStart=/usr/bin/cloud-init init --local + ExecStart=/bin/touch /run/cloud-init/network-config-ready + RemainAfterExit=yes +diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl +index e71e5679..c170aef7 100644 +--- a/systemd/cloud-init.service.tmpl ++++ b/systemd/cloud-init.service.tmpl +@@ -1,7 +1,7 @@ + ## template:jinja + [Unit] + Description=Initial cloud-init job (metadata service crawler) +-{% if variant not in ["photon"] %} ++{% if variant not in ["photon", "rhel"] %} + DefaultDependencies=no + {% endif %} + Wants=cloud-init-local.service +@@ -36,6 +36,10 @@ Before=shutdown.target + Conflicts=shutdown.target + {% endif %} + Before=systemd-user-sessions.service ++{% if variant == "rhel" %} ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++{% endif %} + + [Service] + Type=oneshot +diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py +index 30fbd1a4..9f95d448 100644 +--- a/tests/unittests/test_render_cloudcfg.py ++++ b/tests/unittests/test_render_cloudcfg.py +@@ -68,6 +68,7 @@ class TestRenderCloudCfg: + default_user_exceptions = { + "amazon": "ec2-user", + "debian": "ubuntu", ++ "rhel": "cloud-user", + "unknown": "ubuntu", + } + default_user = system_cfg["system_info"]["default_user"]["name"] +-- +2.35.3 + diff --git a/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch b/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch deleted file mode 100644 index de27366..0000000 --- a/SOURCES/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch +++ /dev/null @@ -1,397 +0,0 @@ -From 
3ec4ddbc595c5fe781b3dc501631d23569849818 Mon Sep 17 00:00:00 2001 -From: Thomas Stringer -Date: Mon, 26 Apr 2021 09:41:38 -0400 -Subject: [PATCH 5/7] Azure: Retrieve username and hostname from IMDS (#865) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [5/7] 6fab7ef28c7fd340bda4f82dbf828f10716cb3f1 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -This change allows us to retrieve the username and hostname from -IMDS instead of having to rely on the mounted OVF. ---- - cloudinit/sources/DataSourceAzure.py | 149 ++++++++++++++---- - tests/unittests/test_datasource/test_azure.py | 87 +++++++++- - 2 files changed, 205 insertions(+), 31 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 39e67c4f..6d7954ee 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -5,6 +5,7 @@ - # This file is part of cloud-init. See LICENSE file for license information. - - import base64 -+from collections import namedtuple - import contextlib - import crypt - from functools import partial -@@ -25,6 +26,7 @@ from cloudinit.net import device_driver - from cloudinit.net.dhcp import EphemeralDHCPv4 - from cloudinit import sources - from cloudinit.sources.helpers import netlink -+from cloudinit import ssh_util - from cloudinit import subp - from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc - from cloudinit import util -@@ -80,7 +82,12 @@ AGENT_SEED_DIR = '/var/lib/waagent' - IMDS_TIMEOUT_IN_SECONDS = 2 - IMDS_URL = "http://169.254.169.254/metadata" - IMDS_VER_MIN = "2019-06-01" --IMDS_VER_WANT = "2020-09-01" -+IMDS_VER_WANT = "2020-10-01" -+ -+ -+# This holds SSH key data including if the source was -+# from IMDS, as well as the SSH key data itself. 
-+SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys")) - - - class metadata_type(Enum): -@@ -391,6 +398,8 @@ class DataSourceAzure(sources.DataSource): - """Return the subplatform metadata source details.""" - if self.seed.startswith('/dev'): - subplatform_type = 'config-disk' -+ elif self.seed.lower() == 'imds': -+ subplatform_type = 'imds' - else: - subplatform_type = 'seed-dir' - return '%s (%s)' % (subplatform_type, self.seed) -@@ -433,9 +442,11 @@ class DataSourceAzure(sources.DataSource): - - found = None - reprovision = False -+ ovf_is_accessible = True - reprovision_after_nic_attach = False - for cdev in candidates: - try: -+ LOG.debug("cdev: %s", cdev) - if cdev == "IMDS": - ret = None - reprovision = True -@@ -462,8 +473,18 @@ class DataSourceAzure(sources.DataSource): - raise sources.InvalidMetaDataException(msg) - except util.MountFailedError: - report_diagnostic_event( -- '%s was not mountable' % cdev, logger_func=LOG.warning) -- continue -+ '%s was not mountable' % cdev, logger_func=LOG.debug) -+ cdev = 'IMDS' -+ ovf_is_accessible = False -+ empty_md = {'local-hostname': ''} -+ empty_cfg = dict( -+ system_info=dict( -+ default_user=dict( -+ name='' -+ ) -+ ) -+ ) -+ ret = (empty_md, '', empty_cfg, {}) - - report_diagnostic_event("Found provisioning metadata in %s" % cdev, - logger_func=LOG.debug) -@@ -490,6 +511,10 @@ class DataSourceAzure(sources.DataSource): - self.fallback_interface, - retries=10 - ) -+ if not imds_md and not ovf_is_accessible: -+ msg = 'No OVF or IMDS available' -+ report_diagnostic_event(msg) -+ raise sources.InvalidMetaDataException(msg) - (md, userdata_raw, cfg, files) = ret - self.seed = cdev - crawled_data.update({ -@@ -498,6 +523,21 @@ class DataSourceAzure(sources.DataSource): - 'metadata': util.mergemanydict( - [md, {'imds': imds_md}]), - 'userdata_raw': userdata_raw}) -+ imds_username = _username_from_imds(imds_md) -+ imds_hostname = _hostname_from_imds(imds_md) -+ imds_disable_password = _disable_password_from_imds(imds_md) -+ if imds_username: -+ LOG.debug('Username retrieved from IMDS: %s', imds_username) -+ cfg['system_info']['default_user']['name'] = imds_username -+ if imds_hostname: -+ LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname) -+ crawled_data['metadata']['local-hostname'] = imds_hostname -+ if imds_disable_password: -+ LOG.debug( -+ 'Disable password retrieved from IMDS: %s', -+ imds_disable_password -+ ) -+ crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 - found = cdev - - report_diagnostic_event( -@@ -676,6 +716,13 @@ class DataSourceAzure(sources.DataSource): - - @azure_ds_telemetry_reporter - def get_public_ssh_keys(self): -+ """ -+ Retrieve public SSH keys. -+ """ -+ -+ return self._get_public_ssh_keys_and_source().ssh_keys -+ -+ def _get_public_ssh_keys_and_source(self): - """ - Try to get the ssh keys from IMDS first, and if that fails - (i.e. IMDS is unavailable) then fallback to getting the ssh -@@ -685,30 +732,50 @@ class DataSourceAzure(sources.DataSource): - advantage, so this is a strong preference. But we must keep - OVF as a second option for environments that don't have IMDS. 
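For reference, the osProfile handling a few hunks above simply walks a nested IMDS dictionary and treats a missing key as "not provided". A standalone illustration with invented sample data and a hypothetical helper:

SAMPLE_IMDS = {
    "compute": {
        "osProfile": {
            "adminUsername": "azureuser",
            "computerName": "examplehost",
            "disablePasswordAuthentication": "true",
        }
    }
}

def from_imds(imds_data, *path):
    # Walk the nested dict, returning None if any key is absent.
    cur = imds_data
    for key in path:
        try:
            cur = cur[key]
        except (KeyError, TypeError):
            return None
    return cur

print(from_imds(SAMPLE_IMDS, "compute", "osProfile", "adminUsername"))
print(from_imds(SAMPLE_IMDS, "compute", "osProfile", "computerName"))
print(from_imds(SAMPLE_IMDS, "compute", "osProfile", "disablePasswordAuthentication") == "true")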
- """ -+ - LOG.debug('Retrieving public SSH keys') - ssh_keys = [] -+ keys_from_imds = True -+ LOG.debug('Attempting to get SSH keys from IMDS') - try: -- raise KeyError( -- "Not using public SSH keys from IMDS" -- ) -- # pylint:disable=unreachable - ssh_keys = [ - public_key['keyData'] - for public_key - in self.metadata['imds']['compute']['publicKeys'] - ] -- LOG.debug('Retrieved SSH keys from IMDS') -+ for key in ssh_keys: -+ if not _key_is_openssh_formatted(key=key): -+ keys_from_imds = False -+ break -+ -+ if not keys_from_imds: -+ log_msg = 'Keys not in OpenSSH format, using OVF' -+ else: -+ log_msg = 'Retrieved {} keys from IMDS'.format( -+ len(ssh_keys) -+ if ssh_keys is not None -+ else 0 -+ ) - except KeyError: - log_msg = 'Unable to get keys from IMDS, falling back to OVF' -+ keys_from_imds = False -+ finally: - report_diagnostic_event(log_msg, logger_func=LOG.debug) -+ -+ if not keys_from_imds: -+ LOG.debug('Attempting to get SSH keys from OVF') - try: - ssh_keys = self.metadata['public-keys'] -- LOG.debug('Retrieved keys from OVF') -+ log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys)) - except KeyError: - log_msg = 'No keys available from OVF' -+ finally: - report_diagnostic_event(log_msg, logger_func=LOG.debug) - -- return ssh_keys -+ return SSHKeys( -+ keys_from_imds=keys_from_imds, -+ ssh_keys=ssh_keys -+ ) - - def get_config_obj(self): - return self.cfg -@@ -1325,30 +1392,21 @@ class DataSourceAzure(sources.DataSource): - self.bounce_network_with_azure_hostname() - - pubkey_info = None -- try: -- raise KeyError( -- "Not using public SSH keys from IMDS" -- ) -- # pylint:disable=unreachable -- public_keys = self.metadata['imds']['compute']['publicKeys'] -- LOG.debug( -- 'Successfully retrieved %s key(s) from IMDS', -- len(public_keys) -- if public_keys is not None -+ ssh_keys_and_source = self._get_public_ssh_keys_and_source() -+ -+ if not ssh_keys_and_source.keys_from_imds: -+ pubkey_info = self.cfg.get('_pubkeys', None) -+ log_msg = 'Retrieved {} fingerprints from OVF'.format( -+ len(pubkey_info) -+ if pubkey_info is not None - else 0 - ) -- except KeyError: -- LOG.debug( -- 'Unable to retrieve SSH keys from IMDS during ' -- 'negotiation, falling back to OVF' -- ) -- pubkey_info = self.cfg.get('_pubkeys', None) -+ report_diagnostic_event(log_msg, logger_func=LOG.debug) - - metadata_func = partial(get_metadata_from_fabric, - fallback_lease_file=self. - dhclient_lease_file, -- pubkey_info=pubkey_info, -- iso_dev=self.iso_dev) -+ pubkey_info=pubkey_info) - - LOG.debug("negotiating with fabric via agent command %s", - self.ds_cfg['agent_command']) -@@ -1404,6 +1462,41 @@ class DataSourceAzure(sources.DataSource): - return self.metadata.get('imds', {}).get('compute', {}).get('location') - - -+def _username_from_imds(imds_data): -+ try: -+ return imds_data['compute']['osProfile']['adminUsername'] -+ except KeyError: -+ return None -+ -+ -+def _hostname_from_imds(imds_data): -+ try: -+ return imds_data['compute']['osProfile']['computerName'] -+ except KeyError: -+ return None -+ -+ -+def _disable_password_from_imds(imds_data): -+ try: -+ return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501 -+ except KeyError: -+ return None -+ -+ -+def _key_is_openssh_formatted(key): -+ """ -+ Validate whether or not the key is OpenSSH-formatted. 
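A simplified stand-in for the check described above, not the project's implementation: a key is treated as OpenSSH formatted only if it parses as "<keytype> <data> [comment]", and keys with embedded CRLF are rejected as in the \r\n patch shown earlier in this diff; the real code defers to ssh_util.AuthKeyLineParser instead:

def looks_openssh_formatted(key):
    # Reject keys with embedded CRLF, then require "<keytype> <data> [comment]".
    if "\r\n" in key.strip():
        return False
    parts = key.strip().split(None, 2)
    return len(parts) >= 2 and parts[0].startswith(("ssh-", "ecdsa-"))

assert looks_openssh_formatted("ssh-rsa AAAAB3NzaC1yc2E some comment")
assert not looks_openssh_formatted("no-openssh-format")
assert not looks_openssh_formatted("ssh-rsa someran\r\ndomkeystuff some comment")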
-+ """ -+ -+ parser = ssh_util.AuthKeyLineParser() -+ try: -+ akl = parser.parse(key) -+ except TypeError: -+ return False -+ -+ return akl.keytype is not None -+ -+ - def _partitions_on_device(devpath, maxnum=16): - # return a list of tuples (ptnum, path) for each part on devpath - for suff in ("-part", "p", ""): -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index 320fa857..d9817d84 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -108,7 +108,7 @@ NETWORK_METADATA = { - "zone": "", - "publicKeys": [ - { -- "keyData": "key1", -+ "keyData": "ssh-rsa key1", - "path": "path1" - } - ] -@@ -1761,8 +1761,29 @@ scbus-1 on xpt0 bus 0 - dsrc.get_data() - dsrc.setup(True) - ssh_keys = dsrc.get_public_ssh_keys() -- # Temporarily alter this test so that SSH public keys -- # from IMDS are *not* going to be in use to fix a regression. -+ self.assertEqual(ssh_keys, ["ssh-rsa key1"]) -+ self.assertEqual(m_parse_certificates.call_count, 0) -+ -+ @mock.patch( -+ 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_get_public_ssh_keys_with_no_openssh_format( -+ self, -+ m_get_metadata_from_imds, -+ m_parse_certificates): -+ imds_data = copy.deepcopy(NETWORK_METADATA) -+ imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' -+ m_get_metadata_from_imds.return_value = imds_data -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ dsrc.setup(True) -+ ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, []) - self.assertEqual(m_parse_certificates.call_count, 0) - -@@ -1818,6 +1839,66 @@ scbus-1 on xpt0 bus 0 - self.assertIsNotNone(dsrc.metadata) - self.assertFalse(dsrc.failed_desired_api_version) - -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_hostname_from_imds(self, m_get_metadata_from_imds): -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) -+ imds_data_with_os_profile["compute"]["osProfile"] = dict( -+ adminUsername="username1", -+ computerName="hostname1", -+ disablePasswordAuthentication="true" -+ ) -+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") -+ -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_username_from_imds(self, m_get_metadata_from_imds): -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) -+ imds_data_with_os_profile["compute"]["osProfile"] = dict( -+ adminUsername="username1", -+ computerName="hostname1", -+ disablePasswordAuthentication="true" -+ ) -+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ self.assertEqual( -+ 
dsrc.cfg["system_info"]["default_user"]["name"], -+ "username1" -+ ) -+ -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_disable_password_from_imds(self, m_get_metadata_from_imds): -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) -+ imds_data_with_os_profile["compute"]["osProfile"] = dict( -+ adminUsername="username1", -+ computerName="hostname1", -+ disablePasswordAuthentication="true" -+ ) -+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile -+ dsrc = self._get_ds(data) -+ dsrc.get_data() -+ self.assertTrue(dsrc.metadata["disable_password"]) -+ - - class TestAzureBounce(CiTestCase): - --- -2.27.0 - diff --git a/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch b/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch deleted file mode 100644 index efc9fc2..0000000 --- a/SOURCES/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch +++ /dev/null @@ -1,315 +0,0 @@ -From ca5b83cee7b45bf56eec258db739cb5fe51b3231 Mon Sep 17 00:00:00 2001 -From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> -Date: Mon, 26 Apr 2021 07:28:39 -0700 -Subject: [PATCH 6/7] Azure: Retry net metadata during nic attach for - non-timeout errs (#878) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [6/7] 4e6e44f017d5ffcb72ac8959a94f80c71fef9560 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -When network interfaces are hot-attached to the VM, attempting to get -network metadata might return 410 (or 500, 503 etc) because the info -is not yet available. In those cases, we retry getting the metadata -before giving up. The only case where we can move on to wait for more -nic attach events is if the call times out despite retries, which -means the interface is not likely a primary interface, and we should -try for more nic attach events. ---- - cloudinit/sources/DataSourceAzure.py | 65 +++++++++++-- - tests/unittests/test_datasource/test_azure.py | 95 ++++++++++++++++--- - 2 files changed, 140 insertions(+), 20 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 6d7954ee..d0be6d84 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -17,6 +17,7 @@ from time import sleep - from xml.dom import minidom - import xml.etree.ElementTree as ET - from enum import Enum -+import requests - - from cloudinit import dmi - from cloudinit import log as logging -@@ -665,7 +666,9 @@ class DataSourceAzure(sources.DataSource): - self, - fallback_nic, - retries, -- md_type=metadata_type.compute): -+ md_type=metadata_type.compute, -+ exc_cb=retry_on_url_exc, -+ infinite=False): - """ - Wrapper for get_metadata_from_imds so that we can have flexibility - in which IMDS api-version we use. 
If a particular instance of IMDS -@@ -685,7 +688,8 @@ class DataSourceAzure(sources.DataSource): - fallback_nic=fallback_nic, - retries=0, - md_type=md_type, -- api_version=IMDS_VER_WANT -+ api_version=IMDS_VER_WANT, -+ exc_cb=exc_cb - ) - except UrlError as err: - LOG.info( -@@ -708,7 +712,9 @@ class DataSourceAzure(sources.DataSource): - fallback_nic=fallback_nic, - retries=retries, - md_type=md_type, -- api_version=IMDS_VER_MIN -+ api_version=IMDS_VER_MIN, -+ exc_cb=exc_cb, -+ infinite=infinite - ) - - def device_name_to_device(self, name): -@@ -938,6 +944,9 @@ class DataSourceAzure(sources.DataSource): - is_primary = False - expected_nic_count = -1 - imds_md = None -+ metadata_poll_count = 0 -+ metadata_logging_threshold = 1 -+ metadata_timeout_count = 0 - - # For now, only a VM's primary NIC can contact IMDS and WireServer. If - # DHCP fails for a NIC, we have no mechanism to determine if the NIC is -@@ -962,14 +971,48 @@ class DataSourceAzure(sources.DataSource): - % (ifname, e), logger_func=LOG.error) - raise - -+ # Retry polling network metadata for a limited duration only when the -+ # calls fail due to timeout. This is because the platform drops packets -+ # going towards IMDS when it is not a primary nic. If the calls fail -+ # due to other issues like 410, 503 etc, then it means we are primary -+ # but IMDS service is unavailable at the moment. Retry indefinitely in -+ # those cases since we cannot move on without the network metadata. -+ def network_metadata_exc_cb(msg, exc): -+ nonlocal metadata_timeout_count, metadata_poll_count -+ nonlocal metadata_logging_threshold -+ -+ metadata_poll_count = metadata_poll_count + 1 -+ -+ # Log when needed but back off exponentially to avoid exploding -+ # the log file. -+ if metadata_poll_count >= metadata_logging_threshold: -+ metadata_logging_threshold *= 2 -+ report_diagnostic_event( -+ "Ran into exception when attempting to reach %s " -+ "after %d polls." % (msg, metadata_poll_count), -+ logger_func=LOG.error) -+ -+ if isinstance(exc, UrlError): -+ report_diagnostic_event("poll IMDS with %s failed. " -+ "Exception: %s and code: %s" % -+ (msg, exc.cause, exc.code), -+ logger_func=LOG.error) -+ -+ if exc.cause and isinstance(exc.cause, requests.Timeout): -+ metadata_timeout_count = metadata_timeout_count + 1 -+ return (metadata_timeout_count <= 10) -+ return True -+ - # Primary nic detection will be optimized in the future. The fact that - # primary nic is being attached first helps here. Otherwise each nic - # could add several seconds of delay. - try: - imds_md = self.get_imds_data_with_api_fallback( - ifname, -- 5, -- metadata_type.network -+ 0, -+ metadata_type.network, -+ network_metadata_exc_cb, -+ True - ) - except Exception as e: - LOG.warning( -@@ -2139,7 +2182,9 @@ def _generate_network_config_from_fallback_config() -> dict: - def get_metadata_from_imds(fallback_nic, - retries, - md_type=metadata_type.compute, -- api_version=IMDS_VER_MIN): -+ api_version=IMDS_VER_MIN, -+ exc_cb=retry_on_url_exc, -+ infinite=False): - """Query Azure's instance metadata service, returning a dictionary. 
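The exception callback above implements the policy the commit message describes: give up after a bounded number of IMDS timeouts (the NIC is probably not the primary one), but keep retrying on every other failure such as 410/500/503 because the NIC is primary and IMDS is simply not ready yet. A condensed restatement of that callback logic:

import requests

def make_network_metadata_exc_cb(max_timeouts=10):
    timeouts = 0

    def exc_cb(msg, exc):
        nonlocal timeouts
        cause = getattr(exc, "cause", None)
        if cause is not None and isinstance(cause, requests.Timeout):
            timeouts += 1
            return timeouts <= max_timeouts  # stop polling after the cap
        return True  # IMDS unavailable (410, 503, ...): keep retrying

    return exc_cb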
- - If network is not up, setup ephemeral dhcp on fallback_nic to talk to the -@@ -2158,7 +2203,7 @@ def get_metadata_from_imds(fallback_nic, - kwargs = {'logfunc': LOG.debug, - 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', - 'func': _get_metadata_from_imds, -- 'args': (retries, md_type, api_version,)} -+ 'args': (retries, exc_cb, md_type, api_version, infinite)} - if net.is_up(fallback_nic): - return util.log_time(**kwargs) - else: -@@ -2176,14 +2221,16 @@ def get_metadata_from_imds(fallback_nic, - @azure_ds_telemetry_reporter - def _get_metadata_from_imds( - retries, -+ exc_cb, - md_type=metadata_type.compute, -- api_version=IMDS_VER_MIN): -+ api_version=IMDS_VER_MIN, -+ infinite=False): - url = "{}?api-version={}".format(md_type.value, api_version) - headers = {"Metadata": "true"} - try: - response = readurl( - url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, -- retries=retries, exception_cb=retry_on_url_exc) -+ retries=retries, exception_cb=exc_cb, infinite=infinite) - except Exception as e: - # pylint:disable=no-member - if isinstance(e, UrlError) and e.code == 400: -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index d9817d84..c4a8e08d 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, -- timeout=mock.ANY) -+ timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') -@@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - "http://169.254.169.254/metadata/instance/network?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, -- timeout=mock.ANY) -+ timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') -@@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, -- timeout=mock.ANY) -+ timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) -@@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): - m_readurl.assert_called_with( - self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, -- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) -+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) - - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) -@@ -2694,15 +2694,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase): - - def nic_attach_ret(nl_sock, nics_found): - nonlocal m_attach_call_count -- if m_attach_call_count == 0: -- m_attach_call_count = m_attach_call_count + 1 -+ m_attach_call_count = m_attach_call_count + 1 -+ if m_attach_call_count == 1: - return "eth0" -- return "eth1" -+ elif m_attach_call_count == 2: -+ return "eth1" -+ raise RuntimeError("Must have found primary nic by now.") -+ -+ # Simulate two NICs by adding the same one twice. 
-+ md = { -+ "interface": [ -+ IMDS_NETWORK_METADATA['interface'][0], -+ IMDS_NETWORK_METADATA['interface'][0] -+ ] -+ } - -- def network_metadata_ret(ifname, retries, type): -- # Simulate two NICs by adding the same one twice. -- md = IMDS_NETWORK_METADATA -- md['interface'].append(md['interface'][0]) -+ def network_metadata_ret(ifname, retries, type, exc_cb, infinite): - if ifname == "eth0": - return md - raise requests.Timeout('Fake connection timeout') -@@ -2724,6 +2731,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase): - self.assertEqual(1, m_imds.call_count) - self.assertEqual(2, m_link_up.call_count) - -+ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') -+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4') -+ def test_check_if_nic_is_primary_retries_on_failures( -+ self, m_dhcpv4, m_imds): -+ """Retry polling for network metadata on all failures except timeout""" -+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) -+ lease = { -+ 'interface': 'eth9', 'fixed-address': '192.168.2.9', -+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', -+ 'unknown-245': '624c3620'} -+ -+ eth0Retries = [] -+ eth1Retries = [] -+ # Simulate two NICs by adding the same one twice. -+ md = { -+ "interface": [ -+ IMDS_NETWORK_METADATA['interface'][0], -+ IMDS_NETWORK_METADATA['interface'][0] -+ ] -+ } -+ -+ def network_metadata_ret(ifname, retries, type, exc_cb, infinite): -+ nonlocal eth0Retries, eth1Retries -+ -+ # Simulate readurl functionality with retries and -+ # exception callbacks so that the callback logic can be -+ # validated. -+ if ifname == "eth0": -+ cause = requests.HTTPError() -+ for _ in range(0, 15): -+ error = url_helper.UrlError(cause=cause, code=410) -+ eth0Retries.append(exc_cb("No goal state.", error)) -+ else: -+ cause = requests.Timeout('Fake connection timeout') -+ for _ in range(0, 10): -+ error = url_helper.UrlError(cause=cause) -+ eth1Retries.append(exc_cb("Connection timeout", error)) -+ # Should stop retrying after 10 retries -+ eth1Retries.append(exc_cb("Connection timeout", error)) -+ raise cause -+ return md -+ -+ m_imds.side_effect = network_metadata_ret -+ -+ dhcp_ctx = mock.MagicMock(lease=lease) -+ dhcp_ctx.obtain_lease.return_value = lease -+ m_dhcpv4.return_value = dhcp_ctx -+ -+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") -+ self.assertEqual(True, is_primary) -+ self.assertEqual(2, expected_nic_count) -+ -+ # All Eth0 errors are non-timeout errors. So we should have been -+ # retrying indefinitely until success. -+ for i in eth0Retries: -+ self.assertTrue(i) -+ -+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") -+ self.assertEqual(False, is_primary) -+ -+ # All Eth1 errors are timeout errors. Retry happens for a max of 10 and -+ # then we should have moved on assuming it is not the primary nic. 
-+ for i in range(0, 10): -+ self.assertTrue(eth1Retries[i]) -+ self.assertFalse(eth1Retries[10]) -+ - @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') - def test_wait_for_link_up_returns_if_already_up( - self, m_is_link_up): --- -2.27.0 - diff --git a/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch b/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch deleted file mode 100644 index d4e7e37..0000000 --- a/SOURCES/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch +++ /dev/null @@ -1,129 +0,0 @@ -From c0df7233fa99d4191b5d4142e209e7465d8db5f6 Mon Sep 17 00:00:00 2001 -From: Anh Vo -Date: Tue, 27 Apr 2021 13:40:59 -0400 -Subject: [PATCH 7/7] Azure: adding support for consuming userdata from IMDS - (#884) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [7/7] 32f840412da1a0f49b9ab5ba1d6f1bcb1bfacc16 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy ---- - cloudinit/sources/DataSourceAzure.py | 23 ++++++++- - tests/unittests/test_datasource/test_azure.py | 50 +++++++++++++++++++ - 2 files changed, 72 insertions(+), 1 deletion(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index d0be6d84..a66f023d 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -83,7 +83,7 @@ AGENT_SEED_DIR = '/var/lib/waagent' - IMDS_TIMEOUT_IN_SECONDS = 2 - IMDS_URL = "http://169.254.169.254/metadata" - IMDS_VER_MIN = "2019-06-01" --IMDS_VER_WANT = "2020-10-01" -+IMDS_VER_WANT = "2021-01-01" - - - # This holds SSH key data including if the source was -@@ -539,6 +539,20 @@ class DataSourceAzure(sources.DataSource): - imds_disable_password - ) - crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 -+ -+ # only use userdata from imds if OVF did not provide custom data -+ # userdata provided by IMDS is always base64 encoded -+ if not userdata_raw: -+ imds_userdata = _userdata_from_imds(imds_md) -+ if imds_userdata: -+ LOG.debug("Retrieved userdata from IMDS") -+ try: -+ crawled_data['userdata_raw'] = base64.b64decode( -+ ''.join(imds_userdata.split())) -+ except Exception: -+ report_diagnostic_event( -+ "Bad userdata in IMDS", -+ logger_func=LOG.warning) - found = cdev - - report_diagnostic_event( -@@ -1512,6 +1526,13 @@ def _username_from_imds(imds_data): - return None - - -+def _userdata_from_imds(imds_data): -+ try: -+ return imds_data['compute']['userData'] -+ except KeyError: -+ return None -+ -+ - def _hostname_from_imds(imds_data): - try: - return imds_data['compute']['osProfile']['computerName'] -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index c4a8e08d..f8433690 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -1899,6 +1899,56 @@ scbus-1 on xpt0 bus 0 - dsrc.get_data() - self.assertTrue(dsrc.metadata["disable_password"]) - -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_userdata_from_imds(self, m_get_metadata_from_imds): -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ odata = {'HostName': "myhost", 'UserName': "myuser"} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ userdata = "userdataImds" -+ imds_data = copy.deepcopy(NETWORK_METADATA) -+ imds_data["compute"]["osProfile"] = dict( -+ 
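The userdata handling in the patch above expects IMDS to return userData base64 encoded, possibly wrapped across lines, so whitespace is stripped before decoding. A tiny worked example with an invented payload:

import base64

imds_userdata = "dXNlcmRh\ndGFJbWRz\n"  # "userdataImds", wrapped across lines
raw = base64.b64decode("".join(imds_userdata.split()))
print(raw)  # b'userdataImds'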
adminUsername="username1", -+ computerName="hostname1", -+ disablePasswordAuthentication="true", -+ ) -+ imds_data["compute"]["userData"] = b64e(userdata) -+ m_get_metadata_from_imds.return_value = imds_data -+ dsrc = self._get_ds(data) -+ ret = dsrc.get_data() -+ self.assertTrue(ret) -+ self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) -+ -+ @mock.patch(MOCKPATH + 'get_metadata_from_imds') -+ def test_userdata_from_imds_with_customdata_from_OVF( -+ self, m_get_metadata_from_imds): -+ userdataOVF = "userdataOVF" -+ odata = { -+ 'HostName': "myhost", 'UserName': "myuser", -+ 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} -+ } -+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -+ data = { -+ 'ovfcontent': construct_valid_ovf_env(data=odata), -+ 'sys_cfg': sys_cfg -+ } -+ -+ userdataImds = "userdataImds" -+ imds_data = copy.deepcopy(NETWORK_METADATA) -+ imds_data["compute"]["osProfile"] = dict( -+ adminUsername="username1", -+ computerName="hostname1", -+ disablePasswordAuthentication="true", -+ ) -+ imds_data["compute"]["userData"] = b64e(userdataImds) -+ m_get_metadata_from_imds.return_value = imds_data -+ dsrc = self._get_ds(data) -+ ret = dsrc.get_data() -+ self.assertTrue(ret) -+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) -+ - - class TestAzureBounce(CiTestCase): - --- -2.27.0 - diff --git a/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch b/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch deleted file mode 100644 index 6f6c109..0000000 --- a/SOURCES/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch +++ /dev/null @@ -1,177 +0,0 @@ -From 01489fb91f64f6137ddf88c39feabe4296f3a156 Mon Sep 17 00:00:00 2001 -From: Anh Vo -Date: Fri, 23 Apr 2021 10:18:05 -0400 -Subject: [PATCH 4/7] Azure: eject the provisioning iso before reporting ready - (#861) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [4/7] ba830546a62ac5bea33b91d133d364a897b9f6c0 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -Due to hyper-v implementations, iso ejection is more efficient if performed -from within the guest. The code will attempt to perform a best-effort ejection. -Failure during ejection will not prevent reporting ready from happening. If iso -ejection is successful, later iso ejection from the platform will be a no-op. -In the event the iso ejection from the guest fails, iso ejection will still happen at -the platform level. 
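Boiled down, the ejection behaviour described above is a best-effort call to eject(1) whose failure is logged and otherwise ignored, so reporting ready proceeds either way. A standalone sketch; the real code goes through cloudinit.subp and the Azure reporting helpers:

import logging
import subprocess

LOG = logging.getLogger(__name__)

def eject_iso_best_effort(iso_dev):
    try:
        subprocess.run(["eject", iso_dev], check=True)
    except Exception as e:
        LOG.debug("Failed ejecting the provisioning iso: %s", e)

eject_iso_best_effort("/dev/sr0")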
---- - cloudinit/sources/DataSourceAzure.py | 22 +++++++++++++++--- - cloudinit/sources/helpers/azure.py | 23 ++++++++++++++++--- - .../test_datasource/test_azure_helper.py | 13 +++++++++-- - 3 files changed, 50 insertions(+), 8 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index 020b7006..39e67c4f 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -332,6 +332,7 @@ class DataSourceAzure(sources.DataSource): - dsname = 'Azure' - _negotiated = False - _metadata_imds = sources.UNSET -+ _ci_pkl_version = 1 - - def __init__(self, sys_cfg, distro, paths): - sources.DataSource.__init__(self, sys_cfg, distro, paths) -@@ -346,8 +347,13 @@ class DataSourceAzure(sources.DataSource): - # Regenerate network config new_instance boot and every boot - self.update_events['network'].add(EventType.BOOT) - self._ephemeral_dhcp_ctx = None -- - self.failed_desired_api_version = False -+ self.iso_dev = None -+ -+ def _unpickle(self, ci_pkl_version: int) -> None: -+ super()._unpickle(ci_pkl_version) -+ if "iso_dev" not in self.__dict__: -+ self.iso_dev = None - - def __str__(self): - root = sources.DataSource.__str__(self) -@@ -459,6 +465,13 @@ class DataSourceAzure(sources.DataSource): - '%s was not mountable' % cdev, logger_func=LOG.warning) - continue - -+ report_diagnostic_event("Found provisioning metadata in %s" % cdev, -+ logger_func=LOG.debug) -+ -+ # save the iso device for ejection before reporting ready -+ if cdev.startswith("/dev"): -+ self.iso_dev = cdev -+ - perform_reprovision = reprovision or self._should_reprovision(ret) - perform_reprovision_after_nic_attach = ( - reprovision_after_nic_attach or -@@ -1226,7 +1239,9 @@ class DataSourceAzure(sources.DataSource): - @return: The success status of sending the ready signal. - """ - try: -- get_metadata_from_fabric(None, lease['unknown-245']) -+ get_metadata_from_fabric(fallback_lease_file=None, -+ dhcp_opts=lease['unknown-245'], -+ iso_dev=self.iso_dev) - return True - except Exception as e: - report_diagnostic_event( -@@ -1332,7 +1347,8 @@ class DataSourceAzure(sources.DataSource): - metadata_func = partial(get_metadata_from_fabric, - fallback_lease_file=self. 
- dhclient_lease_file, -- pubkey_info=pubkey_info) -+ pubkey_info=pubkey_info, -+ iso_dev=self.iso_dev) - - LOG.debug("negotiating with fabric via agent command %s", - self.ds_cfg['agent_command']) -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index 03e7156b..ad476076 100755 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -865,7 +865,19 @@ class WALinuxAgentShim: - return endpoint_ip_address - - @azure_ds_telemetry_reporter -- def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict: -+ def eject_iso(self, iso_dev) -> None: -+ try: -+ LOG.debug("Ejecting the provisioning iso") -+ subp.subp(['eject', iso_dev]) -+ except Exception as e: -+ report_diagnostic_event( -+ "Failed ejecting the provisioning iso: %s" % e, -+ logger_func=LOG.debug) -+ -+ @azure_ds_telemetry_reporter -+ def register_with_azure_and_fetch_data(self, -+ pubkey_info=None, -+ iso_dev=None) -> dict: - """Gets the VM's GoalState from Azure, uses the GoalState information - to report ready/send the ready signal/provisioning complete signal to - Azure, and then uses pubkey_info to filter and obtain the user's -@@ -891,6 +903,10 @@ class WALinuxAgentShim: - ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) - health_reporter = GoalStateHealthReporter( - goal_state, self.azure_endpoint_client, self.endpoint) -+ -+ if iso_dev is not None: -+ self.eject_iso(iso_dev) -+ - health_reporter.send_ready_signal() - return {'public-keys': ssh_keys} - -@@ -1046,11 +1062,12 @@ class WALinuxAgentShim: - - @azure_ds_telemetry_reporter - def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, -- pubkey_info=None): -+ pubkey_info=None, iso_dev=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, - dhcp_options=dhcp_opts) - try: -- return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) -+ return shim.register_with_azure_and_fetch_data( -+ pubkey_info=pubkey_info, iso_dev=iso_dev) - finally: - shim.clean_up() - -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index 63482c6c..552c7905 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ b/tests/unittests/test_datasource/test_azure_helper.py -@@ -1009,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase): - self.GoalState.return_value.container_id = self.test_container_id - self.GoalState.return_value.instance_id = self.test_instance_id - -+ def test_eject_iso_is_called(self): -+ shim = wa_shim() -+ with mock.patch.object( -+ shim, 'eject_iso', autospec=True -+ ) as m_eject_iso: -+ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") -+ m_eject_iso.assert_called_once_with("/dev/sr0") -+ - def test_http_client_does_not_use_certificate_for_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() -@@ -1283,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): - - def test_calls_shim_register_with_azure_and_fetch_data(self): - m_pubkey_info = mock.MagicMock() -- azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info) -+ azure_helper.get_metadata_from_fabric( -+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") - self.assertEqual( - 1, - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_count) - self.assertEqual( -- mock.call(pubkey_info=m_pubkey_info), -+ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), - self.m_shim.return_value - 
.register_with_azure_and_fetch_data.call_args) - --- -2.27.0 - diff --git a/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch b/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch deleted file mode 100644 index 627fd2b..0000000 --- a/SOURCES/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch +++ /dev/null @@ -1,90 +0,0 @@ -From f11bbe7f04a48eebcb446e283820d7592f76cf86 Mon Sep 17 00:00:00 2001 -From: Johnson Shi -Date: Thu, 25 Mar 2021 07:20:10 -0700 -Subject: [PATCH 2/7] Azure helper: Ensure Azure http handler sleeps between - retries (#842) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [2/7] e8f8bb658b629a8444bd2ba19f109952acf33311 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -Ensure that the Azure helper's http handler sleeps a fixed duration -between retry failure attempts. The http handler will sleep a fixed -duration between failed attempts regardless of whether the attempt -failed due to (1) request timing out or (2) instant failure (no -timeout). - -Due to certain platform issues, the http request to the Azure endpoint -may instantly fail without reaching the http timeout duration. Without -sleeping a fixed duration in between retry attempts, the http handler -will loop through the max retry attempts quickly. This causes the -communication between cloud-init and the Azure platform to be less -resilient due to the short total duration if there is no sleep in -between retries. ---- - cloudinit/sources/helpers/azure.py | 2 ++ - tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++-- - 2 files changed, 11 insertions(+), 2 deletions(-) - -diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py -index d3055d08..03e7156b 100755 ---- a/cloudinit/sources/helpers/azure.py -+++ b/cloudinit/sources/helpers/azure.py -@@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str: - - max_readurl_attempts = 240 - default_readurl_timeout = 5 -+ sleep_duration_between_retries = 5 - periodic_logging_attempts = 12 - - if 'timeout' not in kwargs: -@@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str: - 'attempt %d with exception: %s' % - (url, attempt, e), - logger_func=LOG.debug) -+ time.sleep(sleep_duration_between_retries) - - raise exc - -diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py -index b8899807..63482c6c 100644 ---- a/tests/unittests/test_datasource/test_azure_helper.py -+++ b/tests/unittests/test_datasource/test_azure_helper.py -@@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase): - - max_readurl_attempts = 240 - default_readurl_timeout = 5 -+ sleep_duration_between_retries = 5 - periodic_logging_attempts = 12 - - def setUp(self): -@@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase): - self.m_readurl = patches.enter_context( - mock.patch.object( - azure_helper.url_helper, 'readurl', mock.MagicMock())) -- patches.enter_context( -- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) -+ self.m_sleep = patches.enter_context( -+ mock.patch.object(azure_helper.time, 'sleep', autospec=True)) - - def test_http_with_retries(self): - self.m_readurl.return_value = 'TestResp' -@@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase): - self.m_readurl.call_count, - self.periodic_logging_attempts + 1) - -+ # Ensure that cloud-init did sleep between each 
failed request -+ self.assertEqual( -+ self.m_sleep.call_count, -+ self.periodic_logging_attempts) -+ self.m_sleep.assert_called_with(self.sleep_duration_between_retries) -+ - def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): - self.m_readurl.side_effect = \ - [SentinelException] * self.periodic_logging_attempts + \ --- -2.27.0 - diff --git a/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch b/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch deleted file mode 100644 index 32fe4ac..0000000 --- a/SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch +++ /dev/null @@ -1,47 +0,0 @@ -From c3d41dc6b18df0d74f569b1a0ba43c8118437948 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 14 Jan 2022 16:40:24 +0100 -Subject: [PATCH 3/6] Change netifaces dependency to 0.10.4 (#965) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 44: Datasource for VMware -RH-Commit: [3/6] d25d68427ab8b86ee1521c66483e9300e8fcc735 -RH-Bugzilla: 2026587 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -commit b9d308b4d61d22bacc05bcae59819755975631f8 -Author: Andrew Kutz <101085+akutz@users.noreply.github.com> -Date: Tue Aug 10 15:10:44 2021 -0500 - - Change netifaces dependency to 0.10.4 (#965) - - Change netifaces dependency to 0.10.4 - - Currently versions Ubuntu <=20.10 use netifaces 0.10.4 By requiring - netifaces 0.10.9, the VMware datasource omitted itself from cloud-init - on Ubuntu <=20.10. - - This patch changes the netifaces dependency to 0.10.4. While it is true - there are patches to netifaces post 0.10.4 that are desirable, testing - against the most common network configuration was performed to verify - the VMware datasource will still function with netifaces 0.10.4. - -Signed-off-by: Emanuele Giuseppe Esposito ---- - requirements.txt | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/requirements.txt b/requirements.txt -index 41d01d62..c4adc455 100644 ---- a/requirements.txt -+++ b/requirements.txt -@@ -40,4 +40,4 @@ jsonschema - # and still participate in instance-data by gathering the network in detail at - # runtime and merge that information into the metadata and repersist that to - # disk. --netifaces>=0.10.9 -+netifaces>=0.10.4 --- -2.27.0 - diff --git a/SOURCES/ci-Datasource-for-VMware-953.patch b/SOURCES/ci-Datasource-for-VMware-953.patch deleted file mode 100644 index 137ee07..0000000 --- a/SOURCES/ci-Datasource-for-VMware-953.patch +++ /dev/null @@ -1,2198 +0,0 @@ -From 1917af220242840ec1b21f82f80532cf6548cc00 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 14 Jan 2022 16:34:49 +0100 -Subject: [PATCH 2/6] Datasource for VMware (#953) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 44: Datasource for VMware -RH-Commit: [2/6] bb6e58dfeaf8b64d2801ddb4cb73868cf31de3ef -RH-Bugzilla: 2026587 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -commit 8b4a9bc7b81e61943af873bad92e2133f8275b0b -Author: Andrew Kutz <101085+akutz@users.noreply.github.com> -Date: Mon Aug 9 21:24:07 2021 -0500 - - Datasource for VMware (#953) - - This patch finally introduces the Cloud-Init Datasource for VMware - GuestInfo as a part of cloud-init proper. This datasource has existed - since 2018, and rapidly became the de facto datasource for developers - working with Packer, Terraform, for projects like kube-image-builder, - and the de jure datasource for Photon OS. - - The major change to the datasource from its previous incarnation is - the name. 
Now named DatasourceVMware, this new version of the - datasource will allow multiple transport types in addition to - GuestInfo keys. - - This datasource includes several unique features developed to address - real-world situations: - - * Support for reading any key (metadata, userdata, vendordata) both - from the guestinfo table when running on a VM in vSphere as well as - from an environment variable when running inside of a container, - useful for rapid dev/test. - - * Allows booting with DHCP while still providing full participation - in Cloud-Init instance data and Jinja queries. The netifaces library - provides the ability to inspect the network after it is online, - and the runtime network configuration is then merged into the - existing metadata and persisted to disk. - - * Advertises the local_ipv4 and local_ipv6 addresses via guestinfo - as well. This is useful as Guest Tools is not always able to - identify what would be considered the local address. - - The primary author and current steward of this datasource spoke at - Cloud-Init Con 2020 where there was interest in contributing this datasource - to the Cloud-Init codebase. - - The datasource currently lives in its own GitHub repository at - https://github.com/vmware/cloud-init-vmware-guestinfo. Once the datasource - is merged into Cloud-Init, the old repository will be deprecated. - -Signed-off-by: Emanuele Giuseppe Esposito ---- - README.md | 2 +- - cloudinit/settings.py | 1 + - cloudinit/sources/DataSourceVMware.py | 871 ++++++++++++++++++ - doc/rtd/topics/availability.rst | 1 + - doc/rtd/topics/datasources.rst | 2 +- - doc/rtd/topics/datasources/vmware.rst | 359 ++++++++ - requirements.txt | 9 + - .../unittests/test_datasource/test_common.py | 3 + - .../unittests/test_datasource/test_vmware.py | 377 ++++++++ - tests/unittests/test_ds_identify.py | 279 +++++- - tools/.github-cla-signers | 1 + - tools/ds-identify | 76 +- - 12 files changed, 1977 insertions(+), 4 deletions(-) - create mode 100644 cloudinit/sources/DataSourceVMware.py - create mode 100644 doc/rtd/topics/datasources/vmware.rst - create mode 100644 tests/unittests/test_datasource/test_vmware.py - -diff --git a/README.md b/README.md -index 435405da..aa4fad63 100644 ---- a/README.md -+++ b/README.md -@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! - - | Supported OSes | Supported Public Clouds | Supported Private Clouds | - | --- | --- | --- | --| Alpine Linux
ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />|
-+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware
| - - ## To start developing cloud-init - -diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index 2acf2615..d5f32dbb 100644 ---- a/cloudinit/settings.py -+++ b/cloudinit/settings.py -@@ -42,6 +42,7 @@ CFG_BUILTIN = { - 'Exoscale', - 'RbxCloud', - 'UpCloud', -+ 'VMware', - # At the end to act as a 'catch' when none of the above work... - 'None', - ], -diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py -new file mode 100644 -index 00000000..22ca63de ---- /dev/null -+++ b/cloudinit/sources/DataSourceVMware.py -@@ -0,0 +1,871 @@ -+# Cloud-Init DataSource for VMware -+# -+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved. -+# -+# Authors: Anish Swaminathan -+# Andrew Kutz -+# -+# This file is part of cloud-init. See LICENSE file for license information. -+ -+"""Cloud-Init DataSource for VMware -+ -+This module provides a cloud-init datasource for VMware systems and supports -+multiple transports types, including: -+ -+ * EnvVars -+ * GuestInfo -+ -+Netifaces (https://github.com/al45tair/netifaces) -+ -+ Please note this module relies on the netifaces project to introspect the -+ runtime, network configuration of the host on which this datasource is -+ running. This is in contrast to the rest of cloud-init which uses the -+ cloudinit/netinfo module. -+ -+ The reasons for using netifaces include: -+ -+ * Netifaces is built in C and is more portable across multiple systems -+ and more deterministic than shell exec'ing local network commands and -+ parsing their output. -+ -+ * Netifaces provides a stable way to determine the view of the host's -+ network after DHCP has brought the network online. Unlike most other -+ datasources, this datasource still provides support for JINJA queries -+ based on networking information even when the network is based on a -+ DHCP lease. While this does not tie this datasource directly to -+ netifaces, it does mean the ability to consistently obtain the -+ correct information is paramount. -+ -+ * It is currently possible to execute this datasource on macOS -+ (which many developers use today) to print the output of the -+ get_host_info function. This function calls netifaces to obtain -+ the same runtime network configuration that the datasource would -+ persist to the local system's instance data. -+ -+ However, the netinfo module fails on macOS. The result is either a -+ hung operation that requires a SIGINT to return control to the user, -+ or, if brew is used to install iproute2mac, the ip commands are used -+ but produce output the netinfo module is unable to parse. -+ -+ While macOS is not a target of cloud-init, this feature is quite -+ useful when working on this datasource. -+ -+ For more information about this behavior, please see the following -+ PR comment, https://bit.ly/3fG7OVh. -+ -+ The authors of this datasource are not opposed to moving away from -+ netifaces. The goal may be to eventually do just that. This proviso was -+ added to the top of this module as a way to remind future-us and others -+ why netifaces was used in the first place in order to either smooth the -+ transition away from netifaces or embrace it further up the cloud-init -+ stack. 
-+""" -+ -+import collections -+import copy -+from distutils.spawn import find_executable -+import ipaddress -+import json -+import os -+import socket -+import time -+ -+from cloudinit import dmi, log as logging -+from cloudinit import sources -+from cloudinit import util -+from cloudinit.subp import subp, ProcessExecutionError -+ -+import netifaces -+ -+ -+PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" -+ -+LOG = logging.getLogger(__name__) -+NOVAL = "No value found" -+ -+DATA_ACCESS_METHOD_ENVVAR = "envvar" -+DATA_ACCESS_METHOD_GUESTINFO = "guestinfo" -+ -+VMWARE_RPCTOOL = find_executable("vmware-rpctool") -+REDACT = "redact" -+CLEANUP_GUESTINFO = "cleanup-guestinfo" -+VMX_GUESTINFO = "VMX_GUESTINFO" -+GUESTINFO_EMPTY_YAML_VAL = "---" -+ -+LOCAL_IPV4 = "local-ipv4" -+LOCAL_IPV6 = "local-ipv6" -+WAIT_ON_NETWORK = "wait-on-network" -+WAIT_ON_NETWORK_IPV4 = "ipv4" -+WAIT_ON_NETWORK_IPV6 = "ipv6" -+ -+ -+class DataSourceVMware(sources.DataSource): -+ """ -+ Setting the hostname: -+ The hostname is set by way of the metadata key "local-hostname". -+ -+ Setting the instance ID: -+ The instance ID may be set by way of the metadata key "instance-id". -+ However, if this value is absent then the instance ID is read -+ from the file /sys/class/dmi/id/product_uuid. -+ -+ Configuring the network: -+ The network is configured by setting the metadata key "network" -+ with a value consistent with Network Config Versions 1 or 2, -+ depending on the Linux distro's version of cloud-init: -+ -+ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1 -+ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2 -+ -+ For example, CentOS 7's official cloud-init package is version -+ 0.7.9 and does not support Network Config Version 2. However, -+ this datasource still supports supplying Network Config Version 2 -+ data as long as the Linux distro's cloud-init package is new -+ enough to parse the data. -+ -+ The metadata key "network.encoding" may be used to indicate the -+ format of the metadata key "network". Valid encodings are base64 -+ and gzip+base64. -+ """ -+ -+ dsname = "VMware" -+ -+ def __init__(self, sys_cfg, distro, paths, ud_proc=None): -+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) -+ -+ self.data_access_method = None -+ self.vmware_rpctool = VMWARE_RPCTOOL -+ -+ def _get_data(self): -+ """ -+ _get_data loads the metadata, userdata, and vendordata from one of -+ the following locations in the given order: -+ -+ * envvars -+ * guestinfo -+ -+ Please note when updating this function with support for new data -+ transports, the order should match the order in the dscheck_VMware -+ function from the file ds-identify. -+ """ -+ -+ # Initialize the locally scoped metadata, userdata, and vendordata -+ # variables. They are assigned below depending on the detected data -+ # access method. -+ md, ud, vd = None, None, None -+ -+ # First check to see if there is data via env vars. -+ if os.environ.get(VMX_GUESTINFO, ""): -+ md = guestinfo_envvar("metadata") -+ ud = guestinfo_envvar("userdata") -+ vd = guestinfo_envvar("vendordata") -+ -+ if md or ud or vd: -+ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR -+ -+ # At this point, all additional data transports are valid only on -+ # a VMware platform. 
-+ if not self.data_access_method: -+ system_type = dmi.read_dmi_data("system-product-name") -+ if system_type is None: -+ LOG.debug("No system-product-name found") -+ return False -+ if "vmware" not in system_type.lower(): -+ LOG.debug("Not a VMware platform") -+ return False -+ -+ # If no data was detected, check the guestinfo transport next. -+ if not self.data_access_method: -+ if self.vmware_rpctool: -+ md = guestinfo("metadata", self.vmware_rpctool) -+ ud = guestinfo("userdata", self.vmware_rpctool) -+ vd = guestinfo("vendordata", self.vmware_rpctool) -+ -+ if md or ud or vd: -+ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO -+ -+ if not self.data_access_method: -+ LOG.error("failed to find a valid data access method") -+ return False -+ -+ LOG.info("using data access method %s", self._get_subplatform()) -+ -+ # Get the metadata. -+ self.metadata = process_metadata(load_json_or_yaml(md)) -+ -+ # Get the user data. -+ self.userdata_raw = ud -+ -+ # Get the vendor data. -+ self.vendordata_raw = vd -+ -+ # Redact any sensitive information. -+ self.redact_keys() -+ -+ # get_data returns true if there is any available metadata, -+ # userdata, or vendordata. -+ if self.metadata or self.userdata_raw or self.vendordata_raw: -+ return True -+ else: -+ return False -+ -+ def setup(self, is_new_instance): -+ """setup(is_new_instance) -+ -+ This is called before user-data and vendor-data have been processed. -+ -+ Unless the datasource has set mode to 'local', then networking -+ per 'fallback' or per 'network_config' will have been written and -+ brought up the OS at this point. -+ """ -+ -+ host_info = wait_on_network(self.metadata) -+ LOG.info("got host-info: %s", host_info) -+ -+ # Reflect any possible local IPv4 or IPv6 addresses in the guest -+ # info. -+ advertise_local_ip_addrs(host_info) -+ -+ # Ensure the metadata gets updated with information about the -+ # host, including the network interfaces, default IP addresses, -+ # etc. -+ self.metadata = util.mergemanydict([self.metadata, host_info]) -+ -+ # Persist the instance data for versions of cloud-init that support -+ # doing so. This occurs here rather than in the get_data call in -+ # order to ensure that the network interfaces are up and can be -+ # persisted with the metadata. -+ self.persist_instance_data() -+ -+ def _get_subplatform(self): -+ get_key_name_fn = None -+ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR: -+ get_key_name_fn = get_guestinfo_envvar_key_name -+ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: -+ get_key_name_fn = get_guestinfo_key_name -+ else: -+ return sources.METADATA_UNKNOWN -+ -+ return "%s (%s)" % ( -+ self.data_access_method, -+ get_key_name_fn("metadata"), -+ ) -+ -+ @property -+ def network_config(self): -+ if "network" in self.metadata: -+ LOG.debug("using metadata network config") -+ else: -+ LOG.debug("using fallback network config") -+ self.metadata["network"] = { -+ "config": self.distro.generate_fallback_config(), -+ } -+ return self.metadata["network"]["config"] -+ -+ def get_instance_id(self): -+ # Pull the instance ID out of the metadata if present. Otherwise -+ # read the file /sys/class/dmi/id/product_uuid for the instance ID. 
-+ if self.metadata and "instance-id" in self.metadata: -+ return self.metadata["instance-id"] -+ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file: -+ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower() -+ return self.metadata["instance-id"] -+ -+ def get_public_ssh_keys(self): -+ for key_name in ( -+ "public-keys-data", -+ "public_keys_data", -+ "public-keys", -+ "public_keys", -+ ): -+ if key_name in self.metadata: -+ return sources.normalize_pubkey_data(self.metadata[key_name]) -+ return [] -+ -+ def redact_keys(self): -+ # Determine if there are any keys to redact. -+ keys_to_redact = None -+ if REDACT in self.metadata: -+ keys_to_redact = self.metadata[REDACT] -+ elif CLEANUP_GUESTINFO in self.metadata: -+ # This is for backwards compatibility. -+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO] -+ -+ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: -+ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool) -+ -+ -+def decode(key, enc_type, data): -+ """ -+ decode returns the decoded string value of data -+ key is a string used to identify the data being decoded in log messages -+ """ -+ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type) -+ -+ raw_data = None -+ if enc_type in ["gzip+base64", "gz+b64"]: -+ LOG.debug("Decoding %s format %s", enc_type, key) -+ raw_data = util.decomp_gzip(util.b64d(data)) -+ elif enc_type in ["base64", "b64"]: -+ LOG.debug("Decoding %s format %s", enc_type, key) -+ raw_data = util.b64d(data) -+ else: -+ LOG.debug("Plain-text data %s", key) -+ raw_data = data -+ -+ return util.decode_binary(raw_data) -+ -+ -+def get_none_if_empty_val(val): -+ """ -+ get_none_if_empty_val returns None if the provided value, once stripped -+ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL. -+ -+ The return value is always a string, regardless of whether the input is -+ a bytes class or a string. -+ """ -+ -+ # If the provided value is a bytes class, convert it to a string to -+ # simplify the rest of this function's logic. -+ val = util.decode_binary(val) -+ val = val.rstrip() -+ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL: -+ return None -+ return val -+ -+ -+def advertise_local_ip_addrs(host_info): -+ """ -+ advertise_local_ip_addrs gets the local IP address information from -+ the provided host_info map and sets the addresses in the guestinfo -+ namespace -+ """ -+ if not host_info: -+ return -+ -+ # Reflect any possible local IPv4 or IPv6 addresses in the guest -+ # info. -+ local_ipv4 = host_info.get(LOCAL_IPV4) -+ if local_ipv4: -+ guestinfo_set_value(LOCAL_IPV4, local_ipv4) -+ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4) -+ -+ local_ipv6 = host_info.get(LOCAL_IPV6) -+ if local_ipv6: -+ guestinfo_set_value(LOCAL_IPV6, local_ipv6) -+ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6) -+ -+ -+def handle_returned_guestinfo_val(key, val): -+ """ -+ handle_returned_guestinfo_val returns the provided value if it is -+ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is -+ returned -+ """ -+ val = get_none_if_empty_val(val) -+ if val: -+ return val -+ LOG.debug("No value found for key %s", key) -+ return None -+ -+ -+def get_guestinfo_key_name(key): -+ return "guestinfo." + key -+ -+ -+def get_guestinfo_envvar_key_name(key): -+ return ("vmx." 
+ get_guestinfo_key_name(key)).upper().replace(".", "_", -1) -+ -+ -+def guestinfo_envvar(key): -+ val = guestinfo_envvar_get_value(key) -+ if not val: -+ return None -+ enc_type = guestinfo_envvar_get_value(key + ".encoding") -+ return decode(get_guestinfo_envvar_key_name(key), enc_type, val) -+ -+ -+def guestinfo_envvar_get_value(key): -+ env_key = get_guestinfo_envvar_key_name(key) -+ return handle_returned_guestinfo_val(key, os.environ.get(env_key, "")) -+ -+ -+def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL): -+ """ -+ guestinfo returns the guestinfo value for the provided key, decoding -+ the value when required -+ """ -+ val = guestinfo_get_value(key, vmware_rpctool) -+ if not val: -+ return None -+ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool) -+ return decode(get_guestinfo_key_name(key), enc_type, val) -+ -+ -+def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL): -+ """ -+ Returns a guestinfo value for the specified key. -+ """ -+ LOG.debug("Getting guestinfo value for key %s", key) -+ -+ try: -+ (stdout, stderr) = subp( -+ [ -+ vmware_rpctool, -+ "info-get " + get_guestinfo_key_name(key), -+ ] -+ ) -+ if stderr == NOVAL: -+ LOG.debug("No value found for key %s", key) -+ elif not stdout: -+ LOG.error("Failed to get guestinfo value for key %s", key) -+ return handle_returned_guestinfo_val(key, stdout) -+ except ProcessExecutionError as error: -+ if error.stderr == NOVAL: -+ LOG.debug("No value found for key %s", key) -+ else: -+ util.logexc( -+ LOG, -+ "Failed to get guestinfo value for key %s: %s", -+ key, -+ error, -+ ) -+ except Exception: -+ util.logexc( -+ LOG, -+ "Unexpected error while trying to get " -+ + "guestinfo value for key %s", -+ key, -+ ) -+ -+ return None -+ -+ -+def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL): -+ """ -+ Sets a guestinfo value for the specified key. Set value to an empty string -+ to clear an existing guestinfo key. -+ """ -+ -+ # If value is an empty string then set it to a single space as it is not -+ # possible to set a guestinfo key to an empty string. Setting a guestinfo -+ # key to a single space is as close as it gets to clearing an existing -+ # guestinfo key. -+ if value == "": -+ value = " " -+ -+ LOG.debug("Setting guestinfo key=%s to value=%s", key, value) -+ -+ try: -+ subp( -+ [ -+ vmware_rpctool, -+ ("info-set %s %s" % (get_guestinfo_key_name(key), value)), -+ ] -+ ) -+ return True -+ except ProcessExecutionError as error: -+ util.logexc( -+ LOG, -+ "Failed to set guestinfo key=%s to value=%s: %s", -+ key, -+ value, -+ error, -+ ) -+ except Exception: -+ util.logexc( -+ LOG, -+ "Unexpected error while trying to set " -+ + "guestinfo key=%s to value=%s", -+ key, -+ value, -+ ) -+ -+ return None -+ -+ -+def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL): -+ """ -+ guestinfo_redact_keys redacts guestinfo of all of the keys in the given -+ list. each key will have its value set to "---". Since the value is valid -+ YAML, cloud-init can still read it if it tries. 
-+ """ -+ if not keys: -+ return -+ if not type(keys) in (list, tuple): -+ keys = [keys] -+ for key in keys: -+ key_name = get_guestinfo_key_name(key) -+ LOG.info("clearing %s", key_name) -+ if not guestinfo_set_value( -+ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool -+ ): -+ LOG.error("failed to clear %s", key_name) -+ LOG.info("clearing %s.encoding", key_name) -+ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool): -+ LOG.error("failed to clear %s.encoding", key_name) -+ -+ -+def load_json_or_yaml(data): -+ """ -+ load first attempts to unmarshal the provided data as JSON, and if -+ that fails then attempts to unmarshal the data as YAML. If data is -+ None then a new dictionary is returned. -+ """ -+ if not data: -+ return {} -+ try: -+ return util.load_json(data) -+ except (json.JSONDecodeError, TypeError): -+ return util.load_yaml(data) -+ -+ -+def process_metadata(data): -+ """ -+ process_metadata processes metadata and loads the optional network -+ configuration. -+ """ -+ network = None -+ if "network" in data: -+ network = data["network"] -+ del data["network"] -+ -+ network_enc = None -+ if "network.encoding" in data: -+ network_enc = data["network.encoding"] -+ del data["network.encoding"] -+ -+ if network: -+ if isinstance(network, collections.abc.Mapping): -+ LOG.debug("network data copied to 'config' key") -+ network = {"config": copy.deepcopy(network)} -+ else: -+ LOG.debug("network data to be decoded %s", network) -+ dec_net = decode("metadata.network", network_enc, network) -+ network = { -+ "config": load_json_or_yaml(dec_net), -+ } -+ -+ LOG.debug("network data %s", network) -+ data["network"] = network -+ -+ return data -+ -+ -+# Used to match classes to dependencies -+datasources = [ -+ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local -+ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -+] -+ -+ -+def get_datasource_list(depends): -+ """ -+ Return a list of data sources that match this set of dependencies -+ """ -+ return sources.list_from_depends(depends, datasources) -+ -+ -+def get_default_ip_addrs(): -+ """ -+ Returns the default IPv4 and IPv6 addresses based on the device(s) used for -+ the default route. Please note that None may be returned for either address -+ family if that family has no default route or if there are multiple -+ addresses associated with the device used by the default route for a given -+ address. -+ """ -+ # TODO(promote and use netifaces in cloudinit.net* modules) -+ gateways = netifaces.gateways() -+ if "default" not in gateways: -+ return None, None -+ -+ default_gw = gateways["default"] -+ if ( -+ netifaces.AF_INET not in default_gw -+ and netifaces.AF_INET6 not in default_gw -+ ): -+ return None, None -+ -+ ipv4 = None -+ ipv6 = None -+ -+ gw4 = default_gw.get(netifaces.AF_INET) -+ if gw4: -+ _, dev4 = gw4 -+ addr4_fams = netifaces.ifaddresses(dev4) -+ if addr4_fams: -+ af_inet4 = addr4_fams.get(netifaces.AF_INET) -+ if af_inet4: -+ if len(af_inet4) > 1: -+ LOG.warning( -+ "device %s has more than one ipv4 address: %s", -+ dev4, -+ af_inet4, -+ ) -+ elif "addr" in af_inet4[0]: -+ ipv4 = af_inet4[0]["addr"] -+ -+ # Try to get the default IPv6 address by first seeing if there is a default -+ # IPv6 route. 
-+ gw6 = default_gw.get(netifaces.AF_INET6) -+ if gw6: -+ _, dev6 = gw6 -+ addr6_fams = netifaces.ifaddresses(dev6) -+ if addr6_fams: -+ af_inet6 = addr6_fams.get(netifaces.AF_INET6) -+ if af_inet6: -+ if len(af_inet6) > 1: -+ LOG.warning( -+ "device %s has more than one ipv6 address: %s", -+ dev6, -+ af_inet6, -+ ) -+ elif "addr" in af_inet6[0]: -+ ipv6 = af_inet6[0]["addr"] -+ -+ # If there is a default IPv4 address but not IPv6, then see if there is a -+ # single IPv6 address associated with the same device associated with the -+ # default IPv4 address. -+ if ipv4 and not ipv6: -+ af_inet6 = addr4_fams.get(netifaces.AF_INET6) -+ if af_inet6: -+ if len(af_inet6) > 1: -+ LOG.warning( -+ "device %s has more than one ipv6 address: %s", -+ dev4, -+ af_inet6, -+ ) -+ elif "addr" in af_inet6[0]: -+ ipv6 = af_inet6[0]["addr"] -+ -+ # If there is a default IPv6 address but not IPv4, then see if there is a -+ # single IPv4 address associated with the same device associated with the -+ # default IPv6 address. -+ if not ipv4 and ipv6: -+ af_inet4 = addr6_fams.get(netifaces.AF_INET) -+ if af_inet4: -+ if len(af_inet4) > 1: -+ LOG.warning( -+ "device %s has more than one ipv4 address: %s", -+ dev6, -+ af_inet4, -+ ) -+ elif "addr" in af_inet4[0]: -+ ipv4 = af_inet4[0]["addr"] -+ -+ return ipv4, ipv6 -+ -+ -+# patched socket.getfqdn() - see https://bugs.python.org/issue5004 -+ -+ -+def getfqdn(name=""): -+ """Get fully qualified domain name from name. -+ An empty argument is interpreted as meaning the local host. -+ """ -+ # TODO(may want to promote this function to util.getfqdn) -+ # TODO(may want to extend util.get_hostname to accept fqdn=True param) -+ name = name.strip() -+ if not name or name == "0.0.0.0": -+ name = util.get_hostname() -+ try: -+ addrs = socket.getaddrinfo( -+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME -+ ) -+ except socket.error: -+ pass -+ else: -+ for addr in addrs: -+ if addr[3]: -+ name = addr[3] -+ break -+ return name -+ -+ -+def is_valid_ip_addr(val): -+ """ -+ Returns false if the address is loopback, link local or unspecified; -+ otherwise true is returned. -+ """ -+ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc) -+ # TODO(migrate to use cloudinit.net.is_ip_addr)# -+ -+ addr = None -+ try: -+ addr = ipaddress.ip_address(val) -+ except ipaddress.AddressValueError: -+ addr = ipaddress.ip_address(str(val)) -+ except Exception: -+ return None -+ -+ if addr.is_link_local or addr.is_loopback or addr.is_unspecified: -+ return False -+ return True -+ -+ -+def get_host_info(): -+ """ -+ Returns host information such as the host name and network interfaces. 
-+ """ -+ # TODO(look to promote netifices use up in cloud-init netinfo funcs) -+ host_info = { -+ "network": { -+ "interfaces": { -+ "by-mac": collections.OrderedDict(), -+ "by-ipv4": collections.OrderedDict(), -+ "by-ipv6": collections.OrderedDict(), -+ }, -+ }, -+ } -+ hostname = getfqdn(util.get_hostname()) -+ if hostname: -+ host_info["hostname"] = hostname -+ host_info["local-hostname"] = hostname -+ host_info["local_hostname"] = hostname -+ -+ default_ipv4, default_ipv6 = get_default_ip_addrs() -+ if default_ipv4: -+ host_info[LOCAL_IPV4] = default_ipv4 -+ if default_ipv6: -+ host_info[LOCAL_IPV6] = default_ipv6 -+ -+ by_mac = host_info["network"]["interfaces"]["by-mac"] -+ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"] -+ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"] -+ -+ ifaces = netifaces.interfaces() -+ for dev_name in ifaces: -+ addr_fams = netifaces.ifaddresses(dev_name) -+ af_link = addr_fams.get(netifaces.AF_LINK) -+ af_inet4 = addr_fams.get(netifaces.AF_INET) -+ af_inet6 = addr_fams.get(netifaces.AF_INET6) -+ -+ mac = None -+ if af_link and "addr" in af_link[0]: -+ mac = af_link[0]["addr"] -+ -+ # Do not bother recording localhost -+ if mac == "00:00:00:00:00:00": -+ continue -+ -+ if mac and (af_inet4 or af_inet6): -+ key = mac -+ val = {} -+ if af_inet4: -+ af_inet4_vals = [] -+ for ip_info in af_inet4: -+ if not is_valid_ip_addr(ip_info["addr"]): -+ continue -+ af_inet4_vals.append(ip_info) -+ val["ipv4"] = af_inet4_vals -+ if af_inet6: -+ af_inet6_vals = [] -+ for ip_info in af_inet6: -+ if not is_valid_ip_addr(ip_info["addr"]): -+ continue -+ af_inet6_vals.append(ip_info) -+ val["ipv6"] = af_inet6_vals -+ by_mac[key] = val -+ -+ if af_inet4: -+ for ip_info in af_inet4: -+ key = ip_info["addr"] -+ if not is_valid_ip_addr(key): -+ continue -+ val = copy.deepcopy(ip_info) -+ del val["addr"] -+ if mac: -+ val["mac"] = mac -+ by_ipv4[key] = val -+ -+ if af_inet6: -+ for ip_info in af_inet6: -+ key = ip_info["addr"] -+ if not is_valid_ip_addr(key): -+ continue -+ val = copy.deepcopy(ip_info) -+ del val["addr"] -+ if mac: -+ val["mac"] = mac -+ by_ipv6[key] = val -+ -+ return host_info -+ -+ -+def wait_on_network(metadata): -+ # Determine whether we need to wait on the network coming online. -+ wait_on_ipv4 = False -+ wait_on_ipv6 = False -+ if WAIT_ON_NETWORK in metadata: -+ wait_on_network = metadata[WAIT_ON_NETWORK] -+ if WAIT_ON_NETWORK_IPV4 in wait_on_network: -+ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4] -+ if isinstance(wait_on_ipv4_val, bool): -+ wait_on_ipv4 = wait_on_ipv4_val -+ else: -+ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val) -+ if WAIT_ON_NETWORK_IPV6 in wait_on_network: -+ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6] -+ if isinstance(wait_on_ipv6_val, bool): -+ wait_on_ipv6 = wait_on_ipv6_val -+ else: -+ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val) -+ -+ # Get information about the host. -+ host_info = None -+ while host_info is None: -+ # This loop + sleep results in two logs every second while waiting -+ # for either ipv4 or ipv6 up. Do we really need to log each iteration -+ # or can we log once and log on successful exit? 
-+ host_info = get_host_info() -+ -+ network = host_info.get("network") or {} -+ interfaces = network.get("interfaces") or {} -+ by_ipv4 = interfaces.get("by-ipv4") or {} -+ by_ipv6 = interfaces.get("by-ipv6") or {} -+ -+ if wait_on_ipv4: -+ ipv4_ready = len(by_ipv4) > 0 if by_ipv4 else False -+ if not ipv4_ready: -+ host_info = None -+ -+ if wait_on_ipv6: -+ ipv6_ready = len(by_ipv6) > 0 if by_ipv6 else False -+ if not ipv6_ready: -+ host_info = None -+ -+ if host_info is None: -+ LOG.debug( -+ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s", -+ wait_on_ipv4, -+ ipv4_ready, -+ wait_on_ipv6, -+ ipv6_ready, -+ ) -+ time.sleep(1) -+ -+ LOG.debug("waiting on network complete") -+ return host_info -+ -+ -+def main(): -+ """ -+ Executed when this file is used as a program. -+ """ -+ try: -+ logging.setupBasicLogging() -+ except Exception: -+ pass -+ metadata = { -+ "wait-on-network": {"ipv4": True, "ipv6": "false"}, -+ "network": {"config": {"dhcp": True}}, -+ } -+ host_info = wait_on_network(metadata) -+ metadata = util.mergemanydict([metadata, host_info]) -+ print(util.json_dumps(metadata)) -+ -+ -+if __name__ == "__main__": -+ main() -+ -+# vi: ts=4 expandtab -diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst -index f58b2b38..6606367c 100644 ---- a/doc/rtd/topics/availability.rst -+++ b/doc/rtd/topics/availability.rst -@@ -64,5 +64,6 @@ Additionally, cloud-init is supported on these private clouds: - - LXD - - KVM - - Metal-as-a-Service (MAAS) -+- VMware - - .. vi: textwidth=79 -diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst -index 228173d2..8afed470 100644 ---- a/doc/rtd/topics/datasources.rst -+++ b/doc/rtd/topics/datasources.rst -@@ -49,7 +49,7 @@ The following is a list of documents for each supported datasource: - datasources/smartos.rst - datasources/upcloud.rst - datasources/zstack.rst -- -+ datasources/vmware.rst - - Creation - ======== -diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst -new file mode 100644 -index 00000000..996eb61f ---- /dev/null -+++ b/doc/rtd/topics/datasources/vmware.rst -@@ -0,0 +1,359 @@ -+.. _datasource_vmware: -+ -+VMware -+====== -+ -+This datasource is for use with systems running on a VMware platform such as -+vSphere and currently supports the following data transports: -+ -+ -+* `GuestInfo `_ keys -+ -+Configuration -+------------- -+ -+The configuration method is dependent upon the transport: -+ -+GuestInfo Keys -+^^^^^^^^^^^^^^ -+ -+One method of providing meta, user, and vendor data is by setting the following -+key/value pairs on a VM's ``extraConfig`` `property `_ : -+ -+.. list-table:: -+ :header-rows: 1 -+ -+ * - Property -+ - Description -+ * - ``guestinfo.metadata`` -+ - A YAML or JSON document containing the cloud-init metadata. -+ * - ``guestinfo.metadata.encoding`` -+ - The encoding type for ``guestinfo.metadata``. -+ * - ``guestinfo.userdata`` -+ - A YAML document containing the cloud-init user data. -+ * - ``guestinfo.userdata.encoding`` -+ - The encoding type for ``guestinfo.userdata``. -+ * - ``guestinfo.vendordata`` -+ - A YAML document containing the cloud-init vendor data. -+ * - ``guestinfo.vendordata.encoding`` -+ - The encoding type for ``guestinfo.vendordata``. -+ -+ -+All ``guestinfo.*.encoding`` values may be set to ``base64`` or -+``gzip+base64``. -+ -+Features -+-------- -+ -+This section reviews several features available in this datasource, regardless -+of how the meta, user, and vendor data was discovered. 
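As a quick, illustrative aside (not part of the patch): the ``gzip+base64`` encoding listed in the configuration table above can be produced with nothing more than the Python standard library; the metadata document below is a hypothetical example.

    import base64
    import gzip

    # Hypothetical metadata; any YAML or JSON document the datasource accepts works.
    metadata_yaml = "instance-id: cloud-vm\nlocal-hostname: cloud-vm\n"

    encoded = base64.b64encode(
        gzip.compress(metadata_yaml.encode("utf-8"))
    ).decode("ascii")

    # The VM's extraConfig would then carry, for example:
    #   guestinfo.metadata          = <the encoded value above>
    #   guestinfo.metadata.encoding = gzip+base64
    print(encoded)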
-+ -+Instance data and lazy networks -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+One of the hallmarks of cloud-init is `its use of instance-data and JINJA -+queries <../instancedata.html#using-instance-data>`_ -+-- the ability to write queries in user and vendor data that reference runtime -+information present in ``/run/cloud-init/instance-data.json``. This works well -+when the metadata provides all of the information up front, such as the network -+configuration. For systems that rely on DHCP, however, this information may not -+be available when the metadata is persisted to disk. -+ -+This datasource ensures that even if the instance is using DHCP to configure -+networking, the same details about the configured network are available in -+``/run/cloud-init/instance-data.json`` as if static networking was used. This -+information collected at runtime is easy to demonstrate by executing the -+datasource on the command line. From the root of this repository, run the -+following command: -+ -+.. code-block:: bash -+ -+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py -+ -+The above command will result in output similar to the below JSON: -+ -+.. code-block:: json -+ -+ { -+ "hostname": "akutz.localhost", -+ "local-hostname": "akutz.localhost", -+ "local-ipv4": "192.168.0.188", -+ "local_hostname": "akutz.localhost", -+ "network": { -+ "config": { -+ "dhcp": true -+ }, -+ "interfaces": { -+ "by-ipv4": { -+ "172.0.0.2": { -+ "netmask": "255.255.255.255", -+ "peer": "172.0.0.2" -+ }, -+ "192.168.0.188": { -+ "broadcast": "192.168.0.255", -+ "mac": "64:4b:f0:18:9a:21", -+ "netmask": "255.255.255.0" -+ } -+ }, -+ "by-ipv6": { -+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": { -+ "flags": 208, -+ "mac": "64:4b:f0:18:9a:21", -+ "netmask": "ffff:ffff:ffff:ffff::/64" -+ } -+ }, -+ "by-mac": { -+ "64:4b:f0:18:9a:21": { -+ "ipv4": [ -+ { -+ "addr": "192.168.0.188", -+ "broadcast": "192.168.0.255", -+ "netmask": "255.255.255.0" -+ } -+ ], -+ "ipv6": [ -+ { -+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2", -+ "flags": 208, -+ "netmask": "ffff:ffff:ffff:ffff::/64" -+ } -+ ] -+ }, -+ "ac:de:48:00:11:22": { -+ "ipv6": [] -+ } -+ } -+ } -+ }, -+ "wait-on-network": { -+ "ipv4": true, -+ "ipv6": "false" -+ } -+ } -+ -+ -+Redacting sensitive information -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+Sometimes the cloud-init userdata might contain sensitive information, and it -+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo -+keys) redacted as soon as its data is read by the datasource. This is possible -+by adding the following to the metadata: -+ -+.. code-block:: yaml -+ -+ redact: # formerly named cleanup-guestinfo, which will also work -+ - userdata -+ - vendordata -+ -+When the above snippet is added to the metadata, the datasource will iterate -+over the elements in the ``redact`` array and clear each of the keys. For -+example, when the guestinfo transport is used, the above snippet will cause -+the following commands to be executed: -+ -+.. code-block:: shell -+ -+ vmware-rpctool "info-set guestinfo.userdata ---" -+ vmware-rpctool "info-set guestinfo.userdata.encoding " -+ vmware-rpctool "info-set guestinfo.vendordata ---" -+ vmware-rpctool "info-set guestinfo.vendordata.encoding " -+ -+Please note that keys are set to the valid YAML string ``---`` as it is not -+possible remove an existing key from the guestinfo key-space. 
A key's analogous -+encoding property will be set to a single white-space character, causing the -+datasource to treat the actual key value as plain-text, thereby loading it as -+an empty YAML doc (hence the aforementioned ``---``\ ). -+ -+Reading the local IP addresses -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+This datasource automatically discovers the local IPv4 and IPv6 addresses for -+a guest operating system based on the default routes. However, when inspecting -+a VM externally, it's not possible to know what the *default* IP address is for -+the guest OS. That's why this datasource sets the discovered, local IPv4 and -+IPv6 addresses back in the guestinfo namespace as the following keys: -+ -+ -+* ``guestinfo.local-ipv4`` -+* ``guestinfo.local-ipv6`` -+ -+It is possible that a host may not have any default, local IP addresses. It's -+also possible the reported, local addresses are link-local addresses. But these -+two keys may be used to discover what this datasource determined were the local -+IPv4 and IPv6 addresses for a host. -+ -+Waiting on the network -+^^^^^^^^^^^^^^^^^^^^^^ -+ -+Sometimes cloud-init may bring up the network, but it will not finish coming -+online before the datasource's ``setup`` function is called, resulting in an -+``/var/run/cloud-init/instance-data.json`` file that does not have the correct -+network information. It is possible to instruct the datasource to wait until an -+IPv4 or IPv6 address is available before writing the instance data with the -+following metadata properties: -+ -+.. code-block:: yaml -+ -+ wait-on-network: -+ ipv4: true -+ ipv6: true -+ -+If either of the above values are true, then the datasource will sleep for a -+second, check the network status, and repeat until one or both addresses from -+the specified families are available. -+ -+Walkthrough -+----------- -+ -+The following series of steps is a demonstration on how to configure a VM with -+this datasource: -+ -+ -+#. Create the metadata file for the VM. Save the following YAML to a file named -+ ``metadata.yaml``\ : -+ -+ .. code-block:: yaml -+ -+ instance-id: cloud-vm -+ local-hostname: cloud-vm -+ network: -+ version: 2 -+ ethernets: -+ nics: -+ match: -+ name: ens* -+ dhcp4: yes -+ -+#. Create the userdata file ``userdata.yaml``\ : -+ -+ .. code-block:: yaml -+ -+ #cloud-config -+ -+ users: -+ - default -+ - name: akutz -+ primary_group: akutz -+ sudo: ALL=(ALL) NOPASSWD:ALL -+ groups: sudo, wheel -+ ssh_import_id: None -+ lock_passwd: true -+ ssh_authorized_keys: -+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com -+ -+#. Please note this step requires that the VM be powered off. All of the -+ commands below use the VMware CLI tool, `govc `_. -+ -+ Go ahead and assign the path to the VM to the environment variable ``VM``\ : -+ -+ .. 
code-block:: shell -+ -+ export VM="/inventory/path/to/the/vm" -+ -+#. Power off the VM: -+ -+ .. raw:: html -+ -+
-+ -+ ⚠️ First Boot Mode -+ -+ To ensure the next power-on operation results in a first-boot scenario for -+ cloud-init, it may be necessary to run the following command just before -+ powering off the VM: -+ -+ .. code-block:: bash -+ -+ cloud-init clean -+ -+ Otherwise cloud-init may not run in first-boot mode. For more information -+ on how the boot mode is determined, please see the -+ `First Boot Documentation <../boot.html#first-boot-determination>`_. -+ -+ .. raw:: html -+ -+
-+ -+ .. code-block:: shell -+ -+ govc vm.power -off "${VM}" -+ -+#. -+ Export the environment variables that contain the cloud-init metadata and -+ userdata: -+ -+ .. code-block:: shell -+ -+ export METADATA=$(gzip -c9 /dev/null || base64; }) \ -+ USERDATA=$(gzip -c9 /dev/null || base64; }) -+ -+#. -+ Assign the metadata and userdata to the VM: -+ -+ .. code-block:: shell -+ -+ govc vm.change -vm "${VM}" \ -+ -e guestinfo.metadata="${METADATA}" \ -+ -e guestinfo.metadata.encoding="gzip+base64" \ -+ -e guestinfo.userdata="${USERDATA}" \ -+ -e guestinfo.userdata.encoding="gzip+base64" -+ -+ Please note the above commands include specifying the encoding for the -+ properties. This is important as it informs the datasource how to decode -+ the data for cloud-init. Valid values for ``metadata.encoding`` and -+ ``userdata.encoding`` include: -+ -+ -+ * ``base64`` -+ * ``gzip+base64`` -+ -+#. -+ Power on the VM: -+ -+ .. code-block:: shell -+ -+ govc vm.power -vm "${VM}" -on -+ -+If all went according to plan, the CentOS box is: -+ -+* Locked down, allowing SSH access only for the user in the userdata -+* Configured for a dynamic IP address via DHCP -+* Has a hostname of ``cloud-vm`` -+ -+Examples -+-------- -+ -+This section reviews common configurations: -+ -+Setting the hostname -+^^^^^^^^^^^^^^^^^^^^ -+ -+The hostname is set by way of the metadata key ``local-hostname``. -+ -+Setting the instance ID -+^^^^^^^^^^^^^^^^^^^^^^^ -+ -+The instance ID may be set by way of the metadata key ``instance-id``. However, -+if this value is absent then then the instance ID is read from the file -+``/sys/class/dmi/id/product_uuid``. -+ -+Providing public SSH keys -+^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+The public SSH keys may be set by way of the metadata key ``public-keys-data``. -+Each newline-terminated string will be interpreted as a separate SSH public -+key, which will be placed in distro's default user's -+``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will -+be written to ``~/.ssh/authorized_keys``. -+ -+Configuring the network -+^^^^^^^^^^^^^^^^^^^^^^^ -+ -+The network is configured by setting the metadata key ``network`` with a value -+consistent with Network Config Versions -+`1 <../network-config-format-v1.html>`_ or -+`2 <../network-config-format-v2.html>`_\ , depending on the Linux -+distro's version of cloud-init. -+ -+The metadata key ``network.encoding`` may be used to indicate the format of -+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``. -diff --git a/requirements.txt b/requirements.txt -index 5817da3b..41d01d62 100644 ---- a/requirements.txt -+++ b/requirements.txt -@@ -32,3 +32,12 @@ jsonpatch - - # For validating cloud-config sections per schema definitions - jsonschema -+ -+# Used by DataSourceVMware to inspect the host's network configuration during -+# the "setup()" function. -+# -+# This allows a host that uses DHCP to bring up the network during BootLocal -+# and still participate in instance-data by gathering the network in detail at -+# runtime and merge that information into the metadata and repersist that to -+# disk. 
-+netifaces>=0.10.9 -diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py -index 5912f7ee..475a2cf8 100644 ---- a/tests/unittests/test_datasource/test_common.py -+++ b/tests/unittests/test_datasource/test_common.py -@@ -28,6 +28,7 @@ from cloudinit.sources import ( - DataSourceScaleway as Scaleway, - DataSourceSmartOS as SmartOS, - DataSourceUpCloud as UpCloud, -+ DataSourceVMware as VMware, - ) - from cloudinit.sources import DataSourceNone as DSNone - -@@ -50,6 +51,7 @@ DEFAULT_LOCAL = [ - RbxCloud.DataSourceRbxCloud, - Scaleway.DataSourceScaleway, - UpCloud.DataSourceUpCloudLocal, -+ VMware.DataSourceVMware, - ] - - DEFAULT_NETWORK = [ -@@ -66,6 +68,7 @@ DEFAULT_NETWORK = [ - OpenStack.DataSourceOpenStack, - OVF.DataSourceOVFNet, - UpCloud.DataSourceUpCloud, -+ VMware.DataSourceVMware, - ] - - -diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py -new file mode 100644 -index 00000000..597db7c8 ---- /dev/null -+++ b/tests/unittests/test_datasource/test_vmware.py -@@ -0,0 +1,377 @@ -+# Copyright (c) 2021 VMware, Inc. All Rights Reserved. -+# -+# Authors: Andrew Kutz -+# -+# This file is part of cloud-init. See LICENSE file for license information. -+ -+import base64 -+import gzip -+from cloudinit import dmi, helpers, safeyaml -+from cloudinit import settings -+from cloudinit.sources import DataSourceVMware -+from cloudinit.tests.helpers import ( -+ mock, -+ CiTestCase, -+ FilesystemMockingTestCase, -+ populate_dir, -+) -+ -+import os -+ -+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" -+PRODUCT_NAME = "VMware7,1" -+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" -+REROOT_FILES = { -+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, -+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, -+} -+ -+VMW_MULTIPLE_KEYS = [ -+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", -+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", -+] -+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" -+ -+VMW_METADATA_YAML = """instance-id: cloud-vm -+local-hostname: cloud-vm -+network: -+ version: 2 -+ ethernets: -+ nics: -+ match: -+ name: ens* -+ dhcp4: yes -+""" -+ -+VMW_USERDATA_YAML = """## template: jinja -+#cloud-config -+users: -+- default -+""" -+ -+VMW_VENDORDATA_YAML = """## template: jinja -+#cloud-config -+runcmd: -+- echo "Hello, world." -+""" -+ -+ -+class TestDataSourceVMware(CiTestCase): -+ """ -+ Test common functionality that is not transport specific. -+ """ -+ -+ def setUp(self): -+ super(TestDataSourceVMware, self).setUp() -+ self.tmp = self.tmp_dir() -+ -+ def test_no_data_access_method(self): -+ ds = get_ds(self.tmp) -+ ds.vmware_rpctool = None -+ ret = ds.get_data() -+ self.assertFalse(ret) -+ -+ def test_get_host_info(self): -+ host_info = DataSourceVMware.get_host_info() -+ self.assertTrue(host_info) -+ self.assertTrue(host_info["hostname"]) -+ self.assertTrue(host_info["local-hostname"]) -+ self.assertTrue(host_info["local_hostname"]) -+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) -+ -+ -+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): -+ """ -+ Test the envvar transport. 
-+ """ -+ -+ def setUp(self): -+ super(TestDataSourceVMwareEnvVars, self).setUp() -+ self.tmp = self.tmp_dir() -+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" -+ self.create_system_files() -+ -+ def tearDown(self): -+ del os.environ[DataSourceVMware.VMX_GUESTINFO] -+ return super(TestDataSourceVMwareEnvVars, self).tearDown() -+ -+ def create_system_files(self): -+ rootd = self.tmp_dir() -+ populate_dir( -+ rootd, -+ { -+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, -+ }, -+ ) -+ self.assertTrue(self.reRoot(rootd)) -+ -+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6): -+ ds = get_ds(self.tmp) -+ ds.vmware_rpctool = None -+ ret = ds.get_data() -+ self.assertTrue(ret) -+ self.assertEqual(m_fn_call_count, m_fn.call_count) -+ self.assertEqual( -+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR -+ ) -+ return ds -+ -+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): -+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count) -+ assert_metadata(self, ds, metadata) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_subplatform(self, m_fn): -+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] -+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ self.assertEqual( -+ ds.subplatform, -+ "%s (%s)" -+ % ( -+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, -+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), -+ ), -+ ) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_metadata_only(self, m_fn): -+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_userdata_only(self, m_fn): -+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_vendordata_only(self, m_fn): -+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_metadata_base64(self, m_fn): -+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) -+ m_fn.side_effect = [data, "base64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_metadata_b64(self, m_fn): -+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) -+ m_fn.side_effect = [data, "b64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_metadata_gzip_base64(self, m_fn): -+ data = VMW_METADATA_YAML.encode("utf-8") -+ data = gzip.compress(data) -+ data = base64.b64encode(data) -+ m_fn.side_effect = [data, "gzip+base64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_get_data_metadata_gz_b64(self, m_fn): -+ data = VMW_METADATA_YAML.encode("utf-8") -+ data = gzip.compress(data) -+ data = base64.b64encode(data) -+ m_fn.side_effect = [data, "gz+b64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ 
"cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_metadata_single_ssh_key(self, m_fn): -+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) -+ metadata["public_keys"] = VMW_SINGLE_KEY -+ metadata_yaml = safeyaml.dumps(metadata) -+ m_fn.side_effect = [metadata_yaml, "", "", ""] -+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) -+ -+ @mock.patch( -+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" -+ ) -+ def test_metadata_multiple_ssh_keys(self, m_fn): -+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) -+ metadata["public_keys"] = VMW_MULTIPLE_KEYS -+ metadata_yaml = safeyaml.dumps(metadata) -+ m_fn.side_effect = [metadata_yaml, "", "", ""] -+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) -+ -+ -+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): -+ """ -+ Test the guestinfo transport on a VMware platform. -+ """ -+ -+ def setUp(self): -+ super(TestDataSourceVMwareGuestInfo, self).setUp() -+ self.tmp = self.tmp_dir() -+ self.create_system_files() -+ -+ def create_system_files(self): -+ rootd = self.tmp_dir() -+ populate_dir( -+ rootd, -+ { -+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, -+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, -+ }, -+ ) -+ self.assertTrue(self.reRoot(rootd)) -+ -+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6): -+ ds = get_ds(self.tmp) -+ ds.vmware_rpctool = "vmware-rpctool" -+ ret = ds.get_data() -+ self.assertTrue(ret) -+ self.assertEqual(m_fn_call_count, m_fn.call_count) -+ self.assertEqual( -+ ds.data_access_method, -+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, -+ ) -+ return ds -+ -+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): -+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count) -+ assert_metadata(self, ds, metadata) -+ -+ def test_ds_valid_on_vmware_platform(self): -+ system_type = dmi.read_dmi_data("system-product-name") -+ self.assertEqual(system_type, PRODUCT_NAME) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_subplatform(self, m_fn): -+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] -+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ self.assertEqual( -+ ds.subplatform, -+ "%s (%s)" -+ % ( -+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, -+ DataSourceVMware.get_guestinfo_key_name("metadata"), -+ ), -+ ) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_userdata_only(self, m_fn): -+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_vendordata_only(self, m_fn): -+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_metadata_single_ssh_key(self, m_fn): -+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) -+ metadata["public_keys"] = VMW_SINGLE_KEY -+ metadata_yaml = safeyaml.dumps(metadata) -+ m_fn.side_effect = [metadata_yaml, "", "", ""] -+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_metadata_multiple_ssh_keys(self, m_fn): -+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) -+ metadata["public_keys"] = VMW_MULTIPLE_KEYS -+ metadata_yaml = safeyaml.dumps(metadata) -+ m_fn.side_effect = 
[metadata_yaml, "", "", ""] -+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_metadata_base64(self, m_fn): -+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) -+ m_fn.side_effect = [data, "base64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_metadata_b64(self, m_fn): -+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) -+ m_fn.side_effect = [data, "b64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_metadata_gzip_base64(self, m_fn): -+ data = VMW_METADATA_YAML.encode("utf-8") -+ data = gzip.compress(data) -+ data = base64.b64encode(data) -+ m_fn.side_effect = [data, "gzip+base64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_get_data_metadata_gz_b64(self, m_fn): -+ data = VMW_METADATA_YAML.encode("utf-8") -+ data = gzip.compress(data) -+ data = base64.b64encode(data) -+ m_fn.side_effect = [data, "gz+b64", "", ""] -+ self.assert_get_data_ok(m_fn, m_fn_call_count=4) -+ -+ -+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): -+ """ -+ Test the guestinfo transport on a non-VMware platform. -+ """ -+ -+ def setUp(self): -+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() -+ self.tmp = self.tmp_dir() -+ self.create_system_files() -+ -+ def create_system_files(self): -+ rootd = self.tmp_dir() -+ populate_dir( -+ rootd, -+ { -+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, -+ }, -+ ) -+ self.assertTrue(self.reRoot(rootd)) -+ -+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") -+ def test_ds_invalid_on_non_vmware_platform(self, m_fn): -+ system_type = dmi.read_dmi_data("system-product-name") -+ self.assertEqual(system_type, None) -+ -+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] -+ ds = get_ds(self.tmp) -+ ds.vmware_rpctool = "vmware-rpctool" -+ ret = ds.get_data() -+ self.assertFalse(ret) -+ -+ -+def assert_metadata(test_obj, ds, metadata): -+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) -+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) -+ -+ expected_public_keys = metadata.get("public_keys") -+ if not isinstance(expected_public_keys, list): -+ expected_public_keys = [expected_public_keys] -+ -+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) -+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) -+ -+ -+def get_ds(temp_dir): -+ ds = DataSourceVMware.DataSourceVMware( -+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) -+ ) -+ ds.vmware_rpctool = "vmware-rpctool" -+ return ds -+ -+ -+# vi: ts=4 expandtab -diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py -index 1d8aaf18..8617d7bd 100644 ---- a/tests/unittests/test_ds_identify.py -+++ b/tests/unittests/test_ds_identify.py -@@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase): - """EC2: bobrightbox.com in product_serial is not brightbox'""" - self._test_ds_not_found('Ec2-E24Cloud-negative') - -+ def test_vmware_no_valid_transports(self): -+ """VMware: no valid transports""" -+ self._test_ds_not_found('VMware-NoValidTransports') -+ -+ def test_vmware_envvar_no_data(self): -+ 
"""VMware: envvar transport no data""" -+ self._test_ds_not_found('VMware-EnvVar-NoData') -+ -+ def test_vmware_envvar_no_virt_id(self): -+ """VMware: envvar transport success if no virt id""" -+ self._test_ds_found('VMware-EnvVar-NoVirtID') -+ -+ def test_vmware_envvar_activated_by_metadata(self): -+ """VMware: envvar transport activated by metadata""" -+ self._test_ds_found('VMware-EnvVar-Metadata') -+ -+ def test_vmware_envvar_activated_by_userdata(self): -+ """VMware: envvar transport activated by userdata""" -+ self._test_ds_found('VMware-EnvVar-Userdata') -+ -+ def test_vmware_envvar_activated_by_vendordata(self): -+ """VMware: envvar transport activated by vendordata""" -+ self._test_ds_found('VMware-EnvVar-Vendordata') -+ -+ def test_vmware_guestinfo_no_data(self): -+ """VMware: guestinfo transport no data""" -+ self._test_ds_not_found('VMware-GuestInfo-NoData') -+ -+ def test_vmware_guestinfo_no_virt_id(self): -+ """VMware: guestinfo transport fails if no virt id""" -+ self._test_ds_not_found('VMware-GuestInfo-NoVirtID') -+ -+ def test_vmware_guestinfo_activated_by_metadata(self): -+ """VMware: guestinfo transport activated by metadata""" -+ self._test_ds_found('VMware-GuestInfo-Metadata') -+ -+ def test_vmware_guestinfo_activated_by_userdata(self): -+ """VMware: guestinfo transport activated by userdata""" -+ self._test_ds_found('VMware-GuestInfo-Userdata') -+ -+ def test_vmware_guestinfo_activated_by_vendordata(self): -+ """VMware: guestinfo transport activated by vendordata""" -+ self._test_ds_found('VMware-GuestInfo-Vendordata') -+ - - class TestBSDNoSys(DsIdentifyBase): - """Test *BSD code paths -@@ -1136,7 +1180,240 @@ VALID_CFG = { - 'Ec2-E24Cloud-negative': { - 'ds': 'Ec2', - 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, -- } -+ }, -+ 'VMware-NoValidTransports': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-EnvVar-NoData': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-EnvVar-NoVirtID': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ ], -+ }, -+ 'VMware-EnvVar-Metadata': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-EnvVar-Userdata': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-EnvVar-Vendordata': { -+ 'ds': 'VMware', -+ 'mocks': [ 
-+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo', -+ 'ret': 0, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', -+ 'ret': 0, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-GuestInfo-NoData': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_rpctool', -+ 'ret': 0, -+ 'out': '/usr/bin/vmware-rpctool', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-GuestInfo-NoVirtID': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_rpctool', -+ 'ret': 0, -+ 'out': '/usr/bin/vmware-rpctool', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_metadata', -+ 'ret': 0, -+ 'out': '---', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ ], -+ }, -+ 'VMware-GuestInfo-Metadata': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_rpctool', -+ 'ret': 0, -+ 'out': '/usr/bin/vmware-rpctool', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_metadata', -+ 'ret': 0, -+ 'out': '---', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-GuestInfo-Userdata': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_rpctool', -+ 'ret': 0, -+ 'out': '/usr/bin/vmware-rpctool', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_userdata', -+ 'ret': 0, -+ 'out': '---', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_vendordata', -+ 'ret': 1, -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, -+ 'VMware-GuestInfo-Vendordata': { -+ 'ds': 'VMware', -+ 'mocks': [ -+ { -+ 'name': 'vmware_has_rpctool', -+ 'ret': 0, -+ 'out': '/usr/bin/vmware-rpctool', -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_metadata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_userdata', -+ 'ret': 1, -+ }, -+ { -+ 'name': 'vmware_rpctool_guestinfo_vendordata', -+ 'ret': 0, -+ 'out': '---', -+ }, -+ MOCK_VIRT_IS_VMWARE, -+ ], -+ }, - } - - # vi: ts=4 expandtab -diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers -index 689d7902..cbfa883c 100644 ---- a/tools/.github-cla-signers -+++ b/tools/.github-cla-signers -@@ -1,5 +1,6 @@ - ader1990 - ajmyyra -+akutz - AlexBaranowski - Aman306 - andrewbogott -diff --git a/tools/ds-identify b/tools/ds-identify -index 2f2486f7..c01eae3d 100755 ---- a/tools/ds-identify -+++ b/tools/ds-identify -@@ -125,7 +125,7 @@ DI_DSNAME="" - # be searched if there is no setting found in config. 
- DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ - CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ --OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" -+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware" - DI_DSLIST="" - DI_MODE="" - DI_ON_FOUND="" -@@ -1350,6 +1350,80 @@ dscheck_IBMCloud() { - return ${DS_NOT_FOUND} - } - -+vmware_has_envvar_vmx_guestinfo() { -+ [ -n "${VMX_GUESTINFO:-}" ] -+} -+ -+vmware_has_envvar_vmx_guestinfo_metadata() { -+ [ -n "${VMX_GUESTINFO_METADATA:-}" ] -+} -+ -+vmware_has_envvar_vmx_guestinfo_userdata() { -+ [ -n "${VMX_GUESTINFO_USERDATA:-}" ] -+} -+ -+vmware_has_envvar_vmx_guestinfo_vendordata() { -+ [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ] -+} -+ -+vmware_has_rpctool() { -+ command -v vmware-rpctool >/dev/null 2>&1 -+} -+ -+vmware_rpctool_guestinfo_metadata() { -+ vmware-rpctool "info-get guestinfo.metadata" -+} -+ -+vmware_rpctool_guestinfo_userdata() { -+ vmware-rpctool "info-get guestinfo.userdata" -+} -+ -+vmware_rpctool_guestinfo_vendordata() { -+ vmware-rpctool "info-get guestinfo.vendordata" -+} -+ -+dscheck_VMware() { -+ # Checks to see if there is valid data for the VMware datasource. -+ # The data transports are checked in the following order: -+ # -+ # * envvars -+ # * guestinfo -+ # -+ # Please note when updating this function with support for new data -+ # transports, the order should match the order in the _get_data -+ # function from the file DataSourceVMware.py. -+ -+ # Check to see if running in a container and the VMware -+ # datasource is configured via environment variables. -+ if vmware_has_envvar_vmx_guestinfo; then -+ if vmware_has_envvar_vmx_guestinfo_metadata || \ -+ vmware_has_envvar_vmx_guestinfo_userdata || \ -+ vmware_has_envvar_vmx_guestinfo_vendordata; then -+ return "${DS_FOUND}" -+ fi -+ fi -+ -+ # Do not proceed unless the detected platform is VMware. -+ if [ ! "${DI_VIRT}" = "vmware" ]; then -+ return "${DS_NOT_FOUND}" -+ fi -+ -+ # Do not proceed if the vmware-rpctool command is not present. -+ if ! vmware_has_rpctool; then -+ return "${DS_NOT_FOUND}" -+ fi -+ -+ # Activate the VMware datasource only if any of the fields used -+ # by the datasource are present in the guestinfo table. 
-+ if { vmware_rpctool_guestinfo_metadata || \ -+ vmware_rpctool_guestinfo_userdata || \ -+ vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then -+ return "${DS_FOUND}" -+ fi -+ -+ return "${DS_NOT_FOUND}" -+} -+ - collect_info() { - read_uname_info - read_virt --- -2.27.0 - diff --git a/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch b/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch deleted file mode 100644 index a691f26..0000000 --- a/SOURCES/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch +++ /dev/null @@ -1,180 +0,0 @@ -From b226448134b5182ba685702e7b7a486db772d956 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 4 Mar 2022 11:21:16 +0100 -Subject: [PATCH 1/2] - Detect a Python version change and clear the cache - (#857) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857) -RH-Commit: [1/2] c562cd802eabae9dc14079de0b26d471d2229ca8 -RH-Bugzilla: 1935826 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal Morsy - -commit 78e89b03ecb29e7df3181b1219a0b5f44b9d7532 -Author: Robert Schweikert -Date: Thu Jul 1 12:35:40 2021 -0400 - - - Detect a Python version change and clear the cache (#857) - - summary: Clear cache when a Python version change is detected - - When a distribution gets updated it is possible that the Python version - changes. Python makes no guarantee that pickle is consistent across - versions as such we need to purge the cache and start over. - - Co-authored-by: James Falcon -Conflicts: - tests/integration_tests/util.py: test is not present downstream - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/cmd/main.py | 30 ++++++++++ - cloudinit/cmd/tests/test_main.py | 2 + - .../assets/test_version_change.pkl | Bin 0 -> 21 bytes - .../modules/test_ssh_auth_key_fingerprints.py | 2 +- - .../modules/test_version_change.py | 56 ++++++++++++++++++ - 5 files changed, 89 insertions(+), 1 deletion(-) - create mode 100644 tests/integration_tests/assets/test_version_change.pkl - create mode 100644 tests/integration_tests/modules/test_version_change.py - -diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py -index baf1381f..21213a4a 100644 ---- a/cloudinit/cmd/main.py -+++ b/cloudinit/cmd/main.py -@@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None): - (cmdline_name, url, path)) - - -+def purge_cache_on_python_version_change(init): -+ """Purge the cache if python version changed on us. -+ -+ There could be changes not represented in our cache (obj.pkl) after we -+ upgrade to a new version of python, so at that point clear the cache -+ """ -+ current_python_version = '%d.%d' % ( -+ sys.version_info.major, sys.version_info.minor -+ ) -+ python_version_path = os.path.join( -+ init.paths.get_cpath('data'), 'python-version' -+ ) -+ if os.path.exists(python_version_path): -+ cached_python_version = open(python_version_path).read() -+ # The Python version has changed out from under us, anything that was -+ # pickled previously is likely useless due to API changes. -+ if cached_python_version != current_python_version: -+ LOG.debug('Python version change detected. Purging cache') -+ init.purge_cache(True) -+ util.write_file(python_version_path, current_python_version) -+ else: -+ if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): -+ LOG.info( -+ 'Writing python-version file. ' -+ 'Cache compatibility status is currently unknown.' 
-+ ) -+ util.write_file(python_version_path, current_python_version) -+ -+ - def main_init(name, args): - deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] - if args.local: -@@ -276,6 +305,7 @@ def main_init(name, args): - util.logexc(LOG, "Failed to initialize, likely bad things to come!") - # Stage 4 - path_helper = init.paths -+ purge_cache_on_python_version_change(init) - mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK - - if mode == sources.DSMODE_NETWORK: -diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py -index 78b27441..1f5975b0 100644 ---- a/cloudinit/cmd/tests/test_main.py -+++ b/cloudinit/cmd/tests/test_main.py -@@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') - - - class TestMain(FilesystemMockingTestCase): -+ with_logs = True -+ allowed_subp = False - - def setUp(self): - super(TestMain, self).setUp() -diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py -index b9b0d85e..e1946cb1 100644 ---- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py -+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py -@@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\ - no_ssh_fingerprints: true - """ - --USER_DATA_SSH_AUTHKEY_ENABLE="""\ -+USER_DATA_SSH_AUTHKEY_ENABLE = """\ - #cloud-config - ssh_genkeytypes: - - ecdsa -diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py -new file mode 100644 -index 00000000..4e9ab63f ---- /dev/null -+++ b/tests/integration_tests/modules/test_version_change.py -@@ -0,0 +1,56 @@ -+from pathlib import Path -+ -+from tests.integration_tests.instances import IntegrationInstance -+from tests.integration_tests.util import ASSETS_DIR -+ -+ -+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') -+TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl' -+ -+ -+def _assert_no_pickle_problems(log): -+ assert 'Failed loading pickled blob' not in log -+ assert 'Traceback' not in log -+ assert 'WARN' not in log -+ -+ -+def test_reboot_without_version_change(client: IntegrationInstance): -+ log = client.read_from_file('/var/log/cloud-init.log') -+ assert 'Python version change detected' not in log -+ assert 'Cache compatibility status is currently unknown.' not in log -+ _assert_no_pickle_problems(log) -+ -+ client.restart() -+ log = client.read_from_file('/var/log/cloud-init.log') -+ assert 'Python version change detected' not in log -+ assert 'Could not determine Python version used to write cache' not in log -+ _assert_no_pickle_problems(log) -+ -+ # Now ensure that loading a bad pickle gives us problems -+ client.push_file(TEST_PICKLE, PICKLE_PATH) -+ client.restart() -+ log = client.read_from_file('/var/log/cloud-init.log') -+ assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log -+ -+ -+def test_cache_purged_on_version_change(client: IntegrationInstance): -+ # Start by pushing the invalid pickle so we'll hit an error if the -+ # cache didn't actually get purged -+ client.push_file(TEST_PICKLE, PICKLE_PATH) -+ client.execute("echo '1.0' > /var/lib/cloud/data/python-version") -+ client.restart() -+ log = client.read_from_file('/var/log/cloud-init.log') -+ assert 'Python version change detected. 
Purging cache' in log -+ _assert_no_pickle_problems(log) -+ -+ -+def test_log_message_on_missing_version_file(client: IntegrationInstance): -+ # Start by pushing a pickle so we can see the log message -+ client.push_file(TEST_PICKLE, PICKLE_PATH) -+ client.execute("rm /var/lib/cloud/data/python-version") -+ client.restart() -+ log = client.read_from_file('/var/log/cloud-init.log') -+ assert ( -+ 'Writing python-version file. ' -+ 'Cache compatibility status is currently unknown.' -+ ) in log --- -2.31.1 - diff --git a/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch b/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch deleted file mode 100644 index d4ec623..0000000 --- a/SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch +++ /dev/null @@ -1,474 +0,0 @@ -From 7bd016008429f0a18393a070d88e669f3ed89caa Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 11 Feb 2022 14:37:46 +0100 -Subject: [PATCH] Fix IPv6 netmask format for sysconfig (#1215) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 48: Fix IPv6 netmask format for sysconfig (#1215) -RH-Commit: [1/1] 4c940bbcf85dba1fce9f4acb9fc7820c0d7777f6 -RH-Bugzilla: 2046540 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Vitaly Kuznetsov - -commit b97a30f0a05c1dea918c46ca9c05c869d15fe2d5 -Author: Harald -Date: Tue Feb 8 15:49:00 2022 +0100 - - Fix IPv6 netmask format for sysconfig (#1215) - - This change converts the IPv6 netmask from the network_data.json[1] - format to the CIDR style, /. - - Using an IPv6 address like ffff:ffff:ffff:ffff:: does not work with - NetworkManager, nor networkscripts. - - NetworkManager will ignore the route, logging: - ifcfg-rh: ignoring invalid route at \ - "::/:: via fd00:fd00:fd00:2::fffe dev $DEV" \ - (/etc/sysconfig/network-scripts/route6-$DEV:3): \ - Argument for "::/::" is not ADDR/PREFIX format - - Similarly if using networkscripts, ip route fail with error: - Error: inet6 prefix is expected rather than \ - "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". - - Also a bit of refactoring ... - - cloudinit.net.sysconfig.Route.to_string: - * Move a couple of lines around to reduce repeated code. - * if "ADDRESS" not in key -> continute, so that the - code block following it can be de-indented. - cloudinit.net.network_state: - * Refactors the ipv4_mask_to_net_prefix, ipv6_mask_to_net_prefix - removes mask_to_net_prefix methods. Utilize ipaddress library to - do some of the heavy lifting. 
- - LP: #1959148 - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/net/__init__.py | 7 +- - cloudinit/net/network_state.py | 103 +++++++----------- - cloudinit/net/sysconfig.py | 91 ++++++++++------ - cloudinit/sources/DataSourceOpenNebula.py | 2 +- - .../sources/helpers/vmware/imc/config_nic.py | 4 +- - tests/unittests/test_net.py | 78 ++++++++++++- - 6 files changed, 176 insertions(+), 109 deletions(-) - -diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py -index 003efa2a..12bf64de 100644 ---- a/cloudinit/net/__init__.py -+++ b/cloudinit/net/__init__.py -@@ -14,7 +14,7 @@ import re - - from cloudinit import subp - from cloudinit import util --from cloudinit.net.network_state import mask_to_net_prefix -+from cloudinit.net.network_state import ipv4_mask_to_net_prefix - from cloudinit.url_helper import UrlError, readurl - - LOG = logging.getLogger(__name__) -@@ -1048,10 +1048,11 @@ class EphemeralIPv4Network(object): - 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format( - interface, ip, prefix_or_mask, broadcast)) - try: -- self.prefix = mask_to_net_prefix(prefix_or_mask) -+ self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask) - except ValueError as e: - raise ValueError( -- 'Cannot setup network: {0}'.format(e) -+ "Cannot setup network, invalid prefix or " -+ "netmask: {0}".format(e) - ) from e - - self.connectivity_url = connectivity_url -diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py -index e8bf9e39..2768ef94 100644 ---- a/cloudinit/net/network_state.py -+++ b/cloudinit/net/network_state.py -@@ -6,6 +6,7 @@ - - import copy - import functools -+import ipaddress - import logging - import socket - import struct -@@ -872,12 +873,18 @@ def _normalize_net_keys(network, address_keys=()): - try: - prefix = int(maybe_prefix) - except ValueError: -- # this supports input of
/255.255.255.0 -- prefix = mask_to_net_prefix(maybe_prefix) -- elif netmask: -- prefix = mask_to_net_prefix(netmask) -- elif 'prefix' in net: -- prefix = int(net['prefix']) -+ if ipv6: -+ # this supports input of ffff:ffff:ffff:: -+ prefix = ipv6_mask_to_net_prefix(maybe_prefix) -+ else: -+ # this supports input of 255.255.255.0 -+ prefix = ipv4_mask_to_net_prefix(maybe_prefix) -+ elif netmask and not ipv6: -+ prefix = ipv4_mask_to_net_prefix(netmask) -+ elif netmask and ipv6: -+ prefix = ipv6_mask_to_net_prefix(netmask) -+ elif "prefix" in net: -+ prefix = int(net["prefix"]) - else: - prefix = 64 if ipv6 else 24 - -@@ -972,72 +979,42 @@ def ipv4_mask_to_net_prefix(mask): - str(24) => 24 - "24" => 24 - """ -- if isinstance(mask, int): -- return mask -- if isinstance(mask, str): -- try: -- return int(mask) -- except ValueError: -- pass -- else: -- raise TypeError("mask '%s' is not a string or int") -- -- if '.' not in mask: -- raise ValueError("netmask '%s' does not contain a '.'" % mask) -- -- toks = mask.split(".") -- if len(toks) != 4: -- raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks))) -- -- return sum([bin(int(x)).count('1') for x in toks]) -+ return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen - - - def ipv6_mask_to_net_prefix(mask): - """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix. - -- If 'mask' is an integer or string representation of one then -- int(mask) will be returned. -+ If the input is already an integer or a string representation of -+ an integer, then int(mask) will be returned. -+ "ffff:ffff:ffff::" => 48 -+ "48" => 48 - """ -- -- if isinstance(mask, int): -- return mask -- if isinstance(mask, str): -- try: -- return int(mask) -- except ValueError: -- pass -- else: -- raise TypeError("mask '%s' is not a string or int") -- -- if ':' not in mask: -- raise ValueError("mask '%s' does not have a ':'") -- -- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, -- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc, -- 0xfffe, 0xffff] -- prefix = 0 -- for word in mask.split(':'): -- if not word or int(word, 16) == 0: -- break -- prefix += bitCount.index(int(word, 16)) -- -- return prefix -- -- --def mask_to_net_prefix(mask): -- """Return the network prefix for the netmask provided. -- -- Supports ipv4 or ipv6 netmasks.""" - try: -- # if 'mask' is a prefix that is an integer. -- # then just return it. -- return int(mask) -+ # In the case the mask is already a prefix -+ prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen -+ return prefixlen - except ValueError: -+ # ValueError means mask is an IPv6 address representation and need -+ # conversion. 
- pass -- if is_ipv6_addr(mask): -- return ipv6_mask_to_net_prefix(mask) -- else: -- return ipv4_mask_to_net_prefix(mask) -+ -+ netmask = ipaddress.ip_address(mask) -+ mask_int = int(netmask) -+ # If the mask is all zeroes, just return it -+ if mask_int == 0: -+ return mask_int -+ -+ trailing_zeroes = min( -+ ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length() -+ ) -+ leading_ones = mask_int >> trailing_zeroes -+ prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes -+ all_ones = (1 << prefixlen) - 1 -+ if leading_ones != all_ones: -+ raise ValueError("Invalid network mask '%s'" % mask) -+ -+ return prefixlen - - - def mask_and_ipv4_to_bcast_addr(mask, ip): -diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py -index d5440998..7ecbe1c3 100644 ---- a/cloudinit/net/sysconfig.py -+++ b/cloudinit/net/sysconfig.py -@@ -12,6 +12,7 @@ from cloudinit import util - from cloudinit import subp - from cloudinit.distros.parsers import networkmanager_conf - from cloudinit.distros.parsers import resolv_conf -+from cloudinit.net import network_state - - from . import renderer - from .network_state import ( -@@ -171,43 +172,61 @@ class Route(ConfigMap): - # (because Route can contain a mix of IPv4 and IPv6) - reindex = -1 - for key in sorted(self._conf.keys()): -- if 'ADDRESS' in key: -- index = key.replace('ADDRESS', '') -- address_value = str(self._conf[key]) -- # only accept combinations: -- # if proto ipv6 only display ipv6 routes -- # if proto ipv4 only display ipv4 routes -- # do not add ipv6 routes if proto is ipv4 -- # do not add ipv4 routes if proto is ipv6 -- # (this array will contain a mix of ipv4 and ipv6) -- if proto == "ipv4" and not self.is_ipv6_route(address_value): -- netmask_value = str(self._conf['NETMASK' + index]) -- gateway_value = str(self._conf['GATEWAY' + index]) -- # increase IPv4 index -- reindex = reindex + 1 -- buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), -- _quote_value(address_value))) -- buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), -- _quote_value(gateway_value))) -- buf.write("%s=%s\n" % ('NETMASK' + str(reindex), -- _quote_value(netmask_value))) -- metric_key = 'METRIC' + index -- if metric_key in self._conf: -- metric_value = str(self._conf['METRIC' + index]) -- buf.write("%s=%s\n" % ('METRIC' + str(reindex), -- _quote_value(metric_value))) -- elif proto == "ipv6" and self.is_ipv6_route(address_value): -- netmask_value = str(self._conf['NETMASK' + index]) -- gateway_value = str(self._conf['GATEWAY' + index]) -- metric_value = ( -- 'metric ' + str(self._conf['METRIC' + index]) -- if 'METRIC' + index in self._conf else '') -+ if "ADDRESS" not in key: -+ continue -+ -+ index = key.replace("ADDRESS", "") -+ address_value = str(self._conf[key]) -+ netmask_value = str(self._conf["NETMASK" + index]) -+ gateway_value = str(self._conf["GATEWAY" + index]) -+ -+ # only accept combinations: -+ # if proto ipv6 only display ipv6 routes -+ # if proto ipv4 only display ipv4 routes -+ # do not add ipv6 routes if proto is ipv4 -+ # do not add ipv4 routes if proto is ipv6 -+ # (this array will contain a mix of ipv4 and ipv6) -+ if proto == "ipv4" and not self.is_ipv6_route(address_value): -+ # increase IPv4 index -+ reindex = reindex + 1 -+ buf.write( -+ "%s=%s\n" -+ % ("ADDRESS" + str(reindex), _quote_value(address_value)) -+ ) -+ buf.write( -+ "%s=%s\n" -+ % ("GATEWAY" + str(reindex), _quote_value(gateway_value)) -+ ) -+ buf.write( -+ "%s=%s\n" -+ % ("NETMASK" + str(reindex), _quote_value(netmask_value)) -+ ) -+ metric_key = "METRIC" + index -+ if 
metric_key in self._conf: -+ metric_value = str(self._conf["METRIC" + index]) - buf.write( -- "%s/%s via %s %s dev %s\n" % (address_value, -- netmask_value, -- gateway_value, -- metric_value, -- self._route_name)) -+ "%s=%s\n" -+ % ("METRIC" + str(reindex), _quote_value(metric_value)) -+ ) -+ elif proto == "ipv6" and self.is_ipv6_route(address_value): -+ prefix_value = network_state.ipv6_mask_to_net_prefix( -+ netmask_value -+ ) -+ metric_value = ( -+ "metric " + str(self._conf["METRIC" + index]) -+ if "METRIC" + index in self._conf -+ else "" -+ ) -+ buf.write( -+ "%s/%s via %s %s dev %s\n" -+ % ( -+ address_value, -+ prefix_value, -+ gateway_value, -+ metric_value, -+ self._route_name, -+ ) -+ ) - - return buf.getvalue() - -diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py -index 730ec586..e7980ab1 100644 ---- a/cloudinit/sources/DataSourceOpenNebula.py -+++ b/cloudinit/sources/DataSourceOpenNebula.py -@@ -233,7 +233,7 @@ class OpenNebulaNetwork(object): - # Set IPv4 address - devconf['addresses'] = [] - mask = self.get_mask(c_dev) -- prefix = str(net.mask_to_net_prefix(mask)) -+ prefix = str(net.ipv4_mask_to_net_prefix(mask)) - devconf['addresses'].append( - self.get_ip(c_dev, mac) + '/' + prefix) - -diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py -index 9cd2c0c0..3a45c67e 100644 ---- a/cloudinit/sources/helpers/vmware/imc/config_nic.py -+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py -@@ -9,7 +9,7 @@ import logging - import os - import re - --from cloudinit.net.network_state import mask_to_net_prefix -+from cloudinit.net.network_state import ipv4_mask_to_net_prefix - from cloudinit import subp - from cloudinit import util - -@@ -180,7 +180,7 @@ class NicConfigurator(object): - """ - route_list = [] - -- cidr = mask_to_net_prefix(netmask) -+ cidr = ipv4_mask_to_net_prefix(netmask) - - for gateway in gateways: - destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr) -diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py -index 14d3462f..a7f6a1f7 100644 ---- a/tests/unittests/test_net.py -+++ b/tests/unittests/test_net.py -@@ -2025,10 +2025,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - routes: - - gateway: 2001:67c:1562:1 - network: 2001:67c:1 -- netmask: ffff:ffff:0 -+ netmask: "ffff:ffff::" - - gateway: 3001:67c:1562:1 - network: 3001:67c:1 -- netmask: ffff:ffff:0 -+ netmask: "ffff:ffff::" - metric: 10000 - """), - 'expected_netplan': textwrap.dedent(""" -@@ -2295,8 +2295,8 @@ iface bond0 inet6 static - 'route6-bond0': textwrap.dedent("""\ - # Created by cloud-init on instance boot automatically, do not edit. 
- # -- 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0 -- 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0 -+ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0 -+ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0 - """), - 'route-bond0': textwrap.dedent("""\ - ADDRESS0=10.1.3.0 -@@ -3088,6 +3088,76 @@ USERCTL=no - renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) - -+ def test_invalid_network_mask_ipv6(self): -+ net_json = { -+ "services": [{"type": "dns", "address": "172.19.0.12"}], -+ "networks": [ -+ { -+ "network_id": "public-ipv6", -+ "type": "ipv6", -+ "netmask": "", -+ "link": "tap1a81968a-79", -+ "routes": [ -+ { -+ "gateway": "2001:DB8::1", -+ "netmask": "ff:ff:ff:ff::", -+ "network": "2001:DB8:1::1", -+ }, -+ ], -+ "ip_address": "2001:DB8::10", -+ "id": "network1", -+ } -+ ], -+ "links": [ -+ { -+ "ethernet_mac_address": "fa:16:3e:ed:9a:59", -+ "mtu": None, -+ "type": "bridge", -+ "id": "tap1a81968a-79", -+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", -+ }, -+ ], -+ } -+ macs = {"fa:16:3e:ed:9a:59": "eth0"} -+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) -+ with self.assertRaises(ValueError): -+ network_state.parse_net_config_data(network_cfg, skip_broken=False) -+ -+ def test_invalid_network_mask_ipv4(self): -+ net_json = { -+ "services": [{"type": "dns", "address": "172.19.0.12"}], -+ "networks": [ -+ { -+ "network_id": "public-ipv4", -+ "type": "ipv4", -+ "netmask": "", -+ "link": "tap1a81968a-79", -+ "routes": [ -+ { -+ "gateway": "172.20.0.1", -+ "netmask": "255.234.255.0", -+ "network": "172.19.0.0", -+ }, -+ ], -+ "ip_address": "172.20.0.10", -+ "id": "network1", -+ } -+ ], -+ "links": [ -+ { -+ "ethernet_mac_address": "fa:16:3e:ed:9a:59", -+ "mtu": None, -+ "type": "bridge", -+ "id": "tap1a81968a-79", -+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", -+ }, -+ ], -+ } -+ macs = {"fa:16:3e:ed:9a:59": "eth0"} -+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) -+ with self.assertRaises(ValueError): -+ network_state.parse_net_config_data(network_cfg, skip_broken=False) -+ - def test_openstack_rendering_samples(self): - for os_sample in OS_SAMPLES: - render_dir = self.tmp_dir() --- -2.27.0 - diff --git a/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch b/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch deleted file mode 100644 index 889b8db..0000000 --- a/SOURCES/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch +++ /dev/null @@ -1,705 +0,0 @@ -From 04a4cc7b8da04ba4103118cf9d975d8e9548e0dc Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 4 Mar 2022 11:23:22 +0100 -Subject: [PATCH 2/2] Fix MIME policy failure on python version upgrade (#934) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857) -RH-Commit: [2/2] 05fc8c52a39b5ad464ad146488703467e39d73b1 -RH-Bugzilla: 1935826 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal Morsy - -commit eacb0353803263934aa2ac827c37e461c87cb107 -Author: James Falcon -Date: Thu Jul 15 17:52:21 2021 -0500 - - Fix MIME policy failure on python version upgrade (#934) - - Python 3.6 added a new `policy` attribute to `MIMEMultipart`. - MIMEMultipart may be part of the cached object pickle of a datasource. - Upgrading from an old version of python to 3.6+ will cause the - datasource to be invalid after pickle load. 
- - This commit uses the upgrade framework to attempt to access the mime - message and fail early (thus discarding the cache) if we cannot. - Commit 78e89b03 should fix this issue more generally. - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/sources/__init__.py | 18 + - cloudinit/stages.py | 2 + - .../assets/trusty_with_mime.pkl | 572 ++++++++++++++++++ - .../modules/test_persistence.py | 30 + - 4 files changed, 622 insertions(+) - create mode 100644 tests/integration_tests/assets/trusty_with_mime.pkl - create mode 100644 tests/integration_tests/modules/test_persistence.py - -diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py -index 7d74f8d9..338861e6 100644 ---- a/cloudinit/sources/__init__.py -+++ b/cloudinit/sources/__init__.py -@@ -74,6 +74,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource', - _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) - - -+class DatasourceUnpickleUserDataError(Exception): -+ """Raised when userdata is unable to be unpickled due to python upgrades""" -+ -+ - class DataSourceNotFoundException(Exception): - pass - -@@ -227,6 +231,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): - self.vendordata2 = None - if not hasattr(self, 'vendordata2_raw'): - self.vendordata2_raw = None -+ if hasattr(self, 'userdata') and self.userdata is not None: -+ # If userdata stores MIME data, on < python3.6 it will be -+ # missing the 'policy' attribute that exists on >=python3.6. -+ # Calling str() on the userdata will attempt to access this -+ # policy attribute. This will raise an exception, causing -+ # the pickle load to fail, so cloud-init will discard the cache -+ try: -+ str(self.userdata) -+ except AttributeError as e: -+ LOG.debug( -+ "Unable to unpickle datasource: %s." 
-+ " Ignoring current cache.", e -+ ) -+ raise DatasourceUnpickleUserDataError() from e - - def __str__(self): - return type_utils.obj_name(self) -diff --git a/cloudinit/stages.py b/cloudinit/stages.py -index 83e25dd1..e709a5cf 100644 ---- a/cloudinit/stages.py -+++ b/cloudinit/stages.py -@@ -980,6 +980,8 @@ def _pkl_load(fname): - return None - try: - return pickle.loads(pickle_contents) -+ except sources.DatasourceUnpickleUserDataError: -+ return None - except Exception: - util.logexc(LOG, "Failed loading pickled blob from %s", fname) - return None -diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl -new file mode 100644 -index 00000000..a4089ecf ---- /dev/null -+++ b/tests/integration_tests/assets/trusty_with_mime.pkl -@@ -0,0 +1,572 @@ -+ccopy_reg -+_reconstructor -+p1 -+(ccloudinit.sources.DataSourceNoCloud -+DataSourceNoCloudNet -+p2 -+c__builtin__ -+object -+p3 -+NtRp4 -+(dp5 -+S'paths' -+p6 -+g1 -+(ccloudinit.helpers -+Paths -+p7 -+g3 -+NtRp8 -+(dp9 -+S'lookups' -+p10 -+(dp11 -+S'cloud_config' -+p12 -+S'cloud-config.txt' -+p13 -+sS'userdata' -+p14 -+S'user-data.txt.i' -+p15 -+sS'vendordata' -+p16 -+S'vendor-data.txt.i' -+p17 -+sS'userdata_raw' -+p18 -+S'user-data.txt' -+p19 -+sS'boothooks' -+p20 -+g20 -+sS'scripts' -+p21 -+g21 -+sS'sem' -+p22 -+g22 -+sS'data' -+p23 -+g23 -+sS'vendor_scripts' -+p24 -+S'scripts/vendor' -+p25 -+sS'handlers' -+p26 -+g26 -+sS'obj_pkl' -+p27 -+S'obj.pkl' -+p28 -+sS'vendordata_raw' -+p29 -+S'vendor-data.txt' -+p30 -+sS'vendor_cloud_config' -+p31 -+S'vendor-cloud-config.txt' -+p32 -+ssS'template_tpl' -+p33 -+S'/etc/cloud/templates/%s.tmpl' -+p34 -+sS'cfgs' -+p35 -+(dp36 -+S'cloud_dir' -+p37 -+S'/var/lib/cloud/' -+p38 -+sS'templates_dir' -+p39 -+S'/etc/cloud/templates/' -+p40 -+sS'upstart_dir' -+p41 -+S'/etc/init/' -+p42 -+ssS'cloud_dir' -+p43 -+g38 -+sS'datasource' -+p44 -+NsS'upstart_conf_d' -+p45 -+g42 -+sS'boot_finished' -+p46 -+S'/var/lib/cloud/instance/boot-finished' -+p47 -+sS'instance_link' -+p48 -+S'/var/lib/cloud/instance' -+p49 -+sS'seed_dir' -+p50 -+S'/var/lib/cloud/seed' -+p51 -+sbsS'supported_seed_starts' -+p52 -+(S'http://' -+p53 -+S'https://' -+p54 -+S'ftp://' -+p55 -+tp56 -+sS'sys_cfg' -+p57 -+(dp58 -+S'output' -+p59 -+(dp60 -+S'all' -+p61 -+S'| tee -a /var/log/cloud-init-output.log' -+p62 -+ssS'users' -+p63 -+(lp64 -+S'default' -+p65 -+asS'def_log_file' -+p66 -+S'/var/log/cloud-init.log' -+p67 -+sS'cloud_final_modules' -+p68 -+(lp69 -+S'rightscale_userdata' -+p70 -+aS'scripts-vendor' -+p71 -+aS'scripts-per-once' -+p72 -+aS'scripts-per-boot' -+p73 -+aS'scripts-per-instance' -+p74 -+aS'scripts-user' -+p75 -+aS'ssh-authkey-fingerprints' -+p76 -+aS'keys-to-console' -+p77 -+aS'phone-home' -+p78 -+aS'final-message' -+p79 -+aS'power-state-change' -+p80 -+asS'disable_root' -+p81 -+I01 -+sS'syslog_fix_perms' -+p82 -+S'syslog:adm' -+p83 -+sS'log_cfgs' -+p84 -+(lp85 -+(lp86 -+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n' -+p87 
-+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n' -+p88 -+aa(lp89 -+g87 -+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" -+p90 -+aasS'cloud_init_modules' -+p91 -+(lp92 -+S'migrator' -+p93 -+aS'seed_random' -+p94 -+aS'bootcmd' -+p95 -+aS'write-files' -+p96 -+aS'growpart' -+p97 -+aS'resizefs' -+p98 -+aS'set_hostname' -+p99 -+aS'update_hostname' -+p100 -+aS'update_etc_hosts' -+p101 -+aS'ca-certs' -+p102 -+aS'rsyslog' -+p103 -+aS'users-groups' -+p104 -+aS'ssh' -+p105 -+asS'preserve_hostname' -+p106 -+I00 -+sS'_log' -+p107 -+(lp108 -+g87 -+ag90 -+ag88 -+asS'datasource_list' -+p109 -+(lp110 -+S'NoCloud' -+p111 -+aS'ConfigDrive' -+p112 -+aS'OpenNebula' -+p113 -+aS'Azure' -+p114 -+aS'AltCloud' -+p115 -+aS'OVF' -+p116 -+aS'MAAS' -+p117 -+aS'GCE' -+p118 -+aS'OpenStack' -+p119 -+aS'CloudSigma' -+p120 -+aS'Ec2' -+p121 -+aS'CloudStack' -+p122 -+aS'SmartOS' -+p123 -+aS'None' -+p124 -+asS'vendor_data' -+p125 -+(dp126 -+S'prefix' -+p127 -+(lp128 -+sS'enabled' -+p129 -+I01 -+ssS'cloud_config_modules' -+p130 -+(lp131 -+S'emit_upstart' -+p132 -+aS'disk_setup' -+p133 -+aS'mounts' -+p134 -+aS'ssh-import-id' -+p135 -+aS'locale' -+p136 -+aS'set-passwords' -+p137 -+aS'grub-dpkg' -+p138 -+aS'apt-pipelining' -+p139 -+aS'apt-configure' -+p140 -+aS'package-update-upgrade-install' -+p141 -+aS'landscape' -+p142 -+aS'timezone' -+p143 -+aS'puppet' -+p144 -+aS'chef' -+p145 -+aS'salt-minion' -+p146 -+aS'mcollective' -+p147 -+aS'disable-ec2-metadata' -+p148 -+aS'runcmd' -+p149 -+aS'byobu' -+p150 -+assg14 -+(iemail.mime.multipart -+MIMEMultipart -+p151 -+(dp152 -+S'_headers' -+p153 -+(lp154 -+(S'Content-Type' -+p155 -+S'multipart/mixed; boundary="===============4291038100093149247=="' -+tp156 -+a(S'MIME-Version' -+p157 -+S'1.0' -+p158 -+tp159 -+a(S'Number-Attachments' -+p160 -+S'1' -+tp161 -+asS'_payload' -+p162 -+(lp163 -+(iemail.mime.base -+MIMEBase -+p164 -+(dp165 -+g153 -+(lp166 -+(g157 -+g158 -+tp167 -+a(S'Content-Type' -+p168 -+S'text/x-not-multipart' -+tp169 -+a(S'Content-Disposition' -+p170 -+S'attachment; filename="part-001"' -+tp171 -+asg162 -+S'' -+sS'_charset' -+p172 -+NsS'_default_type' -+p173 -+S'text/plain' -+p174 -+sS'preamble' -+p175 -+NsS'defects' -+p176 -+(lp177 -+sS'_unixfrom' -+p178 -+NsS'epilogue' -+p179 -+Nsbasg172 -+Nsg173 -+g174 -+sg175 -+Nsg176 -+(lp180 -+sg178 -+Nsg179 -+Nsbsg16 -+S'#cloud-config\n{}\n\n' -+p181 -+sg18 -+S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n' -+p182 -+sg29 -+NsS'dsmode' -+p183 -+S'net' -+p184 -+sS'seed' -+p185 -+S'/var/lib/cloud/seed/nocloud-net' -+p186 -+sS'cmdline_id' -+p187 -+S'ds=nocloud-net' -+p188 -+sS'ud_proc' -+p189 -+g1 
-+(ccloudinit.user_data -+UserDataProcessor -+p190 -+g3 -+NtRp191 -+(dp192 -+g6 -+g8 -+sS'ssl_details' -+p193 -+(dp194 -+sbsg50 -+g186 -+sS'ds_cfg' -+p195 -+(dp196 -+sS'distro' -+p197 -+g1 -+(ccloudinit.distros.ubuntu -+Distro -+p198 -+g3 -+NtRp199 -+(dp200 -+S'osfamily' -+p201 -+S'debian' -+p202 -+sS'_paths' -+p203 -+g8 -+sS'name' -+p204 -+S'ubuntu' -+p205 -+sS'_runner' -+p206 -+g1 -+(ccloudinit.helpers -+Runners -+p207 -+g3 -+NtRp208 -+(dp209 -+g6 -+g8 -+sS'sems' -+p210 -+(dp211 -+sbsS'_cfg' -+p212 -+(dp213 -+S'paths' -+p214 -+(dp215 -+g37 -+g38 -+sg39 -+g40 -+sg41 -+g42 -+ssS'default_user' -+p216 -+(dp217 -+S'shell' -+p218 -+S'/bin/bash' -+p219 -+sS'name' -+p220 -+S'ubuntu' -+p221 -+sS'sudo' -+p222 -+(lp223 -+S'ALL=(ALL) NOPASSWD:ALL' -+p224 -+asS'lock_passwd' -+p225 -+I01 -+sS'gecos' -+p226 -+S'Ubuntu' -+p227 -+sS'groups' -+p228 -+(lp229 -+S'adm' -+p230 -+aS'audio' -+p231 -+aS'cdrom' -+p232 -+aS'dialout' -+p233 -+aS'dip' -+p234 -+aS'floppy' -+p235 -+aS'netdev' -+p236 -+aS'plugdev' -+p237 -+aS'sudo' -+p238 -+aS'video' -+p239 -+assS'package_mirrors' -+p240 -+(lp241 -+(dp242 -+S'arches' -+p243 -+(lp244 -+S'i386' -+p245 -+aS'amd64' -+p246 -+asS'failsafe' -+p247 -+(dp248 -+S'security' -+p249 -+S'http://security.ubuntu.com/ubuntu' -+p250 -+sS'primary' -+p251 -+S'http://archive.ubuntu.com/ubuntu' -+p252 -+ssS'search' -+p253 -+(dp254 -+S'security' -+p255 -+(lp256 -+sS'primary' -+p257 -+(lp258 -+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/' -+p259 -+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/' -+p260 -+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/' -+p261 -+assa(dp262 -+S'arches' -+p263 -+(lp264 -+S'armhf' -+p265 -+aS'armel' -+p266 -+aS'default' -+p267 -+asS'failsafe' -+p268 -+(dp269 -+S'security' -+p270 -+S'http://ports.ubuntu.com/ubuntu-ports' -+p271 -+sS'primary' -+p272 -+S'http://ports.ubuntu.com/ubuntu-ports' -+p273 -+ssasS'ssh_svcname' -+p274 -+S'ssh' -+p275 -+ssbsS'metadata' -+p276 -+(dp277 -+g183 -+g184 -+sS'local-hostname' -+p278 -+S'me' -+p279 -+sS'instance-id' -+p280 -+S'me' -+p281 -+ssb. -\ No newline at end of file -diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py -new file mode 100644 -index 00000000..00fdeaea ---- /dev/null -+++ b/tests/integration_tests/modules/test_persistence.py -@@ -0,0 +1,30 @@ -+# This file is part of cloud-init. See LICENSE file for license information. -+"""Test the behavior of loading/discarding pickle data""" -+from pathlib import Path -+ -+import pytest -+ -+from tests.integration_tests.instances import IntegrationInstance -+from tests.integration_tests.util import ( -+ ASSETS_DIR, -+ verify_ordered_items_in_text, -+) -+ -+ -+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') -+TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl' -+ -+ -+@pytest.mark.lxd_container -+def test_log_message_on_missing_version_file(client: IntegrationInstance): -+ client.push_file(TEST_PICKLE, PICKLE_PATH) -+ client.restart() -+ assert client.execute('cloud-init status --wait').ok -+ log = client.read_from_file('/var/log/cloud-init.log') -+ verify_ordered_items_in_text([ -+ "Unable to unpickle datasource: 'MIMEMultipart' object has no " -+ "attribute 'policy'. 
Ignoring current cache.", -+ 'no cache found', -+ 'Searching for local data source', -+ 'SUCCESS: found local data from DataSourceNoCloud' -+ ], log) --- -2.31.1 - diff --git a/SOURCES/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch b/SOURCES/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch deleted file mode 100644 index 6a9cfcc..0000000 --- a/SOURCES/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch +++ /dev/null @@ -1,262 +0,0 @@ -From 71989367e7a634fdd2af8ef58473975e0ef60464 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Sat, 21 Aug 2021 13:53:27 +0200 -Subject: [PATCH] Fix home permissions modified by ssh module (SC-338) (#984) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 29: Fix home permissions modified by ssh module (SC-338) (#984) -RH-Commit: [1/1] c409f2609b1d7e024eba77b55a196a4cafadd1d7 (eesposit/cloud-init) -RH-Bugzilla: 1995840 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -TESTED: By me and QA -BREW: 39178090 - -Fix home permissions modified by ssh module (SC-338) (#984) - -commit 7d3f5d750f6111c2716143364ea33486df67c927 -Author: James Falcon -Date: Fri Aug 20 17:09:49 2021 -0500 - - Fix home permissions modified by ssh module (SC-338) (#984) - - Fix home permissions modified by ssh module - - In #956, we updated the file and directory permissions for keys not in - the user's home directory. We also unintentionally modified the - permissions within the home directory as well. These should not change, - and this commit changes that back. - - LP: #1940233 - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/ssh_util.py | 35 ++++- - .../modules/test_ssh_keysfile.py | 132 +++++++++++++++--- - 2 files changed, 146 insertions(+), 21 deletions(-) - -diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py -index b8a3c8f7..9ccadf09 100644 ---- a/cloudinit/ssh_util.py -+++ b/cloudinit/ssh_util.py -@@ -321,23 +321,48 @@ def check_create_path(username, filename, strictmodes): - home_folder = os.path.dirname(user_pwent.pw_dir) - for directory in directories: - parent_folder += "/" + directory -- if home_folder.startswith(parent_folder): -+ -+ # security check, disallow symlinks in the AuthorizedKeysFile path. -+ if os.path.islink(parent_folder): -+ LOG.debug( -+ "Invalid directory. Symlink exists in path: %s", -+ parent_folder) -+ return False -+ -+ if os.path.isfile(parent_folder): -+ LOG.debug( -+ "Invalid directory. File exists in path: %s", -+ parent_folder) -+ return False -+ -+ if (home_folder.startswith(parent_folder) or -+ parent_folder == user_pwent.pw_dir): - continue - -- if not os.path.isdir(parent_folder): -+ if not os.path.exists(parent_folder): - # directory does not exist, and permission so far are good: - # create the directory, and make it accessible by everyone - # but owned by root, as it might be used by many users. 
- with util.SeLinuxGuard(parent_folder): -- os.makedirs(parent_folder, mode=0o755, exist_ok=True) -- util.chownbyid(parent_folder, root_pwent.pw_uid, -- root_pwent.pw_gid) -+ mode = 0o755 -+ uid = root_pwent.pw_uid -+ gid = root_pwent.pw_gid -+ if parent_folder.startswith(user_pwent.pw_dir): -+ mode = 0o700 -+ uid = user_pwent.pw_uid -+ gid = user_pwent.pw_gid -+ os.makedirs(parent_folder, mode=mode, exist_ok=True) -+ util.chownbyid(parent_folder, uid, gid) - - permissions = check_permissions(username, parent_folder, - filename, False, strictmodes) - if not permissions: - return False - -+ if os.path.islink(filename) or os.path.isdir(filename): -+ LOG.debug("%s is not a file!", filename) -+ return False -+ - # check the file - if not os.path.exists(filename): - # if file does not exist: we need to create it, since the -diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py -index f82d7649..3159feb9 100644 ---- a/tests/integration_tests/modules/test_ssh_keysfile.py -+++ b/tests/integration_tests/modules/test_ssh_keysfile.py -@@ -10,10 +10,10 @@ TEST_USER1_KEYS = get_test_rsa_keypair('test1') - TEST_USER2_KEYS = get_test_rsa_keypair('test2') - TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') - --USERDATA = """\ -+_USERDATA = """\ - #cloud-config - bootcmd: -- - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config -+ - {bootcmd} - ssh_authorized_keys: - - {default} - users: -@@ -24,27 +24,17 @@ users: - - name: test_user2 - ssh_authorized_keys: - - {user2} --""".format( # noqa: E501 -+""".format( -+ bootcmd='{bootcmd}', - default=TEST_DEFAULT_KEYS.public_key, - user1=TEST_USER1_KEYS.public_key, - user2=TEST_USER2_KEYS.public_key, - ) - - --@pytest.mark.ubuntu --@pytest.mark.user_data(USERDATA) --def test_authorized_keys(client: IntegrationInstance): -- expected_keys = [ -- ('test_user1', '/home/test_user1/.ssh/authorized_keys2', -- TEST_USER1_KEYS), -- ('test_user2', '/home/test_user2/.ssh/authorized_keys2', -- TEST_USER2_KEYS), -- ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', -- TEST_DEFAULT_KEYS), -- ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), -- ] -- -+def common_verify(client, expected_keys): - for user, filename, keys in expected_keys: -+ # Ensure key is in the key file - contents = client.read_from_file(filename) - if user in ['ubuntu', 'root']: - # Our personal public key gets added by pycloudlib -@@ -83,3 +73,113 @@ def test_authorized_keys(client: IntegrationInstance): - look_for_keys=False, - allow_agent=False, - ) -+ -+ # Ensure we haven't messed with any /home permissions -+ # See LP: #1940233 -+ home_dir = '/home/{}'.format(user) -+ home_perms = '755' -+ if user == 'root': -+ home_dir = '/root' -+ home_perms = '700' -+ assert '{} {}'.format(user, home_perms) == client.execute( -+ 'stat -c "%U %a" {}'.format(home_dir) -+ ) -+ if client.execute("test -d {}/.ssh".format(home_dir)).ok: -+ assert '{} 700'.format(user) == client.execute( -+ 'stat -c "%U %a" {}/.ssh'.format(home_dir) -+ ) -+ assert '{} 600'.format(user) == client.execute( -+ 'stat -c "%U %a" {}'.format(filename) -+ ) -+ -+ # Also ensure ssh-keygen works as expected -+ client.execute('mkdir {}/.ssh'.format(home_dir)) -+ assert client.execute( -+ "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format( -+ home_dir) -+ ).ok -+ assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)) -+ assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)) 
-+ -+ assert 'root 755' == client.execute('stat -c "%U %a" /home') -+ -+ -+DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') -+ -+ -+@pytest.mark.ubuntu -+@pytest.mark.user_data(DEFAULT_KEYS_USERDATA) -+def test_authorized_keys_default(client: IntegrationInstance): -+ expected_keys = [ -+ ('test_user1', '/home/test_user1/.ssh/authorized_keys', -+ TEST_USER1_KEYS), -+ ('test_user2', '/home/test_user2/.ssh/authorized_keys', -+ TEST_USER2_KEYS), -+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys', -+ TEST_DEFAULT_KEYS), -+ ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS), -+ ] -+ common_verify(client, expected_keys) -+ -+ -+AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=( -+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " -+ "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " -+ "/etc/ssh/sshd_config")) -+ -+ -+@pytest.mark.ubuntu -+@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA) -+def test_authorized_keys2(client: IntegrationInstance): -+ expected_keys = [ -+ ('test_user1', '/home/test_user1/.ssh/authorized_keys2', -+ TEST_USER1_KEYS), -+ ('test_user2', '/home/test_user2/.ssh/authorized_keys2', -+ TEST_USER2_KEYS), -+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', -+ TEST_DEFAULT_KEYS), -+ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), -+ ] -+ common_verify(client, expected_keys) -+ -+ -+NESTED_KEYS_USERDATA = _USERDATA.format(bootcmd=( -+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " -+ "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " -+ "/etc/ssh/sshd_config")) -+ -+ -+@pytest.mark.ubuntu -+@pytest.mark.user_data(NESTED_KEYS_USERDATA) -+def test_nested_keys(client: IntegrationInstance): -+ expected_keys = [ -+ ('test_user1', '/home/test_user1/foo/bar/ssh/keys', -+ TEST_USER1_KEYS), -+ ('test_user2', '/home/test_user2/foo/bar/ssh/keys', -+ TEST_USER2_KEYS), -+ ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys', -+ TEST_DEFAULT_KEYS), -+ ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS), -+ ] -+ common_verify(client, expected_keys) -+ -+ -+EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=( -+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " -+ "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " -+ "/etc/ssh/sshd_config")) -+ -+ -+@pytest.mark.ubuntu -+@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA) -+def test_external_keys(client: IntegrationInstance): -+ expected_keys = [ -+ ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys', -+ TEST_USER1_KEYS), -+ ('test_user2', '/etc/ssh/authorized_keys/test_user2/keys', -+ TEST_USER2_KEYS), -+ ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys', -+ TEST_DEFAULT_KEYS), -+ ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS), -+ ] -+ common_verify(client, expected_keys) --- -2.27.0 - diff --git a/SOURCES/ci-Leave-the-details-of-service-management-to-the-distr.patch b/SOURCES/ci-Leave-the-details-of-service-management-to-the-distr.patch deleted file mode 100644 index 0005779..0000000 --- a/SOURCES/ci-Leave-the-details-of-service-management-to-the-distr.patch +++ /dev/null @@ -1,481 +0,0 @@ -From 1b2602a4afeca35d5780e4f23913a31bc750d076 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Tue, 31 May 2022 09:38:47 +0200 -Subject: [PATCH] Leave the details of service management to the distro (#1074) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 74: Leave the details of service management to the distro (#1074) -RH-Commit: [1/1] 781e3e80d8f2d00af4cc5fff5720f690569c8de2 -RH-Bugzilla: 2091933 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal 
Morsy - -commit 8c89009e75c7cf6c2f87635b82656f07f58095e1 -Author: Andy Fiddaman -Date: Wed Oct 20 20:58:27 2021 +0000 - - Leave the details of service management to the distro (#1074) - - Various modules restart services and they all have logic to try and - detect if they are running on a system that needs 'systemctl' or - 'service', and then have code to decide which order the arguments - need to be etc. On top of that, not all modules do this in the same way. - - The duplication and different approaches are not ideal but this also - makes it hard to add support for a new distribution that does not use - either 'systemctl' or 'service'. - - This change adds a new manage_service() method to the distro class - and updates several modules to use it. - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/config/cc_fan.py | 42 ++++++++----------- - cloudinit/config/cc_ntp.py | 20 ++------- - cloudinit/config/cc_rsyslog.py | 17 +++----- - cloudinit/config/cc_set_passwords.py | 17 ++------ - cloudinit/config/tests/test_set_passwords.py | 41 ++++++++---------- - cloudinit/distros/__init__.py | 28 +++++++++++++ - .../test_distros/test_manage_service.py | 38 +++++++++++++++++ - .../test_handler/test_handler_ntp.py | 29 +++---------- - 8 files changed, 119 insertions(+), 113 deletions(-) - create mode 100644 tests/unittests/test_distros/test_manage_service.py - -diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py -index 77984bca..91f50e22 100644 ---- a/cloudinit/config/cc_fan.py -+++ b/cloudinit/config/cc_fan.py -@@ -52,35 +52,26 @@ BUILTIN_CFG = { - } - - --def stop_update_start(service, config_file, content, systemd=False): -- if systemd: -- cmds = {'stop': ['systemctl', 'stop', service], -- 'start': ['systemctl', 'start', service], -- 'enable': ['systemctl', 'enable', service]} -- else: -- cmds = {'stop': ['service', 'stop'], -- 'start': ['service', 'start']} -- -- def run(cmd, msg): -- try: -- return subp.subp(cmd, capture=True) -- except subp.ProcessExecutionError as e: -- LOG.warning("failed: %s (%s): %s", service, cmd, e) -- return False -- -- stop_failed = not run(cmds['stop'], msg='stop %s' % service) -+def stop_update_start(distro, service, config_file, content): -+ try: -+ distro.manage_service('stop', service) -+ stop_failed = False -+ except subp.ProcessExecutionError as e: -+ stop_failed = True -+ LOG.warning("failed to stop %s: %s", service, e) -+ - if not content.endswith('\n'): - content += '\n' - util.write_file(config_file, content, omode="w") - -- ret = run(cmds['start'], msg='start %s' % service) -- if ret and stop_failed: -- LOG.warning("success: %s started", service) -- -- if 'enable' in cmds: -- ret = run(cmds['enable'], msg='enable %s' % service) -+ try: -+ distro.manage_service('start', service) -+ if stop_failed: -+ LOG.warning("success: %s started", service) -+ except subp.ProcessExecutionError as e: -+ LOG.warning("failed to start %s: %s", service, e) - -- return ret -+ distro.manage_service('enable', service) - - - def handle(name, cfg, cloud, log, args): -@@ -99,7 +90,8 @@ def handle(name, cfg, cloud, log, args): - distro.install_packages(['ubuntu-fan']) - - stop_update_start( -+ distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), -- content=mycfg.get('config'), systemd=distro.uses_systemd()) -+ content=mycfg.get('config')) - - # vi: ts=4 expandtab -diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py -index e183993f..4f358c8a 100644 ---- a/cloudinit/config/cc_ntp.py -+++ b/cloudinit/config/cc_ntp.py -@@ -459,21 
+459,6 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, - util.del_file(template_fn) - - --def reload_ntp(service, systemd=False): -- """Restart or reload an ntp system service. -- -- @param service: A string specifying the name of the service to be affected. -- @param systemd: A boolean indicating if the distro uses systemd, defaults -- to False. -- @returns: A tuple of stdout, stderr results from executing the action. -- """ -- if systemd: -- cmd = ['systemctl', 'reload-or-restart', service] -- else: -- cmd = ['service', service, 'restart'] -- subp.subp(cmd, capture=True) -- -- - def supplemental_schema_validation(ntp_config): - """Validate user-provided ntp:config option values. - -@@ -583,10 +568,11 @@ def handle(name, cfg, cloud, log, _args): - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) - try: -- reload_ntp(ntp_client_config['service_name'], -- systemd=cloud.distro.uses_systemd()) -+ cloud.distro.manage_service('reload', -+ ntp_client_config.get('service_name')) - except subp.ProcessExecutionError as e: - LOG.exception("Failed to reload/start ntp service: %s", e) - raise - -+ - # vi: ts=4 expandtab -diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py -index 2a2bc931..dd2bbd00 100644 ---- a/cloudinit/config/cc_rsyslog.py -+++ b/cloudinit/config/cc_rsyslog.py -@@ -207,16 +207,11 @@ HOST_PORT_RE = re.compile( - r'([:](?P[0-9]+))?$') - - --def reload_syslog(command=DEF_RELOAD, systemd=False): -- service = 'rsyslog' -+def reload_syslog(distro, command=DEF_RELOAD): - if command == DEF_RELOAD: -- if systemd: -- cmd = ['systemctl', 'reload-or-try-restart', service] -- else: -- cmd = ['service', service, 'restart'] -- else: -- cmd = command -- subp.subp(cmd, capture=True) -+ service = distro.get_option('rsyslog_svcname', 'rsyslog') -+ return distro.manage_service('try-reload', service) -+ return subp.subp(command, capture=True) - - - def load_config(cfg): -@@ -429,9 +424,7 @@ def handle(name, cfg, cloud, log, _args): - return - - try: -- restarted = reload_syslog( -- command=mycfg[KEYNAME_RELOAD], -- systemd=cloud.distro.uses_systemd()), -+ restarted = reload_syslog(cloud.distro, command=mycfg[KEYNAME_RELOAD]) - except subp.ProcessExecutionError as e: - restarted = False - log.warning("Failed to reload syslog", e) -diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py -index 433de751..9efbf61f 100755 ---- a/cloudinit/config/cc_set_passwords.py -+++ b/cloudinit/config/cc_set_passwords.py -@@ -94,18 +94,15 @@ PW_SET = (''.join([x for x in ascii_letters + digits - if x not in 'loLOI01'])) - - --def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): -+def handle_ssh_pwauth(pw_auth, distro): - """Apply sshd PasswordAuthentication changes. - - @param pw_auth: config setting from 'pw_auth'. - Best given as True, False, or "unchanged". -- @param service_cmd: The service command list (['service']) -- @param service_name: The name of the sshd service for the system. 
-+ @param distro: an instance of the distro class for the target distribution - - @return: None""" - cfg_name = "PasswordAuthentication" -- if service_cmd is None: -- service_cmd = ["service"] - - if util.is_true(pw_auth): - cfg_val = 'yes' -@@ -124,11 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): - LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) - return - -- if 'systemctl' in service_cmd: -- cmd = list(service_cmd) + ["restart", service_name] -- else: -- cmd = list(service_cmd) + [service_name, "restart"] -- subp.subp(cmd) -+ distro.manage_service('restart', distro.get_option('ssh_svcname', 'ssh')) - LOG.debug("Restarted the SSH daemon.") - - -@@ -229,9 +222,7 @@ def handle(_name, cfg, cloud, log, args): - if expired_users: - log.debug("Expired passwords for: %s users", expired_users) - -- handle_ssh_pwauth( -- cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd, -- service_name=cloud.distro.get_option('ssh_svcname', 'ssh')) -+ handle_ssh_pwauth(cfg.get('ssh_pwauth'), cloud.distro) - - if len(errors): - log.debug("%s errors occured, re-raising the last one", len(errors)) -diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py -index bbe2ee8f..79118a12 100644 ---- a/cloudinit/config/tests/test_set_passwords.py -+++ b/cloudinit/config/tests/test_set_passwords.py -@@ -14,57 +14,52 @@ class TestHandleSshPwauth(CiTestCase): - - with_logs = True - -- @mock.patch(MODPATH + "subp.subp") -+ @mock.patch("cloudinit.distros.subp.subp") - def test_unknown_value_logs_warning(self, m_subp): -- setpass.handle_ssh_pwauth("floo") -+ cloud = self.tmp_cloud(distro='ubuntu') -+ setpass.handle_ssh_pwauth("floo", cloud.distro) - self.assertIn("Unrecognized value: ssh_pwauth=floo", - self.logs.getvalue()) - m_subp.assert_not_called() - - @mock.patch(MODPATH + "update_ssh_config", return_value=True) -- @mock.patch(MODPATH + "subp.subp") -+ @mock.patch("cloudinit.distros.subp.subp") - def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): - """If systemctl in service cmd: systemctl restart name.""" -- setpass.handle_ssh_pwauth( -- True, service_cmd=["systemctl"], service_name="myssh") -- self.assertEqual(mock.call(["systemctl", "restart", "myssh"]), -- m_subp.call_args) -- -- @mock.patch(MODPATH + "update_ssh_config", return_value=True) -- @mock.patch(MODPATH + "subp.subp") -- def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): -- """If systemctl in service cmd: systemctl restart name.""" -- setpass.handle_ssh_pwauth( -- True, service_cmd=["service"], service_name="myssh") -- self.assertEqual(mock.call(["service", "myssh", "restart"]), -- m_subp.call_args) -+ cloud = self.tmp_cloud(distro='ubuntu') -+ cloud.distro.init_cmd = ['systemctl'] -+ setpass.handle_ssh_pwauth(True, cloud.distro) -+ m_subp.assert_called_with( -+ ["systemctl", "restart", "ssh"], capture=True) - - @mock.patch(MODPATH + "update_ssh_config", return_value=False) -- @mock.patch(MODPATH + "subp.subp") -+ @mock.patch("cloudinit.distros.subp.subp") - def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): - """If config is not updated, then no system restart should be done.""" -- setpass.handle_ssh_pwauth(True) -+ cloud = self.tmp_cloud(distro='ubuntu') -+ setpass.handle_ssh_pwauth(True, cloud.distro) - m_subp.assert_not_called() - self.assertIn("No need to restart SSH", self.logs.getvalue()) - - @mock.patch(MODPATH + "update_ssh_config", return_value=True) -- @mock.patch(MODPATH + 
"subp.subp") -+ @mock.patch("cloudinit.distros.subp.subp") - def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): - """If 'unchanged', then no updates to config and no restart.""" -- setpass.handle_ssh_pwauth( -- "unchanged", service_cmd=["systemctl"], service_name="myssh") -+ cloud = self.tmp_cloud(distro='ubuntu') -+ setpass.handle_ssh_pwauth("unchanged", cloud.distro) - m_update_ssh_config.assert_not_called() - m_subp.assert_not_called() - -- @mock.patch(MODPATH + "subp.subp") -+ @mock.patch("cloudinit.distros.subp.subp") - def test_valid_change_values(self, m_subp): - """If value is a valid changen value, then update should be called.""" -+ cloud = self.tmp_cloud(distro='ubuntu') - upname = MODPATH + "update_ssh_config" - optname = "PasswordAuthentication" - for value in util.FALSE_STRINGS + util.TRUE_STRINGS: - optval = "yes" if value in util.TRUE_STRINGS else "no" - with mock.patch(upname, return_value=False) as m_update: -- setpass.handle_ssh_pwauth(value) -+ setpass.handle_ssh_pwauth(value, cloud.distro) - m_update.assert_called_with({optname: optval}) - m_subp.assert_not_called() - -diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py -index 220bd11f..a8c57cb8 100755 ---- a/cloudinit/distros/__init__.py -+++ b/cloudinit/distros/__init__.py -@@ -784,6 +784,34 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): - args.append(message) - return args - -+ def manage_service(self, action, service): -+ """ -+ Perform the requested action on a service. This handles the common -+ 'systemctl' and 'service' cases and may be overridden in subclasses -+ as necessary. -+ May raise ProcessExecutionError -+ """ -+ init_cmd = self.init_cmd -+ if self.uses_systemd() or 'systemctl' in init_cmd: -+ init_cmd = ['systemctl'] -+ cmds = {'stop': ['stop', service], -+ 'start': ['start', service], -+ 'enable': ['enable', service], -+ 'restart': ['restart', service], -+ 'reload': ['reload-or-restart', service], -+ 'try-reload': ['reload-or-try-restart', service], -+ } -+ else: -+ cmds = {'stop': [service, 'stop'], -+ 'start': [service, 'start'], -+ 'enable': [service, 'start'], -+ 'restart': [service, 'restart'], -+ 'reload': [service, 'restart'], -+ 'try-reload': [service, 'restart'], -+ } -+ cmd = list(init_cmd) + list(cmds[action]) -+ return subp.subp(cmd, capture=True) -+ - - def _apply_hostname_transformations_to_url(url: str, transformations: list): - """ -diff --git a/tests/unittests/test_distros/test_manage_service.py b/tests/unittests/test_distros/test_manage_service.py -new file mode 100644 -index 00000000..47e7cfb0 ---- /dev/null -+++ b/tests/unittests/test_distros/test_manage_service.py -@@ -0,0 +1,38 @@ -+# This file is part of cloud-init. See LICENSE file for license information. 
-+ -+from cloudinit.tests.helpers import (CiTestCase, mock) -+from tests.unittests.util import TestingDistro -+ -+ -+class TestManageService(CiTestCase): -+ -+ with_logs = True -+ -+ def setUp(self): -+ super(TestManageService, self).setUp() -+ self.dist = TestingDistro() -+ -+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) -+ @mock.patch("cloudinit.distros.subp.subp") -+ def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): -+ self.dist.init_cmd = ['systemctl'] -+ self.dist.manage_service('start', 'myssh') -+ m_subp.assert_called_with(['systemctl', 'start', 'myssh'], -+ capture=True) -+ -+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) -+ @mock.patch("cloudinit.distros.subp.subp") -+ def test_manage_service_service_initcmd(self, m_subp, m_sysd): -+ self.dist.init_cmd = ['service'] -+ self.dist.manage_service('start', 'myssh') -+ m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) -+ -+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=True) -+ @mock.patch("cloudinit.distros.subp.subp") -+ def test_manage_service_systemctl(self, m_subp, m_sysd): -+ self.dist.init_cmd = ['ignore'] -+ self.dist.manage_service('start', 'myssh') -+ m_subp.assert_called_with(['systemctl', 'start', 'myssh'], -+ capture=True) -+ -+# vi: ts=4 sw=4 expandtab -diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py -index 6b9c8377..c059e2e6 100644 ---- a/tests/unittests/test_handler/test_handler_ntp.py -+++ b/tests/unittests/test_handler/test_handler_ntp.py -@@ -112,22 +112,6 @@ class TestNtp(FilesystemMockingTestCase): - check_exe='timesyncd') - install_func.assert_called_once_with([]) - -- @mock.patch("cloudinit.config.cc_ntp.subp") -- def test_reload_ntp_defaults(self, mock_subp): -- """Test service is restarted/reloaded (defaults)""" -- service = 'ntp_service_name' -- cmd = ['service', service, 'restart'] -- cc_ntp.reload_ntp(service) -- mock_subp.subp.assert_called_with(cmd, capture=True) -- -- @mock.patch("cloudinit.config.cc_ntp.subp") -- def test_reload_ntp_systemd(self, mock_subp): -- """Test service is restarted/reloaded (systemd)""" -- service = 'ntp_service_name' -- cc_ntp.reload_ntp(service, systemd=True) -- cmd = ['systemctl', 'reload-or-restart', service] -- mock_subp.subp.assert_called_with(cmd, capture=True) -- - def test_ntp_rename_ntp_conf(self): - """When NTP_CONF exists, rename_ntp moves it.""" - ntpconf = self.tmp_path("ntp.conf", self.new_root) -@@ -488,10 +472,11 @@ class TestNtp(FilesystemMockingTestCase): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - self.assertEqual(0, m_select.call_count) - -+ @mock.patch("cloudinit.distros.subp") - @mock.patch("cloudinit.config.cc_ntp.subp") - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - @mock.patch("cloudinit.distros.Distro.uses_systemd") -- def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp): -+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp): - """Test enabled config renders template, and restarts service """ - cfg = {'ntp': {'enabled': True}} - for distro in cc_ntp.distros: -@@ -509,7 +494,7 @@ class TestNtp(FilesystemMockingTestCase): - - if distro == 'alpine': - uses_systemd = False -- expected_service_call = ['service', service_name, 'restart'] -+ expected_service_call = ['rc-service', service_name, 'restart'] - # _mock_ntp_client_config call above did not specify a client - # value and so it defaults to "ntp" which on Alpine Linux only - # 
supports servers and not pools. -@@ -525,7 +510,7 @@ class TestNtp(FilesystemMockingTestCase): - m_util.is_false.return_value = util.is_false( - cfg['ntp']['enabled']) - cc_ntp.handle('notimportant', cfg, mycloud, None, None) -- m_subp.subp.assert_called_with( -+ m_dsubp.subp.assert_called_with( - expected_service_call, capture=True) - - self.assertEqual(expected_content, util.load_file(confpath)) -@@ -673,9 +658,8 @@ class TestNtp(FilesystemMockingTestCase): - self.assertEqual(sorted(expected_cfg), sorted(result)) - m_which.assert_has_calls([]) - -- @mock.patch('cloudinit.config.cc_ntp.reload_ntp') - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') -- def test_ntp_user_provided_config_with_template(self, m_install, m_reload): -+ def test_ntp_user_provided_config_with_template(self, m_install): - custom = r'\n#MyCustomTemplate' - user_template = NTP_TEMPLATE + custom - confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') -@@ -702,11 +686,10 @@ class TestNtp(FilesystemMockingTestCase): - util.load_file(confpath)) - - @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') -- @mock.patch('cloudinit.config.cc_ntp.reload_ntp') - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_user_provided_config_template_only(self, m_select, m_install, -- m_reload, m_schema): -+ m_schema): - """Test custom template for default client""" - custom = r'\n#MyCustomTemplate' - user_template = NTP_TEMPLATE + custom --- -2.35.3 - diff --git a/SOURCES/ci-Remove-rhel-specific-files.patch b/SOURCES/ci-Remove-rhel-specific-files.patch new file mode 100644 index 0000000..6765543 --- /dev/null +++ b/SOURCES/ci-Remove-rhel-specific-files.patch @@ -0,0 +1,373 @@ +From d43f0d93386f123892451d923c2b3c6fe7130c39 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Thu, 19 May 2022 11:38:22 +0200 +Subject: [PATCH 4/4] Remove rhel specific files + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 65: Align rhel custom files with upstream (#1431) +RH-Commit: [2/2] 5e31f0bcb500682e7746ccbd2e628c2ef339d6c6 +RH-Bugzilla: 2082071 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov + +Remove all files in rhel/ directory and related commands that copy +and replace them with the generated ones. + +Also adjust setup.py, align it with upstream: +- by default, after rhel 8.3 ds-identify is in /usr/libexec, so no need to move it manually +- bash-completions work also in /usr/share, as upstream +- udev also works in /lib/udev + +Also remove rhel/README since it is outdated (chef is used in cloud.cfg) and cloud-init-tmpfiles.conf, +as it exists also in .distro. 
+ +X-downstream-only: yes + +Signed-off-by: Emanuele Giuseppe Esposito +--- + redhat/cloud-init.spec.template | 21 ++------ + rhel/README.rhel | 5 -- + rhel/cloud-init-tmpfiles.conf | 1 - + rhel/cloud.cfg | 69 --------------------------- + rhel/systemd/cloud-config.service | 18 ------- + rhel/systemd/cloud-config.target | 11 ----- + rhel/systemd/cloud-final.service | 24 ---------- + rhel/systemd/cloud-init-local.service | 31 ------------ + rhel/systemd/cloud-init.service | 26 ---------- + rhel/systemd/cloud-init.target | 7 --- + setup.py | 28 ++++++++++- + 11 files changed, 31 insertions(+), 210 deletions(-) + delete mode 100644 rhel/README.rhel + delete mode 100644 rhel/cloud-init-tmpfiles.conf + delete mode 100644 rhel/cloud.cfg + delete mode 100644 rhel/systemd/cloud-config.service + delete mode 100644 rhel/systemd/cloud-config.target + delete mode 100644 rhel/systemd/cloud-final.service + delete mode 100644 rhel/systemd/cloud-init-local.service + delete mode 100644 rhel/systemd/cloud-init.service + delete mode 100644 rhel/systemd/cloud-init.target + + +diff --git a/rhel/README.rhel b/rhel/README.rhel +deleted file mode 100644 +index aa29630d..00000000 +--- a/rhel/README.rhel ++++ /dev/null +@@ -1,5 +0,0 @@ +-The following cloud-init modules are currently unsupported on this OS: +- - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) +- - byobu ('byobu_by_default' option) +- - chef +- - grub_dpkg +diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf +deleted file mode 100644 +index 0c6d2a3b..00000000 +--- a/rhel/cloud-init-tmpfiles.conf ++++ /dev/null +@@ -1 +0,0 @@ +-d /run/cloud-init 0700 root root - - +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +deleted file mode 100644 +index cbee197a..00000000 +--- a/rhel/cloud.cfg ++++ /dev/null +@@ -1,69 +0,0 @@ +-users: +- - default +- +-disable_root: 1 +-ssh_pwauth: 0 +- +-mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] +-resize_rootfs_tmp: /dev +-ssh_deletekeys: 1 +-ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519'] +-syslog_fix_perms: ~ +-disable_vmware_customization: false +- +-cloud_init_modules: +- - disk_setup +- - migrator +- - bootcmd +- - write-files +- - growpart +- - resizefs +- - set_hostname +- - update_hostname +- - update_etc_hosts +- - rsyslog +- - users-groups +- - ssh +- +-cloud_config_modules: +- - mounts +- - locale +- - set-passwords +- - rh_subscription +- - yum-add-repo +- - package-update-upgrade-install +- - timezone +- - puppet +- - chef +- - salt-minion +- - mcollective +- - disable-ec2-metadata +- - runcmd +- +-cloud_final_modules: +- - rightscale_userdata +- - scripts-per-once +- - scripts-per-boot +- - scripts-per-instance +- - scripts-user +- - ssh-authkey-fingerprints +- - keys-to-console +- - phone-home +- - final-message +- - power-state-change +- +-system_info: +- default_user: +- name: cloud-user +- lock_passwd: true +- gecos: Cloud User +- groups: [adm, systemd-journal] +- sudo: ["ALL=(ALL) NOPASSWD:ALL"] +- shell: /bin/bash +- distro: rhel +- paths: +- cloud_dir: /var/lib/cloud +- templates_dir: /etc/cloud/templates +- ssh_svcname: sshd +- +-# vim:syntax=yaml +diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service +deleted file mode 100644 +index f3dcd4be..00000000 +--- a/rhel/systemd/cloud-config.service ++++ /dev/null +@@ -1,18 +0,0 @@ +-[Unit] +-Description=Apply the settings specified in 
cloud-config +-After=network-online.target cloud-config.target +-Wants=network-online.target cloud-config.target +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +- +-[Service] +-Type=oneshot +-ExecStart=/usr/bin/cloud-init modules --mode=config +-RemainAfterExit=yes +-TimeoutSec=0 +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target +deleted file mode 100644 +index ae9b7d02..00000000 +--- a/rhel/systemd/cloud-config.target ++++ /dev/null +@@ -1,11 +0,0 @@ +-# cloud-init normally emits a "cloud-config" upstart event to inform third +-# parties that cloud-config is available, which does us no good when we're +-# using systemd. cloud-config.target serves as this synchronization point +-# instead. Services that would "start on cloud-config" with upstart can +-# instead use "After=cloud-config.target" and "Wants=cloud-config.target" +-# as appropriate. +- +-[Unit] +-Description=Cloud-config availability +-Wants=cloud-init-local.service cloud-init.service +-After=cloud-init-local.service cloud-init.service +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +deleted file mode 100644 +index e281c0cf..00000000 +--- a/rhel/systemd/cloud-final.service ++++ /dev/null +@@ -1,24 +0,0 @@ +-[Unit] +-Description=Execute cloud user/final scripts +-After=network-online.target cloud-config.service rc-local.service +-Wants=network-online.target cloud-config.service +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +- +-[Service] +-Type=oneshot +-ExecStart=/usr/bin/cloud-init modules --mode=final +-RemainAfterExit=yes +-TimeoutSec=0 +-KillMode=process +-# Restart NetworkManager if it is present and running. 
+-ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ +- out=$(systemctl show --property=SubState $u) || exit; \ +- [ "$out" = "SubState=running" ] || exit 0; \ +- systemctl reload-or-try-restart $u' +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +deleted file mode 100644 +index 8f9f6c9f..00000000 +--- a/rhel/systemd/cloud-init-local.service ++++ /dev/null +@@ -1,31 +0,0 @@ +-[Unit] +-Description=Initial cloud-init job (pre-networking) +-DefaultDependencies=no +-Wants=network-pre.target +-After=systemd-remount-fs.service +-Requires=dbus.socket +-After=dbus.socket +-Before=NetworkManager.service network.service +-Before=network-pre.target +-Before=shutdown.target +-Before=firewalld.target +-Conflicts=shutdown.target +-RequiresMountsFor=/var/lib/cloud +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +- +-[Service] +-Type=oneshot +-ExecStartPre=/bin/mkdir -p /run/cloud-init +-ExecStartPre=/sbin/restorecon /run/cloud-init +-ExecStartPre=/usr/bin/touch /run/cloud-init/enabled +-ExecStart=/usr/bin/cloud-init init --local +-ExecStart=/bin/touch /run/cloud-init/network-config-ready +-RemainAfterExit=yes +-TimeoutSec=0 +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +deleted file mode 100644 +index 0b3d796d..00000000 +--- a/rhel/systemd/cloud-init.service ++++ /dev/null +@@ -1,26 +0,0 @@ +-[Unit] +-Description=Initial cloud-init job (metadata service crawler) +-Wants=cloud-init-local.service +-Wants=sshd-keygen.service +-Wants=sshd.service +-After=cloud-init-local.service +-After=NetworkManager.service network.service +-After=NetworkManager-wait-online.service +-Before=network-online.target +-Before=sshd-keygen.service +-Before=sshd.service +-Before=systemd-user-sessions.service +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +- +-[Service] +-Type=oneshot +-ExecStart=/usr/bin/cloud-init init +-RemainAfterExit=yes +-TimeoutSec=0 +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.target b/rhel/systemd/cloud-init.target +deleted file mode 100644 +index 083c3b6f..00000000 +--- a/rhel/systemd/cloud-init.target ++++ /dev/null +@@ -1,7 +0,0 @@ +-# cloud-init target is enabled by cloud-init-generator +-# To disable it you can either: +-# a.) boot with kernel cmdline of 'cloud-init=disabled' +-# b.) 
touch a file /etc/cloud/cloud-init.disabled +-[Unit] +-Description=Cloud-init target +-After=multi-user.target +diff --git a/setup.py b/setup.py +index 3c377eaa..a9132d2c 100755 +--- a/setup.py ++++ b/setup.py +@@ -139,6 +139,21 @@ INITSYS_FILES = { + "sysvinit_deb": [f for f in glob("sysvinit/debian/*") if is_f(f)], + "sysvinit_openrc": [f for f in glob("sysvinit/gentoo/*") if is_f(f)], + "sysvinit_suse": [f for f in glob("sysvinit/suse/*") if is_f(f)], ++ "systemd": [ ++ render_tmpl(f) ++ for f in ( ++ glob("systemd/*.tmpl") ++ + glob("systemd/*.service") ++ + glob("systemd/*.socket") ++ + glob("systemd/*.target") ++ ) ++ if (is_f(f) and not is_generator(f)) ++ ], ++ "systemd.generators": [ ++ render_tmpl(f, mode=0o755) ++ for f in glob("systemd/*") ++ if is_f(f) and is_generator(f) ++ ], + "upstart": [f for f in glob("upstart/*") if is_f(f)], + } + INITSYS_ROOTS = { +@@ -148,6 +163,10 @@ INITSYS_ROOTS = { + "sysvinit_deb": "etc/init.d", + "sysvinit_openrc": "etc/init.d", + "sysvinit_suse": "etc/init.d", ++ "systemd": pkg_config_read("systemd", "systemdsystemunitdir"), ++ "systemd.generators": pkg_config_read( ++ "systemd", "systemdsystemgeneratordir" ++ ), + "upstart": "etc/init/", + } + INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) +@@ -262,13 +281,15 @@ data_files = [ + ( + USR_LIB_EXEC + "/cloud-init", + [ ++ "tools/ds-identify", + "tools/hook-hotplug", + "tools/uncloud-init", + "tools/write-ssh-key-fingerprints", + ], + ), + ( +- ETC + "/bash_completion.d", ["bash_completion/cloud-init"], ++ USR + "/share/bash-completion/completions", ++ ["bash_completion/cloud-init"], + ), + (USR + "/share/doc/cloud-init", [f for f in glob("doc/*") if is_f(f)]), + ( +@@ -287,7 +308,8 @@ if not platform.system().endswith("BSD"): + ETC + "/NetworkManager/dispatcher.d/", + ["tools/hook-network-manager"], + ), +- ("/usr/lib/udev/rules.d", [f for f in glob("udev/*.rules")]), ++ (ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]), ++ (LIB + "/udev/rules.d", [f for f in glob("udev/*.rules")]), + ( + ETC + "/systemd/system/sshd-keygen@.service.d/", + ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"], +@@ -317,6 +339,8 @@ setuptools.setup( + scripts=["tools/cloud-init-per"], + license="Dual-licensed under GPLv3 or Apache 2.0", + data_files=data_files, ++ install_requires=requirements, ++ cmdclass=cmdclass, + entry_points={ + "console_scripts": [ + "cloud-init = cloudinit.cmd.main:main", +-- +2.35.3 + diff --git a/SOURCES/ci-Revert-Add-native-NetworkManager-support-1224.patch b/SOURCES/ci-Revert-Add-native-NetworkManager-support-1224.patch new file mode 100644 index 0000000..e4e3594 --- /dev/null +++ b/SOURCES/ci-Revert-Add-native-NetworkManager-support-1224.patch @@ -0,0 +1,2266 @@ +From f1836e78d20ef34b05b6aba002fc10a97eceb454 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 8 Aug 2022 10:08:50 +0200 +Subject: [PATCH 1/2] Revert "Add native NetworkManager support (#1224)" + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 81: Revert "Use Network-Manager and Netplan as default renderers for RHEL and Fedora (#1465)" +RH-Commit: [1/2] 5b3e51502a89c2dcfbc97dc08a86b792454fedd3 +RH-Bugzilla: 2107464 2110066 2117526 2104393 2098624 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +NM is not yet stable, so we don't want to support it for now. +This reverts commit 0d93e53fd05c44b62e3456b7580c9de8135e6b5a. 
+ +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/cmd/devel/net_convert.py | 14 +- + cloudinit/net/activators.py | 25 +- + cloudinit/net/network_manager.py | 377 ------- + cloudinit/net/renderers.py | 3 - + cloudinit/net/sysconfig.py | 37 +- + tests/unittests/test_net.py | 1268 +++--------------------- + tests/unittests/test_net_activators.py | 93 +- + 7 files changed, 193 insertions(+), 1624 deletions(-) + delete mode 100644 cloudinit/net/network_manager.py + +diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py +index 647fe07b..18b1e7ff 100755 +--- a/cloudinit/cmd/devel/net_convert.py ++++ b/cloudinit/cmd/devel/net_convert.py +@@ -7,14 +7,7 @@ import os + import sys + + from cloudinit import distros, log, safeyaml +-from cloudinit.net import ( +- eni, +- netplan, +- network_manager, +- network_state, +- networkd, +- sysconfig, +-) ++from cloudinit.net import eni, netplan, network_state, networkd, sysconfig + from cloudinit.sources import DataSourceAzure as azure + from cloudinit.sources import DataSourceOVF as ovf + from cloudinit.sources.helpers import openstack +@@ -81,7 +74,7 @@ def get_parser(parser=None): + parser.add_argument( + "-O", + "--output-kind", +- choices=["eni", "netplan", "networkd", "sysconfig", "network-manager"], ++ choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) +@@ -155,9 +148,6 @@ def handle_args(name, args): + elif args.output_kind == "sysconfig": + r_cls = sysconfig.Renderer + config = distro.renderer_configs.get("sysconfig") +- elif args.output_kind == "network-manager": +- r_cls = network_manager.Renderer +- config = distro.renderer_configs.get("network-manager") + else: + raise RuntimeError("Invalid output_kind") + +diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py +index edbc0c06..e80c26df 100644 +--- a/cloudinit/net/activators.py ++++ b/cloudinit/net/activators.py +@@ -1,14 +1,15 @@ + # This file is part of cloud-init. See LICENSE file for license information. + import logging ++import os + from abc import ABC, abstractmethod + from typing import Iterable, List, Type + + from cloudinit import subp, util + from cloudinit.net.eni import available as eni_available + from cloudinit.net.netplan import available as netplan_available +-from cloudinit.net.network_manager import available as nm_available + from cloudinit.net.network_state import NetworkState + from cloudinit.net.networkd import available as networkd_available ++from cloudinit.net.sysconfig import NM_CFG_FILE + + LOG = logging.getLogger(__name__) + +@@ -123,24 +124,20 @@ class IfUpDownActivator(NetworkActivator): + class NetworkManagerActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: +- """Return true if NetworkManager can be used on this system.""" +- return nm_available(target=target) ++ """Return true if network manager can be used on this system.""" ++ config_present = os.path.isfile( ++ subp.target_path(target, path=NM_CFG_FILE) ++ ) ++ nmcli_present = subp.which("nmcli", target=target) ++ return config_present and bool(nmcli_present) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: +- """Bring up connection using nmcli. ++ """Bring up interface using nmcli. 
+ + Return True is successful, otherwise return False + """ +- from cloudinit.net.network_manager import conn_filename +- +- filename = conn_filename(device_name) +- cmd = ["nmcli", "connection", "load", filename] +- if _alter_interface(cmd, device_name): +- cmd = ["nmcli", "connection", "up", "filename", filename] +- else: +- _alter_interface(["nmcli", "connection", "reload"], device_name) +- cmd = ["nmcli", "connection", "up", "ifname", device_name] ++ cmd = ["nmcli", "connection", "up", "ifname", device_name] + return _alter_interface(cmd, device_name) + + @staticmethod +@@ -149,7 +146,7 @@ class NetworkManagerActivator(NetworkActivator): + + Return True is successful, otherwise return False + """ +- cmd = ["nmcli", "device", "disconnect", device_name] ++ cmd = ["nmcli", "connection", "down", device_name] + return _alter_interface(cmd, device_name) + + +diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py +deleted file mode 100644 +index 79b0fe0b..00000000 +--- a/cloudinit/net/network_manager.py ++++ /dev/null +@@ -1,377 +0,0 @@ +-# Copyright 2022 Red Hat, Inc. +-# +-# Author: Lubomir Rintel +-# Fixes and suggestions contributed by James Falcon, Neal Gompa, +-# Zbigniew Jędrzejewski-Szmek and Emanuele Giuseppe Esposito. +-# +-# This file is part of cloud-init. See LICENSE file for license information. +- +-import configparser +-import io +-import itertools +-import os +-import uuid +- +-from cloudinit import log as logging +-from cloudinit import subp, util +- +-from . import renderer +-from .network_state import is_ipv6_addr, subnet_is_ipv6 +- +-NM_RUN_DIR = "/etc/NetworkManager" +-NM_LIB_DIR = "/usr/lib/NetworkManager" +-LOG = logging.getLogger(__name__) +- +- +-class NMConnection: +- """Represents a NetworkManager connection profile.""" +- +- def __init__(self, con_id): +- """ +- Initializes the connection with some very basic properties, +- notably the UUID so that the connection can be referred to. +- """ +- +- # Chosen by fair dice roll +- CI_NM_UUID = uuid.UUID("a3924cb8-09e0-43e9-890b-77972a800108") +- +- self.config = configparser.ConfigParser() +- # Identity option name mapping, to achieve case sensitivity +- self.config.optionxform = str +- +- self.config["connection"] = { +- "id": f"cloud-init {con_id}", +- "uuid": str(uuid.uuid5(CI_NM_UUID, con_id)), +- } +- +- # This is not actually used anywhere, but may be useful in future +- self.config["user"] = { +- "org.freedesktop.NetworkManager.origin": "cloud-init" +- } +- +- def _set_default(self, section, option, value): +- """ +- Sets a property unless it's already set, ensuring the section +- exists. +- """ +- +- if not self.config.has_section(section): +- self.config[section] = {} +- if not self.config.has_option(section, option): +- self.config[section][option] = value +- +- def _set_ip_method(self, family, subnet_type): +- """ +- Ensures there's appropriate [ipv4]/[ipv6] for given family +- appropriate for given configuration type +- """ +- +- method_map = { +- "static": "manual", +- "dhcp6": "dhcp", +- "ipv6_slaac": "auto", +- "ipv6_dhcpv6-stateless": "auto", +- "ipv6_dhcpv6-stateful": "auto", +- "dhcp4": "auto", +- "dhcp": "auto", +- } +- +- # Ensure we got an [ipvX] section +- self._set_default(family, "method", "disabled") +- +- try: +- method = method_map[subnet_type] +- except KeyError: +- # What else can we do +- method = "auto" +- self.config[family]["may-fail"] = "true" +- +- # Make sure we don't "downgrade" the method in case +- # we got conflicting subnets (e.g. 
static along with dhcp) +- if self.config[family]["method"] == "dhcp": +- return +- if self.config[family]["method"] == "auto" and method == "manual": +- return +- +- self.config[family]["method"] = method +- self._set_default(family, "may-fail", "false") +- if family == "ipv6": +- self._set_default(family, "addr-gen-mode", "stable-privacy") +- +- def _add_numbered(self, section, key_prefix, value): +- """ +- Adds a numbered property, such as address or route, ensuring +- the appropriate value gets used for . +- """ +- +- for index in itertools.count(1): +- key = f"{key_prefix}{index}" +- if not self.config.has_option(section, key): +- self.config[section][key] = value +- break +- +- def _add_address(self, family, subnet): +- """ +- Adds an ipv[46]address property. +- """ +- +- value = subnet["address"] + "/" + str(subnet["prefix"]) +- self._add_numbered(family, "address", value) +- +- def _add_route(self, family, route): +- """ +- Adds a ipv[46].route property. +- """ +- +- value = route["network"] + "/" + str(route["prefix"]) +- if "gateway" in route: +- value = value + "," + route["gateway"] +- self._add_numbered(family, "route", value) +- +- def _add_nameserver(self, dns): +- """ +- Extends the ipv[46].dns property with a name server. +- """ +- +- # FIXME: the subnet contains IPv4 and IPv6 name server mixed +- # together. We might be getting an IPv6 name server while +- # we're dealing with an IPv4 subnet. Sort this out by figuring +- # out the correct family and making sure a valid section exist. +- family = "ipv6" if is_ipv6_addr(dns) else "ipv4" +- self._set_default(family, "method", "disabled") +- +- self._set_default(family, "dns", "") +- self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" +- +- def _add_dns_search(self, family, dns_search): +- """ +- Extends the ipv[46].dns-search property with a name server. +- """ +- +- self._set_default(family, "dns-search", "") +- self.config[family]["dns-search"] = ( +- self.config[family]["dns-search"] + ";".join(dns_search) + ";" +- ) +- +- def con_uuid(self): +- """ +- Returns the connection UUID +- """ +- return self.config["connection"]["uuid"] +- +- def valid(self): +- """ +- Can this be serialized into a meaningful connection profile? +- """ +- return self.config.has_option("connection", "type") +- +- @staticmethod +- def mac_addr(addr): +- """ +- Sanitize a MAC address. +- """ +- return addr.replace("-", ":").upper() +- +- def render_interface(self, iface, renderer): +- """ +- Integrate information from network state interface information +- into the connection. Most of the work is done here. 
+- """ +- +- # Initialize type & connectivity +- _type_map = { +- "physical": "ethernet", +- "vlan": "vlan", +- "bond": "bond", +- "bridge": "bridge", +- "infiniband": "infiniband", +- "loopback": None, +- } +- +- if_type = _type_map[iface["type"]] +- if if_type is None: +- return +- if "bond-master" in iface: +- slave_type = "bond" +- else: +- slave_type = None +- +- self.config["connection"]["type"] = if_type +- if slave_type is not None: +- self.config["connection"]["slave-type"] = slave_type +- self.config["connection"]["master"] = renderer.con_ref( +- iface[slave_type + "-master"] +- ) +- +- # Add type specific-section +- self.config[if_type] = {} +- +- # These are the interface properties that map nicely +- # to NetworkManager properties +- _prop_map = { +- "bond": { +- "mode": "bond-mode", +- "miimon": "bond_miimon", +- "xmit_hash_policy": "bond-xmit-hash-policy", +- "num_grat_arp": "bond-num-grat-arp", +- "downdelay": "bond-downdelay", +- "updelay": "bond-updelay", +- "fail_over_mac": "bond-fail-over-mac", +- "primary_reselect": "bond-primary-reselect", +- "primary": "bond-primary", +- }, +- "bridge": { +- "stp": "bridge_stp", +- "priority": "bridge_bridgeprio", +- }, +- "vlan": { +- "id": "vlan_id", +- }, +- "ethernet": {}, +- "infiniband": {}, +- } +- +- device_mtu = iface["mtu"] +- ipv4_mtu = None +- +- # Deal with Layer 3 configuration +- for subnet in iface["subnets"]: +- family = "ipv6" if subnet_is_ipv6(subnet) else "ipv4" +- +- self._set_ip_method(family, subnet["type"]) +- if "address" in subnet: +- self._add_address(family, subnet) +- if "gateway" in subnet: +- self.config[family]["gateway"] = subnet["gateway"] +- for route in subnet["routes"]: +- self._add_route(family, route) +- if "dns_nameservers" in subnet: +- for nameserver in subnet["dns_nameservers"]: +- self._add_nameserver(nameserver) +- if "dns_search" in subnet: +- self._add_dns_search(family, subnet["dns_search"]) +- if family == "ipv4" and "mtu" in subnet: +- ipv4_mtu = subnet["mtu"] +- +- if ipv4_mtu is None: +- ipv4_mtu = device_mtu +- if not ipv4_mtu == device_mtu: +- LOG.warning( +- "Network config: ignoring %s device-level mtu:%s" +- " because ipv4 subnet-level mtu:%s provided.", +- iface["name"], +- device_mtu, +- ipv4_mtu, +- ) +- +- # Parse type-specific properties +- for nm_prop, key in _prop_map[if_type].items(): +- if key not in iface: +- continue +- if iface[key] is None: +- continue +- if isinstance(iface[key], bool): +- self.config[if_type][nm_prop] = ( +- "true" if iface[key] else "false" +- ) +- else: +- self.config[if_type][nm_prop] = str(iface[key]) +- +- # These ones need special treatment +- if if_type == "ethernet": +- if iface["wakeonlan"] is True: +- # NM_SETTING_WIRED_WAKE_ON_LAN_MAGIC +- self.config["ethernet"]["wake-on-lan"] = str(0x40) +- if ipv4_mtu is not None: +- self.config["ethernet"]["mtu"] = str(ipv4_mtu) +- if iface["mac_address"] is not None: +- self.config["ethernet"]["mac-address"] = self.mac_addr( +- iface["mac_address"] +- ) +- if if_type == "vlan" and "vlan-raw-device" in iface: +- self.config["vlan"]["parent"] = renderer.con_ref( +- iface["vlan-raw-device"] +- ) +- if if_type == "bridge": +- # Bridge is ass-backwards compared to bond +- for port in iface["bridge_ports"]: +- port = renderer.get_conn(port) +- port._set_default("connection", "slave-type", "bridge") +- port._set_default("connection", "master", self.con_uuid()) +- if iface["mac_address"] is not None: +- self.config["bridge"]["mac-address"] = self.mac_addr( +- iface["mac_address"] +- ) +- if if_type == 
"infiniband" and ipv4_mtu is not None: +- self.config["infiniband"]["transport-mode"] = "datagram" +- self.config["infiniband"]["mtu"] = str(ipv4_mtu) +- if iface["mac_address"] is not None: +- self.config["infiniband"]["mac-address"] = self.mac_addr( +- iface["mac_address"] +- ) +- +- # Finish up +- if if_type == "bridge" or not self.config.has_option( +- if_type, "mac-address" +- ): +- self.config["connection"]["interface-name"] = iface["name"] +- +- def dump(self): +- """ +- Stringify. +- """ +- +- buf = io.StringIO() +- self.config.write(buf, space_around_delimiters=False) +- header = "# Generated by cloud-init. Changes will be lost.\n\n" +- return header + buf.getvalue() +- +- +-class Renderer(renderer.Renderer): +- """Renders network information in a NetworkManager keyfile format.""" +- +- def __init__(self, config=None): +- self.connections = {} +- +- def get_conn(self, con_id): +- return self.connections[con_id] +- +- def con_ref(self, con_id): +- if con_id in self.connections: +- return self.connections[con_id].con_uuid() +- else: +- # Well, what can we do... +- return con_id +- +- def render_network_state(self, network_state, templates=None, target=None): +- # First pass makes sure there's NMConnections for all known +- # interfaces that have UUIDs that can be linked to from related +- # interfaces +- for iface in network_state.iter_interfaces(): +- self.connections[iface["name"]] = NMConnection(iface["name"]) +- +- # Now render the actual interface configuration +- for iface in network_state.iter_interfaces(): +- conn = self.connections[iface["name"]] +- conn.render_interface(iface, self) +- +- # And finally write the files +- for con_id, conn in self.connections.items(): +- if not conn.valid(): +- continue +- name = conn_filename(con_id, target) +- util.write_file(name, conn.dump(), 0o600) +- +- +-def conn_filename(con_id, target=None): +- target_con_dir = subp.target_path(target, NM_RUN_DIR) +- con_file = f"cloud-init-{con_id}.nmconnection" +- return f"{target_con_dir}/system-connections/{con_file}" +- +- +-def available(target=None): +- target_nm_dir = subp.target_path(target, NM_LIB_DIR) +- return os.path.exists(target_nm_dir) +- +- +-# vi: ts=4 expandtab +diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py +index 7edc34b5..c755f04c 100644 +--- a/cloudinit/net/renderers.py ++++ b/cloudinit/net/renderers.py +@@ -8,7 +8,6 @@ from . 
import ( + freebsd, + netbsd, + netplan, +- network_manager, + networkd, + openbsd, + renderer, +@@ -20,7 +19,6 @@ NAME_TO_RENDERER = { + "freebsd": freebsd, + "netbsd": netbsd, + "netplan": netplan, +- "network-manager": network_manager, + "networkd": networkd, + "openbsd": openbsd, + "sysconfig": sysconfig, +@@ -30,7 +28,6 @@ DEFAULT_PRIORITY = [ + "eni", + "sysconfig", + "netplan", +- "network-manager", + "freebsd", + "netbsd", + "openbsd", +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index c3b0c795..362e8d19 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -5,6 +5,8 @@ import io + import os + import re + ++from configobj import ConfigObj ++ + from cloudinit import log as logging + from cloudinit import subp, util + from cloudinit.distros.parsers import networkmanager_conf, resolv_conf +@@ -64,6 +66,24 @@ def _quote_value(value): + return value + + ++def enable_ifcfg_rh(path): ++ """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" ++ config = ConfigObj(path) ++ if "main" in config: ++ if "plugins" in config["main"]: ++ if "ifcfg-rh" in config["main"]["plugins"]: ++ return ++ else: ++ config["main"]["plugins"] = [] ++ ++ if isinstance(config["main"]["plugins"], list): ++ config["main"]["plugins"].append("ifcfg-rh") ++ else: ++ config["main"]["plugins"] = [config["main"]["plugins"], "ifcfg-rh"] ++ config.write() ++ LOG.debug("Enabled ifcfg-rh NetworkManager plugins") ++ ++ + class ConfigMap(object): + """Sysconfig like dictionary object.""" + +@@ -1011,6 +1031,8 @@ class Renderer(renderer.Renderer): + netrules_content = self._render_persistent_net(network_state) + netrules_path = subp.target_path(target, self.netrules_path) + util.write_file(netrules_path, netrules_content, file_mode) ++ if available_nm(target=target): ++ enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE)) + + sysconfig_path = subp.target_path(target, templates.get("control")) + # Distros configuring /etc/sysconfig/network as a file e.g. Centos +@@ -1049,9 +1071,14 @@ def _supported_vlan_names(rdev, vid): + + + def available(target=None): +- if not util.system_info()["variant"] in KNOWN_DISTROS: +- return False ++ sysconfig = available_sysconfig(target=target) ++ nm = available_nm(target=target) ++ return util.system_info()["variant"] in KNOWN_DISTROS and any( ++ [nm, sysconfig] ++ ) ++ + ++def available_sysconfig(target=None): + expected = ["ifup", "ifdown"] + search = ["/sbin", "/usr/sbin"] + for p in expected: +@@ -1068,4 +1095,10 @@ def available(target=None): + return False + + ++def available_nm(target=None): ++ if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)): ++ return False ++ return True ++ ++ + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index ef21ad76..591241b3 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -21,7 +21,6 @@ from cloudinit.net import ( + interface_has_own_mac, + natural_sort_key, + netplan, +- network_manager, + network_state, + networkd, + renderers, +@@ -612,37 +611,6 @@ dns = none + ), + ), + ], +- "expected_network_manager": [ +- ( +- "".join( +- [ +- "etc/NetworkManager/system-connections", +- "/cloud-init-eth0.nmconnection", +- ] +- ), +- """ +-# Generated by cloud-init. Changes will be lost. 
+- +-[connection] +-id=cloud-init eth0 +-uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +-type=ethernet +- +-[user] +-org.freedesktop.NetworkManager.origin=cloud-init +- +-[ethernet] +-mac-address=FA:16:3E:ED:9A:59 +- +-[ipv4] +-method=manual +-may-fail=false +-address1=172.19.1.34/22 +-route1=0.0.0.0/0,172.19.3.254 +- +-""".lstrip(), +- ), +- ], + }, + { + "in_data": { +@@ -1105,50 +1073,6 @@ NETWORK_CONFIGS = { + USERCTL=no""" + ), + }, +- "expected_network_manager": { +- "cloud-init-eth1.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth1 +- uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=CF:D6:AF:48:E8:80 +- +- """ +- ), +- "cloud-init-eth99.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth99 +- uuid=b1b88000-1f03-5360-8377-1a2205efffb4 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=C0:D6:9F:2C:E8:80 +- +- [ipv4] +- method=auto +- may-fail=false +- address1=192.168.21.3/24 +- route1=0.0.0.0/0,65.61.151.37 +- dns=8.8.8.8;8.8.4.4; +- dns-search=barley.maas;sach.maas; +- +- """ +- ), +- }, + "yaml": textwrap.dedent( + """ + version: 1 +@@ -1221,34 +1145,6 @@ NETWORK_CONFIGS = { + STARTMODE=auto""" + ) + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv4] +- method=auto +- may-fail=false +- +- [ipv6] +- method=dhcp +- may-fail=false +- addr-gen-mode=stable-privacy +- +- """ +- ), +- }, + "yaml": textwrap.dedent( + """\ + version: 1 +@@ -1351,37 +1247,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mtu=9000 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.14.2/24 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::1/64 +- +- """ +- ), +- }, + }, + "v6_and_v4": { + "expected_sysconfig_opensuse": { +@@ -1392,34 +1257,6 @@ NETWORK_CONFIGS = { + STARTMODE=auto""" + ) + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv6] +- method=dhcp +- may-fail=false +- addr-gen-mode=stable-privacy +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- }, + "yaml": textwrap.dedent( + """\ + version: 1 +@@ -1493,30 +1330,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv6] +- method=dhcp +- may-fail=false +- addr-gen-mode=stable-privacy +- +- """ +- ), +- }, + }, + "dhcpv6_accept_ra": { + "expected_eni": textwrap.dedent( +@@ -1724,30 +1537,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv6] +- method=auto +- may-fail=false +- addr-gen-mode=stable-privacy +- +- """ +- ), +- }, + }, + "static6": { + "yaml": textwrap.dedent( +@@ -1836,30 +1625,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv6] +- method=auto +- may-fail=false +- addr-gen-mode=stable-privacy +- +- """ +- ), +- }, + }, + "dhcpv6_stateful": { + "expected_eni": textwrap.dedent( +@@ -1959,29 +1724,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 +@@ -2035,30 +1777,6 @@ NETWORK_CONFIGS = { + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-iface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init iface0 +- uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 +- type=ethernet +- interface-name=iface0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- wake-on-lan=64 +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 +@@ -2497,254 +2215,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + USERCTL=no""" + ), + }, +- "expected_network_manager": { +- "cloud-init-eth3.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth3 +- uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 +- type=ethernet +- slave-type=bridge +- master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=66:BB:9F:2C:E8:80 +- +- """ +- ), +- "cloud-init-eth5.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init eth5 +- uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=98:BB:9F:2C:E8:8A +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- "cloud-init-ib0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init ib0 +- uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b +- type=infiniband +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [infiniband] +- transport-mode=datagram +- mtu=9000 +- mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.200.7/24 +- +- """ +- ), +- "cloud-init-bond0.200.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init bond0.200 +- uuid=88984a9c-ff22-5233-9267-86315e0acaa7 +- type=vlan +- interface-name=bond0.200 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [vlan] +- id=200 +- parent=54317911-f840-516b-a10d-82cb4c1f075c +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- "cloud-init-eth0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth0 +- uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=C0:D6:9F:2C:E8:80 +- +- """ +- ), +- "cloud-init-eth4.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth4 +- uuid=e27e4959-fb50-5580-b9a4-2073554627b9 +- type=ethernet +- slave-type=bridge +- master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=98:BB:9F:2C:E8:80 +- +- """ +- ), +- "cloud-init-eth1.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth1 +- uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 +- type=ethernet +- slave-type=bond +- master=54317911-f840-516b-a10d-82cb4c1f075c +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=AA:D6:9F:2C:E8:80 +- +- """ +- ), +- "cloud-init-br0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init br0 +- uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- type=bridge +- interface-name=br0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [bridge] +- stp=false +- priority=22 +- mac-address=BB:BB:BB:BB:BB:AA +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.14.2/24 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::1/64 +- route1=::/0,2001:4800:78ff:1b::1 +- +- """ +- ), +- "cloud-init-eth0.101.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init eth0.101 +- uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf +- type=vlan +- interface-name=eth0.101 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [vlan] +- id=101 +- parent=1dd9a779-d327-56e1-8454-c65e2556c12c +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.0.2/24 +- gateway=192.168.0.1 +- dns=192.168.0.10;10.23.23.134; +- dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; +- address2=192.168.2.10/24 +- +- """ +- ), +- "cloud-init-bond0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init bond0 +- uuid=54317911-f840-516b-a10d-82cb4c1f075c +- type=bond +- interface-name=bond0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [bond] +- mode=active-backup +- miimon=100 +- xmit_hash_policy=layer3+4 +- +- [ipv6] +- method=dhcp +- may-fail=false +- addr-gen-mode=stable-privacy +- +- """ +- ), +- "cloud-init-eth2.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth2 +- uuid=5559a242-3421-5fdd-896e-9cb8313d5804 +- type=ethernet +- slave-type=bond +- master=54317911-f840-516b-a10d-82cb4c1f075c +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=C0:BB:9F:2C:E8:80 +- +- """ +- ), +- }, + "yaml": textwrap.dedent( + """ + version: 1 +@@ -2933,10 +2403,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + - type: static + address: 2001:1::1/92 + routes: +- - gateway: 2001:67c:1562::1 ++ - gateway: 2001:67c:1562:1 + network: 2001:67c:1 + netmask: "ffff:ffff::" +- - gateway: 3001:67c:15::1 ++ - gateway: 3001:67c:1562:1 + network: 3001:67c:1 + netmask: "ffff:ffff::" + metric: 10000 +@@ -2981,10 +2451,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c:1/32 +- via: 2001:67c:1562::1 ++ via: 2001:67c:1562:1 + - metric: 10000 + to: 3001:67c:1/32 +- via: 3001:67c:15::1 ++ via: 3001:67c:1562:1 + """ + ), + "expected_eni": textwrap.dedent( +@@ -3044,11 +2514,11 @@ iface bond0 inet static + # control-alias bond0 + iface bond0 inet6 static + address 2001:1::1/92 +- post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562::1 || true +- pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562::1 || true +- post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:15::1 metric 10000 \ ++ post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true ++ pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true ++ post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ + || true +- pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:15::1 metric 10000 \ ++ pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ + || true + """ + ), +@@ -3091,8 +2561,8 @@ iface bond0 inet6 static + - to: 2001:67c:1562:8007::1/64 + via: 2001:67c:1562:8007::aac:40b2 + - metric: 10000 +- to: 3001:67c:15:8007::1/64 +- via: 3001:67c:15:8007::aac:40b2 ++ to: 3001:67c:1562:8007::1/64 ++ via: 3001:67c:1562:8007::aac:40b2 + """ + ), + "expected_netplan-v2": textwrap.dedent( +@@ -3124,8 +2594,8 @@ iface bond0 inet6 static + - to: 2001:67c:1562:8007::1/64 + via: 2001:67c:1562:8007::aac:40b2 + - metric: 10000 +- to: 3001:67c:15:8007::1/64 +- via: 3001:67c:15:8007::aac:40b2 ++ to: 3001:67c:1562:8007::1/64 ++ via: 3001:67c:1562:8007::aac:40b2 + ethernets: + eth0: + match: +@@ -3224,8 +2694,8 @@ iface bond0 
inet6 static + """\ + # Created by cloud-init on instance boot automatically, do not edit. + # +- 2001:67c:1/32 via 2001:67c:1562::1 dev bond0 +- 3001:67c:1/32 via 3001:67c:15::1 metric 10000 dev bond0 ++ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0 ++ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( +@@ -3248,88 +2718,6 @@ iface bond0 inet6 static + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-bond0s0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init bond0s0 +- uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e +- type=ethernet +- slave-type=bond +- master=54317911-f840-516b-a10d-82cb4c1f075c +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=AA:BB:CC:DD:E8:00 +- +- """ +- ), +- "cloud-init-bond0s1.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init bond0s1 +- uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 +- type=ethernet +- slave-type=bond +- master=54317911-f840-516b-a10d-82cb4c1f075c +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=AA:BB:CC:DD:E8:01 +- +- """ +- ), +- "cloud-init-bond0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init bond0 +- uuid=54317911-f840-516b-a10d-82cb4c1f075c +- type=bond +- interface-name=bond0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [bond] +- mode=active-backup +- miimon=100 +- xmit_hash_policy=layer3+4 +- num_grat_arp=5 +- downdelay=10 +- updelay=20 +- fail_over_mac=active +- primary_reselect=always +- primary=bond0s0 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.0.2/24 +- gateway=192.168.0.1 +- route1=10.1.3.0/24,192.168.0.3 +- address2=192.168.1.2/24 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::1/92 +- route1=2001:67c:1/32,2001:67c:1562::1 +- route2=3001:67c:1/32,3001:67c:15::1 +- +- """ +- ), +- }, + }, + "vlan": { + "yaml": textwrap.dedent( +@@ -3413,58 +2801,6 @@ iface bond0 inet6 static + VLAN=yes""" + ), + }, +- "expected_network_manager": { +- "cloud-init-en0.99.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init en0.99 +- uuid=f594e2ed-f107-51df-b225-1dc530a5356b +- type=vlan +- interface-name=en0.99 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [vlan] +- id=99 +- parent=e0ca478b-8d84-52ab-8fae-628482c629b5 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.2.2/24 +- address2=192.168.1.2/24 +- gateway=192.168.1.1 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::bbbb/96 +- route1=::/0,2001:1::1 +- +- """ +- ), +- "cloud-init-en0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init en0 +- uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=AA:BB:CC:DD:E8:00 +- +- """ +- ), +- }, + }, + "bridge": { + "yaml": textwrap.dedent( +@@ -3573,82 +2909,6 @@ iface bond0 inet6 static + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-br0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init br0 +- uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- type=bridge +- interface-name=br0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [bridge] +- stp=false +- priority=22 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.2.2/24 +- +- """ +- ), +- "cloud-init-eth0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth0 +- uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +- type=ethernet +- slave-type=bridge +- master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=52:54:00:12:34:00 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::100/96 +- +- """ +- ), +- "cloud-init-eth1.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth1 +- uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 +- type=ethernet +- slave-type=bridge +- master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=52:54:00:12:34:01 +- +- [ipv6] +- method=manual +- may-fail=false +- addr-gen-mode=stable-privacy +- address1=2001:1::101/96 +- +- """ +- ), +- }, + }, + "manual": { + "yaml": textwrap.dedent( +@@ -3777,92 +3037,25 @@ iface bond0 inet6 static + """ + ), + }, +- "expected_network_manager": { +- "cloud-init-eth0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth0 +- uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +- type=ethernet ++ }, ++} + +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init + +- [ethernet] +- mac-address=52:54:00:12:34:00 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=192.168.1.2/24 +- +- """ +- ), +- "cloud-init-eth1.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init eth1 +- uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mtu=1480 +- mac-address=52:54:00:12:34:AA +- +- [ipv4] +- method=auto +- may-fail=true +- +- """ +- ), +- "cloud-init-eth2.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init eth2 +- uuid=5559a242-3421-5fdd-896e-9cb8313d5804 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=52:54:00:12:34:FF +- +- [ipv4] +- method=auto +- may-fail=true +- +- """ +- ), +- }, +- }, +-} +- +- +-CONFIG_V1_EXPLICIT_LOOPBACK = { +- "version": 1, +- "config": [ +- { +- "name": "eth0", +- "type": "physical", +- "subnets": [{"control": "auto", "type": "dhcp"}], +- }, +- { +- "name": "lo", +- "type": "loopback", +- "subnets": [{"control": "auto", "type": "loopback"}], +- }, +- ], +-} ++CONFIG_V1_EXPLICIT_LOOPBACK = { ++ "version": 1, ++ "config": [ ++ { ++ "name": "eth0", ++ "type": "physical", ++ "subnets": [{"control": "auto", "type": "dhcp"}], ++ }, ++ { ++ "name": "lo", ++ "type": "loopback", ++ "subnets": [{"control": "auto", "type": "loopback"}], ++ }, ++ ], ++} + + + CONFIG_V1_SIMPLE_SUBNET = { +@@ -4304,6 +3497,7 @@ class TestRhelSysConfigRendering(CiTestCase): + + with_logs = True + ++ nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" + scripts_dir = "/etc/sysconfig/network-scripts" + header = ( + "# Created by cloud-init on instance boot automatically, " +@@ -4878,6 +4072,78 @@ USERCTL=no + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + ++ def test_check_ifcfg_rh(self): ++ """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" ++ render_dir = self.tmp_dir() ++ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is a list here ++ with open(nm_cfg, "w") as fh: ++ fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n") ++ self.assertTrue(os.path.exists(nm_cfg)) ++ ++ # render and read ++ entry = NETWORK_CONFIGS["small"] ++ found = self._render_and_read( ++ network_config=yaml.load(entry["yaml"]), dir=render_dir ++ ) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn("ifcfg-rh", config["main"]["plugins"]) ++ ++ def test_check_ifcfg_rh_plugins_string(self): ++ """ifcfg-rh plugin is append when plugins is a string.""" ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is a value here ++ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n") ++ ++ # render and read ++ entry = NETWORK_CONFIGS["small"] ++ found = self._render_and_read( ++ network_config=yaml.load(entry["yaml"]), dir=render_dir ++ ) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check raw content has plugin ++ nm_file_content = util.load_file(nm_cfg) ++ self.assertIn("ifcfg-rh", nm_file_content) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn("ifcfg-rh", config["main"]["plugins"]) ++ ++ def test_check_ifcfg_rh_plugins_no_plugins(self): ++ """enable_ifcfg_plugin creates plugins value if missing.""" ++ render_dir = self.tmp_path("render") ++ os.makedirs(render_dir) ++ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file) ++ util.ensure_dir(os.path.dirname(nm_cfg)) ++ ++ # write a template nm.conf, note plugins is missing ++ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n") ++ 
self.assertTrue(os.path.exists(nm_cfg)) ++ ++ # render and read ++ entry = NETWORK_CONFIGS["small"] ++ found = self._render_and_read( ++ network_config=yaml.load(entry["yaml"]), dir=render_dir ++ ) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ # check ifcfg-rh is in the 'plugins' list ++ config = sysconfig.ConfigObj(nm_cfg) ++ self.assertIn("ifcfg-rh", config["main"]["plugins"]) ++ + def test_netplan_dhcp_false_disable_dhcp_in_state(self): + """netplan config with dhcp[46]: False should not add dhcp in state""" + net_config = yaml.load(NETPLAN_DHCP_FALSE) +@@ -5433,281 +4699,6 @@ STARTMODE=auto + self._assert_headers(found) + + +-@mock.patch( +- "cloudinit.net.is_openvswitch_internal_interface", +- mock.Mock(return_value=False), +-) +-class TestNetworkManagerRendering(CiTestCase): +- +- with_logs = True +- +- scripts_dir = "/etc/NetworkManager/system-connections" +- +- expected_name = "expected_network_manager" +- +- def _get_renderer(self): +- return network_manager.Renderer() +- +- def _render_and_read(self, network_config=None, state=None, dir=None): +- if dir is None: +- dir = self.tmp_dir() +- +- if network_config: +- ns = network_state.parse_net_config_data(network_config) +- elif state: +- ns = state +- else: +- raise ValueError("Expected data or state, got neither") +- +- renderer = self._get_renderer() +- renderer.render_network_state(ns, target=dir) +- return dir2dict(dir) +- +- def _compare_files_to_expected(self, expected, found): +- orig_maxdiff = self.maxDiff +- expected_d = dict( +- (os.path.join(self.scripts_dir, k), v) for k, v in expected.items() +- ) +- +- try: +- self.maxDiff = None +- self.assertEqual(expected_d, found) +- finally: +- self.maxDiff = orig_maxdiff +- +- @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") +- @mock.patch("cloudinit.net.sys_dev_path") +- @mock.patch("cloudinit.net.read_sys_net") +- @mock.patch("cloudinit.net.get_devicelist") +- def test_default_generation( +- self, +- mock_get_devicelist, +- mock_read_sys_net, +- mock_sys_dev_path, +- m_get_cmdline, +- ): +- tmp_dir = self.tmp_dir() +- _setup_test( +- tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path +- ) +- +- network_cfg = net.generate_fallback_config() +- ns = network_state.parse_net_config_data( +- network_cfg, skip_broken=False +- ) +- +- render_dir = os.path.join(tmp_dir, "render") +- os.makedirs(render_dir) +- +- renderer = self._get_renderer() +- renderer.render_network_state(ns, target=render_dir) +- +- found = dir2dict(render_dir) +- self._compare_files_to_expected( +- { +- "cloud-init-eth1000.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init eth1000 +- uuid=8c517500-0c95-5308-9c8a-3092eebc44eb +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=07:1C:C6:75:A4:BE +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- }, +- found, +- ) +- +- def test_openstack_rendering_samples(self): +- for os_sample in OS_SAMPLES: +- render_dir = self.tmp_dir() +- ex_input = os_sample["in_data"] +- ex_mac_addrs = os_sample["in_macs"] +- network_cfg = openstack.convert_net_json( +- ex_input, known_macs=ex_mac_addrs +- ) +- ns = network_state.parse_net_config_data( +- network_cfg, skip_broken=False +- ) +- renderer = self._get_renderer() +- # render a multiple times to simulate reboots +- renderer.render_network_state(ns, target=render_dir) +- renderer.render_network_state(ns, target=render_dir) +- renderer.render_network_state(ns, target=render_dir) +- for fn, expected_content in os_sample.get(self.expected_name, []): +- with open(os.path.join(render_dir, fn)) as fh: +- self.assertEqual(expected_content, fh.read()) +- +- def test_network_config_v1_samples(self): +- ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) +- render_dir = self.tmp_path("render") +- os.makedirs(render_dir) +- renderer = self._get_renderer() +- renderer.render_network_state(ns, target=render_dir) +- found = dir2dict(render_dir) +- self._compare_files_to_expected( +- { +- "cloud-init-interface0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. +- +- [connection] +- id=cloud-init interface0 +- uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 +- type=ethernet +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- mac-address=52:54:00:12:34:00 +- +- [ipv4] +- method=manual +- may-fail=false +- address1=10.0.2.15/24 +- gateway=10.0.2.2 +- +- """ +- ), +- }, +- found, +- ) +- +- def test_config_with_explicit_loopback(self): +- render_dir = self.tmp_path("render") +- os.makedirs(render_dir) +- ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) +- renderer = self._get_renderer() +- renderer.render_network_state(ns, target=render_dir) +- found = dir2dict(render_dir) +- self._compare_files_to_expected( +- { +- "cloud-init-eth0.nmconnection": textwrap.dedent( +- """\ +- # Generated by cloud-init. Changes will be lost. 
+- +- [connection] +- id=cloud-init eth0 +- uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +- type=ethernet +- interface-name=eth0 +- +- [user] +- org.freedesktop.NetworkManager.origin=cloud-init +- +- [ethernet] +- +- [ipv4] +- method=auto +- may-fail=false +- +- """ +- ), +- }, +- found, +- ) +- +- def test_bond_config(self): +- entry = NETWORK_CONFIGS["bond"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_vlan_config(self): +- entry = NETWORK_CONFIGS["vlan"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_bridge_config(self): +- entry = NETWORK_CONFIGS["bridge"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_manual_config(self): +- entry = NETWORK_CONFIGS["manual"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_all_config(self): +- entry = NETWORK_CONFIGS["all"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- self.assertNotIn( +- "WARNING: Network config: ignoring eth0.101 device-level mtu", +- self.logs.getvalue(), +- ) +- +- def test_small_config(self): +- entry = NETWORK_CONFIGS["small"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_v4_and_v6_static_config(self): +- entry = NETWORK_CONFIGS["v4_and_v6_static"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- expected_msg = ( +- "WARNING: Network config: ignoring iface0 device-level mtu:8999" +- " because ipv4 subnet-level mtu:9000 provided." 
+- ) +- self.assertIn(expected_msg, self.logs.getvalue()) +- +- def test_dhcpv6_only_config(self): +- entry = NETWORK_CONFIGS["dhcpv6_only"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_simple_render_ipv6_slaac(self): +- entry = NETWORK_CONFIGS["ipv6_slaac"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_dhcpv6_stateless_config(self): +- entry = NETWORK_CONFIGS["dhcpv6_stateless"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_wakeonlan_disabled_config_v2(self): +- entry = NETWORK_CONFIGS["wakeonlan_disabled"] +- found = self._render_and_read( +- network_config=yaml.load(entry["yaml_v2"]) +- ) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_wakeonlan_enabled_config_v2(self): +- entry = NETWORK_CONFIGS["wakeonlan_enabled"] +- found = self._render_and_read( +- network_config=yaml.load(entry["yaml_v2"]) +- ) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_render_v4_and_v6(self): +- entry = NETWORK_CONFIGS["v4_and_v6"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- def test_render_v6_and_v4(self): +- entry = NETWORK_CONFIGS["v6_and_v4"] +- found = self._render_and_read(network_config=yaml.load(entry["yaml"])) +- self._compare_files_to_expected(entry[self.expected_name], found) +- +- +-@mock.patch( +- "cloudinit.net.is_openvswitch_internal_interface", +- mock.Mock(return_value=False), +-) + class TestEniNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") + @mock.patch("cloudinit.net.sys_dev_path") +@@ -7145,9 +6136,9 @@ class TestNetworkdRoundTrip(CiTestCase): + + class TestRenderersSelect: + @pytest.mark.parametrize( +- "renderer_selected,netplan,eni,sys,network_manager,networkd", ++ "renderer_selected,netplan,eni,nm,scfg,sys,networkd", + ( +- # -netplan -ifupdown -sys -network-manager -networkd raises error ++ # -netplan -ifupdown -nm -scfg -sys raises error + ( + net.RendererNotFoundError, + False, +@@ -7155,51 +6146,52 @@ class TestRenderersSelect: + False, + False, + False, ++ False, + ), +- # -netplan +ifupdown -sys -nm -networkd selects eni +- ("eni", False, True, False, False, False), +- # +netplan +ifupdown -sys -nm -networkd selects eni +- ("eni", True, True, False, False, False), +- # +netplan -ifupdown -sys -nm -networkd selects netplan +- ("netplan", True, False, False, False, False), +- # +netplan -ifupdown -sys -nm -networkd selects netplan +- ("netplan", True, False, False, False, False), +- # -netplan -ifupdown +sys -nm -networkd selects sysconfig +- ("sysconfig", False, False, True, False, False), +- # -netplan -ifupdown +sys +nm -networkd selects sysconfig +- ("sysconfig", False, False, True, True, False), +- # -netplan -ifupdown -sys +nm -networkd selects nm +- ("network-manager", False, False, False, True, False), +- # -netplan -ifupdown -sys +nm +networkd selects nm +- ("network-manager", False, False, False, True, True), +- # -netplan -ifupdown -sys -nm +networkd selects networkd +- ("networkd", False, False, False, False, True), ++ # -netplan +ifupdown -nm -scfg -sys selects eni ++ ("eni", False, True, False, False, False, 
False), ++ # +netplan +ifupdown -nm -scfg -sys selects eni ++ ("eni", True, True, False, False, False, False), ++ # +netplan -ifupdown -nm -scfg -sys selects netplan ++ ("netplan", True, False, False, False, False, False), ++ # Ubuntu with Network-Manager installed ++ # +netplan -ifupdown +nm -scfg -sys selects netplan ++ ("netplan", True, False, True, False, False, False), ++ # Centos/OpenSuse with Network-Manager installed selects sysconfig ++ # -netplan -ifupdown +nm -scfg +sys selects netplan ++ ("sysconfig", False, False, True, False, True, False), ++ # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd ++ ("networkd", False, False, False, False, False, True), + ), + ) + @mock.patch("cloudinit.net.renderers.networkd.available") +- @mock.patch("cloudinit.net.renderers.network_manager.available") + @mock.patch("cloudinit.net.renderers.netplan.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available") ++ @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") ++ @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_valid_renderer_from_defaults_depending_on_availability( + self, + m_eni_avail, ++ m_nm_avail, ++ m_scfg_avail, + m_sys_avail, + m_netplan_avail, +- m_network_manager_avail, + m_networkd_avail, + renderer_selected, + netplan, + eni, ++ nm, ++ scfg, + sys, +- network_manager, + networkd, + ): + """Assert proper renderer per DEFAULT_PRIORITY given availability.""" + m_eni_avail.return_value = eni # ifupdown pkg presence ++ m_nm_avail.return_value = nm # network-manager presence ++ m_scfg_avail.return_value = scfg # sysconfig presence + m_sys_avail.return_value = sys # sysconfig/ifup/down presence + m_netplan_avail.return_value = netplan # netplan presence +- m_network_manager_avail.return_value = network_manager # NM presence + m_networkd_avail.return_value = networkd # networkd presence + if isinstance(renderer_selected, str): + (renderer_name, _rnd_class) = renderers.select( +@@ -7257,7 +6249,7 @@ class TestNetRenderers(CiTestCase): + priority=["sysconfig", "eni"], + ) + +- @mock.patch("cloudinit.net.sysconfig.available") ++ @mock.patch("cloudinit.net.sysconfig.available_sysconfig") + @mock.patch("cloudinit.util.system_info") + def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail): + m_avail.return_value = True +diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py +index 4525c49c..3c29e2f7 100644 +--- a/tests/unittests/test_net_activators.py ++++ b/tests/unittests/test_net_activators.py +@@ -41,20 +41,18 @@ NETPLAN_CALL_LIST = [ + + @pytest.fixture + def available_mocks(): +- mocks = namedtuple("Mocks", "m_which, m_file, m_exists") ++ mocks = namedtuple("Mocks", "m_which, m_file") + with patch("cloudinit.subp.which", return_value=True) as m_which: + with patch("os.path.isfile", return_value=True) as m_file: +- with patch("os.path.exists", return_value=True) as m_exists: +- yield mocks(m_which, m_file, m_exists) ++ yield mocks(m_which, m_file) + + + @pytest.fixture + def unavailable_mocks(): +- mocks = namedtuple("Mocks", "m_which, m_file, m_exists") ++ mocks = namedtuple("Mocks", "m_which, m_file") + with patch("cloudinit.subp.which", return_value=False) as m_which: + with patch("os.path.isfile", return_value=False) as m_file: +- with patch("os.path.exists", return_value=False) as m_exists: +- yield mocks(m_which, m_file, m_exists) ++ yield mocks(m_which, m_file) + + + class TestSearchAndSelect: +@@ -115,6 
+113,10 @@ NETPLAN_AVAILABLE_CALLS = [ + (("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}), + ] + ++NETWORK_MANAGER_AVAILABLE_CALLS = [ ++ (("nmcli",), {"target": None}), ++] ++ + NETWORKD_AVAILABLE_CALLS = [ + (("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}), + (("systemctl",), {"search": ["/usr/sbin", "/bin"], "target": None}), +@@ -126,6 +128,7 @@ NETWORKD_AVAILABLE_CALLS = [ + [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS), ++ (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), + (NetworkdActivator, NETWORKD_AVAILABLE_CALLS), + ], + ) +@@ -141,72 +144,8 @@ IF_UP_DOWN_BRING_UP_CALL_LIST = [ + ] + + NETWORK_MANAGER_BRING_UP_CALL_LIST = [ +- ( +- ( +- [ +- "nmcli", +- "connection", +- "load", +- "".join( +- [ +- "/etc/NetworkManager/system-connections", +- "/cloud-init-eth0.nmconnection", +- ] +- ), +- ], +- ), +- {}, +- ), +- ( +- ( +- [ +- "nmcli", +- "connection", +- "up", +- "filename", +- "".join( +- [ +- "/etc/NetworkManager/system-connections", +- "/cloud-init-eth0.nmconnection", +- ] +- ), +- ], +- ), +- {}, +- ), +- ( +- ( +- [ +- "nmcli", +- "connection", +- "load", +- "".join( +- [ +- "/etc/NetworkManager/system-connections", +- "/cloud-init-eth1.nmconnection", +- ] +- ), +- ], +- ), +- {}, +- ), +- ( +- ( +- [ +- "nmcli", +- "connection", +- "up", +- "filename", +- "".join( +- [ +- "/etc/NetworkManager/system-connections", +- "/cloud-init-eth1.nmconnection", +- ] +- ), +- ], +- ), +- {}, +- ), ++ ((["nmcli", "connection", "up", "ifname", "eth0"],), {}), ++ ((["nmcli", "connection", "up", "ifname", "eth1"],), {}), + ] + + NETWORKD_BRING_UP_CALL_LIST = [ +@@ -230,11 +169,9 @@ class TestActivatorsBringUp: + def test_bring_up_interface( + self, m_subp, activator, expected_call_list, available_mocks + ): +- index = 0 + activator.bring_up_interface("eth0") +- for call in m_subp.call_args_list: +- assert call == expected_call_list[index] +- index += 1 ++ assert len(m_subp.call_args_list) == 1 ++ assert m_subp.call_args_list[0] == expected_call_list[0] + + @patch("cloudinit.subp.subp", return_value=("", "")) + def test_bring_up_interfaces( +@@ -271,8 +208,8 @@ IF_UP_DOWN_BRING_DOWN_CALL_LIST = [ + ] + + NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [ +- ((["nmcli", "device", "disconnect", "eth0"],), {}), +- ((["nmcli", "device", "disconnect", "eth1"],), {}), ++ ((["nmcli", "connection", "down", "eth0"],), {}), ++ ((["nmcli", "connection", "down", "eth1"],), {}), + ] + + NETWORKD_BRING_DOWN_CALL_LIST = [ +-- +2.27.0 + diff --git a/SOURCES/ci-Revert-Use-Network-Manager-and-Netplan-as-default-re.patch b/SOURCES/ci-Revert-Use-Network-Manager-and-Netplan-as-default-re.patch new file mode 100644 index 0000000..6532fab --- /dev/null +++ b/SOURCES/ci-Revert-Use-Network-Manager-and-Netplan-as-default-re.patch @@ -0,0 +1,75 @@ +From 02e7b89c157f8c3243f0d91cf5652cf27db44b72 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 8 Aug 2022 10:10:26 +0200 +Subject: [PATCH 2/2] Revert "Use Network-Manager and Netplan as default + renderers for RHEL and Fedora (#1465)" + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 81: Revert "Use Network-Manager and Netplan as default renderers for RHEL and Fedora (#1465)" +RH-Commit: [2/2] 746b2e33356376e250b799261031676174e8ccc9 +RH-Bugzilla: 2107464 2110066 2117526 2104393 2098624 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +As NM is reverted, remove also documentation and any trace of it. 
+This reverts commit 13ded463a6a0b1b0bf0dffc0a997f006dd25c4f3. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + config/cloud.cfg.tmpl | 3 --- + doc/rtd/topics/network-config.rst | 12 +----------- + 2 files changed, 1 insertion(+), 14 deletions(-) + +diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl +index f4d2fd14..80ab4f96 100644 +--- a/config/cloud.cfg.tmpl ++++ b/config/cloud.cfg.tmpl +@@ -353,7 +353,4 @@ system_info: + {% elif variant in ["dragonfly"] %} + network: + renderers: ['freebsd'] +-{% elif variant in ["rhel", "fedora"] %} +- network: +- renderers: ['netplan', 'network-manager', 'networkd', 'sysconfig', 'eni'] + {% endif %} +diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst +index f503caab..c461a3fe 100644 +--- a/doc/rtd/topics/network-config.rst ++++ b/doc/rtd/topics/network-config.rst +@@ -188,15 +188,6 @@ generated configuration into an internal network configuration state. From + this state `Cloud-init`_ delegates rendering of the configuration to Distro + supported formats. The following ``renderers`` are supported in cloud-init: + +-- **NetworkManager** +- +-`NetworkManager `_ is the standard Linux network +-configuration tool suite. It supports a wide range of networking setups. +-Configuration is typically stored in ``/etc/NetworkManager``. +- +-It is the default for a number of Linux distributions, notably Fedora; +-CentOS/RHEL; and derivatives. +- + - **ENI** + + /etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package +@@ -224,7 +215,6 @@ is as follows: + - ENI + - Sysconfig + - Netplan +-- NetworkManager + + When applying the policy, `Cloud-init`_ checks if the current instance has the + correct binaries and paths to support the renderer. The first renderer that +@@ -233,7 +223,7 @@ supplying an updated configuration in cloud-config. :: + + system_info: + network: +- renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd'] ++ renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd'] + + + Network Configuration Tools +-- +2.27.0 + diff --git a/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch b/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch deleted file mode 100644 index c47788f..0000000 --- a/SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 0eeec94882779de76c08b1a7faf862e22f21f242 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 14 Jan 2022 16:42:46 +0100 -Subject: [PATCH 5/6] Revert unnecesary lcase in ds-identify (#978) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 44: Datasource for VMware -RH-Commit: [5/6] f7385c15cf17a9c4a2fa15b29afd1b8a96b24d1e -RH-Bugzilla: 2026587 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -commit f516a7d37c1654addc02485e681b4358d7e7c0db -Author: Andrew Kutz <101085+akutz@users.noreply.github.com> -Date: Fri Aug 13 14:30:55 2021 -0500 - - Revert unnecesary lcase in ds-identify (#978) - - This patch reverts an unnecessary lcase optimization in the - ds-identify script. SystemD documents the values produced by - the systemd-detect-virt command are lower case, and the mapping - table used by the FreeBSD check is also lower-case. - - The optimization added two new forked processes, needlessly - causing overhead. 
- -Signed-off-by: Emanuele Giuseppe Esposito ---- - tools/ds-identify | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/ds-identify b/tools/ds-identify -index 0e12298f..7b782462 100755 ---- a/tools/ds-identify -+++ b/tools/ds-identify -@@ -449,7 +449,7 @@ detect_virt() { - read_virt() { - cached "$DI_VIRT" && return 0 - detect_virt -- DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')" -+ DI_VIRT="${_RET}" - } - - is_container() { --- -2.27.0 - diff --git a/SOURCES/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch b/SOURCES/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch deleted file mode 100644 index e46b52b..0000000 --- a/SOURCES/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch +++ /dev/null @@ -1,1385 +0,0 @@ -From 3b68aff3b7b1dc567ef6721a269c2d4e054b729f Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Mon, 9 Aug 2021 23:41:44 +0200 -Subject: [PATCH] Stop copying ssh system keys and check folder permissions - (#956) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 28: Stop copying ssh system keys and check folder permissions (#956) -RH-Commit: [1/1] 7cada613be82f2f525ee56b86ef9f71edf40d2ef (eesposit/cloud-init) -RH-Bugzilla: 1862967 -RH-Acked-by: Miroslav Rezanina -RH-Acked-by: Eduardo Otubo - -TESTED: By me and QA -BREW: 38818284 - -This is a continuation of previous MR 25 and upstream PR #937. -There were still issues when using non-standard file paths like -/etc/ssh/userkeys/%u or /etc/ssh/authorized_keys, and the choice -of storing the keys of all authorized_keys files into a single -one was not ideal. This fix modifies cloudinit to support -all different cases of authorized_keys file locations, and -picks a user-specific file where to copy the new keys that -complies with ssh permissions. - -commit 00dbaf1e9ab0e59d81662f0f3561897bef499a3f -Author: Emanuele Giuseppe Esposito -Date: Mon Aug 9 16:49:56 2021 +0200 - - Stop copying ssh system keys and check folder permissions (#956) - - In /etc/ssh/sshd_config, it is possible to define a custom - authorized_keys file that will contain the keys allowed to access the - machine via the AuthorizedKeysFile option. Cloudinit is able to add - user-specific keys to the existing ones, but we need to be careful on - which of the authorized_keys files listed to pick. - Chosing a file that is shared by all user will cause security - issues, because the owner of that key can then access also other users. - - We therefore pick an authorized_keys file only if it satisfies the - following conditions: - 1. it is not a "global" file, ie it must be defined in - AuthorizedKeysFile with %u, %h or be in /home/. This avoids - security issues. - 2. it must comply with ssh permission requirements, otherwise the ssh - agent won't use that file. - - If it doesn't meet either of those conditions, write to - ~/.ssh/authorized_keys - - We also need to consider the case when the chosen authorized_keys file - does not exist. In this case, the existing behavior of cloud-init is - to create the new file. We therefore need to be sure that the file - complies with ssh permissions too, by setting: - - the actual file to permission 600, and owned by the user - - the directories in the path that do not exist must be root owned and - with permission 755. 
- -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/ssh_util.py | 133 ++++- - cloudinit/util.py | 51 +- - tests/unittests/test_sshutil.py | 952 +++++++++++++++++++++++++------- - 3 files changed, 920 insertions(+), 216 deletions(-) - -diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py -index 89057262..b8a3c8f7 100644 ---- a/cloudinit/ssh_util.py -+++ b/cloudinit/ssh_util.py -@@ -249,6 +249,113 @@ def render_authorizedkeysfile_paths(value, homedir, username): - return rendered - - -+# Inspired from safe_path() in openssh source code (misc.c). -+def check_permissions(username, current_path, full_path, is_file, strictmodes): -+ """Check if the file/folder in @current_path has the right permissions. -+ -+ We need to check that: -+ 1. If StrictMode is enabled, the owner is either root or the user -+ 2. the user can access the file/folder, otherwise ssh won't use it -+ 3. If StrictMode is enabled, no write permission is given to group -+ and world users (022) -+ """ -+ -+ # group/world can only execute the folder (access) -+ minimal_permissions = 0o711 -+ if is_file: -+ # group/world can only read the file -+ minimal_permissions = 0o644 -+ -+ # 1. owner must be either root or the user itself -+ owner = util.get_owner(current_path) -+ if strictmodes and owner != username and owner != "root": -+ LOG.debug("Path %s in %s must be own by user %s or" -+ " by root, but instead is own by %s. Ignoring key.", -+ current_path, full_path, username, owner) -+ return False -+ -+ parent_permission = util.get_permissions(current_path) -+ # 2. the user can access the file/folder, otherwise ssh won't use it -+ if owner == username: -+ # need only the owner permissions -+ minimal_permissions &= 0o700 -+ else: -+ group_owner = util.get_group(current_path) -+ user_groups = util.get_user_groups(username) -+ -+ if group_owner in user_groups: -+ # need only the group permissions -+ minimal_permissions &= 0o070 -+ else: -+ # need only the world permissions -+ minimal_permissions &= 0o007 -+ -+ if parent_permission & minimal_permissions == 0: -+ LOG.debug("Path %s in %s must be accessible by user %s," -+ " check its permissions", -+ current_path, full_path, username) -+ return False -+ -+ # 3. no write permission (w) is given to group and world users (022) -+ # Group and world user can still have +rx. -+ if strictmodes and parent_permission & 0o022 != 0: -+ LOG.debug("Path %s in %s must not give write" -+ "permission to group or world users. Ignoring key.", -+ current_path, full_path) -+ return False -+ -+ return True -+ -+ -+def check_create_path(username, filename, strictmodes): -+ user_pwent = users_ssh_info(username)[1] -+ root_pwent = users_ssh_info("root")[1] -+ try: -+ # check the directories first -+ directories = filename.split("/")[1:-1] -+ -+ # scan in order, from root to file name -+ parent_folder = "" -+ # this is to comply also with unit tests, and -+ # strange home directories -+ home_folder = os.path.dirname(user_pwent.pw_dir) -+ for directory in directories: -+ parent_folder += "/" + directory -+ if home_folder.startswith(parent_folder): -+ continue -+ -+ if not os.path.isdir(parent_folder): -+ # directory does not exist, and permission so far are good: -+ # create the directory, and make it accessible by everyone -+ # but owned by root, as it might be used by many users. 
-+ with util.SeLinuxGuard(parent_folder): -+ os.makedirs(parent_folder, mode=0o755, exist_ok=True) -+ util.chownbyid(parent_folder, root_pwent.pw_uid, -+ root_pwent.pw_gid) -+ -+ permissions = check_permissions(username, parent_folder, -+ filename, False, strictmodes) -+ if not permissions: -+ return False -+ -+ # check the file -+ if not os.path.exists(filename): -+ # if file does not exist: we need to create it, since the -+ # folders at this point exist and have right permissions -+ util.write_file(filename, '', mode=0o600, ensure_dir_exists=True) -+ util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid) -+ -+ permissions = check_permissions(username, filename, -+ filename, True, strictmodes) -+ if not permissions: -+ return False -+ except (IOError, OSError) as e: -+ util.logexc(LOG, str(e)) -+ return False -+ -+ return True -+ -+ - def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): - (ssh_dir, pw_ent) = users_ssh_info(username) - default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') -@@ -259,6 +366,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): - ssh_cfg = parse_ssh_config_map(sshd_cfg_file) - key_paths = ssh_cfg.get("authorizedkeysfile", - "%h/.ssh/authorized_keys") -+ strictmodes = ssh_cfg.get("strictmodes", "yes") - auth_key_fns = render_authorizedkeysfile_paths( - key_paths, pw_ent.pw_dir, username) - -@@ -269,31 +377,31 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): - "config from %r, using 'AuthorizedKeysFile' file " - "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) - -- # check if one of the keys is the user's one -+ # check if one of the keys is the user's one and has the right permissions - for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): - if any([ - '%u' in key_path, - '%h' in key_path, - auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) - ]): -- user_authorizedkeys_file = auth_key_fn -+ permissions_ok = check_create_path(username, auth_key_fn, -+ strictmodes == "yes") -+ if permissions_ok: -+ user_authorizedkeys_file = auth_key_fn -+ break - - if user_authorizedkeys_file != default_authorizedkeys_file: - LOG.debug( - "AuthorizedKeysFile has an user-specific authorized_keys, " - "using %s", user_authorizedkeys_file) - -- # always store all the keys in the user's private file -- return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) -+ return ( -+ user_authorizedkeys_file, -+ parse_authorized_keys([user_authorizedkeys_file]) -+ ) - - - def setup_user_keys(keys, username, options=None): -- # Make sure the users .ssh dir is setup accordingly -- (ssh_dir, pwent) = users_ssh_info(username) -- if not os.path.isdir(ssh_dir): -- util.ensure_dir(ssh_dir, mode=0o700) -- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) -- - # Turn the 'update' keys given into actual entries - parser = AuthKeyLineParser() - key_entries = [] -@@ -302,11 +410,10 @@ def setup_user_keys(keys, username, options=None): - - # Extract the old and make the new - (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) -+ ssh_dir = os.path.dirname(auth_key_fn) - with util.SeLinuxGuard(ssh_dir, recursive=True): - content = update_authorized_keys(auth_key_entries, key_entries) -- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700) -- util.write_file(auth_key_fn, content, mode=0o600) -- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid) -+ util.write_file(auth_key_fn, content, preserve_mode=True) - - - class SshdConfigLine(object): -diff --git a/cloudinit/util.py 
b/cloudinit/util.py -index 4e0a72db..343976ad 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -35,6 +35,7 @@ from base64 import b64decode, b64encode - from errno import ENOENT - from functools import lru_cache - from urllib import parse -+from typing import List - - from cloudinit import importer - from cloudinit import log as logging -@@ -1830,6 +1831,53 @@ def chmod(path, mode): - os.chmod(path, real_mode) - - -+def get_permissions(path: str) -> int: -+ """ -+ Returns the octal permissions of the file/folder pointed by the path, -+ encoded as an int. -+ -+ @param path: The full path of the file/folder. -+ """ -+ -+ return stat.S_IMODE(os.stat(path).st_mode) -+ -+ -+def get_owner(path: str) -> str: -+ """ -+ Returns the owner of the file/folder pointed by the path. -+ -+ @param path: The full path of the file/folder. -+ """ -+ st = os.stat(path) -+ return pwd.getpwuid(st.st_uid).pw_name -+ -+ -+def get_group(path: str) -> str: -+ """ -+ Returns the group of the file/folder pointed by the path. -+ -+ @param path: The full path of the file/folder. -+ """ -+ st = os.stat(path) -+ return grp.getgrgid(st.st_gid).gr_name -+ -+ -+def get_user_groups(username: str) -> List[str]: -+ """ -+ Returns a list of all groups to which the user belongs -+ -+ @param username: the user we want to check -+ """ -+ groups = [] -+ for group in grp.getgrall(): -+ if username in group.gr_mem: -+ groups.append(group.gr_name) -+ -+ gid = pwd.getpwnam(username).pw_gid -+ groups.append(grp.getgrgid(gid).gr_name) -+ return groups -+ -+ - def write_file( - filename, - content, -@@ -1856,8 +1904,7 @@ def write_file( - - if preserve_mode: - try: -- file_stat = os.stat(filename) -- mode = stat.S_IMODE(file_stat.st_mode) -+ mode = get_permissions(filename) - except OSError: - pass - -diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py -index bcb8044f..a66788bf 100644 ---- a/tests/unittests/test_sshutil.py -+++ b/tests/unittests/test_sshutil.py -@@ -1,6 +1,9 @@ - # This file is part of cloud-init. See LICENSE file for license information. 
- -+import os -+ - from collections import namedtuple -+from functools import partial - from unittest.mock import patch - - from cloudinit import ssh_util -@@ -8,13 +11,48 @@ from cloudinit.tests import helpers as test_helpers - from cloudinit import util - - # https://stackoverflow.com/questions/11351032/ --FakePwEnt = namedtuple( -- 'FakePwEnt', -- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid']) -+FakePwEnt = namedtuple('FakePwEnt', [ -+ 'pw_name', -+ 'pw_passwd', -+ 'pw_uid', -+ 'pw_gid', -+ 'pw_gecos', -+ 'pw_dir', -+ 'pw_shell', -+]) - FakePwEnt.__new__.__defaults__ = tuple( - "UNSET_%s" % n for n in FakePwEnt._fields) - - -+def mock_get_owner(updated_permissions, value): -+ try: -+ return updated_permissions[value][0] -+ except ValueError: -+ return util.get_owner(value) -+ -+ -+def mock_get_group(updated_permissions, value): -+ try: -+ return updated_permissions[value][1] -+ except ValueError: -+ return util.get_group(value) -+ -+ -+def mock_get_user_groups(username): -+ return username -+ -+ -+def mock_get_permissions(updated_permissions, value): -+ try: -+ return updated_permissions[value][2] -+ except ValueError: -+ return util.get_permissions(value) -+ -+ -+def mock_getpwnam(users, username): -+ return users[username] -+ -+ - # Do not use these public keys, most of them are fetched from - # the testdata for OpenSSH, and their private keys are available - # https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata -@@ -552,12 +590,30 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): - ssh_util.render_authorizedkeysfile_paths( - "/opt/%u/keys", "/home/bobby", "bobby")) - -+ def test_user_file(self): -+ self.assertEqual( -+ ["/opt/bobby"], -+ ssh_util.render_authorizedkeysfile_paths( -+ "/opt/%u", "/home/bobby", "bobby")) -+ -+ def test_user_file2(self): -+ self.assertEqual( -+ ["/opt/bobby/bobby"], -+ ssh_util.render_authorizedkeysfile_paths( -+ "/opt/%u/%u", "/home/bobby", "bobby")) -+ - def test_multiple(self): - self.assertEqual( - ["/keys/path1", "/keys/path2"], - ssh_util.render_authorizedkeysfile_paths( - "/keys/path1 /keys/path2", "/home/bobby", "bobby")) - -+ def test_multiple2(self): -+ self.assertEqual( -+ ["/keys/path1", "/keys/bobby"], -+ ssh_util.render_authorizedkeysfile_paths( -+ "/keys/path1 /keys/%u", "/home/bobby", "bobby")) -+ - def test_relative(self): - self.assertEqual( - ["/home/bobby/.secret/keys"], -@@ -581,269 +637,763 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): - - class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): - -- @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -- -- # /tmp/home2/bobby/.ssh/authorized_keys = rsa -- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -- -- # /tmp/home2/bobby/.ssh/user_keys = dsa -- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -- -- # /tmp/sshd_config -+ def create_fake_users(self, names, mock_permissions, -+ m_get_group, m_get_owner, m_get_permissions, -+ m_getpwnam, users): -+ homes = [] -+ -+ root = '/tmp/root' -+ fpw = FakePwEnt(pw_name="root", pw_dir=root) -+ users["root"] = fpw -+ -+ for name in names: -+ home = '/tmp/home/' + name -+ fpw = FakePwEnt(pw_name=name, 
pw_dir=home) -+ users[name] = fpw -+ homes.append(home) -+ -+ m_get_permissions.side_effect = partial( -+ mock_get_permissions, mock_permissions) -+ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions) -+ m_get_group.side_effect = partial(mock_get_group, mock_permissions) -+ m_getpwnam.side_effect = partial(mock_getpwnam, users) -+ return homes -+ -+ def create_user_authorized_file(self, home, filename, content_key, keys): -+ user_ssh_folder = "%s/.ssh" % home -+ # /tmp/home//.ssh/authorized_keys = content_key -+ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder) -+ util.write_file(authorized_keys, VALID_CONTENT[content_key]) -+ keys[authorized_keys] = content_key -+ return authorized_keys -+ -+ def create_global_authorized_file(self, filename, content_key, keys): -+ authorized_keys = self.tmp_path(filename, dir='/tmp') -+ util.write_file(authorized_keys, VALID_CONTENT[content_key]) -+ keys[authorized_keys] = content_key -+ return authorized_keys -+ -+ def create_sshd_config(self, authorized_keys_files): - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, -- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) -+ "AuthorizedKeysFile " + authorized_keys_files - ) -+ return sshd_config - -+ def execute_and_check(self, user, sshd_config, solution, keys, -+ delete_keys=True): - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -+ user, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) - -- self.assertEqual(user_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -+ self.assertEqual(auth_key_fn, solution) -+ for path, key in keys.items(): -+ if path == solution: -+ self.assertTrue(VALID_CONTENT[key] in content) -+ else: -+ self.assertFalse(VALID_CONTENT[key] in content) -+ -+ if delete_keys and os.path.isdir("/tmp/home/"): -+ util.delete_dir_contents("/tmp/home/") - - @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_single_user_two_local_files( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ user_bobby = 'bobby' -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ } -+ -+ homes = self.create_fake_users( -+ [user_bobby], mock_permissions, m_get_group, m_get_owner, -+ m_get_permissions, m_getpwnam, users -+ ) -+ home = homes[0] - -- # /tmp/home/suzie/.ssh/authorized_keys = rsa -- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home, 'authorized_keys', 'rsa', keys -+ ) - -- # /tmp/home/suzie/.ssh/user_keys = dsa -- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -+ # /tmp/home/bobby/.ssh/user_keys = dsa -+ user_keys = self.create_user_authorized_file( 
-+ home, 'user_keys', 'dsa', keys -+ ) - - # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config', dir="/tmp") -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) -+ options = "%s %s" % (authorized_keys, user_keys) -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_single_user_two_local_files_inverted( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ user_bobby = 'bobby' -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ } -+ -+ homes = self.create_fake_users( -+ [user_bobby], mock_permissions, m_get_group, m_get_owner, -+ m_get_permissions, m_getpwnam, users - ) -+ home = homes[0] - -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home, 'authorized_keys', 'rsa', keys -+ ) - -- self.assertEqual(authorized_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -+ # /tmp/home/bobby/.ssh/user_keys = dsa -+ user_keys = self.create_user_authorized_file( -+ home, 'user_keys', 'dsa', keys -+ ) - -- @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ # /tmp/sshd_config -+ options = "%s %s" % (user_keys, authorized_keys) -+ sshd_config = self.create_sshd_config(options) - -- # /tmp/home2/bobby/.ssh/authorized_keys = rsa -- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys) - -- # /tmp/home2/bobby/.ssh/user_keys = dsa -- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_single_user_local_global_files( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ user_bobby = 'bobby' -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ } -+ -+ homes = self.create_fake_users( -+ [user_bobby], mock_permissions, m_get_group, m_get_owner, -+ m_get_permissions, m_getpwnam, users -+ ) -+ home = homes[0] - -- # /tmp/etc/ssh/authorized_keys = ecdsa -- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -- dir="/tmp") -- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ # 
/tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home, 'authorized_keys', 'rsa', keys -+ ) - -- # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config', dir="/tmp") -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, -- user_keys, authorized_keys) -+ # /tmp/home/bobby/.ssh/user_keys = dsa -+ user_keys = self.create_user_authorized_file( -+ home, 'user_keys', 'dsa', keys - ) - -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys', 'ecdsa', keys -+ ) - -- self.assertEqual(authorized_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -+ options = "%s %s %s" % (authorized_keys_global, user_keys, -+ authorized_keys) -+ sshd_config = self.create_sshd_config(options) - -- @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys) - -- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa -- authorized_keys = self.tmp_path('authorized_keys2', -- dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_single_user_local_global_files_inverted( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ user_bobby = 'bobby' -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), -+ } -+ -+ homes = self.create_fake_users( -+ [user_bobby], mock_permissions, m_get_group, m_get_owner, -+ m_get_permissions, m_getpwnam, users -+ ) -+ home = homes[0] - -- # /tmp/home2/bobby/.ssh/user_keys3 = dsa -- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home, 'authorized_keys2', 'rsa', keys -+ ) - -- # /tmp/etc/ssh/authorized_keys = ecdsa -- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -- dir="/tmp") -- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ # /tmp/home/bobby/.ssh/user_keys = dsa -+ user_keys = self.create_user_authorized_file( -+ home, 'user_keys3', 'dsa', keys -+ ) - -- # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config', dir="/tmp") -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, -- authorized_keys, user_keys) -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys', 'ecdsa', keys - ) - -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ options = "%s %s %s" % 
(authorized_keys_global, authorized_keys, -+ user_keys) -+ sshd_config = self.create_sshd_config(options) - -- self.assertEqual(user_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) - - @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_global(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -- m_getpwnam.return_value = fpw -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_single_user_global_file( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ user_bobby = 'bobby' -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ } -+ -+ homes = self.create_fake_users( -+ [user_bobby], mock_permissions, m_get_group, m_get_owner, -+ m_get_permissions, m_getpwnam, users -+ ) -+ home = homes[0] - - # /tmp/etc/ssh/authorized_keys = rsa -- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -- dir="/tmp") -- util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys', 'rsa', keys -+ ) - -- # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config') -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s" % (authorized_keys_global) -+ options = "%s" % authorized_keys_global -+ sshd_config = self.create_sshd_config(options) -+ -+ default = "%s/.ssh/authorized_keys" % home -+ self.execute_and_check(user_bobby, sshd_config, default, keys) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_local_file_standard( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users - ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] - -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) - -- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -+ # /tmp/home/suzie/.ssh/authorized_keys = rsa -+ authorized_keys2 = self.create_user_authorized_file( -+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ options = ".ssh/authorized_keys" -+ sshd_config = self.create_sshd_config(options) 
-+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) - - @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa -- authorized_keys = self.tmp_path('authorized_keys2', -- dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -- # /tmp/home2/bobby/.ssh/user_keys3 = dsa -- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -- -- fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') -- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir -- # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com -- authorized_keys2 = self.tmp_path('authorized_keys2', -- dir=user_ssh_folder) -- util.write_file(authorized_keys2, -- VALID_CONTENT['ssh-xmss@openssh.com']) -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_local_file_custom( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] - -- # /tmp/etc/ssh/authorized_keys = ecdsa -- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', -- dir="/tmp") -- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys2', 'rsa', keys -+ ) - -- # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config', dir="/tmp") -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % -- (authorized_keys_global, user_keys) -+ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa -+ authorized_keys2 = self.create_user_authorized_file( -+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys - ) - -- # process first user -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ options = ".ssh/authorized_keys2" -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) - -- self.assertEqual(user_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -- self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ 
@patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_local_global_files( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), -+ '/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] - -- m_getpwnam.return_value = fpw2 -- # process second user -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw2.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa -+ self.create_user_authorized_file( -+ home_bobby, 'authorized_keys2', 'rsa', keys -+ ) -+ # /tmp/home/bobby/.ssh/user_keys3 = dsa -+ user_keys = self.create_user_authorized_file( -+ home_bobby, 'user_keys3', 'dsa', keys -+ ) -+ -+ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa -+ authorized_keys2 = self.create_user_authorized_file( -+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys2', 'ecdsa', keys -+ ) -+ -+ options = "%s %s %%h/.ssh/authorized_keys2" % \ -+ (authorized_keys_global, user_keys) -+ sshd_config = self.create_sshd_config(options) - -- self.assertEqual(authorized_keys2, auth_key_fn) -- self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) -- self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -- self.assertFalse(VALID_CONTENT['rsa'] in content) -+ self.execute_and_check( -+ user_bobby, sshd_config, user_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) - -+ @patch("cloudinit.util.get_user_groups") - @patch("cloudinit.ssh_util.pwd.getpwnam") -- def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') -- m_getpwnam.return_value = fpw -- user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_local_global_files_badguy( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), -+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), -+ '/tmp/home/badguy': ('root', 'root', 0o755), -+ '/tmp/home/badguy/home': ('root', 'root', 0o755), -+ '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655), -+ } -+ -+ user_bobby = 'bobby' -+ user_badguy = 'badguy' -+ home_bobby, *_ = 
self.create_fake_users( -+ [user_bobby, user_badguy], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ m_get_user_groups.side_effect = mock_get_user_groups -+ - # /tmp/home/bobby/.ssh/authorized_keys2 = rsa -- authorized_keys = self.tmp_path('authorized_keys2', -- dir=user_ssh_folder) -- util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys2', 'rsa', keys -+ ) - # /tmp/home/bobby/.ssh/user_keys3 = dsa -- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -- util.write_file(user_keys, VALID_CONTENT['dsa']) -+ user_keys = self.create_user_authorized_file( -+ home_bobby, 'user_keys3', 'dsa', keys -+ ) - -- fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') -- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir - # /tmp/home/badguy/home/bobby = "" - authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") -+ util.write_file(authorized_keys2, '') - - # /tmp/etc/ssh/authorized_keys = ecdsa -- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', -- dir="/tmp") -- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys2', 'ecdsa', keys -+ ) - - # /tmp/sshd_config -- sshd_config = self.tmp_path('sshd_config', dir="/tmp") -- util.write_file( -- sshd_config, -- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" % -- (authorized_keys_global, user_keys, authorized_keys2) -+ options = "%s %%h/.ssh/authorized_keys2 %s %s" % \ -+ (authorized_keys2, authorized_keys_global, user_keys) -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check( -+ user_badguy, sshd_config, authorized_keys2, keys - ) - -- # process first user -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ @patch("cloudinit.util.get_user_groups") -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_unaccessible_file( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ -+ '/tmp/etc': ('root', 'root', 0o755), -+ '/tmp/etc/ssh': ('root', 'root', 0o755), -+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700), -+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), -+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), -+ -+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700), -+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), -+ '/tmp/home/badguy/.ssh/authorized_keys': -+ ('badguy', 'badguy', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_badguy = 'badguy' -+ homes = self.create_fake_users( -+ [user_bobby, user_badguy], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ m_get_user_groups.side_effect = mock_get_user_groups -+ home_bobby = homes[0] -+ home_badguy = homes[1] - -- self.assertEqual(user_keys, auth_key_fn) -- self.assertTrue(VALID_CONTENT['rsa'] in content) -- 
self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) -+ # /tmp/etc/ssh/userkeys/bobby = dsa -+ # assume here that we can bypass userkeys, despite permissions -+ self.create_global_authorized_file( -+ 'etc/ssh/userkeys/bobby', 'dsa', keys -+ ) - -- m_getpwnam.return_value = fpw2 -- # process second user -- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw2.pw_name, sshd_config) -- content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com -+ authorized_keys2 = self.create_user_authorized_file( -+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) - -- # badguy should not take the key from the other user! -- self.assertEqual(authorized_keys2, auth_key_fn) -- self.assertTrue(VALID_CONTENT['ecdsa'] in content) -- self.assertTrue(VALID_CONTENT['dsa'] in content) -- self.assertFalse(VALID_CONTENT['rsa'] in content) -+ # /tmp/etc/ssh/userkeys/badguy = ecdsa -+ self.create_global_authorized_file( -+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys -+ ) -+ -+ # /tmp/sshd_config -+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check( -+ user_badguy, sshd_config, authorized_keys2, keys -+ ) -+ -+ @patch("cloudinit.util.get_user_groups") -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_accessible_file( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ -+ '/tmp/etc': ('root', 'root', 0o755), -+ '/tmp/etc/ssh': ('root', 'root', 0o755), -+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755), -+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), -+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), -+ -+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700), -+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), -+ '/tmp/home/badguy/.ssh/authorized_keys': -+ ('badguy', 'badguy', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_badguy = 'badguy' -+ homes = self.create_fake_users( -+ [user_bobby, user_badguy], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ m_get_user_groups.side_effect = mock_get_user_groups -+ home_bobby = homes[0] -+ home_badguy = homes[1] -+ -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) -+ # /tmp/etc/ssh/userkeys/bobby = dsa -+ # assume here that we can bypass userkeys, despite permissions -+ authorized_keys = self.create_global_authorized_file( -+ 'etc/ssh/userkeys/bobby', 'dsa', keys -+ ) -+ -+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com -+ self.create_user_authorized_file( -+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ # /tmp/etc/ssh/userkeys/badguy = ecdsa -+ authorized_keys2 = self.create_global_authorized_file( 
-+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys -+ ) -+ -+ # /tmp/sshd_config -+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check( -+ user_badguy, sshd_config, authorized_keys2, keys -+ ) -+ -+ @patch("cloudinit.util.get_user_groups") -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_hardcoded_single_user_file( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] -+ m_get_user_groups.side_effect = mock_get_user_groups -+ -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) -+ -+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com -+ self.create_user_authorized_file( -+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ # /tmp/sshd_config -+ options = "%s" % (authorized_keys) -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ default = "%s/.ssh/authorized_keys" % home_suzie -+ self.execute_and_check(user_suzie, sshd_config, default, keys) -+ -+ @patch("cloudinit.util.get_user_groups") -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_hardcoded_single_user_file_inverted( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] -+ m_get_user_groups.side_effect = mock_get_user_groups -+ -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) -+ -+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com -+ authorized_keys2 = self.create_user_authorized_file( -+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ # /tmp/sshd_config -+ options 
= "%s" % (authorized_keys2) -+ sshd_config = self.create_sshd_config(options) -+ -+ default = "%s/.ssh/authorized_keys" % home_bobby -+ self.execute_and_check( -+ user_bobby, sshd_config, default, keys, delete_keys=False -+ ) -+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) -+ -+ @patch("cloudinit.util.get_user_groups") -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ @patch("cloudinit.util.get_permissions") -+ @patch("cloudinit.util.get_owner") -+ @patch("cloudinit.util.get_group") -+ def test_two_users_hardcoded_user_files( -+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, -+ m_get_user_groups -+ ): -+ keys = {} -+ users = {} -+ mock_permissions = { -+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), -+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), -+ -+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), -+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), -+ } -+ -+ user_bobby = 'bobby' -+ user_suzie = 'suzie' -+ homes = self.create_fake_users( -+ [user_bobby, user_suzie], mock_permissions, m_get_group, -+ m_get_owner, m_get_permissions, m_getpwnam, users -+ ) -+ home_bobby = homes[0] -+ home_suzie = homes[1] -+ m_get_user_groups.side_effect = mock_get_user_groups -+ -+ # /tmp/home/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.create_user_authorized_file( -+ home_bobby, 'authorized_keys', 'rsa', keys -+ ) -+ -+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com -+ authorized_keys2 = self.create_user_authorized_file( -+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys -+ ) -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.create_global_authorized_file( -+ 'etc/ssh/authorized_keys', 'ecdsa', keys -+ ) -+ -+ # /tmp/sshd_config -+ options = "%s %s %s" % \ -+ (authorized_keys_global, authorized_keys, authorized_keys2) -+ sshd_config = self.create_sshd_config(options) -+ -+ self.execute_and_check( -+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False -+ ) -+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) - - # vi: ts=4 expandtab --- -2.27.0 - diff --git a/SOURCES/ci-Support-EC2-tags-in-instance-metadata-1309.patch b/SOURCES/ci-Support-EC2-tags-in-instance-metadata-1309.patch new file mode 100644 index 0000000..6e8e0fb --- /dev/null +++ b/SOURCES/ci-Support-EC2-tags-in-instance-metadata-1309.patch @@ -0,0 +1,164 @@ +From fbec3008305845072a787f46008bbb82d89dec53 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 30 May 2022 16:46:41 +0200 +Subject: [PATCH] Support EC2 tags in instance metadata (#1309) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 70: Support EC2 tags in instance metadata (#1309) +RH-Commit: [1/1] 2497547016173a4c6e7d3c900f80de390d445c44 +RH-Bugzilla: 2082686 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +commit 40c52ce1f4049449b04f93226721f63af874c5c7 +Author: Eduardo Dobay +Date: Wed Apr 6 01:28:01 2022 -0300 + + Support EC2 tags in instance metadata (#1309) + + Add support for newer EC2 metadata versions (up to 2021-03-23), so that + tags can be retrieved from the `ds.meta_data.tags` field, as well as + with any new fields that might have been added since the 2018-09-24 + version. 
+ +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/sources/DataSourceEc2.py | 5 +++-- + doc/rtd/topics/datasources/ec2.rst | 28 ++++++++++++++++++++++------ + tests/unittests/sources/test_ec2.py | 26 +++++++++++++++++++++++++- + tools/.github-cla-signers | 1 + + 4 files changed, 51 insertions(+), 9 deletions(-) + +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index 03b3870c..a030b498 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -61,8 +61,9 @@ class DataSourceEc2(sources.DataSource): + min_metadata_version = "2009-04-04" + + # Priority ordered list of additional metadata versions which will be tried +- # for extended metadata content. IPv6 support comes in 2016-09-02 +- extended_metadata_versions = ["2018-09-24", "2016-09-02"] ++ # for extended metadata content. IPv6 support comes in 2016-09-02. ++ # Tags support comes in 2021-03-23. ++ extended_metadata_versions = ["2021-03-23", "2018-09-24", "2016-09-02"] + + # Setup read_url parameters per get_url_params. + url_max_wait = 120 +diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst +index 94e4158d..77232269 100644 +--- a/doc/rtd/topics/datasources/ec2.rst ++++ b/doc/rtd/topics/datasources/ec2.rst +@@ -38,11 +38,26 @@ Userdata is accessible via the following URL: + GET http://169.254.169.254/2009-04-04/user-data + 1234,fred,reboot,true | 4512,jimbo, | 173,,, + +-Note that there are multiple versions of this data provided, cloud-init +-by default uses **2009-04-04** but newer versions can be supported with +-relative ease (newer versions have more data exposed, while maintaining +-backward compatibility with the previous versions). +-Version **2016-09-02** is required for secondary IP address support. ++Note that there are multiple EC2 Metadata versions of this data provided ++to instances. cloud-init will attempt to use the most recent API version it ++supports in order to get latest API features and instance-data. If a given ++API version is not exposed to the instance, those API features will be ++unavailable to the instance. ++ ++ +++----------------+----------------------------------------------------------+ +++ EC2 version | supported instance-data/feature | +++================+==========================================================+ +++ **2021-03-23** | Required for Instance tag support. This feature must be | ++| | enabled individually on each instance. See the | ++| | `EC2 tags user guide`_. | +++----------------+----------------------------------------------------------+ ++| **2016-09-02** | Required for secondary IP address support. | +++----------------+----------------------------------------------------------+ ++| **2009-04-04** | Minimum supports EC2 API version for meta-data and | ++| | user-data. | +++----------------+----------------------------------------------------------+ ++ + + To see which versions are supported from your cloud provider use the following + URL: +@@ -71,7 +86,7 @@ configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + + The settings that may be configured are: + +- * **metadata_urls**: This list of urls will be searched for an Ec2 ++ * **metadata_urls**: This list of urls will be searched for an EC2 + metadata service. The first entry that successfully returns a 200 response + for //meta-data/instance-id will be selected. + (default: ['http://169.254.169.254', 'http://instance-data:8773']). 
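
As a practical aside (not part of the patch): the set of API versions an instance can use is advertised by the IMDS endpoint itself, so whether tag support is available can be checked directly from the instance. A rough sketch, assuming the standard link-local endpoint and that IMDSv1 requests are permitted (IMDSv2-only instances additionally require a session token header):

    # List the metadata API versions advertised by the local IMDS endpoint.
    # If "2021-03-23" (or newer) appears, instance tags can be exposed.
    from urllib.request import urlopen

    with urlopen("http://169.254.169.254/", timeout=2) as resp:
        versions = resp.read().decode().splitlines()

    print("tag-capable:", "2021-03-23" in versions)
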
+@@ -121,4 +136,5 @@ Notes + For example: the primary NIC will have a DHCP route-metric of 100, + the next NIC will be 200. + ++.. _EC2 tags user guide: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS + .. vi: textwidth=79 +diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py +index b376660d..7c8a5ea5 100644 +--- a/tests/unittests/sources/test_ec2.py ++++ b/tests/unittests/sources/test_ec2.py +@@ -210,6 +210,17 @@ SECONDARY_IP_METADATA_2018_09_24 = { + + M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." + ++TAGS_METADATA_2021_03_23 = { ++ **DEFAULT_METADATA, ++ "tags": { ++ "instance": { ++ "Environment": "production", ++ "Application": "test", ++ "TagWithoutValue": "", ++ } ++ }, ++} ++ + + def _register_ssh_keys(rfunc, base_url, keys_data): + """handle ssh key inconsistencies. +@@ -670,7 +681,7 @@ class TestEc2(test_helpers.HttprettyTestCase): + logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] + logs_with_token = [log for log in all_logs if "API-TOKEN" in log] + self.assertEqual(1, len(logs_with_redacted_ttl)) +- self.assertEqual(81, len(logs_with_redacted)) ++ self.assertEqual(83, len(logs_with_redacted)) + self.assertEqual(0, len(logs_with_token)) + + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") +@@ -811,6 +822,19 @@ class TestEc2(test_helpers.HttprettyTestCase): + ) + self.assertIn("Crawl of metadata service took", self.logs.getvalue()) + ++ def test_get_instance_tags(self): ++ ds = self._setup_ds( ++ platform_data=self.valid_platform_data, ++ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, ++ md={"md": TAGS_METADATA_2021_03_23}, ++ ) ++ self.assertTrue(ds.get_data()) ++ self.assertIn("tags", ds.metadata) ++ self.assertIn("instance", ds.metadata["tags"]) ++ instance_tags = ds.metadata["tags"]["instance"] ++ self.assertEqual(instance_tags["Application"], "test") ++ self.assertEqual(instance_tags["Environment"], "production") ++ + + class TestGetSecondaryAddresses(test_helpers.CiTestCase): + +diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers +index ac157a2f..9f71ea0c 100644 +--- a/tools/.github-cla-signers ++++ b/tools/.github-cla-signers +@@ -26,6 +26,7 @@ dermotbradley + dhensby + eandersson + eb3095 ++edudobay + emmanuelthome + eslerm + esposem +-- +2.27.0 + diff --git a/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch b/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch deleted file mode 100644 index 07c44fe..0000000 --- a/SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch +++ /dev/null @@ -1,97 +0,0 @@ -From ded01bd47c65636e59dc332d06fb8acb982ec677 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Fri, 14 Jan 2022 16:41:52 +0100 -Subject: [PATCH 4/6] Update dscheck_VMware's rpctool check (#970) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 44: Datasource for VMware -RH-Commit: [4/6] 509f68596f2d8f32027677f756b9d81e6a507ff1 -RH-Bugzilla: 2026587 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -commit 7781dec3306e9467f216cfcb36b7e10a8b38547a -Author: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> -Date: Fri Aug 13 00:40:39 2021 +0530 - - Update dscheck_VMware's rpctool check (#970) - - This patch updates the dscheck_VMware function's use of "vmware-rpctool". - - When checking to see if a "guestinfo" property is set. 
- Because a successful exit code can occur even if there is an empty - string returned, it is possible that the VMware datasource will be - loaded as a false-positive. This patch ensures that in addition to - validating the exit code, the emitted output is also examined to ensure - a non-empty value is returned by rpctool before returning "${DS_FOUND}" - from "dscheck_VMware()". - -Signed-off-by: Emanuele Giuseppe Esposito ---- - tools/ds-identify | 15 +++++++++------ - 1 file changed, 9 insertions(+), 6 deletions(-) - -diff --git a/tools/ds-identify b/tools/ds-identify -index c01eae3d..0e12298f 100755 ---- a/tools/ds-identify -+++ b/tools/ds-identify -@@ -141,6 +141,7 @@ error() { - debug 0 "$@" - stderr "$@" - } -+ - warn() { - set -- "WARN:" "$@" - debug 0 "$@" -@@ -344,7 +345,6 @@ geom_label_status_as() { - return $ret - } - -- - read_fs_info_freebsd() { - local oifs="$IFS" line="" delim="," - local ret=0 labels="" dev="" label="" ftype="" isodevs="" -@@ -404,7 +404,6 @@ cached() { - [ -n "$1" ] && _RET="$1" && return || return 1 - } - -- - detect_virt() { - local virt="${UNAVAILABLE}" r="" out="" - if [ -d /run/systemd ]; then -@@ -450,7 +449,7 @@ detect_virt() { - read_virt() { - cached "$DI_VIRT" && return 0 - detect_virt -- DI_VIRT=${_RET} -+ DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')" - } - - is_container() { -@@ -1370,16 +1369,20 @@ vmware_has_rpctool() { - command -v vmware-rpctool >/dev/null 2>&1 - } - -+vmware_rpctool_guestinfo() { -+ vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]" -+} -+ - vmware_rpctool_guestinfo_metadata() { -- vmware-rpctool "info-get guestinfo.metadata" -+ vmware_rpctool_guestinfo "metadata" - } - - vmware_rpctool_guestinfo_userdata() { -- vmware-rpctool "info-get guestinfo.userdata" -+ vmware_rpctool_guestinfo "userdata" - } - - vmware_rpctool_guestinfo_vendordata() { -- vmware-rpctool "info-get guestinfo.vendordata" -+ vmware_rpctool_guestinfo "vendordata" - } - - dscheck_VMware() { --- -2.27.0 - diff --git a/SOURCES/ci-Use-Network-Manager-and-Netplan-as-default-renderers.patch b/SOURCES/ci-Use-Network-Manager-and-Netplan-as-default-renderers.patch new file mode 100644 index 0000000..04d5e1f --- /dev/null +++ b/SOURCES/ci-Use-Network-Manager-and-Netplan-as-default-renderers.patch @@ -0,0 +1,110 @@ +From 13ded463a6a0b1b0bf0dffc0a997f006dd25c4f3 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Thu, 19 May 2022 15:51:27 +0200 +Subject: [PATCH 2/4] Use Network-Manager and Netplan as default renderers for + RHEL and Fedora (#1465) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 57: Add native NetworkManager support (#1224) +RH-Commit: [2/2] f2f977564bea496b0d76c0cef242959d03c2c73e +RH-Bugzilla: 2059872 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Jon Maloy +RH-Acked-by: Eduardo Otubo + +commit 7703aa98b89c8daba207c28a0422268ead10019a +Author: Emanuele Giuseppe Esposito +Date: Thu May 19 15:05:01 2022 +0200 + + Use Network-Manager and Netplan as default renderers for RHEL and Fedora (#1465) + + This is adapted from Neal Gompa's PR: + https://github.com/canonical/cloud-init/pull/1435 + + The only difference is that we are not modifying renderers.py (thus + modifying the priority of all distros), but just tweaking cloud.cfg to + apply this change to Fedora and RHEL. Other distros can optionally + add themselves afterwards. 
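
To make the effect of the cloud.cfg change below concrete: the renderer list is consulted in order and the first renderer whose availability check passes is used. A simplified, hypothetical sketch of that selection logic (the real checks live in cloud-init's renderer code; the availability lambdas here are stand-ins):

    from typing import Callable, Dict, List

    def select_renderer(priority: List[str],
                        available: Dict[str, Callable[[], bool]]) -> str:
        # Walk the configured priority list; the first usable renderer wins.
        for name in priority:
            check = available.get(name)
            if check and check():
                return name
        raise RuntimeError("no usable network renderer in %s" % priority)

    # Priority this patch sets for RHEL/Fedora in cloud.cfg.tmpl:
    priority = ["netplan", "network-manager", "networkd", "sysconfig", "eni"]
    available = {
        "netplan": lambda: False,          # e.g. netplan binary not installed
        "network-manager": lambda: True,   # NetworkManager present
        "sysconfig": lambda: True,
    }
    print(select_renderer(priority, available))   # -> "network-manager"
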
+ + net: Prefer Netplan and NetworkManager renderers by default + + NetworkManager is used by default on a variety of Linux distributions, + and exists as a cross-distribution network management service. + + Additionally, add information about the NetworkManager renderer to + the cloud-init documentation. + + Because Netplan can be explicitly used to manage NetworkManager, + it needs to be preferred before NetworkManager. + + This change is a follow-up to #1224, which added the native + NetworkManager renderer. + This patch has been deployed on Fedora's cloud-init package throughout + the development of Fedora Linux 36 to verify that it works. + + This should also make it tremendously easier for Linux distributions + to use cloud-init because now a standard configuration is supported + by default. + + Signed-off-by: Neal Gompa + + Signed-off-by: Emanuele Giuseppe Esposito + +Signed-off-by: Emanuele Giuseppe Esposito +--- + config/cloud.cfg.tmpl | 3 +++ + doc/rtd/topics/network-config.rst | 12 +++++++++++- + 2 files changed, 14 insertions(+), 1 deletion(-) + +diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl +index fb4b456c..86beee3c 100644 +--- a/config/cloud.cfg.tmpl ++++ b/config/cloud.cfg.tmpl +@@ -330,4 +330,7 @@ system_info: + {% elif variant in ["dragonfly"] %} + network: + renderers: ['freebsd'] ++{% elif variant in ["rhel", "fedora"] %} ++ network: ++ renderers: ['netplan', 'network-manager', 'networkd', 'sysconfig', 'eni'] + {% endif %} +diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst +index c461a3fe..f503caab 100644 +--- a/doc/rtd/topics/network-config.rst ++++ b/doc/rtd/topics/network-config.rst +@@ -188,6 +188,15 @@ generated configuration into an internal network configuration state. From + this state `Cloud-init`_ delegates rendering of the configuration to Distro + supported formats. The following ``renderers`` are supported in cloud-init: + ++- **NetworkManager** ++ ++`NetworkManager `_ is the standard Linux network ++configuration tool suite. It supports a wide range of networking setups. ++Configuration is typically stored in ``/etc/NetworkManager``. ++ ++It is the default for a number of Linux distributions, notably Fedora; ++CentOS/RHEL; and derivatives. ++ + - **ENI** + + /etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package +@@ -215,6 +224,7 @@ is as follows: + - ENI + - Sysconfig + - Netplan ++- NetworkManager + + When applying the policy, `Cloud-init`_ checks if the current instance has the + correct binaries and paths to support the renderer. The first renderer that +@@ -223,7 +233,7 @@ supplying an updated configuration in cloud-config. 
:: + + system_info: + network: +- renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd'] ++ renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd'] + + + Network Configuration Tools +-- +2.35.3 + diff --git a/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch b/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch deleted file mode 100644 index 1ccfec9..0000000 --- a/SOURCES/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch +++ /dev/null @@ -1,470 +0,0 @@ -From 6e79106a09a0d142915da1fb48640575bb4bfe08 Mon Sep 17 00:00:00 2001 -From: Anh Vo -Date: Tue, 13 Apr 2021 17:39:39 -0400 -Subject: [PATCH 3/7] azure: Removing ability to invoke walinuxagent (#799) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 45: Add support for userdata on Azure from IMDS -RH-Commit: [3/7] f5e98665bf2093edeeccfcd95b47df2e44a40536 -RH-Bugzilla: 2023940 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -Invoking walinuxagent from within cloud-init is no longer -supported/necessary ---- - cloudinit/sources/DataSourceAzure.py | 137 ++++-------------- - doc/rtd/topics/datasources/azure.rst | 62 ++------ - tests/unittests/test_datasource/test_azure.py | 97 ------------- - 3 files changed, 35 insertions(+), 261 deletions(-) - -diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py -index de1452ce..020b7006 100755 ---- a/cloudinit/sources/DataSourceAzure.py -+++ b/cloudinit/sources/DataSourceAzure.py -@@ -381,53 +381,6 @@ class DataSourceAzure(sources.DataSource): - util.logexc(LOG, "handling set_hostname failed") - return False - -- @azure_ds_telemetry_reporter -- def get_metadata_from_agent(self): -- temp_hostname = self.metadata.get('local-hostname') -- agent_cmd = self.ds_cfg['agent_command'] -- LOG.debug("Getting metadata via agent. hostname=%s cmd=%s", -- temp_hostname, agent_cmd) -- -- self.bounce_network_with_azure_hostname() -- -- try: -- invoke_agent(agent_cmd) -- except subp.ProcessExecutionError: -- # claim the datasource even if the command failed -- util.logexc(LOG, "agent command '%s' failed.", -- self.ds_cfg['agent_command']) -- -- ddir = self.ds_cfg['data_dir'] -- -- fp_files = [] -- key_value = None -- for pk in self.cfg.get('_pubkeys', []): -- if pk.get('value', None): -- key_value = pk['value'] -- LOG.debug("SSH authentication: using value from fabric") -- else: -- bname = str(pk['fingerprint'] + ".crt") -- fp_files += [os.path.join(ddir, bname)] -- LOG.debug("SSH authentication: " -- "using fingerprint from fabric") -- -- with events.ReportEventStack( -- name="waiting-for-ssh-public-key", -- description="wait for agents to retrieve SSH keys", -- parent=azure_ds_reporter): -- # wait very long for public SSH keys to arrive -- # https://bugs.launchpad.net/cloud-init/+bug/1717611 -- missing = util.log_time(logfunc=LOG.debug, -- msg="waiting for SSH public key files", -- func=util.wait_for_files, -- args=(fp_files, 900)) -- if len(missing): -- LOG.warning("Did not find files, but going on: %s", missing) -- -- metadata = {} -- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) -- return metadata -- - def _get_subplatform(self): - """Return the subplatform metadata source details.""" - if self.seed.startswith('/dev'): -@@ -1354,35 +1307,32 @@ class DataSourceAzure(sources.DataSource): - On failure, returns False. 
- """ - -- if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: -- self.bounce_network_with_azure_hostname() -+ self.bounce_network_with_azure_hostname() - -- pubkey_info = None -- try: -- raise KeyError( -- "Not using public SSH keys from IMDS" -- ) -- # pylint:disable=unreachable -- public_keys = self.metadata['imds']['compute']['publicKeys'] -- LOG.debug( -- 'Successfully retrieved %s key(s) from IMDS', -- len(public_keys) -- if public_keys is not None -- else 0 -- ) -- except KeyError: -- LOG.debug( -- 'Unable to retrieve SSH keys from IMDS during ' -- 'negotiation, falling back to OVF' -- ) -- pubkey_info = self.cfg.get('_pubkeys', None) -- -- metadata_func = partial(get_metadata_from_fabric, -- fallback_lease_file=self. -- dhclient_lease_file, -- pubkey_info=pubkey_info) -- else: -- metadata_func = self.get_metadata_from_agent -+ pubkey_info = None -+ try: -+ raise KeyError( -+ "Not using public SSH keys from IMDS" -+ ) -+ # pylint:disable=unreachable -+ public_keys = self.metadata['imds']['compute']['publicKeys'] -+ LOG.debug( -+ 'Successfully retrieved %s key(s) from IMDS', -+ len(public_keys) -+ if public_keys is not None -+ else 0 -+ ) -+ except KeyError: -+ LOG.debug( -+ 'Unable to retrieve SSH keys from IMDS during ' -+ 'negotiation, falling back to OVF' -+ ) -+ pubkey_info = self.cfg.get('_pubkeys', None) -+ -+ metadata_func = partial(get_metadata_from_fabric, -+ fallback_lease_file=self. -+ dhclient_lease_file, -+ pubkey_info=pubkey_info) - - LOG.debug("negotiating with fabric via agent command %s", - self.ds_cfg['agent_command']) -@@ -1617,33 +1567,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): - return True - - --@azure_ds_telemetry_reporter --def crtfile_to_pubkey(fname, data=None): -- pipeline = ('openssl x509 -noout -pubkey < "$0" |' -- 'ssh-keygen -i -m PKCS8 -f /dev/stdin') -- (out, _err) = subp.subp(['sh', '-c', pipeline, fname], -- capture=True, data=data) -- return out.rstrip() -- -- --@azure_ds_telemetry_reporter --def pubkeys_from_crt_files(flist): -- pubkeys = [] -- errors = [] -- for fname in flist: -- try: -- pubkeys.append(crtfile_to_pubkey(fname)) -- except subp.ProcessExecutionError: -- errors.append(fname) -- -- if errors: -- report_diagnostic_event( -- "failed to convert the crt files to pubkey: %s" % errors, -- logger_func=LOG.warning) -- -- return pubkeys -- -- - @azure_ds_telemetry_reporter - def write_files(datadir, files, dirmode=None): - -@@ -1672,16 +1595,6 @@ def write_files(datadir, files, dirmode=None): - util.write_file(filename=fname, content=content, mode=0o600) - - --@azure_ds_telemetry_reporter --def invoke_agent(cmd): -- # this is a function itself to simplify patching it for test -- if cmd: -- LOG.debug("invoking agent: %s", cmd) -- subp.subp(cmd, shell=(not isinstance(cmd, list))) -- else: -- LOG.debug("not invoking agent") -- -- - def find_child(node, filter_func): - ret = [] - if not node.hasChildNodes(): -diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst -index e04c3a33..ad9f2236 100644 ---- a/doc/rtd/topics/datasources/azure.rst -+++ b/doc/rtd/topics/datasources/azure.rst -@@ -5,28 +5,6 @@ Azure - - This datasource finds metadata and user-data from the Azure cloud platform. - --walinuxagent -------------- --walinuxagent has several functions within images. For cloud-init --specifically, the relevant functionality it performs is to register the --instance with the Azure cloud platform at boot so networking will be --permitted. 
For more information about the other functionality of --walinuxagent, see `Azure's documentation --`_ for more details. --(Note, however, that only one of walinuxagent's provisioning and cloud-init --should be used to perform instance customisation.) -- --If you are configuring walinuxagent yourself, you will want to ensure that you --have `Provisioning.UseCloudInit --`_ set to --``y``. -- -- --Builtin Agent --------------- --An alternative to using walinuxagent to register to the Azure cloud platform --is to use the ``__builtin__`` agent command. This section contains more --background on what that code path does, and how to enable it. - - The Azure cloud platform provides initial data to an instance via an attached - CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some -@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in - 'dhclient_hook' of cloud-init itself. This sub-command will write the client - information in json format to /run/cloud-init/dhclient.hook/.json. - --In order for cloud-init to leverage this method to find the endpoint, the --cloud.cfg file must contain: -- --.. sourcecode:: yaml -- -- datasource: -- Azure: -- set_hostname: False -- agent_command: __builtin__ -- - If those files are not available, the fallback is to check the leases file - for the endpoint server (again option 245). - -@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). - - The settings that may be configured are: - -- * **agent_command**: Either __builtin__ (default) or a command to run to getcw -- metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the -- provided command to obtain metadata. - * **apply_network_config**: Boolean set to True to use network configuration - described by Azure's IMDS endpoint instead of fallback network config of - dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is -@@ -121,7 +86,6 @@ An example configuration with the default values is provided below: - - datasource: - Azure: -- agent_command: __builtin__ - apply_network_config: true - data_dir: /var/lib/waagent - dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases -@@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``) - If both ``UserData`` and ``CustomData`` are provided behavior is undefined on - which will be selected. - --In the example below, user-data provided is 'this is my userdata', and the --datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. --That agent command will take affect as if it were specified in system config. -+In the example below, user-data provided is 'this is my userdata' - - Example: - -@@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as - Whatever value the instance provides in its dhcp request will resolve in the - domain returned in the 'search' request. - --The interesting issue is that a generic image will already have a hostname --configured. The ubuntu cloud images have 'ubuntu' as the hostname of the --system, and the initial dhcp request on eth0 is not guaranteed to occur after --the datasource code has been run. So, on first boot, that initial value will --be sent in the dhcp request and *that* value will resolve. -- --In order to make the ``HostName`` provided in the ovf-env.xml resolve, a --dhcp request must be made with the new value. 
Walinuxagent (in its current --version) handles this by polling the state of hostname and bouncing ('``ifdown --eth0; ifup eth0``' the network interface if it sees that a change has been --made. -+A generic image will already have a hostname configured. The ubuntu -+cloud images have 'ubuntu' as the hostname of the system, and the -+initial dhcp request on eth0 is not guaranteed to occur after the -+datasource code has been run. So, on first boot, that initial value -+will be sent in the dhcp request and *that* value will resolve. - --cloud-init handles this by setting the hostname in the DataSource's 'get_data' --method via '``hostname $HostName``', and then bouncing the interface. This -+In order to make the ``HostName`` provided in the ovf-env.xml resolve, -+a dhcp request must be made with the new value. cloud-init handles -+this by setting the hostname in the DataSource's 'get_data' method via -+'``hostname $HostName``', and then bouncing the interface. This - behavior can be configured or disabled in the datasource config. See - 'Configuration' above. - -diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py -index dedebeb1..320fa857 100644 ---- a/tests/unittests/test_datasource/test_azure.py -+++ b/tests/unittests/test_datasource/test_azure.py -@@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0 - def dsdevs(): - return data.get('dsdevs', []) - -- def _invoke_agent(cmd): -- data['agent_invoked'] = cmd -- - def _wait_for_files(flist, _maxwait=None, _naplen=None): - data['waited'] = flist - return [] - -- def _pubkeys_from_crt_files(flist): -- data['pubkey_files'] = flist -- return ["pubkey_from: %s" % f for f in flist] -- - if data.get('ovfcontent') is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': data['ovfcontent']}) -@@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0 - - self.apply_patches([ - (dsaz, 'list_possible_azure_ds_devs', dsdevs), -- (dsaz, 'invoke_agent', _invoke_agent), -- (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), -@@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0 - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) -- self.assertNotIn('agent_invoked', data) - # Assert that for non viable platforms, - # there is no communication with the Azure datasource. - self.assertEqual( -@@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0 - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) -- self.assertNotIn('agent_invoked', data) - self.assertEqual( - 1, - m_report_failure.call_count) -@@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0 - 1, - m_crawl_metadata.call_count) - self.assertFalse(ret) -- self.assertNotIn('agent_invoked', data) - - def test_crawl_metadata_exception_should_report_failure_with_msg(self): - data = {} -@@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0 - self.assertTrue(os.path.isdir(self.waagent_d)) - self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - -- def test_user_cfg_set_agent_command_plain(self): -- # set dscfg in via plaintext -- # we must have friendly-to-xml formatted plaintext in yaml_cfg -- # not all plaintext is expected to work. 
-- yaml_cfg = "{agent_command: my_command}\n" -- cfg = yaml.safe_load(yaml_cfg) -- odata = {'HostName': "myhost", 'UserName': "myuser", -- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} -- data = {'ovfcontent': construct_valid_ovf_env(data=odata)} -- -- dsrc = self._get_ds(data) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- self.assertEqual(data['agent_invoked'], cfg['agent_command']) -- - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds(self, m_driver): -@@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0 - dsrc.get_data() - self.assertEqual('eastus2', dsrc.region) - -- def test_user_cfg_set_agent_command(self): -- # set dscfg in via base64 encoded yaml -- cfg = {'agent_command': "my_command"} -- odata = {'HostName': "myhost", 'UserName': "myuser", -- 'dscfg': {'text': b64e(yaml.dump(cfg)), -- 'encoding': 'base64'}} -- data = {'ovfcontent': construct_valid_ovf_env(data=odata)} -- -- dsrc = self._get_ds(data) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- self.assertEqual(data['agent_invoked'], cfg['agent_command']) -- -- def test_sys_cfg_set_agent_command(self): -- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}} -- data = {'ovfcontent': construct_valid_ovf_env(data={}), -- 'sys_cfg': sys_cfg} -- -- dsrc = self._get_ds(data) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- self.assertEqual(data['agent_invoked'], '_COMMAND') -- - def test_sys_cfg_set_never_destroy_ntfs(self): - sys_cfg = {'datasource': {'Azure': { - 'never_destroy_ntfs': 'user-supplied-value'}}} -@@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0 - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8')) - -- def test_cfg_has_pubkeys_fingerprint(self): -- odata = {'HostName': "myhost", 'UserName': "myuser"} -- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] -- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] -- data = {'ovfcontent': construct_valid_ovf_env(data=odata, -- pubkeys=pubkeys)} -- -- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- for mypk in mypklist: -- self.assertIn(mypk, dsrc.cfg['_pubkeys']) -- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1]) -- -- def test_cfg_has_pubkeys_value(self): -- # make sure that provided key is used over fingerprint -- odata = {'HostName': "myhost", 'UserName': "myuser"} -- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}] -- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] -- data = {'ovfcontent': construct_valid_ovf_env(data=odata, -- pubkeys=pubkeys)} -- -- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- -- for mypk in mypklist: -- self.assertIn(mypk, dsrc.cfg['_pubkeys']) -- self.assertIn(mypk['value'], dsrc.metadata['public-keys']) -- -- def test_cfg_has_no_fingerprint_has_value(self): -- # test value is used when fingerprint not provided -- odata = {'HostName': "myhost", 'UserName': "myuser"} -- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}] -- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] -- data = {'ovfcontent': construct_valid_ovf_env(data=odata, -- pubkeys=pubkeys)} -- -- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) -- ret = self._get_and_setup(dsrc) -- self.assertTrue(ret) -- -- for mypk in mypklist: 
-- self.assertIn(mypk['value'], dsrc.metadata['public-keys']) -- - def test_default_ephemeral_configs_ephemeral_exists(self): - # make sure the ephemeral configs are correct if disk present - odata = {} -@@ -1919,8 +1824,6 @@ class TestAzureBounce(CiTestCase): - with_logs = True - - def mock_out_azure_moving_parts(self): -- self.patches.enter_context( -- mock.patch.object(dsaz, 'invoke_agent')) - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( --- -2.27.0 - diff --git a/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch b/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch deleted file mode 100644 index 44ad400..0000000 --- a/SOURCES/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 478709d7c157a085e3b2fee432e24978a3485234 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Wed, 20 Oct 2021 16:28:42 +0200 -Subject: [PATCH] cc_ssh.py: fix private key group owner and permissions - (#1070) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 32: cc_ssh.py: fix private key group owner and permissions (#1070) -RH-Commit: [1/1] 0382c3f671ae0fa9cab23dfad1f636967b012148 -RH-Bugzilla: 2013644 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal Morsy - -commit ee296ced9c0a61b1484d850b807c601bcd670ec1 -Author: Emanuele Giuseppe Esposito -Date: Tue Oct 19 21:32:10 2021 +0200 - - cc_ssh.py: fix private key group owner and permissions (#1070) - - When default host keys are created by sshd-keygen (/etc/ssh/ssh_host_*_key) - in RHEL/CentOS/Fedora, openssh it performs the following: - - # create new keys - if ! $KEYGEN -q -t $KEYTYPE -f $KEY -C '' -N '' >&/dev/null; then - exit 1 - fi - - # sanitize permissions - /usr/bin/chgrp ssh_keys $KEY - /usr/bin/chmod 640 $KEY - /usr/bin/chmod 644 $KEY.pub - Note that the group ssh_keys exists only in RHEL/CentOS/Fedora. - - Now that we disable sshd-keygen to allow only cloud-init to create - them, we miss the "sanitize permissions" part, where we set the group - owner as ssh_keys and the private key mode to 640. - - According to https://bugzilla.redhat.com/show_bug.cgi?id=2013644#c8, failing - to set group ownership and permissions like openssh does makes the RHEL openscap - tool generate an error. 
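
As a standalone illustration of the sanitising step described above, the sketch below applies the same group ownership and file modes; the ssh_keys group and the key path are assumptions that only hold on RHEL-family systems:

    import grp
    import os

    def sanitize_host_key_perms(keyfile):
        """Mimic sshd-keygen: private key group ssh_keys, mode 0640; public key 0644."""
        try:
            gid = grp.getgrnam("ssh_keys").gr_gid
        except KeyError:
            return  # group only exists on RHEL/CentOS/Fedora; nothing to do
        os.chown(keyfile, -1, gid)         # keep owner, change group only
        os.chmod(keyfile, 0o640)
        os.chmod(keyfile + ".pub", 0o644)

    # e.g. sanitize_host_key_perms("/etc/ssh/ssh_host_rsa_key")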
- - Signed-off-by: Emanuele Giuseppe Esposito eesposit@redhat.com - - RHBZ: 2013644 - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/config/cc_ssh.py | 7 +++++++ - cloudinit/util.py | 14 ++++++++++++++ - 2 files changed, 21 insertions(+) - -diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py -index 05a16dbc..4e986c55 100755 ---- a/cloudinit/config/cc_ssh.py -+++ b/cloudinit/config/cc_ssh.py -@@ -240,6 +240,13 @@ def handle(_name, cfg, cloud, log, _args): - try: - out, err = subp.subp(cmd, capture=True, env=lang_c) - sys.stdout.write(util.decode_binary(out)) -+ -+ gid = util.get_group_id("ssh_keys") -+ if gid != -1: -+ # perform same "sanitize permissions" as sshd-keygen -+ os.chown(keyfile, -1, gid) -+ os.chmod(keyfile, 0o640) -+ os.chmod(keyfile + ".pub", 0o644) - except subp.ProcessExecutionError as e: - err = util.decode_binary(e.stderr).lower() - if (e.exit_code == 1 and -diff --git a/cloudinit/util.py b/cloudinit/util.py -index 343976ad..fe37ae89 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -1831,6 +1831,20 @@ def chmod(path, mode): - os.chmod(path, real_mode) - - -+def get_group_id(grp_name: str) -> int: -+ """ -+ Returns the group id of a group name, or -1 if no group exists -+ -+ @param grp_name: the name of the group -+ """ -+ gid = -1 -+ try: -+ gid = grp.getgrnam(grp_name).gr_gid -+ except KeyError: -+ LOG.debug("Group %s is not a valid group name", grp_name) -+ return gid -+ -+ - def get_permissions(path: str) -> int: - """ - Returns the octal permissions of the file/folder pointed by the path, --- -2.27.0 - diff --git a/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch b/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch deleted file mode 100644 index 9ea95c1..0000000 --- a/SOURCES/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch +++ /dev/null @@ -1,87 +0,0 @@ -From ea83e72b335e652b080fda66a075c0d1322ed6dc Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Tue, 7 Dec 2021 10:00:41 +0100 -Subject: [PATCH] cloudinit/net: handle two different routes for the same ip - (#1124) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 39: cloudinit/net: handle two different routes for the same ip (#1124) -RH-Commit: [1/1] 6810dc29ce786fbca96d2033386aa69c6ab65997 -RH-Bugzilla: 2028028 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo - -commit 0e25076b34fa995161b83996e866c0974cee431f -Author: Emanuele Giuseppe Esposito -Date: Mon Dec 6 18:34:26 2021 +0100 - - cloudinit/net: handle two different routes for the same ip (#1124) - - If we set a dhcp server side like this: - $ cat /var/tmp/cloud-init/cloud-init-dhcp-f0rie5tm/dhcp.leases - lease { - ... - option classless-static-routes 31.169.254.169.254 0.0.0.0,31.169.254.169.254 - 10.112.143.127,22.10.112.140 0.0.0.0,0 10.112.140.1; - ... - } - cloud-init fails to configure the routes via 'ip route add' because to there are - two different routes for 169.254.169.254: - - $ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0 - $ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0 - - But NetworkManager can handle such scenario successfully as it uses "ip route append". 
- So change cloud-init to also use "ip route append" to fix the issue: - - $ ip -4 route append 192.168.1.1/32 via 0.0.0.0 dev eth0 - $ ip -4 route append 192.168.1.1/32 via 10.112.140.248 dev eth0 - - Signed-off-by: Emanuele Giuseppe Esposito - - RHBZ: #2003231 - -Conflicts: - cloudinit/net/tests/test_init.py: a mock call in - test_ephemeral_ipv4_network_with_rfc3442_static_routes is not - present downstream. - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/net/__init__.py | 2 +- - cloudinit/net/tests/test_init.py | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py -index 385b7bcc..003efa2a 100644 ---- a/cloudinit/net/__init__.py -+++ b/cloudinit/net/__init__.py -@@ -1138,7 +1138,7 @@ class EphemeralIPv4Network(object): - if gateway != "0.0.0.0/0": - via_arg = ['via', gateway] - subp.subp( -- ['ip', '-4', 'route', 'add', net_address] + via_arg + -+ ['ip', '-4', 'route', 'append', net_address] + via_arg + - ['dev', self.interface], capture=True) - self.cleanup_cmds.insert( - 0, ['ip', '-4', 'route', 'del', net_address] + via_arg + -diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py -index 946f8ee2..2350837b 100644 ---- a/cloudinit/net/tests/test_init.py -+++ b/cloudinit/net/tests/test_init.py -@@ -719,10 +719,10 @@ class TestEphemeralIPV4Network(CiTestCase): - ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], - capture=True), - mock.call( -- ['ip', '-4', 'route', 'add', '169.254.169.254/32', -+ ['ip', '-4', 'route', 'append', '169.254.169.254/32', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), - mock.call( -- ['ip', '-4', 'route', 'add', '0.0.0.0/0', -+ ['ip', '-4', 'route', 'append', '0.0.0.0/0', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] - expected_teardown_calls = [ - mock.call( --- -2.27.0 - diff --git a/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch b/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch deleted file mode 100644 index f257a67..0000000 --- a/SOURCES/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 005d0a98c69d154a00e9fd599c7fbe5aef73c933 Mon Sep 17 00:00:00 2001 -From: Amy Chen -Date: Thu, 25 Nov 2021 18:30:48 +0800 -Subject: [PATCH] fix error on upgrade caused by new vendordata2 attributes - -RH-Author: xiachen -RH-MergeRequest: 35: fix error on upgrade caused by new vendordata2 attributes -RH-Commit: [1/1] 9e00a7744838afbbdc5eb14628b7f572beba9f19 -RH-Bugzilla: 2021538 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Emanuele Giuseppe Esposito - -commit d132356cc361abef2d90d4073438f3ab759d5964 -Author: James Falcon -Date: Mon Apr 19 11:31:28 2021 -0500 - - fix error on upgrade caused by new vendordata2 attributes (#869) - - In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to - the DataSource class, but didn't use the upgrade framework to deal - with an unpickle after upgrade. This commit adds the necessary - upgrade code. - - Additionally, added a smaller-scope upgrade test to our integration - tests that will be run on every CI run so we catch these issues - immediately in the future. 
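
cloud-init handles this through the CloudInitPickleMixin _unpickle hook added in the hunk below; as a generic sketch of the same idea (class and attribute names here are illustrative), plain pickle can backfill attributes that an older serialized object never had:

    import pickle

    class DataSourceLike:
        def __init__(self):
            self.userdata_raw = None
            self.vendordata2 = None       # attributes introduced in a newer release
            self.vendordata2_raw = None

        def __setstate__(self, state):
            # Old pickles lack the new attributes; default them on load so
            # upgraded code never hits AttributeError.
            self.__dict__.update(state)
            for attr in ("vendordata2", "vendordata2_raw"):
                if not hasattr(self, attr):
                    setattr(self, attr, None)

    restored = pickle.loads(pickle.dumps(DataSourceLike()))
    assert restored.vendordata2 is None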
- - LP: #1922739 - -Signed-off-by: Amy Chen ---- - cloudinit/sources/__init__.py | 12 +++++++++++- - cloudinit/tests/test_upgrade.py | 4 ++++ - tests/integration_tests/clouds.py | 4 ++-- - tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++- - 4 files changed, 41 insertions(+), 4 deletions(-) - -diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py -index 1ad1880d..7d74f8d9 100644 ---- a/cloudinit/sources/__init__.py -+++ b/cloudinit/sources/__init__.py -@@ -24,6 +24,7 @@ from cloudinit import util - from cloudinit.atomic_helper import write_json - from cloudinit.event import EventType - from cloudinit.filters import launch_index -+from cloudinit.persistence import CloudInitPickleMixin - from cloudinit.reporting import events - - DSMODE_DISABLED = "disabled" -@@ -134,7 +135,7 @@ URLParams = namedtuple( - 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) - - --class DataSource(metaclass=abc.ABCMeta): -+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): - - dsmode = DSMODE_NETWORK - default_locale = 'en_US.UTF-8' -@@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta): - # non-root users - sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) - -+ _ci_pkl_version = 1 -+ - def __init__(self, sys_cfg, distro, paths, ud_proc=None): - self.sys_cfg = sys_cfg - self.distro = distro -@@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta): - else: - self.ud_proc = ud_proc - -+ def _unpickle(self, ci_pkl_version: int) -> None: -+ """Perform deserialization fixes for Paths.""" -+ if not hasattr(self, 'vendordata2'): -+ self.vendordata2 = None -+ if not hasattr(self, 'vendordata2_raw'): -+ self.vendordata2_raw = None -+ - def __str__(self): - return type_utils.obj_name(self) - -diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py -index f79a2536..fd3c5812 100644 ---- a/cloudinit/tests/test_upgrade.py -+++ b/cloudinit/tests/test_upgrade.py -@@ -43,3 +43,7 @@ class TestUpgrade: - def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): - """We always expect Networking.blacklist_drivers to be initialised.""" - assert previous_obj_pkl.distro.networking.blacklist_drivers is None -+ -+ def test_vendordata_exists(self, previous_obj_pkl): -+ assert previous_obj_pkl.vendordata2 is None -+ assert previous_obj_pkl.vendordata2_raw is None -diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py -index 9527a413..1d0b9d83 100644 ---- a/tests/integration_tests/clouds.py -+++ b/tests/integration_tests/clouds.py -@@ -100,14 +100,14 @@ class IntegrationCloud(ABC): - # Even if we're using the default key, it may still have a - # different name in the clouds, so we need to set it separately. 
- self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME -- self._released_image_id = self._get_initial_image() -+ self.released_image_id = self._get_initial_image() - self.snapshot_id = None - - @property - def image_id(self): - if self.snapshot_id: - return self.snapshot_id -- return self._released_image_id -+ return self.released_image_id - - def emit_settings_to_log(self) -> None: - log.info( -diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py -index c20cb3c1..48e0691b 100644 ---- a/tests/integration_tests/test_upgrade.py -+++ b/tests/integration_tests/test_upgrade.py -@@ -1,4 +1,5 @@ - import logging -+import os - import pytest - import time - from pathlib import Path -@@ -8,6 +9,8 @@ from tests.integration_tests.conftest import ( - get_validated_source, - session_start_time, - ) -+from tests.integration_tests.instances import CloudInitSource -+ - - log = logging.getLogger('integration_testing') - -@@ -63,7 +66,7 @@ def test_upgrade(session_cloud: IntegrationCloud): - return # type checking doesn't understand that skip raises - - launch_kwargs = { -- 'image_id': session_cloud._get_initial_image(), -+ 'image_id': session_cloud.released_image_id, - } - - image = ImageSpecification.from_os_image() -@@ -93,6 +96,26 @@ def test_upgrade(session_cloud: IntegrationCloud): - instance.install_new_cloud_init(source, take_snapshot=False) - instance.execute('hostname something-else') - _restart(instance) -+ assert instance.execute('cloud-init status --wait --long').ok - _output_to_compare(instance, after_path, netcfg_path) - - log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) -+ -+ -+@pytest.mark.ci -+@pytest.mark.ubuntu -+def test_upgrade_package(session_cloud: IntegrationCloud): -+ if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE: -+ not_run_message = 'Test only supports upgrading to build deb' -+ if os.environ.get('TRAVIS'): -+ # If this isn't running on CI, we should know -+ pytest.fail(not_run_message) -+ else: -+ pytest.skip(not_run_message) -+ -+ launch_kwargs = {'image_id': session_cloud.released_image_id} -+ -+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: -+ instance.install_deb() -+ instance.restart() -+ assert instance.execute('cloud-init status --wait --long').ok --- -2.27.0 - diff --git a/SOURCES/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch b/SOURCES/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch deleted file mode 100644 index be1e283..0000000 --- a/SOURCES/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch +++ /dev/null @@ -1,65 +0,0 @@ -From abf1adeae8211f5acd87dc63b03b2ed995047efd Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Thu, 20 May 2021 08:53:55 +0200 -Subject: [PATCH 1/2] rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and - set in cloud.cfg - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 10: rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and set in cloud.cfg -RH-Commit: [1/1] 6da989423b9b6e017afbac2f1af3649b0487310f -RH-Bugzilla: 1957532 -RH-Acked-by: Eduardo Otubo -RH-Acked-by: Cathy Avery -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal Morsy - -Currently genkeytypes in cloud.cfg is set to None, so together with -ssh_deletekeys=1 cloudinit on first boot it will just delete the existing -keys and not generate new ones. 
- -Just removing that property in cloud.cfg is not enough, because -settings.py provides another empty default value that will be used -instead, resulting to no key generated even when the property is not defined. - -Removing genkeytypes also in settings.py will default to GENERATE_KEY_NAMES, -but since we want only 'rsa', 'ecdsa' and 'ed25519', add back genkeytypes in -cloud.cfg with the above defaults. - -Also remove ssh_deletekeys in settings.py as we always need -to 1 (and it also defaults to 1). - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/settings.py | 2 -- - rhel/cloud.cfg | 2 +- - 2 files changed, 1 insertion(+), 3 deletions(-) - -diff --git a/cloudinit/settings.py b/cloudinit/settings.py -index 43a1490c..2acf2615 100644 ---- a/cloudinit/settings.py -+++ b/cloudinit/settings.py -@@ -49,8 +49,6 @@ CFG_BUILTIN = { - 'def_log_file_mode': 0o600, - 'log_cfgs': [], - 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], -- 'ssh_deletekeys': False, -- 'ssh_genkeytypes': [], - 'syslog_fix_perms': [], - 'system_info': { - 'paths': { -diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg -index 9ecba215..cbee197a 100644 ---- a/rhel/cloud.cfg -+++ b/rhel/cloud.cfg -@@ -7,7 +7,7 @@ ssh_pwauth: 0 - mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] - resize_rootfs_tmp: /dev - ssh_deletekeys: 1 --ssh_genkeytypes: ~ -+ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519'] - syslog_fix_perms: ~ - disable_vmware_customization: false - --- -2.27.0 - diff --git a/SOURCES/ci-setup.py-adjust-udev-rules-default-path-1513.patch b/SOURCES/ci-setup.py-adjust-udev-rules-default-path-1513.patch new file mode 100644 index 0000000..1385aae --- /dev/null +++ b/SOURCES/ci-setup.py-adjust-udev-rules-default-path-1513.patch @@ -0,0 +1,57 @@ +From ed7060ac1d5003f70fc3da4d6006a1a958a47b04 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 20 Jun 2022 10:31:14 +0200 +Subject: [PATCH 2/2] setup.py: adjust udev/rules default path (#1513) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 80: setup.py: adjust udev/rules default path (#1513) +RH-Commit: [2/2] 2cb64b004acbe1b6a30f943b0da51d2d1f2f0d50 (eesposit/cloud-init) +RH-Bugzilla: 2096269 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +commit 70715125f3af118ae242770e61064c24f41e9a02 +Author: Emanuele Giuseppe Esposito +Date: Thu Jun 16 20:39:42 2022 +0200 + + setup.py: adjust udev/rules default path (#1513) + + RHEL must put cloudinit .rules files in /usr/lib/udev/rules.d + This place is a rhel standard and since it is used by all packages + cannot be modified. 
+ + Signed-off-by: Emanuele Giuseppe Esposito + +Signed-off-by: Emanuele Giuseppe Esposito +--- + setup.py | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/setup.py b/setup.py +index a9132d2c..fdf27cd7 100755 +--- a/setup.py ++++ b/setup.py +@@ -302,6 +302,11 @@ data_files = [ + ), + ] + if not platform.system().endswith("BSD"): ++ ++ RULES_PATH = LIB ++ if os.path.isfile("/etc/redhat-release"): ++ RULES_PATH = "/usr/lib" ++ + data_files.extend( + [ + ( +@@ -309,7 +314,7 @@ if not platform.system().endswith("BSD"): + ["tools/hook-network-manager"], + ), + (ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]), +- (LIB + "/udev/rules.d", [f for f in glob("udev/*.rules")]), ++ (RULES_PATH + "/udev/rules.d", [f for f in glob("udev/*.rules")]), + ( + ETC + "/systemd/system/sshd-keygen@.service.d/", + ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"], +-- +2.31.1 + diff --git a/SOURCES/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch b/SOURCES/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch deleted file mode 100644 index bdec823..0000000 --- a/SOURCES/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch +++ /dev/null @@ -1,653 +0,0 @@ -From aeab67600eb2d5e483812620b56ce5fb031a57d6 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Mon, 12 Jul 2021 21:47:37 +0200 -Subject: [PATCH] ssh-util: allow cloudinit to merge all ssh keys into a custom - user file, defined in AuthorizedKeysFile (#937) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 25: ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) -RH-Commit: [1/1] 27bbe94f3b9dd8734865766bd30b06cff83383ab (eesposit/cloud-init) -RH-Bugzilla: 1862967 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Mohamed Gamal Morsy - -TESTED: By me and QA -BREW: 38030830 - -Conflicts: upstream patch modifies tests/integration_tests/util.py, that is -not present in RHEL. - -commit 9b52405c6f0de5e00d5ee9c1d13540425d8f6bf5 -Author: Emanuele Giuseppe Esposito -Date: Mon Jul 12 20:21:02 2021 +0200 - - ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) - - This patch aims to fix LP1911680, by analyzing the files provided - in sshd_config and merge all keys into an user-specific file. Also - introduces additional tests to cover this specific case. - - The file is picked by analyzing the path given in AuthorizedKeysFile. - - If it points inside the current user folder (path is /home/user/*), it - means it is an user-specific file, so we can copy all user-keys there. - If it contains a %u or %h, it means that there will be a specific - authorized_keys file for each user, so we can copy all user-keys there. - If no path points to an user-specific file, for example when only - /etc/ssh/authorized_keys is given, default to ~/.ssh/authorized_keys. - Note that if there are more than a single user-specific file, the last - one will be picked. 
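
A minimal sketch of that selection rule; the helper name is illustrative, and the real logic lives in ssh_util.extract_authorized_keys as patched below:

    import os

    def pick_user_keys_file(authorizedkeysfile_value, home, user):
        """Return the file that this user's keys should be merged into."""
        chosen = os.path.join(home, ".ssh", "authorized_keys")  # default
        for raw in authorizedkeysfile_value.split():
            rendered = raw.replace("%h", home).replace("%u", user)
            if not rendered.startswith("/"):
                rendered = os.path.join(home, rendered)
            if "%u" in raw or "%h" in raw or rendered.startswith(home + "/"):
                chosen = rendered  # last user-specific entry wins
        return chosen

    # "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2" -> /home/bob/.ssh/authorized_keys2
    print(pick_user_keys_file(
        "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2", "/home/bob", "bob"))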
- - Signed-off-by: Emanuele Giuseppe Esposito - Co-authored-by: James Falcon - - LP: #1911680 - RHBZ:1862967 - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/ssh_util.py | 22 +- - .../assets/keys/id_rsa.test1 | 38 +++ - .../assets/keys/id_rsa.test1.pub | 1 + - .../assets/keys/id_rsa.test2 | 38 +++ - .../assets/keys/id_rsa.test2.pub | 1 + - .../assets/keys/id_rsa.test3 | 38 +++ - .../assets/keys/id_rsa.test3.pub | 1 + - .../modules/test_ssh_keysfile.py | 85 ++++++ - tests/unittests/test_sshutil.py | 246 +++++++++++++++++- - 9 files changed, 456 insertions(+), 14 deletions(-) - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1 - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1.pub - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2 - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2.pub - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3 - create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3.pub - create mode 100644 tests/integration_tests/modules/test_ssh_keysfile.py - -diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py -index c08042d6..89057262 100644 ---- a/cloudinit/ssh_util.py -+++ b/cloudinit/ssh_util.py -@@ -252,13 +252,15 @@ def render_authorizedkeysfile_paths(value, homedir, username): - def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): - (ssh_dir, pw_ent) = users_ssh_info(username) - default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') -+ user_authorizedkeys_file = default_authorizedkeys_file - auth_key_fns = [] - with util.SeLinuxGuard(ssh_dir, recursive=True): - try: - ssh_cfg = parse_ssh_config_map(sshd_cfg_file) -+ key_paths = ssh_cfg.get("authorizedkeysfile", -+ "%h/.ssh/authorized_keys") - auth_key_fns = render_authorizedkeysfile_paths( -- ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"), -- pw_ent.pw_dir, username) -+ key_paths, pw_ent.pw_dir, username) - - except (IOError, OSError): - # Give up and use a default key filename -@@ -267,8 +269,22 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): - "config from %r, using 'AuthorizedKeysFile' file " - "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) - -+ # check if one of the keys is the user's one -+ for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): -+ if any([ -+ '%u' in key_path, -+ '%h' in key_path, -+ auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) -+ ]): -+ user_authorizedkeys_file = auth_key_fn -+ -+ if user_authorizedkeys_file != default_authorizedkeys_file: -+ LOG.debug( -+ "AuthorizedKeysFile has an user-specific authorized_keys, " -+ "using %s", user_authorizedkeys_file) -+ - # always store all the keys in the user's private file -- return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) -+ return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) - - - def setup_user_keys(keys, username, options=None): -diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1 -new file mode 100644 -index 00000000..bd4c822e ---- /dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test1 -@@ -0,0 +1,38 @@ -+-----BEGIN OPENSSH PRIVATE KEY----- -+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn -+NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye -+t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV -+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+ 
-+yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY -+lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN -+HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw -+Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO -+geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2 -+EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL -+NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/ -+rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n -+vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2 -+euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS -+khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg -+RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN -+oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT -+Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT -+tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi -+vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq -+KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA -+w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+ -+qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6 -++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA -+AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp -+60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E -+uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC -+77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ -+aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk -+cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb -+Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb -+GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB -+/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g -+0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I -+bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4 -+CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q= -+-----END OPENSSH PRIVATE KEY----- -diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub -new file mode 100644 -index 00000000..3d2e26e1 ---- /dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub -@@ -0,0 +1 @@ -+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host -diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2 -new file mode 100644 -index 00000000..5854d901 ---- /dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test2 -@@ -0,0 +1,38 @@ -+-----BEGIN OPENSSH PRIVATE KEY----- -+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn 
-+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO -+LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+ -+3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO -+i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH -+m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2 -+17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5 -+qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS -+yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2 -+EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ -+A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq -+0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4 -+qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP -+CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T -+wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH -+BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5 -+ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+ -+aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV -+RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd -+eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34 -+qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql -+rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB -+w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy -+dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA -+wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj -+c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr -+IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy -+Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv -+vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u -+U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv -+/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9 -+mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV -+zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd -+E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS -+0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD -+tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q= -+-----END OPENSSH PRIVATE KEY----- -diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub -new file mode 100644 -index 00000000..f3831a57 ---- /dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub -@@ -0,0 +1 @@ -+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host -diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3 -new file mode 100644 -index 00000000..2596c762 ---- 
/dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test3 -@@ -0,0 +1,38 @@ -+-----BEGIN OPENSSH PRIVATE KEY----- -+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn -+NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV -+yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG -+bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH -+9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ -+ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV -+O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr -+jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm -+Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2 -+EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL -+6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy -+fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e -+fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ -+iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f -+2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA -+QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ -+HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n -+Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK -+WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV -+JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ -+vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR -+2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8 -+1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d -+DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA -+wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P -+yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy -+QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2 -+0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k -+mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi -+uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9 -+3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr -+VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM -+2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM -+GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa -+e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x -+eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE= -+-----END OPENSSH PRIVATE KEY----- -diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub -new file mode 100644 -index 00000000..057db632 ---- /dev/null -+++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub -@@ -0,0 +1 @@ -+ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host -diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py -new file mode 100644 -index 00000000..f82d7649 ---- /dev/null -+++ b/tests/integration_tests/modules/test_ssh_keysfile.py -@@ -0,0 +1,85 @@ -+import paramiko -+import pytest -+from io import StringIO -+from paramiko.ssh_exception import SSHException -+ -+from tests.integration_tests.instances import IntegrationInstance -+from tests.integration_tests.util import get_test_rsa_keypair -+ -+TEST_USER1_KEYS = get_test_rsa_keypair('test1') -+TEST_USER2_KEYS = get_test_rsa_keypair('test2') -+TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') -+ -+USERDATA = """\ -+#cloud-config -+bootcmd: -+ - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config -+ssh_authorized_keys: -+ - {default} -+users: -+- default -+- name: test_user1 -+ ssh_authorized_keys: -+ - {user1} -+- name: test_user2 -+ ssh_authorized_keys: -+ - {user2} -+""".format( # noqa: E501 -+ default=TEST_DEFAULT_KEYS.public_key, -+ user1=TEST_USER1_KEYS.public_key, -+ user2=TEST_USER2_KEYS.public_key, -+) -+ -+ -+@pytest.mark.ubuntu -+@pytest.mark.user_data(USERDATA) -+def test_authorized_keys(client: IntegrationInstance): -+ expected_keys = [ -+ ('test_user1', '/home/test_user1/.ssh/authorized_keys2', -+ TEST_USER1_KEYS), -+ ('test_user2', '/home/test_user2/.ssh/authorized_keys2', -+ TEST_USER2_KEYS), -+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', -+ TEST_DEFAULT_KEYS), -+ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), -+ ] -+ -+ for user, filename, keys in expected_keys: -+ contents = client.read_from_file(filename) -+ if user in ['ubuntu', 'root']: -+ # Our personal public key gets added by pycloudlib -+ lines = contents.split('\n') -+ assert len(lines) == 2 -+ assert keys.public_key.strip() in contents -+ else: -+ assert contents.strip() == keys.public_key.strip() -+ -+ # Ensure we can actually connect -+ ssh = paramiko.SSHClient() -+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) -+ paramiko_key = paramiko.RSAKey.from_private_key(StringIO( -+ keys.private_key)) -+ -+ # Will fail with AuthenticationException if -+ # we cannot connect -+ ssh.connect( -+ client.instance.ip, -+ username=user, -+ pkey=paramiko_key, -+ look_for_keys=False, -+ allow_agent=False, -+ ) -+ -+ # Ensure other uses can't connect using our key -+ other_users = [u[0] for u in expected_keys if u[2] != keys] -+ for other_user in other_users: -+ with pytest.raises(SSHException): -+ print('trying to connect as {} with key from {}'.format( -+ other_user, user)) -+ ssh.connect( -+ client.instance.ip, -+ username=other_user, -+ pkey=paramiko_key, -+ look_for_keys=False, -+ allow_agent=False, -+ ) -diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py -index fd1d1bac..bcb8044f 100644 ---- a/tests/unittests/test_sshutil.py -+++ b/tests/unittests/test_sshutil.py -@@ -570,20 +570,33 @@ class 
TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): - ssh_util.render_authorizedkeysfile_paths( - "%h/.keys", "/homedirs/bobby", "bobby")) - -+ def test_all(self): -+ self.assertEqual( -+ ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys", -+ "/keys/path1", "/opt/bobby/keys"], -+ ssh_util.render_authorizedkeysfile_paths( -+ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", -+ "/homedirs/bobby", "bobby")) -+ - - class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): - - @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby') -+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw -- authorized_keys = self.tmp_path('authorized_keys') -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ -+ # /tmp/home2/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) - -- user_keys = self.tmp_path('user_keys') -+ # /tmp/home2/bobby/.ssh/user_keys = dsa -+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) - -- sshd_config = self.tmp_path('sshd_config') -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) -@@ -593,33 +606,244 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) - -- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) -+ self.assertEqual(user_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) - - @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): -- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie') -+ fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') - m_getpwnam.return_value = fpw -- authorized_keys = self.tmp_path('authorized_keys') -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ -+ # /tmp/home/suzie/.ssh/authorized_keys = rsa -+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) - -- user_keys = self.tmp_path('user_keys') -+ # /tmp/home/suzie/.ssh/user_keys = dsa -+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) - -- sshd_config = self.tmp_path('sshd_config') -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, -- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) -+ "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) - ) - - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -- fpw.pw_name, sshd_config -+ fpw.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(authorized_keys, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['rsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): -+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -+ m_getpwnam.return_value = fpw -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ -+ # 
/tmp/home2/bobby/.ssh/authorized_keys = rsa -+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) -+ util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ -+ # /tmp/home2/bobby/.ssh/user_keys = dsa -+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) -+ util.write_file(user_keys, VALID_CONTENT['dsa']) -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -+ dir="/tmp") -+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") -+ util.write_file( -+ sshd_config, -+ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, -+ user_keys, authorized_keys) -+ ) -+ -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(authorized_keys, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['rsa'] in content) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): -+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -+ m_getpwnam.return_value = fpw -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ -+ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa -+ authorized_keys = self.tmp_path('authorized_keys2', -+ dir=user_ssh_folder) -+ util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ -+ # /tmp/home2/bobby/.ssh/user_keys3 = dsa -+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -+ util.write_file(user_keys, VALID_CONTENT['dsa']) -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -+ dir="/tmp") -+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") -+ util.write_file( -+ sshd_config, -+ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, -+ authorized_keys, user_keys) -+ ) -+ -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(user_keys, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['rsa'] in content) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ def test_multiple_authorizedkeys_file_global(self, m_getpwnam): -+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') -+ m_getpwnam.return_value = fpw -+ -+ # /tmp/etc/ssh/authorized_keys = rsa -+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', -+ dir="/tmp") -+ util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) -+ -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config') -+ util.write_file( -+ sshd_config, -+ "AuthorizedKeysFile %s" % (authorized_keys_global) - ) -+ -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) - - self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): -+ fpw = FakePwEnt(pw_name='bobby', 
pw_dir='/tmp/home2/bobby') -+ m_getpwnam.return_value = fpw -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa -+ authorized_keys = self.tmp_path('authorized_keys2', -+ dir=user_ssh_folder) -+ util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ # /tmp/home2/bobby/.ssh/user_keys3 = dsa -+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -+ util.write_file(user_keys, VALID_CONTENT['dsa']) -+ -+ fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') -+ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir -+ # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com -+ authorized_keys2 = self.tmp_path('authorized_keys2', -+ dir=user_ssh_folder) -+ util.write_file(authorized_keys2, -+ VALID_CONTENT['ssh-xmss@openssh.com']) -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', -+ dir="/tmp") -+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") -+ util.write_file( -+ sshd_config, -+ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % -+ (authorized_keys_global, user_keys) -+ ) -+ -+ # process first user -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(user_keys, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['rsa'] in content) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) -+ -+ m_getpwnam.return_value = fpw2 -+ # process second user -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw2.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(authorized_keys2, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ self.assertFalse(VALID_CONTENT['rsa'] in content) -+ -+ @patch("cloudinit.ssh_util.pwd.getpwnam") -+ def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): -+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') -+ m_getpwnam.return_value = fpw -+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir -+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa -+ authorized_keys = self.tmp_path('authorized_keys2', -+ dir=user_ssh_folder) -+ util.write_file(authorized_keys, VALID_CONTENT['rsa']) -+ # /tmp/home/bobby/.ssh/user_keys3 = dsa -+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) -+ util.write_file(user_keys, VALID_CONTENT['dsa']) -+ -+ fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') -+ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir -+ # /tmp/home/badguy/home/bobby = "" -+ authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") -+ -+ # /tmp/etc/ssh/authorized_keys = ecdsa -+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', -+ dir="/tmp") -+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) -+ -+ # /tmp/sshd_config -+ sshd_config = self.tmp_path('sshd_config', dir="/tmp") -+ util.write_file( -+ sshd_config, -+ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" % -+ (authorized_keys_global, user_keys, authorized_keys2) -+ ) -+ -+ # process first user -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ 
fpw.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ self.assertEqual(user_keys, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['rsa'] in content) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) -+ self.assertTrue(VALID_CONTENT['dsa'] in content) -+ -+ m_getpwnam.return_value = fpw2 -+ # process second user -+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( -+ fpw2.pw_name, sshd_config) -+ content = ssh_util.update_authorized_keys(auth_key_entries, []) -+ -+ # badguy should not take the key from the other user! -+ self.assertEqual(authorized_keys2, auth_key_fn) -+ self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) -+ self.assertFalse(VALID_CONTENT['rsa'] in content) - - # vi: ts=4 expandtab --- -2.27.0 - diff --git a/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch b/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch deleted file mode 100644 index 13484d3..0000000 --- a/SOURCES/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch +++ /dev/null @@ -1,85 +0,0 @@ -From 7d4e16bfc1cefbdd4d1477480b02b1d6c1399e4d Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Mon, 20 Sep 2021 12:16:36 +0200 -Subject: [PATCH] ssh_utils.py: ignore when sshd_config options are not - key/value pairs (#1007) - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 31: ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007) -RH-Commit: [1/1] 9007fb8a116e98036ff17df0168a76e9a5843671 (eesposit/cloud-init) -RH-Bugzilla: 1862933 -RH-Acked-by: Mohamed Gamal Morsy -RH-Acked-by: Vitaly Kuznetsov - -TESTED: by me -BREW: 39832462 - -commit 2ce857248162957a785af61c135ca8433fdbbcde -Author: Emanuele Giuseppe Esposito -Date: Wed Sep 8 02:08:36 2021 +0200 - - ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007) - - As specified in #LP 1845552, - In cloudinit/ssh_util.py, in parse_ssh_config_lines(), we attempt to - parse each line of sshd_config. This function expects each line to - be one of the following forms: - - \# comment - key value - key=value - - However, options like DenyGroups and DenyUsers are specified to - *optionally* accepts values in sshd_config. - Cloud-init should comply to this and skip the option if a value - is not provided. 
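
A minimal sketch of the tolerant parsing, with a plain tuple standing in for the SshdConfigLine objects used in the patch below:

    def parse_sshd_option(line):
        """Return (key, value) for 'key value' or 'key=value'; None when there is no value."""
        line = line.strip()
        if not line or line.startswith("#"):
            return None
        try:
            key, val = line.split(None, 1)
        except ValueError:
            try:
                key, val = line.split("=", 1)
            except ValueError:
                # Options such as DenyUsers or DenyGroups may appear with no
                # value at all; skip them instead of raising.
                return None
        return key, val.strip()

    assert parse_sshd_option("DenyUsers") is None
    assert parse_sshd_option("PasswordAuthentication no") == ("PasswordAuthentication", "no")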
- - Signed-off-by: Emanuele Giuseppe Esposito - -Signed-off-by: Emanuele Giuseppe Esposito ---- - cloudinit/ssh_util.py | 8 +++++++- - tests/unittests/test_sshutil.py | 8 ++++++++ - 2 files changed, 15 insertions(+), 1 deletion(-) - -diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py -index 9ccadf09..33679dcc 100644 ---- a/cloudinit/ssh_util.py -+++ b/cloudinit/ssh_util.py -@@ -484,7 +484,13 @@ def parse_ssh_config_lines(lines): - try: - key, val = line.split(None, 1) - except ValueError: -- key, val = line.split('=', 1) -+ try: -+ key, val = line.split('=', 1) -+ except ValueError: -+ LOG.debug( -+ "sshd_config: option \"%s\" has no key/value pair," -+ " skipping it", line) -+ continue - ret.append(SshdConfigLine(line, key, val)) - return ret - -diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py -index a66788bf..08e20050 100644 ---- a/tests/unittests/test_sshutil.py -+++ b/tests/unittests/test_sshutil.py -@@ -525,6 +525,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): - self.assertEqual([self.pwauth], result) - self.check_line(lines[-1], self.pwauth, "no") - -+ def test_option_without_value(self): -+ """Implementation only accepts key-value pairs.""" -+ extended_exlines = self.exlines.copy() -+ denyusers_opt = "DenyUsers" -+ extended_exlines.append(denyusers_opt) -+ lines = ssh_util.parse_ssh_config_lines(list(extended_exlines)) -+ self.assertNotIn(denyusers_opt, str(lines)) -+ - def test_single_option_updated(self): - """A single update should have change made and line updated.""" - opt, val = ("UsePAM", "no") --- -2.27.0 - diff --git a/SOURCES/ci-systemd-Better-support-package-and-upgrade.patch b/SOURCES/ci-systemd-Better-support-package-and-upgrade.patch deleted file mode 100644 index 0421684..0000000 --- a/SOURCES/ci-systemd-Better-support-package-and-upgrade.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 414f81cd855692ac1f6d2268acf2fe21f1772204 Mon Sep 17 00:00:00 2001 -From: Emanuele Giuseppe Esposito -Date: Tue, 3 May 2022 19:13:24 +0200 -Subject: [PATCH] systemd: Better support package and upgrade. - -RH-Author: Emanuele Giuseppe Esposito -RH-MergeRequest: 58: systemd: Better support package and upgrade. -RH-Commit: [1/1] 04d6c0910f7c52193bda82bfe45100129db7fa3d -RH-Bugzilla: 2081003 -RH-Acked-by: Vitaly Kuznetsov -RH-Acked-by: Jon Maloy - -commit 34a26f7f59f2963691e36ca0476bec9fc9ccef63 -Author: Scott Moser -Date: Thu Sep 8 13:17:37 2016 -0400 - - systemd: Better support package and upgrade. - - In systemd, package installation before the system is fully booted - (systemctl is-system-running == starting) may result in the package not - being started. Upgrade (package_upgrade: true) can also cause failure if - that is done during systemd boot. - - The solution here is: - a.) move config modules that do or may do package installation to - 'final_modules'. That list is: - - snappy - - package-update-upgrade-install - - fan - - landscape - - lxd - - puppet - - chef - - salt-minion - - mcollective - b.) move cloud-final.service to run as 'Type=idle' - - LP: #1576692, #1621336 - -Signed-off-by: Emanuele Giuseppe Esposito - -Conflicts: -- Upstream file changes config/cloud.cfg, instead here our config file is in -rhel/cloud.cfg. 
-- Packages modified in upstream but not present here: byobu, snappy -- multi-user.target is already present in cloud-final.service.tmpl, -but only for ubuntu, debian and unknown variants ---- - rhel/cloud.cfg | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg -index cbee197a..7217781e 100644 ---- a/rhel/cloud.cfg -+++ b/rhel/cloud.cfg -@@ -31,16 +31,16 @@ cloud_config_modules: - - set-passwords - - rh_subscription - - yum-add-repo -- - package-update-upgrade-install - - timezone -- - puppet -- - chef -- - salt-minion -- - mcollective - - disable-ec2-metadata - - runcmd - - cloud_final_modules: -+ - package-update-upgrade-install -+ - puppet -+ - chef -+ - salt-minion -+ - mcollective - - rightscale_userdata - - scripts-per-once - - scripts-per-boot --- -2.27.0 - diff --git a/SOURCES/ci-write-passwords-only-to-serial-console-lock-down-clo.patch b/SOURCES/ci-write-passwords-only-to-serial-console-lock-down-clo.patch deleted file mode 100644 index 5cf4671..0000000 --- a/SOURCES/ci-write-passwords-only-to-serial-console-lock-down-clo.patch +++ /dev/null @@ -1,369 +0,0 @@ -From 769b9f8c9b1ecc294a197575108ae7cb54ad7f4b Mon Sep 17 00:00:00 2001 -From: Eduardo Otubo -Date: Mon, 5 Jul 2021 14:13:45 +0200 -Subject: [PATCH] write passwords only to serial console, lock down - cloud-init-output.log (#847) - -RH-Author: Eduardo Otubo -RH-MergeRequest: 21: write passwords only to serial console, lock down cloud-init-output.log (#847) -RH-Commit: [1/1] 8f30f2b7d0d6f9dca19994dbd0827b44e998f238 (otubo/cloud-init) -RH-Bugzilla: 1945891 -RH-Acked-by: Emanuele Giuseppe Esposito -RH-Acked-by: Mohamed Gamal Morsy - -commit b794d426b9ab43ea9d6371477466070d86e10668 -Author: Daniel Watkins -Date: Fri Mar 19 10:06:42 2021 -0400 - - write passwords only to serial console, lock down cloud-init-output.log (#847) - - Prior to this commit, when a user specified configuration which would - generate random passwords for users, cloud-init would cause those - passwords to be written to the serial console by emitting them on - stderr. In the default configuration, any stdout or stderr emitted by - cloud-init is also written to `/var/log/cloud-init-output.log`. This - file is world-readable, meaning that those randomly-generated passwords - were available to be read by any user with access to the system. This - presents an obvious security issue. - - This commit responds to this issue in two ways: - - * We address the direct issue by moving from writing the passwords to - sys.stderr to writing them directly to /dev/console (via - util.multi_log); this means that the passwords will never end up in - cloud-init-output.log - * To avoid future issues like this, we also modify the logging code so - that any files created in a log sink subprocess will only be - owner/group readable and, if it exists, will be owned by the adm - group. This results in `/var/log/cloud-init-output.log` no longer - being world-readable, meaning that if there are other parts of the - codebase that are emitting sensitive data intended for the serial - console, that data is no longer available to all users of the system. 
- - LP: #1918303 - -Signed-off-by: Eduardo Otubo ---- - cloudinit/config/cc_set_passwords.py | 5 +- - cloudinit/config/tests/test_set_passwords.py | 40 +++++++++---- - cloudinit/tests/test_util.py | 56 +++++++++++++++++++ - cloudinit/util.py | 38 +++++++++++-- - .../modules/test_set_password.py | 24 ++++++++ - tests/integration_tests/test_logging.py | 22 ++++++++ - tests/unittests/test_util.py | 4 ++ - 7 files changed, 173 insertions(+), 16 deletions(-) - create mode 100644 tests/integration_tests/test_logging.py - -diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py -index d6b5682d..433de751 100755 ---- a/cloudinit/config/cc_set_passwords.py -+++ b/cloudinit/config/cc_set_passwords.py -@@ -78,7 +78,6 @@ password. - """ - - import re --import sys - - from cloudinit.distros import ug_util - from cloudinit import log as logging -@@ -214,7 +213,9 @@ def handle(_name, cfg, cloud, log, args): - if len(randlist): - blurb = ("Set the following 'random' passwords\n", - '\n'.join(randlist)) -- sys.stderr.write("%s\n%s\n" % blurb) -+ util.multi_log( -+ "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False -+ ) - - if expire: - expired_users = [] -diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py -index daa1ef51..bbe2ee8f 100644 ---- a/cloudinit/config/tests/test_set_passwords.py -+++ b/cloudinit/config/tests/test_set_passwords.py -@@ -74,10 +74,6 @@ class TestSetPasswordsHandle(CiTestCase): - - with_logs = True - -- def setUp(self): -- super(TestSetPasswordsHandle, self).setUp() -- self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') -- - def test_handle_on_empty_config(self, *args): - """handle logs that no password has changed when config is empty.""" - cloud = self.tmp_cloud(distro='ubuntu') -@@ -129,10 +125,12 @@ class TestSetPasswordsHandle(CiTestCase): - mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], - m_subp.call_args_list) - -+ @mock.patch(MODPATH + "util.multi_log") - @mock.patch(MODPATH + "util.is_BSD") - @mock.patch(MODPATH + "subp.subp") -- def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, -- m_is_bsd): -+ def test_handle_on_chpasswd_list_creates_random_passwords( -+ self, m_subp, m_is_bsd, m_multi_log -+ ): - """handle parses command set random passwords.""" - m_is_bsd.return_value = False - cloud = self.tmp_cloud(distro='ubuntu') -@@ -146,10 +144,32 @@ class TestSetPasswordsHandle(CiTestCase): - self.assertIn( - 'DEBUG: Handling input for chpasswd as list.', - self.logs.getvalue()) -- self.assertNotEqual( -- [mock.call(['chpasswd'], -- '\n'.join(valid_random_pwds) + '\n')], -- m_subp.call_args_list) -+ -+ self.assertEqual(1, m_subp.call_count) -+ args, _kwargs = m_subp.call_args -+ self.assertEqual(["chpasswd"], args[0]) -+ -+ stdin = args[1] -+ user_pass = { -+ user: password -+ for user, password -+ in (line.split(":") for line in stdin.splitlines()) -+ } -+ -+ self.assertEqual(1, m_multi_log.call_count) -+ self.assertEqual( -+ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False), -+ m_multi_log.call_args -+ ) -+ -+ self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys())) -+ written_lines = m_multi_log.call_args[0][0].splitlines() -+ for password in user_pass.values(): -+ for line in written_lines: -+ if password in line: -+ break -+ else: -+ self.fail("Password not emitted to console") - - - # vi: ts=4 expandtab -diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py -index b7a302f1..e811917e 100644 
---- a/cloudinit/tests/test_util.py -+++ b/cloudinit/tests/test_util.py -@@ -851,4 +851,60 @@ class TestEnsureFile: - assert "ab" == kwargs["omode"] - - -+@mock.patch("cloudinit.util.grp.getgrnam") -+@mock.patch("cloudinit.util.os.setgid") -+@mock.patch("cloudinit.util.os.umask") -+class TestRedirectOutputPreexecFn: -+ """This tests specifically the preexec_fn used in redirect_output.""" -+ -+ @pytest.fixture(params=["outfmt", "errfmt"]) -+ def preexec_fn(self, request): -+ """A fixture to gather the preexec_fn used by redirect_output. -+ -+ This enables simpler direct testing of it, and parameterises any tests -+ using it to cover both the stdout and stderr code paths. -+ """ -+ test_string = "| piped output to invoke subprocess" -+ if request.param == "outfmt": -+ args = (test_string, None) -+ elif request.param == "errfmt": -+ args = (None, test_string) -+ with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: -+ util.redirect_output(*args) -+ -+ assert 1 == m_popen.call_count -+ _args, kwargs = m_popen.call_args -+ assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" -+ return kwargs["preexec_fn"] -+ -+ def test_preexec_fn_sets_umask( -+ self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn -+ ): -+ """preexec_fn should set a mask that avoids world-readable files.""" -+ preexec_fn() -+ -+ assert [mock.call(0o037)] == m_os_umask.call_args_list -+ -+ def test_preexec_fn_sets_group_id_if_adm_group_present( -+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn -+ ): -+ """We should setgrp to adm if present, so files are owned by them.""" -+ fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) -+ m_getgrnam.return_value = fake_group -+ -+ preexec_fn() -+ -+ assert [mock.call("adm")] == m_getgrnam.call_args_list -+ assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list -+ -+ def test_preexec_fn_handles_absent_adm_group_gracefully( -+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn -+ ): -+ """We should handle an absent adm group gracefully.""" -+ m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") -+ -+ preexec_fn() -+ -+ assert 0 == m_setgid.call_count -+ - # vi: ts=4 expandtab -diff --git a/cloudinit/util.py b/cloudinit/util.py -index 769f3425..4e0a72db 100644 ---- a/cloudinit/util.py -+++ b/cloudinit/util.py -@@ -359,7 +359,7 @@ def find_modules(root_dir): - - - def multi_log(text, console=True, stderr=True, -- log=None, log_level=logging.DEBUG): -+ log=None, log_level=logging.DEBUG, fallback_to_stdout=True): - if stderr: - sys.stderr.write(text) - if console: -@@ -368,7 +368,7 @@ def multi_log(text, console=True, stderr=True, - with open(conpath, 'w') as wfh: - wfh.write(text) - wfh.flush() -- else: -+ elif fallback_to_stdout: - # A container may lack /dev/console (arguably a container bug). If - # it does not exist, then write output to stdout. this will result - # in duplicate stderr and stdout messages if stderr was True. -@@ -623,6 +623,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): - if not o_err: - o_err = sys.stderr - -+ # pylint: disable=subprocess-popen-preexec-fn -+ def set_subprocess_umask_and_gid(): -+ """Reconfigure umask and group ID to create output files securely. -+ -+ This is passed to subprocess.Popen as preexec_fn, so it is executed in -+ the context of the newly-created process. 
It: -+ -+ * sets the umask of the process so created files aren't world-readable -+ * if an adm group exists in the system, sets that as the process' GID -+ (so that the created file(s) are owned by root:adm) -+ """ -+ os.umask(0o037) -+ try: -+ group_id = grp.getgrnam("adm").gr_gid -+ except KeyError: -+ # No adm group, don't set a group -+ pass -+ else: -+ os.setgid(group_id) -+ - if outfmt: - LOG.debug("Redirecting %s to %s", o_out, outfmt) - (mode, arg) = outfmt.split(" ", 1) -@@ -632,7 +652,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): - owith = "wb" - new_fp = open(arg, owith) - elif mode == "|": -- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) -+ proc = subprocess.Popen( -+ arg, -+ shell=True, -+ stdin=subprocess.PIPE, -+ preexec_fn=set_subprocess_umask_and_gid, -+ ) - new_fp = proc.stdin - else: - raise TypeError("Invalid type for output format: %s" % outfmt) -@@ -654,7 +679,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): - owith = "wb" - new_fp = open(arg, owith) - elif mode == "|": -- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) -+ proc = subprocess.Popen( -+ arg, -+ shell=True, -+ stdin=subprocess.PIPE, -+ preexec_fn=set_subprocess_umask_and_gid, -+ ) - new_fp = proc.stdin - else: - raise TypeError("Invalid type for error format: %s" % errfmt) -diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py -index b13f76fb..d7cf91a5 100644 ---- a/tests/integration_tests/modules/test_set_password.py -+++ b/tests/integration_tests/modules/test_set_password.py -@@ -116,6 +116,30 @@ class Mixin: - # Which are not the same - assert shadow_users["harry"] != shadow_users["dick"] - -+ def test_random_passwords_not_stored_in_cloud_init_output_log( -+ self, class_client -+ ): -+ """We should not emit passwords to the in-instance log file. -+ -+ LP: #1918303 -+ """ -+ cloud_init_output = class_client.read_from_file( -+ "/var/log/cloud-init-output.log" -+ ) -+ assert "dick:" not in cloud_init_output -+ assert "harry:" not in cloud_init_output -+ -+ def test_random_passwords_emitted_to_serial_console(self, class_client): -+ """We should emit passwords to the serial console. (LP: #1918303)""" -+ try: -+ console_log = class_client.instance.console_log() -+ except NotImplementedError: -+ # Assume that an exception here means that we can't use the console -+ # log -+ pytest.skip("NotImplementedError when requesting console log") -+ assert "dick:" in console_log -+ assert "harry:" in console_log -+ - def test_explicit_password_set_correctly(self, class_client): - """Test that an explicitly-specified password is set correctly.""" - shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client) -diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py -new file mode 100644 -index 00000000..b31a0434 ---- /dev/null -+++ b/tests/integration_tests/test_logging.py -@@ -0,0 +1,22 @@ -+"""Integration tests relating to cloud-init's logging.""" -+ -+ -+class TestVarLogCloudInitOutput: -+ """Integration tests relating to /var/log/cloud-init-output.log.""" -+ -+ def test_var_log_cloud_init_output_not_world_readable(self, client): -+ """ -+ The log can contain sensitive data, it shouldn't be world-readable. 
-+ -+ LP: #1918303 -+ """ -+ # Check the file exists -+ assert client.execute("test -f /var/log/cloud-init-output.log").ok -+ -+ # Check its permissions are as we expect -+ perms, user, group = client.execute( -+ "stat -c %a:%U:%G /var/log/cloud-init-output.log" -+ ).split(":") -+ assert "640" == perms -+ assert "root" == user -+ assert "adm" == group -diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py -index 857629f1..e5292001 100644 ---- a/tests/unittests/test_util.py -+++ b/tests/unittests/test_util.py -@@ -572,6 +572,10 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): - util.multi_log(logged_string) - self.assertEqual(logged_string, self.stdout.getvalue()) - -+ def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self): -+ util.multi_log('something', fallback_to_stdout=False) -+ self.assertEqual('', self.stdout.getvalue()) -+ - def test_logs_go_to_log_if_given(self): - log = mock.MagicMock() - logged_string = 'something very important' --- -2.27.0 - diff --git a/SOURCES/test_version_change.pkl b/SOURCES/test_version_change.pkl deleted file mode 100644 index 65ae93e..0000000 Binary files a/SOURCES/test_version_change.pkl and /dev/null differ diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec index 1cbe510..58a1b58 100644 --- a/SPECS/cloud-init.spec +++ b/SPECS/cloud-init.spec @@ -5,8 +5,8 @@ %global debug_package %{nil} Name: cloud-init -Version: 21.1 -Release: 15%{?dist}.3 +Version: 22.1 +Release: 5%{?dist} Summary: Cloud instance init scripts Group: System Environment/Base @@ -14,72 +14,37 @@ License: GPLv3 URL: http://launchpad.net/cloud-init Source0: https://launchpad.net/cloud-init/trunk/%{version}/+download/%{name}-%{version}.tar.gz Source1: cloud-init-tmpfiles.conf -Source2: test_version_change.pkl Patch0001: 0001-Add-initial-redhat-setup.patch Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch Patch0003: 0003-limit-permissions-on-def_log_file.patch -Patch0004: 0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch -Patch0005: 0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch -Patch0006: 0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch -Patch0007: 0007-Remove-race-condition-between-cloud-init-and-Network.patch -Patch0008: 0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch -Patch0009: 0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch -# For bz#1957532 - [cloud-init] From RHEL 82+ cloud-init no longer displays sshd keys fingerprints from instance launched from a backup image -Patch10: ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch -# For bz#1945891 - CVE-2021-3429 cloud-init: randomly generated passwords logged in clear-text to world-readable file [rhel-8] -Patch11: ci-write-passwords-only-to-serial-console-lock-down-clo.patch -# For bz#1862967 - [cloud-init]Customize ssh AuthorizedKeysFile causes login failure -Patch12: ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch -# For bz#1862967 - [cloud-init]Customize ssh AuthorizedKeysFile causes login failure -Patch13: ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch -# For bz#1995840 - [cloudinit] Fix home permissions modified by ssh module -Patch14: ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch -# For bz#1862933 - cloud-init fails with ValueError: need more than 1 value to unpack[rhel-8] -Patch15: ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch -# For bz#2013644 - cloud-init fails to set host key permissions correctly -Patch16: 
ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch -# For bz#2021538 - cloud-init.service fails to start after package update -Patch17: ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch -# For bz#2028028 - [RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP -Patch18: ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch -# For bz#2039697 - [RHEL8] [Azure] cloud-init fails to configure the system -# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' -Patch20: ci-Datasource-for-VMware-953.patch -# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' -Patch21: ci-Change-netifaces-dependency-to-0.10.4-965.patch -# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' -Patch22: ci-Update-dscheck_VMware-s-rpctool-check-970.patch -# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' -Patch23: ci-Revert-unnecesary-lcase-in-ds-identify-978.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch24: ci-Add-flexibility-to-IMDS-api-version-793.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch25: ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch26: ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch27: ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch28: ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch29: ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch -# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata -Patch30: ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch -# For bz#2046540 - cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". -Patch31: ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch -# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8. -Patch32: ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch -# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8. -Patch33: ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch -# For bz#2088028 - [RHEL-8.7] SSH keys with \r\n line breaks are not properly handled on Azure [rhel-8.6.0.z] -Patch34: ci-Add-r-n-check-for-SSH-keys-in-Azure-889.patch -# For bz#2081003 - Cloud-init is placing the puppet module in the wrong stage. 
[rhel-8.6.0.z] -Patch35: ci-systemd-Better-support-package-and-upgrade.patch -# For bz#2091933 - cloud-init has an undeclared dependency on the initscripts rpm [rhel-8.6.0.z] -Patch36: ci-Leave-the-details-of-service-management-to-the-distr.patch -# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' - +Patch0004: 0004-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +Patch0005: 0005-Remove-race-condition-between-cloud-init-and-Network.patch +Patch0006: 0006-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch +# For bz#2059872 - [RHEL-8]Rebase cloud-init from Fedora so it can configure networking using NM keyfiles +Patch7: ci-Add-native-NetworkManager-support-1224.patch +# For bz#2059872 - [RHEL-8]Rebase cloud-init from Fedora so it can configure networking using NM keyfiles +Patch8: ci-Use-Network-Manager-and-Netplan-as-default-renderers.patch +# For bz#2082071 - Align cloud.cfg file and systemd with cloud-init upstream .tmpl files +Patch9: ci-Align-rhel-custom-files-with-upstream-1431.patch +# For bz#2082071 - Align cloud.cfg file and systemd with cloud-init upstream .tmpl files +Patch10: ci-Remove-rhel-specific-files.patch +# For bz#2082686 - [cloud][init] Add support for reading tags from instance metadata +Patch11: ci-Support-EC2-tags-in-instance-metadata-1309.patch +# For bz#2096269 - Adjust udev/rules default path[RHEL-8] +Patch12: ci-setup.py-adjust-udev-rules-default-path-1513.patch +# For bz#2107464 - [RHEL-8.7] Cannot run sysconfig when changing the priority of network renderers +# For bz#2110066 - DNS integration with OpenStack/cloud-init/NetworkManager is not working +# For bz#2117526 - [RHEL8.7] Revert patch of configuring networking by NM keyfiles +# For bz#2104393 - [RHEL-8.7]Failed to config static IP and IPv6 according to VMware Customization Config File +# For bz#2098624 - [RHEL-8.7] IPv6 not workable when cloud-init configure network using NM keyfiles +Patch13: ci-Revert-Add-native-NetworkManager-support-1224.patch +# For bz#2107464 - [RHEL-8.7] Cannot run sysconfig when changing the priority of network renderers +# For bz#2110066 - DNS integration with OpenStack/cloud-init/NetworkManager is not working +# For bz#2117526 - [RHEL8.7] Revert patch of configuring networking by NM keyfiles +# For bz#2104393 - [RHEL-8.7]Failed to config static IP and IPv6 according to VMware Customization Config File +# For bz#2098624 - [RHEL-8.7] IPv6 not workable when cloud-init configure network using NM keyfiles +Patch14: ci-Revert-Use-Network-Manager-and-Netplan-as-default-re.patch BuildArch: noarch @@ -148,8 +113,6 @@ ssh keys and to let the user run various scripts. sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/env python3|' \ -e 's|#!/usr/bin/python|#!/usr/bin/python3|' tools/* cloudinit/ssh_util.py -cp -f %{SOURCE2} tests/integration_tests/assets/test_version_change.pkl - %build %py3_build @@ -157,8 +120,6 @@ cp -f %{SOURCE2} tests/integration_tests/assets/test_version_change.pkl %install %py3_install -- -python3 tools/render-cloudcfg --variant fedora > $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg - sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $RPM_BUILD_ROOT/%{python3_sitelib}/cloudinit/version.py mkdir -p $RPM_BUILD_ROOT/var/lib/cloud @@ -168,9 +129,6 @@ mkdir -p $RPM_BUILD_ROOT/run/cloud-init mkdir -p $RPM_BUILD_ROOT/%{_tmpfilesdir} cp -p %{SOURCE1} $RPM_BUILD_ROOT/%{_tmpfilesdir}/%{name}.conf -# We supply our own config file since our software differs from Ubuntu's. 
-cp -p rhel/cloud.cfg $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg - mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d cp -p tools/21-cloudinit.conf $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf @@ -178,17 +136,10 @@ cp -p tools/21-cloudinit.conf $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudi mv $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/hook-network-manager \ $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook -# Install our own systemd units (rhbz#1440831) -mkdir -p $RPM_BUILD_ROOT%{_unitdir} -cp rhel/systemd/* $RPM_BUILD_ROOT%{_unitdir}/ - [ ! -d $RPM_BUILD_ROOT/usr/lib/systemd/system-generators ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/systemd/system-generators python3 tools/render-cloudcfg --variant rhel systemd/cloud-init-generator.tmpl > $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator chmod 755 $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator -[ ! -d $RPM_BUILD_ROOT/usr/lib/%{name} ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/%{name} -cp -p tools/ds-identify $RPM_BUILD_ROOT%{_libexecdir}/%{name}/ds-identify - # installing man pages mkdir -p ${RPM_BUILD_ROOT}%{_mandir}/man1/ for man in cloud-id.1 cloud-init.1 cloud-init-per.1; do @@ -244,7 +195,6 @@ fi %files %license LICENSE -%doc ChangeLog rhel/README.rhel %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg %dir %{_sysconfdir}/cloud/cloud.cfg.d %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg @@ -254,6 +204,8 @@ fi %{_unitdir}/cloud-config.service %{_unitdir}/cloud-config.target %{_unitdir}/cloud-final.service +%{_unitdir}/cloud-init-hotplugd.service +%{_unitdir}/cloud-init-hotplugd.socket %{_unitdir}/cloud-init-local.service %{_unitdir}/cloud-init.service %{_unitdir}/cloud-init.target @@ -266,37 +218,61 @@ fi %dir %verify(not mode) /run/cloud-init %dir /var/lib/cloud /etc/NetworkManager/dispatcher.d/cloud-init-azure-hook +/etc/dhcp/dhclient-exit-hooks.d/hook-dhclient %{_udevrulesdir}/66-azure-ephemeral.rules -%{_sysconfdir}/bash_completion.d/cloud-init +%{_datadir}/bash-completion/completions/cloud-init %{_bindir}/cloud-id -%{_libexecdir}/%{name}/ds-identify /usr/lib/systemd/system-generators/cloud-init-generator +%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf %dir %{_sysconfdir}/rsyslog.d %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf %changelog -* Tue Jun 14 2022 Jon Maloy - 21.1-15.el8_6.3 -- ci-Leave-the-details-of-service-management-to-the-distr.patch [bz#2091933] -- Resolves: bz#2091933 - (cloud-init has an undeclared dependency on the initscripts rpm [rhel-8.6.0.z]) - -* Wed May 25 2022 Jon Maloy - 21.1-15.el8_6.2 -- ci-systemd-Better-support-package-and-upgrade.patch [bz#2081003] -- Resolves: bz#2081003 - (Cloud-init is placing the puppet module in the wrong stage. 
[rhel-8.6.0.z])
-
-* Wed May 18 2022 Miroslav Rezanina - 21.1-15.el8_6.1
-- ci-Add-r-n-check-for-SSH-keys-in-Azure-889.patch [bz#2088028]
-- Resolves: bz#2088028
-  ([RHEL-8.7] SSH keys with \r\n line breaks are not properly handled on Azure [rhel-8.6.0.z])
+* Wed Aug 17 2022 Jon Maloy - 22.1-5
+- ci-Revert-Add-native-NetworkManager-support-1224.patch [bz#2107464 bz#2110066 bz#2117526 bz#2104393 bz#2098624]
+- ci-Revert-Use-Network-Manager-and-Netplan-as-default-re.patch [bz#2107464 bz#2110066 bz#2117526 bz#2104393 bz#2098624]
+- Resolves: bz#2107464
+  ([RHEL-8.7] Cannot run sysconfig when changing the priority of network renderers)
+- Resolves: bz#2110066
+  (DNS integration with OpenStack/cloud-init/NetworkManager is not working)
+- Resolves: bz#2117526
+  ([RHEL8.7] Revert patch of configuring networking by NM keyfiles)
+- Resolves: bz#2104393
+  ([RHEL-8.7]Failed to config static IP and IPv6 according to VMware Customization Config File)
+- Resolves: bz#2098624
+  ([RHEL-8.7] IPv6 not workable when cloud-init configure network using NM keyfiles)
+
+* Tue Jul 12 2022 Miroslav Rezanina - 22.1-4
+- ci-cloud-init.spec-adjust-path-for-66-azure-ephemeral.r.patch [bz#2096269]
+- ci-setup.py-adjust-udev-rules-default-path-1513.patch [bz#2096269]
+- Resolves: bz#2096269
+  (Adjust udev/rules default path[RHEL-8])
+
+* Thu Jun 23 2022 Jon Maloy - 22.1-3
+- ci-Support-EC2-tags-in-instance-metadata-1309.patch [bz#2082686]
+- Resolves: bz#2082686
+  ([cloud][init] Add support for reading tags from instance metadata)
+
+* Tue May 31 2022 Jon Maloy - 22.1-2
+- ci-Add-native-NetworkManager-support-1224.patch [bz#2059872]
+- ci-Use-Network-Manager-and-Netplan-as-default-renderers.patch [bz#2059872]
+- ci-Align-rhel-custom-files-with-upstream-1431.patch [bz#2082071]
+- ci-Remove-rhel-specific-files.patch [bz#2082071]
+- Resolves: bz#2059872
+  ([RHEL-8]Rebase cloud-init from Fedora so it can configure networking using NM keyfiles)
+- Resolves: bz#2082071
+  (Align cloud.cfg file and systemd with cloud-init upstream .tmpl files)
+
+* Mon Apr 25 2022 Amy Chen - 22.1-1
+- Rebase to 22.1 [bz#2065544]
+- Resolves: bz#2065544
+  ([RHEL-8.7.0] cloud-init rebase to 22.1)
 
 * Fri Apr 01 2022 Camilla Conte - 21.1-15
 - ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch [bz#1935826]
 - ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch [bz#1935826]
-- Resolves: bz#1935826
-  ([rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8.)
 
 * Fri Feb 25 2022 Jon Maloy - 21.1-14
 - ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch [bz#2046540]
@@ -384,6 +360,17 @@ fi
 - Resolves: bz#1958174
   ([RHEL-8.5.0] Rebase cloud-init to 21.1)
 
+* Thu May 13 2021 Miroslav Rezanina - 20.3-10.el8_4.3
+- ci-get_interfaces-don-t-exclude-Open-vSwitch-bridge-bon.patch [bz#1957135]
+- ci-net-exclude-OVS-internal-interfaces-in-get_interface.patch [bz#1957135]
+- Resolves: bz#1957135
+  (Intermittent failure to start cloud-init due to failure to detect macs [rhel-8.4.0.z])
+
+* Tue Apr 06 2021 Miroslav Rezanina - 20.3-10.el8_4.1
+- ci-Fix-requiring-device-number-on-EC2-derivatives-836.patch [bz#1942699]
+- Resolves: bz#1942699
+  ([Aliyun][RHEL8.4][cloud-init] cloud-init service failed to start with Alibaba instance [rhel-8.4.0.z])
+
 * Tue Feb 02 2021 Miroslav Rezanina - 20.3-10.el8
 - ci-fix-a-typo-in-man-page-cloud-init.1-752.patch [bz#1913127]
 - Resolves: bz#1913127