diff --git a/.cloud-init.metadata b/.cloud-init.metadata new file mode 100644 index 0000000..6948e77 --- /dev/null +++ b/.cloud-init.metadata @@ -0,0 +1 @@ +3b4345267e72e28b877e2e3f0735c1f672674cfc SOURCES/cloud-init-0.7.9.tar.gz diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6a72b4f --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/cloud-init-0.7.9.tar.gz diff --git a/README.md b/README.md deleted file mode 100644 index 5231011..0000000 --- a/README.md +++ /dev/null @@ -1,8 +0,0 @@ -The master branch has no content - -Look at the c7 branch if you are working with CentOS-7, or the c4/c5/c6 branch -for CentOS-4, 5 or 6. If you find this file in a distro specific branch, it -means that no content has been checked in yet - -More information on how these git repositories are setup, is available at -http://wiki.centos.org/Sources diff --git a/SOURCES/0001-configuration-changes-for-RHEL-package.patch b/SOURCES/0001-configuration-changes-for-RHEL-package.patch new file mode 100644 index 0000000..97e4a09 --- /dev/null +++ b/SOURCES/0001-configuration-changes-for-RHEL-package.patch @@ -0,0 +1,197 @@ +From 0efc7b53fd849b1faea9fca1ecc85dfe384de035 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 1 Dec 2016 19:40:36 -0500 +Subject: [PATCH] configuration changes for RHEL package + +remove python dependencies from setup.py (because we handle these via +packages instead) and make any changes to default values to handle +RHEL environments. + +install README and configfiles for rhel. + +X-downstream-only: true +--- + cloudinit/config/cc_chef.py | 6 ++-- + cloudinit/settings.py | 7 +++-- + rhel/README.rhel | 5 ++++ + rhel/cloud-init-tmpfiles.conf | 1 + + rhel/cloud.cfg | 66 +++++++++++++++++++++++++++++++++++++++++++ + setup.py | 4 +-- + 6 files changed, 81 insertions(+), 8 deletions(-) + create mode 100644 rhel/README.rhel + create mode 100644 rhel/cloud-init-tmpfiles.conf + create mode 100644 rhel/cloud.cfg + +diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py +index f6564e5..38f01b7 100644 +--- a/cloudinit/config/cc_chef.py ++++ b/cloudinit/config/cc_chef.py +@@ -33,7 +33,7 @@ file). + + chef: + directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, +- /var/cache/chef, /var/backups/chef, /var/run/chef) ++ /var/cache/chef, /var/backups/chef, /run/chef) + validation_cert: (optional string to be written to file validation_key) + special value 'system' means set use existing file + validation_key: (optional the path for validation_cert. 
default +@@ -85,7 +85,7 @@ CHEF_DIRS = tuple([ + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', +- '/var/run/chef', ++ '/run/chef', + ]) + REQUIRED_CHEF_DIRS = tuple([ + '/etc/chef', +@@ -109,7 +109,7 @@ CHEF_RB_TPL_DEFAULTS = { + 'json_attribs': CHEF_FB_PATH, + 'file_cache_path': "/var/cache/chef", + 'file_backup_path': "/var/backups/chef", +- 'pid_file': "/var/run/chef/client.pid", ++ 'pid_file': "/run/chef/client.pid", + 'show_time': True, + } + CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index b1fdd31..6d31bb6 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -37,13 +37,16 @@ CFG_BUILTIN = { + ], + 'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], +- 'syslog_fix_perms': ['syslog:adm', 'root:adm'], ++ 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], ++ 'ssh_deletekeys': False, ++ 'ssh_genkeytypes': [], ++ 'syslog_fix_perms': [], + 'system_info': { + 'paths': { + 'cloud_dir': '/var/lib/cloud', + 'templates_dir': '/etc/cloud/templates/', + }, +- 'distro': 'ubuntu', ++ 'distro': 'rhel', + }, + 'vendor_data': {'enabled': True, 'prefix': []}, + } +diff --git a/rhel/README.rhel b/rhel/README.rhel +new file mode 100644 +index 0000000..aa29630 +--- /dev/null ++++ b/rhel/README.rhel +@@ -0,0 +1,5 @@ ++The following cloud-init modules are currently unsupported on this OS: ++ - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) ++ - byobu ('byobu_by_default' option) ++ - chef ++ - grub_dpkg +diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf +new file mode 100644 +index 0000000..0c6d2a3 +--- /dev/null ++++ b/rhel/cloud-init-tmpfiles.conf +@@ -0,0 +1 @@ ++d /run/cloud-init 0700 root root - - +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +new file mode 100644 +index 0000000..986f241 +--- /dev/null ++++ b/rhel/cloud.cfg +@@ -0,0 +1,66 @@ ++users: ++ - default ++ ++disable_root: 1 ++ssh_pwauth: 0 ++ ++mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] ++resize_rootfs_tmp: /dev ++ssh_deletekeys: 0 ++ssh_genkeytypes: ~ ++syslog_fix_perms: ~ ++ ++cloud_init_modules: ++ - migrator ++ - bootcmd ++ - write-files ++ - growpart ++ - resizefs ++ - set_hostname ++ - update_hostname ++ - update_etc_hosts ++ - rsyslog ++ - users-groups ++ - ssh ++ ++cloud_config_modules: ++ - mounts ++ - locale ++ - set-passwords ++ - rh_subscription ++ - yum-add-repo ++ - package-update-upgrade-install ++ - timezone ++ - puppet ++ - chef ++ - salt-minion ++ - mcollective ++ - disable-ec2-metadata ++ - runcmd ++ ++cloud_final_modules: ++ - rightscale_userdata ++ - scripts-per-once ++ - scripts-per-boot ++ - scripts-per-instance ++ - scripts-user ++ - ssh-authkey-fingerprints ++ - keys-to-console ++ - phone-home ++ - final-message ++ ++system_info: ++ default_user: ++ name: cloud-user ++ lock_passwd: true ++ gecos: Cloud User ++ groups: [wheel, adm, systemd-journal] ++ sudo: ["ALL=(ALL) NOPASSWD:ALL"] ++ shell: /bin/bash ++ distro: rhel ++ paths: ++ cloud_dir: /var/lib/cloud ++ templates_dir: /etc/cloud/templates ++ ssh_svcname: sshd ++ ++# vim:syntax=yaml +diff --git a/setup.py b/setup.py +index 0403607..cc20c60 100755 +--- a/setup.py ++++ b/setup.py +@@ -167,7 +167,6 @@ else: + (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), + (ETC + '/cloud/templates', glob('templates/*')), + (ETC + '/NetworkManager/dispatcher.d/', 
['tools/hook-network-manager']), +- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), + (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + 'tools/write-ssh-key-fingerprints']), + (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), +@@ -175,7 +174,7 @@ else: + [f for f in glob('doc/examples/*') if is_f(f)]), + (USR + '/share/doc/cloud-init/examples/seed', + [f for f in glob('doc/examples/seed/*') if is_f(f)]), +- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]), ++ ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]), + ] + # Use a subclass for install that handles + # adding on the right init system configuration files +@@ -199,7 +198,6 @@ setuptools.setup( + scripts=['tools/cloud-init-per'], + license='Dual-licensed under GPLv3 or Apache 2.0', + data_files=data_files, +- install_requires=requirements, + cmdclass=cmdclass, + entry_points={ + 'console_scripts': [ diff --git a/SOURCES/0002-do-not-use-git-to-determine-version.patch b/SOURCES/0002-do-not-use-git-to-determine-version.patch new file mode 100644 index 0000000..5a2137e --- /dev/null +++ b/SOURCES/0002-do-not-use-git-to-determine-version.patch @@ -0,0 +1,43 @@ +From 13d7d4bf4a94b8cc95beccee0aad96d73d246acf Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 12 Jan 2017 16:21:10 -0500 +Subject: [PATCH] do not use git to determine version + +trying to use git to determine the current cloud-init version will +fail when building the package. + +X-downstream-only: true +--- + tools/read-version | 19 ++----------------- + 1 file changed, 2 insertions(+), 17 deletions(-) + +diff --git a/tools/read-version b/tools/read-version +index 3b30b49..1ce3ac6 100755 +--- a/tools/read-version ++++ b/tools/read-version +@@ -52,23 +52,8 @@ output_json = '--json' in sys.argv + src_version = ci_version.version_string() + version_long = None + +-if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"): +- flags = [] +- if use_tags: +- flags = ['--tags'] +- cmd = ['git', 'describe'] + flags +- +- version = tiny_p(cmd).strip() +- +- if not version.startswith(src_version): +- sys.stderr.write("git describe version (%s) differs from " +- "cloudinit.version (%s)\n" % (version, src_version)) +- sys.exit(1) +- +- version_long = tiny_p(cmd + ["--long"]).strip() +-else: +- version = src_version +- version_long = None ++version = src_version ++version_long = None + + # version is X.Y.Z[+xxx.gHASH] + # version_long is None or X.Y.Z-xxx-gHASH diff --git a/SOURCES/0003-util-teach-write_file-about-copy_mode-option.patch b/SOURCES/0003-util-teach-write_file-about-copy_mode-option.patch new file mode 100644 index 0000000..84a05ca --- /dev/null +++ b/SOURCES/0003-util-teach-write_file-about-copy_mode-option.patch @@ -0,0 +1,145 @@ +From 29ed6e1c54a6ffbc3017660af5e2a81850e46b43 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Mon, 10 Apr 2017 15:52:37 -0400 +Subject: [PATCH] util: teach write_file about copy_mode option + +On centos/fedora/rhel/derivatives, /etc/ssh/sshd_config has mode 0600, +but cloud-init unilaterally sets file modes to 0644 when no explicit +mode is passed to util.write_file. On ubuntu/debian, this file has +mode 0644. With this patch, write_file learns about the copy_mode +option, which will cause it to use the mode of the existing file by +default, falling back to the explicit mode parameter if the file does +not exist. 
+ +LP: #1644064 +Resolves: rhbz#1295984 +(cherry picked from commit 721348a622a660b65acfdf7fdf53203b47f80748) +--- + cloudinit/atomic_helper.py | 12 +++++++++++- + cloudinit/config/cc_set_passwords.py | 3 ++- + cloudinit/util.py | 10 +++++++++- + tests/unittests/test_util.py | 33 +++++++++++++++++++++++++++++++-- + 4 files changed, 53 insertions(+), 5 deletions(-) + +diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py +index fb2df8d..587b994 100644 +--- a/cloudinit/atomic_helper.py ++++ b/cloudinit/atomic_helper.py +@@ -2,13 +2,23 @@ + + import json + import os ++import stat + import tempfile + + _DEF_PERMS = 0o644 + + +-def write_file(filename, content, mode=_DEF_PERMS, omode="wb"): ++def write_file(filename, content, mode=_DEF_PERMS, ++ omode="wb", copy_mode=False): + # open filename in mode 'omode', write content, set permissions to 'mode' ++ ++ if copy_mode: ++ try: ++ file_stat = os.stat(filename) ++ mode = stat.S_IMODE(file_stat.st_mode) ++ except OSError: ++ pass ++ + tf = None + try: + tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), +diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py +index cf1f59e..2745df8 100755 +--- a/cloudinit/config/cc_set_passwords.py ++++ b/cloudinit/config/cc_set_passwords.py +@@ -174,7 +174,8 @@ def handle(_name, cfg, cloud, log, args): + pw_auth)) + + lines = [str(l) for l in new_lines] +- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) ++ util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), ++ copy_mode=True) + + try: + cmd = cloud.distro.init_cmd # Default service +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 5725129..f90653d 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -1732,7 +1732,7 @@ def chmod(path, mode): + os.chmod(path, real_mode) + + +-def write_file(filename, content, mode=0o644, omode="wb"): ++def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): + """ + Writes a file with the given content and sets the file mode as specified. + Resotres the SELinux context if possible. +@@ -1742,6 +1742,14 @@ def write_file(filename, content, mode=0o644, omode="wb"): + @param mode: The filesystem mode to set on the file. + @param omode: The open mode used when opening the file (w, wb, a, etc.) 
+ """ ++ ++ if copy_mode: ++ try: ++ file_stat = os.stat(filename) ++ mode = stat.S_IMODE(file_stat.st_mode) ++ except OSError: ++ pass ++ + ensure_dir(os.path.dirname(filename)) + if 'b' in omode.lower(): + content = encode_text(content) +diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py +index ab74311..5d21b4b 100644 +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -103,8 +103,8 @@ class TestWriteFile(helpers.TestCase): + self.assertTrue(os.path.isdir(dirname)) + self.assertTrue(os.path.isfile(path)) + +- def test_custom_mode(self): +- """Verify custom mode works properly.""" ++ def test_explicit_mode(self): ++ """Verify explicit file mode works properly.""" + path = os.path.join(self.tmp, "NewFile.txt") + contents = "Hey there" + +@@ -115,6 +115,35 @@ class TestWriteFile(helpers.TestCase): + file_stat = os.stat(path) + self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + ++ def test_copy_mode_no_existing(self): ++ """Verify that file is created with mode 0o644 if copy_mode ++ is true and there is no prior existing file.""" ++ path = os.path.join(self.tmp, "NewFile.txt") ++ contents = "Hey there" ++ ++ util.write_file(path, contents, copy_mode=True) ++ ++ self.assertTrue(os.path.exists(path)) ++ self.assertTrue(os.path.isfile(path)) ++ file_stat = os.stat(path) ++ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) ++ ++ def test_copy_mode_with_existing(self): ++ """Verify that file is created using mode of existing file ++ if copy_mode is true.""" ++ path = os.path.join(self.tmp, "NewFile.txt") ++ contents = "Hey there" ++ ++ open(path, 'w').close() ++ os.chmod(path, 0o666) ++ ++ util.write_file(path, contents, copy_mode=True) ++ ++ self.assertTrue(os.path.exists(path)) ++ self.assertTrue(os.path.isfile(path)) ++ file_stat = os.stat(path) ++ self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) ++ + def test_custom_omode(self): + """Verify custom omode works properly.""" + path = os.path.join(self.tmp, "NewFile.txt") diff --git a/SOURCES/0004-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/SOURCES/0004-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch new file mode 100644 index 0000000..990e36a --- /dev/null +++ b/SOURCES/0004-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch @@ -0,0 +1,45 @@ +From 2a7c89dc761c6415a97e44ea5cf55885179890cd Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 1 Dec 2016 19:40:36 -0500 +Subject: [PATCH] Do not write NM_CONTROLLED=no in generated interface config + files + +Resolves: rhbz#1385172 +X-downstream-only: true +--- + cloudinit/net/sysconfig.py | 1 - + tests/unittests/test_net.py | 2 -- + 2 files changed, 3 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 9be7407..3c9f2d2 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -169,7 +169,6 @@ class Renderer(renderer.Renderer): + iface_defaults = tuple([ + ('ONBOOT', True), + ('USERCTL', False), +- ('NM_CONTROLLED', False), + ('BOOTPROTO', 'none'), + ]) + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 1090282..ce13664 100755 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -144,7 +144,6 @@ GATEWAY=172.19.3.254 + HWADDR=fa:16:3e:ed:9a:59 + IPADDR=172.19.1.34 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -510,7 +509,6 @@ class TestSysConfigRendering(TestCase): + BOOTPROTO=dhcp + DEVICE=eth1000 + HWADDR=07-1C-C6-75-A4-BE 
+-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no diff --git a/SOURCES/0005-url_helper-fail-gracefully-if-oauthlib-is-not-availa.patch b/SOURCES/0005-url_helper-fail-gracefully-if-oauthlib-is-not-availa.patch new file mode 100644 index 0000000..b8c1256 --- /dev/null +++ b/SOURCES/0005-url_helper-fail-gracefully-if-oauthlib-is-not-availa.patch @@ -0,0 +1,41 @@ +From 949f87447031d8d83b8108aaa11046eb8a50650b Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 8 Dec 2016 15:24:24 -0500 +Subject: [PATCH] url_helper: fail gracefully if oauthlib is not available + +We are unable to ship python-oauthlib in RHEL. Allow imports of +url_helper to succeed even when oauthlib is unavailable. + +X-downstream-only: true +--- + cloudinit/url_helper.py | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py +index 312b046..19c8148 100644 +--- a/cloudinit/url_helper.py ++++ b/cloudinit/url_helper.py +@@ -17,7 +17,11 @@ import time + from email.utils import parsedate + from functools import partial + +-import oauthlib.oauth1 as oauth1 ++try: ++ import oauthlib.oauth1 as oauth1 ++except ImportError: ++ oauth1 = None ++ + from requests import exceptions + + from six.moves.urllib.parse import ( +@@ -481,6 +485,10 @@ class OauthUrlHelper(object): + + def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, + timestamp=None): ++ ++ if oauth1 is None: ++ raise NotImplementedError('oauth support is not available') ++ + if timestamp: + timestamp = str(timestamp) + else: diff --git a/SOURCES/0006-rsyslog-replace-with-stop.patch b/SOURCES/0006-rsyslog-replace-with-stop.patch new file mode 100644 index 0000000..11e059c --- /dev/null +++ b/SOURCES/0006-rsyslog-replace-with-stop.patch @@ -0,0 +1,32 @@ +From b5bc8596569ba46c89247d1df5f3cd6e6a85dce1 Mon Sep 17 00:00:00 2001 +From: Joshua Powers +Date: Mon, 10 Apr 2017 16:30:33 -0700 +Subject: [PATCH] rsyslog: replace ~ with stop +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The discard action (tilde character) has been replaced by the +“stop” RainerScript directive. It is considered more intuitive and +offers slightly better performance. + +The tilde operator was deprecated in rsyslog 7. Distributions +using rsyslog older than that will need to patch. + +LP: #1367899 +Resolves: rhbz#1315615 +(cherry picked from commit b613de733fa7cfbf94666410f252b640019be205) +--- + tools/21-cloudinit.conf | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/21-cloudinit.conf b/tools/21-cloudinit.conf +index c65325c..150d800 100644 +--- a/tools/21-cloudinit.conf ++++ b/tools/21-cloudinit.conf +@@ -3,4 +3,4 @@ + + # comment out the following line to allow CLOUDINIT messages through. + # Doing so means you'll also get CLOUDINIT messages in /var/log/syslog +-& ~ ++& stop diff --git a/SOURCES/0007-OpenStack-Use-timeout-and-retries-from-config-in-get.patch b/SOURCES/0007-OpenStack-Use-timeout-and-retries-from-config-in-get.patch new file mode 100644 index 0000000..c949827 --- /dev/null +++ b/SOURCES/0007-OpenStack-Use-timeout-and-retries-from-config-in-get.patch @@ -0,0 +1,113 @@ +From c98408a2cf874435c7423a1574a9ffc81053707a Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Tue, 17 Jan 2017 08:53:22 -0500 +Subject: [PATCH] OpenStack: Use timeout and retries from config in get_data. 
+ +This modifies get_data in DataSourceOpenStack.py to get the timeout +and retries values from the data source configuration, rather than +from keyword arguments. This permits get_data to use the same timeout +as other methods, and allows an operator to increase the timeout in +environments where the metadata service takes longer than five seconds +to respond. + +LP: #1657130 +Resolves: rhbz#1408589 +(cherry picked from commit 4cf53f1544f8f5629330eab3efef1a18255c277a) +--- + cloudinit/sources/DataSourceOpenStack.py | 15 ++++++++++++--- + tests/unittests/test_datasource/test_openstack.py | 8 ++++---- + 2 files changed, 16 insertions(+), 7 deletions(-) + +diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py +index 2a58f1c..e1ea21f 100644 +--- a/cloudinit/sources/DataSourceOpenStack.py ++++ b/cloudinit/sources/DataSourceOpenStack.py +@@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + # max_wait < 0 indicates do not wait + max_wait = -1 + timeout = 10 ++ retries = 5 + + try: + max_wait = int(self.ds_cfg.get("max_wait", max_wait)) +@@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) + except Exception: + util.logexc(LOG, "Failed to get timeout, using %s", timeout) +- return (max_wait, timeout) ++ ++ try: ++ retries = int(self.ds_cfg.get("retries", retries)) ++ except Exception: ++ util.logexc(LOG, "Failed to get max wait. using %s", retries) ++ ++ return (max_wait, timeout, retries) + + def wait_for_metadata_service(self): + urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) +@@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + md_urls.append(md_url) + url2base[md_url] = url + +- (max_wait, timeout) = self._get_url_settings() ++ (max_wait, timeout, retries) = self._get_url_settings() + start_time = time.time() + avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, + timeout=timeout) +@@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + self.metadata_address = url2base.get(avail_url) + return bool(avail_url) + +- def get_data(self, retries=5, timeout=5): ++ def get_data(self): + try: + if not self.wait_for_metadata_service(): + return False + except IOError: + return False + ++ (max_wait, timeout, retries) = self._get_url_settings() ++ + try: + results = util.log_time(LOG.debug, + 'Crawl of openstack metadata service', +diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py +index e5b6fcc..28e1833 100644 +--- a/tests/unittests/test_datasource/test_openstack.py ++++ b/tests/unittests/test_datasource/test_openstack.py +@@ -232,7 +232,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + None, + helpers.Paths({})) + self.assertIsNone(ds_os.version) +- found = ds_os.get_data(timeout=0.1, retries=0) ++ found = ds_os.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os.version) + md = dict(ds_os.metadata) +@@ -256,7 +256,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + None, + helpers.Paths({})) + self.assertIsNone(ds_os.version) +- found = ds_os.get_data(timeout=0.1, retries=0) ++ found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + +@@ -275,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + 'timeout': 0, + } + self.assertIsNone(ds_os.version) +- found = 
ds_os.get_data(timeout=0.1, retries=0) ++ found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + +@@ -298,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + 'timeout': 0, + } + self.assertIsNone(ds_os.version) +- found = ds_os.get_data(timeout=0.1, retries=0) ++ found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + diff --git a/SOURCES/0008-correct-errors-in-cloudinit-net-sysconfig.py.patch b/SOURCES/0008-correct-errors-in-cloudinit-net-sysconfig.py.patch new file mode 100644 index 0000000..d2f12ae --- /dev/null +++ b/SOURCES/0008-correct-errors-in-cloudinit-net-sysconfig.py.patch @@ -0,0 +1,34 @@ +From 0dab936fd1332749f905a20a44003f441eb76783 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 16 Feb 2017 15:09:51 -0500 +Subject: [PATCH] correct errors in cloudinit/net/sysconfig.py + +There were some logic errors in sysconfig.py that appear to be the +result of accidentally typing "iface" where it should have been +"iface_cfg". This patch corrects those problems so that the module +can run successfully. + +LP: #1665441 +Resolves: rhbz#1389530 +(cherry picked from commit 07ef7f29c5ce9a97939eb32ca80fc1d8c2609216) +--- + cloudinit/net/sysconfig.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 3c9f2d2..0b5f13c 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -282,10 +282,10 @@ class Renderer(renderer.Renderer): + cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) + elif len(iface_subnets) > 1: + for i, iface_subnet in enumerate(iface_subnets, +- start=len(iface.children)): ++ start=len(iface_cfg.children)): + iface_sub_cfg = iface_cfg.copy() + iface_sub_cfg.name = "%s:%s" % (iface_name, i) +- iface.children.append(iface_sub_cfg) ++ iface_cfg.children.append(iface_sub_cfg) + cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) + + @classmethod diff --git a/SOURCES/0009-net-do-not-raise-exception-for-3-nameservers.patch b/SOURCES/0009-net-do-not-raise-exception-for-3-nameservers.patch new file mode 100644 index 0000000..bcb4675 --- /dev/null +++ b/SOURCES/0009-net-do-not-raise-exception-for-3-nameservers.patch @@ -0,0 +1,59 @@ +From c4df9de80715ff76c39faea00d27acdb8a75aac7 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Sat, 4 Mar 2017 17:07:16 -0500 +Subject: [PATCH] net: do not raise exception for > 3 nameservers + +log a warning rather than raising ValueError if we see more than three +nameserver addresses. 
+ +LP: #1670052 +(cherry picked from commit 657fd40f9ee692a817ec4614cd0d6cb0539ffabf) +--- + cloudinit/distros/parsers/resolv_conf.py | 11 +++++++---- + tests/unittests/test_distros/test_resolv.py | 2 +- + 2 files changed, 8 insertions(+), 5 deletions(-) + +diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py +index ff6ee30..d1f8a04 100644 +--- a/cloudinit/distros/parsers/resolv_conf.py ++++ b/cloudinit/distros/parsers/resolv_conf.py +@@ -6,9 +6,11 @@ + + from six import StringIO + ++from cloudinit.distros.parsers import chop_comment ++from cloudinit import log as logging + from cloudinit import util + +-from cloudinit.distros.parsers import chop_comment ++LOG = logging.getLogger(__name__) + + + # See: man resolv.conf +@@ -79,9 +81,10 @@ class ResolvConf(object): + if len(new_ns) == len(current_ns): + return current_ns + if len(current_ns) >= 3: +- # Hard restriction on only 3 name servers +- raise ValueError(("Adding %r would go beyond the " +- "'3' maximum name servers") % (ns)) ++ LOG.warn("ignoring nameserver %r: adding would " ++ "exceed the maximum of " ++ "'3' name servers (see resolv.conf(5))" % (ns)) ++ return current_ns[:3] + self._remove_option('nameserver') + for n in new_ns: + self._contents.append(('option', ['nameserver', n, ''])) +diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py +index 6b535a9..c9d0347 100644 +--- a/tests/unittests/test_distros/test_resolv.py ++++ b/tests/unittests/test_distros/test_resolv.py +@@ -46,7 +46,7 @@ class TestResolvHelper(TestCase): + self.assertNotIn('10.3', rp.nameservers) + self.assertEqual(len(rp.nameservers), 3) + rp.add_nameserver('10.2') +- self.assertRaises(ValueError, rp.add_nameserver, '10.3') ++ rp.add_nameserver('10.3') + self.assertNotIn('10.3', rp.nameservers) + + def test_search_domains(self): diff --git a/SOURCES/0010-net-support-both-ipv4-and-ipv6-gateways-in-sysconfig.patch b/SOURCES/0010-net-support-both-ipv4-and-ipv6-gateways-in-sysconfig.patch new file mode 100644 index 0000000..4e429f8 --- /dev/null +++ b/SOURCES/0010-net-support-both-ipv4-and-ipv6-gateways-in-sysconfig.patch @@ -0,0 +1,112 @@ +From 2d29c7af0c45186d3031c0ecd9ae0b8881a33c0b Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Thu, 2 Mar 2017 11:08:26 -0500 +Subject: [PATCH] net: support both ipv4 and ipv6 gateways in sysconfig. + +Previously, cloud-init would throw an exception if an interface had +both ipv4 and ipv6 addresses and a default gateway for each address +family. This change allows cloud-init to correctly configure +interfaces in this situation. 
+ +LP: #1669504 +(cherry picked from commit 1d751a6f46f044e3c3827f3cef0e4a2e71d50fe7) +--- + cloudinit/net/sysconfig.py | 33 ++++++++++++++++++++++++--------- + 1 file changed, 24 insertions(+), 9 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 0b5f13c..29c906f 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -87,7 +87,8 @@ class Route(ConfigMap): + def __init__(self, route_name, base_sysconf_dir): + super(Route, self).__init__() + self.last_idx = 1 +- self.has_set_default = False ++ self.has_set_default_ipv4 = False ++ self.has_set_default_ipv6 = False + self._route_name = route_name + self._base_sysconf_dir = base_sysconf_dir + +@@ -95,7 +96,8 @@ class Route(ConfigMap): + r = Route(self._route_name, self._base_sysconf_dir) + r._conf = self._conf.copy() + r.last_idx = self.last_idx +- r.has_set_default = self.has_set_default ++ r.has_set_default_ipv4 = self.has_set_default_ipv4 ++ r.has_set_default_ipv6 = self.has_set_default_ipv6 + return r + + @property +@@ -119,10 +121,10 @@ class NetInterface(ConfigMap): + super(NetInterface, self).__init__() + self.children = [] + self.routes = Route(iface_name, base_sysconf_dir) +- self._kind = kind ++ self.kind = kind ++ + self._iface_name = iface_name + self._conf['DEVICE'] = iface_name +- self._conf['TYPE'] = self.iface_types[kind] + self._base_sysconf_dir = base_sysconf_dir + + @property +@@ -140,6 +142,8 @@ class NetInterface(ConfigMap): + + @kind.setter + def kind(self, kind): ++ if kind not in self.iface_types: ++ raise ValueError(kind) + self._kind = kind + self._conf['TYPE'] = self.iface_types[kind] + +@@ -172,7 +176,7 @@ class Renderer(renderer.Renderer): + ('BOOTPROTO', 'none'), + ]) + +- # If these keys exist, then there values will be used to form ++ # If these keys exist, then their values will be used to form + # a BONDING_OPTS grouping; otherwise no grouping will be set. + bond_tpl_opts = tuple([ + ('bond_mode', "mode=%s"), +@@ -198,6 +202,7 @@ class Renderer(renderer.Renderer): + def _render_iface_shared(cls, iface, iface_cfg): + for k, v in cls.iface_defaults: + iface_cfg[k] = v ++ + for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]: + old_value = iface.get(old_key) + if old_value is not None: +@@ -226,10 +231,20 @@ class Renderer(renderer.Renderer): + if 'netmask' in subnet: + iface_cfg['NETMASK'] = subnet['netmask'] + for route in subnet.get('routes', []): ++ if subnet.get('ipv6'): ++ gw_cfg = 'IPV6_DEFAULTGW' ++ else: ++ gw_cfg = 'GATEWAY' ++ + if _is_default_route(route): +- if route_cfg.has_set_default: +- raise ValueError("Duplicate declaration of default" +- " route found for interface '%s'" ++ if ( ++ (subnet.get('ipv4') and ++ route_cfg.has_set_default_ipv4) or ++ (subnet.get('ipv6') and ++ route_cfg.has_set_default_ipv6) ++ ): ++ raise ValueError("Duplicate declaration of default " ++ "route found for interface '%s'" + % (iface_cfg.name)) + # NOTE(harlowja): ipv6 and ipv4 default gateways + gw_key = 'GATEWAY0' +@@ -241,7 +256,7 @@ class Renderer(renderer.Renderer): + # also provided the default route? 
+ iface_cfg['DEFROUTE'] = True + if 'gateway' in route: +- iface_cfg['GATEWAY'] = route['gateway'] ++ iface_cfg[gw_cfg] = route['gateway'] + route_cfg.has_set_default = True + else: + gw_key = 'GATEWAY%s' % route_cfg.last_idx diff --git a/SOURCES/0011-systemd-replace-generator-with-unit-conditionals.patch b/SOURCES/0011-systemd-replace-generator-with-unit-conditionals.patch new file mode 100644 index 0000000..e8cc403 --- /dev/null +++ b/SOURCES/0011-systemd-replace-generator-with-unit-conditionals.patch @@ -0,0 +1,258 @@ +From 326a466c0bdd89a161ba78b49e990c80ffacbb13 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Mon, 1 May 2017 21:27:40 -0400 +Subject: [PATCH] systemd: replace generator with unit conditionals + +In order to avoid problems caused by changes to upstream unit files, +this patch completely separates our systemd units from those +distributed by upstream. The RHEL unit files can be found in the +rhel/systemd directory. + +This commit replaces the generator with Conditional* statements in the +unit files. You are still able to disable cloud-init by setting +cloud-init=disabled on the kernel command line or by touching +/etc/cloud/cloud-init.disable. + +We also retarget the cloud-init services from cloud-init.target back +to multi-user.target, which resolves the root cause of rhbz#1440831. + +Resolves: rhbz#1440831 +X-downstream-only: true +--- + rhel/systemd/cloud-config.service | 18 ++++++++++++ + rhel/systemd/cloud-config.target | 11 +++++++ + rhel/systemd/cloud-final.service | 19 ++++++++++++ + rhel/systemd/cloud-init-local.service | 26 +++++++++++++++++ + rhel/systemd/cloud-init.service | 25 ++++++++++++++++ + setup.py | 54 ----------------------------------- + 6 files changed, 99 insertions(+), 54 deletions(-) + create mode 100644 rhel/systemd/cloud-config.service + create mode 100644 rhel/systemd/cloud-config.target + create mode 100644 rhel/systemd/cloud-final.service + create mode 100644 rhel/systemd/cloud-init-local.service + create mode 100644 rhel/systemd/cloud-init.service + +diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service +new file mode 100644 +index 0000000..12ca9df +--- /dev/null ++++ b/rhel/systemd/cloud-config.service +@@ -0,0 +1,18 @@ ++[Unit] ++Description=Apply the settings specified in cloud-config ++After=network-online.target cloud-config.target ++Wants=network-online.target cloud-config.target ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=config ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target +new file mode 100644 +index 0000000..ae9b7d0 +--- /dev/null ++++ b/rhel/systemd/cloud-config.target +@@ -0,0 +1,11 @@ ++# cloud-init normally emits a "cloud-config" upstart event to inform third ++# parties that cloud-config is available, which does us no good when we're ++# using systemd. cloud-config.target serves as this synchronization point ++# instead. Services that would "start on cloud-config" with upstart can ++# instead use "After=cloud-config.target" and "Wants=cloud-config.target" ++# as appropriate. 
++ ++[Unit] ++Description=Cloud-config availability ++Wants=cloud-init-local.service cloud-init.service ++After=cloud-init-local.service cloud-init.service +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +new file mode 100644 +index 0000000..32a83d8 +--- /dev/null ++++ b/rhel/systemd/cloud-final.service +@@ -0,0 +1,19 @@ ++[Unit] ++Description=Execute cloud user/final scripts ++After=network-online.target cloud-config.service rc-local.service ++Wants=network-online.target cloud-config.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=final ++RemainAfterExit=yes ++TimeoutSec=0 ++KillMode=process ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +new file mode 100644 +index 0000000..8174937 +--- /dev/null ++++ b/rhel/systemd/cloud-init-local.service +@@ -0,0 +1,26 @@ ++[Unit] ++Description=Initial cloud-init job (pre-networking) ++DefaultDependencies=no ++Wants=network-pre.target ++After=systemd-remount-fs.service ++Before=NetworkManager.service network.service ++Before=network-pre.target ++Before=shutdown.target ++Before=sysinit.target ++Conflicts=shutdown.target ++RequiresMountsFor=/var/lib/cloud ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init init --local ++ExecStart=/bin/touch /run/cloud-init/network-config-ready ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +new file mode 100644 +index 0000000..68fc5f1 +--- /dev/null ++++ b/rhel/systemd/cloud-init.service +@@ -0,0 +1,25 @@ ++[Unit] ++Description=Initial cloud-init job (metadata service crawler) ++Wants=cloud-init-local.service ++Wants=sshd-keygen.service ++Wants=sshd.service ++After=cloud-init-local.service ++After=NetworkManager.service network.service ++Before=network-online.target ++Before=sshd-keygen.service ++Before=sshd.service ++Before=systemd-user-sessions.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init init ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/setup.py b/setup.py +index cc20c60..83723bf 100755 +--- a/setup.py ++++ b/setup.py +@@ -63,9 +63,6 @@ INITSYS_FILES = { + 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], +- 'systemd': [f for f in (glob('systemd/*.service') + +- glob('systemd/*.target')) if is_f(f)], +- 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + 'upstart': [f for f in glob('upstart/*') if is_f(f)], + } + INITSYS_ROOTS = { +@@ -73,9 +70,6 @@ INITSYS_ROOTS = { + 'sysvinit_freebsd': '/usr/local/etc/rc.d', + 'sysvinit_deb': '/etc/init.d', + 'sysvinit_openrc': '/etc/init.d', +- 'systemd': 
pkg_config_read('systemd', 'systemdsystemunitdir'), +- 'systemd.generators': pkg_config_read('systemd', +- 'systemdsystemgeneratordir'), + 'upstart': '/etc/init/', + } + INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) +@@ -117,50 +111,8 @@ def read_requires(): + return str(deps).splitlines() + + +-# TODO: Is there a better way to do this?? +-class InitsysInstallData(install): +- init_system = None +- user_options = install.user_options + [ +- # This will magically show up in member variable 'init_sys' +- ('init-system=', None, +- ('init system(s) to configure (%s) [default: None]' % +- (", ".join(INITSYS_TYPES)))), +- ] +- +- def initialize_options(self): +- install.initialize_options(self) +- self.init_system = "" +- +- def finalize_options(self): +- install.finalize_options(self) +- +- if self.init_system and isinstance(self.init_system, str): +- self.init_system = self.init_system.split(",") +- +- if len(self.init_system) == 0: +- raise DistutilsArgError( +- ("You must specify one of (%s) when" +- " specifying init system(s)!") % (", ".join(INITSYS_TYPES))) +- +- bad = [f for f in self.init_system if f not in INITSYS_TYPES] +- if len(bad) != 0: +- raise DistutilsArgError( +- "Invalid --init-system: %s" % (','.join(bad))) +- +- for system in self.init_system: +- # add data files for anything that starts with '.' +- datakeys = [k for k in INITSYS_ROOTS +- if k.partition(".")[0] == system] +- for k in datakeys: +- self.distribution.data_files.append( +- (INITSYS_ROOTS[k], INITSYS_FILES[k])) +- # Force that command to reinitalize (with new file list) +- self.distribution.reinitialize_command('install_data', True) +- +- + if in_virtualenv(): + data_files = [] +- cmdclass = {} + else: + data_files = [ + (ETC + '/cloud', glob('config/*.cfg')), +@@ -176,11 +128,6 @@ else: + [f for f in glob('doc/examples/seed/*') if is_f(f)]), + ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]), + ] +- # Use a subclass for install that handles +- # adding on the right init system configuration files +- cmdclass = { +- 'install': InitsysInstallData, +- } + + + requirements = read_requires() +@@ -198,7 +145,6 @@ setuptools.setup( + scripts=['tools/cloud-init-per'], + license='Dual-licensed under GPLv3 or Apache 2.0', + data_files=data_files, +- cmdclass=cmdclass, + entry_points={ + 'console_scripts': [ + 'cloud-init = cloudinit.cmd.main:main' diff --git a/SOURCES/0012-OpenStack-add-dvs-to-the-list-of-physical-link-types.patch b/SOURCES/0012-OpenStack-add-dvs-to-the-list-of-physical-link-types.patch new file mode 100644 index 0000000..43719d4 --- /dev/null +++ b/SOURCES/0012-OpenStack-add-dvs-to-the-list-of-physical-link-types.patch @@ -0,0 +1,27 @@ +From 615df299937ae99ecd571fec91dd2ad4651a6688 Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Mon, 3 Apr 2017 10:04:43 -0400 +Subject: [PATCH] OpenStack: add 'dvs' to the list of physical link types. + +Links presented in network_data.json to the guest running on ESXi +are of type 'dvs'. 
+ +LP: #1674946 +Resolves: rhbz#1442783 +(cherry picked from commit 61eb03fef92f435434d974fb46439189ef0b5f97) +--- + cloudinit/sources/helpers/openstack.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py +index 096062d..61cd36b 100644 +--- a/cloudinit/sources/helpers/openstack.py ++++ b/cloudinit/sources/helpers/openstack.py +@@ -52,6 +52,7 @@ OS_VERSIONS = ( + PHYSICAL_TYPES = ( + None, + 'bridge', ++ 'dvs', + 'ethernet', + 'hw_veb', + 'hyperv', diff --git a/SOURCES/0013-Bounce-network-interface-for-Azure-when-using-the-bu.patch b/SOURCES/0013-Bounce-network-interface-for-Azure-when-using-the-bu.patch new file mode 100644 index 0000000..7f08ba3 --- /dev/null +++ b/SOURCES/0013-Bounce-network-interface-for-Azure-when-using-the-bu.patch @@ -0,0 +1,143 @@ +From 9c2b33562da7e5ca3359a2e70b893d19c44eb66c Mon Sep 17 00:00:00 2001 +From: Brent Baude +Date: Mon, 20 Mar 2017 12:31:15 -0500 +Subject: [PATCH] Bounce network interface for Azure when using the built-in + path. + +When deploying on Azure and using only cloud-init, you must "bounce" the +network interface to trigger a DDNS update. This allows dhclient to +register the hostname with Azure so that DNS works correctly on their +private networks (i.e. between vm and vm). + +The agent path was already doing the bounce so this creates parity +between the built-in path and the agent. + +LP: #1674685 +Resolves: rhbz#1434109 +(cherry picked from commit 86715c88aab8561e1ddadac95671f6095d16f9e7) +--- + cloudinit/sources/DataSourceAzure.py | 78 +++++++++++++++++++++--------------- + cloudinit/sources/__init__.py | 2 +- + 2 files changed, 47 insertions(+), 33 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index c5af8b8..48a3e1d 100644 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -111,50 +111,62 @@ class DataSourceAzureNet(sources.DataSource): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + +- def get_metadata_from_agent(self): +- temp_hostname = self.metadata.get('local-hostname') ++ def bounce_network_with_azure_hostname(self): ++ # When using cloud-init to provision, we have to set the hostname from ++ # the metadata and "bounce" the network to force DDNS to update via ++ # dhclient ++ azure_hostname = self.metadata.get('local-hostname') ++ LOG.debug("Hostname in metadata is {}".format(azure_hostname)) + hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] +- agent_cmd = self.ds_cfg['agent_command'] +- LOG.debug("Getting metadata via agent. 
hostname=%s cmd=%s", +- temp_hostname, agent_cmd) +- with temporary_hostname(temp_hostname, self.ds_cfg, ++ ++ with temporary_hostname(azure_hostname, self.ds_cfg, + hostname_command=hostname_command) \ + as previous_hostname: + if (previous_hostname is not None and +- util.is_true(self.ds_cfg.get('set_hostname'))): ++ util.is_true(self.ds_cfg.get('set_hostname'))): + cfg = self.ds_cfg['hostname_bounce'] ++ ++ # "Bouncing" the network + try: +- perform_hostname_bounce(hostname=temp_hostname, ++ perform_hostname_bounce(hostname=azure_hostname, + cfg=cfg, + prev_hostname=previous_hostname) + except Exception as e: + LOG.warn("Failed publishing hostname: %s", e) + util.logexc(LOG, "handling set_hostname failed") + +- try: +- invoke_agent(agent_cmd) +- except util.ProcessExecutionError: +- # claim the datasource even if the command failed +- util.logexc(LOG, "agent command '%s' failed.", +- self.ds_cfg['agent_command']) +- +- ddir = self.ds_cfg['data_dir'] +- +- fp_files = [] +- key_value = None +- for pk in self.cfg.get('_pubkeys', []): +- if pk.get('value', None): +- key_value = pk['value'] +- LOG.debug("ssh authentication: using value from fabric") +- else: +- bname = str(pk['fingerprint'] + ".crt") +- fp_files += [os.path.join(ddir, bname)] +- LOG.debug("ssh authentication: " +- "using fingerprint from fabirc") +- +- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", +- func=wait_for_files, +- args=(fp_files,)) ++ def get_metadata_from_agent(self): ++ temp_hostname = self.metadata.get('local-hostname') ++ agent_cmd = self.ds_cfg['agent_command'] ++ LOG.debug("Getting metadata via agent. hostname=%s cmd=%s", ++ temp_hostname, agent_cmd) ++ ++ self.bounce_network_with_azure_hostname() ++ ++ try: ++ invoke_agent(agent_cmd) ++ except util.ProcessExecutionError: ++ # claim the datasource even if the command failed ++ util.logexc(LOG, "agent command '%s' failed.", ++ self.ds_cfg['agent_command']) ++ ++ ddir = self.ds_cfg['data_dir'] ++ ++ fp_files = [] ++ key_value = None ++ for pk in self.cfg.get('_pubkeys', []): ++ if pk.get('value', None): ++ key_value = pk['value'] ++ LOG.debug("ssh authentication: using value from fabric") ++ else: ++ bname = str(pk['fingerprint'] + ".crt") ++ fp_files += [os.path.join(ddir, bname)] ++ LOG.debug("ssh authentication: " ++ "using fingerprint from fabirc") ++ ++ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", ++ func=wait_for_files, ++ args=(fp_files,)) + if len(missing): + LOG.warn("Did not find files, but going on: %s", missing) + +@@ -220,6 +232,8 @@ class DataSourceAzureNet(sources.DataSource): + write_files(ddir, files, dirmode=0o700) + + if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: ++ self.bounce_network_with_azure_hostname() ++ + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. 
+ dhclient_lease_file) +diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py +index 3d01072..1829450 100644 +--- a/cloudinit/sources/__init__.py ++++ b/cloudinit/sources/__init__.py +@@ -210,7 +210,7 @@ class DataSource(object): + else: + hostname = toks[0] + +- if fqdn: ++ if fqdn and domain != defdomain: + return "%s.%s" % (hostname, domain) + else: + return hostname diff --git a/SOURCES/0014-limit-permissions-on-def_log_file.patch b/SOURCES/0014-limit-permissions-on-def_log_file.patch new file mode 100644 index 0000000..defda9d --- /dev/null +++ b/SOURCES/0014-limit-permissions-on-def_log_file.patch @@ -0,0 +1,63 @@ +From 9855ef36cfbb148918cb223a997445d59f1dd4f7 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Fri, 7 Apr 2017 18:50:54 -0400 +Subject: [PATCH] limit permissions on def_log_file + +This sets a default mode of 0600 on def_log_file, and makes this +configurable via the def_log_file_mode option in cloud.cfg. + +LP: #1541196 +Resolves: rhbz#1424612 +X-approved-upstream: true +--- + cloudinit/settings.py | 1 + + cloudinit/stages.py | 3 ++- + doc/examples/cloud-config.txt | 4 ++++ + 3 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 6d31bb6..0d39aab 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -36,6 +36,7 @@ CFG_BUILTIN = { + 'None', + ], + 'def_log_file': '/var/log/cloud-init.log', ++ 'def_log_file_mode': 0o600, + 'log_cfgs': [], + 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], + 'ssh_deletekeys': False, +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index b0552dd..bb20a6f 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -145,8 +145,9 @@ class Init(object): + def _initialize_filesystem(self): + util.ensure_dirs(self._initial_subdirs()) + log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') ++ log_file_mode = util.get_cfg_option_int(self.cfg, 'def_log_file_mode') + if log_file: +- util.ensure_file(log_file) ++ util.ensure_file(log_file, mode=log_file_mode) + perms = self.cfg.get('syslog_fix_perms') + if not perms: + perms = {} +diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt +index c5f84b1..8fcce73 100644 +--- a/doc/examples/cloud-config.txt ++++ b/doc/examples/cloud-config.txt +@@ -397,10 +397,14 @@ timezone: US/Eastern + # if syslog_fix_perms is a list, it will iterate through and use the + # first pair that does not raise error. + # ++# 'def_log_file' will be created with mode 'def_log_file_mode', which ++# is specified as a numeric value and defaults to 0600. 
++# + # the default values are '/var/log/cloud-init.log' and 'syslog:adm' + # the value of 'def_log_file' should match what is configured in logging + # if either is empty, then no change of ownership will be done + def_log_file: /var/log/my-logging-file.log ++def_log_file_mode: 0600 + syslog_fix_perms: syslog:root + + # you can set passwords for a user or multiple users diff --git a/SOURCES/0015-remove-tee-command-from-logging-configuration.patch b/SOURCES/0015-remove-tee-command-from-logging-configuration.patch new file mode 100644 index 0000000..2618978 --- /dev/null +++ b/SOURCES/0015-remove-tee-command-from-logging-configuration.patch @@ -0,0 +1,29 @@ +From daa38854c690f4523d0665785180a5a62431eba7 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Fri, 7 Apr 2017 19:09:13 -0400 +Subject: [PATCH] remove 'tee' command from logging configuration + +the default logging configuration passes all cloud-init output through +'tee' in order to generate /var/log/cloud-init-output.log. This is +redundant on modern systems, since stdout/stderr are captured by +systemd and available via journalctl. + +Resolves: rhbz#1424612 +X-downstream-only: true +--- + config/cloud.cfg.d/05_logging.cfg | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg +index 937b07f..4be8866 100644 +--- a/config/cloud.cfg.d/05_logging.cfg ++++ b/config/cloud.cfg.d/05_logging.cfg +@@ -64,8 +64,3 @@ log_cfgs: + - [ *log_base, *log_file ] + # A file path can also be used. + # - /etc/log.conf +- +-# This tells cloud-init to redirect its stdout and stderr to +-# 'tee -a /var/log/cloud-init-output.log' so the user can see output +-# there without needing to look on the console. +-output: {all: '| tee -a /var/log/cloud-init-output.log'} diff --git a/SOURCES/0016-add-power-state-change-module-to-cloud_final_modules.patch b/SOURCES/0016-add-power-state-change-module-to-cloud_final_modules.patch new file mode 100644 index 0000000..6bd0f00 --- /dev/null +++ b/SOURCES/0016-add-power-state-change-module-to-cloud_final_modules.patch @@ -0,0 +1,23 @@ +From b48c5aede6fb5187b994a06462c79ff331888c81 Mon Sep 17 00:00:00 2001 +From: Lars Kellogg-Stedman +Date: Fri, 7 Apr 2017 00:40:22 -0400 +Subject: [PATCH] add power-state-change module to cloud_final_modules + +Resolves: rhbz#1252477 +X-downstream-only: true +--- + rhel/cloud.cfg | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 986f241..8644872 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -48,6 +48,7 @@ cloud_final_modules: + - keys-to-console + - phone-home + - final-message ++ - power-state-change + + system_info: + default_user: diff --git a/SOURCES/0017-sysconfig-Raise-ValueError-when-multiple-default-gat.patch b/SOURCES/0017-sysconfig-Raise-ValueError-when-multiple-default-gat.patch new file mode 100644 index 0000000..4cf526d --- /dev/null +++ b/SOURCES/0017-sysconfig-Raise-ValueError-when-multiple-default-gat.patch @@ -0,0 +1,143 @@ +From a298fd775b537839214802f161634215d2f827a8 Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Tue, 9 May 2017 20:23:05 -0600 +Subject: [PATCH] sysconfig: Raise ValueError when multiple default gateways + are present. + +Fixed setting Route.has_set_default_ipv6 or *_ipv4 to track whether a +route already has a default gateway defined. The code was setting +Route.has_set_default which wasn't checked when raising "duplicate +gateway" ValueErrors. Added unit tests to exercise this expected raised +ValueError. 
Also moved is_ipv6 = subnet.get('ipv6') logic out of a for +loop because we don't need to recalculate the same value every route +iteration. + +LP: #1687485 +(cherry picked from commit dd03bb411c9a6f10854a3bbc3223b204c3d4d174) + +Resolves: rhbz#1438082 +--- + cloudinit/net/sysconfig.py | 14 ++++----- + tests/unittests/test_net.py | 76 +++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 83 insertions(+), 7 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 29c906f..d521d5c 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -230,12 +230,8 @@ class Renderer(renderer.Renderer): + iface_cfg.name)) + if 'netmask' in subnet: + iface_cfg['NETMASK'] = subnet['netmask'] ++ is_ipv6 = subnet.get('ipv6') + for route in subnet.get('routes', []): +- if subnet.get('ipv6'): +- gw_cfg = 'IPV6_DEFAULTGW' +- else: +- gw_cfg = 'GATEWAY' +- + if _is_default_route(route): + if ( + (subnet.get('ipv4') and +@@ -256,8 +252,12 @@ class Renderer(renderer.Renderer): + # also provided the default route? + iface_cfg['DEFROUTE'] = True + if 'gateway' in route: +- iface_cfg[gw_cfg] = route['gateway'] +- route_cfg.has_set_default = True ++ if is_ipv6: ++ iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] ++ route_cfg.has_set_default_ipv6 = True ++ else: ++ iface_cfg['GATEWAY'] = route['gateway'] ++ route_cfg.has_set_default_ipv4 = True + else: + gw_key = 'GATEWAY%s' % route_cfg.last_idx + nm_key = 'NETMASK%s' % route_cfg.last_idx +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index ce13664..262c6d5 100755 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -515,6 +515,82 @@ USERCTL=no + """.lstrip() + self.assertEqual(expected_content, content) + ++ def test_multiple_ipv4_default_gateways(self): ++ """ValueError is raised when duplicate ipv4 gateways exist.""" ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [{ ++ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", ++ "type": "ipv4", "netmask": "255.255.252.0", ++ "link": "tap1a81968a-79", ++ "routes": [{ ++ "netmask": "0.0.0.0", ++ "network": "0.0.0.0", ++ "gateway": "172.19.3.254", ++ }, { ++ "netmask": "0.0.0.0", # A second default gateway ++ "network": "0.0.0.0", ++ "gateway": "172.20.3.254", ++ }], ++ "ip_address": "172.19.1.34", "id": "network0" ++ }], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, "type": "bridge", "id": ++ "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" ++ }, ++ ], ++ } ++ macs = {'fa:16:3e:ed:9a:59': 'eth0'} ++ render_dir = self.tmp_dir() ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ ns = network_state.parse_net_config_data(network_cfg, ++ skip_broken=False) ++ renderer = sysconfig.Renderer() ++ with self.assertRaises(ValueError): ++ renderer.render_network_state(ns, render_dir) ++ self.assertEqual([], os.listdir(render_dir)) ++ ++ def test_multiple_ipv6_default_gateways(self): ++ """ValueError is raised when duplicate ipv6 gateways exist.""" ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [{ ++ "network_id": "public-ipv6", ++ "type": "ipv6", "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [{ ++ "gateway": "2001:DB8::1", ++ "netmask": "::", ++ "network": "::" ++ }, { ++ "gateway": "2001:DB9::1", ++ "netmask": "::", ++ "network": "::" ++ }], ++ "ip_address": "2001:DB8::10", "id": "network1" ++ }], ++ "links": [ ++ { ++ "ethernet_mac_address": 
"fa:16:3e:ed:9a:59", ++ "mtu": None, "type": "bridge", "id": ++ "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" ++ }, ++ ], ++ } ++ macs = {'fa:16:3e:ed:9a:59': 'eth0'} ++ render_dir = self.tmp_dir() ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ ns = network_state.parse_net_config_data(network_cfg, ++ skip_broken=False) ++ renderer = sysconfig.Renderer() ++ with self.assertRaises(ValueError): ++ renderer.render_network_state(ns, render_dir) ++ self.assertEqual([], os.listdir(render_dir)) ++ + def test_openstack_rendering_samples(self): + tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, tmp_dir) diff --git a/SOURCES/0018-Fix-dual-stack-IPv4-IPv6-configuration-for-RHEL.patch b/SOURCES/0018-Fix-dual-stack-IPv4-IPv6-configuration-for-RHEL.patch new file mode 100644 index 0000000..56830d4 --- /dev/null +++ b/SOURCES/0018-Fix-dual-stack-IPv4-IPv6-configuration-for-RHEL.patch @@ -0,0 +1,642 @@ +From 8741bba1983532e6aefa78f350fdae91b8b151a1 Mon Sep 17 00:00:00 2001 +From: Andreas Karis +Date: Fri, 21 Apr 2017 20:35:39 -0400 +Subject: [PATCH] Fix dual stack IPv4/IPv6 configuration for RHEL + +Dual stack IPv4/IPv6 configuration via config drive is broken for RHEL7. +This patch fixes several scenarios for IPv4/IPv6/dual stack with multiple IP assignment +Removes unpopular IPv4 alias files and invalid IPv6 alias files + +Also fixes associated unit tests + +LP: #1679817 +LP: #1685534 +LP: #1685532 + +Resolves: rhbz#1438082 +X-approved-upstream: true +--- + cloudinit/net/sysconfig.py | 244 ++++++++++++++++++------- + tests/unittests/test_distros/test_netconfig.py | 139 +++++++++++++- + tests/unittests/test_net.py | 144 ++++++++++++++- + 3 files changed, 455 insertions(+), 72 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index d521d5c..240ed23 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -58,6 +58,9 @@ class ConfigMap(object): + def __setitem__(self, key, value): + self._conf[key] = value + ++ def __getitem__(self, key): ++ return self._conf[key] ++ + def drop(self, key): + self._conf.pop(key, None) + +@@ -82,7 +85,8 @@ class ConfigMap(object): + class Route(ConfigMap): + """Represents a route configuration.""" + +- route_fn_tpl = '%(base)s/network-scripts/route-%(name)s' ++ route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' ++ route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' + + def __init__(self, route_name, base_sysconf_dir): + super(Route, self).__init__() +@@ -101,9 +105,58 @@ class Route(ConfigMap): + return r + + @property +- def path(self): +- return self.route_fn_tpl % ({'base': self._base_sysconf_dir, +- 'name': self._route_name}) ++ def path_ipv4(self): ++ return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir, ++ 'name': self._route_name}) ++ ++ @property ++ def path_ipv6(self): ++ return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir, ++ 'name': self._route_name}) ++ ++ def is_ipv6_route(self, address): ++ return ':' in address ++ ++ def to_string(self, proto="ipv4"): ++ # only accept ipv4 and ipv6 ++ if proto not in ['ipv4', 'ipv6']: ++ raise ValueError("Unknown protocol '%s'" % (str(proto))) ++ buf = six.StringIO() ++ buf.write(_make_header()) ++ if self._conf: ++ buf.write("\n") ++ # need to reindex IPv4 addresses ++ # (because Route can contain a mix of IPv4 and IPv6) ++ reindex = -1 ++ for key in sorted(self._conf.keys()): ++ if 'ADDRESS' in key: ++ index = key.replace('ADDRESS', '') ++ address_value = 
str(self._conf[key]) ++ # only accept combinations: ++ # if proto ipv6 only display ipv6 routes ++ # if proto ipv4 only display ipv4 routes ++ # do not add ipv6 routes if proto is ipv4 ++ # do not add ipv4 routes if proto is ipv6 ++ # (this array will contain a mix of ipv4 and ipv6) ++ if proto == "ipv4" and not self.is_ipv6_route(address_value): ++ netmask_value = str(self._conf['NETMASK' + index]) ++ gateway_value = str(self._conf['GATEWAY' + index]) ++ # increase IPv4 index ++ reindex = reindex + 1 ++ buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), ++ _quote_value(address_value))) ++ buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), ++ _quote_value(gateway_value))) ++ buf.write("%s=%s\n" % ('NETMASK' + str(reindex), ++ _quote_value(netmask_value))) ++ elif proto == "ipv6" and self.is_ipv6_route(address_value): ++ netmask_value = str(self._conf['NETMASK' + index]) ++ gateway_value = str(self._conf['GATEWAY' + index]) ++ buf.write("%s/%s via %s\n" % (address_value, ++ netmask_value, ++ gateway_value)) ++ ++ return buf.getvalue() + + + class NetInterface(ConfigMap): +@@ -209,65 +262,119 @@ class Renderer(renderer.Renderer): + iface_cfg[new_key] = old_value + + @classmethod +- def _render_subnet(cls, iface_cfg, route_cfg, subnet): +- subnet_type = subnet.get('type') +- if subnet_type == 'dhcp6': +- iface_cfg['DHCPV6C'] = True +- iface_cfg['IPV6INIT'] = True +- iface_cfg['BOOTPROTO'] = 'dhcp' +- elif subnet_type in ['dhcp4', 'dhcp']: +- iface_cfg['BOOTPROTO'] = 'dhcp' +- elif subnet_type == 'static': +- iface_cfg['BOOTPROTO'] = 'static' +- if subnet.get('ipv6'): +- iface_cfg['IPV6ADDR'] = subnet['address'] ++ def _render_subnets(cls, iface_cfg, subnets): ++ # setting base values ++ iface_cfg['BOOTPROTO'] = 'none' ++ ++ # modifying base values according to subnets ++ for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): ++ subnet_type = subnet.get('type') ++ if subnet_type == 'dhcp6': + iface_cfg['IPV6INIT'] = True ++ iface_cfg['DHCPV6C'] = True ++ iface_cfg['BOOTPROTO'] = 'dhcp' ++ elif subnet_type in ['dhcp4', 'dhcp']: ++ iface_cfg['BOOTPROTO'] = 'dhcp' ++ elif subnet_type == 'static': ++ # grep BOOTPROTO sysconfig.txt -A2 | head -3 ++ # BOOTPROTO=none|bootp|dhcp ++ # 'bootp' or 'dhcp' cause a DHCP client ++ # to run on the device. Any other ++ # value causes any static configuration ++ # in the file to be applied. ++ # ==> the following should not be set to 'static' ++ # but should remain 'none' ++ # if iface_cfg['BOOTPROTO'] == 'none': ++ # iface_cfg['BOOTPROTO'] = 'static' ++ if subnet.get('ipv6'): ++ iface_cfg['IPV6INIT'] = True + else: +- iface_cfg['IPADDR'] = subnet['address'] +- else: +- raise ValueError("Unknown subnet type '%s' found" +- " for interface '%s'" % (subnet_type, +- iface_cfg.name)) +- if 'netmask' in subnet: +- iface_cfg['NETMASK'] = subnet['netmask'] +- is_ipv6 = subnet.get('ipv6') +- for route in subnet.get('routes', []): +- if _is_default_route(route): +- if ( +- (subnet.get('ipv4') and +- route_cfg.has_set_default_ipv4) or +- (subnet.get('ipv6') and +- route_cfg.has_set_default_ipv6) +- ): +- raise ValueError("Duplicate declaration of default " +- "route found for interface '%s'" +- % (iface_cfg.name)) +- # NOTE(harlowja): ipv6 and ipv4 default gateways +- gw_key = 'GATEWAY0' +- nm_key = 'NETMASK0' +- addr_key = 'ADDRESS0' +- # The owning interface provides the default route. +- # +- # TODO(harlowja): add validation that no other iface has +- # also provided the default route? 
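
The Route.to_string() refactor above is what splits a mixed route table into two files: IPv4 entries are re-indexed into ADDRESSn/GATEWAYn/NETMASKn lines for route-<iface>, while IPv6 entries become "network/prefix via gateway" lines for route6-<iface>, with a ':' in the address deciding the family. A reduced standalone sketch of that split; render_route_file and its plain-dict input are illustrative stand-ins, not the Route class itself.

    def render_route_file(conf, proto="ipv4"):
        """conf maps ADDRESSn/GATEWAYn/NETMASKn keys to values, possibly
        mixing IPv4 and IPv6 entries; return the route-* body (ipv4,
        key=value form) or the route6-* body ('net/prefix via gw' form)."""
        if proto not in ("ipv4", "ipv6"):
            raise ValueError("Unknown protocol '%s'" % proto)
        lines, reindex = [], -1
        for key in sorted(conf):
            if not key.startswith("ADDRESS"):
                continue
            idx = key[len("ADDRESS"):]
            addr = str(conf[key])
            mask = str(conf["NETMASK" + idx])
            gw = str(conf["GATEWAY" + idx])
            if proto == "ipv4" and ":" not in addr:
                reindex += 1  # IPv4 entries are renumbered contiguously
                lines += ["ADDRESS%d=%s" % (reindex, addr),
                          "GATEWAY%d=%s" % (reindex, gw),
                          "NETMASK%d=%s" % (reindex, mask)]
            elif proto == "ipv6" and ":" in addr:
                lines.append("%s/%s via %s" % (addr, mask, gw))
        return ("\n".join(lines) + "\n") if lines else ""

    # Hypothetical mixed table: one IPv4 route and one IPv6 route.
    mixed = {'ADDRESS0': '10.0.0.0', 'NETMASK0': '255.0.0.0',
             'GATEWAY0': '10.0.0.1',
             'ADDRESS1': '2001:db8::', 'NETMASK1': '64',
             'GATEWAY1': '2001:db8::1'}
    print(render_route_file(mixed, "ipv4"))  # ADDRESS0=/GATEWAY0=/NETMASK0= lines
    print(render_route_file(mixed, "ipv6"))  # "2001:db8::/64 via 2001:db8::1"
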
+- iface_cfg['DEFROUTE'] = True +- if 'gateway' in route: +- if is_ipv6: +- iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] +- route_cfg.has_set_default_ipv6 = True ++ raise ValueError("Unknown subnet type '%s' found" ++ " for interface '%s'" % (subnet_type, ++ iface_cfg.name)) ++ ++ # set IPv4 and IPv6 static addresses ++ ipv4_index = -1 ++ ipv6_index = -1 ++ for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): ++ subnet_type = subnet.get('type') ++ if subnet_type == 'dhcp6': ++ continue ++ elif subnet_type in ['dhcp4', 'dhcp']: ++ continue ++ elif subnet_type == 'static': ++ if subnet.get('ipv6'): ++ ipv6_index = ipv6_index + 1 ++ if 'netmask' in subnet and str(subnet['netmask']) != "": ++ ipv6_cidr = (subnet['address'] + ++ '/' + ++ str(subnet['netmask'])) + else: +- iface_cfg['GATEWAY'] = route['gateway'] +- route_cfg.has_set_default_ipv4 = True +- else: +- gw_key = 'GATEWAY%s' % route_cfg.last_idx +- nm_key = 'NETMASK%s' % route_cfg.last_idx +- addr_key = 'ADDRESS%s' % route_cfg.last_idx +- route_cfg.last_idx += 1 +- for (old_key, new_key) in [('gateway', gw_key), +- ('netmask', nm_key), +- ('network', addr_key)]: +- if old_key in route: +- route_cfg[new_key] = route[old_key] ++ ipv6_cidr = subnet['address'] ++ if ipv6_index == 0: ++ iface_cfg['IPV6ADDR'] = ipv6_cidr ++ elif ipv6_index == 1: ++ iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr ++ else: ++ iface_cfg['IPV6ADDR_SECONDARIES'] = ( ++ iface_cfg['IPV6ADDR_SECONDARIES'] + ++ " " + ipv6_cidr) ++ else: ++ ipv4_index = ipv4_index + 1 ++ if ipv4_index == 0: ++ iface_cfg['IPADDR'] = subnet['address'] ++ if 'netmask' in subnet: ++ iface_cfg['NETMASK'] = subnet['netmask'] ++ else: ++ iface_cfg['IPADDR' + str(ipv4_index)] = \ ++ subnet['address'] ++ if 'netmask' in subnet: ++ iface_cfg['NETMASK' + str(ipv4_index)] = \ ++ subnet['netmask'] ++ ++ @classmethod ++ def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): ++ for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): ++ for route in subnet.get('routes', []): ++ is_ipv6 = subnet.get('ipv6') ++ ++ if _is_default_route(route): ++ if ( ++ (subnet.get('ipv4') and ++ route_cfg.has_set_default_ipv4) or ++ (subnet.get('ipv6') and ++ route_cfg.has_set_default_ipv6) ++ ): ++ raise ValueError("Duplicate declaration of default " ++ "route found for interface '%s'" ++ % (iface_cfg.name)) ++ # NOTE(harlowja): ipv6 and ipv4 default gateways ++ gw_key = 'GATEWAY0' ++ nm_key = 'NETMASK0' ++ addr_key = 'ADDRESS0' ++ # The owning interface provides the default route. ++ # ++ # TODO(harlowja): add validation that no other iface has ++ # also provided the default route? 
++ iface_cfg['DEFROUTE'] = True ++ if 'gateway' in route: ++ if is_ipv6: ++ iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] ++ route_cfg.has_set_default_ipv6 = True ++ else: ++ iface_cfg['GATEWAY'] = route['gateway'] ++ route_cfg.has_set_default_ipv4 = True ++ ++ else: ++ gw_key = 'GATEWAY%s' % route_cfg.last_idx ++ nm_key = 'NETMASK%s' % route_cfg.last_idx ++ addr_key = 'ADDRESS%s' % route_cfg.last_idx ++ route_cfg.last_idx += 1 ++ for (old_key, new_key) in [('gateway', gw_key), ++ ('netmask', nm_key), ++ ('network', addr_key)]: ++ if old_key in route: ++ route_cfg[new_key] = route[old_key] + + @classmethod + def _render_bonding_opts(cls, iface_cfg, iface): +@@ -293,15 +400,9 @@ class Renderer(renderer.Renderer): + iface_subnets = iface.get("subnets", []) + iface_cfg = iface_contents[iface_name] + route_cfg = iface_cfg.routes +- if len(iface_subnets) == 1: +- cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) +- elif len(iface_subnets) > 1: +- for i, iface_subnet in enumerate(iface_subnets, +- start=len(iface_cfg.children)): +- iface_sub_cfg = iface_cfg.copy() +- iface_sub_cfg.name = "%s:%s" % (iface_name, i) +- iface_cfg.children.append(iface_sub_cfg) +- cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet) ++ ++ cls._render_subnets(iface_cfg, iface_subnets) ++ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + + @classmethod + def _render_bond_interfaces(cls, network_state, iface_contents): +@@ -383,7 +484,10 @@ class Renderer(renderer.Renderer): + if iface_cfg: + contents[iface_cfg.path] = iface_cfg.to_string() + if iface_cfg.routes: +- contents[iface_cfg.routes.path] = iface_cfg.routes.to_string() ++ contents[iface_cfg.routes.path_ipv4] = \ ++ iface_cfg.routes.to_string("ipv4") ++ contents[iface_cfg.routes.path_ipv6] = \ ++ iface_cfg.routes.to_string("ipv6") + return contents + + def render_network_state(self, target, network_state): +diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py +index bde3bb5..85982cf 100644 +--- a/tests/unittests/test_distros/test_netconfig.py ++++ b/tests/unittests/test_distros/test_netconfig.py +@@ -195,6 +195,76 @@ NETWORKING=yes + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + ++ def test_apply_network_config_rh(self): ++ rh_distro = self._get_distro('rhel') ++ ++ write_bufs = {} ++ ++ def replace_write(filename, content, mode=0o644, omode="wb"): ++ buf = WriteBuffer() ++ buf.mode = mode ++ buf.omode = omode ++ buf.write(content) ++ write_bufs[filename] = buf ++ ++ with ExitStack() as mocks: ++ # sysconfig availability checks ++ mocks.enter_context( ++ mock.patch.object(util, 'which', return_value=True)) ++ mocks.enter_context( ++ mock.patch.object(util, 'write_file', replace_write)) ++ mocks.enter_context( ++ mock.patch.object(util, 'load_file', return_value='')) ++ mocks.enter_context( ++ mock.patch.object(os.path, 'isfile', return_value=True)) ++ ++ rh_distro.apply_network_config(V1_NET_CFG, False) ++ ++ self.assertEqual(len(write_bufs), 5) ++ ++ # eth0 ++ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', ++ write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] ++ expected_buf = ''' ++# Created by cloud-init on instance boot automatically, do not edit. 
++# ++BOOTPROTO=none ++DEVICE=eth0 ++IPADDR=192.168.1.5 ++NETMASK=255.255.255.0 ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ ++ # eth1 ++ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', ++ write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] ++ expected_buf = ''' ++# Created by cloud-init on instance boot automatically, do not edit. ++# ++BOOTPROTO=dhcp ++DEVICE=eth1 ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ ++ self.assertIn('/etc/sysconfig/network', write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network'] ++ expected_buf = ''' ++# Created by cloud-init v. 0.7 ++NETWORKING=yes ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ + def test_write_ipv6_rhel(self): + rh_distro = self._get_distro('rhel') + +@@ -214,7 +284,6 @@ NETWORKING=yes + mock.patch.object(util, 'load_file', return_value='')) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=False)) +- + rh_distro.apply_network(BASE_NET_CFG_IPV6, False) + + self.assertEqual(len(write_bufs), 4) +@@ -274,6 +343,74 @@ IPV6_AUTOCONF=no + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + ++ def test_apply_network_config_ipv6_rh(self): ++ rh_distro = self._get_distro('rhel') ++ ++ write_bufs = {} ++ ++ def replace_write(filename, content, mode=0o644, omode="wb"): ++ buf = WriteBuffer() ++ buf.mode = mode ++ buf.omode = omode ++ buf.write(content) ++ write_bufs[filename] = buf ++ ++ with ExitStack() as mocks: ++ mocks.enter_context( ++ mock.patch.object(util, 'which', return_value=True)) ++ mocks.enter_context( ++ mock.patch.object(util, 'write_file', replace_write)) ++ mocks.enter_context( ++ mock.patch.object(util, 'load_file', return_value='')) ++ mocks.enter_context( ++ mock.patch.object(os.path, 'isfile', return_value=True)) ++ ++ rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) ++ ++ self.assertEqual(len(write_bufs), 5) ++ ++ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', ++ write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] ++ expected_buf = ''' ++# Created by cloud-init on instance boot automatically, do not edit. ++# ++BOOTPROTO=none ++DEVICE=eth0 ++IPV6ADDR=2607:f0d0:1002:0011::2/64 ++IPV6INIT=yes ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', ++ write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] ++ expected_buf = ''' ++# Created by cloud-init on instance boot automatically, do not edit. ++# ++BOOTPROTO=dhcp ++DEVICE=eth1 ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ ++ self.assertIn('/etc/sysconfig/network', write_bufs) ++ write_buf = write_bufs['/etc/sysconfig/network'] ++ expected_buf = ''' ++# Created by cloud-init v. 
0.7 ++NETWORKING=yes ++NETWORKING_IPV6=yes ++IPV6_AUTOCONF=no ++''' ++ self.assertCfgEquals(expected_buf, str(write_buf)) ++ self.assertEqual(write_buf.mode, 0o644) ++ + def test_simple_write_freebsd(self): + fbsd_distro = self._get_distro('freebsd') + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 262c6d5..172d604 100755 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -137,7 +137,7 @@ OS_SAMPLES = [ + """ + # Created by cloud-init on instance boot automatically, do not edit. + # +-BOOTPROTO=static ++BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=172.19.3.254 +@@ -165,6 +165,148 @@ nameserver 172.19.0.12 + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] ++ }, ++ { ++ 'in_data': { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [{ ++ "network_id": "public-ipv4", ++ "type": "ipv4", "netmask": "255.255.252.0", ++ "link": "tap1a81968a-79", ++ "routes": [{ ++ "netmask": "0.0.0.0", ++ "network": "0.0.0.0", ++ "gateway": "172.19.3.254", ++ }], ++ "ip_address": "172.19.1.34", "id": "network0" ++ }, { ++ "network_id": "private-ipv4", ++ "type": "ipv4", "netmask": "255.255.255.0", ++ "link": "tap1a81968a-79", ++ "routes": [], ++ "ip_address": "10.0.0.10", "id": "network1" ++ }], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, "type": "bridge", "id": ++ "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" ++ }, ++ ], ++ }, ++ 'in_macs': { ++ 'fa:16:3e:ed:9a:59': 'eth0', ++ }, ++ 'out_sysconfig': [ ++ ('etc/sysconfig/network-scripts/ifcfg-eth0', ++ """ ++# Created by cloud-init on instance boot automatically, do not edit. ++# ++BOOTPROTO=none ++DEFROUTE=yes ++DEVICE=eth0 ++GATEWAY=172.19.3.254 ++HWADDR=fa:16:3e:ed:9a:59 ++IPADDR=172.19.1.34 ++IPADDR1=10.0.0.10 ++NETMASK=255.255.252.0 ++NETMASK1=255.255.255.0 ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++""".lstrip()), ++ ('etc/resolv.conf', ++ """ ++; Created by cloud-init on instance boot automatically, do not edit. 
++; ++nameserver 172.19.0.12 ++""".lstrip()), ++ ('etc/udev/rules.d/70-persistent-net.rules', ++ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', ++ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] ++ }, ++ { ++ 'in_data': { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [{ ++ "network_id": "public-ipv4", ++ "type": "ipv4", "netmask": "255.255.252.0", ++ "link": "tap1a81968a-79", ++ "routes": [{ ++ "netmask": "0.0.0.0", ++ "network": "0.0.0.0", ++ "gateway": "172.19.3.254", ++ }], ++ "ip_address": "172.19.1.34", "id": "network0" ++ }, { ++ "network_id": "public-ipv6-a", ++ "type": "ipv6", "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ { ++ "gateway": "2001:DB8::1", ++ "netmask": "::", ++ "network": "::" ++ } ++ ], ++ "ip_address": "2001:DB8::10", "id": "network1" ++ }, { ++ "network_id": "public-ipv6-b", ++ "type": "ipv6", "netmask": "64", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ ], ++ "ip_address": "2001:DB9::10", "id": "network2" ++ }, { ++ "network_id": "public-ipv6-c", ++ "type": "ipv6", "netmask": "64", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ ], ++ "ip_address": "2001:DB10::10", "id": "network3" ++ }], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, "type": "bridge", "id": ++ "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" ++ }, ++ ], ++ }, ++ 'in_macs': { ++ 'fa:16:3e:ed:9a:59': 'eth0', ++ }, ++ 'out_sysconfig': [ ++ ('etc/sysconfig/network-scripts/ifcfg-eth0', ++ """ ++# Created by cloud-init on instance boot automatically, do not edit. ++# ++BOOTPROTO=none ++DEFROUTE=yes ++DEVICE=eth0 ++GATEWAY=172.19.3.254 ++HWADDR=fa:16:3e:ed:9a:59 ++IPADDR=172.19.1.34 ++IPV6ADDR=2001:DB8::10 ++IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" ++IPV6INIT=yes ++IPV6_DEFAULTGW=2001:DB8::1 ++NETMASK=255.255.252.0 ++ONBOOT=yes ++TYPE=Ethernet ++USERCTL=no ++""".lstrip()), ++ ('etc/resolv.conf', ++ """ ++; Created by cloud-init on instance boot automatically, do not edit. 
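
The expected ifcfg-eth0 just above shows the indexing scheme used by _render_subnets: the first static IPv4 address becomes IPADDR/NETMASK and later ones IPADDR1/NETMASK1 and so on, while the first IPv6 address becomes IPV6ADDR and the rest are space-joined (with their /prefix) into IPV6ADDR_SECONDARIES. A condensed sketch of that mapping; assign_static_addresses is an illustrative name, and the input is a list of subnet dicts shaped like the network_state subnets above.

    def assign_static_addresses(subnets):
        """Map static subnets to sysconfig address keys (sketch only)."""
        cfg, v4, v6 = {}, -1, -1
        for s in subnets:
            if s.get('type') != 'static':
                continue
            if ':' in s['address']:
                v6 += 1
                cidr = s['address'] + ('/' + str(s['netmask'])
                                       if s.get('netmask') else '')
                if v6 == 0:
                    cfg['IPV6ADDR'] = cidr
                elif v6 == 1:
                    cfg['IPV6ADDR_SECONDARIES'] = cidr
                else:
                    cfg['IPV6ADDR_SECONDARIES'] += ' ' + cidr
            else:
                v4 += 1
                suffix = '' if v4 == 0 else str(v4)
                cfg['IPADDR' + suffix] = s['address']
                if 'netmask' in s:
                    cfg['NETMASK' + suffix] = s['netmask']
        return cfg

    # Mirrors the rendering exercised above:
    subs = [{'type': 'static', 'address': '172.19.1.34',
             'netmask': '255.255.252.0'},
            {'type': 'static', 'address': '2001:DB8::10', 'netmask': ''},
            {'type': 'static', 'address': '2001:DB9::10', 'netmask': '64'}]
    cfg = assign_static_addresses(subs)
    assert cfg['IPADDR'] == '172.19.1.34'
    assert cfg['IPV6ADDR'] == '2001:DB8::10'
    assert cfg['IPV6ADDR_SECONDARIES'] == '2001:DB9::10/64'
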
++; ++nameserver 172.19.0.12 ++""".lstrip()), ++ ('etc/udev/rules.d/70-persistent-net.rules', ++ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', ++ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + } + ] + diff --git a/SOURCES/0019-Add-missing-sysconfig-unit-test-data.patch b/SOURCES/0019-Add-missing-sysconfig-unit-test-data.patch new file mode 100644 index 0000000..0ccf679 --- /dev/null +++ b/SOURCES/0019-Add-missing-sysconfig-unit-test-data.patch @@ -0,0 +1,66 @@ +From daa2bd7c780b91c81154c700aeea25755c53e0a6 Mon Sep 17 00:00:00 2001 +From: Ryan McCabe +Date: Thu, 18 May 2017 23:20:52 -0400 +Subject: [PATCH] Add missing sysconfig unit test data + +This includes portions of upstream commit: +ef18b8ac4cf7e3dfd98830fbdb298380a192a0fc + +Resolves: rhbz#1438082 +X-downstream-only: true +--- + cloudinit/net/sysconfig.py | 2 +- + tests/unittests/test_distros/test_netconfig.py | 27 ++++++++++++++++++++++++++ + 2 files changed, 28 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 240ed23..ef80d99 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -246,7 +246,7 @@ class Renderer(renderer.Renderer): + def __init__(self, config=None): + if not config: + config = {} +- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig/') ++ self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig') + self.netrules_path = config.get( + 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules') + self.dns_path = config.get('dns_path', 'etc/resolv.conf') +diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py +index 85982cf..23ff570 100644 +--- a/tests/unittests/test_distros/test_netconfig.py ++++ b/tests/unittests/test_distros/test_netconfig.py +@@ -67,6 +67,33 @@ iface eth1 inet6 static + gateway 2607:f0d0:1002:0011::1 + ''' + ++V1_NET_CFG = {'config': [{'name': 'eth0', ++ ++ 'subnets': [{'address': '192.168.1.5', ++ 'broadcast': '192.168.1.0', ++ 'gateway': '192.168.1.254', ++ 'netmask': '255.255.255.0', ++ 'type': 'static'}], ++ 'type': 'physical'}, ++ {'name': 'eth1', ++ 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], ++ 'type': 'physical'}], ++ 'version': 1} ++ ++V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', ++ 'subnets': [{'address': ++ '2607:f0d0:1002:0011::2', ++ 'gateway': ++ '2607:f0d0:1002:0011::1', ++ 'netmask': '64', ++ 'type': 'static'}], ++ 'type': 'physical'}, ++ {'name': 'eth1', ++ 'subnets': [{'control': 'auto', ++ 'type': 'dhcp4'}], ++ 'type': 'physical'}], ++ 'version': 1} ++ + + class WriteBuffer(object): + def __init__(self): diff --git a/SOURCES/0020-Fix-ipv6-subnet-detection.patch b/SOURCES/0020-Fix-ipv6-subnet-detection.patch new file mode 100644 index 0000000..2d1fe2b --- /dev/null +++ b/SOURCES/0020-Fix-ipv6-subnet-detection.patch @@ -0,0 +1,57 @@ +From bc2e1a64e64bddf04fd3e27c79432facbedca182 Mon Sep 17 00:00:00 2001 +From: Ryan McCabe +Date: Tue, 13 Jun 2017 11:45:07 -0400 +Subject: [PATCH] Fix ipv6 subnet detection + +Add better ipv6 subnet detection that was added as part of a +much larger patch set in upstream commit +ef18b8ac4cf7e3dfd98830fbdb298380a192a0fc + +Related: rhbz#1438082 +X-approved-upstream: true + +Signed-off-by: Ryan McCabe +--- + cloudinit/net/sysconfig.py | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index ef80d99..6a0dd43 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py 
+@@ -11,6 +11,17 @@ from cloudinit import util + from . import renderer + + ++def _subnet_is_ipv6(subnet): ++ """Common helper for checking network_state subnets for ipv6.""" ++ # 'static6' or 'dhcp6' ++ if subnet['type'].endswith('6'): ++ # This is a request for DHCPv6. ++ return True ++ elif subnet['type'] == 'static' and ":" in subnet['address']: ++ return True ++ return False ++ ++ + def _make_header(sep='#'): + lines = [ + "Created by cloud-init on instance boot automatically, do not edit.", +@@ -286,7 +297,7 @@ class Renderer(renderer.Renderer): + # but should remain 'none' + # if iface_cfg['BOOTPROTO'] == 'none': + # iface_cfg['BOOTPROTO'] = 'static' +- if subnet.get('ipv6'): ++ if _subnet_is_ipv6(subnet): + iface_cfg['IPV6INIT'] = True + else: + raise ValueError("Unknown subnet type '%s' found" +@@ -303,7 +314,7 @@ class Renderer(renderer.Renderer): + elif subnet_type in ['dhcp4', 'dhcp']: + continue + elif subnet_type == 'static': +- if subnet.get('ipv6'): ++ if _subnet_is_ipv6(subnet): + ipv6_index = ipv6_index + 1 + if 'netmask' in subnet and str(subnet['netmask']) != "": + ipv6_cidr = (subnet['address'] + diff --git a/SOURCES/0022-RHEL-CentOS-Fix-default-routes-for-IPv4-IPv6-configu.patch b/SOURCES/0022-RHEL-CentOS-Fix-default-routes-for-IPv4-IPv6-configu.patch new file mode 100644 index 0000000..66bf585 --- /dev/null +++ b/SOURCES/0022-RHEL-CentOS-Fix-default-routes-for-IPv4-IPv6-configu.patch @@ -0,0 +1,61 @@ +From 9a7480d13ab3be53f0152a19c68d596490d5d290 Mon Sep 17 00:00:00 2001 +From: Andreas Karis +Date: Tue, 6 Jun 2017 12:55:50 -0400 +Subject: [PATCH] RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. + +Since f38fa413176, default routes get added to both ifcfg-* and route-* +and route6-* files. Default routes should only go to ifcfg-* files, +otherwise the information is redundant. + +LP: #1696176 +Resolves: rhbz#1438082 + +Signed-off-by: Ryan McCabe +(cherry picked from commit f99745cf916e707eaa1ded6f12e8b69837b7242d) +--- + cloudinit/net/sysconfig.py | 12 +++++++----- + tests/unittests/test_net.py | 8 -------- + 2 files changed, 7 insertions(+), 13 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 6a0dd43..c67e6aa 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -381,11 +381,13 @@ class Renderer(renderer.Renderer): + nm_key = 'NETMASK%s' % route_cfg.last_idx + addr_key = 'ADDRESS%s' % route_cfg.last_idx + route_cfg.last_idx += 1 +- for (old_key, new_key) in [('gateway', gw_key), +- ('netmask', nm_key), +- ('network', addr_key)]: +- if old_key in route: +- route_cfg[new_key] = route[old_key] ++ # add default routes only to ifcfg files, not ++ # to route-* or route6-* ++ for (old_key, new_key) in [('gateway', gw_key), ++ ('netmask', nm_key), ++ ('network', addr_key)]: ++ if old_key in route: ++ route_cfg[new_key] = route[old_key] + + @classmethod + def _render_bonding_opts(cls, iface_cfg, iface): +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 172d604..45ab732 100755 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -148,14 +148,6 @@ ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """.lstrip()), +- ('etc/sysconfig/network-scripts/route-eth0', +- """ +-# Created by cloud-init on instance boot automatically, do not edit. +-# +-ADDRESS0=0.0.0.0 +-GATEWAY0=172.19.3.254 +-NETMASK0=0.0.0.0 +-""".lstrip()), + ('etc/resolv.conf', + """ + ; Created by cloud-init on instance boot automatically, do not edit. 
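
For reference, the _subnet_is_ipv6() helper added in the ipv6-subnet-detection patch above can be exercised on its own; it is copied here unchanged, followed by a few hypothetical subnet dicts showing how it classifies them.

    def _subnet_is_ipv6(subnet):
        """Common helper for checking network_state subnets for ipv6."""
        # 'static6' or 'dhcp6'
        if subnet['type'].endswith('6'):
            # This is a request for DHCPv6.
            return True
        elif subnet['type'] == 'static' and ":" in subnet['address']:
            return True
        return False

    assert _subnet_is_ipv6({'type': 'dhcp6'})
    assert _subnet_is_ipv6({'type': 'static', 'address': '2001:db8::10'})
    assert not _subnet_is_ipv6({'type': 'static', 'address': '10.0.0.10'})
    assert not _subnet_is_ipv6({'type': 'dhcp4'})
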
diff --git a/SOURCES/0023-DatasourceEc2-add-warning-message-when-not-on-AWS.patch b/SOURCES/0023-DatasourceEc2-add-warning-message-when-not-on-AWS.patch new file mode 100644 index 0000000..89ae3c3 --- /dev/null +++ b/SOURCES/0023-DatasourceEc2-add-warning-message-when-not-on-AWS.patch @@ -0,0 +1,261 @@ +From 75ee377f902082f23de0feea190444e19a942420 Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Thu, 23 Feb 2017 17:15:27 -0500 +Subject: [PATCH 2/5] DatasourceEc2: add warning message when not on AWS. + +Based on the setting Datasource/Ec2/strict_id, the datasource +will now warn once per instance. + +(cherry picked from commit 9bb55c6c45bcc5e310cf7e4d42cad53759dcca15) + +Resolves: rhbz#1496113 + +Signed-off-by: Ryan McCabe +--- + cloudinit/sources/DataSourceAliYun.py | 4 + + cloudinit/sources/DataSourceEc2.py | 178 +++++++++++++++++++++++++++++++++- + 2 files changed, 180 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py +index 2d00255c..9debe947 100644 +--- a/cloudinit/sources/DataSourceAliYun.py ++++ b/cloudinit/sources/DataSourceAliYun.py +@@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): + def get_public_ssh_keys(self): + return parse_public_keys(self.metadata.get('public-keys', {})) + ++ @property ++ def cloud_platform(self): ++ return EC2.Platforms.ALIYUN ++ + + def parse_public_keys(public_keys): + keys = [] +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index c657fd09..26da263a 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -9,6 +9,7 @@ + # This file is part of cloud-init. See LICENSE file for license information. + + import os ++import textwrap + import time + + from cloudinit import ec2_utils as ec2 +@@ -22,12 +23,23 @@ LOG = logging.getLogger(__name__) + # Which version we are requesting of the ec2 metadata apis + DEF_MD_VERSION = '2009-04-04' + ++STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") ++STRICT_ID_DEFAULT = "warn" ++ ++ ++class Platforms(object): ++ ALIYUN = "AliYun" ++ AWS = "AWS" ++ SEEDED = "Seeded" ++ UNKNOWN = "Unknown" ++ + + class DataSourceEc2(sources.DataSource): + # Default metadata urls that will be used if none are provided + # They will be checked for 'resolveability' and some of the + # following may be discarded if they do not resolve + metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"] ++ _cloud_platform = None + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) +@@ -41,8 +53,18 @@ class DataSourceEc2(sources.DataSource): + self.userdata_raw = seed_ret['user-data'] + self.metadata = seed_ret['meta-data'] + LOG.debug("Using seeded ec2 data from %s", self.seed_dir) ++ self._cloud_platform = Platforms.SEEDED + return True + ++ strict_mode, _sleep = read_strict_mode( ++ util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, ++ STRICT_ID_DEFAULT), ("warn", None)) ++ ++ LOG.debug("strict_mode: %s, cloud_platform=%s", ++ strict_mode, self.cloud_platform) ++ if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: ++ return False ++ + try: + if not self.wait_for_metadata_service(): + return False +@@ -51,8 +73,8 @@ class DataSourceEc2(sources.DataSource): + ec2.get_instance_userdata(self.api_ver, self.metadata_address) + self.metadata = ec2.get_instance_metadata(self.api_ver, + self.metadata_address) +- LOG.debug("Crawl of metadata service took %s seconds", +- int(time.time() - start_time)) ++ 
LOG.debug("Crawl of metadata service took %.3f seconds", ++ time.time() - start_time) + return True + except Exception: + util.logexc(LOG, "Failed reading from metadata address %s", +@@ -190,6 +212,158 @@ class DataSourceEc2(sources.DataSource): + return az[:-1] + return None + ++ @property ++ def cloud_platform(self): ++ if self._cloud_platform is None: ++ self._cloud_platform = identify_platform() ++ return self._cloud_platform ++ ++ def activate(self, cfg, is_new_instance): ++ if not is_new_instance: ++ return ++ if self.cloud_platform == Platforms.UNKNOWN: ++ warn_if_necessary( ++ util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) ++ ++ ++def read_strict_mode(cfgval, default): ++ try: ++ return parse_strict_mode(cfgval) ++ except ValueError as e: ++ LOG.warn(e) ++ return default ++ ++ ++def parse_strict_mode(cfgval): ++ # given a mode like: ++ # true, false, warn,[sleep] ++ # return tuple with string mode (true|false|warn) and sleep. ++ if cfgval is True: ++ return 'true', None ++ if cfgval is False: ++ return 'false', None ++ ++ if not cfgval: ++ return 'warn', 0 ++ ++ mode, _, sleep = cfgval.partition(",") ++ if mode not in ('true', 'false', 'warn'): ++ raise ValueError( ++ "Invalid mode '%s' in strict_id setting '%s': " ++ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)) ++ ++ if sleep: ++ try: ++ sleep = int(sleep) ++ except ValueError: ++ raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " ++ "not an integer" % (sleep, cfgval)) ++ else: ++ sleep = None ++ ++ return mode, sleep ++ ++ ++def warn_if_necessary(cfgval): ++ try: ++ mode, sleep = parse_strict_mode(cfgval) ++ except ValueError as e: ++ LOG.warn(e) ++ return ++ ++ if mode == "false": ++ return ++ ++ show_warning(sleep) ++ ++ ++def show_warning(sleep): ++ message = textwrap.dedent(""" ++ **************************************************************** ++ # This system is using the EC2 Metadata Service, but does not # ++ # appear to be running on Amazon EC2 or one of cloud-init's # ++ # known platforms that provide a EC2 Metadata service. In the # ++ # future, cloud-init may stop reading metadata from the EC2 # ++ # Metadata Service unless the platform can be identified # ++ # # ++ # If you are seeing this message, please file a bug against # ++ # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # ++ # Make sure to include the cloud provider your instance is # ++ # running on. # ++ # # ++ # For more information see # ++ # https://bugs.launchpad.net/cloud-init/+bug/1660385 # ++ # # ++ # After you have filed a bug, you can disable this warning by # ++ # launching your instance with the cloud-config below, or # ++ # putting that content into # ++ # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # ++ # # ++ # #cloud-config # ++ # datasource: # ++ # Ec2: # ++ # strict_id: false # ++ # # ++ """) ++ closemsg = "" ++ if sleep: ++ closemsg = " [sleeping for %d seconds] " % sleep ++ message += closemsg.center(64, "*") ++ print(message) ++ LOG.warn(message) ++ if sleep: ++ time.sleep(sleep) ++ ++ ++def identify_aws(data): ++ # data is a dictionary returned by _collect_platform_data. ++ if (data['uuid'].startswith('ec2') and ++ (data['uuid_source'] == 'hypervisor' or ++ data['uuid'] == data['serial'])): ++ return Platforms.AWS ++ ++ return None ++ ++ ++def identify_platform(): ++ # identify the platform and return an entry in Platforms. 
++ data = _collect_platform_data() ++ checks = (identify_aws, lambda x: Platforms.UNKNOWN) ++ for checker in checks: ++ try: ++ result = checker(data) ++ if result: ++ return result ++ except Exception as e: ++ LOG.warn("calling %s with %s raised exception: %s", ++ checker, data, e) ++ ++ ++def _collect_platform_data(): ++ # returns a dictionary with all lower case values: ++ # uuid: system-uuid from dmi or /sys/hypervisor ++ # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' ++ # serial: dmi 'system-serial-number' (/sys/.../product_serial) ++ data = {} ++ try: ++ uuid = util.load_file("/sys/hypervisor/uuid").strip() ++ data['uuid_source'] = 'hypervisor' ++ except Exception: ++ uuid = util.read_dmi_data('system-uuid') ++ data['uuid_source'] = 'dmi' ++ ++ if uuid is None: ++ uuid = '' ++ data['uuid'] = uuid.lower() ++ ++ serial = util.read_dmi_data('system-serial-number') ++ if serial is None: ++ serial = '' ++ ++ data['serial'] = serial.lower() ++ ++ return data ++ + + # Used to match classes to dependencies + datasources = [ +-- +2.13.5 + diff --git a/SOURCES/0024-Identify-Brightbox-as-an-Ec2-datasource-user.patch b/SOURCES/0024-Identify-Brightbox-as-an-Ec2-datasource-user.patch new file mode 100644 index 0000000..148e7a1 --- /dev/null +++ b/SOURCES/0024-Identify-Brightbox-as-an-Ec2-datasource-user.patch @@ -0,0 +1,50 @@ +From 9044e39b1db9da242c244202ad649c5f8b05bc12 Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Fri, 24 Feb 2017 14:19:20 -0500 +Subject: [PATCH 3/5] Identify Brightbox as an Ec2 datasource user. + +Brightbox will identify their platform to the guest by setting the +product serial to a string that ends with 'brightbox.com'. + +LP: #1661693 +(cherry picked from commit 5dd5b2cb539a84ed59f2b3181020d2bd18989718) + +Resolves: rhbz#1496113 + +Signed-off-by: Ryan McCabe +--- + cloudinit/sources/DataSourceEc2.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index 26da263a..c7df8060 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -30,6 +30,7 @@ STRICT_ID_DEFAULT = "warn" + class Platforms(object): + ALIYUN = "AliYun" + AWS = "AWS" ++ BRIGHTBOX = "Brightbox" + SEEDED = "Seeded" + UNKNOWN = "Unknown" + +@@ -325,10 +326,15 @@ def identify_aws(data): + return None + + ++def identify_brightbox(data): ++ if data['serial'].endswith('brightbox.com'): ++ return Platforms.BRIGHTBOX ++ ++ + def identify_platform(): + # identify the platform and return an entry in Platforms. + data = _collect_platform_data() +- checks = (identify_aws, lambda x: Platforms.UNKNOWN) ++ checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) + for checker in checks: + try: + result = checker(data) +-- +2.13.5 + diff --git a/SOURCES/0025-AliYun-Enable-platform-identification-and-enable-by-.patch b/SOURCES/0025-AliYun-Enable-platform-identification-and-enable-by-.patch new file mode 100644 index 0000000..ac01a97 --- /dev/null +++ b/SOURCES/0025-AliYun-Enable-platform-identification-and-enable-by-.patch @@ -0,0 +1,195 @@ +From a7727ecf117a2bc02f68405823796afe1d76d3e3 Mon Sep 17 00:00:00 2001 +From: Junjie Wang +Date: Fri, 21 Apr 2017 20:06:09 +0800 +Subject: [PATCH 4/5] AliYun: Enable platform identification and enable by + default. + +AliYun cloud platform is now identifying themselves by setting the dmi +product id to the well known value "Alibaba Cloud ECS". 
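
Putting the EC2-identification patches above together: platform identification is a chain of checkers over the DMI/hypervisor data collected by _collect_platform_data(), falling back to Unknown; AliYun, as described above, checks the DMI product name separately in its own datasource. A toy version of the chain over a hand-written data dict (all values below are hypothetical):

    def identify_platform(data):
        """Toy version of the checker chain above; 'data' has the shape
        returned by _collect_platform_data(): uuid, uuid_source, serial."""
        def is_aws(d):
            return (d['uuid'].startswith('ec2') and
                    (d['uuid_source'] == 'hypervisor' or
                     d['uuid'] == d['serial']))

        def is_brightbox(d):
            return d['serial'].endswith('brightbox.com')

        if is_aws(data):
            return 'AWS'
        if is_brightbox(data):
            return 'Brightbox'
        return 'Unknown'

    # An ec2-prefixed uuid matching the serial is treated as AWS:
    sample = {'uuid': 'ec21c8e0-0000-0000-0000-000000000000',
              'uuid_source': 'dmi',
              'serial': 'ec21c8e0-0000-0000-0000-000000000000'}
    assert identify_platform(sample) == 'AWS'
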
The changes here +identify that properly in tools/ds-identify and in the DataSourceAliYun. + +Since the 'get_data' for AliYun now identifies itself correctly, we can +enable AliYun by default. + +LP: #1638931 +(cherry picked from commit 4a60af54957634920e84a928aa22b4fc9a6dfd11) + +Resolves: rhbz#1496113 + +Signed-off-by: Ryan McCabe +--- + cloudinit/settings.py | 1 + + cloudinit/sources/DataSourceAliYun.py | 14 ++++++- + cloudinit/sources/DataSourceEc2.py | 7 ++++ + tests/unittests/test_datasource/test_aliyun.py | 51 +++++++++++++++++++++++++- + tests/unittests/test_datasource/test_common.py | 1 + + 5 files changed, 71 insertions(+), 3 deletions(-) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 0d39aab7..d6046dc6 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -27,6 +27,7 @@ CFG_BUILTIN = { + 'MAAS', + 'GCE', + 'OpenStack', ++ 'AliYun', + 'Ec2', + 'CloudSigma', + 'CloudStack', +diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py +index 9debe947..380e27cb 100644 +--- a/cloudinit/sources/DataSourceAliYun.py ++++ b/cloudinit/sources/DataSourceAliYun.py +@@ -4,8 +4,10 @@ import os + + from cloudinit import sources + from cloudinit.sources import DataSourceEc2 as EC2 ++from cloudinit import util + + DEF_MD_VERSION = "2016-01-01" ++ALIYUN_PRODUCT = "Alibaba Cloud ECS" + + + class DataSourceAliYun(EC2.DataSourceEc2): +@@ -24,7 +26,17 @@ class DataSourceAliYun(EC2.DataSourceEc2): + + @property + def cloud_platform(self): +- return EC2.Platforms.ALIYUN ++ if self._cloud_platform is None: ++ if _is_aliyun(): ++ self._cloud_platform = EC2.Platforms.ALIYUN ++ else: ++ self._cloud_platform = EC2.Platforms.NO_EC2_METADATA ++ ++ return self._cloud_platform ++ ++ ++def _is_aliyun(): ++ return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT + + + def parse_public_keys(public_keys): +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index c7df8060..31825665 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -32,7 +32,12 @@ class Platforms(object): + AWS = "AWS" + BRIGHTBOX = "Brightbox" + SEEDED = "Seeded" ++ # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', ++ # then an attempt at the Ec2 Metadata service will be made. + UNKNOWN = "Unknown" ++ # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata ++ # service available. No attempt at the Ec2 Metadata service will be made. ++ NO_EC2_METADATA = "No-EC2-Metadata" + + + class DataSourceEc2(sources.DataSource): +@@ -65,6 +70,8 @@ class DataSourceEc2(sources.DataSource): + strict_mode, self.cloud_platform) + if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + return False ++ elif self.cloud_platform == Platforms.NO_EC2_METADATA: ++ return False + + try: + if not self.wait_for_metadata_service(): +diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py +index c16d1a6e..990bff2c 100644 +--- a/tests/unittests/test_datasource/test_aliyun.py ++++ b/tests/unittests/test_datasource/test_aliyun.py +@@ -2,6 +2,7 @@ + + import functools + import httpretty ++import mock + import os + + from .. 
import helpers as test_helpers +@@ -111,15 +112,29 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): + self.assertEqual(self.default_metadata['hostname'], + self.ds.get_hostname()) + ++ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + @httpretty.activate +- def test_with_mock_server(self): ++ def test_with_mock_server(self, m_is_aliyun): ++ m_is_aliyun.return_value = True + self.regist_default_server() +- self.ds.get_data() ++ ret = self.ds.get_data() ++ self.assertEqual(True, ret) ++ self.assertEqual(1, m_is_aliyun.call_count) + self._test_get_data() + self._test_get_sshkey() + self._test_get_iid() + self._test_host_name() + ++ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") ++ @httpretty.activate ++ def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): ++ """If is_aliyun returns false, then get_data should return False.""" ++ m_is_aliyun.return_value = False ++ self.regist_default_server() ++ ret = self.ds.get_data() ++ self.assertEqual(1, m_is_aliyun.call_count) ++ self.assertEqual(False, ret) ++ + def test_parse_public_keys(self): + public_keys = {} + self.assertEqual(ay.parse_public_keys(public_keys), []) +@@ -149,4 +164,36 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']['openssh-key']) + ++ ++class TestIsAliYun(test_helpers.CiTestCase): ++ ALIYUN_PRODUCT = 'Alibaba Cloud ECS' ++ read_dmi_data_expected = [mock.call('system-product-name')] ++ ++ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data") ++ def test_true_on_aliyun_product(self, m_read_dmi_data): ++ """Should return true if the dmi product data has expected value.""" ++ m_read_dmi_data.return_value = self.ALIYUN_PRODUCT ++ ret = ay._is_aliyun() ++ self.assertEqual(self.read_dmi_data_expected, ++ m_read_dmi_data.call_args_list) ++ self.assertEqual(True, ret) ++ ++ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data") ++ def test_false_on_empty_string(self, m_read_dmi_data): ++ """Should return false on empty value returned.""" ++ m_read_dmi_data.return_value = "" ++ ret = ay._is_aliyun() ++ self.assertEqual(self.read_dmi_data_expected, ++ m_read_dmi_data.call_args_list) ++ self.assertEqual(False, ret) ++ ++ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data") ++ def test_false_on_unknown_string(self, m_read_dmi_data): ++ """Should return false on an unrelated string.""" ++ m_read_dmi_data.return_value = "cubs win" ++ ret = ay._is_aliyun() ++ self.assertEqual(self.read_dmi_data_expected, ++ m_read_dmi_data.call_args_list) ++ self.assertEqual(False, ret) ++ + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py +index c08717f3..7649b9ae 100644 +--- a/tests/unittests/test_datasource/test_common.py ++++ b/tests/unittests/test_datasource/test_common.py +@@ -36,6 +36,7 @@ DEFAULT_LOCAL = [ + ] + + DEFAULT_NETWORK = [ ++ AliYun.DataSourceAliYun, + AltCloud.DataSourceAltCloud, + Azure.DataSourceAzureNet, + Bigstep.DataSourceBigstep, +-- +2.13.5 + diff --git a/SOURCES/0026-Fix-alibaba-cloud-unit-tests-to-work-with-0.7.9.patch b/SOURCES/0026-Fix-alibaba-cloud-unit-tests-to-work-with-0.7.9.patch new file mode 100644 index 0000000..1755547 --- /dev/null +++ b/SOURCES/0026-Fix-alibaba-cloud-unit-tests-to-work-with-0.7.9.patch @@ -0,0 +1,29 @@ +From b87c46fe008dc4df50b0103d598d218f8dd26735 Mon Sep 17 00:00:00 2001 +From: Ryan McCabe +Date: Tue, 5 Sep 2017 13:02:00 -0400 
+Subject: [PATCH 5/5] Fix alibaba cloud unit tests to work with 0.7.9 + +Resolves: rhbz#1496113 +X-downstream-only: Yes + +Signed-off-by: Ryan McCabe +--- + tests/unittests/test_datasource/test_aliyun.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py +index 990bff2c..90454c8d 100644 +--- a/tests/unittests/test_datasource/test_aliyun.py ++++ b/tests/unittests/test_datasource/test_aliyun.py +@@ -165,7 +165,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): + public_keys['key-pair-0']['openssh-key']) + + +-class TestIsAliYun(test_helpers.CiTestCase): ++class TestIsAliYun(test_helpers.HttprettyTestCase): + ALIYUN_PRODUCT = 'Alibaba Cloud ECS' + read_dmi_data_expected = [mock.call('system-product-name')] + +-- +2.13.5 + diff --git a/SOURCES/0027-systemd-create-run-cloud-init-enabled.patch b/SOURCES/0027-systemd-create-run-cloud-init-enabled.patch new file mode 100644 index 0000000..7b822e6 --- /dev/null +++ b/SOURCES/0027-systemd-create-run-cloud-init-enabled.patch @@ -0,0 +1,29 @@ +From e210660ecaee3b44a6e8b4e0fe39e4055450696e Mon Sep 17 00:00:00 2001 +From: Ryan McCabe +Date: Fri, 10 Nov 2017 10:03:44 -0500 +Subject: [PATCH] Create an explicit enabled file in /run/cloud-init/ to + control whether the dhclient and NM hooks run on Azure. + +X-downstream-only: Yes +Resolves: rhbz#1474226 +--- + rhel/systemd/cloud-init-local.service | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +index 8174937b..047907c4 100644 +--- a/rhel/systemd/cloud-init-local.service ++++ b/rhel/systemd/cloud-init-local.service +@@ -14,6 +14,9 @@ ConditionKernelCommandLine=!cloud-init=disabled + + [Service] + Type=oneshot ++ExecStartPre=/bin/mkdir -p /run/cloud-init ++ExecStartPre=/sbin/restorecon /run/cloud-init ++ExecStartPre=/usr/bin/touch /run/cloud-init/enabled + ExecStart=/usr/bin/cloud-init init --local + ExecStart=/bin/touch /run/cloud-init/network-config-ready + RemainAfterExit=yes +-- +2.13.6 + diff --git a/SOURCES/cloud-init-README.rhel b/SOURCES/cloud-init-README.rhel new file mode 100644 index 0000000..aa29630 --- /dev/null +++ b/SOURCES/cloud-init-README.rhel @@ -0,0 +1,5 @@ +The following cloud-init modules are currently unsupported on this OS: + - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) + - byobu ('byobu_by_default' option) + - chef + - grub_dpkg diff --git a/SOURCES/cloud-init-rhel.cfg b/SOURCES/cloud-init-rhel.cfg new file mode 100644 index 0000000..986f241 --- /dev/null +++ b/SOURCES/cloud-init-rhel.cfg @@ -0,0 +1,66 @@ +users: + - default + +disable_root: 1 +ssh_pwauth: 0 + +mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] +resize_rootfs_tmp: /dev +ssh_deletekeys: 0 +ssh_genkeytypes: ~ +syslog_fix_perms: ~ + +cloud_init_modules: + - migrator + - bootcmd + - write-files + - growpart + - resizefs + - set_hostname + - update_hostname + - update_etc_hosts + - rsyslog + - users-groups + - ssh + +cloud_config_modules: + - mounts + - locale + - set-passwords + - rh_subscription + - yum-add-repo + - package-update-upgrade-install + - timezone + - puppet + - chef + - salt-minion + - mcollective + - disable-ec2-metadata + - runcmd + +cloud_final_modules: + - rightscale_userdata + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - 
scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + +system_info: + default_user: + name: cloud-user + lock_passwd: true + gecos: Cloud User + groups: [wheel, adm, systemd-journal] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + distro: rhel + paths: + cloud_dir: /var/lib/cloud + templates_dir: /etc/cloud/templates + ssh_svcname: sshd + +# vim:syntax=yaml diff --git a/SOURCES/cloud-init-tmpfiles.conf b/SOURCES/cloud-init-tmpfiles.conf new file mode 100644 index 0000000..0c6d2a3 --- /dev/null +++ b/SOURCES/cloud-init-tmpfiles.conf @@ -0,0 +1 @@ +d /run/cloud-init 0700 root root - - diff --git a/SPECS/cloud-init.spec b/SPECS/cloud-init.spec new file mode 100644 index 0000000..055443b --- /dev/null +++ b/SPECS/cloud-init.spec @@ -0,0 +1,263 @@ +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%{!?license: %global license %%doc} + +# The only reason we are archful is because dmidecode is ExclusiveArch +# https://bugzilla.redhat.com/show_bug.cgi?id=1067089 +%global debug_package %{nil} + +Name: cloud-init +Version: 0.7.9 +Release: 9%{?dist}.2 +Summary: Cloud instance init scripts + +Group: System Environment/Base +License: GPLv3 +URL: http://launchpad.net/cloud-init +Source0: https://launchpad.net/cloud-init/trunk/%{version}/+download/%{name}-%{version}.tar.gz +Source1: cloud-init-rhel.cfg +Source2: cloud-init-README.rhel +Source3: cloud-init-tmpfiles.conf + +# The following line stops 'rdopkg update-patches' from inserting Patch +# directives in the middle of our Source directives. +# +# patches_base=0.7.9 +Patch0001: 0001-configuration-changes-for-RHEL-package.patch +Patch0002: 0002-do-not-use-git-to-determine-version.patch +Patch0003: 0003-util-teach-write_file-about-copy_mode-option.patch +Patch0004: 0004-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch +Patch0005: 0005-url_helper-fail-gracefully-if-oauthlib-is-not-availa.patch +Patch0006: 0006-rsyslog-replace-with-stop.patch +Patch0007: 0007-OpenStack-Use-timeout-and-retries-from-config-in-get.patch +Patch0008: 0008-correct-errors-in-cloudinit-net-sysconfig.py.patch +Patch0009: 0009-net-do-not-raise-exception-for-3-nameservers.patch +Patch0010: 0010-net-support-both-ipv4-and-ipv6-gateways-in-sysconfig.patch +Patch0011: 0011-systemd-replace-generator-with-unit-conditionals.patch +Patch0012: 0012-OpenStack-add-dvs-to-the-list-of-physical-link-types.patch +Patch0013: 0013-Bounce-network-interface-for-Azure-when-using-the-bu.patch +Patch0014: 0014-limit-permissions-on-def_log_file.patch +Patch0015: 0015-remove-tee-command-from-logging-configuration.patch +Patch0016: 0016-add-power-state-change-module-to-cloud_final_modules.patch +Patch0017: 0017-sysconfig-Raise-ValueError-when-multiple-default-gat.patch +Patch0018: 0018-Fix-dual-stack-IPv4-IPv6-configuration-for-RHEL.patch +Patch0019: 0019-Add-missing-sysconfig-unit-test-data.patch +Patch0020: 0020-Fix-ipv6-subnet-detection.patch +#Patch0021: 0021-azure-ensure-that-networkmanager-hook-script-runs.patch +Patch0022: 0022-RHEL-CentOS-Fix-default-routes-for-IPv4-IPv6-configu.patch +Patch0023: 0023-DatasourceEc2-add-warning-message-when-not-on-AWS.patch +Patch0024: 0024-Identify-Brightbox-as-an-Ec2-datasource-user.patch +Patch0025: 0025-AliYun-Enable-platform-identification-and-enable-by-.patch +Patch0026: 0026-Fix-alibaba-cloud-unit-tests-to-work-with-0.7.9.patch +Patch0027: 0027-systemd-create-run-cloud-init-enabled.patch + +# Deal with noarch -> 
arch +# https://bugzilla.redhat.com/show_bug.cgi?id=1067089 +Obsoletes: cloud-init < 0.7.5-3 + +BuildRequires: python-devel +BuildRequires: python-setuptools +BuildRequires: systemd-units +BuildRequires: git + +%ifarch %{?ix86} x86_64 ia64 +Requires: dmidecode +%endif +Requires: e2fsprogs +Requires: iproute +Requires: libselinux-python +Requires: net-tools +Requires: policycoreutils-python +Requires: procps +Requires: python-configobj +Requires: python-jinja2 +Requires: python-jsonpatch +Requires: python-prettytable +Requires: python-requests +Requires: python-setuptools +Requires: PyYAML +Requires: pyserial +Requires: shadow-utils +Requires(post): systemd-units +Requires(preun): systemd-units +Requires(postun): systemd-units + +%description +Cloud-init is a set of init scripts for cloud instances. Cloud instances +need special scripts to run during initialization to retrieve and install +ssh keys and to let the user run various scripts. + + +%prep +# on el7, autosetup -S git was failing with patches that +# create new files. rpm 4.11.3 and later has -S git_am, but +# el7 only has 4.11.1. +%autosetup -p1 -n %{name}-%{version} -S git + + +%build +%{__python} setup.py build + + +%install +rm -rf $RPM_BUILD_ROOT +%{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT + +# Don't ship the tests +rm -r $RPM_BUILD_ROOT%{python_sitelib}/tests + +mkdir -p $RPM_BUILD_ROOT/var/lib/cloud + +# /run/cloud-init needs a tmpfiles.d entry +mkdir -p $RPM_BUILD_ROOT/run/cloud-init +mkdir -p $RPM_BUILD_ROOT/%{_tmpfilesdir} +cp -p rhel/cloud-init-tmpfiles.conf $RPM_BUILD_ROOT/%{_tmpfilesdir}/%{name}.conf + +# We supply our own config file since our software differs from Ubuntu's. +cp -p rhel/cloud.cfg $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg + +mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d +cp -p tools/21-cloudinit.conf $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +# Make installed NetworkManager hook name less generic +mv $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/hook-network-manager \ + $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook + +# Install our own systemd units (rhbz#1440831) +mkdir -p $RPM_BUILD_ROOT%{_unitdir} +cp rhel/systemd/* $RPM_BUILD_ROOT%{_unitdir}/ + + +%clean +rm -rf $RPM_BUILD_ROOT + + +%post +if [ $1 -eq 1 ] ; then + # Initial installation + # Enabled by default per "runs once then goes away" exception + /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : +fi + +%preun +if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : + # One-shot services -> no need to stop +fi + +%postun +/bin/systemctl daemon-reload >/dev/null 2>&1 || : +# One-shot services -> no need to restart + + +%files +%license LICENSE +%doc ChangeLog rhel/README.rhel +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%doc %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir %{_sysconfdir}/cloud/templates +%config(noreplace) 
%{_sysconfdir}/cloud/templates/* +%{_unitdir}/cloud-config.service +%{_unitdir}/cloud-config.target +%{_unitdir}/cloud-final.service +%{_unitdir}/cloud-init-local.service +%{_unitdir}/cloud-init.service +%{_tmpfilesdir}/%{name}.conf +%{python_sitelib}/* +%{_libexecdir}/%{name} +%{_bindir}/cloud-init* +%doc %{_datadir}/doc/%{name} +%dir /run/cloud-init +%dir /var/lib/cloud +/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook +%{_udevrulesdir}/66-azure-ephemeral.rules + +%dir %{_sysconfdir}/rsyslog.d +%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +%changelog +* Thu Dec 21 2017 Ryan McCabe 0.7.9-9.2 +- Prevent Azure NM and dhclient hooks from running when cloud-init is + disabled (rhbz#1530127) + +* Tue Sep 26 2017 Ryan McCabe 0.7.9-9.1 +- Support AliCloud datasource (rhbz#1496113) + +* Thu Jun 22 2017 Lars Kellogg-Stedman 0.7.9-9 +- RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. (rhbz#1438082) +- azure: ensure that networkmanager hook script runs (rhbz#1440831 rhbz#1460206) +- Fix ipv6 subnet detection (rhbz#1438082) + +* Tue May 23 2017 Lars Kellogg-Stedman 0.7.9-8 +- Update patches + +* Mon May 22 2017 Lars Kellogg-Stedman 0.7.9-7 +- Add missing sysconfig unit test data (rhbz#1438082) +- Fix dual stack IPv4/IPv6 configuration for RHEL (rhbz#1438082) +- sysconfig: Raise ValueError when multiple default gateways are present. (rhbz#1438082) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- Do not write NM_CONTROLLED=no in generated interface config files (rhbz#1385172) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-6 +- add power-state-change module to cloud_final_modules (rhbz#1252477) +- remove 'tee' command from logging configuration (rhbz#1424612) +- limit permissions on def_log_file (rhbz#1424612) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- OpenStack: add 'dvs' to the list of physical link types. (rhbz#1442783) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-5 +- systemd: replace generator with unit conditionals (rhbz#1440831) + +* Thu Apr 13 2017 Charalampos Stratakis 0.7.9-4 +- Import to RHEL 7 +Resolves: rhbz#1427280 + +* Tue Mar 07 2017 Lars Kellogg-Stedman 0.7.9-3 +- fixes for network config generation +- avoid dependency cycle at boot (rhbz#1420946) + +* Tue Jan 17 2017 Lars Kellogg-Stedman 0.7.9-2 +- use timeout from datasource config in openstack get_data (rhbz#1408589) + +* Thu Dec 01 2016 Lars Kellogg-Stedman - 0.7.9-1 +- Rebased on upstream 0.7.9. 
+- Remove dependency on run-parts + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-8 +- make rh_subscription plugin do nothing in the absence of a valid + configuration [RH:1295953] +- move rh_subscription module to cloud_config stage + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-7 +- correct permissions on /etc/ssh/sshd_config [RH:1296191] + +* Thu Sep 03 2015 Lars Kellogg-Stedman - 0.7.6-6 +- rebuild for ppc64le + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-5 +- bump revision for new build + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-4 +- ensure rh_subscription plugin is enabled by default + +* Wed Apr 29 2015 Lars Kellogg-Stedman - 0.7.6-3 +- added dependency on python-jinja2 [RH:1215913] +- added rhn_subscription plugin [RH:1227393] +- require pyserial to support smartos data source [RH:1226187] + +* Fri Jan 16 2015 Lars Kellogg-Stedman - 0.7.6-2 +- Rebased RHEL version to Fedora rawhide +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1246485 +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1411829 + +* Fri Nov 14 2014 Colin Walters - 0.7.6-1 +- New upstream version [RH:974327] +- Drop python-cheetah dependency (same as above bug)
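
The /run/cloud-init/enabled marker created by the cloud-init-local.service ExecStartPre lines in the patch above exists so the Azure dhclient and NetworkManager hooks can decide whether to act at all. The shipped hook scripts themselves are not part of this patch set; the gate they rely on amounts to a file-existence check, sketched here in Python under that assumption.

    import os

    def cloud_init_enabled(marker="/run/cloud-init/enabled"):
        """True once cloud-init-local.service has dropped the marker file."""
        return os.path.exists(marker)

    if not cloud_init_enabled():
        raise SystemExit(0)  # cloud-init disabled: behave as a no-op
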