diff --git a/.gitignore b/.gitignore index f7e3c78..b69af13 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,18 @@ -SOURCES/kdump-0c2bb28.tar.gz -SOURCES/network-3fc15de.tar.gz +SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz +SOURCES/auto-maintenance-4e47b3809a4e6c1dcd9af57fee117d6df0c261ad.tar.gz +SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz +SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz +SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz +SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz +SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz +SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz +SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz +SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz +SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz +SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz SOURCES/postfix-0.1.tar.gz -SOURCES/selinux-6cd1ec8.tar.gz -SOURCES/storage-1.1.0.tar.gz -SOURCES/timesync-1.0.2.tar.gz +SOURCES/selinux-1.1.1.tar.gz +SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz +SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz +SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz +SOURCES/tlog-1.1.0.tar.gz diff --git a/.rhel-system-roles.metadata b/.rhel-system-roles.metadata index 36d9271..0598dff 100644 --- a/.rhel-system-roles.metadata +++ b/.rhel-system-roles.metadata @@ -1,6 +1,18 @@ -36b200d1c6a8d1cb1ea87e3e9aa8c4f6bbd8155d SOURCES/kdump-0c2bb28.tar.gz -c6192d6b13aa644944fef28d9f90e81ff0e2f6dc SOURCES/network-3fc15de.tar.gz +77e952b62e634c69e36115845b4f24ee3bfe76b7 SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz +30f17b25c0971959762153c9509f0e5a25332d05 SOURCES/auto-maintenance-4e47b3809a4e6c1dcd9af57fee117d6df0c261ad.tar.gz +7017c00e2ceede1f6019ba17a56e0145e6012013 
SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz +469a1a39a19d346c10bf07071a7af52832885047 SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz +838ed06d8d092271fff04bd5e7c16db4661e8567 SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz +fa3d5daf6cf1ceeaa87f58c16e11153cf250e2fa SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz +471863c062a32a37a18c0ee1b7f0c50387baec99 SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz +60efc730800600f87e386e16730980ea08417d34 SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz +821d8ebef2d30a41f0fa65bdc5e550f09b375370 SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz +66b84d088e2c3989f00b3151cc7fdc40f768f9a5 SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz +0e4e133b75e245d17c0c5a1097ab95f047ae6f65 SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz +c2d1aaca43cbe787ee7b1e41e875a76b8f95831d SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz 66c82331f4ac9598c506c3999965b4d07dbfe49d SOURCES/postfix-0.1.tar.gz -246383bd6823533ed3a51a0501b75e38ba852908 SOURCES/selinux-6cd1ec8.tar.gz -5cc010861c00ac6c3222dc0e93a216b4fea331da SOURCES/storage-1.1.0.tar.gz -2dc22970e0e660921acfbbdfab5282ca32d70947 SOURCES/timesync-1.0.2.tar.gz +f2ad38bd93487962de511b1f4bc9dc6607a5ab36 SOURCES/selinux-1.1.1.tar.gz +aef51c665e61166e091440862cfa4e6a8fe3c29d SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz +8b7d7c14e76aa1a872f22d5cd6d3c9a850868ed3 SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz +ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz +486d7b845348755e7f189afd95f32bbe97c74661 SOURCES/tlog-1.1.0.tar.gz diff --git a/SOURCES/collection_readme.sh b/SOURCES/collection_readme.sh new file mode 100755 index 0000000..94e8cae --- /dev/null +++ b/SOURCES/collection_readme.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -euxo 
pipefail + +readme_md=${1:-"lsr_role2collection/collection_readme.md"} + +sed -i -e '/## Currently supported distributions/{:1;/## Dependencies/!{N;b 1};s|.*|## Dependencies|}' \ + -e 's/Linux/Red Hat Enterprise Linux/g' \ + -e 's/Ansible Galaxy/Automation Hub/g' \ + -e 's/fedora\(.\)linux_system_roles/redhat\1rhel_system_roles/g' \ + -e 's/linux-system-roles/rhel-system-roles/g' \ + -e '/## Documentation/{:a;/## Support/!{N;b a};s|.*|## Documentation\nThe official RHEL System Roles documentation can be found in the [Product Documentation section of the Red Hat Customer Portal](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/administration_and_configuration_tasks_using_system_roles_in_rhel/index).\n\n## Support|}' \ + -e 's/ $//' \ + $readme_md diff --git a/SOURCES/kdump-fix-newline.diff b/SOURCES/kdump-fix-newline.diff new file mode 100644 index 0000000..52a1a7e --- /dev/null +++ b/SOURCES/kdump-fix-newline.diff @@ -0,0 +1,28 @@ +commit cafd95d0b03360d12e86170eb10fc1fc3dcade06 +Author: Pavel Cahyna +Date: Thu Jan 14 11:42:48 2021 +0100 + + Get rid of the extra final newline in string + + Use the `-` chomping indicator to indicate that the trailing newline is + not intended as a part of the string. + https://yaml.org/spec/1.1/#chomping/ + + The trailing newline was causing an actual problem in the test. + + Also use the `>` folded style, which is more appropriate here than the + `|` literal style. + +diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml +index 6d3699c..d3503f7 100644 +--- a/tests/tests_ssh.yml ++++ b/tests/tests_ssh.yml +@@ -27,7 +27,7 @@ + - include_role: + name: linux-system-roles.kdump + vars: +- kdump_ssh_user: | ++ kdump_ssh_user: >- + {{ hostvars[kdump_ssh_server_outside]['ansible_user_id'] }} + # This is the outside address. Ansible will connect to it to + # copy the ssh key. 
diff --git a/SOURCES/kdump-meta-el8.diff b/SOURCES/kdump-meta-el8.diff new file mode 100644 index 0000000..d8f2764 --- /dev/null +++ b/SOURCES/kdump-meta-el8.diff @@ -0,0 +1,13 @@ +diff --git a/meta/main.yml b/meta/main.yml +index 2478fa6..ad8f4c6 100644 +--- a/meta/main.yml ++++ b/meta/main.yml +@@ -7,6 +7,6 @@ galaxy_info: + min_ansible_version: 2.4 + platforms: + - name: Fedora +- versions: [ 27, 28 ] ++ versions: [ 31, 32 ] + - name: EL +- versions: [ 6, 7 ] ++ versions: [ 6, 7, 8 ] diff --git a/SOURCES/kdump-tier1-tags.diff b/SOURCES/kdump-tier1-tags.diff index e3c9fde..f80af83 100644 --- a/SOURCES/kdump-tier1-tags.diff +++ b/SOURCES/kdump-tier1-tags.diff @@ -45,30 +45,25 @@ index 0000000..2035dfc + with_items: "{{ restore_services }}" + tags: tests::cleanup diff --git a/tests/tests_default.yml b/tests/tests_default.yml -index 4c93830..60d7967 100644 +index af0b2a0..6ce5241 100644 --- a/tests/tests_default.yml +++ b/tests/tests_default.yml -@@ -1,6 +1,18 @@ - - - name: Ensure that the rule runs with default parameters -+ tags: -+ - 'tests::tier1' - hosts: all +@@ -3,3 +3,13 @@ roles: - - kdump + - linux-system-roles.kdump + + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml -index 2763fbd..617acb3 100644 +index eba31a0..857aab8 100644 --- a/tests/tests_default_wrapper.yml +++ b/tests/tests_default_wrapper.yml @@ -1,6 +1,9 @@ @@ -76,7 +71,7 @@ index 2763fbd..617acb3 100644 - name: Create static inventory from hostvars hosts: all + tags: -+ - 'tests::tier1' ++# - 'tests::tier1' + - 'tests::slow' tasks: - name: create temporary file @@ -86,15 +81,62 @@ index 2763fbd..617acb3 100644 - name: Run tests_default.yml normally + tags: -+ - 'tests::tier1' ++# - 
'tests::tier1' + - 'tests::slow' import_playbook: tests_default.yml - name: Run tests_default.yml in check_mode hosts: all + tags: -+ - 'tests::tier1' ++# - 'tests::tier1' + - 'tests::slow' tasks: - name: Run ansible-playbook with tests_default.yml in check mode - command: ansible-playbook -vvv -i {{ tempinventory.path }} --check tests_default.yml + command: > +diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml +index d12e884..6d3699c 100644 +--- a/tests/tests_ssh.yml ++++ b/tests/tests_ssh.yml +@@ -10,6 +10,13 @@ + # this is the address at which the ssh dump server can be reached + # from the managed host. Dumps will be uploaded there. + kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}" ++ tags: ++ # this test executes some tasks on localhost and relies on ++ # localhost being a different host than the managed host ++ # (localhost is being used as a second host in multihost ++ # scenario). This also means that localhost must be capable ++ # enough (not just a container - must be runnign a sshd). 
++ - 'tests::multihost_localhost' + + tasks: + - name: gather facts from {{ kdump_ssh_server_outside }} +diff --git a/tests/tests_ssh_wrapper.yml b/tests/tests_ssh_wrapper.yml +index 2203f3f..96a764e 100644 +--- a/tests/tests_ssh_wrapper.yml ++++ b/tests/tests_ssh_wrapper.yml +@@ -1,6 +1,8 @@ + --- + - name: Create static inventory from hostvars + hosts: all ++ tags: ++ - 'tests::slow' + tasks: + - name: create temporary file + tempfile: +@@ -17,10 +19,15 @@ + + + - name: Run tests_ssh.yml normally ++ tags: ++ - 'tests::slow' + import_playbook: tests_ssh.yml + + - name: Run tests_ssh.yml in check_mode + hosts: all ++ tags: ++ - 'tests::slow' ++ - 'tests::multihost_localhost' + tasks: + - name: Run ansible-playbook with tests_ssh.yml in check mode + command: | diff --git a/SOURCES/metrics-mssql-x86.diff b/SOURCES/metrics-mssql-x86.diff new file mode 100644 index 0000000..80bb0e5 --- /dev/null +++ b/SOURCES/metrics-mssql-x86.diff @@ -0,0 +1,24 @@ +From 7ff86f2fa05998afcd8ae87d9cdd660ef5b6ee2c Mon Sep 17 00:00:00 2001 +From: Jan Kurik +Date: Thu, 18 Feb 2021 17:09:48 +1100 +Subject: [PATCH] Update mssql test to exclude non-x86_64 architectures + +pcp-pmda-mssql (and SQL Server itself) are x86_64-only. 
+--- + tests/tests_sanity_mssql.yml | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/tests/tests_sanity_mssql.yml b/tests/tests_sanity_mssql.yml +index 6f1e2cc..8602c36 100644 +--- a/tests/tests_sanity_mssql.yml ++++ b/tests/tests_sanity_mssql.yml +@@ -12,7 +12,8 @@ + - meta: end_host + when: (ansible_distribution in ['RedHat'] and + ( ansible_facts['distribution_version'] is version('8.4', '<'))) or +- ansible_distribution not in ['Fedora', 'RedHat'] ++ ansible_distribution not in ['Fedora', 'RedHat'] or ++ ansible_architecture not in ['x86_64'] + + - name: Save state of services + import_tasks: get_services_state.yml diff --git a/SOURCES/network-ansible-test.diff b/SOURCES/network-ansible-test.diff new file mode 100644 index 0000000..8f88e21 --- /dev/null +++ b/SOURCES/network-ansible-test.diff @@ -0,0 +1,835 @@ +From 7ae16e9ff5291f06ba0d7224a0d6c36b780ea0a2 Mon Sep 17 00:00:00 2001 +From: Rich Megginson +Date: Wed, 3 Mar 2021 11:37:56 -0700 +Subject: [PATCH] fix most ansible-test issues, suppress the rest + +Automation Hub, and possibly Galaxy in the future, require the +collection to be screened with `ansible-test sanity` among other +checks. The role had a number of issues: +* Use `AssertionError` instead of `assert` +* Use of `logging` module not in accordance with standards, but these + are ok and the errors were suppressed +* Several import errors which are ok because they are checked + elsewhere +* Many of the module files use `#!` shebang - not sure why, but + the usage is allowed +* __init__.py in the module_utils directories must be empty, so a + new file myerror.py was added to move the code from __init__.py +* The documentation block in the module was not properly constructed + or formatted. 
+* shellcheck issues, including removing unused files +* use `dummy` instead of `_` (underscore) for variables that are + unused + +add WARNING to module docs - collection users should not use directly + +Signed-off-by: Rich Megginson +(cherry picked from commit 7459a29e9104bf01987399153baf0a1c1df05929) +--- + .github/workflows/tox.yml | 4 +- + .sanity-ansible-ignore-2.9.txt | 47 ++++++++++ + README.md | 2 +- + library/network_connections.py | 88 ++++++++++++------- + module_utils/network_lsr/__init__.py | 7 -- + .../network_lsr/argument_validator.py | 9 +- + module_utils/network_lsr/ethtool.py | 6 +- + module_utils/network_lsr/myerror.py | 11 +++ + module_utils/network_lsr/nm/__init__.py | 4 + + .../network_lsr/nm/active_connection.py | 35 ++++---- + module_utils/network_lsr/nm/client.py | 4 + + module_utils/network_lsr/nm/connection.py | 18 ++-- + module_utils/network_lsr/nm/error.py | 4 + + module_utils/network_lsr/nm/provider.py | 8 +- + module_utils/network_lsr/nm_provider.py | 4 + + module_utils/network_lsr/utils.py | 10 ++- + tests/ensure_provider_tests.py | 8 +- + tests/get_coverage.sh | 6 +- + tests/get_total_coverage.sh | 2 +- + tests/integration/test_ethernet.py | 4 +- + tests/merge_coverage.sh | 3 + + tests/setup_module_utils.sh | 41 --------- + tox.ini | 3 - + 23 files changed, 199 insertions(+), 129 deletions(-) + create mode 100644 .sanity-ansible-ignore-2.9.txt + create mode 100644 module_utils/network_lsr/myerror.py + delete mode 100755 tests/setup_module_utils.sh + +diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml +index 207bcba..ba0f4c6 100644 +--- a/.github/workflows/tox.yml ++++ b/.github/workflows/tox.yml +@@ -3,7 +3,7 @@ name: tox + on: # yamllint disable-line rule:truthy + - pull_request + env: +- TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0" ++ TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0" + LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*' + LSR_MSCENARIOS: default + # 
LSR_EXTRA_PACKAGES: "libdbus-1-dev libgirepository1.0-dev python3-dev" +@@ -36,7 +36,7 @@ jobs: + toxenvs="py${toxpyver}" + case "$toxpyver" in + 27) toxenvs="${toxenvs},coveralls,flake8,pylint" ;; +- 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection" ;; ++ 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection,ansible-test" ;; + 37) toxenvs="${toxenvs},coveralls" ;; + 38) toxenvs="${toxenvs},coveralls" ;; + esac +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..439197e +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1,47 @@ ++tests/network/ensure_provider_tests.py compile-2.7!skip ++tests/network/ensure_provider_tests.py compile-3.5!skip ++plugins/module_utils/network_lsr/nm/__init__.py empty-init!skip ++plugins/module_utils/network_lsr/nm/active_connection.py import-2.7!skip ++plugins/module_utils/network_lsr/nm/client.py import-2.7!skip ++plugins/module_utils/network_lsr/nm/connection.py import-2.7!skip ++plugins/module_utils/network_lsr/nm/provider.py import-2.7!skip ++plugins/module_utils/network_lsr/nm/active_connection.py import-3.5!skip ++plugins/module_utils/network_lsr/nm/client.py import-3.5!skip ++plugins/module_utils/network_lsr/nm/connection.py import-3.5!skip ++plugins/module_utils/network_lsr/nm/provider.py import-3.5!skip ++plugins/module_utils/network_lsr/nm/active_connection.py import-3.6!skip ++plugins/module_utils/network_lsr/nm/client.py import-3.6!skip ++plugins/module_utils/network_lsr/nm/connection.py import-3.6!skip ++plugins/module_utils/network_lsr/nm/provider.py import-3.6!skip ++plugins/module_utils/network_lsr/nm/active_connection.py import-3.7!skip ++plugins/module_utils/network_lsr/nm/client.py import-3.7!skip ++plugins/module_utils/network_lsr/nm/connection.py import-3.7!skip ++plugins/module_utils/network_lsr/nm/provider.py import-3.7!skip ++plugins/module_utils/network_lsr/nm/active_connection.py 
import-3.8!skip ++plugins/module_utils/network_lsr/nm/client.py import-3.8!skip ++plugins/module_utils/network_lsr/nm/connection.py import-3.8!skip ++plugins/module_utils/network_lsr/nm/provider.py import-3.8!skip ++plugins/module_utils/network_lsr/__init__.py shebang!skip ++plugins/module_utils/network_lsr/argument_validator.py shebang!skip ++plugins/module_utils/network_lsr/utils.py shebang!skip ++plugins/module_utils/network_lsr/myerror.py shebang!skip ++tests/network/covstats shebang!skip ++tests/network/ensure_provider_tests.py shebang!skip ++tests/network/get_coverage.sh shebang!skip ++tests/network/get_total_coverage.sh shebang!skip ++tests/network/merge_coverage.sh shebang!skip ++tests/network/ensure_provider_tests.py future-import-boilerplate!skip ++tests/network/integration/conftest.py future-import-boilerplate!skip ++tests/network/integration/test_ethernet.py future-import-boilerplate!skip ++tests/network/unit/test_network_connections.py future-import-boilerplate!skip ++tests/network/unit/test_nm_provider.py future-import-boilerplate!skip ++tests/network/ensure_provider_tests.py metaclass-boilerplate!skip ++tests/network/integration/conftest.py metaclass-boilerplate!skip ++tests/network/integration/test_ethernet.py metaclass-boilerplate!skip ++tests/network/unit/test_network_connections.py metaclass-boilerplate!skip ++tests/network/unit/test_nm_provider.py metaclass-boilerplate!skip ++plugins/modules/network_connections.py validate-modules:missing-examples ++plugins/modules/network_connections.py validate-modules:missing-gplv3-license ++plugins/modules/network_connections.py validate-modules:no-default-for-required-parameter ++plugins/modules/network_connections.py validate-modules:parameter-type-not-in-doc ++plugins/modules/network_connections.py validate-modules:undocumented-parameter +diff --git a/README.md b/README.md +index c1462b6..c257c08 100644 +--- a/README.md ++++ b/README.md +@@ -145,7 +145,7 @@ a consequence, `state: up` always changes the 
system. + + You can deactivate a connection profile, even if is currently not active. As a consequence, `state: down` always changes the system. + +-Note that if the `state` option is unset, the connection profile’s runtime state will not be changed. ++Note that if the `state` option is unset, the connection profile's runtime state will not be changed. + + + ### `persistent_state` +diff --git a/library/network_connections.py b/library/network_connections.py +index 3224892..3a6e47f 100644 +--- a/library/network_connections.py ++++ b/library/network_connections.py +@@ -2,6 +2,30 @@ + # -*- coding: utf-8 -*- + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ ++DOCUMENTATION = """ ++--- ++module: network_connections ++author: Thomas Haller (@thom311) ++short_description: module for network role to manage connection profiles ++requirements: [pygobject, dbus, NetworkManager] ++version_added: "2.0" ++description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." ++ - | ++ Manage networking profiles (connections) for NetworkManager and ++ initscripts networking providers. Documentation needs to be written. Note ++ that the network_connections module tightly integrates with the network ++ role and currently it is not expected to use this module outside the role. ++ Thus, consult README.md for examples for the role. The requirements are ++ only for the NetworkManager (nm) provider. 
++options: {} ++""" ++ ++ + import errno + import functools + import os +@@ -16,7 +40,7 @@ import logging + # pylint: disable=import-error, no-name-in-module + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils.network_lsr import ethtool # noqa:E501 +-from ansible.module_utils.network_lsr import MyError # noqa:E501 ++from ansible.module_utils.network_lsr.myerror import MyError # noqa:E501 + + from ansible.module_utils.network_lsr.argument_validator import ( # noqa:E501 + ArgUtil, +@@ -30,22 +54,6 @@ from ansible.module_utils.network_lsr import nm_provider # noqa:E501 + # pylint: enable=import-error, no-name-in-module + + +-DOCUMENTATION = """ +---- +-module: network_connections +-author: "Thomas Haller (thaller@redhat.com)" +-short_description: module for network role to manage connection profiles +-requirements: for 'nm' provider requires pygobject, dbus and NetworkManager. +-version_added: "2.0" +-description: Manage networking profiles (connections) for NetworkManager and +- initscripts networking providers. +-options: Documentation needs to be written. Note that the network_connections +- module tightly integrates with the network role and currently it is not +- expected to use this module outside the role. Thus, consult README.md for +- examples for the role. 
+-""" +- +- + ############################################################################### + PERSISTENT_STATE = "persistent_state" + ABSENT_STATE = "absent" +@@ -772,7 +780,7 @@ class NMUtil: + if compare_flags is None: + compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP + +- return not (not (con_a.compare(con_b, compare_flags))) ++ return con_a.compare(con_b, compare_flags) + + def connection_is_active(self, con): + NM = Util.NM() +@@ -1390,7 +1398,7 @@ class RunEnvironment(object): + def check_mode_set(self, check_mode, connections=None): + c = self._check_mode + self._check_mode = check_mode +- assert ( ++ if not ( + (c is None and check_mode in [CheckMode.PREPARE]) + or ( + c == CheckMode.PREPARE +@@ -1399,7 +1407,8 @@ class RunEnvironment(object): + or (c == CheckMode.PRE_RUN and check_mode in [CheckMode.REAL_RUN]) + or (c == CheckMode.REAL_RUN and check_mode in [CheckMode.DONE]) + or (c == CheckMode.DRY_RUN and check_mode in [CheckMode.DONE]) +- ) ++ ): ++ raise AssertionError("check_mode value is incorrect {0}".format(c)) + self._check_mode_changed(c, check_mode, connections) + + +@@ -1461,7 +1470,8 @@ class RunEnvironmentAnsible(RunEnvironment): + warn_traceback=False, + force_fail=False, + ): +- assert idx >= -1 ++ if not idx >= -1: ++ raise AssertionError("idx {0} is less than -1".format(idx)) + self._log_idx += 1 + self.run_results[idx]["log"].append((severity, msg, self._log_idx)) + if severity == LogLevel.ERROR: +@@ -1598,14 +1608,15 @@ class Cmd(object): + def connections_data(self): + c = self._connections_data + if c is None: +- assert self.check_mode in [ ++ if self.check_mode not in [ + CheckMode.DRY_RUN, + CheckMode.PRE_RUN, + CheckMode.REAL_RUN, +- ] +- c = [] +- for _ in range(0, len(self.connections)): +- c.append({"changed": False}) ++ ]: ++ raise AssertionError( ++ "invalid value {0} for self.check_mode".format(self.check_mode) ++ ) ++ c = [{"changed": False}] * len(self.connections) + self._connections_data = c + return c + +@@ 
-1614,11 +1625,14 @@ class Cmd(object): + c["changed"] = False + + def connections_data_set_changed(self, idx, changed=True): +- assert self._check_mode in [ ++ if self._check_mode not in [ + CheckMode.PRE_RUN, + CheckMode.DRY_RUN, + CheckMode.REAL_RUN, +- ] ++ ]: ++ raise AssertionError( ++ "invalid value {0} for self._check_mode".format(self._check_mode) ++ ) + if not changed: + return + self.connections_data[idx]["changed"] = changed +@@ -1688,7 +1702,10 @@ class Cmd(object): + # modify the connection. + + con = self.connections[idx] +- assert con["state"] in ["up", "down"] ++ if con["state"] not in ["up", "down"]: ++ raise AssertionError( ++ "connection state {0} not 'up' or 'down'".format(con["state"]) ++ ) + + # also check, if the current profile is 'up' with a 'type' (which + # possibly modifies the connection as well) +@@ -1736,7 +1753,9 @@ class Cmd(object): + elif self._check_mode != CheckMode.DONE: + c = CheckMode.DONE + else: +- assert False ++ raise AssertionError( ++ "invalid value {0} for self._check_mode".format(self._check_mode) ++ ) + self._check_mode = c + self.run_env.check_mode_set(c) + return c +@@ -1902,7 +1921,12 @@ class Cmd_nm(Cmd): + + name = connection["name"] + if not name: +- assert connection["persistent_state"] == "absent" ++ if not connection["persistent_state"] == "absent": ++ raise AssertionError( ++ "persistent_state must be 'absent' not {0} when there is no connection 'name'".format( ++ connection["persistent_state"] ++ ) ++ ) + continue + if name in names: + exists = names[name]["nm.exists"] +@@ -1979,7 +2003,7 @@ class Cmd_nm(Cmd): + idx, "ethtool.%s specified but not supported by NM", specified + ) + +- for option, _ in specified.items(): ++ for option in specified.keys(): + nm_name = nm_get_name_fcnt(option) + if not nm_name: + self.log_fatal( +diff --git a/module_utils/network_lsr/__init__.py b/module_utils/network_lsr/__init__.py +index 22c717c..e69de29 100644 +--- a/module_utils/network_lsr/__init__.py ++++ 
b/module_utils/network_lsr/__init__.py +@@ -1,7 +0,0 @@ +-#!/usr/bin/python3 -tt +-# vim: fileencoding=utf8 +-# SPDX-License-Identifier: BSD-3-Clause +- +- +-class MyError(Exception): +- pass +diff --git a/module_utils/network_lsr/argument_validator.py b/module_utils/network_lsr/argument_validator.py +index 24ffdc4..f338489 100644 +--- a/module_utils/network_lsr/argument_validator.py ++++ b/module_utils/network_lsr/argument_validator.py +@@ -2,12 +2,16 @@ + # vim: fileencoding=utf8 + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import posixpath + import socket + import re + + # pylint: disable=import-error, no-name-in-module +-from ansible.module_utils.network_lsr import MyError # noqa:E501 ++from ansible.module_utils.network_lsr.myerror import MyError # noqa:E501 + from ansible.module_utils.network_lsr.utils import Util # noqa:E501 + + UINT32_MAX = 0xFFFFFFFF +@@ -72,7 +76,8 @@ class ArgUtil: + + class ValidationError(MyError): + def __init__(self, name, message): +- Exception.__init__(self, name + ": " + message) ++ # pylint: disable=non-parent-init-called ++ super(ValidationError, self).__init__(name + ": " + message) + self.error_message = message + self.name = name + +diff --git a/module_utils/network_lsr/ethtool.py b/module_utils/network_lsr/ethtool.py +index 21e2152..3246bef 100644 +--- a/module_utils/network_lsr/ethtool.py ++++ b/module_utils/network_lsr/ethtool.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import array + import struct + import fcntl +@@ -46,7 +50,7 @@ def get_perm_addr(ifname): + res = ecmd.tobytes() + except AttributeError: # tobytes() is not available in python2 + res = ecmd.tostring() +- _, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res) ++ dummy, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res) + 
perm_addr = Util.mac_ntoa(perm_addr[:size]) + except IOError: + perm_addr = None +diff --git a/module_utils/network_lsr/myerror.py b/module_utils/network_lsr/myerror.py +new file mode 100644 +index 0000000..f785265 +--- /dev/null ++++ b/module_utils/network_lsr/myerror.py +@@ -0,0 +1,11 @@ ++#!/usr/bin/python3 -tt ++# vim: fileencoding=utf8 ++# SPDX-License-Identifier: BSD-3-Clause ++ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ ++ ++class MyError(Exception): ++ pass +diff --git a/module_utils/network_lsr/nm/__init__.py b/module_utils/network_lsr/nm/__init__.py +index 58fbb5a..74c17cb 100644 +--- a/module_utils/network_lsr/nm/__init__.py ++++ b/module_utils/network_lsr/nm/__init__.py +@@ -1,5 +1,9 @@ + # Relative import is not support by ansible 2.8 yet + # pylint: disable=import-error, no-name-in-module ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + from ansible.module_utils.network_lsr.nm import provider # noqa:E501 + + # pylint: enable=import-error, no-name-in-module +diff --git a/module_utils/network_lsr/nm/active_connection.py b/module_utils/network_lsr/nm/active_connection.py +index a6c5a37..432142c 100644 +--- a/module_utils/network_lsr/nm/active_connection.py ++++ b/module_utils/network_lsr/nm/active_connection.py +@@ -2,6 +2,10 @@ + + # Handle NM.ActiveConnection + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -21,19 +25,15 @@ def deactivate_active_connection(nm_ac, timeout, check_mode): + return False + if not check_mode: + main_loop = client.get_mainloop(timeout) +- logging.debug( +- "Deactivating {id} with timeout {timeout}".format( +- id=nm_ac.get_id(), timeout=timeout +- ) +- ) ++ logging.debug("Deactivating %s with timeout %s", nm_ac.get_id(), timeout) + user_data = main_loop + handler_id = nm_ac.connect( + 
NM_AC_STATE_CHANGED_SIGNAL, _nm_ac_state_change_callback, user_data + ) + logging.debug( +- "Registered {signal} on client.NM.ActiveConnection {id}".format( +- signal=NM_AC_STATE_CHANGED_SIGNAL, id=nm_ac.get_id() +- ) ++ "Registered %s on client.NM.ActiveConnection %s", ++ NM_AC_STATE_CHANGED_SIGNAL, ++ nm_ac.get_id(), + ) + if nm_ac.props.state != client.NM.ActiveConnectionState.DEACTIVATING: + nm_client = client.get_client() +@@ -44,9 +44,7 @@ def deactivate_active_connection(nm_ac, timeout, check_mode): + _nm_ac_deactivate_call_back, + user_data, + ) +- logging.debug( +- "Deactivating client.NM.ActiveConnection {0}".format(nm_ac.get_id()) +- ) ++ logging.debug("Deactivating client.NM.ActiveConnection %s", nm_ac.get_id()) + main_loop.run() + return True + +@@ -56,14 +54,13 @@ def _nm_ac_state_change_callback(nm_ac, state, reason, user_data): + if main_loop.is_cancelled: + return + logging.debug( +- "Got client.NM.ActiveConnection state change: {id}: {state} {reason}".format( +- id=nm_ac.get_id(), state=state, reason=reason +- ) ++ "Got client.NM.ActiveConnection state change: %s: %s %s", ++ nm_ac.get_id(), ++ state, ++ reason, + ) + if nm_ac.props.state == client.NM.ActiveConnectionState.DEACTIVATED: +- logging.debug( +- "client.NM.ActiveConnection {0} is deactivated".format(nm_ac.get_id()) +- ) ++ logging.debug("client.NM.ActiveConnection %s is deactivated", nm_ac.get_id()) + main_loop.quit() + + +@@ -82,9 +79,7 @@ def _nm_ac_deactivate_call_back(nm_client, result, user_data): + client.NM.ManagerError.quark(), client.NM.ManagerError.CONNECTIONNOTACTIVE + ): + logging.info( +- "Connection is not active on {0}, no need to deactivate".format( +- nm_ac_id +- ) ++ "Connection is not active on %s, no need to deactivate", nm_ac_id + ) + if nm_ac: + nm_ac.handler_disconnect(handler_id) +diff --git a/module_utils/network_lsr/nm/client.py b/module_utils/network_lsr/nm/client.py +index 4992887..f47cc53 100644 +--- a/module_utils/network_lsr/nm/client.py ++++ 
b/module_utils/network_lsr/nm/client.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +diff --git a/module_utils/network_lsr/nm/connection.py b/module_utils/network_lsr/nm/connection.py +index 6982034..474da8d 100644 +--- a/module_utils/network_lsr/nm/connection.py ++++ b/module_utils/network_lsr/nm/connection.py +@@ -2,6 +2,10 @@ + + # Handle NM.RemoteConnection + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -26,9 +30,10 @@ def delete_remote_connection(nm_profile, timeout, check_mode): + user_data, + ) + logging.debug( +- "Deleting profile {id}/{uuid} with timeout {timeout}".format( +- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout +- ) ++ "Deleting profile %s/%s with timeout %s", ++ nm_profile.get_id(), ++ nm_profile.get_uuid(), ++ timeout, + ) + main_loop.run() + return True +@@ -78,9 +83,10 @@ def volatilize_remote_connection(nm_profile, timeout, check_mode): + user_data, + ) + logging.debug( +- "Volatilizing profile {id}/{uuid} with timeout {timeout}".format( +- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout +- ) ++ "Volatilizing profile %s/%s with timeout %s", ++ nm_profile.get_id(), ++ nm_profile.get_uuid(), ++ timeout, + ) + main_loop.run() + return True +diff --git a/module_utils/network_lsr/nm/error.py b/module_utils/network_lsr/nm/error.py +index 42014ec..d87bc72 100644 +--- a/module_utils/network_lsr/nm/error.py ++++ b/module_utils/network_lsr/nm/error.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + + class LsrNetworkNmError(Exception): + pass +diff --git 
a/module_utils/network_lsr/nm/provider.py b/module_utils/network_lsr/nm/provider.py +index 52e7502..567c9d1 100644 +--- a/module_utils/network_lsr/nm/provider.py ++++ b/module_utils/network_lsr/nm/provider.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -25,7 +29,7 @@ class NetworkManagerProvider: + nm_ac, timeout, check_mode + ) + if not changed: +- logging.info("No active connection for {0}".format(connection_name)) ++ logging.info("No active connection for %s", connection_name) + + return changed + +@@ -49,7 +53,7 @@ class NetworkManagerProvider: + nm_profile, timeout, check_mode + ) + if not changed: +- logging.info("No connection with UUID {0} to volatilize".format(uuid)) ++ logging.info("No connection with UUID %s to volatilize", uuid) + + return changed + +diff --git a/module_utils/network_lsr/nm_provider.py b/module_utils/network_lsr/nm_provider.py +index c75242a..d6168eb 100644 +--- a/module_utils/network_lsr/nm_provider.py ++++ b/module_utils/network_lsr/nm_provider.py +@@ -1,6 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + """ Support for NetworkManager aka the NM provider """ + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + # pylint: disable=import-error, no-name-in-module + from ansible.module_utils.network_lsr.utils import Util # noqa:E501 + +diff --git a/module_utils/network_lsr/utils.py b/module_utils/network_lsr/utils.py +index 73d9528..bc258fe 100644 +--- a/module_utils/network_lsr/utils.py ++++ b/module_utils/network_lsr/utils.py +@@ -2,18 +2,23 @@ + # SPDX-License-Identifier: BSD-3-Clause + # vim: fileencoding=utf8 + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import socket + import sys + import uuid + + # pylint: disable=import-error, 
no-name-in-module +-from ansible.module_utils.network_lsr import MyError # noqa:E501 ++from ansible.module_utils.network_lsr.myerror import MyError # noqa:E501 + + + class Util: + + PY3 = sys.version_info[0] == 3 + ++ # pylint: disable=undefined-variable + STRING_TYPE = str if PY3 else basestring # noqa:F821 + + @staticmethod +@@ -241,7 +246,8 @@ class Util: + n = int(c, 16) * 16 + i = 1 + else: +- assert i == 1 ++ if not i == 1: ++ raise AssertionError("i != 1 - value is {0}".format(i)) + n = n + int(c, 16) + i = 2 + b.append(n) +diff --git a/tests/ensure_provider_tests.py b/tests/ensure_provider_tests.py +index 3620729..4e45e6a 100755 +--- a/tests/ensure_provider_tests.py ++++ b/tests/ensure_provider_tests.py +@@ -73,8 +73,6 @@ NM_ONLY_TESTS = { + MINIMUM_VERSION: "'1.25.1'", + "comment": "# NetworkManager 1.25.1 introduced ethtool coalesce support", + }, +- "playbooks/tests_802_1x_updated.yml": {}, +- "playbooks/tests_802_1x.yml": {}, + "playbooks/tests_reapply.yml": {}, + # team interface is not supported on Fedora + "playbooks/tests_team.yml": { +@@ -117,9 +115,7 @@ def create_nm_playbook(test_playbook): + EXTRA_RUN_CONDITION, "" + ) + if extra_run_condition: +- extra_run_condition = "{}{}\n".format( +- EXTRA_RUN_CONDITION_PREFIX, extra_run_condition +- ) ++ extra_run_condition = f"{EXTRA_RUN_CONDITION_PREFIX}{extra_run_condition}\n" + + nm_version_check = "" + if minimum_nm_version: +@@ -212,7 +208,7 @@ def main(): + + if missing: + print("ERROR: No NM or initscripts tests found for:\n" + ", \n".join(missing)) +- print("Try to generate them with '{} generate'".format(sys.argv[0])) ++ print(f"Try to generate them with '{sys.argv[0]} generate'") + returncode = 1 + + return returncode +diff --git a/tests/get_coverage.sh b/tests/get_coverage.sh +index 858a8cf..4524fab 100755 +--- a/tests/get_coverage.sh ++++ b/tests/get_coverage.sh +@@ -19,7 +19,6 @@ shift + playbook="${1}" + + coverage_data="remote-coveragedata-${host}-${playbook%.yml}" 
+-coverage="/root/.local/bin/coverage" + + echo "Getting coverage for ${playbook} on ${host}" >&2 + +@@ -32,10 +31,15 @@ call_ansible() { + } + + remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)" ++# we want to expand ${remote_coverage_dir} here, so tell SC to be quiet ++# https://github.com/koalaman/shellcheck/wiki/SC2064 ++# shellcheck disable=SC2064 + trap "rm -rf '${remote_coverage_dir}'" EXIT + ansible-playbook -i "${host}", get_coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}" + + #COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage ++# https://github.com/koalaman/shellcheck/wiki/SC2046 ++# shellcheck disable=SC2046 + ./merge_coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _) + + cat > tmp_merge_coveragerc < +Date: Thu, 8 Jul 2021 11:13:43 -0700 +Subject: [PATCH] EPEL yum repository configuration for tests + +In tests/tasks/enable_epel.yml, if /etc/yum.repos.d/epel.repo exists +and it is not enabled, it's left disabled. Without the epel enabled, +it fails to install necessary modules such as python-mock, which +makes tests_unit.yml and tests_wireless_nm.yml fail. + +This patch adds a task calling ini_file to ensure the repo is always +enabled. 
See also bz1980439 + +Signed-off-by: Noriko Hosoi +(cherry picked from commit 1f25fbb4fc10097423dd088b5a03834f65a67aa2) +--- + tests/tasks/enable_epel.yml | 28 ++++++++++++++++++++++------ + 1 file changed, 22 insertions(+), 6 deletions(-) + +diff --git a/tests/tasks/enable_epel.yml b/tests/tasks/enable_epel.yml +index 7924bd4..a647fc8 100644 +--- a/tests/tasks/enable_epel.yml ++++ b/tests/tasks/enable_epel.yml +@@ -1,11 +1,27 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- +-- name: Enable EPEL {{ ansible_distribution_major_version }} +- # yamllint disable-line rule:line-length +- command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm +- args: +- warn: false +- creates: /etc/yum.repos.d/epel.repo ++- block: ++ - name: Create EPEL {{ ansible_distribution_major_version }} ++ # yamllint disable-line rule:line-length ++ command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm ++ args: ++ warn: false ++ creates: /etc/yum.repos.d/epel.repo ++ ++ - name: Retrieve EPEL {{ ansible_distribution_major_version }} file stats ++ stat: ++ path: /etc/yum.repos.d/epel.repo ++ register: _result ++ ++ - name: Enable EPEL {{ ansible_distribution_major_version }} ++ ini_file: ++ path: /etc/yum.repos.d/epel.repo ++ section: epel ++ mode: "{{ _result.stat.mode }}" ++ owner: "{{ _result.stat.pw_name }}" ++ group: "{{ _result.stat.gr_name }}" ++ option: enabled ++ value: "1" + when: + - ansible_distribution in ['RedHat', 'CentOS'] + - ansible_distribution_major_version in ['7', '8'] +-- +2.31.1 + diff --git a/SOURCES/network-epel-minimal.diff b/SOURCES/network-epel-minimal.diff new file mode 100644 index 0000000..af8b0ef --- /dev/null +++ b/SOURCES/network-epel-minimal.diff @@ -0,0 +1,401 @@ +diff --git a/tests/playbooks/integration_pytest_python3.yml b/tests/playbooks/integration_pytest_python3.yml +index 075355b..5fc9dea 100644 
+--- a/tests/playbooks/integration_pytest_python3.yml ++++ b/tests/playbooks/integration_pytest_python3.yml +@@ -9,6 +9,11 @@ + - rsync + + tasks: ++ - name: Install EPEL for RHEL and CentOS ++ # yamllint disable-line rule:line-length ++ command: "yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" ++ when: ansible_distribution in ["CentOS", "RedHat"] ++ + - name: Install rpm dependencies + package: + state: present +diff --git a/tests/tasks/el_repo_setup.yml b/tests/tasks/el_repo_setup.yml +deleted file mode 100644 +index 0656e8c..0000000 +--- a/tests/tasks/el_repo_setup.yml ++++ /dev/null +@@ -1,26 +0,0 @@ +-# SPDX-License-Identifier: BSD-3-Clause +-- name: Fix CentOS6 Base repo +- copy: +- dest: /etc/yum.repos.d/CentOS-Base.repo +- content: | +- [base] +- name=CentOS-$releasever - Base +- baseurl=https://vault.centos.org/6.10/os/$basearch/ +- gpgcheck=1 +- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 +- +- [updates] +- name=CentOS-$releasever - Updates +- baseurl=https://vault.centos.org/6.10/updates/$basearch/ +- gpgcheck=1 +- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 +- +- [extras] +- name=CentOS-$releasever - Extras +- baseurl=https://vault.centos.org/6.10/extras/$basearch/ +- gpgcheck=1 +- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 +- when: +- - ansible_distribution == 'CentOS' +- - ansible_distribution_major_version == '6' +-- include_tasks: enable_epel.yml +diff --git a/tests/tasks/setup_802_1x_server.yml b/tests/tasks/setup_802_1x_server.yml +index 49d1ce1..3bf16a9 100644 +--- a/tests/tasks/setup_802_1x_server.yml ++++ b/tests/tasks/setup_802_1x_server.yml +@@ -1,5 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- include_tasks: enable_epel.yml ++ + - name: Install hostapd + package: + name: hostapd +diff --git a/tests/tasks/setup_mock_wifi.yml b/tests/tasks/setup_mock_wifi.yml +index 997b704..d7a1e22 100644 +--- a/tests/tasks/setup_mock_wifi.yml ++++ b/tests/tasks/setup_mock_wifi.yml 
+@@ -1,5 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- include_tasks: enable_epel.yml ++ + - name: Install packages required to set up mock wifi network + package: + name: +diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml +index a27d8ea..288cd5d 100644 +--- a/tests/tests_802_1x_nm.yml ++++ b/tests/tests_802_1x_nm.yml +@@ -5,7 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml +index 5a25f5b..bd335e4 100644 +--- a/tests/tests_802_1x_updated_nm.yml ++++ b/tests/tests_802_1x_updated_nm.yml +@@ -5,7 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_bond_deprecated_initscripts.yml b/tests/tests_bond_deprecated_initscripts.yml +index 1e74bcc..383b488 100644 +--- a/tests/tests_bond_deprecated_initscripts.yml ++++ b/tests/tests_bond_deprecated_initscripts.yml +@@ -4,7 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_bond_deprecated.yml' with initscripts + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_bond_initscripts.yml b/tests/tests_bond_initscripts.yml +index 32fcc32..8fa74c5 100644 +--- a/tests/tests_bond_initscripts.yml ++++ b/tests/tests_bond_initscripts.yml +@@ -4,7 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_bond.yml' with initscripts as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_bond_nm.yml 
b/tests/tests_bond_nm.yml +index 7075d95..8ac6cbd 100644 +--- a/tests/tests_bond_nm.yml ++++ b/tests/tests_bond_nm.yml +@@ -5,7 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_bond.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_bridge_initscripts.yml b/tests/tests_bridge_initscripts.yml +index 8ce42e6..db5663c 100644 +--- a/tests/tests_bridge_initscripts.yml ++++ b/tests/tests_bridge_initscripts.yml +@@ -4,7 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_bridge.yml' with initscripts as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_bridge_nm.yml b/tests/tests_bridge_nm.yml +index 3d1b53a..c565952 100644 +--- a/tests/tests_bridge_nm.yml ++++ b/tests/tests_bridge_nm.yml +@@ -5,7 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_bridge.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_default.yml b/tests/tests_default.yml +index e196314..f6f7550 100644 +--- a/tests/tests_default.yml ++++ b/tests/tests_default.yml +@@ -5,7 +5,6 @@ + roles: + - linux-system-roles.network + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Test warning and info logs + assert: + that: +diff --git a/tests/tests_default_initscripts.yml b/tests/tests_default_initscripts.yml +index 006889c..cc8b875 100644 +--- a/tests/tests_default_initscripts.yml ++++ b/tests/tests_default_initscripts.yml +@@ -2,7 +2,6 @@ + --- + - hosts: all + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_default_nm.yml b/tests/tests_default_nm.yml +index 
54bc3e1..8138ca9 100644 +--- a/tests/tests_default_nm.yml ++++ b/tests/tests_default_nm.yml +@@ -2,7 +2,6 @@ + --- + - hosts: all + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_ethernet_initscripts.yml b/tests/tests_ethernet_initscripts.yml +index 366b052..62e75fe 100644 +--- a/tests/tests_ethernet_initscripts.yml ++++ b/tests/tests_ethernet_initscripts.yml +@@ -4,8 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_ethernet.yml' with initscripts as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_ethernet_nm.yml b/tests/tests_ethernet_nm.yml +index 238172d..ecefa14 100644 +--- a/tests/tests_ethernet_nm.yml ++++ b/tests/tests_ethernet_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_ethernet.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_ethtool_features_initscripts.yml b/tests/tests_ethtool_features_initscripts.yml +index 5bac5d3..6aea73b 100644 +--- a/tests/tests_ethtool_features_initscripts.yml ++++ b/tests/tests_ethtool_features_initscripts.yml +@@ -2,7 +2,6 @@ + # set network provider and gather facts + - hosts: all + tasks: +- - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_ethtool_features_nm.yml b/tests/tests_ethtool_features_nm.yml +index 2027862..30c6faa 100644 +--- a/tests/tests_ethtool_features_nm.yml ++++ b/tests/tests_ethtool_features_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_ethtool_features.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider 
to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_helpers_and_asserts.yml b/tests/tests_helpers_and_asserts.yml +index 64e2875..5514182 100644 +--- a/tests/tests_helpers_and_asserts.yml ++++ b/tests/tests_helpers_and_asserts.yml +@@ -3,8 +3,6 @@ + - name: Check that creating and removing test devices and assertions work + hosts: all + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: test veth interface management + include_tasks: tasks/create_and_remove_interface.yml + vars: +diff --git a/tests/tests_integration_pytest.yml b/tests/tests_integration_pytest.yml +index 9b80bd4..153214d 100644 +--- a/tests/tests_integration_pytest.yml ++++ b/tests/tests_integration_pytest.yml +@@ -1,8 +1,7 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- +-- hosts: all +- tasks: +- - include_tasks: tasks/el_repo_setup.yml ++- name: Empty play to gather facts ++ hosts: all + + - import_playbook: playbooks/integration_pytest_python3.yml + when: (ansible_distribution in ["CentOS", "RedHat"] and +diff --git a/tests/tests_provider_nm.yml b/tests/tests_provider_nm.yml +index 67fcffe..99306a1 100644 +--- a/tests/tests_provider_nm.yml ++++ b/tests/tests_provider_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_provider.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_reapply_nm.yml b/tests/tests_reapply_nm.yml +index eb48ddb..69fb208 100644 +--- a/tests/tests_reapply_nm.yml ++++ b/tests/tests_reapply_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_reapply.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_regression_nm.yml b/tests/tests_regression_nm.yml +index b2c46e9..9eb8084 100644 +--- a/tests/tests_regression_nm.yml ++++ 
b/tests/tests_regression_nm.yml +@@ -3,8 +3,6 @@ + # set network provider and gather facts + - hosts: all + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_states_initscripts.yml b/tests/tests_states_initscripts.yml +index fa94103..3e55a43 100644 +--- a/tests/tests_states_initscripts.yml ++++ b/tests/tests_states_initscripts.yml +@@ -4,8 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_states.yml' with initscripts as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_states_nm.yml b/tests/tests_states_nm.yml +index 34c8a24..3164a3a 100644 +--- a/tests/tests_states_nm.yml ++++ b/tests/tests_states_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_states.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_team_nm.yml b/tests/tests_team_nm.yml +index 8048029..0516765 100644 +--- a/tests/tests_team_nm.yml ++++ b/tests/tests_team_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_team.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_unit.yml b/tests/tests_unit.yml +index 44dfaec..8c5388b 100644 +--- a/tests/tests_unit.yml ++++ b/tests/tests_unit.yml +@@ -3,7 +3,7 @@ + - hosts: all + name: Setup for test running + tasks: +- - include_tasks: tasks/el_repo_setup.yml ++ - include_tasks: tasks/enable_epel.yml + + - name: Install dependencies + package: +diff --git a/tests/tests_vlan_mtu_initscripts.yml b/tests/tests_vlan_mtu_initscripts.yml +index dcd5d74..37770a9 100644 +--- a/tests/tests_vlan_mtu_initscripts.yml ++++ 
b/tests/tests_vlan_mtu_initscripts.yml +@@ -4,8 +4,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_vlan_mtu.yml' with initscripts as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts +diff --git a/tests/tests_vlan_mtu_nm.yml b/tests/tests_vlan_mtu_nm.yml +index c38263c..f201de3 100644 +--- a/tests/tests_vlan_mtu_nm.yml ++++ b/tests/tests_vlan_mtu_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_vlan_mtu.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm +diff --git a/tests/tests_wireless_nm.yml b/tests/tests_wireless_nm.yml +index 03b5ad6..86baf67 100644 +--- a/tests/tests_wireless_nm.yml ++++ b/tests/tests_wireless_nm.yml +@@ -5,8 +5,6 @@ + - hosts: all + name: Run playbook 'playbooks/tests_wireless.yml' with nm as provider + tasks: +- - include_tasks: tasks/el_repo_setup.yml +- + - name: Set network provider to 'nm' + set_fact: + network_provider: nm diff --git a/SOURCES/network-nm-reload-profile.diff b/SOURCES/network-nm-reload-profile.diff deleted file mode 100644 index cd243a3..0000000 --- a/SOURCES/network-nm-reload-profile.diff +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/tests/playbooks/tests_ethtool_features.yml b/tests/playbooks/tests_ethtool_features.yml -index bdaddb0..c4b35b9 100644 ---- a/tests/playbooks/tests_ethtool_features.yml -+++ b/tests/playbooks/tests_ethtool_features.yml -@@ -115,5 +115,8 @@ - - include_tasks: tasks/manage_test_interface.yml - vars: - state: absent -+ - name: Reload NetworkManager config -+ command: nmcli connection reload -+ ignore_errors: true - tags: - - "tests::cleanup" diff --git a/SOURCES/network-permissions.diff b/SOURCES/network-permissions.diff new file mode 100644 index 0000000..05068ff --- /dev/null +++ b/SOURCES/network-permissions.diff @@ -0,0 +1,6 @@ +diff --git 
a/library/network_connections.py b/library/network_connections.py +old mode 100755 +new mode 100644 +diff --git a/tests/unit/test_network_connections.py b/tests/unit/test_network_connections.py +old mode 100755 +new mode 100644 diff --git a/SOURCES/network-pr353.diff b/SOURCES/network-pr353.diff new file mode 100644 index 0000000..e4398b7 --- /dev/null +++ b/SOURCES/network-pr353.diff @@ -0,0 +1,35 @@ +From f4fabea55429077c2a4e94fe8015c4ab57f58ff3 Mon Sep 17 00:00:00 2001 +From: Fernando Fernandez Mancera +Date: Mon, 15 Feb 2021 11:02:55 +0100 +Subject: [PATCH] README: remove "slaves" leftover from documentation + +Signed-off-by: Fernando Fernandez Mancera +--- + README.md | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/README.md b/README.md +index 6b15673..06a8b1b 100644 +--- a/README.md ++++ b/README.md +@@ -300,7 +300,7 @@ different or the profile may not be tied to an interface at all. + + The `zone` option sets the firewalld zone for the interface. + +-Slaves to the bridge, bond or team devices cannot specify a zone. ++Ports to the bridge, bond or team devices cannot specify a zone. + + + ### `ip` +@@ -367,7 +367,7 @@ The IP configuration supports the following options: + + **Note:** When `route_append_only` or `rule_append_only` is not specified, the `network` role deletes the current routes or routing rules. + +-**Note:** Slaves to the bridge, bond or team devices cannot specify `ip` settings. ++**Note:** Ports to the bridge, bond or team devices cannot specify `ip` settings. 
+ + ### `ethtool` + +-- +2.29.2 + diff --git a/SOURCES/network-tier1-tags.diff b/SOURCES/network-tier1-tags.diff index 95efa90..1c4cb67 100644 --- a/SOURCES/network-tier1-tags.diff +++ b/SOURCES/network-tier1-tags.diff @@ -1,5 +1,5 @@ diff --git a/tests/playbooks/tests_802_1x.yml b/tests/playbooks/tests_802_1x.yml -index 8151294..0c6c965 100644 +index 9cce1ae..76d99e9 100644 --- a/tests/playbooks/tests_802_1x.yml +++ b/tests/playbooks/tests_802_1x.yml @@ -1,5 +1,10 @@ @@ -13,8 +13,32 @@ index 8151294..0c6c965 100644 - hosts: all vars: interface: 802-1x-test -@@ -105,3 +110,8 @@ - - include_tasks: tasks/cleanup_802_1x_server.yml +@@ -122,3 +127,8 @@ + command: update-ca-trust + tags: + - "tests::cleanup" ++ ++- name: Restore host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/restore_state.yml +diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml +index 69f07f8..1e45788 100644 +--- a/tests/playbooks/tests_bond.yml ++++ b/tests/playbooks/tests_bond.yml +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- name: Save host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/save_state.yml ++ + - hosts: all + vars: + controller_profile: bond0 +@@ -95,3 +100,8 @@ + - import_tasks: tasks/remove_test_interfaces_with_dhcp.yml tags: - "tests::cleanup" + @@ -171,6 +195,54 @@ index cd02579..adcffee 100644 + hosts: all + tasks: + - import_tasks: tasks/restore_state.yml +diff --git a/tests/playbooks/tests_ethtool_features.yml b/tests/playbooks/tests_ethtool_features.yml +index 43fddc3..d1a87fe 100644 +--- a/tests/playbooks/tests_ethtool_features.yml ++++ b/tests/playbooks/tests_ethtool_features.yml +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- name: Save host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/save_state.yml ++ + - hosts: all + vars: + interface: testnic1 +@@ -198,3 +203,8 @@ + state: absent + tags: + - "tests::cleanup" ++ ++- name: Restore host state ++ hosts: all ++ tasks: ++ - 
import_tasks: tasks/restore_state.yml +diff --git a/tests/playbooks/tests_provider.yml b/tests/playbooks/tests_provider.yml +index 1db2d08..e097b4b 100644 +--- a/tests/playbooks/tests_provider.yml ++++ b/tests/playbooks/tests_provider.yml +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- name: Save host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/save_state.yml ++ + - hosts: all + vars: + interface: testnic1 +@@ -33,3 +38,8 @@ + - tasks/cleanup_profile+device.yml + tags: + - tests::provider:initscripts_to_nm ++ ++- name: Restore host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/restore_state.yml diff --git a/tests/playbooks/tests_reapply.yml b/tests/playbooks/tests_reapply.yml index 4b1cb09..6995607 100644 --- a/tests/playbooks/tests_reapply.yml @@ -197,34 +269,30 @@ index 4b1cb09..6995607 100644 + tasks: + - import_tasks: tasks/restore_state.yml diff --git a/tests/playbooks/tests_states.yml b/tests/playbooks/tests_states.yml -index 8edbf8f..ef59063 100644 +index eec27c0..a8d0ecd 100644 --- a/tests/playbooks/tests_states.yml +++ b/tests/playbooks/tests_states.yml -@@ -22,6 +22,8 @@ - ip: - dhcp4: false - auto6: false -+ tags: -+ - 'tests::net::bridge' - - include_tasks: tasks/assert_device_present.yml - - include_tasks: tasks/assert_profile_present.yml - -@@ -75,3 +77,18 @@ - - assert: - that: __network_test_failed - fail_msg: "The role did not fail when it should have" +@@ -135,3 +135,23 @@ + - tasks/cleanup_profile+device.yml + tags: + - tests::states:remove_down_twice + + pre_tasks: + - name: Save host state + import_tasks: tasks/save_state.yml + + post_tasks: -+ - name: Remove test bridge in case it is still lingering -+ command: 'ip link delete "{{ interface }}"' ++ - name: Remove test profile + tags: + - 'tests::cleanup' + - 'tests::net::bridge::cleanup' -+ ignore_errors: yes ++ import_role: ++ name: linux-system-roles.network ++ vars: ++ network_connections: ++ - name: statebr ++ state: down ++ persistent_state: 
absent + + - name: Restore host state + import_tasks: tasks/restore_state.yml @@ -271,6 +339,30 @@ index 029b599..378d5fe 100644 + post_tasks: + - name: Restore host state + import_tasks: tasks/restore_state.yml +diff --git a/tests/playbooks/tests_wireless.yml b/tests/playbooks/tests_wireless.yml +index 822a15e..52661bd 100644 +--- a/tests/playbooks/tests_wireless.yml ++++ b/tests/playbooks/tests_wireless.yml +@@ -1,5 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + --- ++- name: Save host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/save_state.yml ++ + - hosts: all + vars: + interface: wlan0 +@@ -86,3 +91,8 @@ + - include_tasks: tasks/cleanup_mock_wifi.yml + tags: + - "tests::cleanup" ++ ++- name: Restore host state ++ hosts: all ++ tasks: ++ - import_tasks: tasks/restore_state.yml diff --git a/tests/tasks/commonvars.yml b/tests/tasks/commonvars.yml new file mode 100644 index 0000000..50452f7 @@ -364,6 +456,44 @@ index 0000000..5690aed + path: /etc/sysconfig/network + register: etc_sysconfig_network_stat + ignore_errors: yes +diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml +index 288cd5d..840958d 100644 +--- a/tests/tests_802_1x_nm.yml ++++ b/tests/tests_802_1x_nm.yml +@@ -4,6 +4,8 @@ + # set network provider and gather facts + - hosts: all + name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider ++ tags: ++ - tests::expfail + tasks: + - name: Set network provider to 'nm' + set_fact: +@@ -17,3 +19,5 @@ + - import_playbook: playbooks/tests_802_1x.yml + when: + - ansible_distribution_major_version != '6' ++ tags: ++ - tests::expfail +diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml +index bd335e4..4ebcaf9 100644 +--- a/tests/tests_802_1x_updated_nm.yml ++++ b/tests/tests_802_1x_updated_nm.yml +@@ -4,6 +4,8 @@ + # set network provider and gather facts + - hosts: all + name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider ++ tags: ++ - tests::expfail + tasks: + - 
name: Set network provider to 'nm' + set_fact: +@@ -17,3 +19,5 @@ + - import_playbook: playbooks/tests_802_1x_updated.yml + when: + - ansible_distribution_major_version != '6' ++ tags: ++ - tests::expfail diff --git a/tests/tests_default.yml b/tests/tests_default.yml index f6f7550..98e3c7e 100644 --- a/tests/tests_default.yml diff --git a/SOURCES/postfix-meta-el8.diff b/SOURCES/postfix-meta-el8.diff new file mode 100644 index 0000000..41cb91b --- /dev/null +++ b/SOURCES/postfix-meta-el8.diff @@ -0,0 +1,16 @@ +diff --git a/meta/main.yml b/meta/main.yml +index a0ef6f4..da22270 100644 +--- a/meta/main.yml ++++ b/meta/main.yml +@@ -7,8 +7,8 @@ galaxy_info: + min_ansible_version: 2.2 + platforms: + - name: Fedora +- versions: [ 24, 25 ] ++ versions: [ 31, 32 ] + - name: EL +- versions: [ 6, 7 ] ++ versions: [ 6, 7, 8 ] + + + diff --git a/SOURCES/rhel-system-roles-kdump-pr22.diff b/SOURCES/rhel-system-roles-kdump-pr22.diff index d7d2796..342eddc 100644 --- a/SOURCES/rhel-system-roles-kdump-pr22.diff +++ b/SOURCES/rhel-system-roles-kdump-pr22.diff @@ -44,10 +44,10 @@ index bf24210..504ff34 100644 path {{ kdump_path }} {% if kdump_core_collector %} diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml -index 679148e..14a59d9 100644 +index 1da99df..d12e884 100644 --- a/tests/tests_ssh.yml +++ b/tests/tests_ssh.yml -@@ -6,6 +6,11 @@ +@@ -5,6 +5,11 @@ # known and ansible is supposed to be configured to be able to # connect to it (via inventory). 
kdump_ssh_server_outside: localhost diff --git a/SOURCES/rhel-system-roles-network-prefix.diff b/SOURCES/rhel-system-roles-network-prefix.diff deleted file mode 100644 index a79e7ba..0000000 --- a/SOURCES/rhel-system-roles-network-prefix.diff +++ /dev/null @@ -1,107 +0,0 @@ -diff --git a/examples/bond_simple.yml b/examples/bond_simple.yml -index cd88676..6155634 100644 ---- a/examples/bond_simple.yml -+++ b/examples/bond_simple.yml -@@ -29,5 +29,5 @@ - master: bond0 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network - ... -diff --git a/examples/bond_with_vlan.yml b/examples/bond_with_vlan.yml -index 2e6be23..3b7a6dc 100644 ---- a/examples/bond_with_vlan.yml -+++ b/examples/bond_with_vlan.yml -@@ -35,4 +35,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/bridge_with_vlan.yml b/examples/bridge_with_vlan.yml -index 037ff8e..83c586d 100644 ---- a/examples/bridge_with_vlan.yml -+++ b/examples/bridge_with_vlan.yml -@@ -33,4 +33,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_simple_auto.yml b/examples/eth_simple_auto.yml -index 0ba168a..e4c4a54 100644 ---- a/examples/eth_simple_auto.yml -+++ b/examples/eth_simple_auto.yml -@@ -15,4 +15,4 @@ - mtu: 1450 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_with_802_1x.yml b/examples/eth_with_802_1x.yml -index 92a93a9..7731b7d 100644 ---- a/examples/eth_with_802_1x.yml -+++ b/examples/eth_with_802_1x.yml -@@ -27,4 +27,4 @@ - - client.pem - - cacert.pem - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_with_vlan.yml b/examples/eth_with_vlan.yml -index 69da673..e0c2f11 100644 ---- a/examples/eth_with_vlan.yml -+++ b/examples/eth_with_vlan.yml -@@ -26,4 +26,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - 
linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/ethtool_features.yml b/examples/ethtool_features.yml -index d8842c2..b32ad0b 100644 ---- a/examples/ethtool_features.yml -+++ b/examples/ethtool_features.yml -@@ -3,7 +3,7 @@ - - hosts: all - tasks: - - include_role: -- name: linux-system-roles.network -+ name: rhel-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" -diff --git a/examples/ethtool_features_default.yml b/examples/ethtool_features_default.yml -index 78965e6..3cdd731 100644 ---- a/examples/ethtool_features_default.yml -+++ b/examples/ethtool_features_default.yml -@@ -3,7 +3,7 @@ - - hosts: all - tasks: - - include_role: -- name: linux-system-roles.network -+ name: rhel-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" -diff --git a/examples/infiniband.yml b/examples/infiniband.yml -index 22603d9..9e7e267 100644 ---- a/examples/infiniband.yml -+++ b/examples/infiniband.yml -@@ -23,4 +23,4 @@ - - 198.51.100.133/30 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/macvlan.yml b/examples/macvlan.yml -index 90cd09d..0064ad4 100644 ---- a/examples/macvlan.yml -+++ b/examples/macvlan.yml -@@ -26,4 +26,4 @@ - - 192.168.1.1/24 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network diff --git a/SOURCES/rhel-system-roles-postfix-prefix.diff b/SOURCES/rhel-system-roles-postfix-prefix.diff deleted file mode 100644 index 65ab2a1..0000000 --- a/SOURCES/rhel-system-roles-postfix-prefix.diff +++ /dev/null @@ -1,40 +0,0 @@ -diff --git a/README.md b/README.md -index 5950215..a59d72f 100644 ---- a/README.md -+++ b/README.md -@@ -25,7 +25,7 @@ Install and enable postfix. Configure "relay_domains=$mydestination" and - relay_domains: "$mydestination" - relay_host: "example.com" - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. 
Do not run 'postfix check' before restarting -@@ -37,7 +37,7 @@ postfix: - vars: - postfix_check: false - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. Do single backup of main.cf (older backup will be -@@ -51,7 +51,7 @@ rewritten) and configure "relay_host=example.com": - relay_host: "example.com" - postfix_backup: true - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. Do timestamped backup of main.cf and -@@ -66,7 +66,7 @@ set to true postfix_backup is ignored): - relay_host: "example.com" - postfix_backup_multiple: true - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - diff --git a/SOURCES/rhel-system-roles-selinux-prefix.diff b/SOURCES/rhel-system-roles-selinux-prefix.diff deleted file mode 100644 index 7e80daa..0000000 --- a/SOURCES/rhel-system-roles-selinux-prefix.diff +++ /dev/null @@ -1,32 +0,0 @@ -diff --git a/README.md b/README.md -index a0385b0..6efc62d 100644 ---- a/README.md -+++ b/README.md -@@ -42,7 +42,7 @@ This role can be configured using variab - vars: - [ see below ] - roles: -- - role: linux-system-roles.selinux -+ - role: rhel-system-roles.selinux - become: true - ``` - -diff --git a/selinux-playbook.yml b/selinux-playbook.yml -index 78d3953..b2348d5 100644 ---- a/selinux-playbook.yml -+++ b/selinux-playbook.yml -@@ -31,7 +31,7 @@ - - name: execute the role and catch errors - block: - - include_role: -- name: linux-system-roles.selinux -+ name: rhel-system-roles.selinux - rescue: - # Fail if failed for a different reason than selinux_reboot_required. 
- - name: handle errors -@@ -52,4 +52,4 @@ - - - name: reapply the role - include_role: -- name: linux-system-roles.selinux -+ name: rhel-system-roles.selinux diff --git a/SOURCES/rhel-system-roles-storage-prefix.diff b/SOURCES/rhel-system-roles-storage-prefix.diff deleted file mode 100644 index c618f26..0000000 --- a/SOURCES/rhel-system-roles-storage-prefix.diff +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/README.md b/README.md -index c2debc9..d9e40b3 100644 ---- a/README.md -+++ b/README.md -@@ -84,7 +84,7 @@ Example Playbook - - hosts: all - - roles: -- - name: linux-system-roles.storage -+ - name: rhel-system-roles.storage - storage_pools: - - name: app - disks: diff --git a/SOURCES/rhel-system-roles-timesync-prefix.diff b/SOURCES/rhel-system-roles-timesync-prefix.diff deleted file mode 100644 index 6fe1889..0000000 --- a/SOURCES/rhel-system-roles-timesync-prefix.diff +++ /dev/null @@ -1,46 +0,0 @@ -diff -up timesync-1.0.0/README.md.orig timesync-1.0.0/README.md ---- timesync-1.0.0/README.md.orig 2018-08-21 11:46:41.000000000 +0200 -+++ timesync-1.0.0/README.md 2018-11-06 22:29:14.586770442 +0100 -@@ -82,7 +82,7 @@ Install and configure ntp to synchronize - - hostname: baz.example.com - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` - - Install and configure linuxptp to synchronize the system clock with a -@@ -95,7 +95,7 @@ grandmaster in PTP domain number 0, whic - - number: 0 - interfaces: [ eth0 ] - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` - - Install and configure chrony and linuxptp to synchronize the system clock with -@@ -122,5 +122,5 @@ synchronization: - transport: UDPv4 - delay: 0.000010 - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` -diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml ---- 
timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig 2019-06-03 18:03:18.081868584 +0200 -+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml 2019-06-03 18:03:26.718704991 +0200 -@@ -11,4 +11,4 @@ - - hostname: 3.pool.ntp.org - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync -diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml ---- timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig 2019-06-03 16:36:40.000000000 +0200 -+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml 2019-06-03 18:03:36.721515519 +0200 -@@ -6,4 +6,4 @@ - pool: yes - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync diff --git a/SOURCES/selinux-ansible-test-issues.diff b/SOURCES/selinux-ansible-test-issues.diff new file mode 100644 index 0000000..ef16241 --- /dev/null +++ b/SOURCES/selinux-ansible-test-issues.diff @@ -0,0 +1,164 @@ +From 9cbbc3f63052bef0b6a697e066e092a5f9722ce8 Mon Sep 17 00:00:00 2001 +From: Noriko Hosoi +Date: Mon, 22 Feb 2021 17:11:05 -0800 +Subject: [PATCH] Patch23: selinux-ansible-test-issues.diff + +--- + .sanity-ansible-ignore-2.10.txt | 2 ++ + .sanity-ansible-ignore-2.9.txt | 2 ++ + library/selogin.py | 26 ++++++++++----------- + tests/setup_module_utils.sh | 41 --------------------------------- + 4 files changed, 16 insertions(+), 55 deletions(-) + create mode 100644 .sanity-ansible-ignore-2.10.txt + create mode 100644 .sanity-ansible-ignore-2.9.txt + delete mode 100755 tests/setup_module_utils.sh + +diff --git a/.sanity-ansible-ignore-2.10.txt b/.sanity-ansible-ignore-2.10.txt +new file mode 100644 +index 0000000..5f8ce1e +--- /dev/null ++++ b/.sanity-ansible-ignore-2.10.txt +@@ -0,0 +1,2 @@ ++plugins/modules/selogin.py no-get-exception 
++plugins/modules/selogin.py validate-modules!skip +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..5f8ce1e +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1,2 @@ ++plugins/modules/selogin.py no-get-exception ++plugins/modules/selogin.py validate-modules!skip +diff --git a/library/selogin.py b/library/selogin.py +index b785c27..6e3fd32 100644 +--- a/library/selogin.py ++++ b/library/selogin.py +@@ -15,6 +15,9 @@ + # + # You should have received a copy of the GNU General Public License + # along with this program. If not, see . ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + ANSIBLE_METADATA = { + "status": ["preview"], +@@ -22,13 +25,14 @@ ANSIBLE_METADATA = { + "version": "1.0", + } + +-DOCUMENTATION = """ ++DOCUMENTATION = r""" + --- + module: selogin + short_description: Manages linux user to SELinux user mapping + description: +- - Manages linux user to SELinux user mapping +-version_added: "1.0" ++ - "WARNING: Do not use this module directly! It is only for role internal use." ++ - Manages linux user to SELinux user mapping ++version_added: '1.0' + options: + login: + description: +@@ -41,8 +45,7 @@ options: + required: true + default: null + serange: +- description: +- - >- ++ description: > + MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login + mapping - defaults to the SELinux user record range. 
+ required: false +@@ -62,8 +65,9 @@ notes: + - The changes are persistent across reboots + - Not tested on any debian based system + requirements: [ 'libselinux-python', 'policycoreutils-python' ] +-author: Dan Keder +-author: Petr Lautrbach ++author: ++ - Dan Keder (@dkeder) ++ - Petr Lautrbach (@bachradsusi) + """ + + EXAMPLES = """ +@@ -82,7 +86,7 @@ EXAMPLES = """ + + # Assign all users in the engineering group to the staff_u user + - selogin: +- login: %engineering ++ login: "%engineering" + seuser: staff_u + state: present + """ +@@ -198,9 +202,6 @@ def semanage_login_add(module, login, seuser, do_reload, serange="s0", sestore=" + except KeyError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +- except OSError: +- e = get_exception() +- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) + except RuntimeError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +@@ -248,9 +249,6 @@ def semanage_login_del(module, login, seuser, do_reload, sestore=""): + except KeyError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +- except OSError: +- e = get_exception() +- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) + except RuntimeError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh +deleted file mode 100755 +index 94d102d..0000000 +--- a/tests/setup_module_utils.sh ++++ /dev/null +@@ -1,41 +0,0 @@ +-#!/bin/bash +-# SPDX-License-Identifier: MIT +- +-set -euo pipefail +- +-if [ -n "${DEBUG:-}" ] ; then +- set -x +-fi +- +-if [ ! -d "${1:-}" ] ; then +- echo Either ansible is not installed, or there is no ansible/module_utils +- echo in "$1" - Skipping +- exit 0 +-fi +- +-if [ ! 
-d "${2:-}" ] ; then +- echo Role has no module_utils - Skipping +- exit 0 +-fi +- +-# we need absolute path for $2 +-absmoddir=$( readlink -f "$2" ) +- +-# clean up old links to module_utils +-for item in "$1"/* ; do +- if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then +- case "$lnitem" in +- *"${2}"*) rm -f "$item" ;; +- esac +- fi +-done +- +-# add new links to module_utils +-for item in "$absmoddir"/* ; do +- case "$item" in +- *__pycache__) continue;; +- *.pyc) continue;; +- esac +- bnitem=$( basename "$item" ) +- ln -s "$item" "$1/$bnitem" +-done +-- +2.26.2 + diff --git a/SOURCES/selinux-bz-1926947-no-variable-named-present.diff b/SOURCES/selinux-bz-1926947-no-variable-named-present.diff new file mode 100644 index 0000000..f9bdf2e --- /dev/null +++ b/SOURCES/selinux-bz-1926947-no-variable-named-present.diff @@ -0,0 +1,34 @@ +From 035a9b2db26af071a95e02a0af08bcbb73b69abf Mon Sep 17 00:00:00 2001 +From: Florian Bachmann +Date: Fri, 5 Feb 2021 11:48:53 +0100 +Subject: [PATCH] fix incorrect default value (there is no variable named + "present") + +--- + tasks/main.yml | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/tasks/main.yml b/tasks/main.yml +index afbe81f..702e369 100644 +--- a/tasks/main.yml ++++ b/tasks/main.yml +@@ -118,7 +118,7 @@ + ports: "{{ item.ports }}" + proto: "{{ item.proto | default('tcp') }}" + setype: "{{ item.setype }}" +- state: "{{ item.state | default(present) }}" ++ state: "{{ item.state | default('present') }}" + with_items: "{{ selinux_ports }}" + + - name: Set linux user to SELinux user mapping +@@ -126,6 +126,6 @@ + login: "{{ item.login }}" + seuser: "{{ item.seuser }}" + serange: "{{ item.serange | default('s0') }}" +- state: "{{ item.state | default(present) }}" ++ state: "{{ item.state | default('present') }}" + reload: "{{ item.reload | default(False) }}" + with_items: "{{ selinux_logins }}" +-- +2.29.2 + diff --git a/SOURCES/selinux-tier1-tags.diff b/SOURCES/selinux-tier1-tags.diff index 
a5bdf86..c2c4abd 100644 --- a/SOURCES/selinux-tier1-tags.diff +++ b/SOURCES/selinux-tier1-tags.diff @@ -15,14 +15,49 @@ index f294101..7571066 100644 - name: Get local modifications - boolean command: /usr/sbin/semanage boolean -l -n -C register: selinux_role_boolean +diff --git a/tests/tests_all_purge.yml b/tests/tests_all_purge.yml +index 03dfe05..6775847 100644 +--- a/tests/tests_all_purge.yml ++++ b/tests/tests_all_purge.yml +@@ -8,13 +8,17 @@ + fcontext -a -t user_home_dir_t /tmp/test_dir + login -a -s staff_u sar-user + ++ tags: ++ - 'tests::avc' + tasks: + - name: Install SELinux tool semanage on Fedora + package: + name: + - policycoreutils-python-utils + state: present +- when: ansible_distribution == "Fedora" ++ when: ansible_distribution == "Fedora" or ++ ( ansible_distribution_major_version > "7" and ++ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" )) + + - name: Add a Linux System Roles SELinux User + user: +diff --git a/tests/tests_all_transitions.yml b/tests/tests_all_transitions.yml +index f608a42..d0d209b 100644 +--- a/tests/tests_all_transitions.yml ++++ b/tests/tests_all_transitions.yml +@@ -1,6 +1,8 @@ + - name: Test all the possible selinux_state transitions + hosts: all + become: true ++ tags: ++ - 'tests::reboot' + vars: + states: + - permissive diff --git a/tests/tests_boolean.yml b/tests/tests_boolean.yml -index 47eafc0..ca85922 100644 +index 47eafc0..2aa0025 100644 --- a/tests/tests_boolean.yml +++ b/tests/tests_boolean.yml -@@ -1,5 +1,6 @@ - +@@ -1,4 +1,5 @@ - name: Check if selinux role sets SELinux booleans -+ tags: [ 'tests::tier1', 'tests::expfail' ] ++ tags: tests::expfail hosts: all become: true @@ -35,39 +70,11 @@ index 47eafc0..ca85922 100644 - name: save state after initial changes and before other changes set_fact: boolean_before: "{{ selinux_role_boolean.stdout_lines }}" -diff --git a/tests/tests_default.yml b/tests/tests_default.yml -index a837c73..25bf39d 100644 ---- a/tests/tests_default.yml -+++ 
b/tests/tests_default.yml -@@ -1,5 +1,6 @@ - - - name: Ensure that the role runs with default parameters -+ tags: tests::tier1 - hosts: all - - roles: -diff --git a/tests/tests_default_vars.yml b/tests/tests_default_vars.yml -index b6a6b5a..7cd321d 100644 ---- a/tests/tests_default_vars.yml -+++ b/tests/tests_default_vars.yml -@@ -1,4 +1,5 @@ - - name: Ensure that the role declares all paremeters in defaults -+ tags: tests::tier1 - hosts: all - - roles: diff --git a/tests/tests_fcontext.yml b/tests/tests_fcontext.yml -index 0a411fb..f4a3923 100644 +index 0a411fb..f6f1bf4 100644 --- a/tests/tests_fcontext.yml +++ b/tests/tests_fcontext.yml -@@ -1,5 +1,6 @@ - - - name: Check if selinux role sets SELinux fcontext mappings -+ tags: tests::tier1 - hosts: all - become: true - -@@ -13,7 +14,7 @@ +@@ -13,7 +13,7 @@ selinux_fcontexts: - { target: '/tmp/test_dir1(/.*)?', setype: 'user_home_dir_t', ftype: 'd' } @@ -77,37 +84,22 @@ index 0a411fb..f4a3923 100644 set_fact: fcontext_before: "{{ selinux_role_fcontext.stdout }}" diff --git a/tests/tests_login.yml b/tests/tests_login.yml -index efa826d..e4f55ca 100644 +index efa826d..c7ce462 100644 --- a/tests/tests_login.yml +++ b/tests/tests_login.yml -@@ -1,5 +1,6 @@ - - - name: Check if selinux role sets SELinux login mappings -+ tags: tests::tier1 - hosts: all - become: true - -@@ -18,7 +19,7 @@ +@@ -18,6 +18,6 @@ - { login: 'sar-user', seuser: 'staff_u', serange: 's0-s0:c0.c1023', state: 'present' } - - - include: set_selinux_variables.yml + - import_tasks: set_selinux_variables.yml - name: save state after initial changes and before other changes set_fact: login_before: "{{ selinux_role_login.stdout }}" diff --git a/tests/tests_port.yml b/tests/tests_port.yml -index 446f79d..03276b5 100644 +index 446f79d..7bb112e 100644 --- a/tests/tests_port.yml +++ b/tests/tests_port.yml -@@ -1,5 +1,6 @@ - - - name: Check if selinux role sets SELinux port mapping -+ tags: tests::tier1 - hosts: all - become: true - -@@ -29,7 +30,7 @@ +@@ 
-29,7 +29,7 @@ selinux_ports: - { ports: '22022', proto: 'tcp', setype: 'ssh_port_t', state: 'present' } @@ -117,10 +109,18 @@ index 446f79d..03276b5 100644 set_fact: port_after: "{{ selinux_role_port.stdout }}" diff --git a/tests/tests_selinux_disabled.yml b/tests/tests_selinux_disabled.yml -index afd23e4..d30de2b 100644 +index afd23e4..883dc6d 100644 --- a/tests/tests_selinux_disabled.yml +++ b/tests/tests_selinux_disabled.yml -@@ -18,7 +19,9 @@ +@@ -12,13 +12,17 @@ + fcontext -a -t user_home_dir_t /tmp/test_dir + login -a -s staff_u sar-user + ++ tags: ++ - 'tests::avc' + tasks: + - name: Install SELinux tool semanage on Fedora + package: name: - policycoreutils-python-utils state: present @@ -131,7 +131,7 @@ index afd23e4..d30de2b 100644 - name: Add a Linux System Roles SELinux User user: -@@ -67,17 +71,28 @@ +@@ -67,17 +69,28 @@ assert: that: "{{ ansible_selinux.config_mode == 'enforcing' }}" msg: "SELinux config mode should be enforcing instead of {{ ansible_selinux.config_mode }}" @@ -171,6 +171,6 @@ index afd23e4..d30de2b 100644 + state: absent + + - import_role: -+ name: selinux ++ name: linux-system-roles.selinux + vars: + selinux_all_purge: true diff --git a/SOURCES/sshd-example.diff b/SOURCES/sshd-example.diff new file mode 100644 index 0000000..48243e3 --- /dev/null +++ b/SOURCES/sshd-example.diff @@ -0,0 +1,43 @@ +diff --git a/README.md b/README.md +index 676ad72..dc06d85 100644 +--- a/README.md ++++ b/README.md +@@ -190,7 +190,7 @@ defaults. This is useful if the role is used in deployment stage to make sure + the service is able to start on the first attempt. To disable this check, set + this to empty list. + +-* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_group` ++* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_mode` + + Use these variables to set the ownership and permissions for the host keys from + the above list. 
+@@ -273,6 +273,8 @@ for example: + X11Forwarding: yes + ``` + ++More example playbooks can be found in [`examples/`](examples/) directory. ++ + Template Generation + ------------------- + +diff --git a/examples/example-root-login.yml b/examples/example-root-login.yml +new file mode 100644 +index 0000000..156e629 +--- /dev/null ++++ b/examples/example-root-login.yml +@@ -0,0 +1,15 @@ ++--- ++- hosts: all ++ tasks: ++ - name: Configure sshd to prevent root and password login except from particular subnet ++ include_role: ++ name: ansible-sshd ++ vars: ++ sshd: ++ # root login and password login is enabled only from a particular subnet ++ PermitRootLogin: no ++ PasswordAuthentication: no ++ Match: ++ - Condition: "Address 192.0.2.0/24" ++ PermitRootLogin: yes ++ PasswordAuthentication: yes diff --git a/SOURCES/sshd-work-on-ansible28-jinja27.diff b/SOURCES/sshd-work-on-ansible28-jinja27.diff new file mode 100644 index 0000000..268d31f --- /dev/null +++ b/SOURCES/sshd-work-on-ansible28-jinja27.diff @@ -0,0 +1,25 @@ +From bb612fb6c5f76a40fce368acb43d2847e699213d Mon Sep 17 00:00:00 2001 +From: Rich Megginson +Date: Thu, 28 Jan 2021 15:56:14 -0700 +Subject: [PATCH] use state: absent instead of state: missing + +--- + tests/tests_hostkeys_missing.yml | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/tests_hostkeys_missing.yml b/tests/tests_hostkeys_missing.yml +index 9dfe77b..5790684 100644 +--- a/tests/tests_hostkeys_missing.yml ++++ b/tests/tests_hostkeys_missing.yml +@@ -40,7 +40,7 @@ + - name: Make sure the key was not created + file: + path: /tmp/missing_ssh_host_rsa_key +- state: missing ++ state: absent + register: key + failed_when: key.changed + tags: tests::verify +-- +2.29.2 + diff --git a/SOURCES/storage-ansible-test.diff b/SOURCES/storage-ansible-test.diff new file mode 100644 index 0000000..3cb42d8 --- /dev/null +++ b/SOURCES/storage-ansible-test.diff @@ -0,0 +1,3663 @@ +From 1d7f9d53c5be6588a7a6c34e4c623b2a8f6fff19 Mon Sep 17 
00:00:00 2001 +From: Rich Megginson +Date: Wed, 3 Mar 2021 07:55:20 -0700 +Subject: [PATCH] resolve ansible-test issues + +This fixes many formatting issues as well to make black, flake8, +pylint, yamllint, and ansible-lint happier. + +(cherry picked from commit bb2a1af5f63d00c3ff178f3b44696189d9adf542) +--- + .github/workflows/tox.yml | 4 +- + .sanity-ansible-ignore-2.9.txt | 13 + + library/blivet.py | 968 +++++++++++------- + library/blockdev_info.py | 45 +- + library/bsize.py | 56 +- + library/find_unused_disk.py | 101 +- + library/lvm_gensym.py | 119 ++- + library/resolve_blockdev.py | 71 +- + module_utils/storage_lsr/size.py | 86 +- + tests/setup_module_utils.sh | 41 - + tests/test-verify-volume-device.yml | 4 +- + tests/test-verify-volume-md.yml | 2 +- + tests/test.yml | 2 +- + tests/tests_create_lv_size_equal_to_vg.yml | 28 +- + ...ts_create_partition_volume_then_remove.yml | 4 +- + tests/tests_existing_lvm_pool.yml | 12 +- + tests/tests_lvm_auto_size_cap.yml | 42 +- + tests/tests_lvm_one_disk_one_volume.yml | 46 +- + tests/tests_misc.yml | 2 +- + tests/tests_null_raid_pool.yml | 14 +- + tests/tests_resize.yml | 86 +- + tests/unit/bsize_test.py | 5 + + tests/unit/gensym_test.py | 103 +- + tests/unit/resolve_blockdev_test.py | 74 +- + tests/unit/test_unused_disk.py | 73 +- + tox.ini | 6 - + 26 files changed, 1177 insertions(+), 830 deletions(-) + create mode 100644 .sanity-ansible-ignore-2.9.txt + delete mode 100755 tests/setup_module_utils.sh + +diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml +index eceb71f..ec3ec9f 100644 +--- a/.github/workflows/tox.yml ++++ b/.github/workflows/tox.yml +@@ -3,7 +3,7 @@ name: tox + on: # yamllint disable-line rule:truthy + - pull_request + env: +- TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0" ++ TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0" + LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*' + LSR_MSCENARIOS: default + # LSR_EXTRA_PACKAGES: libdbus-1-dev +@@ -36,7 
+36,7 @@ jobs: + toxenvs="py${toxpyver}" + case "$toxpyver" in + 27) toxenvs="${toxenvs},coveralls,flake8,pylint,custom" ;; +- 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection" ;; ++ 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection,ansible-test" ;; + 37) toxenvs="${toxenvs},coveralls,custom" ;; + 38) toxenvs="${toxenvs},coveralls,custom" ;; + esac +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..bf700c6 +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1,13 @@ ++plugins/modules/blivet.py import-2.7!skip ++plugins/modules/blivet.py import-3.5!skip ++plugins/modules/blivet.py import-3.6!skip ++plugins/modules/blivet.py import-3.7!skip ++plugins/modules/blivet.py import-3.8!skip ++tests/storage/unit/gensym_test.py shebang!skip ++plugins/modules/blivet.py validate-modules:import-error ++plugins/modules/blivet.py validate-modules:missing-gplv3-license ++plugins/modules/blockdev_info.py validate-modules:missing-gplv3-license ++plugins/modules/bsize.py validate-modules:missing-gplv3-license ++plugins/modules/find_unused_disk.py validate-modules:missing-gplv3-license ++plugins/modules/lvm_gensym.py validate-modules:missing-gplv3-license ++plugins/modules/resolve_blockdev.py validate-modules:missing-gplv3-license +diff --git a/library/blivet.py b/library/blivet.py +index 946b640..0e0b30c 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -1,12 +1,16 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: blivet + +@@ -15,6 +19,7 @@ short_description: Module for management of linux 
block device stacks + version_added: "2.5" + + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "Module configures storage pools and volumes to match the state specified + in input parameters. It does not do any management of /etc/fstab entries." + +@@ -30,7 +35,8 @@ options: + - boolean indicating whether to create partitions on disks for pool backing devices + disklabel_type: + description: +- - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet ++ - | ++ disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet + safe_mode: + description: + - boolean indicating that we should fail rather than implicitly/automatically +@@ -41,10 +47,10 @@ options: + when creating a disk volume (that is, a whole disk filesystem) + + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + + - name: Manage devices + blivet: +@@ -64,28 +70,40 @@ EXAMPLES = ''' + mount_point: /whole_disk1 + fs_type: ext4 + mount_options: journal_checksum,async,noexec +-''' ++""" + +-RETURN = ''' ++RETURN = """ + actions: + description: list of dicts describing actions taken +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + leaves: + description: list of paths to leaf devices +- type: list of str ++ returned: success ++ type: list ++ elements: dict + mounts: + description: list of dicts describing mounts to set up +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + crypts: + description: list of dicts describing crypttab entries to set up +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + pools: + description: list of dicts describing the pools w/ device path for each volume +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + volumes: + description: list of dicts describing the volumes w/ device path for each +- type: list of 
dict +-''' ++ returned: success ++ type: list ++ elements: dict ++""" + + import logging + import os +@@ -106,7 +124,8 @@ try: + from blivet3.size import Size + from blivet3.udev import trigger + from blivet3.util import set_up_logging +- BLIVET_PACKAGE = 'blivet3' ++ ++ BLIVET_PACKAGE = "blivet3" + except ImportError: + LIB_IMP_ERR3 = traceback.format_exc() + try: +@@ -119,7 +138,8 @@ except ImportError: + from blivet.size import Size + from blivet.udev import trigger + from blivet.util import set_up_logging +- BLIVET_PACKAGE = 'blivet' ++ ++ BLIVET_PACKAGE = "blivet" + except ImportError: + LIB_IMP_ERR = traceback.format_exc() + +@@ -135,23 +155,23 @@ MAX_TRIM_PERCENT = 2 + + use_partitions = None # create partitions on pool backing device disks? + disklabel_type = None # user-specified disklabel type +-safe_mode = None # do not remove any existing devices or formatting ++safe_mode = None # do not remove any existing devices or formatting + pool_defaults = dict() + volume_defaults = dict() + + + def find_duplicate_names(dicts): +- """ Return a list of names that appear more than once in a list of dicts. ++ """Return a list of names that appear more than once in a list of dicts. + +- Items can be a list of any dicts with a 'name' key; that's all we're +- looking at. """ ++ Items can be a list of any dicts with a 'name' key; that's all we're ++ looking at.""" + names = list() + duplicates = list() + for item in dicts: +- if item['name'] in names and item['name'] not in duplicates: +- duplicates.append(item['name']) ++ if item["name"] in names and item["name"] not in duplicates: ++ duplicates.append(item["name"]) + else: +- names.append(item['name']) ++ names.append(item["name"]) + + return duplicates + +@@ -177,41 +197,54 @@ class BlivetBase(object): + global safe_mode + ret = device + # Make sure to handle adjusting both existing stacks and future stacks. 
+- if device == device.raw_device and self._spec_dict['encryption']: ++ if device == device.raw_device and self._spec_dict["encryption"]: + # add luks + luks_name = "luks-%s" % device._name +- if safe_mode and (device.original_format.type is not None or +- device.original_format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to adding encryption" % +- device._name) ++ if safe_mode and ( ++ device.original_format.type is not None ++ or device.original_format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on device '%s' in safe mode due to adding encryption" ++ % device._name ++ ) + if not device.format.exists: + fmt = device.format + else: + fmt = get_format(None) + +- self._blivet.format_device(device, +- get_format("luks", +- name=luks_name, +- cipher=self._spec_dict.get('encryption_cipher'), +- key_size=self._spec_dict.get('encryption_key_size'), +- luks_version=self._spec_dict.get('encryption_luks_version'), +- passphrase=self._spec_dict.get('encryption_password') or None, +- key_file=self._spec_dict.get('encryption_key') or None)) ++ self._blivet.format_device( ++ device, ++ get_format( ++ "luks", ++ name=luks_name, ++ cipher=self._spec_dict.get("encryption_cipher"), ++ key_size=self._spec_dict.get("encryption_key_size"), ++ luks_version=self._spec_dict.get("encryption_luks_version"), ++ passphrase=self._spec_dict.get("encryption_password") or None, ++ key_file=self._spec_dict.get("encryption_key") or None, ++ ), ++ ) + + if not device.format.has_key: +- raise BlivetAnsibleError("encrypted %s '%s' missing key/password" % (self._type, self._spec_dict['name'])) ++ raise BlivetAnsibleError( ++ "encrypted %s '%s' missing key/password" ++ % (self._type, self._spec_dict["name"]) ++ ) + +- luks_device = devices.LUKSDevice(luks_name, +- fmt=fmt, +- parents=[device]) ++ luks_device = devices.LUKSDevice(luks_name, fmt=fmt, 
parents=[device]) + self._blivet.create_device(luks_device) + ret = luks_device +- elif device != device.raw_device and not self._spec_dict['encryption']: ++ elif device != device.raw_device and not self._spec_dict["encryption"]: + # remove luks +- if safe_mode and (device.original_format.type is not None or +- device.original_format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to encryption removal" % +- device._name) ++ if safe_mode and ( ++ device.original_format.type is not None ++ or device.original_format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on device '%s' in safe mode due to encryption removal" ++ % device._name ++ ) + if not device.format.exists: + fmt = device.format + else: +@@ -240,12 +273,21 @@ class BlivetBase(object): + requested_spares = self._spec_dict.get("raid_spare_count") + + if requested_actives is not None and requested_spares is not None: +- if (requested_actives + requested_spares != len(members) or +- requested_actives < 0 or requested_spares < 0): +- raise BlivetAnsibleError("failed to set up '%s': cannot create RAID " +- "with %s members (%s active and %s spare)" +- % (self._spec_dict["name"], len(members), +- requested_actives, requested_spares)) ++ if ( ++ requested_actives + requested_spares != len(members) ++ or requested_actives < 0 ++ or requested_spares < 0 ++ ): ++ raise BlivetAnsibleError( ++ "failed to set up '%s': cannot create RAID " ++ "with %s members (%s active and %s spare)" ++ % ( ++ self._spec_dict["name"], ++ len(members), ++ requested_actives, ++ requested_spares, ++ ) ++ ) + + if requested_actives is not None: + active_count = requested_actives +@@ -264,16 +306,20 @@ class BlivetBase(object): + raise BlivetAnsibleError("chunk size must be multiple of 4 KiB") + + try: +- raid_array = self._blivet.new_mdarray(name=raid_name, +- level=self._spec_dict["raid_level"], +- 
member_devices=active_count, +- total_devices=len(members), +- parents=members, +- chunk_size=chunk_size, +- metadata_version=self._spec_dict.get("raid_metadata_version"), +- fmt=self._get_format()) ++ raid_array = self._blivet.new_mdarray( ++ name=raid_name, ++ level=self._spec_dict["raid_level"], ++ member_devices=active_count, ++ total_devices=len(members), ++ parents=members, ++ chunk_size=chunk_size, ++ metadata_version=self._spec_dict.get("raid_metadata_version"), ++ fmt=self._get_format(), ++ ) + except ValueError as e: +- raise BlivetAnsibleError("cannot create RAID '%s': %s" % (raid_name, str(e))) ++ raise BlivetAnsibleError( ++ "cannot create RAID '%s': %s" % (raid_name, str(e)) ++ ) + + return raid_array + +@@ -298,17 +344,18 @@ class BlivetVolume(BlivetBase): + if self.__class__.blivet_device_class is not None: + packages.extend(self.__class__.blivet_device_class._packages) + +- fmt = get_format(self._volume.get('fs_type')) ++ fmt = get_format(self._volume.get("fs_type")) + packages.extend(fmt.packages) +- if self._volume.get('encryption'): +- packages.extend(get_format('luks').packages) ++ if self._volume.get("encryption"): ++ packages.extend(get_format("luks").packages) + return packages + + @property + def ultimately_present(self): + """ Should this volume be present when we are finished? """ +- return (self._volume.get('state', 'present') == 'present' and +- (self._blivet_pool is None or self._blivet_pool.ultimately_present)) ++ return self._volume.get("state", "present") == "present" and ( ++ self._blivet_pool is None or self._blivet_pool.ultimately_present ++ ) + + def _type_check(self): # pylint: disable=no-self-use + """ Is self._device of the correct type? """ +@@ -316,7 +363,7 @@ class BlivetVolume(BlivetBase): + + def _get_device_id(self): + """ Return an identifier by which to try looking the volume up. 
""" +- return self._volume['name'] ++ return self._volume["name"] + + def _look_up_device(self): + """ Try to look up this volume in blivet's device tree. """ +@@ -331,14 +378,14 @@ class BlivetVolume(BlivetBase): + if device is None: + return + +- if device.format.type == 'luks': ++ if device.format.type == "luks": + # XXX If we have no key we will always re-encrypt. +- device.format._key_file = self._volume.get('encryption_key') +- device.format.passphrase = self._volume.get('encryption_password') ++ device.format._key_file = self._volume.get("encryption_key") ++ device.format.passphrase = self._volume.get("encryption_password") + + # set up the original format as well since it'll get used for processing +- device.original_format._key_file = self._volume.get('encryption_key') +- device.original_format.passphrase = self._volume.get('encryption_password') ++ device.original_format._key_file = self._volume.get("encryption_key") ++ device.original_format.passphrase = self._volume.get("encryption_password") + if device.isleaf: + self._blivet.populate() + +@@ -361,26 +408,31 @@ class BlivetVolume(BlivetBase): + elif encrypted: + luks_fmt = self._device.format + +- if param_name == 'size': +- self._volume['size'] = int(self._device.size.convert_to()) +- elif param_name == 'fs_type' and (self._device.format.type or self._device.format.name != get_format(None).name): +- self._volume['fs_type'] = self._device.format.type +- elif param_name == 'fs_label': +- self._volume['fs_label'] = getattr(self._device.format, 'label', "") or "" +- elif param_name == 'mount_point': +- self._volume['mount_point'] = getattr(self._device.format, 'mountpoint', None) +- elif param_name == 'disks': +- self._volume['disks'] = [d.name for d in self._device.disks] +- elif param_name == 'encryption': +- self._volume['encryption'] = encrypted +- elif param_name == 'encryption_key_size' and encrypted: +- self._volume['encryption_key_size'] = luks_fmt.key_size +- elif param_name == 
'encryption_key_file' and encrypted: +- self._volume['encryption_key_file'] = luks_fmt.key_file +- elif param_name == 'encryption_cipher' and encrypted: +- self._volume['encryption_cipher'] = luks_fmt.cipher +- elif param_name == 'encryption_luks_version' and encrypted: +- self._volume['encryption_luks_version'] = luks_fmt.luks_version ++ if param_name == "size": ++ self._volume["size"] = int(self._device.size.convert_to()) ++ elif param_name == "fs_type" and ( ++ self._device.format.type ++ or self._device.format.name != get_format(None).name ++ ): ++ self._volume["fs_type"] = self._device.format.type ++ elif param_name == "fs_label": ++ self._volume["fs_label"] = getattr(self._device.format, "label", "") or "" ++ elif param_name == "mount_point": ++ self._volume["mount_point"] = getattr( ++ self._device.format, "mountpoint", None ++ ) ++ elif param_name == "disks": ++ self._volume["disks"] = [d.name for d in self._device.disks] ++ elif param_name == "encryption": ++ self._volume["encryption"] = encrypted ++ elif param_name == "encryption_key_size" and encrypted: ++ self._volume["encryption_key_size"] = luks_fmt.key_size ++ elif param_name == "encryption_key_file" and encrypted: ++ self._volume["encryption_key_file"] = luks_fmt.key_file ++ elif param_name == "encryption_cipher" and encrypted: ++ self._volume["encryption_cipher"] = luks_fmt.cipher ++ elif param_name == "encryption_luks_version" and encrypted: ++ self._volume["encryption_luks_version"] = luks_fmt.luks_version + else: + return False + +@@ -392,7 +444,7 @@ class BlivetVolume(BlivetBase): + if name in self._volume: + continue + +- default = None if default in ('none', 'None', 'null') else default ++ default = None if default in ("none", "None", "null") else default + + if self._device: + # Apply values from the device if it already exists. +@@ -403,12 +455,17 @@ class BlivetVolume(BlivetBase): + + def _get_format(self): + """ Return a blivet.formats.DeviceFormat instance for this volume. 
""" +- fmt = get_format(self._volume['fs_type'], +- mountpoint=self._volume.get('mount_point'), +- label=self._volume['fs_label'], +- create_options=self._volume['fs_create_options']) ++ fmt = get_format( ++ self._volume["fs_type"], ++ mountpoint=self._volume.get("mount_point"), ++ label=self._volume["fs_label"], ++ create_options=self._volume["fs_create_options"], ++ ) + if not fmt.supported or not fmt.formattable: +- raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type']) ++ raise BlivetAnsibleError( ++ "required tools for file system '%s' are missing" ++ % self._volume["fs_type"] ++ ) + + return fmt + +@@ -422,9 +479,9 @@ class BlivetVolume(BlivetBase): + return + + # save device identifiers for use by the role +- self._volume['_device'] = self._device.path +- self._volume['_raw_device'] = self._device.raw_device.path +- self._volume['_mount_id'] = self._device.fstab_spec ++ self._volume["_device"] = self._device.path ++ self._volume["_raw_device"] = self._device.raw_device.path ++ self._volume["_mount_id"] = self._device.fstab_spec + + # schedule removal of this device and any descendant devices + self._blivet.devicetree.recursive_remove(self._device.raw_device) +@@ -435,9 +492,12 @@ class BlivetVolume(BlivetBase): + def _resize(self): + """ Schedule actions as needed to ensure the device has the desired size. 
""" + try: +- size = Size(self._volume['size']) ++ size = Size(self._volume["size"]) + except Exception: +- raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size'])) ++ raise BlivetAnsibleError( ++ "invalid size specification for volume '%s': '%s'" ++ % (self._volume["name"], self._volume["size"]) ++ ) + + if size and self._device.size != size: + try: +@@ -448,28 +508,44 @@ class BlivetVolume(BlivetBase): + if not self._device.resizable: + return + +- trim_percent = (1.0 - float(self._device.max_size / size))*100 +- log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent) ++ trim_percent = (1.0 - float(self._device.max_size / size)) * 100 ++ log.debug( ++ "resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent ++ ) + if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT: +- log.info("adjusting %s resize target from %s to %s to fit in free space", +- self._volume['name'], +- size, +- self._device.max_size) ++ log.info( ++ "adjusting %s resize target from %s to %s to fit in free space", ++ self._volume["name"], ++ size, ++ self._device.max_size, ++ ) + size = self._device.max_size + if size == self._device.size: + return + + if not self._device.min_size <= size <= self._device.max_size: +- raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size)) ++ raise BlivetAnsibleError( ++ "volume '%s' cannot be resized to '%s'" ++ % (self._volume["name"], size) ++ ) + + try: + self._blivet.resize_device(self._device, size) + except ValueError as e: +- raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name, +- self._device.size, +- size, str(e))) +- elif size and self._device.exists and self._device.size != size and not self._device.resizable: +- raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s" % (self._device.name, self._device.size, size)) ++ raise 
BlivetAnsibleError( ++ "volume '%s' cannot be resized from %s to %s: %s" ++ % (self._device.name, self._device.size, size, str(e)) ++ ) ++ elif ( ++ size ++ and self._device.exists ++ and self._device.size != size ++ and not self._device.resizable ++ ): ++ raise BlivetAnsibleError( ++ "volume '%s' cannot be resized from %s to %s" ++ % (self._device.name, self._device.size, size) ++ ) + + def _reformat(self): + """ Schedule actions as needed to ensure the volume is formatted as specified. """ +@@ -477,10 +553,18 @@ class BlivetVolume(BlivetBase): + if self._device.format.type == fmt.type: + return + +- if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) +- +- if self._device.format.status and (self._device.format.mountable or self._device.format.type == "swap"): ++ if safe_mode and ( ++ self._device.format.type is not None ++ or self._device.format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on volume '%s' in safe mode" ++ % self._volume["name"] ++ ) ++ ++ if self._device.format.status and ( ++ self._device.format.mountable or self._device.format.type == "swap" ++ ): + self._device.format.teardown() + if not self._device.isleaf: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) +@@ -503,7 +587,9 @@ class BlivetVolume(BlivetBase): + + # at this point we should have a blivet.devices.StorageDevice instance + if self._device is None: +- raise BlivetAnsibleError("failed to look up or create device '%s'" % self._volume['name']) ++ raise BlivetAnsibleError( ++ "failed to look up or create device '%s'" % self._volume["name"] ++ ) + + self._manage_encryption() + +@@ -511,24 +597,31 @@ class BlivetVolume(BlivetBase): + if self._device.raw_device.exists: + self._reformat() + +- if self.ultimately_present and 
self._volume['mount_point'] and not self._device.format.mountable: +- raise BlivetAnsibleError("volume '%s' has a mount point but no mountable file system" % self._volume['name']) ++ if ( ++ self.ultimately_present ++ and self._volume["mount_point"] ++ and not self._device.format.mountable ++ ): ++ raise BlivetAnsibleError( ++ "volume '%s' has a mount point but no mountable file system" ++ % self._volume["name"] ++ ) + + # schedule resize if appropriate +- if self._device.raw_device.exists and self._volume['size']: ++ if self._device.raw_device.exists and self._volume["size"]: + self._resize() + + # save device identifiers for use by the role +- self._volume['_device'] = self._device.path +- self._volume['_raw_device'] = self._device.raw_device.path +- self._volume['_mount_id'] = self._device.fstab_spec ++ self._volume["_device"] = self._device.path ++ self._volume["_raw_device"] = self._device.raw_device.path ++ self._volume["_mount_id"] = self._device.fstab_spec + + + class BlivetDiskVolume(BlivetVolume): + blivet_device_class = devices.DiskDevice + + def _get_device_id(self): +- return self._volume['disks'][0] ++ return self._volume["disks"][0] + + def _type_check(self): + return self._device.raw_device.is_disk +@@ -536,7 +629,7 @@ class BlivetDiskVolume(BlivetVolume): + def _get_format(self): + fmt = super(BlivetDiskVolume, self)._get_format() + # pass -F to mke2fs on whole disks in RHEL7 +- mkfs_options = diskvolume_mkfs_option_map.get(self._volume['fs_type']) ++ mkfs_options = diskvolume_mkfs_option_map.get(self._volume["fs_type"]) + if mkfs_options: + if fmt.create_options: + fmt.create_options += " " +@@ -552,23 +645,31 @@ class BlivetDiskVolume(BlivetVolume): + def _look_up_device(self): + super(BlivetDiskVolume, self)._look_up_device() + if not self._get_device_id(): +- raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name']) +- elif not isinstance(self._volume['disks'], list): ++ raise BlivetAnsibleError( ++ "no disks 
specified for volume '%s'" % self._volume["name"] ++ ) ++ elif not isinstance(self._volume["disks"], list): + raise BlivetAnsibleError("volume disks must be specified as a list") + + if self._device is None: +- raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks'])) ++ raise BlivetAnsibleError( ++ "unable to resolve disk specified for volume '%s' (%s)" ++ % (self._volume["name"], self._volume["disks"]) ++ ) + + + class BlivetPartitionVolume(BlivetVolume): + blivet_device_class = devices.PartitionDevice + + def _type_check(self): +- return self._device.raw_device.type == 'partition' ++ return self._device.raw_device.type == "partition" + + def _get_device_id(self): + device_id = None +- if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1: ++ if ( ++ self._blivet_pool._disks[0].partitioned ++ and len(self._blivet_pool._disks[0].children) == 1 ++ ): + device_id = self._blivet_pool._disks[0].children[0].name + + return device_id +@@ -583,22 +684,29 @@ class BlivetPartitionVolume(BlivetVolume): + if self._blivet_pool: + parent = self._blivet_pool._device + else: +- parent = self._blivet.devicetree.resolve_device(self._volume['pool']) ++ parent = self._blivet.devicetree.resolve_device(self._volume["pool"]) + + if parent is None: +- raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "failed to find pool '%s' for volume '%s'" ++ % (self._blivet_pool["name"], self._volume["name"]) ++ ) + + size = Size("256 MiB") + try: +- device = self._blivet.new_partition(parents=[parent], size=size, grow=True, fmt=self._get_format()) ++ device = self._blivet.new_partition( ++ parents=[parent], size=size, grow=True, fmt=self._get_format() ++ ) + except Exception: +- raise BlivetAnsibleError("failed set up volume '%s'" % self._volume['name']) ++ raise 
BlivetAnsibleError("failed set up volume '%s'" % self._volume["name"]) + + self._blivet.create_device(device) + try: + do_partitioning(self._blivet) + except Exception: +- raise BlivetAnsibleError("partition allocation failed for volume '%s'" % self._volume['name']) ++ raise BlivetAnsibleError( ++ "partition allocation failed for volume '%s'" % self._volume["name"] ++ ) + + self._device = device + +@@ -609,7 +717,7 @@ class BlivetLVMVolume(BlivetVolume): + def _get_device_id(self): + if not self._blivet_pool._device: + return None +- return "%s-%s" % (self._blivet_pool._device.name, self._volume['name']) ++ return "%s-%s" % (self._blivet_pool._device.name, self._volume["name"]) + + def _create(self): + if self._device: +@@ -617,51 +725,75 @@ class BlivetLVMVolume(BlivetVolume): + + parent = self._blivet_pool._device + if parent is None: +- raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "failed to find pool '%s' for volume '%s'" ++ % (self._blivet_pool["name"], self._volume["name"]) ++ ) + + try: +- size = Size(self._volume['size']) ++ size = Size(self._volume["size"]) + except Exception: +- raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "invalid size '%s' specified for volume '%s'" ++ % (self._volume["size"], self._volume["name"]) ++ ) + + fmt = self._get_format() +- trim_percent = (1.0 - float(parent.free_space / size))*100 ++ trim_percent = (1.0 - float(parent.free_space / size)) * 100 + log.debug("size: %s ; %s", size, trim_percent) + if size > parent.free_space: + if trim_percent > MAX_TRIM_PERCENT: +- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" +- % (size, parent.name, parent.free_space)) ++ raise BlivetAnsibleError( ++ "specified size for volume '%s' exceeds available space in pool '%s' (%s)" ++ % 
(size, parent.name, parent.free_space) ++ ) + else: +- log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'], +- size, +- parent.free_space, +- parent.name) ++ log.info( ++ "adjusting %s size from %s to %s to fit in %s free space", ++ self._volume["name"], ++ size, ++ parent.free_space, ++ parent.name, ++ ) + size = parent.free_space + + try: +- device = self._blivet.new_lv(name=self._volume['name'], +- parents=[parent], size=size, fmt=fmt) ++ device = self._blivet.new_lv( ++ name=self._volume["name"], parents=[parent], size=size, fmt=fmt ++ ) + except Exception as e: +- raise BlivetAnsibleError("failed to set up volume '%s': %s" % (self._volume['name'], str(e))) ++ raise BlivetAnsibleError( ++ "failed to set up volume '%s': %s" % (self._volume["name"], str(e)) ++ ) + + self._blivet.create_device(device) + self._device = device + + + class BlivetMDRaidVolume(BlivetVolume): +- +- def _process_device_numbers(self, members_count, requested_actives, requested_spares): ++ def _process_device_numbers( ++ self, members_count, requested_actives, requested_spares ++ ): + + active_count = members_count + spare_count = 0 + + if requested_actives is not None and requested_spares is not None: +- if (requested_actives + requested_spares != members_count or +- requested_actives < 0 or requested_spares < 0): +- raise BlivetAnsibleError("failed to set up volume '%s': cannot create RAID " +- "with %s members (%s active and %s spare)" +- % (self._volume['name'], members_count, +- requested_actives, requested_spares)) ++ if ( ++ requested_actives + requested_spares != members_count ++ or requested_actives < 0 ++ or requested_spares < 0 ++ ): ++ raise BlivetAnsibleError( ++ "failed to set up volume '%s': cannot create RAID " ++ "with %s members (%s active and %s spare)" ++ % ( ++ self._volume["name"], ++ members_count, ++ requested_actives, ++ requested_spares, ++ ) ++ ) + + if requested_actives is not None: + active_count = requested_actives +@@ 
-685,7 +817,9 @@ class BlivetMDRaidVolume(BlivetVolume): + self._blivet.format_device(member_disk, label) + + # create new partition +- member = self._blivet.new_partition(parents=[member_disk], grow=True) ++ member = self._blivet.new_partition( ++ parents=[member_disk], grow=True ++ ) + self._blivet.create_device(member) + self._blivet.format_device(member, fmt=get_format("mdmember")) + members.append(member) +@@ -697,16 +831,16 @@ class BlivetMDRaidVolume(BlivetVolume): + + def _update_from_device(self, param_name): + """ Return True if param_name's value was retrieved from a looked-up device. """ +- if param_name == 'raid_level': +- self._volume['raid_level'] = self._device.level.name +- elif param_name == 'raid_chunk_size': +- self._volume['raid_chunk_size'] = str(self._device.chunk_size) +- elif param_name == 'raid_device_count': +- self._volume['raid_device_count'] = self._device.member_devices +- elif param_name == 'raid_spare_count': +- self._volume['raid_spare_count'] = self._device.spares +- elif param_name == 'raid_metadata_version': +- self._volume['raid_metadata_version'] = self._device.metadata_version ++ if param_name == "raid_level": ++ self._volume["raid_level"] = self._device.level.name ++ elif param_name == "raid_chunk_size": ++ self._volume["raid_chunk_size"] = str(self._device.chunk_size) ++ elif param_name == "raid_device_count": ++ self._volume["raid_device_count"] = self._device.member_devices ++ elif param_name == "raid_spare_count": ++ self._volume["raid_spare_count"] = self._device.spares ++ elif param_name == "raid_metadata_version": ++ self._volume["raid_metadata_version"] = self._device.metadata_version + else: + return super(BlivetMDRaidVolume, self)._update_from_device(param_name) + +@@ -728,7 +862,10 @@ class BlivetMDRaidVolume(BlivetVolume): + try: + do_partitioning(self._blivet) + except Exception as e: +- raise BlivetAnsibleError("failed to allocate partitions for mdraid '%s': %s" % (self._volume['name'], str(e))) ++ raise 
BlivetAnsibleError( ++ "failed to allocate partitions for mdraid '%s': %s" ++ % (self._volume["name"], str(e)) ++ ) + + raid_array = self._new_mdarray(members) + +@@ -764,16 +901,20 @@ _BLIVET_VOLUME_TYPES = { + "disk": BlivetDiskVolume, + "lvm": BlivetLVMVolume, + "partition": BlivetPartitionVolume, +- "raid": BlivetMDRaidVolume ++ "raid": BlivetMDRaidVolume, + } + + + def _get_blivet_volume(blivet_obj, volume, bpool=None): + """ Return a BlivetVolume instance appropriate for the volume dict. """ + global volume_defaults +- volume_type = volume.get('type', bpool._pool['type'] if bpool else volume_defaults['type']) ++ volume_type = volume.get( ++ "type", bpool._pool["type"] if bpool else volume_defaults["type"] ++ ) + if volume_type not in _BLIVET_VOLUME_TYPES: +- raise BlivetAnsibleError("Volume '%s' has unknown type '%s'" % (volume['name'], volume_type)) ++ raise BlivetAnsibleError( ++ "Volume '%s' has unknown type '%s'" % (volume["name"], volume_type) ++ ) + + return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool) + +@@ -796,19 +937,19 @@ class BlivetPool(BlivetBase): + if self.ultimately_present and self.__class__.blivet_device_class is not None: + packages.extend(self.__class__.blivet_device_class._packages) + +- if self._pool.get('encryption'): +- packages.extend(get_format('luks').packages) ++ if self._pool.get("encryption"): ++ packages.extend(get_format("luks").packages) + + return packages + + @property + def ultimately_present(self): + """ Should this pool be present when we are finished? 
""" +- return self._pool.get('state', 'present') == 'present' ++ return self._pool.get("state", "present") == "present" + + @property + def _is_raid(self): +- return self._pool.get('raid_level') not in [None, "null", ""] ++ return self._pool.get("raid_level") not in [None, "null", ""] + + def _member_management_is_destructive(self): + return False +@@ -849,25 +990,30 @@ class BlivetPool(BlivetBase): + if self._disks: + return + +- if not self._device and not self._pool['disks']: +- raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) +- elif not isinstance(self._pool['disks'], list): ++ if not self._device and not self._pool["disks"]: ++ raise BlivetAnsibleError( ++ "no disks specified for pool '%s'" % self._pool["name"] ++ ) ++ elif not isinstance(self._pool["disks"], list): + raise BlivetAnsibleError("pool disks must be specified as a list") + + disks = list() +- for spec in self._pool['disks']: ++ for spec in self._pool["disks"]: + device = self._blivet.devicetree.resolve_device(spec) + if device is not None: # XXX fail if any disk isn't resolved? + disks.append(device) + +- if self._pool['disks'] and not self._device and not disks: +- raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks'])) ++ if self._pool["disks"] and not self._device and not disks: ++ raise BlivetAnsibleError( ++ "unable to resolve any disks specified for pool '%s' (%s)" ++ % (self._pool["name"], self._pool["disks"]) ++ ) + + self._disks = disks + + def _look_up_device(self): + """ Look up the pool in blivet's device tree. """ +- device = self._blivet.devicetree.resolve_device(self._pool['name']) ++ device = self._blivet.devicetree.resolve_device(self._pool["name"]) + if device is None: + return + +@@ -895,45 +1041,62 @@ class BlivetPool(BlivetBase): + """ Return True if param_name's value was retrieved from a looked-up device. 
""" + # We wouldn't have the pool device if the member devices weren't unlocked, so we do not + # have to consider the case where the devices are unlocked like we do for volumes. +- encrypted = bool(self._device.parents) and all("luks" in d.type for d in self._device.parents) +- raid = len(self._device.parents) == 1 and hasattr(self._device.parents[0].raw_device, 'level') ++ encrypted = bool(self._device.parents) and all( ++ "luks" in d.type for d in self._device.parents ++ ) ++ raid = len(self._device.parents) == 1 and hasattr( ++ self._device.parents[0].raw_device, "level" ++ ) + log.debug("BlivetPool._update_from_device: %s", self._device) + +- if param_name == 'disks': +- self._pool['disks'] = [d.name for d in self._device.disks] +- elif param_name == 'encryption': +- self._pool['encryption'] = encrypted +- elif param_name == 'encryption_key_size' and encrypted: +- self._pool['encryption_key_size'] = self._device.parents[0].parents[0].format.key_size +- elif param_name == 'encryption_key_file' and encrypted: +- self._pool['encryption_key_file'] = self._device.parents[0].parents[0].format.key_file +- elif param_name == 'encryption_cipher' and encrypted: +- self._pool['encryption_cipher'] = self._device.parents[0].parents[0].format.cipher +- elif param_name == 'encryption_luks_version' and encrypted: +- self._pool['encryption_luks_version'] = self._device.parents[0].parents[0].format.luks_version +- elif param_name == 'raid_level' and raid: +- self._pool['raid_level'] = self._device.parents[0].raw_device.level.name +- elif param_name == 'raid_chunk_size' and raid: +- self._pool['raid_chunk_size'] = str(self._device.parents[0].raw_device.chunk_size) +- elif param_name == 'raid_device_count' and raid: +- self._pool['raid_device_count'] = self._device.parents[0].raw_device.member_devices +- elif param_name == 'raid_spare_count' and raid: +- self._pool['raid_spare_count'] = self._device.parents[0].raw_device.spares +- elif param_name == 'raid_metadata_version' and 
raid: +- self._pool['raid_metadata_version'] = self._device.parents[0].raw_device.metadata_version ++ if param_name == "disks": ++ self._pool["disks"] = [d.name for d in self._device.disks] ++ elif param_name == "encryption": ++ self._pool["encryption"] = encrypted ++ elif param_name == "encryption_key_size" and encrypted: ++ self._pool["encryption_key_size"] = ( ++ self._device.parents[0].parents[0].format.key_size ++ ) ++ elif param_name == "encryption_key_file" and encrypted: ++ self._pool["encryption_key_file"] = ( ++ self._device.parents[0].parents[0].format.key_file ++ ) ++ elif param_name == "encryption_cipher" and encrypted: ++ self._pool["encryption_cipher"] = ( ++ self._device.parents[0].parents[0].format.cipher ++ ) ++ elif param_name == "encryption_luks_version" and encrypted: ++ self._pool["encryption_luks_version"] = ( ++ self._device.parents[0].parents[0].format.luks_version ++ ) ++ elif param_name == "raid_level" and raid: ++ self._pool["raid_level"] = self._device.parents[0].raw_device.level.name ++ elif param_name == "raid_chunk_size" and raid: ++ self._pool["raid_chunk_size"] = str( ++ self._device.parents[0].raw_device.chunk_size ++ ) ++ elif param_name == "raid_device_count" and raid: ++ self._pool["raid_device_count"] = self._device.parents[ ++ 0 ++ ].raw_device.member_devices ++ elif param_name == "raid_spare_count" and raid: ++ self._pool["raid_spare_count"] = self._device.parents[0].raw_device.spares ++ elif param_name == "raid_metadata_version" and raid: ++ self._pool["raid_metadata_version"] = self._device.parents[ ++ 0 ++ ].raw_device.metadata_version + else: + return False + + return True + +- + def _apply_defaults(self): + global pool_defaults + for name, default in pool_defaults.items(): + if name in self._pool: + continue + +- default = None if default in ('none', 'None', 'null') else default ++ default = None if default in ("none", "None", "null") else default + + if self._device: + if not self._update_from_device(name): +@@ -948,14 
+1111,19 @@ class BlivetPool(BlivetBase): + for disk in self._disks: + if not disk.isleaf or disk.format.type is not None: + if safe_mode: +- raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" ++ % (disk.name, self._pool["name"]) ++ ) + else: + self._blivet.devicetree.recursive_remove(disk) + + if use_partitions: + label = get_format("disklabel", device=disk.path) + self._blivet.format_device(disk, label) +- member = self._blivet.new_partition(parents=[disk], size=Size("256MiB"), grow=True) ++ member = self._blivet.new_partition( ++ parents=[disk], size=Size("256MiB"), grow=True ++ ) + self._blivet.create_device(member) + else: + member = disk +@@ -966,9 +1134,8 @@ class BlivetPool(BlivetBase): + self._blivet.format_device(member, self._get_format()) + members.append(member) + +- + if self._is_raid: +- raid_name = "%s-1" % self._pool['name'] ++ raid_name = "%s-1" % self._pool["name"] + + raid_array = self._new_mdarray(members, raid_name=raid_name) + +@@ -981,14 +1148,15 @@ class BlivetPool(BlivetBase): + try: + do_partitioning(self._blivet) + except Exception: +- raise BlivetAnsibleError("failed to allocate partitions for pool '%s'" % self._pool['name']) ++ raise BlivetAnsibleError( ++ "failed to allocate partitions for pool '%s'" % self._pool["name"] ++ ) + + return result + +- + def _get_volumes(self): + """ Set up BlivetVolume instances for this pool's volumes. 
""" +- for volume in self._pool.get('volumes', []): ++ for volume in self._pool.get("volumes", []): + bvolume = _get_blivet_volume(self._blivet, volume, self) + self._blivet_volumes.append(bvolume) + +@@ -1013,7 +1181,10 @@ class BlivetPool(BlivetBase): + return + elif self._member_management_is_destructive(): + if safe_mode: +- raise BlivetAnsibleError("cannot remove and recreate existing pool '%s' in safe mode" % self._pool['name']) ++ raise BlivetAnsibleError( ++ "cannot remove and recreate existing pool '%s' in safe mode" ++ % self._pool["name"] ++ ) + else: + self._destroy() + +@@ -1031,15 +1202,22 @@ class BlivetPartitionPool(BlivetPool): + self._device = self._disks[0] + + def _create(self): +- if self._device.format.type != "disklabel" or \ +- (disklabel_type and self._device.format.label_type != disklabel_type): ++ if self._device.format.type != "disklabel" or ( ++ disklabel_type and self._device.format.label_type != disklabel_type ++ ): + if safe_mode: +- raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' " +- "(pool '%s') in safe mode" % (self._device.name, self._pool['name'])) ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting and/or devices on disk '%s' " ++ "(pool '%s') in safe mode" % (self._device.name, self._pool["name"]) ++ ) + else: +- self._blivet.devicetree.recursive_remove(self._device, remove_device=False) ++ self._blivet.devicetree.recursive_remove( ++ self._device, remove_device=False ++ ) + +- label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) ++ label = get_format( ++ "disklabel", device=self._device.path, label_type=disklabel_type ++ ) + self._blivet.format_device(self._device, label) + + +@@ -1053,9 +1231,13 @@ class BlivetLVMPool(BlivetPool): + if self._device is None: + return False + +- if self._pool['encryption'] and not all(m.encrypted for m in self._device.parents): ++ if self._pool["encryption"] and not all( ++ m.encrypted for m in 
self._device.parents ++ ): + return True +- elif not self._pool['encryption'] and any(m.encrypted for m in self._device.parents): ++ elif not self._pool["encryption"] and any( ++ m.encrypted for m in self._device.parents ++ ): + return True + + return False +@@ -1080,49 +1262,50 @@ class BlivetLVMPool(BlivetPool): + + members = self._manage_encryption(self._create_members()) + try: +- pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members) ++ pool_device = self._blivet.new_vg(name=self._pool["name"], parents=members) + except Exception as e: +- raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e))) ++ raise BlivetAnsibleError( ++ "failed to set up pool '%s': %s" % (self._pool["name"], str(e)) ++ ) + + self._blivet.create_device(pool_device) + self._device = pool_device + + +-_BLIVET_POOL_TYPES = { +- "partition": BlivetPartitionPool, +- "lvm": BlivetLVMPool +-} ++_BLIVET_POOL_TYPES = {"partition": BlivetPartitionPool, "lvm": BlivetLVMPool} + + + def _get_blivet_pool(blivet_obj, pool): + """ Return an appropriate BlivetPool instance for the pool dict. """ +- if 'type' not in pool: ++ if "type" not in pool: + global pool_defaults +- pool['type'] = pool_defaults['type'] ++ pool["type"] = pool_defaults["type"] + +- if pool['type'] not in _BLIVET_POOL_TYPES: +- raise BlivetAnsibleError("Pool '%s' has unknown type '%s'" % (pool['name'], pool['type'])) ++ if pool["type"] not in _BLIVET_POOL_TYPES: ++ raise BlivetAnsibleError( ++ "Pool '%s' has unknown type '%s'" % (pool["name"], pool["type"]) ++ ) + +- return _BLIVET_POOL_TYPES[pool['type']](blivet_obj, pool) ++ return _BLIVET_POOL_TYPES[pool["type"]](blivet_obj, pool) + + + def manage_volume(b, volume): + """ Schedule actions as needed to manage a single standalone volume. 
""" + bvolume = _get_blivet_volume(b, volume) + bvolume.manage() +- volume['_device'] = bvolume._volume.get('_device', '') +- volume['_raw_device'] = bvolume._volume.get('_raw_device', '') +- volume['_mount_id'] = bvolume._volume.get('_mount_id', '') ++ volume["_device"] = bvolume._volume.get("_device", "") ++ volume["_raw_device"] = bvolume._volume.get("_raw_device", "") ++ volume["_mount_id"] = bvolume._volume.get("_mount_id", "") + + + def manage_pool(b, pool): + """ Schedule actions as needed to manage a single pool and its volumes. """ + bpool = _get_blivet_pool(b, pool) + bpool.manage() +- for (volume, bvolume) in zip(pool['volumes'], bpool._blivet_volumes): +- volume['_device'] = bvolume._volume.get('_device', '') +- volume['_raw_device'] = bvolume._volume.get('_raw_device', '') +- volume['_mount_id'] = bvolume._volume.get('_mount_id', '') ++ for (volume, bvolume) in zip(pool["volumes"], bpool._blivet_volumes): ++ volume["_device"] = bvolume._volume.get("_device", "") ++ volume["_raw_device"] = bvolume._volume.get("_raw_device", "") ++ volume["_mount_id"] = bvolume._volume.get("_mount_id", "") + + + class FSTab(object): +@@ -1141,7 +1324,7 @@ class FSTab(object): + if self._entries: + self.reset() + +- for line in open('/etc/fstab').readlines(): ++ for line in open("/etc/fstab").readlines(): + if line.lstrip().startswith("#"): + continue + +@@ -1150,23 +1333,27 @@ class FSTab(object): + continue + + device = self._blivet.devicetree.resolve_device(fields[0]) +- self._entries.append(dict(device_id=fields[0], +- device_path=getattr(device, 'path', None), +- fs_type=fields[2], +- mount_point=fields[1], +- mount_options=fields[3])) ++ self._entries.append( ++ dict( ++ device_id=fields[0], ++ device_path=getattr(device, "path", None), ++ fs_type=fields[2], ++ mount_point=fields[1], ++ mount_options=fields[3], ++ ) ++ ) + + + def get_mount_info(pools, volumes, actions, fstab): +- """ Return a list of argument dicts to pass to the mount module to manage mounts. 
++ """Return a list of argument dicts to pass to the mount module to manage mounts. + +- The overall approach is to remove existing mounts associated with file systems +- we are removing and those with changed mount points, re-adding them with the +- new mount point later. ++ The overall approach is to remove existing mounts associated with file systems ++ we are removing and those with changed mount points, re-adding them with the ++ new mount point later. + +- Removed mounts go directly into the mount_info list, which is the return value, +- while added/active mounts to a list that gets appended to the mount_info list +- at the end to ensure that removals happen first. ++ Removed mounts go directly into the mount_info list, which is the return value, ++ while added/active mounts to a list that gets appended to the mount_info list ++ at the end to ensure that removals happen first. + """ + mount_info = list() + mount_vols = list() +@@ -1174,33 +1361,50 @@ def get_mount_info(pools, volumes, actions, fstab): + # account for mounts removed by removing or reformatting volumes + if actions: + for action in actions: +- if action.is_destroy and action.is_format and action.format.type is not None: +- mount = fstab.lookup('device_path', action.device.path) ++ if ( ++ action.is_destroy ++ and action.is_format ++ and action.format.type is not None ++ ): ++ mount = fstab.lookup("device_path", action.device.path) + if mount is not None: +- mount_info.append({"src": mount['device_id'], "path": mount['mount_point'], +- 'state': 'absent', 'fstype': mount['fs_type']}) ++ mount_info.append( ++ { ++ "src": mount["device_id"], ++ "path": mount["mount_point"], ++ "state": "absent", ++ "fstype": mount["fs_type"], ++ } ++ ) + + def handle_new_mount(volume, fstab): + replace = None + mounted = False + +- mount = fstab.lookup('device_path', volume['_device']) +- if (volume['mount_point'] and volume['mount_point'].startswith('/')) \ +- or volume['fs_type'] == 'swap': ++ mount = 
fstab.lookup("device_path", volume["_device"]) ++ if (volume["mount_point"] and volume["mount_point"].startswith("/")) or volume[ ++ "fs_type" ++ ] == "swap": + mounted = True + + # handle removal of existing mounts of this volume +- if mount and mount['fs_type'] != 'swap' and mount['mount_point'] != volume['mount_point']: +- replace = dict(path=mount['mount_point'], state="absent") +- elif mount and mount['fs_type'] == 'swap': +- replace = dict(src=mount['device_id'], fstype="swap", path="none", state="absent") ++ if ( ++ mount ++ and mount["fs_type"] != "swap" ++ and mount["mount_point"] != volume["mount_point"] ++ ): ++ replace = dict(path=mount["mount_point"], state="absent") ++ elif mount and mount["fs_type"] == "swap": ++ replace = dict( ++ src=mount["device_id"], fstype="swap", path="none", state="absent" ++ ) + + return mounted, replace + + # account for mounts that we set up or are replacing in pools + for pool in pools: +- for volume in pool['volumes']: +- if pool['state'] == 'present' and volume['state'] == 'present': ++ for volume in pool["volumes"]: ++ if pool["state"] == "present" and volume["state"] == "present": + mounted, replace = handle_new_mount(volume, fstab) + if replace: + mount_info.append(replace) +@@ -1209,7 +1413,7 @@ def get_mount_info(pools, volumes, actions, fstab): + + # account for mounts that we set up or are replacing in standalone volumes + for volume in volumes: +- if volume['state'] == 'present': ++ if volume["state"] == "present": + mounted, replace = handle_new_mount(volume, fstab) + if replace: + mount_info.append(replace) +@@ -1217,13 +1421,19 @@ def get_mount_info(pools, volumes, actions, fstab): + mount_vols.append(volume) + + for volume in mount_vols: +- mount_info.append({'src': volume['_mount_id'], +- 'path': volume['mount_point'] if volume['fs_type'] != "swap" else "none", +- 'fstype': volume['fs_type'], +- 'opts': volume['mount_options'], +- 'dump': volume['mount_check'], +- 'passno': volume['mount_passno'], +- 
'state': 'mounted' if volume['fs_type'] != "swap" else "present"}) ++ mount_info.append( ++ { ++ "src": volume["_mount_id"], ++ "path": volume["mount_point"] ++ if volume["fs_type"] != "swap" ++ else "none", ++ "fstype": volume["fs_type"], ++ "opts": volume["mount_options"], ++ "dump": volume["mount_check"], ++ "passno": volume["mount_passno"], ++ "state": "mounted" if volume["fs_type"] != "swap" else "present", ++ } ++ ) + + return mount_info + +@@ -1231,15 +1441,19 @@ def get_mount_info(pools, volumes, actions, fstab): + def get_crypt_info(actions): + info = list() + for action in actions: +- if not (action.is_format and action.format.type == 'luks'): ++ if not (action.is_format and action.format.type == "luks"): + continue + +- info.append(dict(backing_device=action.device.path, +- name=action.format.map_name, +- password=action.format.key_file or '-', +- state='present' if action.is_create else 'absent')) ++ info.append( ++ dict( ++ backing_device=action.device.path, ++ name=action.format.map_name, ++ password=action.format.key_file or "-", ++ state="present" if action.is_create else "absent", ++ ) ++ ) + +- return sorted(info, key=lambda e: e['state']) ++ return sorted(info, key=lambda e: e["state"]) + + + def get_required_packages(b, pools, volumes): +@@ -1259,66 +1473,70 @@ def get_required_packages(b, pools, volumes): + + + def update_fstab_identifiers(b, pools, volumes): +- """ Update fstab device identifiers. ++ """Update fstab device identifiers. + +- This is to pick up new UUIDs for newly-formatted devices. ++ This is to pick up new UUIDs for newly-formatted devices. 
+ """ + all_volumes = volumes[:] + for pool in pools: +- if not pool['state'] == 'present': ++ if not pool["state"] == "present": + continue + +- all_volumes += pool['volumes'] ++ all_volumes += pool["volumes"] + + for volume in all_volumes: +- if volume['state'] == 'present': +- device = b.devicetree.resolve_device(volume['_mount_id']) +- if device is None and volume['encryption']: +- device = b.devicetree.resolve_device(volume['_raw_device']) ++ if volume["state"] == "present": ++ device = b.devicetree.resolve_device(volume["_mount_id"]) ++ if device is None and volume["encryption"]: ++ device = b.devicetree.resolve_device(volume["_raw_device"]) + if device is not None and not device.isleaf: + device = device.children[0] +- volume['_device'] = device.path ++ volume["_device"] = device.path + + if device is None: +- raise BlivetAnsibleError("failed to look up device for volume %s (%s/%s)" % (volume['name'], volume['_device'], volume['_mount_id'])) +- volume['_mount_id'] = device.fstab_spec +- if device.format.type == 'swap': ++ raise BlivetAnsibleError( ++ "failed to look up device for volume %s (%s/%s)" ++ % (volume["name"], volume["_device"], volume["_mount_id"]) ++ ) ++ volume["_mount_id"] = device.fstab_spec ++ if device.format.type == "swap": + device.format.setup() + + if device.status: +- volume['_kernel_device'] = os.path.realpath(device.path) ++ volume["_kernel_device"] = os.path.realpath(device.path) + if device.raw_device.status: +- volume['_raw_kernel_device'] = os.path.realpath(device.raw_device.path) ++ volume["_raw_kernel_device"] = os.path.realpath(device.raw_device.path) + + + def activate_swaps(b, pools, volumes): + """ Activate all swaps specified as present. 
""" + all_volumes = volumes[:] + for pool in pools: +- if not pool['state'] == 'present': ++ if not pool["state"] == "present": + continue + +- all_volumes += pool['volumes'] ++ all_volumes += pool["volumes"] + + for volume in all_volumes: +- if volume['state'] == 'present': +- device = b.devicetree.resolve_device(volume['_mount_id']) +- if device.format.type == 'swap': ++ if volume["state"] == "present": ++ device = b.devicetree.resolve_device(volume["_mount_id"]) ++ if device.format.type == "swap": + device.format.setup() + + + def run_module(): + # available arguments/parameters that a user can pass + module_args = dict( +- pools=dict(type='list'), +- volumes=dict(type='list'), +- packages_only=dict(type='bool', required=False, default=False), +- disklabel_type=dict(type='str', required=False, default=None), +- safe_mode=dict(type='bool', required=False, default=True), +- pool_defaults=dict(type='dict', required=False), +- volume_defaults=dict(type='dict', required=False), +- use_partitions=dict(type='bool', required=False, default=True), +- diskvolume_mkfs_option_map=dict(type='dict', required=False, default={})) ++ pools=dict(type="list"), ++ volumes=dict(type="list"), ++ packages_only=dict(type="bool", required=False, default=False), ++ disklabel_type=dict(type="str", required=False, default=None), ++ safe_mode=dict(type="bool", required=False, default=True), ++ pool_defaults=dict(type="dict", required=False), ++ volume_defaults=dict(type="dict", required=False), ++ use_partitions=dict(type="bool", required=False, default=True), ++ diskvolume_mkfs_option_map=dict(type="dict", required=False, default={}), ++ ) + + # seed the result dict in the object + result = dict( +@@ -1332,47 +1550,52 @@ def run_module(): + packages=list(), + ) + +- module = AnsibleModule(argument_spec=module_args, +- supports_check_mode=True) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + if not BLIVET_PACKAGE: +- module.fail_json(msg="Failed to import 
the blivet or blivet3 Python modules", +- exception=inspect.cleandoc(""" ++ module.fail_json( ++ msg="Failed to import the blivet or blivet3 Python modules", ++ exception=inspect.cleandoc( ++ """ + blivet3 exception: + {} + blivet exception: +- {}""").format(LIB_IMP_ERR3, LIB_IMP_ERR)) ++ {}""" ++ ).format(LIB_IMP_ERR3, LIB_IMP_ERR), ++ ) + +- if not module.params['pools'] and not module.params['volumes']: ++ if not module.params["pools"] and not module.params["volumes"]: + module.exit_json(**result) + + global disklabel_type +- disklabel_type = module.params['disklabel_type'] ++ disklabel_type = module.params["disklabel_type"] + + global use_partitions +- use_partitions = module.params['use_partitions'] ++ use_partitions = module.params["use_partitions"] + + global safe_mode +- safe_mode = module.params['safe_mode'] ++ safe_mode = module.params["safe_mode"] + + global diskvolume_mkfs_option_map +- diskvolume_mkfs_option_map = module.params['diskvolume_mkfs_option_map'] ++ diskvolume_mkfs_option_map = module.params["diskvolume_mkfs_option_map"] + + global pool_defaults +- if 'pool_defaults' in module.params: +- pool_defaults = module.params['pool_defaults'] ++ if "pool_defaults" in module.params: ++ pool_defaults = module.params["pool_defaults"] + + global volume_defaults +- if 'volume_defaults' in module.params: +- volume_defaults = module.params['volume_defaults'] ++ if "volume_defaults" in module.params: ++ volume_defaults = module.params["volume_defaults"] + + b = Blivet() + b.reset() + fstab = FSTab(b) + actions = list() + +- if module.params['packages_only']: ++ if module.params["packages_only"]: + try: +- result['packages'] = get_required_packages(b, module.params['pools'], module.params['volumes']) ++ result["packages"] = get_required_packages( ++ b, module.params["pools"], module.params["volumes"] ++ ) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + module.exit_json(**result) +@@ -1388,44 +1611,56 @@ def run_module(): + 
sys_path = action.device.path + if os.path.islink(sys_path): + sys_path = os.readlink(action.device.path) +- trigger(action='change', subsystem='block', name=os.path.basename(sys_path)) ++ trigger(action="change", subsystem="block", name=os.path.basename(sys_path)) + + def action_dict(action): +- return dict(action=action.type_desc_str, +- fs_type=action.format.type if action.is_format else None, +- device=action.device.path) ++ return dict( ++ action=action.type_desc_str, ++ fs_type=action.format.type if action.is_format else None, ++ device=action.device.path, ++ ) + +- duplicates = find_duplicate_names(module.params['pools']) ++ duplicates = find_duplicate_names(module.params["pools"]) + if duplicates: +- module.fail_json(msg="multiple pools with the same name: {0}".format(",".join(duplicates)), +- **result) +- for pool in module.params['pools']: +- duplicates = find_duplicate_names(pool.get('volumes', list())) ++ module.fail_json( ++ msg="multiple pools with the same name: {0}".format(",".join(duplicates)), ++ **result ++ ) ++ for pool in module.params["pools"]: ++ duplicates = find_duplicate_names(pool.get("volumes", list())) + if duplicates: +- module.fail_json(msg="multiple volumes in pool '{0}' with the " +- "same name: {1}".format(pool['name'], ",".join(duplicates)), +- **result) ++ module.fail_json( ++ msg="multiple volumes in pool '{0}' with the " ++ "same name: {1}".format(pool["name"], ",".join(duplicates)), ++ **result ++ ) + try: + manage_pool(b, pool) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + +- duplicates = find_duplicate_names(module.params['volumes']) ++ duplicates = find_duplicate_names(module.params["volumes"]) + if duplicates: +- module.fail_json(msg="multiple volumes with the same name: {0}".format(",".join(duplicates)), +- **result) +- for volume in module.params['volumes']: ++ module.fail_json( ++ msg="multiple volumes with the same name: {0}".format(",".join(duplicates)), ++ **result ++ ) ++ for volume in 
module.params["volumes"]: + try: + manage_volume(b, volume) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + + scheduled = b.devicetree.actions.find() +- result['packages'] = b.packages[:] ++ result["packages"] = b.packages[:] + + for action in scheduled: +- if (action.is_destroy or action.is_resize) and action.is_format and action.format.exists and \ +- (action.format.mountable or action.format.type == "swap"): ++ if ( ++ (action.is_destroy or action.is_resize) ++ and action.is_format ++ and action.format.exists ++ and (action.format.mountable or action.format.type == "swap") ++ ): + action.format.teardown() + + if scheduled: +@@ -1433,21 +1668,27 @@ def run_module(): + callbacks.action_executed.add(record_action) + callbacks.action_executed.add(ensure_udev_update) + try: +- b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode) ++ b.devicetree.actions.process( ++ devices=b.devicetree.devices, dry_run=module.check_mode ++ ) + except Exception as e: +- module.fail_json(msg="Failed to commit changes to disk: %s" % str(e), **result) ++ module.fail_json( ++ msg="Failed to commit changes to disk: %s" % str(e), **result ++ ) + finally: +- result['changed'] = True +- result['actions'] = [action_dict(a) for a in actions] ++ result["changed"] = True ++ result["actions"] = [action_dict(a) for a in actions] + +- update_fstab_identifiers(b, module.params['pools'], module.params['volumes']) +- activate_swaps(b, module.params['pools'], module.params['volumes']) ++ update_fstab_identifiers(b, module.params["pools"], module.params["volumes"]) ++ activate_swaps(b, module.params["pools"], module.params["volumes"]) + +- result['mounts'] = get_mount_info(module.params['pools'], module.params['volumes'], actions, fstab) +- result['crypts'] = get_crypt_info(actions) +- result['leaves'] = [d.path for d in b.devicetree.leaves] +- result['pools'] = module.params['pools'] +- result['volumes'] = module.params['volumes'] ++ 
result["mounts"] = get_mount_info( ++ module.params["pools"], module.params["volumes"], actions, fstab ++ ) ++ result["crypts"] = get_crypt_info(actions) ++ result["leaves"] = [d.path for d in b.devicetree.leaves] ++ result["pools"] = module.params["pools"] ++ result["volumes"] = module.params["volumes"] + + # success - return result + module.exit_json(**result) +@@ -1456,5 +1697,6 @@ def run_module(): + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/blockdev_info.py b/library/blockdev_info.py +index 52ddd78..ca1577f 100644 +--- a/library/blockdev_info.py ++++ b/library/blockdev_info.py +@@ -1,35 +1,41 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: blockdev_info + short_description: Collect info about block devices in the system. + version_added: "2.5" + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." 
+ - "This module collects information about block devices" +-options: ++options: {} + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + - name: Get info about block devices + blockdev_info: + register: blk_info + +-''' ++""" + +-RETURN = ''' ++RETURN = """ + info: + description: dict w/ device path keys and device info dict values ++ returned: success + type: dict +-''' ++""" + + import os + import shlex +@@ -38,7 +44,7 @@ from ansible.module_utils.basic import AnsibleModule + + + LSBLK_DEVICE_TYPES = {"part": "partition"} +-DEV_MD_DIR = '/dev/md' ++DEV_MD_DIR = "/dev/md" + + + def fixup_md_path(path): +@@ -59,7 +65,9 @@ def fixup_md_path(path): + + + def get_block_info(run_cmd): +- buf = run_cmd(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"])[1] ++ buf = run_cmd( ++ ["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"] ++ )[1] + info = dict() + for line in buf.splitlines(): + dev = dict() +@@ -75,7 +83,7 @@ def get_block_info(run_cmd): + + dev[key.lower()] = LSBLK_DEVICE_TYPES.get(value, value) + if dev: +- info[dev['name']] = dev ++ info[dev["name"]] = dev + + return info + +@@ -87,13 +95,10 @@ def run_module(): + info=None, + ) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + try: +- result['info'] = get_block_info(module.run_command) ++ result["info"] = get_block_info(module.run_command) + except Exception: + module.fail_json(msg="Failed to collect block device info.") + +@@ -104,5 +109,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/library/bsize.py b/library/bsize.py +index 40442f5..524b0f9 100644 +--- a/library/bsize.py ++++ b/library/bsize.py +@@ -1,12 +1,16 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ 
++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: bsize + +@@ -15,6 +19,7 @@ short_description: Module for basic manipulation with byte sizes + version_added: "2.5" + + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "Module accepts byte size strings with the units and produces strings in + form of input accepted by different storage tools" + +@@ -23,67 +28,72 @@ options: + description: + - String containing number and byte units + required: true ++ type: str + + author: +- - Jan Pokorny (japokorn@redhat.com) +-''' ++ - Jan Pokorny (@japokorn) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + # Obtain sizes in format for various tools + - name: Get 10 KiB size + bsize: + size: 10 KiB +-''' ++""" + +-RETURN = ''' ++RETURN = """ + size: + description: Size in binary format units + type: str ++ returned: success + bytes: + description: Size in bytes + type: int ++ returned: success + lvm: + description: Size in binary format. No space after the number, + first letter of unit prefix in lowercase only + type: str ++ returned: success + parted: + description: Size in binary format. 
No space after the number + type: str +-''' ++ returned: success ++""" + + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils.storage_lsr.size import Size + ++ + def run_module(): + # available arguments/parameters that a user can pass + module_args = dict( +- size=dict(type='str', required=True), ++ size=dict(type="str", required=True), + ) + + # seed the result dict in the object +- result = dict( +- changed=False +- ) ++ result = dict(changed=False) + +- module = AnsibleModule(argument_spec=module_args, +- supports_check_mode=True) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + +- size = Size(module.params['size']) ++ size = Size(module.params["size"]) + +- result['size'] = size.get(fmt="%d %sb") +- result['bytes'] = size.bytes +- result['lvm'] = size.get(fmt="%d%sb").lower()[:-2] +- result['parted'] = size.get(fmt="%d%sb") ++ result["size"] = size.get(fmt="%d %sb") ++ result["bytes"] = size.bytes ++ result["lvm"] = size.get(fmt="%d%sb").lower()[:-2] ++ result["parted"] = size.get(fmt="%d%sb") + + # use whatever logic you need to determine whether or not this module + # made any modifications to your target +- result['changed'] = False ++ result["changed"] = False + + # success - return result + module.exit_json(**result) + ++ + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py +index 0a6fc7d..c688170 100644 +--- a/library/find_unused_disk.py ++++ b/library/find_unused_disk.py +@@ -1,10 +1,15 @@ + #!/usr/bin/python + +-DOCUMENTATION = ''' ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ ++DOCUMENTATION = """ + --- + module: find_unused_disk + short_description: Gets unused disks + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." 
+ - Disks are considered in ascending alphanumeric sorted order. + - Disks that meet all conditions are considered 'empty' and returned (using kernel device name) in a list. + - 1. No known signatures exist on the disk, with the exception of partition tables. +@@ -15,18 +20,18 @@ description: + - Number of returned disks defaults to first 10, but can be specified with 'max_return' argument. + author: Eda Zhou (@edamamez) + options: +- option-name: max_return +- description: Sets the maximum number of unused disks to return. +- default: 10 +- type: int +- +- option-name: min_size +- description: Specifies the minimum disk size to return an unused disk. +- default: 0 +- type: str +-''' +- +-EXAMPLES = ''' ++ max_return: ++ description: Sets the maximum number of unused disks to return. ++ default: 10 ++ type: int ++ ++ min_size: ++ description: Specifies the minimum disk size to return an unused disk. ++ default: 0 ++ type: str ++""" ++ ++EXAMPLES = """ + - name: test finding first unused device module + hosts: localhost + tasks: +@@ -38,9 +43,9 @@ EXAMPLES = ''' + - name: dump test output + debug: + msg: '{{ testout }}' +-''' ++""" + +-RETURN = ''' ++RETURN = """ + disk_name: + description: Information about unused disks + returned: On success +@@ -50,14 +55,15 @@ disk_name: + description: Unused disk(s) that have been found + returned: On success + type: list +- samples: ["sda1", "dm-0", "dm-3"] +- ["sda"] ++ samples: | ++ ["sda1", "dm-0", "dm-3"] ++ ["sda"] + none: + description: No unused disks were found + returned: On success + type: string + sample: "Unable to find unused disk" +-''' ++""" + + + import os +@@ -68,7 +74,7 @@ from ansible.module_utils.storage_lsr.size import Size + + + SYS_CLASS_BLOCK = "/sys/class/block/" +-IGNORED_DEVICES = [re.compile(r'^/dev/nullb\d+$')] ++IGNORED_DEVICES = [re.compile(r"^/dev/nullb\d+$")] + + + def is_ignored(disk_path): +@@ -78,13 +84,13 @@ def is_ignored(disk_path): + + def no_signature(run_command, disk_path): + 
"""Return true if no known signatures exist on the disk.""" +- signatures = run_command(['blkid', '-p', disk_path]) +- return not 'UUID' in signatures[1] ++ signatures = run_command(["blkid", "-p", disk_path]) ++ return "UUID" not in signatures[1] + + + def no_holders(disk_path): + """Return true if the disk has no holders.""" +- holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + '/holders/') ++ holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + "/holders/") + return len(holders) == 0 + + +@@ -101,36 +107,45 @@ def get_sys_name(disk_path): + if not os.path.islink(disk_path): + return os.path.basename(disk_path) + +- node_dir = '/'.join(disk_path.split('/')[-1]) +- return os.path.normpath(node_dir + '/' + os.readlink(disk_path)) ++ node_dir = "/".join(disk_path.split("/")[-1]) ++ return os.path.normpath(node_dir + "/" + os.readlink(disk_path)) + + + def get_partitions(disk_path): + sys_name = get_sys_name(disk_path) + partitions = list() + for filename in os.listdir(SYS_CLASS_BLOCK + sys_name): +- if re.match(sys_name + r'p?\d+$', filename): ++ if re.match(sys_name + r"p?\d+$", filename): + partitions.append(filename) + + return partitions + + + def get_disks(run_command): +- buf = run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"])[1] ++ buf = run_command( ++ ["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"] ++ )[1] + disks = dict() + for line in buf.splitlines(): + if not line: + continue + +- m = re.search(r'NAME="(?P[^"]*)" TYPE="(?P[^"]*)" SIZE="(?P\d+)" FSTYPE="(?P[^"]*)"', line) ++ m = re.search( ++ r'NAME="(?P[^"]*)" TYPE="(?P[^"]*)" SIZE="(?P\d+)" FSTYPE="(?P[^"]*)"', ++ line, ++ ) + if m is None: + print(line) + continue + +- if m.group('type') != "disk": ++ if m.group("type") != "disk": + continue + +- disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'), "fstype": m.group('fstype')} ++ disks[m.group("path")] = { ++ "type": m.group("type"), ++ "size": 
m.group("size"), ++ "fstype": m.group("fstype"), ++ } + + return disks + +@@ -138,19 +153,13 @@ def get_disks(run_command): + def run_module(): + """Create the module""" + module_args = dict( +- max_return=dict(type='int', required=False, default=10), +- min_size=dict(type='str', required=False, default=0) ++ max_return=dict(type="int", required=False, default=10), ++ min_size=dict(type="str", required=False, default=0), + ) + +- result = dict( +- changed=False, +- disks=[] +- ) ++ result = dict(changed=False, disks=[]) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + run_command = module.run_command + +@@ -161,7 +170,7 @@ def run_module(): + if attrs["fstype"]: + continue + +- if Size(attrs["size"]).bytes < Size(module.params['min_size']).bytes: ++ if Size(attrs["size"]).bytes < Size(module.params["min_size"]).bytes: + continue + + if get_partitions(path): +@@ -173,14 +182,14 @@ def run_module(): + if not can_open(path): + continue + +- result['disks'].append(os.path.basename(path)) +- if len(result['disks']) >= module.params['max_return']: ++ result["disks"].append(os.path.basename(path)) ++ if len(result["disks"]) >= module.params["max_return"]: + break + +- if not result['disks']: +- result['disks'] = "Unable to find unused disk" ++ if not result["disks"]: ++ result["disks"] = "Unable to find unused disk" + else: +- result['disks'].sort() ++ result["disks"].sort() + + module.exit_json(**result) + +@@ -190,5 +199,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/library/lvm_gensym.py b/library/lvm_gensym.py +index 49d1822..3e0f613 100644 +--- a/library/lvm_gensym.py ++++ b/library/lvm_gensym.py +@@ -1,66 +1,75 @@ + #!/usr/bin/python + """Generates unique, default names for a volume group and logical volume""" + +-from ansible.module_utils.basic import AnsibleModule +-from 
ansible.module_utils import facts ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: lvm_gensym + short_description: Generate default names for lvm variables + version_added: "2.4" +-description: +- - "Module accepts two input strings consisting of a file system type and ++description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." ++ - "Module accepts two input strings consisting of a file system type and + a mount point path, and outputs names based on system information" + options: + fs_type: + description: +- - String describing the desired file system type +- required: true ++ - String describing the desired file system type ++ required: true ++ type: str + mount: + description: +- - String describing the mount point path ++ - String describing the mount point path + required: true +-author: +- - Tim Flannagan (tflannag@redhat.com) +-''' ++ type: str ++author: ++ - Tim Flannagan (@timflannagan) ++""" + +-EXAMPLES = ''' +-- name: Generate names ++EXAMPLES = """ ++- name: Generate names + lvm_gensym: + fs_type: "{{ fs_type }}" + mount: "{{ mount_point }}" + register: lvm_results + when: lvm_vg == "" and mount_point != "" and fs_type != "" +-''' ++""" + +-RETURN = ''' ++RETURN = """ + vg_name: + description: The default generated name for an unspecified volume group + type: str +- ++ returned: success + lv_name: + description: The default generated name for an unspecified logical volume + type: str +-''' ++ returned: success ++""" ++ ++from ansible.module_utils.basic import AnsibleModule ++from ansible.module_utils import facts + + + def get_os_name(): + """Search the host file and return the name in the ID column""" +- for 
line in open('/etc/os-release').readlines(): +- if not line.find('ID='): ++ for line in open("/etc/os-release").readlines(): ++ if not line.find("ID="): + os_name = line[3:] + break + +- os_name = os_name.replace('\n', '').replace('"', '') ++ os_name = os_name.replace("\n", "").replace('"', "") + return os_name + ++ + def name_is_unique(name, used_names): + """Check if name is contained in the used_names list and return boolean value""" + if name not in used_names: +@@ -68,14 +77,15 @@ def name_is_unique(name, used_names): + + return False + ++ + def get_unique_name_from_base(base_name, used_names): + """Generate a unique name given a base name and a list of used names, and return that unique name""" + counter = 0 + while not name_is_unique(base_name, used_names): + if counter == 0: +- base_name = base_name + '_' + str(counter) ++ base_name = base_name + "_" + str(counter) + else: +- base_name = base_name[:-2] + '_' + str(counter) ++ base_name = base_name[:-2] + "_" + str(counter) + counter += 1 + + return base_name +@@ -83,8 +93,8 @@ def get_unique_name_from_base(base_name, used_names): + + def get_vg_name_base(host_name, os_name): + """Return a base name for a volume group based on the host and os names""" +- if host_name != None and len(host_name) != 0: +- vg_default = os_name + '_' + host_name ++ if host_name is not None and len(host_name) != 0: ++ vg_default = os_name + "_" + host_name + else: + vg_default = os_name + +@@ -93,65 +103,68 @@ def get_vg_name_base(host_name, os_name): + + def get_vg_name(host_name, lvm_facts): + """Generate a base volume group name, verify its uniqueness, and return that unique name""" +- used_vg_names = lvm_facts['vgs'].keys() ++ used_vg_names = lvm_facts["vgs"].keys() + os_name = get_os_name() + name = get_vg_name_base(host_name, os_name) + + return get_unique_name_from_base(name, used_vg_names) + ++ + def get_lv_name_base(fs_type, mount_point): + """Return a logical volume base name using given parameters""" +- if 'swap' in 
fs_type.lower(): +- lv_default = 'swap' +- elif mount_point.startswith('/'): +- if mount_point == '/': +- lv_default = 'root' ++ if "swap" in fs_type.lower(): ++ lv_default = "swap" ++ elif mount_point.startswith("/"): ++ if mount_point == "/": ++ lv_default = "root" + else: +- lv_default = mount_point[1:].replace('/', '_') ++ lv_default = mount_point[1:].replace("/", "_") + else: +- lv_default = 'lv' ++ lv_default = "lv" + + return lv_default + + + def get_lv_name(fs_type, mount_point, lvm_facts): + """Return a unique logical volume name based on specified file system type, mount point, and system facts""" +- used_lv_names = lvm_facts['lvs'].keys() ++ used_lv_names = lvm_facts["lvs"].keys() + name = get_lv_name_base(fs_type, mount_point) + + return get_unique_name_from_base(name, used_lv_names) + ++ + def run_module(): + """Setup and initialize all relevant ansible module data""" + module_args = dict( +- mount=dict(type='str', required=True), +- fs_type=dict(type='str', required=True) ++ mount=dict(type="str", required=True), fs_type=dict(type="str", required=True) + ) + +- result = dict( +- changed=False, +- vg_name='', +- lv_name='' +- ) ++ result = dict(changed=False, vg_name="", lv_name="") + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + +- lvm_facts = facts.ansible_facts(module)['lvm'] +- host_name = facts.ansible_facts(module)['nodename'].lower().replace('.', '_').replace('-', '_') ++ lvm_facts = facts.ansible_facts(module)["lvm"] ++ host_name = ( ++ facts.ansible_facts(module)["nodename"] ++ .lower() ++ .replace(".", "_") ++ .replace("-", "_") ++ ) + +- result['lv_name'] = get_lv_name(module.params['fs_type'], module.params['mount'], lvm_facts) +- result['vg_name'] = get_vg_name(host_name, lvm_facts) ++ result["lv_name"] = get_lv_name( ++ module.params["fs_type"], module.params["mount"], lvm_facts ++ ) ++ result["vg_name"] = 
get_vg_name(host_name, lvm_facts) + +- if result['lv_name'] != '' and result['vg_name'] != '': ++ if result["lv_name"] != "" and result["vg_name"] != "": + module.exit_json(**result) + else: + module.fail_json(msg="Unable to initialize both group and volume names") + ++ + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/resolve_blockdev.py b/library/resolve_blockdev.py +index 007bb28..df9dcb1 100644 +--- a/library/resolve_blockdev.py ++++ b/library/resolve_blockdev.py +@@ -1,17 +1,22 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: resolve_blockdev + short_description: Resolve block device specification to device node path. + version_added: "2.5" + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "This module accepts various forms of block device identifiers and + resolves them to the correct block device node path." 
+ options: +@@ -19,11 +24,12 @@ options: + description: + - String describing a block device + required: true ++ type: str + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + - name: Resolve device by label + resolve_blockdev: + spec: LABEL=MyData +@@ -35,13 +41,14 @@ EXAMPLES = ''' + - name: Resolve device by /dev/disk/by-id symlink name + resolve_blockdev: + spec: wwn-0x5000c5005bc37f3f +-''' ++""" + +-RETURN = ''' ++RETURN = """ + device: + description: Path to block device node + type: str +-''' ++ returned: success ++""" + + import glob + import os +@@ -52,37 +59,42 @@ from ansible.module_utils.basic import AnsibleModule + DEV_MD = "/dev/md" + DEV_MAPPER = "/dev/mapper" + SYS_CLASS_BLOCK = "/sys/class/block" +-SEARCH_DIRS = ['/dev', DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*") +-MD_KERNEL_DEV = re.compile(r'/dev/md\d+(p\d+)?$') ++SEARCH_DIRS = ["/dev", DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*") ++MD_KERNEL_DEV = re.compile(r"/dev/md\d+(p\d+)?$") + + + def resolve_blockdev(spec, run_cmd): + if "=" in spec: + device = run_cmd("blkid -t %s -o device" % spec)[1].strip() +- elif not spec.startswith('/'): ++ elif not spec.startswith("/"): + for devdir in SEARCH_DIRS: + device = "%s/%s" % (devdir, spec) + if os.path.exists(device): + break + else: +- device = '' ++ device = "" + else: + device = spec + + if not device or not os.path.exists(device): +- return '' ++ return "" + + return canonical_device(os.path.realpath(device)) + + + def _get_dm_name_from_kernel_dev(kdev): +- return open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip() ++ return ( ++ open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip() ++ ) + + + def _get_md_name_from_kernel_dev(kdev): + minor = os.minor(os.stat(kdev).st_rdev) +- return next(name for name in os.listdir(DEV_MD) +- if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor) ++ return next( 
++ name ++ for name in os.listdir(DEV_MD) ++ if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor ++ ) + + + def canonical_device(device): +@@ -94,26 +106,27 @@ def canonical_device(device): + + + def run_module(): +- module_args = dict( +- spec=dict(type='str') +- ) ++ module_args = dict(spec=dict(type="str")) + + result = dict( + device=None, + ) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + try: +- result['device'] = resolve_blockdev(module.params['spec'], run_cmd=module.run_command) ++ result["device"] = resolve_blockdev( ++ module.params["spec"], run_cmd=module.run_command ++ ) + except Exception: + pass + +- if not result['device'] or not os.path.exists(result['device']): +- module.fail_json(msg="The {} device spec could not be resolved".format(module.params['spec'])) ++ if not result["device"] or not os.path.exists(result["device"]): ++ module.fail_json( ++ msg="The {0} device spec could not be resolved".format( ++ module.params["spec"] ++ ) ++ ) + + module.exit_json(**result) + +@@ -122,5 +135,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/module_utils/storage_lsr/size.py b/module_utils/storage_lsr/size.py +index 16f3d7c..1e91faa 100644 +--- a/module_utils/storage_lsr/size.py ++++ b/module_utils/storage_lsr/size.py +@@ -1,4 +1,6 @@ +-#!/bin/python2 ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + import re + +@@ -7,15 +9,20 @@ BINARY_FACTOR = 2 ** 10 + + # index of the item in the list determines the exponent for size computation + # e.g. 
size_in_bytes = value * (DECIMAL_FACTOR ** (index(mega)+1)) = value * (1000 ** (1+1)) +-PREFIXES_DECIMAL = [["k", "M", "G", "T", "P", "E", "Z", "Y"], +- ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"]] +-PREFIXES_BINARY = [["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], +- ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"]] ++# pylint: disable=bad-whitespace ++PREFIXES_DECIMAL = [ ++ ["k", "M", "G", "T", "P", "E", "Z", "Y"], # nopep8 ++ ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"], ++] # nopep8 ++PREFIXES_BINARY = [ ++ ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], # nopep8 ++ ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"], ++] # nopep8 + SUFFIXES = ["bytes", "byte", "B"] + ++ + class Size(object): +- ''' Class for basic manipulation of the sizes in *bytes +- ''' ++ """Class for basic manipulation of the sizes in *bytes""" + + def __init__(self, value): + raw_number, raw_units = self._parse_input(str(value)) +@@ -25,9 +32,9 @@ class Size(object): + self.units = raw_units + + def _parse_input(self, value): +- ''' splits input string into number and unit parts +- returns number part, unit part +- ''' ++ """splits input string into number and unit parts ++ returns number part, unit part ++ """ + m = re.search("^(.*?)([^0-9]*)$", value) + + raw_number = m.group(1).strip() +@@ -39,12 +46,12 @@ class Size(object): + return raw_number, raw_units + + def _parse_units(self, raw_units): +- ''' +- gets string containing size units and +- returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!) +- in the PREFIXES_* list +- If no unit is specified defaults to BINARY and Bytes +- ''' ++ """ ++ gets string containing size units and ++ returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!) 
++ in the PREFIXES_* list ++ If no unit is specified defaults to BINARY and Bytes ++ """ + + prefix = raw_units + no_suffix_flag = True +@@ -54,7 +61,7 @@ class Size(object): + for suffix in SUFFIXES: + if raw_units.lower().endswith(suffix.lower()): + no_suffix_flag = False +- prefix = raw_units[:-len(suffix)] ++ prefix = raw_units[: -len(suffix)] + break + + if prefix == "": +@@ -87,18 +94,18 @@ class Size(object): + if idx < 0 or not valid_suffix: + raise ValueError("Unable to identify unit '%s'" % raw_units) + +- return used_factor, idx+1 ++ return used_factor, idx + 1 + + def _parse_number(self, raw_number): +- ''' parse input string containing number +- return float +- ''' ++ """parse input string containing number ++ return float ++ """ + return float(raw_number) + + def _get_unit(self, factor, exponent, unit_type=0): +- ''' based on decimal or binary factor and exponent +- obtain and return correct unit +- ''' ++ """based on decimal or binary factor and exponent ++ obtain and return correct unit ++ """ + + if unit_type == 0: + suffix = "B" +@@ -112,12 +119,11 @@ class Size(object): + prefix_lst = PREFIXES_DECIMAL[unit_type] + else: + prefix_lst = PREFIXES_BINARY[unit_type] +- return prefix_lst[exponent-1] + suffix ++ return prefix_lst[exponent - 1] + suffix + + @property + def bytes(self): +- ''' returns size value in bytes as int +- ''' ++ """returns size value in bytes as int""" + return int((self.factor ** self.exponent) * self.number) + + def _format(self, format_str, factor, exponent): +@@ -129,20 +135,20 @@ class Size(object): + return result + + def get(self, units="autobin", fmt="%0.1f %sb"): +- ''' returns size value as a string with given units and format ++ """returns size value as a string with given units and format + +- "units" parameter allows to select preferred unit: +- for example "KiB" or "megabytes" +- accepted values are also: +- "autobin" (default) - uses the highest human readable unit (binary) +- "autodec" - uses the highest human 
readable unit (decimal) ++ "units" parameter allows to select preferred unit: ++ for example "KiB" or "megabytes" ++ accepted values are also: ++ "autobin" (default) - uses the highest human readable unit (binary) ++ "autodec" - uses the highest human readable unit (decimal) + +- "fmt" parameter allows to specify the output format: +- %sb - will be replaced with the short byte size unit (e.g. MiB) +- %lb - will be replaced with the long byte size unit (e.g. kilobytes) +- value can be formatted using standard string replacements (e.g. %d, %f) ++ "fmt" parameter allows to specify the output format: ++ %sb - will be replaced with the short byte size unit (e.g. MiB) ++ %lb - will be replaced with the long byte size unit (e.g. kilobytes) ++ value can be formatted using standard string replacements (e.g. %d, %f) + +- ''' ++ """ + + ftr = BINARY_FACTOR + if units == "autodec": +@@ -155,6 +161,8 @@ class Size(object): + exp += 1 + else: + ftr, exp = self._parse_units(units.strip()) +- value = (float(self.factor ** self.exponent) / float(ftr ** exp)) * self.number ++ value = ( ++ float(self.factor ** self.exponent) / float(ftr ** exp) ++ ) * self.number + + return self._format(fmt, ftr, exp) % value +diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh +deleted file mode 100755 +index 94d102d..0000000 +--- a/tests/setup_module_utils.sh ++++ /dev/null +@@ -1,41 +0,0 @@ +-#!/bin/bash +-# SPDX-License-Identifier: MIT +- +-set -euo pipefail +- +-if [ -n "${DEBUG:-}" ] ; then +- set -x +-fi +- +-if [ ! -d "${1:-}" ] ; then +- echo Either ansible is not installed, or there is no ansible/module_utils +- echo in "$1" - Skipping +- exit 0 +-fi +- +-if [ ! 
-d "${2:-}" ] ; then +- echo Role has no module_utils - Skipping +- exit 0 +-fi +- +-# we need absolute path for $2 +-absmoddir=$( readlink -f "$2" ) +- +-# clean up old links to module_utils +-for item in "$1"/* ; do +- if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then +- case "$lnitem" in +- *"${2}"*) rm -f "$item" ;; +- esac +- fi +-done +- +-# add new links to module_utils +-for item in "$absmoddir"/* ; do +- case "$item" in +- *__pycache__) continue;; +- *.pyc) continue;; +- esac +- bnitem=$( basename "$item" ) +- ln -s "$item" "$1/$bnitem" +-done +diff --git a/tests/test-verify-volume-device.yml b/tests/test-verify-volume-device.yml +index 3fb56a6..c7ba5ec 100644 +--- a/tests/test-verify-volume-device.yml ++++ b/tests/test-verify-volume-device.yml +@@ -23,11 +23,11 @@ + + - name: (1/2) Process volume type (set initial value) + set_fact: +- st_volume_type: "{{ storage_test_volume.type }}" ++ st_volume_type: "{{ storage_test_volume.type }}" + + - name: (2/2) Process volume type (get RAID value) + set_fact: +- st_volume_type: "{{ storage_test_volume.raid_level }}" ++ st_volume_type: "{{ storage_test_volume.raid_level }}" + when: storage_test_volume.type == "raid" + + - name: Verify the volume's device type +diff --git a/tests/test-verify-volume-md.yml b/tests/test-verify-volume-md.yml +index b21d8d2..27e8333 100644 +--- a/tests/test-verify-volume-md.yml ++++ b/tests/test-verify-volume-md.yml +@@ -9,7 +9,7 @@ + register: storage_test_mdadm + changed_when: false + +- # pre-chew regex search patterns ++ # pre-chew regex search patterns + - set_fact: + storage_test_md_active_devices_re: "{{ ('Active Devices : ' ~ storage_test_volume.raid_device_count ~ '\n')|regex_escape() }}" + when: storage_test_volume.raid_device_count is defined +diff --git a/tests/test.yml b/tests/test.yml +index 944b3cd..cb718a7 100644 +--- a/tests/test.yml ++++ b/tests/test.yml +@@ -16,7 +16,7 @@ + mount_point: '/opt/test1' + - name: bar + disks: ['vdc'] +- #state: "absent" ++ # 
state: "absent" + volumes: + - name: test2 + size: 8g +diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml +index 21a5788..1737036 100644 +--- a/tests/tests_create_lv_size_equal_to_vg.yml ++++ b/tests/tests_create_lv_size_equal_to_vg.yml +@@ -23,13 +23,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ lv_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ lv_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -37,12 +37,12 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: "absent" +- volumes: +- - name: test1 +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: "absent" ++ volumes: ++ - name: test1 ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml +diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml +index 351b022..567f8dd 100644 +--- a/tests/tests_create_partition_volume_then_remove.yml ++++ b/tests/tests_create_partition_volume_then_remove.yml +@@ -53,7 +53,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: "{{ unused_disks[0] }}" ++ - name: "{{ unused_disks[0] }}" + type: partition + disks: "{{ unused_disks }}" + state: absent +@@ -70,7 +70,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: "{{ unused_disks[0] }}" ++ - name: "{{ unused_disks[0] }}" + type: partition + disks: "{{ unused_disks }}" + state: absent +diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml +index 854ac0d..2490914 100644 +--- 
a/tests/tests_existing_lvm_pool.yml ++++ b/tests/tests_existing_lvm_pool.yml +@@ -20,12 +20,12 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: "{{ pool_name }}" +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" ++ storage_pools: ++ - name: "{{ pool_name }}" ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" + + - include_tasks: verify-role-results.yml + +diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml +index fb17c23..8c754a6 100644 +--- a/tests/tests_lvm_auto_size_cap.yml ++++ b/tests/tests_lvm_auto_size_cap.yml +@@ -33,12 +33,12 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ doubled_size.stdout|trim }}" ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ doubled_size.stdout|trim }}" + - name: unreachable task + fail: + msg: UNREACH +@@ -56,11 +56,11 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ test_disk_size }}" ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" + + - include_tasks: verify-role-results.yml + +@@ -69,12 +69,12 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ test_disk_size }}" ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" + + - include_tasks: verify-role-results.yml + +@@ -83,7 +83,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: [] ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ 
volumes: [] +diff --git a/tests/tests_lvm_one_disk_one_volume.yml b/tests/tests_lvm_one_disk_one_volume.yml +index b1096cf..6452f54 100644 +--- a/tests/tests_lvm_one_disk_one_volume.yml ++++ b/tests/tests_lvm_one_disk_one_volume.yml +@@ -19,13 +19,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -33,13 +33,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -47,14 +47,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" +- state: absent ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" ++ state: absent + + - include_tasks: verify-role-results.yml +diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml +index 3139bc7..afe753a 100644 +--- a/tests/tests_misc.yml ++++ b/tests/tests_misc.yml +@@ -197,7 +197,7 @@ + block: + - name: Try to mount swap filesystem to "{{ mount_location }}" + include_role: +- name: linux-system-roles.storage ++ name: linux-system-roles.storage + vars: + 
storage_volumes: + - name: test1 +diff --git a/tests/tests_null_raid_pool.yml b/tests/tests_null_raid_pool.yml +index 2b7b9f3..5c3c785 100644 +--- a/tests/tests_null_raid_pool.yml ++++ b/tests/tests_null_raid_pool.yml +@@ -31,9 +31,9 @@ + raid_level: "null" + state: present + volumes: +- - name: lv1 +- size: "{{ volume1_size }}" +- mount_point: "{{ mount_location1 }}" ++ - name: lv1 ++ size: "{{ volume1_size }}" ++ mount_point: "{{ mount_location1 }}" + + - name: get existing raids (after run) + command: "cat /proc/mdstat" +@@ -52,12 +52,12 @@ + raid_level: "null" + state: absent + volumes: +- - name: lv1 +- size: "{{ volume1_size }}" +- mount_point: "{{ mount_location1 }}" ++ - name: lv1 ++ size: "{{ volume1_size }}" ++ mount_point: "{{ mount_location1 }}" + + - name: compare mdstat results + assert: + that: +- - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout ++ - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout + msg: "Raid created when it should not be" +diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml +index 209d129..4fd8583 100644 +--- a/tests/tests_resize.yml ++++ b/tests/tests_resize.yml +@@ -29,16 +29,16 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- type: lvm +- volumes: +- - name: test1 +- # resizing is currently supported only for ext2/3/4 +- fs_type: 'ext4' +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: lvm ++ volumes: ++ - name: test1 ++ # resizing is currently supported only for ext2/3/4 ++ fs_type: 'ext4' ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -46,15 +46,15 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- fs_type: 'ext4' +- size: 
"{{ volume_size_after }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ fs_type: 'ext4' ++ size: "{{ volume_size_after }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -194,14 +194,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -259,14 +259,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -324,13 +324,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml +diff --git a/tests/unit/bsize_test.py b/tests/unit/bsize_test.py +index f88a9c1..fae0f5f 100644 +--- a/tests/unit/bsize_test.py ++++ b/tests/unit/bsize_test.py +@@ -1,7 +1,12 @@ ++from 
__future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import pytest + + from storage_lsr.size import Size + ++ + def test_bsize(): + # check failure on incorrect string + with pytest.raises(ValueError) as e: +diff --git a/tests/unit/gensym_test.py b/tests/unit/gensym_test.py +index 6d164dc..fd00ddf 100644 +--- a/tests/unit/gensym_test.py ++++ b/tests/unit/gensym_test.py +@@ -1,68 +1,115 @@ + #!/usr/bin/python + """This module tests methods defined in the lvm_gensym.py module using the pytest framework""" +-import pytest +- ++from __future__ import absolute_import, division, print_function + +-import lvm_gensym ++__metaclass__ = type + ++import pytest + +-used_lv_names = ['root', 'root_0', 'root_1', 'root_2', 'root_3', 'swap_0', 'swap', 'swap_1'] + +-test_lv_names = [{'fs_type': 'ext', 'mount': '/'}, +- {'fs_type': 'zfs', 'mount': '/home/user'}, +- {'fs_type': 'swap', 'mount': ''} +- ] ++import lvm_gensym + +-used_vg_names = ['linux_host', 'rhel_user0', 'rhel_0_user'] + +-test_vg_names = ['rhel_user', 'rhel_user_0', 'rhel_user_1', +- 'rhel_user_2', 'rhel_user_3', 'linux_user', +- 'fedora_user', 'fedora_user_0', 'fedora_user_1' +- ] ++used_lv_names = [ ++ "root", ++ "root_0", ++ "root_1", ++ "root_2", ++ "root_3", ++ "swap_0", ++ "swap", ++ "swap_1", ++] ++ ++test_lv_names = [ ++ {"fs_type": "ext", "mount": "/"}, ++ {"fs_type": "zfs", "mount": "/home/user"}, ++ {"fs_type": "swap", "mount": ""}, ++] ++ ++used_vg_names = ["linux_host", "rhel_user0", "rhel_0_user"] ++ ++test_vg_names = [ ++ "rhel_user", ++ "rhel_user_0", ++ "rhel_user_1", ++ "rhel_user_2", ++ "rhel_user_3", ++ "linux_user", ++ "fedora_user", ++ "fedora_user_0", ++ "fedora_user_1", ++] ++ ++lvm_facts = { ++ "lvs": { ++ "Home": "", ++ "Swap": "", ++ "Root": "", ++ "Root_0": "", ++ "root": "", ++ "root_0": "", ++ "swap": "", ++ "swap_0": "", ++ "swap_1": "", ++ }, ++ "vgs": {"rhel_user": "", "rhel_user_0": "", "rhel_user_1": ""}, ++} + +-lvm_facts = {'lvs': {'Home': '', 
'Swap': '', 'Root': '', +- 'Root_0': '', 'root': '', 'root_0': '', +- 'swap': '', 'swap_0': '', 'swap_1': '', +- }, +- 'vgs': {'rhel_user': '', 'rhel_user_0': '', 'rhel_user_1': ''} +- } + + def test_unique_base_name(): + """Test whether the returned name is unique using a supplied list of test names""" +- assert lvm_gensym.get_unique_name_from_base('root', used_lv_names) == 'root_4' +- assert lvm_gensym.get_unique_name_from_base('rhel_user', test_vg_names) == 'rhel_user_4' ++ assert lvm_gensym.get_unique_name_from_base("root", used_lv_names) == "root_4" ++ assert ( ++ lvm_gensym.get_unique_name_from_base("rhel_user", test_vg_names) ++ == "rhel_user_4" ++ ) ++ + + def test_return_val(): + """Verify that a supplied unique name and a list of used names returns True""" + for (index, name) in enumerate(test_vg_names): + assert lvm_gensym.name_is_unique(name[index], used_vg_names) + ++ + def test_get_base_vg_name(): + """Check generated base volume group name against the expected base name""" +- assert lvm_gensym.get_vg_name_base('hostname', 'rhel') == 'rhel_hostname' ++ assert lvm_gensym.get_vg_name_base("hostname", "rhel") == "rhel_hostname" ++ + + @pytest.mark.parametrize("os_name", ["foo", "bar", "baz"]) + def test_vg_eval(monkeypatch, os_name): + """Check generated unique volume group name against the expected name""" ++ + def get_os_name(): + return os_name + + vg_names = [os_name + "_user", os_name + "_user_0", os_name + "_user_1"] + _lvm_facts = dict(vgs=dict.fromkeys(vg_names), lvs=dict()) + monkeypatch.setattr(lvm_gensym, "get_os_name", get_os_name) +- assert lvm_gensym.get_vg_name('user', _lvm_facts) == os_name + '_user_2' +- assert lvm_gensym.get_vg_name('', _lvm_facts) == os_name ++ assert lvm_gensym.get_vg_name("user", _lvm_facts) == os_name + "_user_2" ++ assert lvm_gensym.get_vg_name("", _lvm_facts) == os_name ++ + + def test_lv_eval(): + """Test the generated unique logical volume name against the expected name""" +- expected = ['root_1', 'home_user', 
'swap_2'] ++ expected = ["root_1", "home_user", "swap_2"] + + for (ctr, name_inputs) in enumerate(test_lv_names): +- assert lvm_gensym.get_lv_name(name_inputs['fs_type'], name_inputs['mount'], lvm_facts) == expected[ctr] ++ assert ( ++ lvm_gensym.get_lv_name( ++ name_inputs["fs_type"], name_inputs["mount"], lvm_facts ++ ) ++ == expected[ctr] ++ ) ++ + + def test_get_base_lv_name(): + """Test the generated base logical volume name against the expected name""" +- expected = ['root', 'home_user', 'swap'] ++ expected = ["root", "home_user", "swap"] + + for (ctr, names_input) in enumerate(test_lv_names): +- assert lvm_gensym.get_lv_name_base(names_input['fs_type'], names_input['mount']) == expected[ctr] ++ assert ( ++ lvm_gensym.get_lv_name_base(names_input["fs_type"], names_input["mount"]) ++ == expected[ctr] ++ ) +diff --git a/tests/unit/resolve_blockdev_test.py b/tests/unit/resolve_blockdev_test.py +index 0eafe7b..ad50628 100644 +--- a/tests/unit/resolve_blockdev_test.py ++++ b/tests/unit/resolve_blockdev_test.py +@@ -1,3 +1,6 @@ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + import os + import pytest +@@ -5,73 +8,80 @@ import pytest + import resolve_blockdev + + +-blkid_data = [('LABEL=target', '/dev/sdx3'), +- ('UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5', '/dev/sdaz'), +- ('LABEL=missing', '')] ++blkid_data = [ ++ ("LABEL=target", "/dev/sdx3"), ++ ("UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5", "/dev/sdaz"), ++ ("LABEL=missing", ""), ++] + +-path_data = ['/dev/md/unreal', +- '/dev/mapper/fakevg-fakelv', +- '/dev/adisk', +- '/dev/disk/by-id/wwn-0x123456789abc'] ++path_data = [ ++ "/dev/md/unreal", ++ "/dev/mapper/fakevg-fakelv", ++ "/dev/adisk", ++ "/dev/disk/by-id/wwn-0x123456789abc", ++] + +-canonical_paths = {"/dev/sda": "/dev/sda", +- "/dev/dm-3": "/dev/mapper/vg_system-lv_data", +- "/dev/md127": "/dev/md/userdb", +- "/dev/notfound": ""} ++canonical_paths = { ++ "/dev/sda": "/dev/sda", ++ "/dev/dm-3": 
"/dev/mapper/vg_system-lv_data", ++ "/dev/md127": "/dev/md/userdb", ++ "/dev/notfound": "", ++} + + +-@pytest.mark.parametrize('spec,device', blkid_data) ++@pytest.mark.parametrize("spec,device", blkid_data) + def test_key_value_pair(spec, device, monkeypatch): + def run_cmd(args): + for _spec, _dev in blkid_data: + if _spec in args: + break + else: +- _dev = '' +- return (0, _dev, '') ++ _dev = "" ++ return (0, _dev, "") + +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(spec, run_cmd) == device + + +-@pytest.mark.parametrize('name', [os.path.basename(p) for p in path_data]) ++@pytest.mark.parametrize("name", [os.path.basename(p) for p in path_data]) + def test_device_names(name, monkeypatch): + """ Test return values for basename specs, assuming all paths are real. """ ++ + def path_exists(path): + return next((data for data in path_data if data == path), False) + +- expected = next((data for data in path_data if os.path.basename(data) == name), '') +- monkeypatch.setattr(os.path, 'exists', path_exists) ++ expected = next((data for data in path_data if os.path.basename(data) == name), "") ++ monkeypatch.setattr(os.path, "exists", path_exists) + assert resolve_blockdev.resolve_blockdev(name, None) == expected + + + def test_device_name(monkeypatch): +- assert os.path.exists('/dev/xxx') is False ++ assert os.path.exists("/dev/xxx") is False + +- monkeypatch.setattr(os.path, 'exists', lambda p: True) +- assert resolve_blockdev.resolve_blockdev('xxx', None) == '/dev/xxx' ++ monkeypatch.setattr(os.path, "exists", lambda p: True) ++ assert resolve_blockdev.resolve_blockdev("xxx", None) == "/dev/xxx" + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev('xxx', None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev("xxx", None) == "" + + + def 
test_full_path(monkeypatch): + path = "/dev/idonotexist" +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(path, None) == path + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev(path, None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev(path, None) == "" + + path = "/dev/disk/by-label/alabel" +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(path, None) == path + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev(path, None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev(path, None) == "" + + +-@pytest.mark.parametrize('device', list(canonical_paths.keys())) ++@pytest.mark.parametrize("device", list(canonical_paths.keys())) + def test_canonical_path(device, monkeypatch): + def _get_name(device): + name = os.path.basename(canonical_paths[device]) +@@ -79,8 +89,8 @@ def test_canonical_path(device, monkeypatch): + raise Exception("failed to find name") + return name + +- monkeypatch.setattr(resolve_blockdev, '_get_dm_name_from_kernel_dev', _get_name) +- monkeypatch.setattr(resolve_blockdev, '_get_md_name_from_kernel_dev', _get_name) ++ monkeypatch.setattr(resolve_blockdev, "_get_dm_name_from_kernel_dev", _get_name) ++ monkeypatch.setattr(resolve_blockdev, "_get_md_name_from_kernel_dev", _get_name) + + canonical = canonical_paths[device] + if canonical: +diff --git a/tests/unit/test_unused_disk.py b/tests/unit/test_unused_disk.py +index a4339c4..493b4b0 100644 +--- a/tests/unit/test_unused_disk.py ++++ b/tests/unit/test_unused_disk.py +@@ -1,72 +1,91 @@ ++from __future__ import absolute_import, division, print_function ++ 
++__metaclass__ = type ++ + import pytest + import find_unused_disk + import os + + +-blkid_data_pttype = [('/dev/sdx', '/dev/sdx: PTTYPE=\"dos\"'), +- ('/dev/sdy', '/dev/sdy: PTTYPE=\"test\"')] ++blkid_data_pttype = [ ++ ("/dev/sdx", '/dev/sdx: PTTYPE="dos"'), ++ ("/dev/sdy", '/dev/sdy: PTTYPE="test"'), ++] + +-blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\"'), +- ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\"'), +- ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\"')] ++blkid_data = [ ++ ("/dev/sdx", 'UUID="hello-1234-56789" TYPE="crypto_LUKS"'), ++ ( ++ "/dev/sdy", ++ 'UUID="this-1s-a-t3st-f0r-ansible" VERSION="LVM2 001" TYPE="LVM2_member" USAGE="raid"', ++ ), ++ ( ++ "/dev/sdz", ++ 'LABEL="/data" UUID="a12bcdef-345g-67h8-90i1-234j56789k10" VERSION="1.0" TYPE="ext4" USAGE="filesystem"', ++ ), ++] + +-holders_data_none = [('/dev/sdx', ''), +- ('/dev/dm-99', '')] ++holders_data_none = [("/dev/sdx", ""), ("/dev/dm-99", "")] + +-holders_data = [('/dev/sdx', 'dm-0'), +- ('/dev/dm-99', 'dm-2 dm-3 dm-4')] ++holders_data = [("/dev/sdx", "dm-0"), ("/dev/dm-99", "dm-2 dm-3 dm-4")] + + +-@pytest.mark.parametrize('disk, blkid', blkid_data_pttype) ++@pytest.mark.parametrize("disk, blkid", blkid_data_pttype) + def test_no_signature_true(disk, blkid): + def run_command(args): +- return [0, blkid, ''] ++ return [0, blkid, ""] ++ + assert find_unused_disk.no_signature(run_command, disk) is True + + +-@pytest.mark.parametrize('disk, blkid', blkid_data) ++@pytest.mark.parametrize("disk, blkid", blkid_data) + def test_no_signature_false(disk, blkid): + def run_command(args): +- return [0, blkid, ''] ++ return [0, blkid, ""] ++ + assert find_unused_disk.no_signature(run_command, disk) is False + + +-@pytest.mark.parametrize('disk, holders', holders_data_none) ++@pytest.mark.parametrize("disk, holders", holders_data_none) + 
def test_no_holders_true(disk, holders, monkeypatch): + def mock_return(args): + return holders +- monkeypatch.setattr(os, 'listdir', mock_return) ++ ++ monkeypatch.setattr(os, "listdir", mock_return) + assert find_unused_disk.no_holders(disk) is True + + +-@pytest.mark.parametrize('disk, holders', holders_data) ++@pytest.mark.parametrize("disk, holders", holders_data) + def test_no_holders_false(disk, holders, monkeypatch): + def mock_return(args): + return holders +- monkeypatch.setattr(os, 'listdir', mock_return) ++ ++ monkeypatch.setattr(os, "listdir", mock_return) + assert find_unused_disk.no_holders(disk) is False + + + def test_can_open_true(monkeypatch): + def mock_return(args, flag): + return True +- monkeypatch.setattr(os, 'open', mock_return) +- assert find_unused_disk.can_open('/hello') is True ++ ++ monkeypatch.setattr(os, "open", mock_return) ++ assert find_unused_disk.can_open("/hello") is True + + + def test_can_open_false(monkeypatch): + def mock_return(args, flag): + raise OSError +- monkeypatch.setattr(os, 'open', mock_return) +- assert find_unused_disk.can_open('/hello') is False ++ ++ monkeypatch.setattr(os, "open", mock_return) ++ assert find_unused_disk.can_open("/hello") is False + + + def test_is_ignored(monkeypatch): + def mock_realpath(path): + return path +- monkeypatch.setattr(os.path, 'realpath', mock_realpath) +- assert find_unused_disk.is_ignored('/dev/sda') is False +- assert find_unused_disk.is_ignored('/dev/vda') is False +- assert find_unused_disk.is_ignored('/dev/mapper/mpatha') is False +- assert find_unused_disk.is_ignored('/dev/md/Volume0') is False +- assert find_unused_disk.is_ignored('/dev/nullb0') is True ++ ++ monkeypatch.setattr(os.path, "realpath", mock_realpath) ++ assert find_unused_disk.is_ignored("/dev/sda") is False ++ assert find_unused_disk.is_ignored("/dev/vda") is False ++ assert find_unused_disk.is_ignored("/dev/mapper/mpatha") is False ++ assert find_unused_disk.is_ignored("/dev/md/Volume0") is False ++ 
assert find_unused_disk.is_ignored("/dev/nullb0") is True +diff --git a/tox.ini b/tox.ini +index 92482d5..91c22a8 100644 +--- a/tox.ini ++++ b/tox.ini +@@ -13,9 +13,3 @@ configfile = .ansible-lint + setenv = + RUN_PYTEST_SETUP_MODULE_UTILS = true + RUN_PYLINT_SETUP_MODULE_UTILS = true +- +-[testenv:black] +-commands = bash -c 'echo black is currently not enabled - please fix this' +- +-[testenv:flake8] +-commands = bash -c 'echo flake8 is currently not enabled - please fix this' +-- +2.30.2 + diff --git a/SOURCES/storage-common-fixes.diff b/SOURCES/storage-common-fixes.diff deleted file mode 100644 index 3b6a642..0000000 --- a/SOURCES/storage-common-fixes.diff +++ /dev/null @@ -1,53 +0,0 @@ -diff --git a/defaults/main.yml b/defaults/main.yml -index 476616b..743bbbb 100644 ---- a/defaults/main.yml -+++ b/defaults/main.yml -@@ -8,6 +8,7 @@ storage_safe_mode: true # fail instead of implicitly/automatically removing dev - storage_pool_defaults: - state: "present" - type: lvm -+ volumes: [] - - storage_volume_defaults: - state: "present" -diff --git a/library/blivet.py b/library/blivet.py -index 858ca2f..d288f8e 100644 ---- a/library/blivet.py -+++ b/library/blivet.py -@@ -167,7 +167,7 @@ class BlivetVolume(object): - fmt = get_format(self._volume['fs_type'], - mountpoint=self._volume.get('mount_point'), - label=self._volume['fs_label'], -- options=self._volume['fs_create_options']) -+ create_options=self._volume['fs_create_options']) - if not fmt.supported or not fmt.formattable: - raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type']) - -@@ -209,6 +209,8 @@ class BlivetVolume(object): - raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name, - self._device.size, - size, str(e))) -+ elif size and self._device.size != size and not self._device.resizable: -+ raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s" % (self._device.name, self._device.size, size)) - - def 
_reformat(self): - """ Schedule actions as needed to ensure the volume is formatted as specified. """ -@@ -224,6 +226,8 @@ class BlivetVolume(object): - - if self._device.format.status and not packages_only: - self._device.format.teardown() -+ if not self._device.isleaf: -+ self._blivet.devicetree.recursive_remove(self._device, remove_device=False) - self._blivet.format_device(self._device, fmt) - - def manage(self): -@@ -247,6 +251,9 @@ class BlivetVolume(object): - if self._device.exists: - self._reformat() - -+ if self.ultimately_present and self._volume['mount_point'] and not self._device.format.mountable: -+ raise BlivetAnsibleError("volume '%s' has a mount point but no mountable file system" % self._volume['name']) -+ - # schedule resize if appropriate - if self._device.exists and self._volume['size']: - self._resize() diff --git a/SOURCES/storage-no-disks-existing.diff b/SOURCES/storage-no-disks-existing.diff new file mode 100644 index 0000000..68b1e8d --- /dev/null +++ b/SOURCES/storage-no-disks-existing.diff @@ -0,0 +1,142 @@ +diff --git a/library/blivet.py b/library/blivet.py +index eb8bb11..e927121 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -104,6 +104,7 @@ try: + from blivet3.formats import get_format + from blivet3.partitioning import do_partitioning + from blivet3.size import Size ++ from blivet3.udev import trigger + from blivet3.util import set_up_logging + BLIVET_PACKAGE = 'blivet3' + except ImportError: +@@ -116,6 +117,7 @@ except ImportError: + from blivet.formats import get_format + from blivet.partitioning import do_partitioning + from blivet.size import Size ++ from blivet.udev import trigger + from blivet.util import set_up_logging + BLIVET_PACKAGE = 'blivet' + except ImportError: +@@ -821,7 +823,10 @@ class BlivetPool(BlivetBase): + + def _look_up_disks(self): + """ Look up the pool's disks in blivet's device tree. 
""" +- if not self._pool['disks']: ++ if self._disks: ++ return ++ ++ if not self._device and not self._pool['disks']: + raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) + elif not isinstance(self._pool['disks'], list): + raise BlivetAnsibleError("pool disks must be specified as a list") +@@ -832,7 +837,7 @@ class BlivetPool(BlivetBase): + if device is not None: # XXX fail if any disk isn't resolved? + disks.append(device) + +- if self._pool['disks'] and not disks: ++ if self._pool['disks'] and not self._device and not disks: + raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks'])) + + self._disks = disks +@@ -974,9 +979,9 @@ class BlivetPool(BlivetBase): + """ Schedule actions to configure this pool according to the yaml input. """ + global safe_mode + # look up the device +- self._look_up_disks() + self._look_up_device() + self._apply_defaults() ++ self._look_up_disks() + + # schedule destroy if appropriate, including member type change + if not self.ultimately_present: +@@ -999,6 +1004,7 @@ class BlivetPartitionPool(BlivetPool): + return self._device.partitionable + + def _look_up_device(self): ++ self._look_up_disks() + self._device = self._disks[0] + + def _create(self): +@@ -1354,6 +1360,13 @@ def run_module(): + + actions.append(action) + ++ def ensure_udev_update(action): ++ if action.is_create: ++ sys_path = action.device.path ++ if os.path.islink(sys_path): ++ sys_path = os.readlink(action.device.path) ++ trigger(action='change', subsystem='block', name=os.path.basename(sys_path)) ++ + def action_dict(action): + return dict(action=action.type_desc_str, + fs_type=action.format.type if action.is_format else None, +@@ -1395,6 +1408,7 @@ def run_module(): + if scheduled: + # execute the scheduled actions, committing changes to disk + callbacks.action_executed.add(record_action) ++ callbacks.action_executed.add(ensure_udev_update) + try: + 
b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode) + except Exception as e: +diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml +new file mode 100644 +index 0000000..854ac0d +--- /dev/null ++++ b/tests/tests_existing_lvm_pool.yml +@@ -0,0 +1,54 @@ ++--- ++- hosts: all ++ become: true ++ vars: ++ mount_location: '/opt/test1' ++ volume_group_size: '5g' ++ volume_size: '4g' ++ pool_name: foo ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: "{{ volume_group_size }}" ++ max_return: 1 ++ ++ - name: Create one LVM logical volume under one volume group ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Create another volume in the existing pool, identified only by name. ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ volumes: ++ - name: newvol ++ size: '2 GiB' ++ fs_type: ext4 ++ fs_label: newvol ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up. ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ state: absent ++ ++ - include_tasks: verify-role-results.yml diff --git a/SOURCES/storage-partition-name.diff b/SOURCES/storage-partition-name.diff new file mode 100644 index 0000000..c206dc0 --- /dev/null +++ b/SOURCES/storage-partition-name.diff @@ -0,0 +1,30 @@ +commit effb7faf20301ddcee8ee36a1b156a0b9f006bb0 +Author: David Lehman +Date: Tue Aug 4 16:00:33 2020 -0400 + + Be smarter in choosing expected partition name. + + BlivetVolume._get_device_id is only used to look up pre-existing + volumes, so we don't have to try too hard to guess it by name. 
+ We can just see if the disk has a single partition and, if so, + return the name of that partition. + + Fixes: #141 + +diff --git a/library/blivet.py b/library/blivet.py +index eb8bb11..0f7ce98 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -554,7 +554,11 @@ class BlivetPartitionVolume(BlivetVolume): + return self._device.raw_device.type == 'partition' + + def _get_device_id(self): +- return self._blivet_pool._disks[0].name + '1' ++ device_id = None ++ if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1: ++ device_id = self._blivet_pool._disks[0].children[0].name ++ ++ return device_id + + def _resize(self): + pass diff --git a/SOURCES/storage-trim-volume-size.diff b/SOURCES/storage-trim-volume-size.diff new file mode 100644 index 0000000..ef947c7 --- /dev/null +++ b/SOURCES/storage-trim-volume-size.diff @@ -0,0 +1,326 @@ +diff --git a/library/blivet.py b/library/blivet.py +index e927121..f59f821 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -130,6 +130,9 @@ if BLIVET_PACKAGE: + set_up_logging() + log = logging.getLogger(BLIVET_PACKAGE + ".ansible") + ++ ++MAX_TRIM_PERCENT = 2 ++ + use_partitions = None # create partitions on pool backing device disks? 
+ disklabel_type = None # user-specified disklabel type + safe_mode = None # do not remove any existing devices or formatting +@@ -445,8 +448,16 @@ class BlivetVolume(BlivetBase): + if not self._device.resizable: + return + +- if self._device.format.resizable: +- self._device.format.update_size_info() ++ trim_percent = (1.0 - float(self._device.max_size / size))*100 ++ log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent) ++ if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT: ++ log.info("adjusting %s resize target from %s to %s to fit in free space", ++ self._volume['name'], ++ size, ++ self._device.max_size) ++ size = self._device.max_size ++ if size == self._device.size: ++ return + + if not self._device.min_size <= size <= self._device.max_size: + raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size)) +@@ -610,10 +621,18 @@ class BlivetLVMVolume(BlivetVolume): + raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) + + fmt = self._get_format() ++ trim_percent = (1.0 - float(parent.free_space / size))*100 ++ log.debug("size: %s ; %s", size, trim_percent) + if size > parent.free_space: +- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size, +- parent.name, +- parent.free_space)) ++ if trim_percent > MAX_TRIM_PERCENT: ++ raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" ++ % (size, parent.name, parent.free_space)) ++ else: ++ log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'], ++ size, ++ parent.free_space, ++ parent.name) ++ size = parent.free_space + + try: + device = self._blivet.new_lv(name=self._volume['name'], +diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml +new file mode 100644 +index 0000000..21a5788 +--- 
/dev/null ++++ b/tests/tests_create_lv_size_equal_to_vg.yml +@@ -0,0 +1,48 @@ ++--- ++- hosts: all ++ become: true ++ vars: ++ storage_safe_mode: false ++ mount_location: '/opt/test1' ++ volume_group_size: '10g' ++ lv_size: '10g' ++ unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' ++ disk_size: '{{ unused_disk_subfact.sectors|int * ++ unused_disk_subfact.sectorsize|int }}' ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: "{{ volume_group_size }}" ++ max_return: 1 ++ ++ - name: Create one lv which size is equal to vg size ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ lv_size }}" ++ mount_point: "{{ mount_location }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: "absent" ++ volumes: ++ - name: test1 ++ mount_point: "{{ mount_location }}" ++ ++ - include_tasks: verify-role-results.yml +diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml +new file mode 100644 +index 0000000..fb17c23 +--- /dev/null ++++ b/tests/tests_lvm_auto_size_cap.yml +@@ -0,0 +1,89 @@ ++--- ++- hosts: all ++ become: true ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: 10g ++ max_return: 1 ++ ++ - command: lsblk -b -l --noheadings -o NAME,SIZE ++ register: storage_test_lsblk ++ ++ - set_fact: ++ test_disk_size: "{{ storage_test_lsblk.stdout_lines|map('regex_search', '^' + unused_disks[0] + '\\s+\\d+$')|select('string')|first|regex_replace('^\\w+\\s+', '') }}" ++ ++ - package: ++ name: bc ++ state: installed ++ ++ - command: ++ cmd: bc ++ stdin: "{{ test_disk_size }} *2" ++ register: doubled_size ++ ++ - name: 
Test handling of too-large LVM volume size ++ block: ++ - name: Try to create a pool containing one volume twice the size of the backing disk ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ doubled_size.stdout|trim }}" ++ - name: unreachable task ++ fail: ++ msg: UNREACH ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_result.msg != 'UNREACH' ++ - blivet_output.failed and ++ blivet_output.msg|regex_search('specified size for volume.+exceeds available') ++ msg: "Role has not failed when it should have" ++ ++ - name: Create a pool containing one volume the same size as the backing disk ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Repeat the previous invocation to verify idempotence ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: [] +diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml +index 37d41dc..e8dc4f4 100644 +--- a/tests/tests_lvm_errors.yml ++++ b/tests/tests_lvm_errors.yml +@@ -11,8 +11,6 @@ + - '/non/existent/disk' + invalid_size: 'xyz GiB' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * +- unused_disk_subfact.sectorsize|int }}' + + tasks: + - include_role: +@@ -86,39 +84,6 @@ + - ansible_failed_result.msg != 'UNREACH' 
+ msg: "Role has not failed when it should have" + +- # the following does not work properly +- # - name: Verify the output +- # assert: +- # that: "{{ blivet_output.failed and +- # blivet_output.msg|regex_search('invalid size.+for volume') and +- # not blivet_output.changed }}" +- # msg: "Unexpected behavior w/ invalid volume size" +- +- - name: Test for correct handling of too-large volume size. +- block: +- - name: Try to create LVM with a too-large volume size. +- include_role: +- name: linux-system-roles.storage +- vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ too_large_size }}" +- mount_point: "{{ mount_location1 }}" +- +- - name: unreachable task +- fail: +- msg: UNREACH +- +- rescue: +- - name: Check that we failed in the role +- assert: +- that: +- - ansible_failed_result.msg != 'UNREACH' +- msg: "Role has not failed when it should have" +- + # the following does not work properly + # - name: Verify the output + # assert: +@@ -138,7 +103,7 @@ + disks: "{{ unused_disks[0] }}" + volumes: + - name: test1 +- size: "{{ too_large_size }}" ++ size: "{{ volume_size }}" + mount_point: "{{ mount_location1 }}" + + - name: unreachable task +@@ -171,7 +136,7 @@ + disks: [] + volumes: + - name: test1 +- size: "{{ too_large_size }}" ++ size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + + - name: unreachable task +diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml +index a69ee98..3139bc7 100644 +--- a/tests/tests_misc.yml ++++ b/tests/tests_misc.yml +@@ -7,7 +7,7 @@ + volume_group_size: '5g' + volume1_size: '4g' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * ++ too_large_size: '{{ (unused_disk_subfact.sectors|int * 1.2) * + unused_disk_subfact.sectorsize|int }}' + + tasks: +diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml +index 9eeb2b9..209d129 100644 +--- a/tests/tests_resize.yml ++++ 
b/tests/tests_resize.yml +@@ -9,7 +9,7 @@ + invalid_size1: 'xyz GiB' + invalid_size2: 'none' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * ++ too_large_size: '{{ unused_disk_subfact.sectors|int * 1.2 * + unused_disk_subfact.sectorsize|int }}' + disk_size: '{{ unused_disk_subfact.sectors|int * + unused_disk_subfact.sectorsize|int }}' +@@ -122,23 +122,7 @@ + size: "{{ disk_size }}" + mount_point: "{{ mount_location }}" + +- - name: Unreachable task +- fail: +- msg: UNREACH +- +- rescue: +- - name: Check that we failed in the role +- assert: +- that: +- - ansible_failed_result.msg != 'UNREACH' +- msg: "Role has not failed when it should have" +- +- - name: Verify the output +- assert: +- that: "blivet_output.failed and +- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and +- not blivet_output.changed" +- msg: "Unexpected behavior w/ invalid volume size" ++ - include_tasks: verify-role-results.yml + + - name: Test for correct handling of invalid size specification + block: diff --git a/SOURCES/timesync-ansible-test-issues.diff b/SOURCES/timesync-ansible-test-issues.diff new file mode 100644 index 0000000..2ec733a --- /dev/null +++ b/SOURCES/timesync-ansible-test-issues.diff @@ -0,0 +1,22 @@ +From b55af45842482768f29704d90a1e019ffe0f7770 Mon Sep 17 00:00:00 2001 +From: Noriko Hosoi +Date: Tue, 2 Mar 2021 13:39:19 -0800 +Subject: [PATCH] Patch32: timesync-ansible-test-issues.diff + +RHELPLAN-68118 - Collections - Timesync - fixing ansible-test errors +RHELPLAN-68789 - Collections - ignore file for each role +--- + .sanity-ansible-ignore-2.9.txt | 1 + + 1 file changed, 1 insertion(+) + create mode 100644 .sanity-ansible-ignore-2.9.txt + +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..e6d5e4d +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1 @@ ++plugins/modules/timesync_provider.sh shebang +-- 
+2.26.2 + diff --git a/SOURCES/timesync-tier1-tags.diff b/SOURCES/timesync-tier1-tags.diff index 4d7c80c..3abd22d 100644 --- a/SOURCES/timesync-tier1-tags.diff +++ b/SOURCES/timesync-tier1-tags.diff @@ -37,10 +37,7 @@ diff --git a/tests/tests_default.yml b/tests/tests_default.yml index 856ebe5..fb298c9 100644 --- a/tests/tests_default.yml +++ b/tests/tests_default.yml -@@ -1,6 +1,17 @@ - - - name: Ensure that the role runs with default parameters -+ tags: tests::tier1 +@@ -3,4 +4,14 @@ hosts: all roles: @@ -48,24 +45,13 @@ index 856ebe5..fb298c9 100644 + + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml -diff --git a/tests/tests_default_vars.yml b/tests/tests_default_vars.yml -index 366a9f5..df989a5 100644 ---- a/tests/tests_default_vars.yml -+++ b/tests/tests_default_vars.yml -@@ -1,5 +1,6 @@ - --- - - name: Ensure that the role declares all parameters in defaults -+ tags: tests::tier1 - hosts: all - - tasks: diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml index a768f4c..b0c0ab3 100644 --- a/tests/tests_default_wrapper.yml @@ -74,7 +60,7 @@ index a768f4c..b0c0ab3 100644 --- - name: Create static inventory from hostvars + tags: -+ - 'tests::tier1' ++# - 'tests::tier1' + - 'tests::slow' hosts: all tasks: @@ -84,13 +70,13 @@ index a768f4c..b0c0ab3 100644 - name: Run tests_default.yml normally + tags: -+ - 'tests::tier1' ++# - 'tests::tier1' + - 'tests::slow' import_playbook: tests_default.yml - name: Run tests_default.yml in check_mode + tags: -+ - 'tests::tier1' ++# - 'tests::tier1' + - 'tests::slow' hosts: all tasks: @@ -99,20 +85,13 @@ diff --git a/tests/tests_ntp.yml b/tests/tests_ntp.yml index e4b1b5e..446f1dc 100644 --- a/tests/tests_ntp.yml +++ b/tests/tests_ntp.yml -@@ -1,5 +1,6 @@ - - - name: 
Configure time synchronization with NTP servers -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -18,6 +19,11 @@ roles: - linux-system-roles.timesync + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + tasks: @@ -125,25 +104,18 @@ index e4b1b5e..446f1dc 100644 + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ntp_provider1.yml b/tests/tests_ntp_provider1.yml index 08ecab9..9fe0db3 100644 --- a/tests/tests_ntp_provider1.yml +++ b/tests/tests_ntp_provider1.yml -@@ -1,5 +1,6 @@ - - - name: Configure NTP with default provider -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -8,6 +9,10 @@ - linux-system-roles.timesync pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Remove NTP providers @@ -155,25 +127,18 @@ index 08ecab9..9fe0db3 100644 - "'172.16.123.1' in sources.stdout" + + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ntp_provider2.yml b/tests/tests_ntp_provider2.yml index 5476ae4..e0d5c96 100644 --- a/tests/tests_ntp_provider2.yml +++ b/tests/tests_ntp_provider2.yml -@@ -1,5 +1,6 @@ - - - name: Configure NTP with chrony as current provider -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -8,6 +9,10 @@ - linux-system-roles.timesync pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Remove ntp @@ -185,25 +150,18 @@ index 5476ae4..e0d5c96 100644 shell: chronyc -n tracking + + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff 
--git a/tests/tests_ntp_provider3.yml b/tests/tests_ntp_provider3.yml index 44ca101..d440a64 100644 --- a/tests/tests_ntp_provider3.yml +++ b/tests/tests_ntp_provider3.yml -@@ -1,5 +1,6 @@ - - - name: Configure NTP with ntp as current provider -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -8,6 +9,10 @@ - linux-system-roles.timesync pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Remove chrony @@ -221,19 +179,12 @@ diff --git a/tests/tests_ntp_provider4.yml b/tests/tests_ntp_provider4.yml index 8b452b8..8bccba0 100644 --- a/tests/tests_ntp_provider4.yml +++ b/tests/tests_ntp_provider4.yml -@@ -1,5 +1,6 @@ - - - name: Configure NTP with chrony as specified provider -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -9,6 +10,10 @@ - linux-system-roles.timesync pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Install chrony @@ -245,25 +196,18 @@ index 8b452b8..8bccba0 100644 shell: chronyc -n tracking + + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ntp_provider5.yml b/tests/tests_ntp_provider5.yml index 1740164..98a054f 100644 --- a/tests/tests_ntp_provider5.yml +++ b/tests/tests_ntp_provider5.yml -@@ -1,5 +1,6 @@ - - - name: Configure NTP with ntp as specified provider -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -9,6 +10,10 @@ - linux-system-roles.timesync pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Install ntp @@ -275,24 +219,18 @@ index 1740164..98a054f 100644 shell: ntpq -c rv | grep 'associd=0' + + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: 
restore_services_state.yml diff --git a/tests/tests_ntp_provider6.yml b/tests/tests_ntp_provider6.yml index 21a2039..fb41824 100644 --- a/tests/tests_ntp_provider6.yml +++ b/tests/tests_ntp_provider6.yml -@@ -1,11 +1,16 @@ - - - name: Configure NTP with OS release non-default provider and then change it to the default provider -+ tags: tests::tier1 - hosts: all - vars: - is_ntp_default: "ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_version is version('7.0', '<')" +@@ -6,6 +7,10 @@ both_avail: true tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + - name: Check for availability of both NTP providers @@ -304,26 +242,19 @@ index 21a2039..fb41824 100644 - not is_ntp_default + + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ntp_ptp.yml b/tests/tests_ntp_ptp.yml index cab706f..7f4cdfc 100644 --- a/tests/tests_ntp_ptp.yml +++ b/tests/tests_ntp_ptp.yml -@@ -1,5 +1,6 @@ - - - name: Configure time synchronization with NTP servers and PTP domains -+ tags: tests::tier1 - hosts: all - vars: - timesync_ntp_servers: @@ -22,6 +23,11 @@ roles: - linux-system-roles.timesync + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + tasks: @@ -336,7 +267,7 @@ index cab706f..7f4cdfc 100644 + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ptp_multi.yml b/tests/tests_ptp_multi.yml index d52d439..936e467 100644 @@ -345,7 +276,7 @@ index d52d439..936e467 100644 @@ -1,5 +1,6 @@ - name: Configure time synchronization with multiple PTP domains -+ tags: [ 'tests::tier1', 'tests::expfail' ] ++ tags: tests::expfail hosts: all vars: timesync_ptp_domains: @@ -355,7 +286,7 @@ index 
d52d439..936e467 100644 + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + tasks: @@ -368,7 +299,7 @@ index d52d439..936e467 100644 + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_ptp_single.yml b/tests/tests_ptp_single.yml index 74da310..36d141e 100644 @@ -377,7 +308,7 @@ index 74da310..36d141e 100644 @@ -1,5 +1,6 @@ - name: Configure time synchronization with single PTP domain -+ tags: [ 'tests::tier1', 'tests::expfail' ] ++ tags: tests::expfail hosts: all vars: timesync_ptp_domains: @@ -387,7 +318,7 @@ index 74da310..36d141e 100644 + pre_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: get_services_state.yml + tasks: @@ -400,5 +331,5 @@ index 74da310..36d141e 100644 + + post_tasks: + - name: Import tasks -+ tags: tests::tier1::cleanup ++# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/SPECS/rhel-system-roles.spec b/SPECS/rhel-system-roles.spec index 511811b..80269a0 100644 --- a/SPECS/rhel-system-roles.spec +++ b/SPECS/rhel-system-roles.spec @@ -1,92 +1,269 @@ +%if 0%{?rhel} && ! 0%{?epel} +%bcond_with ansible +%else +%bcond_without ansible +%endif + +%bcond_with collection_artifact + +%if 0%{?fedora} || 0%{?rhel} >= 8 +%bcond_without html +%else +# pandoc is not supported in rhel 7 and older, +# which is needed for converting .md to .html. 
+%bcond_with html +%endif + %if 0%{?rhel} Name: rhel-system-roles %else Name: linux-system-roles %endif +Url: https://github.com/linux-system-roles/ Summary: Set of interfaces for unified system management -Version: 1.0 -Release: 12%{?dist} +Version: 1.0.1 +Release: 4%{?dist} #Group: Development/Libraries License: GPLv3+ and MIT and BSD -%if 0%{?rhel} +%global installbase %{_datadir}/linux-system-roles +%global _pkglicensedir %{_licensedir}/%{name} %global rolealtprefix linux-system-roles. -%endif %global roleprefix %{name}. -%global _python_bytecompile_errors_terminate_build 0 +%global roleinstprefix %{nil} +%global rolealtrelpath ../../linux-system-roles/ +%if 0%{?rhel} +%global roleinstprefix %{roleprefix} +%global installbase %{_datadir}/ansible/roles +%global rolealtrelpath %{nil} +%endif + +%if 0%{?rhel} +%global collection_namespace redhat +%global collection_name rhel_system_roles +%else +%global collection_namespace fedora +%global collection_name linux_system_roles +%endif +%global subrole_prefix "private_${role}_subrole_" + +%global collection_version %{version} + +# Helper macros originally from macros.ansible by Igor Raits +# Not available on RHEL, so we must define those macros locally here without using ansible-galaxy + +# Not used (yet). Could be made to point to AH in RHEL - but what about CentOS Stream? +#%%{!?ansible_collection_url:%%define ansible_collection_url() https://galaxy.ansible.com/%%{collection_namespace}/%%{collection_name}} + +%if 0%{?fedora} || 0%{?rhel} >= 8 +%{!?ansible_collection_files:%define ansible_collection_files %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace}} +%else +# Define undefined macro using "!?ansible_collection_files:..." 
does not work for rhel-7 +%if %{?ansible_collection_files:0}%{!?ansible_collection_files:1} +%define ansible_collection_files %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace} +%endif +%endif + + +%if %{with ansible} +BuildRequires: ansible >= 2.9.10 +%endif + +%if %{without ansible} +# We don't have ansible-galaxy. +# Simply copy everything instead of galaxy-installing the built artifact. +%define ansible_collection_build_install() tar -cf %{_tmppath}/%{collection_namespace}-%{collection_name}-%{version}.tar.gz .; mkdir -p %{buildroot}%{ansible_collection_files}/%{collection_name}; (cd %{buildroot}%{ansible_collection_files}/%{collection_name}; tar -xf %{_tmppath}/%{collection_namespace}-%{collection_name}-%{version}.tar.gz) +%else +%define ansible_collection_build_install() ansible-galaxy collection build; ansible-galaxy collection install -n -p %{buildroot}%{_datadir}/ansible/collections %{collection_namespace}-%{collection_name}-%{version}.tar.gz +%endif # For each role, call either defcommit() or deftag(). The other macros # (%%id and %%shortid) can be then used in the same way in both cases. # This way the rest of the spec file des not need to know whether we are # dealing with a tag or a commit. 
-%define defcommit() %{expand:%%global id%{1} %{2} -%%global shortid%{1} %%(c=%%{id%{1}}; echo ${c:0:7}) +%global archiveext tar.gz +# list of role names +%global rolenames %nil +# list of assignments that can be used to populate a bash associative array variable +%global rolestodir %nil +%define getarchivedir() %(p=%{basename:%{S:%{1}}}; echo ${p%%.%{archiveext}}) + +%define defcommit() %{expand:%%global ref%{1} %{2} +%%global shortcommit%{1} %%(c=%%{ref%{1}}; echo ${c:0:7}) +%%global extractdir%{1} %%{expand:%%getarchivedir %{1}} +%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}} +%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz +%%global rolenames %%{?rolenames} %%{rolename%{1}} +%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}" +%%global rolestodir %%{?rolestodir} %{roletodir%{1}} } -%define deftag() %{expand:%%global id%{1} %{2} -%%global shortid%{1} %{2} +%define deftag() %{expand:%%global ref%{1} %{2} +%%global extractdir%{1} %%{expand:%%getarchivedir %{1}} +%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}} +%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz +%%global rolenames %%{?rolenames} %%{rolename%{1}} +%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}" +%%global rolestodir %%{?rolestodir} %%{roletodir%{1}} } -%defcommit 0 0c2bb286bbc1b73d728226924e0010c0fa1ce30a -%global rolename0 kdump -#%%deftag 0 1.0.0 - #%%defcommit 1 43eec5668425d295dce3801216c19b1916df1f9b %global rolename1 postfix %deftag 1 0.1 -%defcommit 2 6cd1ec8fdebdb92a789b14e5a44fe77f0a3d8ecd +#%%defcommit 2 6cd1ec8fdebdb92a789b14e5a44fe77f0a3d8ecd %global rolename2 selinux -#%%deftag 2 1.0.0 +%deftag 2 1.1.1 -#%%defcommit 3 924650d0cd4117f73a7f0413ab745a8632bc5cec +%defcommit 3 924650d0cd4117f73a7f0413ab745a8632bc5cec %global rolename3 timesync -%deftag 3 1.0.2 +#%%deftag 3 1.0.0 + +%defcommit 4 
77596fdd976c6160d6152c200a5432c609725a14 +%global rolename4 kdump +#%%deftag 4 1.0.0 -%defcommit 5 3fc15de068f0ba3586f899f2592476aec9f5dc18 +%defcommit 5 bda206d45c87ee8c1a5284de84f5acf5e629de97 %global rolename5 network -#%%deftag 5 1.1.0 +#%%deftag 5 1.0.0 +%defcommit 6 485de47b0dc0787aea077ba448ecb954f53e40c4 %global rolename6 storage -%deftag 6 1.1.0 - -Source: https://github.com/linux-system-roles/%{rolename0}/archive/%{id0}.tar.gz#/%{rolename0}-%{shortid0}.tar.gz -Source1: https://github.com/linux-system-roles/%{rolename1}/archive/%{id1}.tar.gz#/%{rolename1}-%{shortid1}.tar.gz -Source2: https://github.com/linux-system-roles/%{rolename2}/archive/%{id2}.tar.gz#/%{rolename2}-%{shortid2}.tar.gz -Source3: https://github.com/linux-system-roles/%{rolename3}/archive/%{id3}.tar.gz#/%{rolename3}-%{shortid3}.tar.gz -Source5: https://github.com/linux-system-roles/%{rolename5}/archive/%{id5}.tar.gz#/%{rolename5}-%{shortid5}.tar.gz -Source6: https://github.com/linux-system-roles/%{rolename6}/archive/%{id6}.tar.gz#/%{rolename6}-%{shortid6}.tar.gz - -%if "%{roleprefix}" != "linux-system-roles." 
-Patch1: rhel-system-roles-%{rolename1}-prefix.diff -Patch2: rhel-system-roles-%{rolename2}-prefix.diff -Patch3: rhel-system-roles-%{rolename3}-prefix.diff -Patch5: rhel-system-roles-%{rolename5}-prefix.diff -Patch6: rhel-system-roles-%{rolename6}-prefix.diff -%endif +#%%deftag 6 1.2.2 + +%defcommit 7 e81b2650108727f38b1c856699aad26af0f44a46 +%global rolename7 metrics +#%%deftag 7 0.1.0 + +#%%defcommit 8 cfa70b6b5910b3198aba2679f8fc36aad45ca45a +%global rolename8 tlog +%deftag 8 1.1.0 + +%defcommit 9 4c81fd1380712ab0641b6837f092dd9caeeae0a6 +%global rolename9 kernel_settings +#%%deftag 9 1.0.1 + +%defcommit 10 07e08107e7ccba5822f8a7aaec1a2ff0a221bede +%global rolename10 logging +#%%deftag 10 0.2.0 + +%defcommit 11 4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678 +%global rolename11 nbde_server +#%%deftag 11 1.0.1 + +%defcommit 12 19f06159582550c8463f7d8492669e26fbdf760b +%global rolename12 nbde_client +#%%deftag 12 1.0.1 + +%defcommit 13 0376ceece57882ade8ffaf431b7866aae3e7fed1 +%global rolename13 certificate +#%%deftag 13 1.0.1 + +%defcommit 14 2e2941c5545571fc8bc494099bdf970f498b9d38 +%global rolename14 crypto_policies + +%global forgeorg15 https://github.com/willshersystems/ +%global repo15 ansible-sshd +%global rolename15 sshd +%defcommit 15 e1de59b3c54e9d48a010eeca73755df339c7e628 + +%defcommit 16 21adc637511db86b5ba279a70a7301ef3a170669 +%global rolename16 ssh + +%defcommit 17 779bb78559de58bb5a1f25a4b92039c373ef59a4 +%global rolename17 ha_cluster + +%global mainid 4e47b3809a4e6c1dcd9af57fee117d6df0c261ad +Source: %{url}auto-maintenance/archive/%{mainid}/auto-maintenance-%{mainid}.tar.gz +Source1: %{archiveurl1} +Source2: %{archiveurl2} +Source3: %{archiveurl3} +Source4: %{archiveurl4} +Source5: %{archiveurl5} +Source6: %{archiveurl6} +Source7: %{archiveurl7} +Source8: %{archiveurl8} +Source9: %{archiveurl9} +Source10: %{archiveurl10} +Source11: %{archiveurl11} +Source12: %{archiveurl12} +Source13: %{archiveurl13} +Source14: %{archiveurl14} +Source15: %{archiveurl15} 
+Source16: %{archiveurl16} +Source17: %{archiveurl17} + +# Script to convert the collection README to Automation Hub. +# Not used on Fedora. +Source998: collection_readme.sh Patch11: rhel-system-roles-postfix-pr5.diff -Patch101: rhel-system-roles-kdump-pr22.diff - -Patch102: kdump-tier1-tags.diff +Patch12: postfix-meta-el8.diff Patch21: selinux-tier1-tags.diff +Patch22: selinux-bz-1926947-no-variable-named-present.diff +Patch23: selinux-ansible-test-issues.diff Patch31: timesync-tier1-tags.diff +Patch32: timesync-ansible-test-issues.diff + +Patch41: rhel-system-roles-kdump-pr22.diff +Patch42: kdump-tier1-tags.diff +Patch43: kdump-meta-el8.diff +Patch44: kdump-fix-newline.diff +Patch51: network-epel-minimal.diff +# Not suitable for upstream, since the files need to be executable there +Patch52: network-permissions.diff Patch53: network-tier1-tags.diff -Patch54: network-nm-reload-profile.diff +Patch55: network-disable-bondtests.diff +Patch56: network-pr353.diff +Patch57: network-ansible-test.diff +Patch58: network-epel-enable.diff -# PR#64 -Patch62: storage-common-fixes.diff +Patch62: storage-partition-name.diff +Patch63: storage-no-disks-existing.diff +Patch64: storage-trim-volume-size.diff +Patch65: storage-ansible-test.diff + +Patch71: metrics-mssql-x86.diff + +Patch151: sshd-example.diff +Patch152: sshd-work-on-ansible28-jinja27.diff -Url: https://github.com/linux-system-roles/ BuildArch: noarch -Requires: python2-jmespath +%if %{with html} +# Requirements for md2html.sh to build the documentation +%if 0%{?fedora} || 0%{?rhel} >= 9 +BuildRequires: rubygem-kramdown-parser-gfm +%else +BuildRequires: pandoc +BuildRequires: asciidoc +BuildRequires: highlight +%endif +%endif + +# Requirements for galaxy_transform.py +BuildRequires: python3 +%if 0%{?fedora} || 0%{?rhel} >= 8 +BuildRequires: python3dist(ruamel.yaml) + +Requires: python3-jmespath +%else +BuildRequires: python3-ruamel-yaml + +Requires: python-jmespath +%endif Obsoletes: rhel-system-roles-techpreview < 
1.0-3 +%if %{undefined __ansible_provides} +Provides: ansible-collection(%{collection_namespace}.%{collection_name}) = %{collection_version} +%endif +# be compatible with the usual Fedora Provides: +Provides: ansible-collection-%{collection_namespace}-%{collection_name} = %{version}-%{release} + # We need to put %%description within the if block to avoid empty # lines showing up. %if 0%{?rhel} @@ -101,178 +278,410 @@ consistent configuration interface for managing multiple versions of Fedora, Red Hat Enterprise Linux & CentOS. %endif -%prep -%setup -qc -a1 -a2 -a3 -a5 -a6 -cd %{rolename0}-%{id0} -%patch101 -p1 -%patch102 -p1 -cd .. -cd %{rolename1}-%{id1} -%if "%{roleprefix}" != "linux-system-roles." -%patch1 -p1 +%if %{with collection_artifact} +%package collection-artifact +Summary: Collection artifact to import to Automation Hub / Ansible Galaxy + +%description collection-artifact +Collection artifact for %{name}. This package contains %{collection_namespace}-%{collection_name}-%{version}.tar.gz %endif + +%prep +%setup -q -a1 -a2 -a3 -a4 -a5 -a6 -a7 -a8 -a9 -a10 -a11 -a12 -a13 -a14 -a15 -a16 -a17 -n %{getarchivedir 0} + +declare -A ROLESTODIR=(%{rolestodir}) +for rolename in %{rolenames}; do + mv "${ROLESTODIR[${rolename}]}" ${rolename} +done + +cd %{rolename1} %patch11 -p1 +%patch12 -p1 cd .. -cd %{rolename2}-%{id2} -%if "%{roleprefix}" != "linux-system-roles." -%patch2 -p1 -%endif +cd %{rolename2} %patch21 -p1 +%patch22 -p1 +%patch23 -p1 cd .. -cd %{rolename3}-%{id3} -%if "%{roleprefix}" != "linux-system-roles." -%patch3 -p1 -%endif +cd %{rolename3} %patch31 -p1 +%patch32 -p1 cd .. -cd %{rolename5}-%{id5} -%if "%{roleprefix}" != "linux-system-roles." -%patch5 -p1 -%endif +cd %{rolename4} +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +cd .. +cd %{rolename5} +%patch51 -p1 +%patch52 -p1 %patch53 -p1 -%patch54 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 +%patch58 -p1 cd .. -cd %{rolename6}-%{id6} -%if "%{roleprefix}" != "linux-system-roles." 
-%patch6 -p1 -%endif +cd %{rolename6} %patch62 -p1 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 +cd .. +cd %{rolename7} +%patch71 -p1 +cd .. +cd %{rolename15} +%patch151 -p1 +%patch152 -p1 +sed -r -i -e "s/ansible-sshd/linux-system-roles.sshd/" tests/*.yml examples/*.yml +sed -r -i -e "s/ willshersystems.sshd/ linux-system-roles.sshd/" tests/*.yml examples/*.yml README.md cd .. +# Replacing "linux-system-roles.rolename" with "rhel-system-roles.rolename" in each role +%if "%{roleprefix}" != "linux-system-roles." +for rolename in %{rolenames}; do + find $rolename -type f -exec \ + sed "s/linux-system-roles[.]${rolename}\\>/%{roleprefix}${rolename}/g" -i {} \; +done +%endif + +# Removing symlinks in tests/roles +for rolename in %{rolenames}; do + if [ -d ${rolename}/tests/roles ]; then + find ${rolename}/tests/roles -type l -exec rm {} \; + if [ -d ${rolename}/tests/roles/linux-system-roles.${rolename} ]; then + rm -r ${rolename}/tests/roles/linux-system-roles.${rolename} + fi + fi +done +rm %{rolename5}/tests/modules +rm %{rolename5}/tests/module_utils +rm %{rolename5}/tests/playbooks/roles +# Drop network/{scripts/print_all_options.py,tests/ensure_provider_tests.py} +# from rpm. These 2 files fail in brp-python-bytecompile due to f-strings +# when python2 is default python. +rm %{rolename5}/scripts/print_all_options.py +rm %{rolename5}/tests/ensure_provider_tests.py + +# transform ambiguous #!/usr/bin/env python shebangs to python3 to stop brp-mangle-shebangs complaining +find -type f -executable -name '*.py' -exec \ + sed -i -r -e '1s@^(#! 
*/usr/bin/env python)(\s|$)@#\13\2@' '{}' + + %build +%if %{with html} +readmes="" +for role in %{rolenames}; do + readmes="${readmes} $role/README.md" +done +sh md2html.sh $readmes +%endif + +mkdir .collections +%if 0%{?rhel} +# Convert the upstream collection readme to the downstream one +%{SOURCE998} lsr_role2collection/collection_readme.md +%endif +./galaxy_transform.py "%{collection_namespace}" "%{collection_name}" "%{collection_version}" "Red Hat Enterprise Linux System Roles Ansible Collection" > galaxy.yml.tmp +mv galaxy.yml.tmp galaxy.yml + +for role in %{rolenames}; do + python3 lsr_role2collection.py --role "$role" --src-path "$role" \ + --src-owner %{name} --subrole-prefix %{subrole_prefix} --dest-path .collections \ + --readme lsr_role2collection/collection_readme.md \ + --namespace %{collection_namespace} --collection %{collection_name} +done + +# copy requirements.txt and bindep.txt from auto-maintenance/lsr_role2collection +if [ -f lsr_role2collection/collection_requirements.txt ]; then + cp lsr_role2collection/collection_requirements.txt \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/requirements.txt +fi +if [ -f lsr_role2collection/collection_bindep.txt ]; then + cp lsr_role2collection/collection_bindep.txt \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/bindep.txt +fi + +rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt +# Merge .sanity-ansible-ignore-2.9-ROLENAME.txt into tests/sanity/ignore-2.9.txt +mkdir -p .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity +for role in %{rolenames}; do + if [ -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt ]; + then + cat .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt \ + >> 
.collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt + rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-*-"$role".txt + fi +done + +# removing dot files/dirs +rm -r .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.[A-Za-z]* + +cp -p galaxy.yml lsr_role2collection/.ansible-lint \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name} + +# Remove table of contents from logging README.md +# It is not needed for html and AH/Galaxy +sed -i -e 's/^\(## Table of Contents\)/## Background\n\1/' \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/logging/README.md +sed -i -e '/^## Table of Contents/,/^## Background/d' \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/logging/README.md + +# Remove internal links from readme files +# They are not rendered properly on AH. +for role in %{rolenames}; do + sed -r -i -e 's/\[([^[]+)\]\(#.*\)/\1/g' \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/$role/README.md +done %install +mkdir -p $RPM_BUILD_ROOT%{installbase} mkdir -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles -cp -pR %{rolename0}-%{id0} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename0} -cp -pR %{rolename1}-%{id1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename1} -cp -pR %{rolename2}-%{id2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename2} -cp -pR %{rolename3}-%{id3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename3} -cp -pR %{rolename5}-%{id5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename5} -cp -pR %{rolename6}-%{id6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename6} +for role in %{rolenames}; do + cp -pR "$role" "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role" +done %if 0%{?rolealtprefix:1} -ln -s 
%{roleprefix}%{rolename0} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename0} -ln -s %{roleprefix}%{rolename1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename1} -ln -s %{roleprefix}%{rolename2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename2} -ln -s %{roleprefix}%{rolename3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename3} -ln -s %{roleprefix}%{rolename5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename5} -ln -s %{roleprefix}%{rolename6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename6} +for role in %{rolenames}; do + ln -s "%{rolealtrelpath}%{roleinstprefix}$role" "$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}$role" +done %endif -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/kdump -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/postfix -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/selinux -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/timesync -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/network -mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/storage - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/COPYING \ - $RPM_BUILD_ROOT%{_pkgdocdir}/kdump - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}postfix/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}postfix/COPYING \ - $RPM_BUILD_ROOT%{_pkgdocdir}/postfix - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/COPYING \ - $RPM_BUILD_ROOT%{_pkgdocdir}/selinux -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/selinux-playbook.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/selinux/example-selinux-playbook.yml - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/COPYING \ - $RPM_BUILD_ROOT%{_pkgdocdir}/timesync -mv 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/examples/multiple-ntp-servers.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/examples/single-pool.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-pool-playbook.yml - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/LICENSE \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bond_with_vlan.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bridge_with_vlan.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bridge_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_simple_auto.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_simple_auto-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_with_vlan.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/infiniband.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-infiniband-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/macvlan.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-macvlan-playbook.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/remove_profile.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-remove_profile-playbook.yml -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/remove_profile.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/down_profile.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-down_profile-playbook.yml -rm 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/down_profile.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/inventory \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-inventory -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtool_features.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtool_features_default.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features_default-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bond_simple.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_simple-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_with_802_1x.yml \ - $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_802_1x-playbook.yml - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE \ - $RPM_BUILD_ROOT%{_pkgdocdir}/storage - - -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/semaphore -rm -r $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/molecule -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/.travis.yml -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/.ansible-lint - -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/.gitignore -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/tests/.gitignore -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/roles -rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples - -%files -%dir %{_datadir}/ansible -%dir %{_datadir}/ansible/roles -%if 0%{?rolealtprefix:1} -%{_datadir}/ansible/roles/%{rolealtprefix}kdump -%{_datadir}/ansible/roles/%{rolealtprefix}postfix -%{_datadir}/ansible/roles/%{rolealtprefix}selinux 
-%{_datadir}/ansible/roles/%{rolealtprefix}timesync -%{_datadir}/ansible/roles/%{rolealtprefix}network -%{_datadir}/ansible/roles/%{rolealtprefix}storage +mkdir -p $RPM_BUILD_ROOT%{_pkglicensedir} +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/roles +for role in %{rolenames}; do + mkdir -p "$RPM_BUILD_ROOT%{_pkgdocdir}/$role" + cp -p "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/README.md" \ + "$RPM_BUILD_ROOT%{_pkgdocdir}/$role" +%if %{with html} + cp -p "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/README.html" \ + "$RPM_BUILD_ROOT%{_pkgdocdir}/$role" +%endif + if [ -f "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/COPYING" ]; then + cp -p "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/COPYING" \ + "$RPM_BUILD_ROOT%{_pkglicensedir}/$role.COPYING" + fi + if [ -f "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/LICENSE" ]; then + cp -p "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/LICENSE" \ + "$RPM_BUILD_ROOT%{_pkglicensedir}/$role.LICENSE" + fi + if [ -d "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples" ]; then + for file in "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples/"*.yml ; do + basename=$(basename "$file" .yml) + newname="$basename" + if [[ "$newname" != example-* ]]; then + newname="example-$newname" + fi + if [[ "$newname" != *-playbook ]]; then + newname="${newname}-playbook" + fi + cp "$file" "$RPM_BUILD_ROOT%{_pkgdocdir}/$role/${newname}.yml" + rm "$file" + done + if [ -f "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples/inventory" ]; then + cp "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples/inventory" \ + "$RPM_BUILD_ROOT%{_pkgdocdir}/$role/example-inventory" + rm "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples/inventory" + fi + # special case for network + # this will error if the directory is unexpectedly empty + rmdir "$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}$role/examples" + fi +done + +rm 
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/semaphore +rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/molecule + +rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/.[A-Za-z]* +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/tests/.git* + +# NOTE: sshd/examples/example-root-login.yml is +# referenced in the configuring-openssh-servers-using-the-sshd-system-role documentation module +# must be updated if changing the file path + +pushd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/ +%ansible_collection_build_install +popd + +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles + +cp -p %{buildroot}%{ansible_collection_files}/%{collection_name}/README.md \ + $RPM_BUILD_ROOT%{_pkgdocdir}/collection + +for rolename in %{rolenames}; do + if [ -f %{buildroot}%{ansible_collection_files}/%{collection_name}/roles/${rolename}/README.md ]; then + mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename} + cp -p %{buildroot}%{ansible_collection_files}/%{collection_name}/roles/${rolename}/README.md \ + $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename} + fi +done + +%if %{with html} +# converting README.md to README.html for collection in $RPM_BUILD_ROOT%{_pkgdocdir}/collection +readmes="$RPM_BUILD_ROOT%{_pkgdocdir}/collection/README.md" +for role in %{rolenames}; do + readmes="${readmes} $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${role}/README.md" +done +sh md2html.sh $readmes +%endif + +%if %{with collection_artifact} +# Copy collection artifact to /usr/share/ansible/collections/ for collection-artifact +pushd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/ +if [ -f %{collection_namespace}-%{collection_name}-%{version}.tar.gz ]; then + mv %{collection_namespace}-%{collection_name}-%{version}.tar.gz \ + $RPM_BUILD_ROOT%{_datadir}/ansible/collections/ +fi +popd +%endif + +# generate the %files section in the file 
files_section.txt +format_item_for_files() { + # $1 is directory or file name in buildroot + # $2 - if true, and item is a directory, use %dir + local item + local files_item + item="$1" + files_item=${item##"%{buildroot}"} + if [ -L "$item" ]; then + echo "$files_item" + elif [ -d "$item" ]; then + if [[ "$item" == */doc* ]]; then + echo "%doc $files_item" + elif [ "${2:-false}" = true ]; then + echo "%dir $files_item" + else + echo "$files_item" + fi + elif [[ "$item" == */README.md ]] || [[ "$item" == */README.html ]]; then + if [[ "$item" == */private_* ]]; then + # mark as regular file, not %doc + echo "$files_item" + else + echo "%doc $files_item" + fi + elif [[ "$item" != */COPYING* ]] && [[ "$item" != */LICENSE* ]]; then + # Avoid dynamically using the license macro since the license macro + # is replaced with the value of License directive in the older rpmbuild. + echo "$files_item" + fi +} + +files_section=files_section.txt +rm -f $files_section +touch $files_section +%if %{without ansible} +echo '%dir %{_datadir}/ansible' >> $files_section +echo '%dir %{_datadir}/ansible/roles' >> $files_section +%endif +%if "%{installbase}" != "%{_datadir}/ansible/roles" +echo '%dir %{installbase}' >> $files_section +%endif +echo '%dir %{ansible_collection_files}' >> $files_section +echo '%dir %{ansible_collection_files}/%{collection_name}' >> $files_section +find %{buildroot}%{ansible_collection_files}/%{collection_name} -mindepth 1 -maxdepth 1 | \ + while read item; do + if [[ "$item" == */roles ]]; then + format_item_for_files "$item" true >> $files_section + find "$item" -mindepth 1 -maxdepth 1 | while read roles_dir; do + format_item_for_files "$roles_dir" true >> $files_section + find "$roles_dir" -mindepth 1 -maxdepth 1 | while read roles_item; do + format_item_for_files "$roles_item" >> $files_section + done + done + else + format_item_for_files "$item" >> $files_section + fi + done + +find %{buildroot}%{installbase} -mindepth 1 -maxdepth 1 | \ + while read 
item; do + if [ -d "$item" ]; then + format_item_for_files "$item" true >> $files_section + find "$item" -mindepth 1 -maxdepth 1 | while read roles_item; do + format_item_for_files "$roles_item" >> $files_section + done + else + format_item_for_files "$item" >> $files_section + fi + done +if [ "%{installbase}" != "%{_datadir}/ansible/roles" ]; then + find %{buildroot}%{_datadir}/ansible/roles -mindepth 1 -maxdepth 1 | \ + while read item; do + if [ -d "$item" ]; then + format_item_for_files "$item" true >> $files_section + find "$item" -mindepth 1 -maxdepth 1 | while read roles_item; do + format_item_for_files "$roles_item" >> $files_section + done + else + format_item_for_files "$item" >> $files_section + fi + done +fi +# cat files_section.txt +# done with files_section.txt generation + + +%files -f files_section.txt +%{_pkgdocdir}/*/README.md +%if %{with html} +%{_pkgdocdir}/*/README.html +%endif +%{_pkgdocdir}/*/example-* +%{_pkgdocdir}/collection/roles/*/README.md +%if %{with html} +%{_pkgdocdir}/collection/roles/*/README.html +%endif +%license %{_pkglicensedir}/* +%license %{installbase}/*/COPYING* +%license %{installbase}/*/LICENSE* +%license %{ansible_collection_files}/%{collection_name}/COPYING* +%license %{ansible_collection_files}/%{collection_name}/LICENSE* +%if 0%{?rhel} < 8 +# Needs to list excluded files in this hardcoded style since when +# format_item_for_files is executed, brp-python-bytecompile is not +# executed yet. +%exclude %{installbase}/*/*.py? +%exclude %{installbase}/*/*/*.py? +%exclude %{installbase}/*/*/*/*.py? +%exclude %{installbase}/*/*/*/*/*.py? +%exclude %{ansible_collection_files}/%{collection_name}/*/*/*.py? +%exclude %{ansible_collection_files}/%{collection_name}/*/*/*/*.py? +%exclude %{ansible_collection_files}/%{collection_name}/*/*/*/*/*.py? 
+%endif + +%if %{with collection_artifact} +%files collection-artifact +%{_datadir}/ansible/collections/%{collection_namespace}-%{collection_name}-%{version}.tar.gz %endif -%{_datadir}/ansible/roles/%{roleprefix}kdump -%{_datadir}/ansible/roles/%{roleprefix}postfix -%{_datadir}/ansible/roles/%{roleprefix}selinux -%{_datadir}/ansible/roles/%{roleprefix}timesync -%{_datadir}/ansible/roles/%{roleprefix}network -%{_datadir}/ansible/roles/%{roleprefix}storage -%doc %{_pkgdocdir}/*/example-*-playbook.yml -%doc %{_pkgdocdir}/network/example-inventory -%doc %{_pkgdocdir}/*/README.md -%doc %{_datadir}/ansible/roles/%{roleprefix}kdump/README.md -%doc %{_datadir}/ansible/roles/%{roleprefix}postfix/README.md -%doc %{_datadir}/ansible/roles/%{roleprefix}selinux/README.md -%doc %{_datadir}/ansible/roles/%{roleprefix}timesync/README.md -%doc %{_datadir}/ansible/roles/%{roleprefix}network/README.md - - -%license %{_pkgdocdir}/*/COPYING -%license %{_pkgdocdir}/*/LICENSE -%license %{_datadir}/ansible/roles/%{roleprefix}kdump/COPYING -%license %{_datadir}/ansible/roles/%{roleprefix}postfix/COPYING -%license %{_datadir}/ansible/roles/%{roleprefix}selinux/COPYING -%license %{_datadir}/ansible/roles/%{roleprefix}timesync/COPYING -%license %{_datadir}/ansible/roles/%{roleprefix}network/LICENSE -%license %{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE %changelog +* Fri Jul 23 2021 Noriko Hosoi - 1.0.1-4 +- Remove unnecessary dependency on python3-netaddr + Related: rhbz#1970165 + +* Fri Jul 16 2021 Noriko Hosoi - 1.0.1-3 +- Fix the tier1 test failure in network due to the epel repo not enabled. + Related: rhbz#1970165 + +* Wed Jul 7 2021 Noriko Hosoi - 1.0.1-2 +- Fix an invalid indentation in the spec file. + Related: rhbz#1970165 +- Mention bz1854189 here as it was fixed in backporting rhel-8.4.0. + Related: rhbz#1854189 +- Remove gating.yaml which was introduced in backporting rhel-8.4.0. 
+ Related: rhbz#1970165
+
+* Tue Jun 29 2021 Noriko Hosoi - 1.0.1-1
+- Synchronize roles with rhel-8.4.0.
+ Update roles kdump, selinux, timesync, network, and storage.
+ Add new roles metrics, tlog, kernel_settings, logging, nbde_server,
+ nbde_client, certificate, crypto_policies, sshd, ssh, and ha_cluster.
+- Synchronize spec file with rawhide's up to commit 6218fe9.
+ Related: rhbz#1970165
+
 * Tue Jun 9 2020 Pavel Cahyna - 1.0-12 - Rebase the network role Resolves rhbz#1767177, rhbz#1842605, rhbz#1789813, rhbz#1724280