diff --git a/.gitignore b/.gitignore
index c60ecba..9fa6d86 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,18 @@
-SOURCES/certificate-fedef6e.tar.gz
-SOURCES/kdump-0c2bb28.tar.gz
-SOURCES/kernel_settings-901a73a.tar.gz
-SOURCES/logging-fe3f658.tar.gz
-SOURCES/metrics-7f94b49.tar.gz
-SOURCES/nbde_client-6306def.tar.gz
-SOURCES/nbde_server-4b6cfca.tar.gz
-SOURCES/network-bf4501b.tar.gz
+SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz
+SOURCES/auto-maintenance-8f069305caa0a142c2c6ac14bd4d331282a1c079.tar.gz
+SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz
+SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz
+SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz
+SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz
+SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz
+SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz
+SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz
+SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz
+SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz
+SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz
 SOURCES/postfix-0.1.tar.gz
-SOURCES/selinux-6cd1ec8.tar.gz
-SOURCES/storage-81f30ab.tar.gz
-SOURCES/timesync-924650d.tar.gz
-SOURCES/tlog-cfa70b6.tar.gz
+SOURCES/selinux-1.1.1.tar.gz
+SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz
+SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz
+SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz
+SOURCES/tlog-1.1.0.tar.gz
diff --git a/.rhel-system-roles.metadata b/.rhel-system-roles.metadata
index a34a76e..e7beb14 100644
--- a/.rhel-system-roles.metadata
+++ b/.rhel-system-roles.metadata
@@ -1,13 +1,18 @@
-5aa98ec9e109c5ebfae327718e5cad1d3c837e4f SOURCES/certificate-fedef6e.tar.gz
-36b200d1c6a8d1cb1ea87e3e9aa8c4f6bbd8155d SOURCES/kdump-0c2bb28.tar.gz
-263a6bbe7b25fbbc13c60b6b30861b63ec2648cd SOURCES/kernel_settings-901a73a.tar.gz
-9f365ee569d0d6e542983842ffd7c81c82e2c3ca SOURCES/logging-fe3f658.tar.gz
-3c25f49356e9325ba694d14ece036c8ea3aa16f6 SOURCES/metrics-7f94b49.tar.gz
-435fed277e03b6c409ebbfa421c15f97ba15e8c8 SOURCES/nbde_client-6306def.tar.gz
-e936390ddc7440e25190d6ff98cf5e5b3bf1fc3b SOURCES/nbde_server-4b6cfca.tar.gz
-d1e3e5cd724e7a61a9b3f4eb2bf669d6ed6f9cde SOURCES/network-bf4501b.tar.gz
+77e952b62e634c69e36115845b4f24ee3bfe76b7 SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz
+31d33f92384e423baebb073d3a6e3d271cbef5a5 SOURCES/auto-maintenance-8f069305caa0a142c2c6ac14bd4d331282a1c079.tar.gz
+7017c00e2ceede1f6019ba17a56e0145e6012013 SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz
+469a1a39a19d346c10bf07071a7af52832885047 SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz
+838ed06d8d092271fff04bd5e7c16db4661e8567 SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz
+fa3d5daf6cf1ceeaa87f58c16e11153cf250e2fa SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz
+471863c062a32a37a18c0ee1b7f0c50387baec99 SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz
+60efc730800600f87e386e16730980ea08417d34 SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz
+821d8ebef2d30a41f0fa65bdc5e550f09b375370 SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz
+66b84d088e2c3989f00b3151cc7fdc40f768f9a5 SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz
+0e4e133b75e245d17c0c5a1097ab95f047ae6f65 SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz
+c2d1aaca43cbe787ee7b1e41e875a76b8f95831d SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz
 66c82331f4ac9598c506c3999965b4d07dbfe49d SOURCES/postfix-0.1.tar.gz
-246383bd6823533ed3a51a0501b75e38ba852908 SOURCES/selinux-6cd1ec8.tar.gz
-d1ba125b693ac5b8705e79d92b13f24c01c51a86 SOURCES/storage-81f30ab.tar.gz
-ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d.tar.gz
-66538d3279cb5972f73a70960a4407d2abe56883 SOURCES/tlog-cfa70b6.tar.gz
+f2ad38bd93487962de511b1f4bc9dc6607a5ab36 SOURCES/selinux-1.1.1.tar.gz
+aef51c665e61166e091440862cfa4e6a8fe3c29d SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz
+8b7d7c14e76aa1a872f22d5cd6d3c9a850868ed3 SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz
+ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz
+486d7b845348755e7f189afd95f32bbe97c74661 SOURCES/tlog-1.1.0.tar.gz
diff --git a/SOURCES/collection_readme.sh b/SOURCES/collection_readme.sh
new file mode 100755
index 0000000..94e8cae
--- /dev/null
+++ b/SOURCES/collection_readme.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+readme_md=${1:-"lsr_role2collection/collection_readme.md"}
+
+sed -i -e '/## Currently supported distributions/{:1;/## Dependencies/!{N;b 1};s|.*|## Dependencies|}' \
+    -e 's/Linux/Red Hat Enterprise Linux/g' \
+    -e 's/Ansible Galaxy/Automation Hub/g' \
+    -e 's/fedora\(.\)linux_system_roles/redhat\1rhel_system_roles/g' \
+    -e 's/linux-system-roles/rhel-system-roles/g' \
+    -e '/## Documentation/{:a;/## Support/!{N;b a};s|.*|## Documentation\nThe official RHEL System Roles documentation can be found in the [Product Documentation section of the Red Hat Customer Portal](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/administration_and_configuration_tasks_using_system_roles_in_rhel/index).\n\n## Support|}' \
+    -e 's/ $//' \
+    $readme_md
diff --git a/SOURCES/kdump-fix-newline.diff b/SOURCES/kdump-fix-newline.diff
new file mode 100644
index 0000000..52a1a7e
--- /dev/null
+++ b/SOURCES/kdump-fix-newline.diff
@@ -0,0 +1,28 @@
+commit cafd95d0b03360d12e86170eb10fc1fc3dcade06
+Author: Pavel Cahyna
+Date: Thu Jan 14 11:42:48 2021 +0100
+
+    Get rid of the extra final newline in string
+
+    Use the `-` chomping indicator to indicate that the trailing newline is
+    not intended as a part of the string.
+    https://yaml.org/spec/1.1/#chomping/
+
+    The trailing newline was causing an actual problem in the test.
+
+    Also use the `>` folded style, which is more appropriate here than the
+    `|` literal style.
+
+diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
+index 6d3699c..d3503f7 100644
+--- a/tests/tests_ssh.yml
++++ b/tests/tests_ssh.yml
+@@ -27,7 +27,7 @@
+     - include_role:
+         name: linux-system-roles.kdump
+       vars:
+-        kdump_ssh_user: |
++        kdump_ssh_user: >-
+           {{ hostvars[kdump_ssh_server_outside]['ansible_user_id'] }}
+       # This is the outside address. Ansible will connect to it to
+       # copy the ssh key.
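The block-scalar change above is easy to misread, so here is a minimal standalone YAML sketch (illustrative keys, not part of the patch) of how the two styles differ:

```yaml
# Hypothetical keys for illustration; not role variables.
literal_keep: |
  someuser
folded_strip: >-
  someuser
# literal_keep == "someuser\n"  (`|` keeps the trailing newline)
# folded_strip == "someuser"    (`>` folds lines; `-` chomps the trailing newline)
```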
diff --git a/SOURCES/kdump-tier1-tags.diff b/SOURCES/kdump-tier1-tags.diff index 22c0684..f80af83 100644 --- a/SOURCES/kdump-tier1-tags.diff +++ b/SOURCES/kdump-tier1-tags.diff @@ -45,13 +45,13 @@ index 0000000..2035dfc + with_items: "{{ restore_services }}" + tags: tests::cleanup diff --git a/tests/tests_default.yml b/tests/tests_default.yml -index 4c93830..9e7743a 100644 +index af0b2a0..6ce5241 100644 --- a/tests/tests_default.yml +++ b/tests/tests_default.yml -@@ -4,3 +4,13 @@ +@@ -3,3 +3,13 @@ roles: - - kdump + - linux-system-roles.kdump + + pre_tasks: + - name: Import tasks @@ -63,7 +63,7 @@ index 4c93830..9e7743a 100644 +# tags: tests::tier1::cleanup + import_tasks: restore_services_state.yml diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml -index 2763fbd..95b3886 100644 +index eba31a0..857aab8 100644 --- a/tests/tests_default_wrapper.yml +++ b/tests/tests_default_wrapper.yml @@ -1,6 +1,9 @@ @@ -92,12 +92,12 @@ index 2763fbd..95b3886 100644 + - 'tests::slow' tasks: - name: Run ansible-playbook with tests_default.yml in check mode - command: ansible-playbook -vvv -i {{ tempinventory.path }} --check tests_default.yml + command: > diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml -index 14a59d9..23bc7eb 100644 +index d12e884..6d3699c 100644 --- a/tests/tests_ssh.yml +++ b/tests/tests_ssh.yml -@@ -11,6 +11,13 @@ +@@ -10,6 +10,13 @@ # this is the address at which the ssh dump server can be reached # from the managed host. Dumps will be uploaded there. kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}" @@ -112,7 +112,7 @@ index 14a59d9..23bc7eb 100644 tasks: - name: gather facts from {{ kdump_ssh_server_outside }} diff --git a/tests/tests_ssh_wrapper.yml b/tests/tests_ssh_wrapper.yml -index 9a8ecfd..1a6db73 100644 +index 2203f3f..96a764e 100644 --- a/tests/tests_ssh_wrapper.yml +++ b/tests/tests_ssh_wrapper.yml @@ -1,6 +1,8 @@ @@ -139,4 +139,4 @@ index 9a8ecfd..1a6db73 100644 + - 'tests::multihost_localhost' tasks: - name: Run ansible-playbook with tests_ssh.yml in check mode - command: ansible-playbook -vvv -i {{ tempinventory.path }} --check tests_ssh.yml + command: | diff --git a/SOURCES/logging-0001-test-playbooks-enhancement.diff b/SOURCES/logging-0001-test-playbooks-enhancement.diff deleted file mode 100644 index 69bf819..0000000 --- a/SOURCES/logging-0001-test-playbooks-enhancement.diff +++ /dev/null @@ -1,136 +0,0 @@ -From 90952a1bb7ddbba45ed8cbd62e6a8e0edb6f6148 Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Tue, 25 Aug 2020 09:05:03 -0700 -Subject: [PATCH 1/7] Test playbooks enhancement - -In the code to check the log message is successfully logged or not -in the /var/log/messages file, adding "until: __result is success" -and waiting up to 5 seconds. 
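The retry pattern this deleted patch had backported is worth seeing on its own; a minimal sketch (mirroring the hunks below, with an assumed log path) of polling for a message with `until`/`retries`/`delay`:

```yaml
- name: Check the test log message in /var/log/messages
  command: /bin/grep testMessage0000 /var/log/messages
  register: __result
  until: __result is success   # retry while grep exits non-zero
  retries: 5                   # up to 5 attempts
  delay: 1                     # wait 1 second between attempts
  changed_when: false          # grep never changes the system
```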
---- - tests/tests_basics_files.yml | 4 ++++ - tests/tests_basics_files2.yml | 4 ++++ - tests/tests_basics_files_forwards.yml | 4 ++++ - tests/tests_basics_files_log_dir.yml | 4 ++++ - tests/tests_basics_forwards_implicit_files.yml | 4 ++++ - tests/tests_combination.yml | 4 ++++ - tests/tests_combination2.yml | 4 ++++ - tests/tests_imuxsock_files.yml | 4 ++++ - 8 files changed, 32 insertions(+) - -diff --git a/tests/tests_basics_files.yml b/tests/tests_basics_files.yml -index 080890f..87950d8 100644 ---- a/tests/tests_basics_files.yml -+++ b/tests/tests_basics_files.yml -@@ -74,4 +74,8 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 {{ __default_system_log }} -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false -diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml -index ae61be2..094b125 100644 ---- a/tests/tests_basics_files2.yml -+++ b/tests/tests_basics_files2.yml -@@ -99,4 +99,8 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 "{{ __default_system_log }}" -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false -diff --git a/tests/tests_basics_files_forwards.yml b/tests/tests_basics_files_forwards.yml -index f43b8eb..d08a207 100644 ---- a/tests/tests_basics_files_forwards.yml -+++ b/tests/tests_basics_files_forwards.yml -@@ -105,6 +105,10 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 '{{ __default_system_log }}' -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false - - - name: Check if the forwarding config exists -diff --git a/tests/tests_basics_files_log_dir.yml b/tests/tests_basics_files_log_dir.yml -index ca900b8..f5ca266 100644 ---- a/tests/tests_basics_files_log_dir.yml -+++ b/tests/tests_basics_files_log_dir.yml -@@ -78,6 +78,10 @@ - - - name: Check the files output config that the path is {{ logging_system_log_dir }}/messages - command: /bin/grep '\*.info;mail.none;authpriv.none;cron.none.*{{ logging_system_log_dir }}/messages' {{ __test_files_conf }} -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false - - - name: Check the test log message in {{ logging_system_log_dir }}/messages -diff --git a/tests/tests_basics_forwards_implicit_files.yml b/tests/tests_basics_forwards_implicit_files.yml -index 6744d53..1d23911 100644 ---- a/tests/tests_basics_forwards_implicit_files.yml -+++ b/tests/tests_basics_forwards_implicit_files.yml -@@ -92,6 +92,10 @@ - - - name: Check if the test message is in {{ __default_system_log }} - command: /bin/grep testMessage0000 '{{ __default_system_log }}' -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false - - - name: Get the forwarding config stat -diff --git a/tests/tests_combination.yml b/tests/tests_combination.yml -index 99d57dc..8aae855 100644 ---- a/tests/tests_combination.yml -+++ b/tests/tests_combination.yml -@@ -129,6 +129,10 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 '{{ __default_system_log }}' -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false - - - name: Generated a file to check severity_and_facility -diff --git a/tests/tests_combination2.yml b/tests/tests_combination2.yml -index 5d49a57..5fe43cb 
100644 ---- a/tests/tests_combination2.yml -+++ b/tests/tests_combination2.yml -@@ -138,6 +138,10 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 '{{ __default_system_log }}' -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false - - - name: Check the forwarding config stat -diff --git a/tests/tests_imuxsock_files.yml b/tests/tests_imuxsock_files.yml -index 2d6840d..35db253 100644 ---- a/tests/tests_imuxsock_files.yml -+++ b/tests/tests_imuxsock_files.yml -@@ -76,4 +76,8 @@ - - - name: Check the test log message in {{ __default_system_log }} - command: /bin/grep testMessage0000 "{{ __default_system_log }}" -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 - changed_when: false --- -2.26.2 - diff --git a/SOURCES/logging-0002-elasticsearch-output-template.diff b/SOURCES/logging-0002-elasticsearch-output-template.diff deleted file mode 100644 index 6bb8a3a..0000000 --- a/SOURCES/logging-0002-elasticsearch-output-template.diff +++ /dev/null @@ -1,81 +0,0 @@ -From e7f255a64a1ffe83b06e93c944c73b8079f1db3a Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Thu, 10 Sep 2020 17:15:32 -0700 -Subject: [PATCH 2/7] Fixing a logic bug in elasticsearch output template. - -When evaluated, the retryfailures value was denied by "not", which -should not have been. Removing the "not" and adding a test case to -tests_files_elasticsearch_use_local_cert.yml. - -(cherry picked from commit 108f06926f7bec929fdfc24ce2fbcfe195078ae2) ---- - roles/rsyslog/templates/output_elasticsearch.j2 | 2 +- - .../tests_files_elasticsearch_use_local_cert.yml | 16 +++++++++++++--- - 2 files changed, 14 insertions(+), 4 deletions(-) - -diff --git a/roles/rsyslog/templates/output_elasticsearch.j2 b/roles/rsyslog/templates/output_elasticsearch.j2 -index c3cd1df..c4db10f 100644 ---- a/roles/rsyslog/templates/output_elasticsearch.j2 -+++ b/roles/rsyslog/templates/output_elasticsearch.j2 -@@ -44,7 +44,7 @@ ruleset(name="{{ item.name }}") { - bulkid="{{ item.bulkid | d("id_template") }}" - dynbulkid="{{ item.dynbulkid | d('on') }}" - allowUnsignedCerts="{{ item.allowUnsignedCerts | d("off") }}" --{% if not item.retryfailures | d(true) %} -+{% if item.retryfailures | d(true) %} - {% if item.retryruleset | d() | length > 0 %} - retryfailures="on" - retryruleset="{{ item.retryruleset }}" -diff --git a/tests/tests_files_elasticsearch_use_local_cert.yml b/tests/tests_files_elasticsearch_use_local_cert.yml -index 2559ce7..8b1eaa4 100644 ---- a/tests/tests_files_elasticsearch_use_local_cert.yml -+++ b/tests/tests_files_elasticsearch_use_local_cert.yml -@@ -44,6 +44,7 @@ - __test_ca_cert: /tmp/es-ca.crt - __test_cert: /tmp/es-cert.pem - __test_key: /tmp/es-key.pem -+ __test_el: elasticsearch_output - - tasks: - - name: Generate fake key/certs files. -@@ -60,13 +61,13 @@ - - name: deploy config to send to elasticsearch - vars: - logging_outputs: -- - name: elasticsearch_output -+ - name: "{{ __test_el }}" - type: elasticsearch - server_host: logging-es - server_port: 9200 - index_prefix: project. 
- input_type: ovirt -- retryfailures: false -+ retryfailures: on - ca_cert_src: "{{ __test_ca_cert }}" - cert_src: "{{ __test_cert }}" - private_key_src: "{{ __test_key }}" -@@ -77,7 +78,7 @@ - logging_flows: - - name: flow_0 - inputs: [files_input] -- outputs: [elasticsearch_output, elasticsearch_output_ops] -+ outputs: "[{{ __test_el }}]" - include_role: - name: linux-system-roles.logging - -@@ -119,3 +120,12 @@ - - mycert: "{{ __test_cert }}" - - myprivkey: "{{ __test_key }}" - changed_when: false -+ -+ - name: Check retryfailures in {{ __test_outputfiles_conf }} -+ command: /bin/grep 'retryfailures="on"' {{ __test_outputfiles_conf }} -+ changed_when: false -+ -+ - name: Check retryruleset in {{ __test_outputfiles_conf }} -+ command: /bin/grep 'retryruleset="{{ __test_el }}"' {{ __test_outputfiles_conf }} -+ changed_when: false -+ --- -2.26.2 - diff --git a/SOURCES/logging-0003-README.diff b/SOURCES/logging-0003-README.diff deleted file mode 100644 index 8f7fcdd..0000000 --- a/SOURCES/logging-0003-README.diff +++ /dev/null @@ -1,55 +0,0 @@ -From 76b4418f937fd1dbaa1061fa5f83f11ea046dc40 Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Thu, 10 Sep 2020 16:35:43 -0700 -Subject: [PATCH 3/7] Adding "Port and SELinux" section to README. - -(cherry picked from commit 5f144bc74edbcd80a53a2fe84aa464f7ea9f44ef) ---- - README.md | 16 +++++++++++++--- - 1 file changed, 13 insertions(+), 3 deletions(-) - -diff --git a/README.md b/README.md -index 0eafde8..db29dc5 100644 ---- a/README.md -+++ b/README.md -@@ -19,6 +19,7 @@ - * [Standalone configuration](#standalone-configuration) - * [Client configuration](#client-configuration) - * [Server configuration](#server-configuration) -+ * [Port and SELinux](#port-and-selinux) - * [Providers](#providers) - * [Tests](#tests) - * [Implementation Details](#implementation-details) -@@ -111,10 +112,10 @@ This is a schematic logging configuration to show log messages from input_nameA - - `ovirt` type - `ovirt` input supports oVirt specific inputs.
- For the details, visit [oVirt Support](../../design_docs/rsyslog_ovirt_support.md). - --- `remote` type - `remote` input supports receiving logs from the remote logging system over the network. This input type makes rsyslog a server.
-+- `remote` type - `remote` input supports receiving logs from the remote logging system over the network.
- **available options** -- - `udp_ports`: List of UDP port numbers to listen. If set, the `remote` input listens on the UDP ports. No defaults. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. -- - `tcp_ports`: List of TCP port numbers to listen. If set, the `remote` input listens on the TCP ports. Default to `[514]`. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. If both `udp_ports` and `tcp_ports` are not set in a `remote` input item, `tcp_ports: [514]` is added to the item. -+ - `udp_ports`: List of UDP port numbers to listen. If set, the `remote` input listens on the UDP ports. No defaults. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. See also [Port and SELinux](#port-and-selinux). -+ - `tcp_ports`: List of TCP port numbers to listen. If set, the `remote` input listens on the TCP ports. Default to `[514]`. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. If both `udp_ports` and `tcp_ports` are not set in a `remote` input item, `tcp_ports: [514]` is added to the item. See also [Port and SELinux](#port-and-selinux). - - `tls`: Set to `true` to encrypt the connection using the default TLS implementation used by the provider. Default to `false`. - - `pki_authmode`: Specifying the default network driver authentication mode. `x509/name`, `x509/fingerprint`, `anon` is accepted. Default to `x509/name`. - - `permitted_clients`: List of hostnames, IP addresses, fingerprints(sha1), and wildcard DNS domains which will be allowed by the `logging` server to connect and send logs over TLS. Default to `['*.{{ logging_domain }}']` -@@ -591,6 +592,15 @@ The following playbook generates the same logging configuration files. - outputs: [remote_files_output0, remote_files_output1] - ``` - -+### Port and SELinux -+ -+SELinux is only configured to allow sending and receiving on the following ports by default: -+``` -+syslogd_port_t tcp 514, 20514 -+syslogd_port_t udp 514, 20514 -+``` -+If other ports need to be configured, you can use [linux-system-roles/selinux](https://github.com/linux-system-roles/selinux) to manage SELinux contexts. -+ - ## Providers - - [Rsyslog](roles/rsyslog) - This documentation contains rsyslog specific information. --- -2.26.2 - diff --git a/SOURCES/logging-0004-yamllint-errors.diff b/SOURCES/logging-0004-yamllint-errors.diff deleted file mode 100644 index 8adf0e1..0000000 --- a/SOURCES/logging-0004-yamllint-errors.diff +++ /dev/null @@ -1,31 +0,0 @@ -From 6ef1f1020abb074525724e9060ddada526ad0102 Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Tue, 29 Sep 2020 15:50:03 -0700 -Subject: [PATCH 4/7] Fixing yamllint errors. - -(cherry picked from commit b131f9e26b3fd74d759b237d7b3b26b6732371d2) ---- - tests/tests_files_elasticsearch_use_local_cert.yml | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/tests/tests_files_elasticsearch_use_local_cert.yml b/tests/tests_files_elasticsearch_use_local_cert.yml -index 8b1eaa4..90b12af 100644 ---- a/tests/tests_files_elasticsearch_use_local_cert.yml -+++ b/tests/tests_files_elasticsearch_use_local_cert.yml -@@ -67,7 +67,7 @@ - server_port: 9200 - index_prefix: project. 
- input_type: ovirt -- retryfailures: on -+ retryfailures: true - ca_cert_src: "{{ __test_ca_cert }}" - cert_src: "{{ __test_cert }}" - private_key_src: "{{ __test_key }}" -@@ -128,4 +128,3 @@ - - name: Check retryruleset in {{ __test_outputfiles_conf }} - command: /bin/grep 'retryruleset="{{ __test_el }}"' {{ __test_outputfiles_conf }} - changed_when: false -- --- -2.26.2 - diff --git a/SOURCES/logging-0005-property-based-filters.diff b/SOURCES/logging-0005-property-based-filters.diff deleted file mode 100644 index 1158774..0000000 --- a/SOURCES/logging-0005-property-based-filters.diff +++ /dev/null @@ -1,324 +0,0 @@ -From b72e8a48be07a1cebce8b2237d7344220678c2ec Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Fri, 16 Oct 2020 08:15:11 -0700 -Subject: [PATCH 5/7] Logging - support property-based filters in the files and - forwards outputs - -Adding property-based filter options to files, forwards and remote_files output. -A test case is added to tests_basics_files2.yml. - -In addition, fixing a bug caused by a left over file from the previous tests. - -Issue - https://github.com/linux-system-roles/logging/issues/179 - -(cherry picked from commit 6ac8f9ff680a4b0230446062f5927f5921829f80) ---- - README.md | 68 ++++++++++++------- - roles/rsyslog/templates/output_files.j2 | 4 +- - roles/rsyslog/templates/output_forwards.j2 | 4 +- - .../rsyslog/templates/output_remote_files.j2 | 4 +- - tests/tests_basics_files2.yml | 40 +++++++++-- - tests/tests_basics_forwards_cert.yml | 8 +++ - tests/tests_basics_forwards_cert_missing.yml | 4 ++ - tests/tests_server_conflict.yml | 8 +++ - 8 files changed, 108 insertions(+), 32 deletions(-) - -diff --git a/README.md b/README.md -index db29dc5..4352ee7 100644 ---- a/README.md -+++ b/README.md -@@ -180,11 +180,16 @@ This is a schematic logging configuration to show log messages from input_nameA - - - `files` type - `files` output supports storing logs in the local files usually in /var/log.
- **available options** -- - `facility`: Facility; default to `*`. -- - `severity`: Severity; default to `*`. -- - `exclude`: Exclude list; default to none. -+ - `facility`: Facility in selector; default to `*`. -+ - `severity`: Severity in selector; default to `*`. -+ - `exclude`: Exclude list used in selector; default to none. -+ - `property`: Property in property-based filter; no default -+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -+ - `prop_value`: Value in property-based filter; default to `error` - - `path`: Path to the output file. - -+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored. -+ - Unless the above options are given, these local file outputs are configured. - ``` - kern.* /dev/console -@@ -199,8 +204,12 @@ This is a schematic logging configuration to show log messages from input_nameA - - - `forwards` type - `forwards` output sends logs to the remote logging system over the network. This is for the client rsyslog.
- **available options** -- - `facility`: Facility; default to `*`. -- - `severity`: Severity; default to `*`. -+ - `facility`: Facility in selector; default to `*`. -+ - `severity`: Severity in selector; default to `*`. -+ - `exclude`: Exclude list used in selector; default to none. -+ - `property`: Property in property-based filter; no default -+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -+ - `prop_value`: Value in property-based filter; default to `error` - - `target`: Target host (fqdn). **Required**. - - `udp_port`: UDP port number. Default to `514`. - - `tcp_port`: TCP port number. Default to `514`. -@@ -208,11 +217,16 @@ This is a schematic logging configuration to show log messages from input_nameA - - `pki_authmode`: Specifying the default network driver authentication mode. `x509/name`, `x509/fingerprint`, `anon` is accepted. Default to `x509/name`. - - `permitted_server`: Hostname, IP address, fingerprint(sha1) or wildcard DNS domain of the server which this client will be allowed to connect and send logs over TLS. Default to `*.{{ logging_domain }}` - -+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored. -+ - - `remote_files` type - `remote_files` output stores logs to the local files per remote host and program name originated the logs.
- **available options** -- - `facility`: Facility; default to `*`. -- - `severity`: Severity; default to `*`. -- - `exclude`: Exclude list; default to none. -+ - `facility`: Facility in selector; default to `*`. -+ - `severity`: Severity in selector; default to `*`. -+ - `exclude`: Exclude list used in selector; default to none. -+ - `property`: Property in property-based filter; no default -+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -+ - `prop_value`: Value in property-based filter; default to `error` - - `async_writing`: If set to `true`, the files are written asynchronously. Allowed value is `true` or `false`. Default to `false`. - - `client_count`: Count of client logging system supported this rsyslog server. Default to `10`. - - `io_buffer_size`: Buffer size used to write output data. Default to `65536` bytes. -@@ -221,6 +235,8 @@ This is a schematic logging configuration to show log messages from input_nameA - `/path/to/output/dir/%HOSTNAME%/%PROGRAMNAME:::secpath-replace%.log` - - `remote_sub_path`: Relative path to logging_system_log_dir to store the filtered logs. - -+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored. -+ - if both `remote_log_path` and `remote_sub_path` are _not_ specified, the remote_file output configured with the following settings. - ``` - template( -@@ -446,32 +462,38 @@ The following playbook generates the same logging configuration files. - outputs: [files_output0, files_output1] - ``` - --5. Deploying `files input` reading logs from a local file and `elasticsearch output` to store the logs. Assuming the ca_cert, cert and key to connect to Elasticsearch are prepared. -+5. Deploying `files input` reading logs from local files and `files output` to write to the local files based on the property-based filters. - ```yaml - --- --- name: Deploying basic input and elasticsearch output -+- name: Deploying files input and configured files output - hosts: all - roles: - - linux-system-roles.logging - vars: - logging_inputs: -- - name: files_input -+ - name: files_input0 - type: files -- input_log_path: /var/log/containers/*.log -+ input_log_path: /var/log/containerA/*.log -+ - name: files_input1 -+ type: files -+ input_log_path: /var/log/containerB/*.log - logging_outputs: -- - name: elasticsearch_output -- type: elasticsearch -- server_host: your_target_host -- server_port: 9200 -- index_prefix: project. 
-- input_type: ovirt -- ca_cert_src: /local/path/to/ca_cert -- cert_src: /local/path/to/cert -- private_key_src: /local/path/to/key -+ - name: files_output0 -+ type: files -+ property: msg -+ prop_op: contains -+ prop_value: error -+ path: /var/log/errors.log -+ - name: files_output1 -+ type: files -+ property: msg -+ prop_op: "!contains" -+ prop_value: error -+ path: /var/log/others.log - logging_flows: - - name: flow0 -- inputs: [files_input] -- outputs: [elasticsearch_output] -+ inputs: [files_input0, files_input1] -+ outputs: [files_output0, files_output1] - ``` - - ### Client configuration -diff --git a/roles/rsyslog/templates/output_files.j2 b/roles/rsyslog/templates/output_files.j2 -index d994414..e15e4cd 100644 ---- a/roles/rsyslog/templates/output_files.j2 -+++ b/roles/rsyslog/templates/output_files.j2 -@@ -1,6 +1,8 @@ - {% if item.path is defined %} - ruleset(name="{{ item.name }}") { --{% if item.exclude | d([]) %} -+{% if item.property | d() %} -+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" {{ item.path }} -+{% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} {{ item.path }} - {% else %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }} {{ item.path }} -diff --git a/roles/rsyslog/templates/output_forwards.j2 b/roles/rsyslog/templates/output_forwards.j2 -index 61254ee..35030b4 100644 ---- a/roles/rsyslog/templates/output_forwards.j2 -+++ b/roles/rsyslog/templates/output_forwards.j2 -@@ -9,7 +9,9 @@ - {% set __forwards_protocol = '' %} - {% endif %} - ruleset(name="{{ item.name }}") { --{% if item.exclude | d([]) %} -+{% if item.property | d() %} -+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" -+{% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" - {% else %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }} action(name="{{ item.name }}" -diff --git a/roles/rsyslog/templates/output_remote_files.j2 b/roles/rsyslog/templates/output_remote_files.j2 -index 3c9339f..aaf547e 100644 ---- a/roles/rsyslog/templates/output_remote_files.j2 -+++ b/roles/rsyslog/templates/output_remote_files.j2 -@@ -17,7 +17,9 @@ ruleset(name="{{ item.name }}" - queue.size="{{ logging_server_queue_size }}" - queue.workerThreads="{{ logging_server_threads }}") { - # Store remote logs in separate logfiles --{% if item.exclude | d([]) %} -+{% if item.property | d() %} -+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") -+{% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") - {% else %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | 
d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") -diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml -index 094b125..b1a0f62 100644 ---- a/tests/tests_basics_files2.yml -+++ b/tests/tests_basics_files2.yml -@@ -10,9 +10,9 @@ - # If logging role is executed, the file size is about 100 bytes. - # Thus, assert the size is less than 1000. - # 2. Check file count in /etc/rsyslog.d. --# If logging role is executed, 8 config files are generated. -+# If logging role is executed, 9 config files are generated. - # By setting logging_purge_confs, pre-existing config files are deleted. --# Thus, assert the the count is equal to 8. -+# Thus, assert the the count is equal to 9. - # 3. Check systemctl status of rsyslog as well as error or specific message in the output. - # 4. To verify the generated filename is correct, check the config file of files output exists. - # 4.1 Check the config file contains the expected filter and the output file as configured. -@@ -24,6 +24,8 @@ - vars: - __test_files_conf: /etc/rsyslog.d/30-output-files-files_output1.conf - __default_system_log: /var/log/messages -+ __prop_based_log0: /var/log/property_based_filter_in.log -+ __prop_based_log1: /var/log/property_based_filter_out.log - - tasks: - - name: deploy config to output into local files -@@ -49,15 +51,23 @@ - path: :omusrmsg:* - - name: files_output3 - type: files -- facility: local7 -- path: /var/log/boot.log -+ property: msg -+ prop_op: contains -+ prop_value: property_based_filter_test -+ path: "{{ __prop_based_log0 }}" -+ - name: files_output4 -+ type: files -+ property: msg -+ prop_op: "!contains" -+ prop_value: property_based_filter_test -+ path: "{{ __prop_based_log1 }}" - logging_inputs: - - name: basic_input - type: basics - logging_flows: - - name: flow_0 - inputs: [basic_input] -- outputs: [files_output0, files_output1, files_output2, files_output3] -+ outputs: [files_output0, files_output1, files_output2, files_output3, files_output4] - include_role: - name: linux-system-roles.logging - -@@ -74,7 +84,7 @@ - - - name: Check file counts in rsyslog.d - assert: -- that: rsyslog_d_file_count.matched == 8 -+ that: rsyslog_d_file_count.matched == 9 - - # Checking 'error' in stdout from systemctl status is for detecting the case in which rsyslog is running, - # but some functionality is disabled due to some error, e.g., error: 'tls.cacert' file couldn't be accessed. 
-@@ -104,3 +114,21 @@ - retries: 5 - delay: 1 - changed_when: false -+ -+ - name: Run logger to generate a test log message containing property_based_filter_test -+ command: /bin/logger -i -p local6.info -t testTag1 property_based_filter_test -+ changed_when: false -+ -+ - name: Check the test log message in {{ __prop_based_log0 }} -+ command: /bin/grep property_based_filter_test "{{ __prop_based_log0 }}" -+ register: __result -+ until: __result is success -+ retries: 5 -+ delay: 1 -+ changed_when: false -+ -+ - name: Check the test log message not in {{ __prop_based_log1 }} -+ command: /bin/grep property_based_filter_test "{{ __prop_based_log1 }}" -+ register: __result -+ changed_when: false -+ failed_when: "__result is not failed" -diff --git a/tests/tests_basics_forwards_cert.yml b/tests/tests_basics_forwards_cert.yml -index e27e016..48263ae 100644 ---- a/tests/tests_basics_forwards_cert.yml -+++ b/tests/tests_basics_forwards_cert.yml -@@ -139,3 +139,11 @@ - - /etc/pki/tls/certs/{{ __test_ca_cert_name }} - - /etc/pki/tls/certs/{{ __test_cert_name }} - - /etc/pki/tls/private/{{ __test_key_name }} -+ -+ - name: clean up test files -+ file: path="{{ item }}" state=absent -+ loop: -+ - "{{ __test_ca_cert }}" -+ - "{{ __test_cert }}" -+ - "{{ __test_key }}" -+ delegate_to: localhost -diff --git a/tests/tests_basics_forwards_cert_missing.yml b/tests/tests_basics_forwards_cert_missing.yml -index 3e82856..0ad0569 100644 ---- a/tests/tests_basics_forwards_cert_missing.yml -+++ b/tests/tests_basics_forwards_cert_missing.yml -@@ -63,6 +63,10 @@ - assert: - that: "'{{ ansible_failed_result.results.0.msg }}' is match('{{ __expected_error }}')" - -+ - name: clean up test files -+ file: path="{{ __test_key }}" state=absent -+ delegate_to: localhost -+ - - name: default run for cleanup - vars: - logging_inputs: -diff --git a/tests/tests_server_conflict.yml b/tests/tests_server_conflict.yml -index 36eeeb7..8c182f6 100644 ---- a/tests/tests_server_conflict.yml -+++ b/tests/tests_server_conflict.yml -@@ -76,3 +76,11 @@ - - assert: - that: item.msg is not defined or item.msg is defined and item.msg == __expected_error - loop: "{{ ansible_failed_result.results }}" -+ -+ - name: clean up test files -+ file: path="{{ item }}" state=absent -+ loop: -+ - "{{ __test_ca_cert }}" -+ - "{{ __test_cert }}" -+ - "{{ __test_key }}" -+ delegate_to: localhost --- -2.26.2 - diff --git a/SOURCES/logging-0006-property_op.diff b/SOURCES/logging-0006-property_op.diff deleted file mode 100644 index 1f1ed57..0000000 --- a/SOURCES/logging-0006-property_op.diff +++ /dev/null @@ -1,136 +0,0 @@ -From ca2baffbfc14fba077c7c70d849c02b9c69c9e1f Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Fri, 16 Oct 2020 11:08:00 -0700 -Subject: [PATCH 6/7] Replacing prop_op with property_op and prop_value with - property_value. - -(cherry picked from commit 1c951e6acef886548029151dbca9d002f20ef425) ---- - README.md | 20 +++++++++---------- - roles/rsyslog/templates/output_files.j2 | 2 +- - roles/rsyslog/templates/output_forwards.j2 | 2 +- - .../rsyslog/templates/output_remote_files.j2 | 2 +- - tests/tests_basics_files2.yml | 8 ++++---- - 5 files changed, 17 insertions(+), 17 deletions(-) - -diff --git a/README.md b/README.md -index 4352ee7..d94ec04 100644 ---- a/README.md -+++ b/README.md -@@ -184,8 +184,8 @@ This is a schematic logging configuration to show log messages from input_nameA - - `severity`: Severity in selector; default to `*`. - - `exclude`: Exclude list used in selector; default to none. 
- - `property`: Property in property-based filter; no default -- - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -- - `prop_value`: Value in property-based filter; default to `error` -+ - `property_op`: Operation in property-based filter; In case of not `!`, put the `property_op` value in quotes; default to `contains` -+ - `property_value`: Value in property-based filter; default to `error` - - `path`: Path to the output file. - - Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored. -@@ -208,8 +208,8 @@ This is a schematic logging configuration to show log messages from input_nameA - - `severity`: Severity in selector; default to `*`. - - `exclude`: Exclude list used in selector; default to none. - - `property`: Property in property-based filter; no default -- - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -- - `prop_value`: Value in property-based filter; default to `error` -+ - `property_op`: Operation in property-based filter; In case of not `!`, put the `property_op` value in quotes; default to `contains` -+ - `property_value`: Value in property-based filter; default to `error` - - `target`: Target host (fqdn). **Required**. - - `udp_port`: UDP port number. Default to `514`. - - `tcp_port`: TCP port number. Default to `514`. -@@ -225,8 +225,8 @@ This is a schematic logging configuration to show log messages from input_nameA - - `severity`: Severity in selector; default to `*`. - - `exclude`: Exclude list used in selector; default to none. - - `property`: Property in property-based filter; no default -- - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains` -- - `prop_value`: Value in property-based filter; default to `error` -+ - `property_op`: Operation in property-based filter; In case of not `!`, put the `property_op` value in quotes; default to `contains` -+ - `property_value`: Value in property-based filter; default to `error` - - `async_writing`: If set to `true`, the files are written asynchronously. Allowed value is `true` or `false`. Default to `false`. - - `client_count`: Count of client logging system supported this rsyslog server. Default to `10`. - - `io_buffer_size`: Buffer size used to write output data. Default to `65536` bytes. -@@ -481,14 +481,14 @@ The following playbook generates the same logging configuration files. 
- - name: files_output0 - type: files - property: msg -- prop_op: contains -- prop_value: error -+ property_op: contains -+ property_value: error - path: /var/log/errors.log - - name: files_output1 - type: files - property: msg -- prop_op: "!contains" -- prop_value: error -+ property_op: "!contains" -+ property_value: error - path: /var/log/others.log - logging_flows: - - name: flow0 -diff --git a/roles/rsyslog/templates/output_files.j2 b/roles/rsyslog/templates/output_files.j2 -index e15e4cd..40f5b90 100644 ---- a/roles/rsyslog/templates/output_files.j2 -+++ b/roles/rsyslog/templates/output_files.j2 -@@ -1,7 +1,7 @@ - {% if item.path is defined %} - ruleset(name="{{ item.name }}") { - {% if item.property | d() %} -- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" {{ item.path }} -+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" {{ item.path }} - {% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} {{ item.path }} - {% else %} -diff --git a/roles/rsyslog/templates/output_forwards.j2 b/roles/rsyslog/templates/output_forwards.j2 -index 35030b4..87d7a09 100644 ---- a/roles/rsyslog/templates/output_forwards.j2 -+++ b/roles/rsyslog/templates/output_forwards.j2 -@@ -10,7 +10,7 @@ - {% endif %} - ruleset(name="{{ item.name }}") { - {% if item.property | d() %} -- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" -+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" action(name="{{ item.name }}" - {% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" - {% else %} -diff --git a/roles/rsyslog/templates/output_remote_files.j2 b/roles/rsyslog/templates/output_remote_files.j2 -index aaf547e..84317f2 100644 ---- a/roles/rsyslog/templates/output_remote_files.j2 -+++ b/roles/rsyslog/templates/output_remote_files.j2 -@@ -18,7 +18,7 @@ ruleset(name="{{ item.name }}" - queue.workerThreads="{{ logging_server_threads }}") { - # Store remote logs in separate logfiles - {% if item.property | d() %} -- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") -+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") - {% elif item.exclude | d([]) %} - {{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}") - {% else %} -diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml -index b1a0f62..9f69ed5 100644 ---- 
a/tests/tests_basics_files2.yml -+++ b/tests/tests_basics_files2.yml -@@ -52,14 +52,14 @@ - - name: files_output3 - type: files - property: msg -- prop_op: contains -- prop_value: property_based_filter_test -+ property_op: contains -+ property_value: property_based_filter_test - path: "{{ __prop_based_log0 }}" - - name: files_output4 - type: files - property: msg -- prop_op: "!contains" -- prop_value: property_based_filter_test -+ property_op: "!contains" -+ property_value: property_based_filter_test - path: "{{ __prop_based_log1 }}" - logging_inputs: - - name: basic_input --- -2.26.2 - diff --git a/SOURCES/logging-0007-RHELPLAN-56807.diff b/SOURCES/logging-0007-RHELPLAN-56807.diff deleted file mode 100644 index e3db3f5..0000000 --- a/SOURCES/logging-0007-RHELPLAN-56807.diff +++ /dev/null @@ -1,114 +0,0 @@ -From 3967a2b0e7e61dfb6317296a4cf15d0fe91a1638 Mon Sep 17 00:00:00 2001 -From: Noriko Hosoi -Date: Thu, 15 Oct 2020 10:52:29 -0700 -Subject: [PATCH 7/7] RHELPLAN-56807 - Logging - elasticsearch - need to adjust - jinja2 boolean values to the rsyslog config values - -Resetting the values of the following params as rsyslog expects. - dynSearchIndex, bulkmode, dynbulkid, allowUnsignedCerts, usehttps - -Adding test cases to tests_ovirt_elasticsearch_params.yml - -(cherry picked from commit c98aabd864f6d07c11d6db991bf0af0aaee7f123) ---- - .../rsyslog/templates/output_elasticsearch.j2 | 13 ++++----- - tests/tests_ovirt_elasticsearch_params.yml | 29 +++++++++++++++++-- - 2 files changed, 33 insertions(+), 9 deletions(-) - -diff --git a/roles/rsyslog/templates/output_elasticsearch.j2 b/roles/rsyslog/templates/output_elasticsearch.j2 -index c4db10f..6c6255b 100644 ---- a/roles/rsyslog/templates/output_elasticsearch.j2 -+++ b/roles/rsyslog/templates/output_elasticsearch.j2 -@@ -37,25 +37,24 @@ ruleset(name="{{ item.name }}") { - serverport="{{ item.server_port | d(9200) | int }}" - template="{{ item.template | d("es_template") }}" - searchIndex="{{ item.searchIndex | d("index_template") }}" -- dynSearchIndex="{{ item.dynSearchIndex | d("on") }}" -+ dynSearchIndex="{{ item.dynSearchIndex | d(true) | ternary('on', 'off') }}" - searchType="{{ item.searchType | d("com.redhat.viaq.common") }}" -- bulkmode="{{ item.bulkmode | d("on") }}" -+ bulkmode="{{ item.bulkmode | d(true) | ternary('on', 'off') }}" - writeoperation="{{ item.writeoperation | d("create") }}" - bulkid="{{ item.bulkid | d("id_template") }}" -- dynbulkid="{{ item.dynbulkid | d('on') }}" -- allowUnsignedCerts="{{ item.allowUnsignedCerts | d("off") }}" -+ dynbulkid="{{ item.dynbulkid | d(true) | ternary('on', 'off') }}" -+ allowUnsignedCerts="{{ item.allowUnsignedCerts | d(false) | ternary('on', 'off') }}" - {% if item.retryfailures | d(true) %} --{% if item.retryruleset | d() | length > 0 %} - retryfailures="on" -+{% if item.retryruleset | d() | length > 0 %} - retryruleset="{{ item.retryruleset }}" - {% else %} -- retryfailures="on" - retryruleset="{{ item.name }}" - {% endif %} - {% else %} - retryfailures="off" - {% endif %} -- usehttps="{{ item.usehttps | default("on") }}" -+ usehttps="{{ item.usehttps | d(true) | ternary('on', 'off') }}" - {% if item.use_cert | default(true) %} - tls.cacert="{{ item.ca_cert | default('/etc/rsyslog.d/es-ca.crt') }}" - tls.mycert="{{ item.cert | default('/etc/rsyslog.d/es-cert.pem') }}" -diff --git a/tests/tests_ovirt_elasticsearch_params.yml b/tests/tests_ovirt_elasticsearch_params.yml -index 34d9e1d..4fefe59 100644 ---- a/tests/tests_ovirt_elasticsearch_params.yml -+++ 
b/tests/tests_ovirt_elasticsearch_params.yml -@@ -34,6 +34,8 @@ - __test_ovirt_engine_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_engine_input.conf - __test_ovirt_vdsm_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_vdsm_input.conf - __test_ovirt_bogus_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_bogus_input.conf -+ __test_es_conf: /etc/rsyslog.d/31-output-elasticsearch-elasticsearch_output.conf -+ __test_es_ops_conf: /etc/rsyslog.d/31-output-elasticsearch-elasticsearch_output_ops.conf - __test_collectd_name: ovirt_collectd_input - __test_engine_name: ovirt_engine_input - __test_vdsm_name: ovirt_vdsm_input -@@ -56,7 +58,6 @@ - server_port: 9200 - index_prefix: project. - input_type: ovirt -- retryfailures: false - ca_cert: "/etc/rsyslog.d/es-ca.crt" - cert: "/etc/rsyslog.d/es-cert.pem" - private_key: "/etc/rsyslog.d/es-key.pem" -@@ -70,6 +71,11 @@ - ca_cert: "/etc/rsyslog.d/es-ca.crt" - cert: "/etc/rsyslog.d/es-cert.pem" - private_key: "/etc/rsyslog.d/es-key.pem" -+ dynSearchIndex: false -+ bulkmode: false -+ dynbulkid: false -+ allowUnsignedCerts: true -+ usehttps: false - logging_inputs: - - name: basic_input - type: basics -@@ -164,4 +170,23 @@ - - - name: Check index_prefix is "{{ __test_logs_index }}" in "{{ __test_ovirt_vdsm_conf }}" - command: /bin/grep 'set $.index_prefix = "{{ __test_logs_index }}"' {{ __test_ovirt_vdsm_conf }} -- changed_when: false -+ -+ - name: Check default config params in "{{ __test_es_conf }}" -+ command: /bin/grep {{ item }} {{ __test_es_conf }} -+ loop: -+ - "dynSearchIndex=.on." -+ - "bulkmode=.on." -+ - "dynbulkid=.on." -+ - "allowUnsignedCerts=.off." -+ - "usehttps=.on." -+ - "retryfailures=.on." -+ -+ - name: Check modified config params in "{{ __test_es_ops_conf }}" -+ command: /bin/grep {{ item }} {{ __test_es_ops_conf }} -+ loop: -+ - "dynSearchIndex=.off." -+ - "bulkmode=.off." -+ - "dynbulkid=.off." -+ - "allowUnsignedCerts=.on." -+ - "usehttps=.off." -+ - "retryfailures=.off." --- -2.26.2 - diff --git a/SOURCES/md2html.sh b/SOURCES/md2html.sh deleted file mode 100644 index f3da649..0000000 --- a/SOURCES/md2html.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -for file in "$@"; do - pandoc -f markdown_github "${file}" -t asciidoc -o "${file%.md}.tmp.adoc" - touch -r "${file}" "${file%.md}.tmp.adoc" - TZ=UTC asciidoc -o "${file%.md}.html" -a footer-style=none -a toc2 -a source-highlighter=highlight "${file%.md}.tmp.adoc" - rm "${file%.md}.tmp.adoc" -done diff --git a/SOURCES/metrics-mssql-x86.diff b/SOURCES/metrics-mssql-x86.diff new file mode 100644 index 0000000..80bb0e5 --- /dev/null +++ b/SOURCES/metrics-mssql-x86.diff @@ -0,0 +1,24 @@ +From 7ff86f2fa05998afcd8ae87d9cdd660ef5b6ee2c Mon Sep 17 00:00:00 2001 +From: Jan Kurik +Date: Thu, 18 Feb 2021 17:09:48 +1100 +Subject: [PATCH] Update mssql test to exclude non-x86_64 architectures + +pcp-pmda-mssql (and SQL Server itself) are x86_64-only. 
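The guard being extended here is an early-exit pattern; a minimal sketch (taken from the shape of the hunk that follows) of ending a play for hosts that cannot run the test:

```yaml
# Sketch: skip the rest of the play on unsupported architectures.
- meta: end_host
  when: ansible_architecture not in ['x86_64']
```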
+---
+ tests/tests_sanity_mssql.yml | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tests/tests_sanity_mssql.yml b/tests/tests_sanity_mssql.yml
+index 6f1e2cc..8602c36 100644
+--- a/tests/tests_sanity_mssql.yml
++++ b/tests/tests_sanity_mssql.yml
+@@ -12,7 +12,8 @@
+   - meta: end_host
+     when: (ansible_distribution in ['RedHat'] and
+            ( ansible_facts['distribution_version'] is version('8.4', '<'))) or
+-          ansible_distribution not in ['Fedora', 'RedHat']
++          ansible_distribution not in ['Fedora', 'RedHat'] or
++          ansible_architecture not in ['x86_64']
+
+   - name: Save state of services
+     import_tasks: get_services_state.yml
diff --git a/SOURCES/network-ansible-test.diff b/SOURCES/network-ansible-test.diff
new file mode 100644
index 0000000..8f88e21
--- /dev/null
+++ b/SOURCES/network-ansible-test.diff
@@ -0,0 +1,835 @@
+From 7ae16e9ff5291f06ba0d7224a0d6c36b780ea0a2 Mon Sep 17 00:00:00 2001
+From: Rich Megginson
+Date: Wed, 3 Mar 2021 11:37:56 -0700
+Subject: [PATCH] fix most ansible-test issues, suppress the rest
+
+Automation Hub, and possibly Galaxy in the future, require the
+collection to be screened with `ansible-test sanity` among other
+checks. The role had a number of issues:
+* Use `AssertionError` instead of `assert`
+* Use of `logging` module not in accordance with standards, but these
+  are ok and the errors were suppressed
+* Several import errors which are ok because they are checked
+  elsewhere
+* Many of the module files use `#!` shebang - not sure why, but
+  the usage is allowed
+* __init__.py in the module_utils directories must be empty, so a
+  new file myerror.py was added to move the code from __init__.py
+* The documentation block in the module was not properly constructed
+  or formatted.
+* shellcheck issues, including removing unused files
+* use `dummy` instead of `_` (underscore) for variables that are
+  unused
+
+add WARNING to module docs - collection users should not use directly
+
+Signed-off-by: Rich Megginson
+(cherry picked from commit 7459a29e9104bf01987399153baf0a1c1df05929)
+---
+ .github/workflows/tox.yml                    |  4 +-
+ .sanity-ansible-ignore-2.9.txt               | 47 ++++++++++
+ README.md                                    |  2 +-
+ library/network_connections.py               | 88 ++++++++++++-------
+ module_utils/network_lsr/__init__.py         |  7 --
+ .../network_lsr/argument_validator.py        |  9 +-
+ module_utils/network_lsr/ethtool.py          |  6 +-
+ module_utils/network_lsr/myerror.py          | 11 +++
+ module_utils/network_lsr/nm/__init__.py      |  4 +
+ .../network_lsr/nm/active_connection.py      | 35 ++++----
+ module_utils/network_lsr/nm/client.py        |  4 +
+ module_utils/network_lsr/nm/connection.py    | 18 ++--
+ module_utils/network_lsr/nm/error.py         |  4 +
+ module_utils/network_lsr/nm/provider.py      |  8 +-
+ module_utils/network_lsr/nm_provider.py      |  4 +
+ module_utils/network_lsr/utils.py            | 10 ++-
+ tests/ensure_provider_tests.py               |  8 +-
+ tests/get_coverage.sh                        |  6 +-
+ tests/get_total_coverage.sh                  |  2 +-
+ tests/integration/test_ethernet.py           |  4 +-
+ tests/merge_coverage.sh                      |  3 +
+ tests/setup_module_utils.sh                  | 41 ---------
+ tox.ini                                      |  3 -
+ 23 files changed, 199 insertions(+), 129 deletions(-)
+ create mode 100644 .sanity-ansible-ignore-2.9.txt
+ create mode 100644 module_utils/network_lsr/myerror.py
+ delete mode 100755 tests/setup_module_utils.sh
+
+diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml
+index 207bcba..ba0f4c6 100644
+--- a/.github/workflows/tox.yml
++++ b/.github/workflows/tox.yml
+@@ -3,7 +3,7 @@ name: tox
+ on:  # yamllint disable-line rule:truthy
+ - pull_request
+ env:
+-  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0"
++  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0"
+   LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*'
+   LSR_MSCENARIOS: default
+   # LSR_EXTRA_PACKAGES: "libdbus-1-dev libgirepository1.0-dev python3-dev"
+@@ -36,7 +36,7 @@ jobs:
+         toxenvs="py${toxpyver}"
+         case "$toxpyver" in
+         27) toxenvs="${toxenvs},coveralls,flake8,pylint" ;;
+-        36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection" ;;
++        36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection,ansible-test" ;;
+         37) toxenvs="${toxenvs},coveralls" ;;
+         38) toxenvs="${toxenvs},coveralls" ;;
+         esac
+diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt
+new file mode 100644
+index 0000000..439197e
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.9.txt
+@@ -0,0 +1,47 @@
++tests/network/ensure_provider_tests.py compile-2.7!skip
++tests/network/ensure_provider_tests.py compile-3.5!skip
++plugins/module_utils/network_lsr/nm/__init__.py empty-init!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/client.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/connection.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/provider.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.8!skip
++plugins/module_utils/network_lsr/__init__.py shebang!skip
++plugins/module_utils/network_lsr/argument_validator.py shebang!skip
++plugins/module_utils/network_lsr/utils.py shebang!skip
++plugins/module_utils/network_lsr/myerror.py shebang!skip
++tests/network/covstats shebang!skip
++tests/network/ensure_provider_tests.py shebang!skip
++tests/network/get_coverage.sh shebang!skip
++tests/network/get_total_coverage.sh shebang!skip
++tests/network/merge_coverage.sh shebang!skip
++tests/network/ensure_provider_tests.py future-import-boilerplate!skip
++tests/network/integration/conftest.py future-import-boilerplate!skip
++tests/network/integration/test_ethernet.py future-import-boilerplate!skip
++tests/network/unit/test_network_connections.py future-import-boilerplate!skip
++tests/network/unit/test_nm_provider.py future-import-boilerplate!skip
++tests/network/ensure_provider_tests.py metaclass-boilerplate!skip
++tests/network/integration/conftest.py metaclass-boilerplate!skip
++tests/network/integration/test_ethernet.py metaclass-boilerplate!skip
++tests/network/unit/test_network_connections.py metaclass-boilerplate!skip
++tests/network/unit/test_nm_provider.py metaclass-boilerplate!skip
++plugins/modules/network_connections.py validate-modules:missing-examples
++plugins/modules/network_connections.py validate-modules:missing-gplv3-license
++plugins/modules/network_connections.py validate-modules:no-default-for-required-parameter
++plugins/modules/network_connections.py validate-modules:parameter-type-not-in-doc
++plugins/modules/network_connections.py validate-modules:undocumented-parameter
+diff --git a/README.md b/README.md
+index c1462b6..c257c08 100644
+--- a/README.md
++++ b/README.md
+@@ -145,7 +145,7 @@ a consequence, `state: up` always changes the system.
+
+ You can deactivate a connection profile, even if is currently not active. As a consequence, `state: down` always changes the system.
+
+-Note that if the `state` option is unset, the connection profile’s runtime state will not be changed.
++Note that if the `state` option is unset, the connection profile's runtime state will not be changed.
+
+
+ ### `persistent_state`
+diff --git a/library/network_connections.py b/library/network_connections.py
+index 3224892..3a6e47f 100644
+--- a/library/network_connections.py
++++ b/library/network_connections.py
+@@ -2,6 +2,30 @@
+ # -*- coding: utf-8 -*-
+ # SPDX-License-Identifier: BSD-3-Clause
+
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
++DOCUMENTATION = """
++---
++module: network_connections
++author: Thomas Haller (@thom311)
++short_description: module for network role to manage connection profiles
++requirements: [pygobject, dbus, NetworkManager]
++version_added: "2.0"
++description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
++    - |
++      Manage networking profiles (connections) for NetworkManager and
++      initscripts networking providers. Documentation needs to be written. Note
++      that the network_connections module tightly integrates with the network
++      role and currently it is not expected to use this module outside the role.
++      Thus, consult README.md for examples for the role. The requirements are
++      only for the NetworkManager (nm) provider.
++options: {}
++"""
++
++
+ import errno
+ import functools
+ import os
+@@ -16,7 +40,7 @@ import logging
+ # pylint: disable=import-error, no-name-in-module
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils.network_lsr import ethtool  # noqa:E501
+-from ansible.module_utils.network_lsr import MyError  # noqa:E501
++from ansible.module_utils.network_lsr.myerror import MyError  # noqa:E501
+
+ from ansible.module_utils.network_lsr.argument_validator import (  # noqa:E501
+     ArgUtil,
+@@ -30,22 +54,6 @@ from ansible.module_utils.network_lsr import nm_provider  # noqa:E501
+ # pylint: enable=import-error, no-name-in-module
+
+
+-DOCUMENTATION = """
+----
+-module: network_connections
+-author: "Thomas Haller (thaller@redhat.com)"
+-short_description: module for network role to manage connection profiles
+-requirements: for 'nm' provider requires pygobject, dbus and NetworkManager.
+-version_added: "2.0"
+-description: Manage networking profiles (connections) for NetworkManager and
+-    initscripts networking providers.
+-options: Documentation needs to be written. Note that the network_connections
+-    module tightly integrates with the network role and currently it is not
+-    expected to use this module outside the role. Thus, consult README.md for
+-    examples for the role.
+-"""
+-
+-
+ ###############################################################################
+ PERSISTENT_STATE = "persistent_state"
+ ABSENT_STATE = "absent"
+@@ -772,7 +780,7 @@ class NMUtil:
+         if compare_flags is None:
+             compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP
+
+-        return not (not (con_a.compare(con_b, compare_flags)))
++        return con_a.compare(con_b, compare_flags)
+
+     def connection_is_active(self, con):
+         NM = Util.NM()
+@@ -1390,7 +1398,7 @@ class RunEnvironment(object):
+     def check_mode_set(self, check_mode, connections=None):
+         c = self._check_mode
+         self._check_mode = check_mode
+-        assert (
++        if not (
+             (c is None and check_mode in [CheckMode.PREPARE])
+             or (
+                 c == CheckMode.PREPARE
+@@ -1399,7 +1407,8 @@
+             or (c == CheckMode.PRE_RUN and check_mode in [CheckMode.REAL_RUN])
+             or (c == CheckMode.REAL_RUN and check_mode in [CheckMode.DONE])
+             or (c == CheckMode.DRY_RUN and check_mode in [CheckMode.DONE])
+-        )
++        ):
++            raise AssertionError("check_mode value is incorrect {0}".format(c))
+         self._check_mode_changed(c, check_mode, connections)
+
+
+@@ -1461,7 +1470,8 @@ class RunEnvironmentAnsible(RunEnvironment):
+         warn_traceback=False,
+         force_fail=False,
+     ):
+-        assert idx >= -1
++        if not idx >= -1:
++            raise AssertionError("idx {0} is less than -1".format(idx))
+         self._log_idx += 1
+         self.run_results[idx]["log"].append((severity, msg, self._log_idx))
+         if severity == LogLevel.ERROR:
+@@ -1598,14 +1608,15 @@ class Cmd(object):
+     def connections_data(self):
+         c = self._connections_data
+         if c is None:
+-            assert self.check_mode in [
++            if self.check_mode not in [
+                 CheckMode.DRY_RUN,
+                 CheckMode.PRE_RUN,
+                 CheckMode.REAL_RUN,
+-            ]
+-            c = []
+-            for _ in range(0, len(self.connections)):
+-                c.append({"changed": False})
++            ]:
++                raise AssertionError(
++                    "invalid value {0} for self.check_mode".format(self.check_mode)
++                )
++            c = [{"changed": False}] * len(self.connections)
+             self._connections_data = c
+         return c
+
+@@ -1614,11 +1625,14 @@
+             c["changed"] = False
+
+     def connections_data_set_changed(self, idx, changed=True):
+-        assert self._check_mode in [
++        if self._check_mode not in [
+             CheckMode.PRE_RUN,
+             CheckMode.DRY_RUN,
+             CheckMode.REAL_RUN,
+-        ]
++        ]:
++            raise AssertionError(
++                "invalid value {0} for self._check_mode".format(self._check_mode)
++            )
+         if not changed:
+             return
+         self.connections_data[idx]["changed"] = changed
+@@ -1688,7 +1702,10 @@
+         # modify the connection.
+ + con = self.connections[idx] +- assert con["state"] in ["up", "down"] ++ if con["state"] not in ["up", "down"]: ++ raise AssertionError( ++ "connection state {0} not 'up' or 'down'".format(con["state"]) ++ ) + + # also check, if the current profile is 'up' with a 'type' (which + # possibly modifies the connection as well) +@@ -1736,7 +1753,9 @@ class Cmd(object): + elif self._check_mode != CheckMode.DONE: + c = CheckMode.DONE + else: +- assert False ++ raise AssertionError( ++ "invalid value {0} for self._check_mode".format(self._check_mode) ++ ) + self._check_mode = c + self.run_env.check_mode_set(c) + return c +@@ -1902,7 +1921,12 @@ class Cmd_nm(Cmd): + + name = connection["name"] + if not name: +- assert connection["persistent_state"] == "absent" ++ if not connection["persistent_state"] == "absent": ++ raise AssertionError( ++ "persistent_state must be 'absent' not {0} when there is no connection 'name'".format( ++ connection["persistent_state"] ++ ) ++ ) + continue + if name in names: + exists = names[name]["nm.exists"] +@@ -1979,7 +2003,7 @@ class Cmd_nm(Cmd): + idx, "ethtool.%s specified but not supported by NM", specified + ) + +- for option, _ in specified.items(): ++ for option in specified.keys(): + nm_name = nm_get_name_fcnt(option) + if not nm_name: + self.log_fatal( +diff --git a/module_utils/network_lsr/__init__.py b/module_utils/network_lsr/__init__.py +index 22c717c..e69de29 100644 +--- a/module_utils/network_lsr/__init__.py ++++ b/module_utils/network_lsr/__init__.py +@@ -1,7 +0,0 @@ +-#!/usr/bin/python3 -tt +-# vim: fileencoding=utf8 +-# SPDX-License-Identifier: BSD-3-Clause +- +- +-class MyError(Exception): +- pass +diff --git a/module_utils/network_lsr/argument_validator.py b/module_utils/network_lsr/argument_validator.py +index 24ffdc4..f338489 100644 +--- a/module_utils/network_lsr/argument_validator.py ++++ b/module_utils/network_lsr/argument_validator.py +@@ -2,12 +2,16 @@ + # vim: fileencoding=utf8 + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import posixpath + import socket + import re + + # pylint: disable=import-error, no-name-in-module +-from ansible.module_utils.network_lsr import MyError # noqa:E501 ++from ansible.module_utils.network_lsr.myerror import MyError # noqa:E501 + from ansible.module_utils.network_lsr.utils import Util # noqa:E501 + + UINT32_MAX = 0xFFFFFFFF +@@ -72,7 +76,8 @@ class ArgUtil: + + class ValidationError(MyError): + def __init__(self, name, message): +- Exception.__init__(self, name + ": " + message) ++ # pylint: disable=non-parent-init-called ++ super(ValidationError, self).__init__(name + ": " + message) + self.error_message = message + self.name = name + +diff --git a/module_utils/network_lsr/ethtool.py b/module_utils/network_lsr/ethtool.py +index 21e2152..3246bef 100644 +--- a/module_utils/network_lsr/ethtool.py ++++ b/module_utils/network_lsr/ethtool.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import array + import struct + import fcntl +@@ -46,7 +50,7 @@ def get_perm_addr(ifname): + res = ecmd.tobytes() + except AttributeError: # tobytes() is not available in python2 + res = ecmd.tostring() +- _, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res) ++ dummy, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res) + perm_addr = Util.mac_ntoa(perm_addr[:size]) + except IOError: + perm_addr = None +diff 
--git a/module_utils/network_lsr/myerror.py b/module_utils/network_lsr/myerror.py +new file mode 100644 +index 0000000..f785265 +--- /dev/null ++++ b/module_utils/network_lsr/myerror.py +@@ -0,0 +1,11 @@ ++#!/usr/bin/python3 -tt ++# vim: fileencoding=utf8 ++# SPDX-License-Identifier: BSD-3-Clause ++ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ ++ ++class MyError(Exception): ++ pass +diff --git a/module_utils/network_lsr/nm/__init__.py b/module_utils/network_lsr/nm/__init__.py +index 58fbb5a..74c17cb 100644 +--- a/module_utils/network_lsr/nm/__init__.py ++++ b/module_utils/network_lsr/nm/__init__.py +@@ -1,5 +1,9 @@ + # Relative import is not support by ansible 2.8 yet + # pylint: disable=import-error, no-name-in-module ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + from ansible.module_utils.network_lsr.nm import provider # noqa:E501 + + # pylint: enable=import-error, no-name-in-module +diff --git a/module_utils/network_lsr/nm/active_connection.py b/module_utils/network_lsr/nm/active_connection.py +index a6c5a37..432142c 100644 +--- a/module_utils/network_lsr/nm/active_connection.py ++++ b/module_utils/network_lsr/nm/active_connection.py +@@ -2,6 +2,10 @@ + + # Handle NM.ActiveConnection + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -21,19 +25,15 @@ def deactivate_active_connection(nm_ac, timeout, check_mode): + return False + if not check_mode: + main_loop = client.get_mainloop(timeout) +- logging.debug( +- "Deactivating {id} with timeout {timeout}".format( +- id=nm_ac.get_id(), timeout=timeout +- ) +- ) ++ logging.debug("Deactivating %s with timeout %s", nm_ac.get_id(), timeout) + user_data = main_loop + handler_id = nm_ac.connect( + NM_AC_STATE_CHANGED_SIGNAL, _nm_ac_state_change_callback, user_data + ) + logging.debug( +- "Registered {signal} on client.NM.ActiveConnection {id}".format( +- signal=NM_AC_STATE_CHANGED_SIGNAL, id=nm_ac.get_id() +- ) ++ "Registered %s on client.NM.ActiveConnection %s", ++ NM_AC_STATE_CHANGED_SIGNAL, ++ nm_ac.get_id(), + ) + if nm_ac.props.state != client.NM.ActiveConnectionState.DEACTIVATING: + nm_client = client.get_client() +@@ -44,9 +44,7 @@ def deactivate_active_connection(nm_ac, timeout, check_mode): + _nm_ac_deactivate_call_back, + user_data, + ) +- logging.debug( +- "Deactivating client.NM.ActiveConnection {0}".format(nm_ac.get_id()) +- ) ++ logging.debug("Deactivating client.NM.ActiveConnection %s", nm_ac.get_id()) + main_loop.run() + return True + +@@ -56,14 +54,13 @@ def _nm_ac_state_change_callback(nm_ac, state, reason, user_data): + if main_loop.is_cancelled: + return + logging.debug( +- "Got client.NM.ActiveConnection state change: {id}: {state} {reason}".format( +- id=nm_ac.get_id(), state=state, reason=reason +- ) ++ "Got client.NM.ActiveConnection state change: %s: %s %s", ++ nm_ac.get_id(), ++ state, ++ reason, + ) + if nm_ac.props.state == client.NM.ActiveConnectionState.DEACTIVATED: +- logging.debug( +- "client.NM.ActiveConnection {0} is deactivated".format(nm_ac.get_id()) +- ) ++ logging.debug("client.NM.ActiveConnection %s is deactivated", nm_ac.get_id()) + main_loop.quit() + + +@@ -82,9 +79,7 @@ def _nm_ac_deactivate_call_back(nm_client, result, user_data): + client.NM.ManagerError.quark(), client.NM.ManagerError.CONNECTIONNOTACTIVE + ): + logging.info( +- "Connection is not active on {0}, no need to 
deactivate".format( +- nm_ac_id +- ) ++ "Connection is not active on %s, no need to deactivate", nm_ac_id + ) + if nm_ac: + nm_ac.handler_disconnect(handler_id) +diff --git a/module_utils/network_lsr/nm/client.py b/module_utils/network_lsr/nm/client.py +index 4992887..f47cc53 100644 +--- a/module_utils/network_lsr/nm/client.py ++++ b/module_utils/network_lsr/nm/client.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +diff --git a/module_utils/network_lsr/nm/connection.py b/module_utils/network_lsr/nm/connection.py +index 6982034..474da8d 100644 +--- a/module_utils/network_lsr/nm/connection.py ++++ b/module_utils/network_lsr/nm/connection.py +@@ -2,6 +2,10 @@ + + # Handle NM.RemoteConnection + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -26,9 +30,10 @@ def delete_remote_connection(nm_profile, timeout, check_mode): + user_data, + ) + logging.debug( +- "Deleting profile {id}/{uuid} with timeout {timeout}".format( +- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout +- ) ++ "Deleting profile %s/%s with timeout %s", ++ nm_profile.get_id(), ++ nm_profile.get_uuid(), ++ timeout, + ) + main_loop.run() + return True +@@ -78,9 +83,10 @@ def volatilize_remote_connection(nm_profile, timeout, check_mode): + user_data, + ) + logging.debug( +- "Volatilizing profile {id}/{uuid} with timeout {timeout}".format( +- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout +- ) ++ "Volatilizing profile %s/%s with timeout %s", ++ nm_profile.get_id(), ++ nm_profile.get_uuid(), ++ timeout, + ) + main_loop.run() + return True +diff --git a/module_utils/network_lsr/nm/error.py b/module_utils/network_lsr/nm/error.py +index 42014ec..d87bc72 100644 +--- a/module_utils/network_lsr/nm/error.py ++++ b/module_utils/network_lsr/nm/error.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + + class LsrNetworkNmError(Exception): + pass +diff --git a/module_utils/network_lsr/nm/provider.py b/module_utils/network_lsr/nm/provider.py +index 52e7502..567c9d1 100644 +--- a/module_utils/network_lsr/nm/provider.py ++++ b/module_utils/network_lsr/nm/provider.py +@@ -1,5 +1,9 @@ + # SPDX-License-Identifier: BSD-3-Clause + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import logging + + # Relative import is not support by ansible 2.8 yet +@@ -25,7 +29,7 @@ class NetworkManagerProvider: + nm_ac, timeout, check_mode + ) + if not changed: +- logging.info("No active connection for {0}".format(connection_name)) ++ logging.info("No active connection for %s", connection_name) + + return changed + +@@ -49,7 +53,7 @@ class NetworkManagerProvider: + nm_profile, timeout, check_mode + ) + if not changed: +- logging.info("No connection with UUID {0} to volatilize".format(uuid)) ++ logging.info("No connection with UUID %s to volatilize", uuid) + + return changed + +diff --git a/module_utils/network_lsr/nm_provider.py b/module_utils/network_lsr/nm_provider.py +index c75242a..d6168eb 100644 +--- a/module_utils/network_lsr/nm_provider.py ++++ b/module_utils/network_lsr/nm_provider.py +@@ -1,6 +1,10 @@ + # SPDX-License-Identifier: BSD-3-Clause + """ Support for 
NetworkManager aka the NM provider """ + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + # pylint: disable=import-error, no-name-in-module + from ansible.module_utils.network_lsr.utils import Util # noqa:E501 + +diff --git a/module_utils/network_lsr/utils.py b/module_utils/network_lsr/utils.py +index 73d9528..bc258fe 100644 +--- a/module_utils/network_lsr/utils.py ++++ b/module_utils/network_lsr/utils.py +@@ -2,18 +2,23 @@ + # SPDX-License-Identifier: BSD-3-Clause + # vim: fileencoding=utf8 + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import socket + import sys + import uuid + + # pylint: disable=import-error, no-name-in-module +-from ansible.module_utils.network_lsr import MyError # noqa:E501 ++from ansible.module_utils.network_lsr.myerror import MyError # noqa:E501 + + + class Util: + + PY3 = sys.version_info[0] == 3 + ++ # pylint: disable=undefined-variable + STRING_TYPE = str if PY3 else basestring # noqa:F821 + + @staticmethod +@@ -241,7 +246,8 @@ class Util: + n = int(c, 16) * 16 + i = 1 + else: +- assert i == 1 ++ if not i == 1: ++ raise AssertionError("i != 1 - value is {0}".format(i)) + n = n + int(c, 16) + i = 2 + b.append(n) +diff --git a/tests/ensure_provider_tests.py b/tests/ensure_provider_tests.py +index 3620729..4e45e6a 100755 +--- a/tests/ensure_provider_tests.py ++++ b/tests/ensure_provider_tests.py +@@ -73,8 +73,6 @@ NM_ONLY_TESTS = { + MINIMUM_VERSION: "'1.25.1'", + "comment": "# NetworkManager 1.25.1 introduced ethtool coalesce support", + }, +- "playbooks/tests_802_1x_updated.yml": {}, +- "playbooks/tests_802_1x.yml": {}, + "playbooks/tests_reapply.yml": {}, + # team interface is not supported on Fedora + "playbooks/tests_team.yml": { +@@ -117,9 +115,7 @@ def create_nm_playbook(test_playbook): + EXTRA_RUN_CONDITION, "" + ) + if extra_run_condition: +- extra_run_condition = "{}{}\n".format( +- EXTRA_RUN_CONDITION_PREFIX, extra_run_condition +- ) ++ extra_run_condition = f"{EXTRA_RUN_CONDITION_PREFIX}{extra_run_condition}\n" + + nm_version_check = "" + if minimum_nm_version: +@@ -212,7 +208,7 @@ def main(): + + if missing: + print("ERROR: No NM or initscripts tests found for:\n" + ", \n".join(missing)) +- print("Try to generate them with '{} generate'".format(sys.argv[0])) ++ print(f"Try to generate them with '{sys.argv[0]} generate'") + returncode = 1 + + return returncode +diff --git a/tests/get_coverage.sh b/tests/get_coverage.sh +index 858a8cf..4524fab 100755 +--- a/tests/get_coverage.sh ++++ b/tests/get_coverage.sh +@@ -19,7 +19,6 @@ shift + playbook="${1}" + + coverage_data="remote-coveragedata-${host}-${playbook%.yml}" +-coverage="/root/.local/bin/coverage" + + echo "Getting coverage for ${playbook} on ${host}" >&2 + +@@ -32,10 +31,15 @@ call_ansible() { + } + + remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)" ++# we want to expand ${remote_coverage_dir} here, so tell SC to be quiet ++# https://github.com/koalaman/shellcheck/wiki/SC2064 ++# shellcheck disable=SC2064 + trap "rm -rf '${remote_coverage_dir}'" EXIT + ansible-playbook -i "${host}", get_coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}" + + #COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage ++# https://github.com/koalaman/shellcheck/wiki/SC2046 ++# shellcheck disable=SC2046 + ./merge_coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _) + + cat > tmp_merge_coveragerc < +Date: Mon, 15 
Feb 2021 11:02:55 +0100 +Subject: [PATCH] README: remove "slaves" leftover from documentation + +Signed-off-by: Fernando Fernandez Mancera +--- + README.md | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/README.md b/README.md +index 6b15673..06a8b1b 100644 +--- a/README.md ++++ b/README.md +@@ -300,7 +300,7 @@ different or the profile may not be tied to an interface at all. + + The `zone` option sets the firewalld zone for the interface. + +-Slaves to the bridge, bond or team devices cannot specify a zone. ++Ports to the bridge, bond or team devices cannot specify a zone. + + + ### `ip` +@@ -367,7 +367,7 @@ The IP configuration supports the following options: + + **Note:** When `route_append_only` or `rule_append_only` is not specified, the `network` role deletes the current routes or routing rules. + +-**Note:** Slaves to the bridge, bond or team devices cannot specify `ip` settings. ++**Note:** Ports to the bridge, bond or team devices cannot specify `ip` settings. + + ### `ethtool` + +-- +2.29.2 + diff --git a/SOURCES/network-tier1-tags.diff b/SOURCES/network-tier1-tags.diff index 803d4f1..1c4cb67 100644 --- a/SOURCES/network-tier1-tags.diff +++ b/SOURCES/network-tier1-tags.diff @@ -23,7 +23,7 @@ index 9cce1ae..76d99e9 100644 + tasks: + - import_tasks: tasks/restore_state.yml diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml -index ab3ee43..d646a0b 100644 +index 69f07f8..1e45788 100644 --- a/tests/playbooks/tests_bond.yml +++ b/tests/playbooks/tests_bond.yml @@ -1,5 +1,10 @@ @@ -36,8 +36,8 @@ index ab3ee43..d646a0b 100644 + - hosts: all vars: - master_profile: bond0 -@@ -94,3 +99,8 @@ + controller_profile: bond0 +@@ -95,3 +100,8 @@ - import_tasks: tasks/remove_test_interfaces_with_dhcp.yml tags: - "tests::cleanup" @@ -457,7 +457,7 @@ index 0000000..5690aed + register: etc_sysconfig_network_stat + ignore_errors: yes diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml -index 3bd0719..77cf2d9 100644 +index 288cd5d..840958d 100644 --- a/tests/tests_802_1x_nm.yml +++ b/tests/tests_802_1x_nm.yml @@ -4,6 +4,8 @@ @@ -469,14 +469,14 @@ index 3bd0719..77cf2d9 100644 tasks: - name: Set network provider to 'nm' set_fact: -@@ -21,3 +23,5 @@ +@@ -17,3 +19,5 @@ - import_playbook: playbooks/tests_802_1x.yml when: - ansible_distribution_major_version != '6' + tags: + - tests::expfail diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml -index 0d4c741..ca666a6 100644 +index bd335e4..4ebcaf9 100644 --- a/tests/tests_802_1x_updated_nm.yml +++ b/tests/tests_802_1x_updated_nm.yml @@ -4,6 +4,8 @@ @@ -488,7 +488,7 @@ index 0d4c741..ca666a6 100644 tasks: - name: Set network provider to 'nm' set_fact: -@@ -21,3 +23,5 @@ +@@ -17,3 +19,5 @@ - import_playbook: playbooks/tests_802_1x_updated.yml when: - ansible_distribution_major_version != '6' diff --git a/SOURCES/rhel-system-roles-kdump-pr22.diff b/SOURCES/rhel-system-roles-kdump-pr22.diff index d7d2796..342eddc 100644 --- a/SOURCES/rhel-system-roles-kdump-pr22.diff +++ b/SOURCES/rhel-system-roles-kdump-pr22.diff @@ -44,10 +44,10 @@ index bf24210..504ff34 100644 path {{ kdump_path }} {% if kdump_core_collector %} diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml -index 679148e..14a59d9 100644 +index 1da99df..d12e884 100644 --- a/tests/tests_ssh.yml +++ b/tests/tests_ssh.yml -@@ -6,6 +6,11 @@ +@@ -5,6 +5,11 @@ # known and ansible is supposed to be configured to be able to # connect to it (via inventory). 
kdump_ssh_server_outside: localhost diff --git a/SOURCES/rhel-system-roles-network-prefix.diff b/SOURCES/rhel-system-roles-network-prefix.diff deleted file mode 100644 index f729eee..0000000 --- a/SOURCES/rhel-system-roles-network-prefix.diff +++ /dev/null @@ -1,148 +0,0 @@ -diff --git a/examples/bond_simple.yml b/examples/bond_simple.yml -index 4ca9811..f6f5897 100644 ---- a/examples/bond_simple.yml -+++ b/examples/bond_simple.yml -@@ -32,5 +32,5 @@ - interface_name: eth2 - master: bond0 - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network - ... -diff --git a/examples/bond_with_vlan.yml b/examples/bond_with_vlan.yml -index 2e6be23..3b7a6dc 100644 ---- a/examples/bond_with_vlan.yml -+++ b/examples/bond_with_vlan.yml -@@ -35,4 +35,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/bridge_with_vlan.yml b/examples/bridge_with_vlan.yml -index 037ff8e..83c586d 100644 ---- a/examples/bridge_with_vlan.yml -+++ b/examples/bridge_with_vlan.yml -@@ -33,4 +33,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_simple_auto.yml b/examples/eth_simple_auto.yml -index 0ba168a..e4c4a54 100644 ---- a/examples/eth_simple_auto.yml -+++ b/examples/eth_simple_auto.yml -@@ -15,4 +15,4 @@ - mtu: 1450 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_with_802_1x.yml b/examples/eth_with_802_1x.yml -index 92a93a9..7731b7d 100644 ---- a/examples/eth_with_802_1x.yml -+++ b/examples/eth_with_802_1x.yml -@@ -27,4 +27,4 @@ - - client.pem - - cacert.pem - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/eth_with_vlan.yml b/examples/eth_with_vlan.yml -index 69da673..e0c2f11 100644 ---- a/examples/eth_with_vlan.yml -+++ b/examples/eth_with_vlan.yml -@@ -26,4 +26,4 @@ - - "192.0.2.{{ network_iphost }}/24" - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/ethtool_features.yml b/examples/ethtool_features.yml -index c580f89..0881316 100644 ---- a/examples/ethtool_features.yml -+++ b/examples/ethtool_features.yml -@@ -3,7 +3,7 @@ - - hosts: all - tasks: - - include_role: -- name: linux-system-roles.network -+ name: rhel-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" -diff --git a/examples/ethtool_features_default.yml b/examples/ethtool_features_default.yml -index 78965e6..3cdd731 100644 ---- a/examples/ethtool_features_default.yml -+++ b/examples/ethtool_features_default.yml -@@ -3,7 +3,7 @@ - - hosts: all - tasks: - - include_role: -- name: linux-system-roles.network -+ name: rhel-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" -diff --git a/examples/infiniband.yml b/examples/infiniband.yml -index 22603d9..9e7e267 100644 ---- a/examples/infiniband.yml -+++ b/examples/infiniband.yml -@@ -23,4 +23,4 @@ - - 198.51.100.133/30 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/macvlan.yml b/examples/macvlan.yml -index 90cd09d..0064ad4 100644 ---- a/examples/macvlan.yml -+++ b/examples/macvlan.yml -@@ -26,4 +26,4 @@ - - 192.168.1.1/24 - - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/examples/remove+down_profile.yml b/examples/remove+down_profile.yml -index da2b1b8..f2d93e8 100644 ---- a/examples/remove+down_profile.yml -+++ 
b/examples/remove+down_profile.yml -@@ -8,5 +8,5 @@ - persistent_state: absent - state: down - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network - ... -diff --git a/examples/wireless_wpa_psk.yml b/examples/wireless_wpa_psk.yml -index eeec22f..60b0d83 100644 ---- a/examples/wireless_wpa_psk.yml -+++ b/examples/wireless_wpa_psk.yml -@@ -12,4 +12,4 @@ - # see https://docs.ansible.com/ansible/latest/user_guide/vault.html - password: "p@55w0rD" - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/tests/playbooks/down_profile.yml b/tests/playbooks/down_profile.yml -index 5087240..65e542d 100644 ---- a/tests/playbooks/down_profile.yml -+++ b/tests/playbooks/down_profile.yml -@@ -7,4 +7,4 @@ - - name: "{{ profile }}" - state: down - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network -diff --git a/tests/playbooks/remove_profile.yml b/tests/playbooks/remove_profile.yml -index a50e848..b6e6796 100644 ---- a/tests/playbooks/remove_profile.yml -+++ b/tests/playbooks/remove_profile.yml -@@ -7,4 +7,4 @@ - - name: "{{ profile }}" - persistent_state: absent - roles: -- - linux-system-roles.network -+ - rhel-system-roles.network diff --git a/SOURCES/rhel-system-roles-postfix-prefix.diff b/SOURCES/rhel-system-roles-postfix-prefix.diff deleted file mode 100644 index 65ab2a1..0000000 --- a/SOURCES/rhel-system-roles-postfix-prefix.diff +++ /dev/null @@ -1,40 +0,0 @@ -diff --git a/README.md b/README.md -index 5950215..a59d72f 100644 ---- a/README.md -+++ b/README.md -@@ -25,7 +25,7 @@ Install and enable postfix. Configure "relay_domains=$mydestination" and - relay_domains: "$mydestination" - relay_host: "example.com" - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. Do not run 'postfix check' before restarting -@@ -37,7 +37,7 @@ postfix: - vars: - postfix_check: false - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. Do single backup of main.cf (older backup will be -@@ -51,7 +51,7 @@ rewritten) and configure "relay_host=example.com": - relay_host: "example.com" - postfix_backup: true - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - Install and enable postfix. Do timestamped backup of main.cf and -@@ -66,7 +66,7 @@ set to true postfix_backup is ignored): - relay_host: "example.com" - postfix_backup_multiple: true - roles: -- - postfix -+ - linux-system-roles.postfix - ``` - - diff --git a/SOURCES/rhel-system-roles-selinux-prefix.diff b/SOURCES/rhel-system-roles-selinux-prefix.diff deleted file mode 100644 index 7e80daa..0000000 --- a/SOURCES/rhel-system-roles-selinux-prefix.diff +++ /dev/null @@ -1,32 +0,0 @@ -diff --git a/README.md b/README.md -index a0385b0..6efc62d 100644 ---- a/README.md -+++ b/README.md -@@ -42,7 +42,7 @@ This role can be configured using variab - vars: - [ see below ] - roles: -- - role: linux-system-roles.selinux -+ - role: rhel-system-roles.selinux - become: true - ``` - -diff --git a/selinux-playbook.yml b/selinux-playbook.yml -index 78d3953..b2348d5 100644 ---- a/selinux-playbook.yml -+++ b/selinux-playbook.yml -@@ -31,7 +31,7 @@ - - name: execute the role and catch errors - block: - - include_role: -- name: linux-system-roles.selinux -+ name: rhel-system-roles.selinux - rescue: - # Fail if failed for a different reason than selinux_reboot_required. 
- - name: handle errors -@@ -52,4 +52,4 @@ - - - name: reapply the role - include_role: -- name: linux-system-roles.selinux -+ name: rhel-system-roles.selinux diff --git a/SOURCES/rhel-system-roles-storage-prefix.diff b/SOURCES/rhel-system-roles-storage-prefix.diff deleted file mode 100644 index 7855b38..0000000 --- a/SOURCES/rhel-system-roles-storage-prefix.diff +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/README.md b/README.md -index c2debc9..d9e40b3 100644 ---- a/README.md -+++ b/README.md -@@ -154,7 +154,7 @@ Example Playbook - - hosts: all - - roles: -- - name: linux-system-roles.storage -+ - name: rhel-system-roles.storage - storage_pools: - - name: app - disks: diff --git a/SOURCES/rhel-system-roles-timesync-prefix.diff b/SOURCES/rhel-system-roles-timesync-prefix.diff deleted file mode 100644 index 6fe1889..0000000 --- a/SOURCES/rhel-system-roles-timesync-prefix.diff +++ /dev/null @@ -1,46 +0,0 @@ -diff -up timesync-1.0.0/README.md.orig timesync-1.0.0/README.md ---- timesync-1.0.0/README.md.orig 2018-08-21 11:46:41.000000000 +0200 -+++ timesync-1.0.0/README.md 2018-11-06 22:29:14.586770442 +0100 -@@ -82,7 +82,7 @@ Install and configure ntp to synchronize - - hostname: baz.example.com - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` - - Install and configure linuxptp to synchronize the system clock with a -@@ -95,7 +95,7 @@ grandmaster in PTP domain number 0, whic - - number: 0 - interfaces: [ eth0 ] - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` - - Install and configure chrony and linuxptp to synchronize the system clock with -@@ -122,5 +122,5 @@ synchronization: - transport: UDPv4 - delay: 0.000010 - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync - ``` -diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml ---- timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig 2019-06-03 18:03:18.081868584 +0200 -+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml 2019-06-03 18:03:26.718704991 +0200 -@@ -11,4 +11,4 @@ - - hostname: 3.pool.ntp.org - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync -diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml ---- timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig 2019-06-03 16:36:40.000000000 +0200 -+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml 2019-06-03 18:03:36.721515519 +0200 -@@ -6,4 +6,4 @@ - pool: yes - iburst: yes - roles: -- - linux-system-roles.timesync -+ - rhel-system-roles.timesync diff --git a/SOURCES/selinux-ansible-test-issues.diff b/SOURCES/selinux-ansible-test-issues.diff new file mode 100644 index 0000000..ef16241 --- /dev/null +++ b/SOURCES/selinux-ansible-test-issues.diff @@ -0,0 +1,164 @@ +From 9cbbc3f63052bef0b6a697e066e092a5f9722ce8 Mon Sep 17 00:00:00 2001 +From: Noriko Hosoi +Date: Mon, 22 Feb 2021 17:11:05 -0800 +Subject: [PATCH] Patch23: selinux-ansible-test-issues.diff + +--- + .sanity-ansible-ignore-2.10.txt | 2 ++ + .sanity-ansible-ignore-2.9.txt | 2 ++ + library/selogin.py | 26 ++++++++++----------- + tests/setup_module_utils.sh | 41 --------------------------------- + 4 files changed, 16 insertions(+), 55 deletions(-) 
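Each line in the ignore files added below names a file and a sanity test to waive; roughly, a test name with an error code (as in `validate-modules:missing-gplv3-license` elsewhere in this series) silences a known failure, while the `!skip` suffix skips that test for the file entirely. The two entries created here exempt `plugins/modules/selogin.py` from the `no-get-exception` check and from `validate-modules` as a whole.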
+ create mode 100644 .sanity-ansible-ignore-2.10.txt + create mode 100644 .sanity-ansible-ignore-2.9.txt + delete mode 100755 tests/setup_module_utils.sh + +diff --git a/.sanity-ansible-ignore-2.10.txt b/.sanity-ansible-ignore-2.10.txt +new file mode 100644 +index 0000000..5f8ce1e +--- /dev/null ++++ b/.sanity-ansible-ignore-2.10.txt +@@ -0,0 +1,2 @@ ++plugins/modules/selogin.py no-get-exception ++plugins/modules/selogin.py validate-modules!skip +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..5f8ce1e +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1,2 @@ ++plugins/modules/selogin.py no-get-exception ++plugins/modules/selogin.py validate-modules!skip +diff --git a/library/selogin.py b/library/selogin.py +index b785c27..6e3fd32 100644 +--- a/library/selogin.py ++++ b/library/selogin.py +@@ -15,6 +15,9 @@ + # + # You should have received a copy of the GNU General Public License + # along with this program. If not, see . ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + ANSIBLE_METADATA = { + "status": ["preview"], +@@ -22,13 +25,14 @@ ANSIBLE_METADATA = { + "version": "1.0", + } + +-DOCUMENTATION = """ ++DOCUMENTATION = r""" + --- + module: selogin + short_description: Manages linux user to SELinux user mapping + description: +- - Manages linux user to SELinux user mapping +-version_added: "1.0" ++ - "WARNING: Do not use this module directly! It is only for role internal use." ++ - Manages linux user to SELinux user mapping ++version_added: '1.0' + options: + login: + description: +@@ -41,8 +45,7 @@ options: + required: true + default: null + serange: +- description: +- - >- ++ description: > + MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login + mapping - defaults to the SELinux user record range. 
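Together these edits give the documentation block the shape `validate-modules` expects: a raw docstring, a leading warning that the module is internal to the role, and authors listed with GitHub handles; relatedly, the `EXAMPLES` fix below quotes `"%engineering"` because a plain YAML scalar may not begin with `%`. A condensed sketch of that shape, with placeholder module and author names:

    DOCUMENTATION = r"""
    ---
    module: example
    short_description: Manages an internal detail of the role
    description:
      - "WARNING: Do not use this module directly! It is only for role internal use."
    options: {}
    author:
      - Jane Doe (@janedoe)
    """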
+ required: false +@@ -62,8 +65,9 @@ notes: + - The changes are persistent across reboots + - Not tested on any debian based system + requirements: [ 'libselinux-python', 'policycoreutils-python' ] +-author: Dan Keder +-author: Petr Lautrbach ++author: ++ - Dan Keder (@dkeder) ++ - Petr Lautrbach (@bachradsusi) + """ + + EXAMPLES = """ +@@ -82,7 +86,7 @@ EXAMPLES = """ + + # Assign all users in the engineering group to the staff_u user + - selogin: +- login: %engineering ++ login: "%engineering" + seuser: staff_u + state: present + """ +@@ -198,9 +202,6 @@ def semanage_login_add(module, login, seuser, do_reload, serange="s0", sestore=" + except KeyError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +- except OSError: +- e = get_exception() +- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) + except RuntimeError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +@@ -248,9 +249,6 @@ def semanage_login_del(module, login, seuser, do_reload, sestore=""): + except KeyError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +- except OSError: +- e = get_exception() +- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) + except RuntimeError: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) +diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh +deleted file mode 100755 +index 94d102d..0000000 +--- a/tests/setup_module_utils.sh ++++ /dev/null +@@ -1,41 +0,0 @@ +-#!/bin/bash +-# SPDX-License-Identifier: MIT +- +-set -euo pipefail +- +-if [ -n "${DEBUG:-}" ] ; then +- set -x +-fi +- +-if [ ! -d "${1:-}" ] ; then +- echo Either ansible is not installed, or there is no ansible/module_utils +- echo in "$1" - Skipping +- exit 0 +-fi +- +-if [ ! 
-d "${2:-}" ] ; then +- echo Role has no module_utils - Skipping +- exit 0 +-fi +- +-# we need absolute path for $2 +-absmoddir=$( readlink -f "$2" ) +- +-# clean up old links to module_utils +-for item in "$1"/* ; do +- if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then +- case "$lnitem" in +- *"${2}"*) rm -f "$item" ;; +- esac +- fi +-done +- +-# add new links to module_utils +-for item in "$absmoddir"/* ; do +- case "$item" in +- *__pycache__) continue;; +- *.pyc) continue;; +- esac +- bnitem=$( basename "$item" ) +- ln -s "$item" "$1/$bnitem" +-done +-- +2.26.2 + diff --git a/SOURCES/selinux-bz-1926947-no-variable-named-present.diff b/SOURCES/selinux-bz-1926947-no-variable-named-present.diff new file mode 100644 index 0000000..f9bdf2e --- /dev/null +++ b/SOURCES/selinux-bz-1926947-no-variable-named-present.diff @@ -0,0 +1,34 @@ +From 035a9b2db26af071a95e02a0af08bcbb73b69abf Mon Sep 17 00:00:00 2001 +From: Florian Bachmann +Date: Fri, 5 Feb 2021 11:48:53 +0100 +Subject: [PATCH] fix incorrect default value (there is no variable named + "present") + +--- + tasks/main.yml | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/tasks/main.yml b/tasks/main.yml +index afbe81f..702e369 100644 +--- a/tasks/main.yml ++++ b/tasks/main.yml +@@ -118,7 +118,7 @@ + ports: "{{ item.ports }}" + proto: "{{ item.proto | default('tcp') }}" + setype: "{{ item.setype }}" +- state: "{{ item.state | default(present) }}" ++ state: "{{ item.state | default('present') }}" + with_items: "{{ selinux_ports }}" + + - name: Set linux user to SELinux user mapping +@@ -126,6 +126,6 @@ + login: "{{ item.login }}" + seuser: "{{ item.seuser }}" + serange: "{{ item.serange | default('s0') }}" +- state: "{{ item.state | default(present) }}" ++ state: "{{ item.state | default('present') }}" + reload: "{{ item.reload | default(False) }}" + with_items: "{{ selinux_logins }}" +-- +2.29.2 + diff --git a/SOURCES/selinux-tier1-tags.diff b/SOURCES/selinux-tier1-tags.diff index d0c785c..c2c4abd 100644 --- a/SOURCES/selinux-tier1-tags.diff +++ b/SOURCES/selinux-tier1-tags.diff @@ -16,10 +16,18 @@ index f294101..7571066 100644 command: /usr/sbin/semanage boolean -l -n -C register: selinux_role_boolean diff --git a/tests/tests_all_purge.yml b/tests/tests_all_purge.yml -index 03dfe05..c686837 100644 +index 03dfe05..6775847 100644 --- a/tests/tests_all_purge.yml +++ b/tests/tests_all_purge.yml -@@ -14,7 +14,9 @@ +@@ -8,13 +8,17 @@ + fcontext -a -t user_home_dir_t /tmp/test_dir + login -a -s staff_u sar-user + ++ tags: ++ - 'tests::avc' + tasks: + - name: Install SELinux tool semanage on Fedora + package: name: - policycoreutils-python-utils state: present @@ -47,8 +55,7 @@ diff --git a/tests/tests_boolean.yml b/tests/tests_boolean.yml index 47eafc0..2aa0025 100644 --- a/tests/tests_boolean.yml +++ b/tests/tests_boolean.yml -@@ -1,5 +1,6 @@ - +@@ -1,4 +1,5 @@ - name: Check if selinux role sets SELinux booleans + tags: tests::expfail hosts: all @@ -80,10 +87,9 @@ diff --git a/tests/tests_login.yml b/tests/tests_login.yml index efa826d..c7ce462 100644 --- a/tests/tests_login.yml +++ b/tests/tests_login.yml -@@ -18,7 +18,7 @@ +@@ -18,6 +18,6 @@ - { login: 'sar-user', seuser: 'staff_u', serange: 's0-s0:c0.c1023', state: 'present' } - - - include: set_selinux_variables.yml + - import_tasks: set_selinux_variables.yml - name: save state after initial changes and before other changes @@ -103,10 +109,18 @@ index 446f79d..7bb112e 100644 set_fact: port_after: "{{ selinux_role_port.stdout }}" diff --git 
a/tests/tests_selinux_disabled.yml b/tests/tests_selinux_disabled.yml -index afd23e4..706882f 100644 +index afd23e4..883dc6d 100644 --- a/tests/tests_selinux_disabled.yml +++ b/tests/tests_selinux_disabled.yml -@@ -18,7 +18,9 @@ +@@ -12,13 +12,17 @@ + fcontext -a -t user_home_dir_t /tmp/test_dir + login -a -s staff_u sar-user + ++ tags: ++ - 'tests::avc' + tasks: + - name: Install SELinux tool semanage on Fedora + package: name: - policycoreutils-python-utils state: present @@ -157,6 +171,6 @@ index afd23e4..706882f 100644 + state: absent + + - import_role: -+ name: selinux ++ name: linux-system-roles.selinux + vars: + selinux_all_purge: true diff --git a/SOURCES/sshd-example.diff b/SOURCES/sshd-example.diff new file mode 100644 index 0000000..48243e3 --- /dev/null +++ b/SOURCES/sshd-example.diff @@ -0,0 +1,43 @@ +diff --git a/README.md b/README.md +index 676ad72..dc06d85 100644 +--- a/README.md ++++ b/README.md +@@ -190,7 +190,7 @@ defaults. This is useful if the role is used in deployment stage to make sure + the service is able to start on the first attempt. To disable this check, set + this to empty list. + +-* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_group` ++* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_mode` + + Use these variables to set the ownership and permissions for the host keys from + the above list. +@@ -273,6 +273,8 @@ for example: + X11Forwarding: yes + ``` + ++More example playbooks can be found in [`examples/`](examples/) directory. ++ + Template Generation + ------------------- + +diff --git a/examples/example-root-login.yml b/examples/example-root-login.yml +new file mode 100644 +index 0000000..156e629 +--- /dev/null ++++ b/examples/example-root-login.yml +@@ -0,0 +1,15 @@ ++--- ++- hosts: all ++ tasks: ++ - name: Configure sshd to prevent root and password login except from particular subnet ++ include_role: ++ name: ansible-sshd ++ vars: ++ sshd: ++ # root login and password login is enabled only from a particular subnet ++ PermitRootLogin: no ++ PasswordAuthentication: no ++ Match: ++ - Condition: "Address 192.0.2.0/24" ++ PermitRootLogin: yes ++ PasswordAuthentication: yes diff --git a/SOURCES/sshd-work-on-ansible28-jinja27.diff b/SOURCES/sshd-work-on-ansible28-jinja27.diff new file mode 100644 index 0000000..268d31f --- /dev/null +++ b/SOURCES/sshd-work-on-ansible28-jinja27.diff @@ -0,0 +1,25 @@ +From bb612fb6c5f76a40fce368acb43d2847e699213d Mon Sep 17 00:00:00 2001 +From: Rich Megginson +Date: Thu, 28 Jan 2021 15:56:14 -0700 +Subject: [PATCH] use state: absent instead of state: missing + +--- + tests/tests_hostkeys_missing.yml | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/tests_hostkeys_missing.yml b/tests/tests_hostkeys_missing.yml +index 9dfe77b..5790684 100644 +--- a/tests/tests_hostkeys_missing.yml ++++ b/tests/tests_hostkeys_missing.yml +@@ -40,7 +40,7 @@ + - name: Make sure the key was not created + file: + path: /tmp/missing_ssh_host_rsa_key +- state: missing ++ state: absent + register: key + failed_when: key.changed + tags: tests::verify +-- +2.29.2 + diff --git a/SOURCES/storage-ansible-test.diff b/SOURCES/storage-ansible-test.diff new file mode 100644 index 0000000..3cb42d8 --- /dev/null +++ b/SOURCES/storage-ansible-test.diff @@ -0,0 +1,3663 @@ +From 1d7f9d53c5be6588a7a6c34e4c623b2a8f6fff19 Mon Sep 17 00:00:00 2001 +From: Rich Megginson +Date: Wed, 3 Mar 2021 07:55:20 -0700 +Subject: [PATCH] resolve ansible-test issues + +This fixes many formatting issues as well to make black, flake8, 
+pylint, yamllint, and ansible-lint happier. + +(cherry picked from commit bb2a1af5f63d00c3ff178f3b44696189d9adf542) +--- + .github/workflows/tox.yml | 4 +- + .sanity-ansible-ignore-2.9.txt | 13 + + library/blivet.py | 968 +++++++++++------- + library/blockdev_info.py | 45 +- + library/bsize.py | 56 +- + library/find_unused_disk.py | 101 +- + library/lvm_gensym.py | 119 ++- + library/resolve_blockdev.py | 71 +- + module_utils/storage_lsr/size.py | 86 +- + tests/setup_module_utils.sh | 41 - + tests/test-verify-volume-device.yml | 4 +- + tests/test-verify-volume-md.yml | 2 +- + tests/test.yml | 2 +- + tests/tests_create_lv_size_equal_to_vg.yml | 28 +- + ...ts_create_partition_volume_then_remove.yml | 4 +- + tests/tests_existing_lvm_pool.yml | 12 +- + tests/tests_lvm_auto_size_cap.yml | 42 +- + tests/tests_lvm_one_disk_one_volume.yml | 46 +- + tests/tests_misc.yml | 2 +- + tests/tests_null_raid_pool.yml | 14 +- + tests/tests_resize.yml | 86 +- + tests/unit/bsize_test.py | 5 + + tests/unit/gensym_test.py | 103 +- + tests/unit/resolve_blockdev_test.py | 74 +- + tests/unit/test_unused_disk.py | 73 +- + tox.ini | 6 - + 26 files changed, 1177 insertions(+), 830 deletions(-) + create mode 100644 .sanity-ansible-ignore-2.9.txt + delete mode 100755 tests/setup_module_utils.sh + +diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml +index eceb71f..ec3ec9f 100644 +--- a/.github/workflows/tox.yml ++++ b/.github/workflows/tox.yml +@@ -3,7 +3,7 @@ name: tox + on: # yamllint disable-line rule:truthy + - pull_request + env: +- TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0" ++ TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0" + LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*' + LSR_MSCENARIOS: default + # LSR_EXTRA_PACKAGES: libdbus-1-dev +@@ -36,7 +36,7 @@ jobs: + toxenvs="py${toxpyver}" + case "$toxpyver" in + 27) toxenvs="${toxenvs},coveralls,flake8,pylint,custom" ;; +- 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection" ;; ++ 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection,ansible-test" ;; + 37) toxenvs="${toxenvs},coveralls,custom" ;; + 38) toxenvs="${toxenvs},coveralls,custom" ;; + esac +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..bf700c6 +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1,13 @@ ++plugins/modules/blivet.py import-2.7!skip ++plugins/modules/blivet.py import-3.5!skip ++plugins/modules/blivet.py import-3.6!skip ++plugins/modules/blivet.py import-3.7!skip ++plugins/modules/blivet.py import-3.8!skip ++tests/storage/unit/gensym_test.py shebang!skip ++plugins/modules/blivet.py validate-modules:import-error ++plugins/modules/blivet.py validate-modules:missing-gplv3-license ++plugins/modules/blockdev_info.py validate-modules:missing-gplv3-license ++plugins/modules/bsize.py validate-modules:missing-gplv3-license ++plugins/modules/find_unused_disk.py validate-modules:missing-gplv3-license ++plugins/modules/lvm_gensym.py validate-modules:missing-gplv3-license ++plugins/modules/resolve_blockdev.py validate-modules:missing-gplv3-license +diff --git a/library/blivet.py b/library/blivet.py +index 946b640..0e0b30c 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -1,12 +1,16 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 
'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: blivet + +@@ -15,6 +19,7 @@ short_description: Module for management of linux block device stacks + version_added: "2.5" + + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "Module configures storage pools and volumes to match the state specified + in input parameters. It does not do any management of /etc/fstab entries." + +@@ -30,7 +35,8 @@ options: + - boolean indicating whether to create partitions on disks for pool backing devices + disklabel_type: + description: +- - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet ++ - | ++ disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet + safe_mode: + description: + - boolean indicating that we should fail rather than implicitly/automatically +@@ -41,10 +47,10 @@ options: + when creating a disk volume (that is, a whole disk filesystem) + + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + + - name: Manage devices + blivet: +@@ -64,28 +70,40 @@ EXAMPLES = ''' + mount_point: /whole_disk1 + fs_type: ext4 + mount_options: journal_checksum,async,noexec +-''' ++""" + +-RETURN = ''' ++RETURN = """ + actions: + description: list of dicts describing actions taken +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + leaves: + description: list of paths to leaf devices +- type: list of str ++ returned: success ++ type: list ++ elements: dict + mounts: + description: list of dicts describing mounts to set up +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + crypts: + description: list of dicts describing crypttab entries to set up +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + pools: + description: list of dicts describing the pools w/ device path for each volume +- type: list of dict ++ returned: success ++ type: list ++ elements: dict + volumes: + description: list of dicts describing the volumes w/ device path for each +- type: list of dict +-''' ++ returned: success ++ type: list ++ elements: dict ++""" + + import logging + import os +@@ -106,7 +124,8 @@ try: + from blivet3.size import Size + from blivet3.udev import trigger + from blivet3.util import set_up_logging +- BLIVET_PACKAGE = 'blivet3' ++ ++ BLIVET_PACKAGE = "blivet3" + except ImportError: + LIB_IMP_ERR3 = traceback.format_exc() + try: +@@ -119,7 +138,8 @@ except ImportError: + from blivet.size import Size + from blivet.udev import trigger + from blivet.util import set_up_logging +- BLIVET_PACKAGE = 'blivet' ++ ++ BLIVET_PACKAGE = "blivet" + except ImportError: + LIB_IMP_ERR = traceback.format_exc() + +@@ -135,23 +155,23 @@ MAX_TRIM_PERCENT = 2 + + use_partitions = None # create partitions on pool backing device disks? + disklabel_type = None # user-specified disklabel type +-safe_mode = None # do not remove any existing devices or formatting ++safe_mode = None # do not remove any existing devices or formatting + pool_defaults = dict() + volume_defaults = dict() + + + def find_duplicate_names(dicts): +- """ Return a list of names that appear more than once in a list of dicts. ++ """Return a list of names that appear more than once in a list of dicts. + +- Items can be a list of any dicts with a 'name' key; that's all we're +- looking at. 
""" ++ Items can be a list of any dicts with a 'name' key; that's all we're ++ looking at.""" + names = list() + duplicates = list() + for item in dicts: +- if item['name'] in names and item['name'] not in duplicates: +- duplicates.append(item['name']) ++ if item["name"] in names and item["name"] not in duplicates: ++ duplicates.append(item["name"]) + else: +- names.append(item['name']) ++ names.append(item["name"]) + + return duplicates + +@@ -177,41 +197,54 @@ class BlivetBase(object): + global safe_mode + ret = device + # Make sure to handle adjusting both existing stacks and future stacks. +- if device == device.raw_device and self._spec_dict['encryption']: ++ if device == device.raw_device and self._spec_dict["encryption"]: + # add luks + luks_name = "luks-%s" % device._name +- if safe_mode and (device.original_format.type is not None or +- device.original_format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to adding encryption" % +- device._name) ++ if safe_mode and ( ++ device.original_format.type is not None ++ or device.original_format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on device '%s' in safe mode due to adding encryption" ++ % device._name ++ ) + if not device.format.exists: + fmt = device.format + else: + fmt = get_format(None) + +- self._blivet.format_device(device, +- get_format("luks", +- name=luks_name, +- cipher=self._spec_dict.get('encryption_cipher'), +- key_size=self._spec_dict.get('encryption_key_size'), +- luks_version=self._spec_dict.get('encryption_luks_version'), +- passphrase=self._spec_dict.get('encryption_password') or None, +- key_file=self._spec_dict.get('encryption_key') or None)) ++ self._blivet.format_device( ++ device, ++ get_format( ++ "luks", ++ name=luks_name, ++ cipher=self._spec_dict.get("encryption_cipher"), ++ key_size=self._spec_dict.get("encryption_key_size"), ++ luks_version=self._spec_dict.get("encryption_luks_version"), ++ passphrase=self._spec_dict.get("encryption_password") or None, ++ key_file=self._spec_dict.get("encryption_key") or None, ++ ), ++ ) + + if not device.format.has_key: +- raise BlivetAnsibleError("encrypted %s '%s' missing key/password" % (self._type, self._spec_dict['name'])) ++ raise BlivetAnsibleError( ++ "encrypted %s '%s' missing key/password" ++ % (self._type, self._spec_dict["name"]) ++ ) + +- luks_device = devices.LUKSDevice(luks_name, +- fmt=fmt, +- parents=[device]) ++ luks_device = devices.LUKSDevice(luks_name, fmt=fmt, parents=[device]) + self._blivet.create_device(luks_device) + ret = luks_device +- elif device != device.raw_device and not self._spec_dict['encryption']: ++ elif device != device.raw_device and not self._spec_dict["encryption"]: + # remove luks +- if safe_mode and (device.original_format.type is not None or +- device.original_format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to encryption removal" % +- device._name) ++ if safe_mode and ( ++ device.original_format.type is not None ++ or device.original_format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on device '%s' in safe mode due to encryption removal" ++ % device._name ++ ) + if not device.format.exists: + fmt = device.format + else: +@@ -240,12 +273,21 @@ class BlivetBase(object): + requested_spares = self._spec_dict.get("raid_spare_count") + + if 
requested_actives is not None and requested_spares is not None: +- if (requested_actives + requested_spares != len(members) or +- requested_actives < 0 or requested_spares < 0): +- raise BlivetAnsibleError("failed to set up '%s': cannot create RAID " +- "with %s members (%s active and %s spare)" +- % (self._spec_dict["name"], len(members), +- requested_actives, requested_spares)) ++ if ( ++ requested_actives + requested_spares != len(members) ++ or requested_actives < 0 ++ or requested_spares < 0 ++ ): ++ raise BlivetAnsibleError( ++ "failed to set up '%s': cannot create RAID " ++ "with %s members (%s active and %s spare)" ++ % ( ++ self._spec_dict["name"], ++ len(members), ++ requested_actives, ++ requested_spares, ++ ) ++ ) + + if requested_actives is not None: + active_count = requested_actives +@@ -264,16 +306,20 @@ class BlivetBase(object): + raise BlivetAnsibleError("chunk size must be multiple of 4 KiB") + + try: +- raid_array = self._blivet.new_mdarray(name=raid_name, +- level=self._spec_dict["raid_level"], +- member_devices=active_count, +- total_devices=len(members), +- parents=members, +- chunk_size=chunk_size, +- metadata_version=self._spec_dict.get("raid_metadata_version"), +- fmt=self._get_format()) ++ raid_array = self._blivet.new_mdarray( ++ name=raid_name, ++ level=self._spec_dict["raid_level"], ++ member_devices=active_count, ++ total_devices=len(members), ++ parents=members, ++ chunk_size=chunk_size, ++ metadata_version=self._spec_dict.get("raid_metadata_version"), ++ fmt=self._get_format(), ++ ) + except ValueError as e: +- raise BlivetAnsibleError("cannot create RAID '%s': %s" % (raid_name, str(e))) ++ raise BlivetAnsibleError( ++ "cannot create RAID '%s': %s" % (raid_name, str(e)) ++ ) + + return raid_array + +@@ -298,17 +344,18 @@ class BlivetVolume(BlivetBase): + if self.__class__.blivet_device_class is not None: + packages.extend(self.__class__.blivet_device_class._packages) + +- fmt = get_format(self._volume.get('fs_type')) ++ fmt = get_format(self._volume.get("fs_type")) + packages.extend(fmt.packages) +- if self._volume.get('encryption'): +- packages.extend(get_format('luks').packages) ++ if self._volume.get("encryption"): ++ packages.extend(get_format("luks").packages) + return packages + + @property + def ultimately_present(self): + """ Should this volume be present when we are finished? """ +- return (self._volume.get('state', 'present') == 'present' and +- (self._blivet_pool is None or self._blivet_pool.ultimately_present)) ++ return self._volume.get("state", "present") == "present" and ( ++ self._blivet_pool is None or self._blivet_pool.ultimately_present ++ ) + + def _type_check(self): # pylint: disable=no-self-use + """ Is self._device of the correct type? """ +@@ -316,7 +363,7 @@ class BlivetVolume(BlivetBase): + + def _get_device_id(self): + """ Return an identifier by which to try looking the volume up. """ +- return self._volume['name'] ++ return self._volume["name"] + + def _look_up_device(self): + """ Try to look up this volume in blivet's device tree. """ +@@ -331,14 +378,14 @@ class BlivetVolume(BlivetBase): + if device is None: + return + +- if device.format.type == 'luks': ++ if device.format.type == "luks": + # XXX If we have no key we will always re-encrypt. 
+- device.format._key_file = self._volume.get('encryption_key') +- device.format.passphrase = self._volume.get('encryption_password') ++ device.format._key_file = self._volume.get("encryption_key") ++ device.format.passphrase = self._volume.get("encryption_password") + + # set up the original format as well since it'll get used for processing +- device.original_format._key_file = self._volume.get('encryption_key') +- device.original_format.passphrase = self._volume.get('encryption_password') ++ device.original_format._key_file = self._volume.get("encryption_key") ++ device.original_format.passphrase = self._volume.get("encryption_password") + if device.isleaf: + self._blivet.populate() + +@@ -361,26 +408,31 @@ class BlivetVolume(BlivetBase): + elif encrypted: + luks_fmt = self._device.format + +- if param_name == 'size': +- self._volume['size'] = int(self._device.size.convert_to()) +- elif param_name == 'fs_type' and (self._device.format.type or self._device.format.name != get_format(None).name): +- self._volume['fs_type'] = self._device.format.type +- elif param_name == 'fs_label': +- self._volume['fs_label'] = getattr(self._device.format, 'label', "") or "" +- elif param_name == 'mount_point': +- self._volume['mount_point'] = getattr(self._device.format, 'mountpoint', None) +- elif param_name == 'disks': +- self._volume['disks'] = [d.name for d in self._device.disks] +- elif param_name == 'encryption': +- self._volume['encryption'] = encrypted +- elif param_name == 'encryption_key_size' and encrypted: +- self._volume['encryption_key_size'] = luks_fmt.key_size +- elif param_name == 'encryption_key_file' and encrypted: +- self._volume['encryption_key_file'] = luks_fmt.key_file +- elif param_name == 'encryption_cipher' and encrypted: +- self._volume['encryption_cipher'] = luks_fmt.cipher +- elif param_name == 'encryption_luks_version' and encrypted: +- self._volume['encryption_luks_version'] = luks_fmt.luks_version ++ if param_name == "size": ++ self._volume["size"] = int(self._device.size.convert_to()) ++ elif param_name == "fs_type" and ( ++ self._device.format.type ++ or self._device.format.name != get_format(None).name ++ ): ++ self._volume["fs_type"] = self._device.format.type ++ elif param_name == "fs_label": ++ self._volume["fs_label"] = getattr(self._device.format, "label", "") or "" ++ elif param_name == "mount_point": ++ self._volume["mount_point"] = getattr( ++ self._device.format, "mountpoint", None ++ ) ++ elif param_name == "disks": ++ self._volume["disks"] = [d.name for d in self._device.disks] ++ elif param_name == "encryption": ++ self._volume["encryption"] = encrypted ++ elif param_name == "encryption_key_size" and encrypted: ++ self._volume["encryption_key_size"] = luks_fmt.key_size ++ elif param_name == "encryption_key_file" and encrypted: ++ self._volume["encryption_key_file"] = luks_fmt.key_file ++ elif param_name == "encryption_cipher" and encrypted: ++ self._volume["encryption_cipher"] = luks_fmt.cipher ++ elif param_name == "encryption_luks_version" and encrypted: ++ self._volume["encryption_luks_version"] = luks_fmt.luks_version + else: + return False + +@@ -392,7 +444,7 @@ class BlivetVolume(BlivetBase): + if name in self._volume: + continue + +- default = None if default in ('none', 'None', 'null') else default ++ default = None if default in ("none", "None", "null") else default + + if self._device: + # Apply values from the device if it already exists. 
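The hunk above distills the defaulting rules: a key the caller supplied is never overridden, the literal strings none/None/null in the defaults table stand in for Python None, and a value read back from an already-existing device takes precedence over the role default. A minimal standalone sketch of that precedence, using illustrative names only (apply_defaults and read_from_device are hypothetical, not the module's API):

    def apply_defaults(spec, defaults, read_from_device=None):
        # spec: user-supplied volume/pool dict; defaults: the role's
        # defaults table; read_from_device: optional callable(name)
        # returning (found, value) for an existing device. All names
        # here are hypothetical stand-ins for the role's internals.
        for name, default in defaults.items():
            if name in spec:
                continue  # an explicit user setting always wins
            # the defaults table spells "no value" as a string
            value = None if default in ("none", "None", "null") else default
            if read_from_device is not None:
                found, dev_value = read_from_device(name)
                if found:
                    value = dev_value  # existing device beats the default
            spec[name] = value
        return spec

For example, apply_defaults({"name": "data"}, {"fs_type": "xfs", "mount_point": "none"}) yields {"name": "data", "fs_type": "xfs", "mount_point": None}.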
+@@ -403,12 +455,17 @@ class BlivetVolume(BlivetBase): + + def _get_format(self): + """ Return a blivet.formats.DeviceFormat instance for this volume. """ +- fmt = get_format(self._volume['fs_type'], +- mountpoint=self._volume.get('mount_point'), +- label=self._volume['fs_label'], +- create_options=self._volume['fs_create_options']) ++ fmt = get_format( ++ self._volume["fs_type"], ++ mountpoint=self._volume.get("mount_point"), ++ label=self._volume["fs_label"], ++ create_options=self._volume["fs_create_options"], ++ ) + if not fmt.supported or not fmt.formattable: +- raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type']) ++ raise BlivetAnsibleError( ++ "required tools for file system '%s' are missing" ++ % self._volume["fs_type"] ++ ) + + return fmt + +@@ -422,9 +479,9 @@ class BlivetVolume(BlivetBase): + return + + # save device identifiers for use by the role +- self._volume['_device'] = self._device.path +- self._volume['_raw_device'] = self._device.raw_device.path +- self._volume['_mount_id'] = self._device.fstab_spec ++ self._volume["_device"] = self._device.path ++ self._volume["_raw_device"] = self._device.raw_device.path ++ self._volume["_mount_id"] = self._device.fstab_spec + + # schedule removal of this device and any descendant devices + self._blivet.devicetree.recursive_remove(self._device.raw_device) +@@ -435,9 +492,12 @@ class BlivetVolume(BlivetBase): + def _resize(self): + """ Schedule actions as needed to ensure the device has the desired size. """ + try: +- size = Size(self._volume['size']) ++ size = Size(self._volume["size"]) + except Exception: +- raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size'])) ++ raise BlivetAnsibleError( ++ "invalid size specification for volume '%s': '%s'" ++ % (self._volume["name"], self._volume["size"]) ++ ) + + if size and self._device.size != size: + try: +@@ -448,28 +508,44 @@ class BlivetVolume(BlivetBase): + if not self._device.resizable: + return + +- trim_percent = (1.0 - float(self._device.max_size / size))*100 +- log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent) ++ trim_percent = (1.0 - float(self._device.max_size / size)) * 100 ++ log.debug( ++ "resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent ++ ) + if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT: +- log.info("adjusting %s resize target from %s to %s to fit in free space", +- self._volume['name'], +- size, +- self._device.max_size) ++ log.info( ++ "adjusting %s resize target from %s to %s to fit in free space", ++ self._volume["name"], ++ size, ++ self._device.max_size, ++ ) + size = self._device.max_size + if size == self._device.size: + return + + if not self._device.min_size <= size <= self._device.max_size: +- raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size)) ++ raise BlivetAnsibleError( ++ "volume '%s' cannot be resized to '%s'" ++ % (self._volume["name"], size) ++ ) + + try: + self._blivet.resize_device(self._device, size) + except ValueError as e: +- raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name, +- self._device.size, +- size, str(e))) +- elif size and self._device.exists and self._device.size != size and not self._device.resizable: +- raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s" % (self._device.name, self._device.size, size)) ++ raise BlivetAnsibleError( ++ "volume 
'%s' cannot be resized from %s to %s: %s" ++ % (self._device.name, self._device.size, size, str(e)) ++ ) ++ elif ( ++ size ++ and self._device.exists ++ and self._device.size != size ++ and not self._device.resizable ++ ): ++ raise BlivetAnsibleError( ++ "volume '%s' cannot be resized from %s to %s" ++ % (self._device.name, self._device.size, size) ++ ) + + def _reformat(self): + """ Schedule actions as needed to ensure the volume is formatted as specified. """ +@@ -477,10 +553,18 @@ class BlivetVolume(BlivetBase): + if self._device.format.type == fmt.type: + return + +- if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): +- raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) +- +- if self._device.format.status and (self._device.format.mountable or self._device.format.type == "swap"): ++ if safe_mode and ( ++ self._device.format.type is not None ++ or self._device.format.name != get_format(None).name ++ ): ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting on volume '%s' in safe mode" ++ % self._volume["name"] ++ ) ++ ++ if self._device.format.status and ( ++ self._device.format.mountable or self._device.format.type == "swap" ++ ): + self._device.format.teardown() + if not self._device.isleaf: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) +@@ -503,7 +587,9 @@ class BlivetVolume(BlivetBase): + + # at this point we should have a blivet.devices.StorageDevice instance + if self._device is None: +- raise BlivetAnsibleError("failed to look up or create device '%s'" % self._volume['name']) ++ raise BlivetAnsibleError( ++ "failed to look up or create device '%s'" % self._volume["name"] ++ ) + + self._manage_encryption() + +@@ -511,24 +597,31 @@ class BlivetVolume(BlivetBase): + if self._device.raw_device.exists: + self._reformat() + +- if self.ultimately_present and self._volume['mount_point'] and not self._device.format.mountable: +- raise BlivetAnsibleError("volume '%s' has a mount point but no mountable file system" % self._volume['name']) ++ if ( ++ self.ultimately_present ++ and self._volume["mount_point"] ++ and not self._device.format.mountable ++ ): ++ raise BlivetAnsibleError( ++ "volume '%s' has a mount point but no mountable file system" ++ % self._volume["name"] ++ ) + + # schedule resize if appropriate +- if self._device.raw_device.exists and self._volume['size']: ++ if self._device.raw_device.exists and self._volume["size"]: + self._resize() + + # save device identifiers for use by the role +- self._volume['_device'] = self._device.path +- self._volume['_raw_device'] = self._device.raw_device.path +- self._volume['_mount_id'] = self._device.fstab_spec ++ self._volume["_device"] = self._device.path ++ self._volume["_raw_device"] = self._device.raw_device.path ++ self._volume["_mount_id"] = self._device.fstab_spec + + + class BlivetDiskVolume(BlivetVolume): + blivet_device_class = devices.DiskDevice + + def _get_device_id(self): +- return self._volume['disks'][0] ++ return self._volume["disks"][0] + + def _type_check(self): + return self._device.raw_device.is_disk +@@ -536,7 +629,7 @@ class BlivetDiskVolume(BlivetVolume): + def _get_format(self): + fmt = super(BlivetDiskVolume, self)._get_format() + # pass -F to mke2fs on whole disks in RHEL7 +- mkfs_options = diskvolume_mkfs_option_map.get(self._volume['fs_type']) ++ mkfs_options = diskvolume_mkfs_option_map.get(self._volume["fs_type"]) + if mkfs_options: + if 
fmt.create_options: + fmt.create_options += " " +@@ -552,23 +645,31 @@ class BlivetDiskVolume(BlivetVolume): + def _look_up_device(self): + super(BlivetDiskVolume, self)._look_up_device() + if not self._get_device_id(): +- raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name']) +- elif not isinstance(self._volume['disks'], list): ++ raise BlivetAnsibleError( ++ "no disks specified for volume '%s'" % self._volume["name"] ++ ) ++ elif not isinstance(self._volume["disks"], list): + raise BlivetAnsibleError("volume disks must be specified as a list") + + if self._device is None: +- raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks'])) ++ raise BlivetAnsibleError( ++ "unable to resolve disk specified for volume '%s' (%s)" ++ % (self._volume["name"], self._volume["disks"]) ++ ) + + + class BlivetPartitionVolume(BlivetVolume): + blivet_device_class = devices.PartitionDevice + + def _type_check(self): +- return self._device.raw_device.type == 'partition' ++ return self._device.raw_device.type == "partition" + + def _get_device_id(self): + device_id = None +- if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1: ++ if ( ++ self._blivet_pool._disks[0].partitioned ++ and len(self._blivet_pool._disks[0].children) == 1 ++ ): + device_id = self._blivet_pool._disks[0].children[0].name + + return device_id +@@ -583,22 +684,29 @@ class BlivetPartitionVolume(BlivetVolume): + if self._blivet_pool: + parent = self._blivet_pool._device + else: +- parent = self._blivet.devicetree.resolve_device(self._volume['pool']) ++ parent = self._blivet.devicetree.resolve_device(self._volume["pool"]) + + if parent is None: +- raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "failed to find pool '%s' for volume '%s'" ++ % (self._blivet_pool["name"], self._volume["name"]) ++ ) + + size = Size("256 MiB") + try: +- device = self._blivet.new_partition(parents=[parent], size=size, grow=True, fmt=self._get_format()) ++ device = self._blivet.new_partition( ++ parents=[parent], size=size, grow=True, fmt=self._get_format() ++ ) + except Exception: +- raise BlivetAnsibleError("failed set up volume '%s'" % self._volume['name']) ++ raise BlivetAnsibleError("failed set up volume '%s'" % self._volume["name"]) + + self._blivet.create_device(device) + try: + do_partitioning(self._blivet) + except Exception: +- raise BlivetAnsibleError("partition allocation failed for volume '%s'" % self._volume['name']) ++ raise BlivetAnsibleError( ++ "partition allocation failed for volume '%s'" % self._volume["name"] ++ ) + + self._device = device + +@@ -609,7 +717,7 @@ class BlivetLVMVolume(BlivetVolume): + def _get_device_id(self): + if not self._blivet_pool._device: + return None +- return "%s-%s" % (self._blivet_pool._device.name, self._volume['name']) ++ return "%s-%s" % (self._blivet_pool._device.name, self._volume["name"]) + + def _create(self): + if self._device: +@@ -617,51 +725,75 @@ class BlivetLVMVolume(BlivetVolume): + + parent = self._blivet_pool._device + if parent is None: +- raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "failed to find pool '%s' for volume '%s'" ++ % (self._blivet_pool["name"], self._volume["name"]) ++ ) + + try: +- size = Size(self._volume['size']) ++ size = 
Size(self._volume["size"]) + except Exception: +- raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) ++ raise BlivetAnsibleError( ++ "invalid size '%s' specified for volume '%s'" ++ % (self._volume["size"], self._volume["name"]) ++ ) + + fmt = self._get_format() +- trim_percent = (1.0 - float(parent.free_space / size))*100 ++ trim_percent = (1.0 - float(parent.free_space / size)) * 100 + log.debug("size: %s ; %s", size, trim_percent) + if size > parent.free_space: + if trim_percent > MAX_TRIM_PERCENT: +- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" +- % (size, parent.name, parent.free_space)) ++ raise BlivetAnsibleError( ++ "specified size for volume '%s' exceeds available space in pool '%s' (%s)" ++ % (size, parent.name, parent.free_space) ++ ) + else: +- log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'], +- size, +- parent.free_space, +- parent.name) ++ log.info( ++ "adjusting %s size from %s to %s to fit in %s free space", ++ self._volume["name"], ++ size, ++ parent.free_space, ++ parent.name, ++ ) + size = parent.free_space + + try: +- device = self._blivet.new_lv(name=self._volume['name'], +- parents=[parent], size=size, fmt=fmt) ++ device = self._blivet.new_lv( ++ name=self._volume["name"], parents=[parent], size=size, fmt=fmt ++ ) + except Exception as e: +- raise BlivetAnsibleError("failed to set up volume '%s': %s" % (self._volume['name'], str(e))) ++ raise BlivetAnsibleError( ++ "failed to set up volume '%s': %s" % (self._volume["name"], str(e)) ++ ) + + self._blivet.create_device(device) + self._device = device + + + class BlivetMDRaidVolume(BlivetVolume): +- +- def _process_device_numbers(self, members_count, requested_actives, requested_spares): ++ def _process_device_numbers( ++ self, members_count, requested_actives, requested_spares ++ ): + + active_count = members_count + spare_count = 0 + + if requested_actives is not None and requested_spares is not None: +- if (requested_actives + requested_spares != members_count or +- requested_actives < 0 or requested_spares < 0): +- raise BlivetAnsibleError("failed to set up volume '%s': cannot create RAID " +- "with %s members (%s active and %s spare)" +- % (self._volume['name'], members_count, +- requested_actives, requested_spares)) ++ if ( ++ requested_actives + requested_spares != members_count ++ or requested_actives < 0 ++ or requested_spares < 0 ++ ): ++ raise BlivetAnsibleError( ++ "failed to set up volume '%s': cannot create RAID " ++ "with %s members (%s active and %s spare)" ++ % ( ++ self._volume["name"], ++ members_count, ++ requested_actives, ++ requested_spares, ++ ) ++ ) + + if requested_actives is not None: + active_count = requested_actives +@@ -685,7 +817,9 @@ class BlivetMDRaidVolume(BlivetVolume): + self._blivet.format_device(member_disk, label) + + # create new partition +- member = self._blivet.new_partition(parents=[member_disk], grow=True) ++ member = self._blivet.new_partition( ++ parents=[member_disk], grow=True ++ ) + self._blivet.create_device(member) + self._blivet.format_device(member, fmt=get_format("mdmember")) + members.append(member) +@@ -697,16 +831,16 @@ class BlivetMDRaidVolume(BlivetVolume): + + def _update_from_device(self, param_name): + """ Return True if param_name's value was retrieved from a looked-up device. 
""" +- if param_name == 'raid_level': +- self._volume['raid_level'] = self._device.level.name +- elif param_name == 'raid_chunk_size': +- self._volume['raid_chunk_size'] = str(self._device.chunk_size) +- elif param_name == 'raid_device_count': +- self._volume['raid_device_count'] = self._device.member_devices +- elif param_name == 'raid_spare_count': +- self._volume['raid_spare_count'] = self._device.spares +- elif param_name == 'raid_metadata_version': +- self._volume['raid_metadata_version'] = self._device.metadata_version ++ if param_name == "raid_level": ++ self._volume["raid_level"] = self._device.level.name ++ elif param_name == "raid_chunk_size": ++ self._volume["raid_chunk_size"] = str(self._device.chunk_size) ++ elif param_name == "raid_device_count": ++ self._volume["raid_device_count"] = self._device.member_devices ++ elif param_name == "raid_spare_count": ++ self._volume["raid_spare_count"] = self._device.spares ++ elif param_name == "raid_metadata_version": ++ self._volume["raid_metadata_version"] = self._device.metadata_version + else: + return super(BlivetMDRaidVolume, self)._update_from_device(param_name) + +@@ -728,7 +862,10 @@ class BlivetMDRaidVolume(BlivetVolume): + try: + do_partitioning(self._blivet) + except Exception as e: +- raise BlivetAnsibleError("failed to allocate partitions for mdraid '%s': %s" % (self._volume['name'], str(e))) ++ raise BlivetAnsibleError( ++ "failed to allocate partitions for mdraid '%s': %s" ++ % (self._volume["name"], str(e)) ++ ) + + raid_array = self._new_mdarray(members) + +@@ -764,16 +901,20 @@ _BLIVET_VOLUME_TYPES = { + "disk": BlivetDiskVolume, + "lvm": BlivetLVMVolume, + "partition": BlivetPartitionVolume, +- "raid": BlivetMDRaidVolume ++ "raid": BlivetMDRaidVolume, + } + + + def _get_blivet_volume(blivet_obj, volume, bpool=None): + """ Return a BlivetVolume instance appropriate for the volume dict. """ + global volume_defaults +- volume_type = volume.get('type', bpool._pool['type'] if bpool else volume_defaults['type']) ++ volume_type = volume.get( ++ "type", bpool._pool["type"] if bpool else volume_defaults["type"] ++ ) + if volume_type not in _BLIVET_VOLUME_TYPES: +- raise BlivetAnsibleError("Volume '%s' has unknown type '%s'" % (volume['name'], volume_type)) ++ raise BlivetAnsibleError( ++ "Volume '%s' has unknown type '%s'" % (volume["name"], volume_type) ++ ) + + return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool) + +@@ -796,19 +937,19 @@ class BlivetPool(BlivetBase): + if self.ultimately_present and self.__class__.blivet_device_class is not None: + packages.extend(self.__class__.blivet_device_class._packages) + +- if self._pool.get('encryption'): +- packages.extend(get_format('luks').packages) ++ if self._pool.get("encryption"): ++ packages.extend(get_format("luks").packages) + + return packages + + @property + def ultimately_present(self): + """ Should this pool be present when we are finished? 
""" +- return self._pool.get('state', 'present') == 'present' ++ return self._pool.get("state", "present") == "present" + + @property + def _is_raid(self): +- return self._pool.get('raid_level') not in [None, "null", ""] ++ return self._pool.get("raid_level") not in [None, "null", ""] + + def _member_management_is_destructive(self): + return False +@@ -849,25 +990,30 @@ class BlivetPool(BlivetBase): + if self._disks: + return + +- if not self._device and not self._pool['disks']: +- raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) +- elif not isinstance(self._pool['disks'], list): ++ if not self._device and not self._pool["disks"]: ++ raise BlivetAnsibleError( ++ "no disks specified for pool '%s'" % self._pool["name"] ++ ) ++ elif not isinstance(self._pool["disks"], list): + raise BlivetAnsibleError("pool disks must be specified as a list") + + disks = list() +- for spec in self._pool['disks']: ++ for spec in self._pool["disks"]: + device = self._blivet.devicetree.resolve_device(spec) + if device is not None: # XXX fail if any disk isn't resolved? + disks.append(device) + +- if self._pool['disks'] and not self._device and not disks: +- raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks'])) ++ if self._pool["disks"] and not self._device and not disks: ++ raise BlivetAnsibleError( ++ "unable to resolve any disks specified for pool '%s' (%s)" ++ % (self._pool["name"], self._pool["disks"]) ++ ) + + self._disks = disks + + def _look_up_device(self): + """ Look up the pool in blivet's device tree. """ +- device = self._blivet.devicetree.resolve_device(self._pool['name']) ++ device = self._blivet.devicetree.resolve_device(self._pool["name"]) + if device is None: + return + +@@ -895,45 +1041,62 @@ class BlivetPool(BlivetBase): + """ Return True if param_name's value was retrieved from a looked-up device. """ + # We wouldn't have the pool device if the member devices weren't unlocked, so we do not + # have to consider the case where the devices are unlocked like we do for volumes. 
+- encrypted = bool(self._device.parents) and all("luks" in d.type for d in self._device.parents) +- raid = len(self._device.parents) == 1 and hasattr(self._device.parents[0].raw_device, 'level') ++ encrypted = bool(self._device.parents) and all( ++ "luks" in d.type for d in self._device.parents ++ ) ++ raid = len(self._device.parents) == 1 and hasattr( ++ self._device.parents[0].raw_device, "level" ++ ) + log.debug("BlivetPool._update_from_device: %s", self._device) + +- if param_name == 'disks': +- self._pool['disks'] = [d.name for d in self._device.disks] +- elif param_name == 'encryption': +- self._pool['encryption'] = encrypted +- elif param_name == 'encryption_key_size' and encrypted: +- self._pool['encryption_key_size'] = self._device.parents[0].parents[0].format.key_size +- elif param_name == 'encryption_key_file' and encrypted: +- self._pool['encryption_key_file'] = self._device.parents[0].parents[0].format.key_file +- elif param_name == 'encryption_cipher' and encrypted: +- self._pool['encryption_cipher'] = self._device.parents[0].parents[0].format.cipher +- elif param_name == 'encryption_luks_version' and encrypted: +- self._pool['encryption_luks_version'] = self._device.parents[0].parents[0].format.luks_version +- elif param_name == 'raid_level' and raid: +- self._pool['raid_level'] = self._device.parents[0].raw_device.level.name +- elif param_name == 'raid_chunk_size' and raid: +- self._pool['raid_chunk_size'] = str(self._device.parents[0].raw_device.chunk_size) +- elif param_name == 'raid_device_count' and raid: +- self._pool['raid_device_count'] = self._device.parents[0].raw_device.member_devices +- elif param_name == 'raid_spare_count' and raid: +- self._pool['raid_spare_count'] = self._device.parents[0].raw_device.spares +- elif param_name == 'raid_metadata_version' and raid: +- self._pool['raid_metadata_version'] = self._device.parents[0].raw_device.metadata_version ++ if param_name == "disks": ++ self._pool["disks"] = [d.name for d in self._device.disks] ++ elif param_name == "encryption": ++ self._pool["encryption"] = encrypted ++ elif param_name == "encryption_key_size" and encrypted: ++ self._pool["encryption_key_size"] = ( ++ self._device.parents[0].parents[0].format.key_size ++ ) ++ elif param_name == "encryption_key_file" and encrypted: ++ self._pool["encryption_key_file"] = ( ++ self._device.parents[0].parents[0].format.key_file ++ ) ++ elif param_name == "encryption_cipher" and encrypted: ++ self._pool["encryption_cipher"] = ( ++ self._device.parents[0].parents[0].format.cipher ++ ) ++ elif param_name == "encryption_luks_version" and encrypted: ++ self._pool["encryption_luks_version"] = ( ++ self._device.parents[0].parents[0].format.luks_version ++ ) ++ elif param_name == "raid_level" and raid: ++ self._pool["raid_level"] = self._device.parents[0].raw_device.level.name ++ elif param_name == "raid_chunk_size" and raid: ++ self._pool["raid_chunk_size"] = str( ++ self._device.parents[0].raw_device.chunk_size ++ ) ++ elif param_name == "raid_device_count" and raid: ++ self._pool["raid_device_count"] = self._device.parents[ ++ 0 ++ ].raw_device.member_devices ++ elif param_name == "raid_spare_count" and raid: ++ self._pool["raid_spare_count"] = self._device.parents[0].raw_device.spares ++ elif param_name == "raid_metadata_version" and raid: ++ self._pool["raid_metadata_version"] = self._device.parents[ ++ 0 ++ ].raw_device.metadata_version + else: + return False + + return True + +- + def _apply_defaults(self): + global pool_defaults + for name, default in 
pool_defaults.items(): + if name in self._pool: + continue + +- default = None if default in ('none', 'None', 'null') else default ++ default = None if default in ("none", "None", "null") else default + + if self._device: + if not self._update_from_device(name): +@@ -948,14 +1111,19 @@ class BlivetPool(BlivetBase): + for disk in self._disks: + if not disk.isleaf or disk.format.type is not None: + if safe_mode: +- raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" ++ % (disk.name, self._pool["name"]) ++ ) + else: + self._blivet.devicetree.recursive_remove(disk) + + if use_partitions: + label = get_format("disklabel", device=disk.path) + self._blivet.format_device(disk, label) +- member = self._blivet.new_partition(parents=[disk], size=Size("256MiB"), grow=True) ++ member = self._blivet.new_partition( ++ parents=[disk], size=Size("256MiB"), grow=True ++ ) + self._blivet.create_device(member) + else: + member = disk +@@ -966,9 +1134,8 @@ class BlivetPool(BlivetBase): + self._blivet.format_device(member, self._get_format()) + members.append(member) + +- + if self._is_raid: +- raid_name = "%s-1" % self._pool['name'] ++ raid_name = "%s-1" % self._pool["name"] + + raid_array = self._new_mdarray(members, raid_name=raid_name) + +@@ -981,14 +1148,15 @@ class BlivetPool(BlivetBase): + try: + do_partitioning(self._blivet) + except Exception: +- raise BlivetAnsibleError("failed to allocate partitions for pool '%s'" % self._pool['name']) ++ raise BlivetAnsibleError( ++ "failed to allocate partitions for pool '%s'" % self._pool["name"] ++ ) + + return result + +- + def _get_volumes(self): + """ Set up BlivetVolume instances for this pool's volumes. 
""" +- for volume in self._pool.get('volumes', []): ++ for volume in self._pool.get("volumes", []): + bvolume = _get_blivet_volume(self._blivet, volume, self) + self._blivet_volumes.append(bvolume) + +@@ -1013,7 +1181,10 @@ class BlivetPool(BlivetBase): + return + elif self._member_management_is_destructive(): + if safe_mode: +- raise BlivetAnsibleError("cannot remove and recreate existing pool '%s' in safe mode" % self._pool['name']) ++ raise BlivetAnsibleError( ++ "cannot remove and recreate existing pool '%s' in safe mode" ++ % self._pool["name"] ++ ) + else: + self._destroy() + +@@ -1031,15 +1202,22 @@ class BlivetPartitionPool(BlivetPool): + self._device = self._disks[0] + + def _create(self): +- if self._device.format.type != "disklabel" or \ +- (disklabel_type and self._device.format.label_type != disklabel_type): ++ if self._device.format.type != "disklabel" or ( ++ disklabel_type and self._device.format.label_type != disklabel_type ++ ): + if safe_mode: +- raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' " +- "(pool '%s') in safe mode" % (self._device.name, self._pool['name'])) ++ raise BlivetAnsibleError( ++ "cannot remove existing formatting and/or devices on disk '%s' " ++ "(pool '%s') in safe mode" % (self._device.name, self._pool["name"]) ++ ) + else: +- self._blivet.devicetree.recursive_remove(self._device, remove_device=False) ++ self._blivet.devicetree.recursive_remove( ++ self._device, remove_device=False ++ ) + +- label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) ++ label = get_format( ++ "disklabel", device=self._device.path, label_type=disklabel_type ++ ) + self._blivet.format_device(self._device, label) + + +@@ -1053,9 +1231,13 @@ class BlivetLVMPool(BlivetPool): + if self._device is None: + return False + +- if self._pool['encryption'] and not all(m.encrypted for m in self._device.parents): ++ if self._pool["encryption"] and not all( ++ m.encrypted for m in self._device.parents ++ ): + return True +- elif not self._pool['encryption'] and any(m.encrypted for m in self._device.parents): ++ elif not self._pool["encryption"] and any( ++ m.encrypted for m in self._device.parents ++ ): + return True + + return False +@@ -1080,49 +1262,50 @@ class BlivetLVMPool(BlivetPool): + + members = self._manage_encryption(self._create_members()) + try: +- pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members) ++ pool_device = self._blivet.new_vg(name=self._pool["name"], parents=members) + except Exception as e: +- raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e))) ++ raise BlivetAnsibleError( ++ "failed to set up pool '%s': %s" % (self._pool["name"], str(e)) ++ ) + + self._blivet.create_device(pool_device) + self._device = pool_device + + +-_BLIVET_POOL_TYPES = { +- "partition": BlivetPartitionPool, +- "lvm": BlivetLVMPool +-} ++_BLIVET_POOL_TYPES = {"partition": BlivetPartitionPool, "lvm": BlivetLVMPool} + + + def _get_blivet_pool(blivet_obj, pool): + """ Return an appropriate BlivetPool instance for the pool dict. 
""" +- if 'type' not in pool: ++ if "type" not in pool: + global pool_defaults +- pool['type'] = pool_defaults['type'] ++ pool["type"] = pool_defaults["type"] + +- if pool['type'] not in _BLIVET_POOL_TYPES: +- raise BlivetAnsibleError("Pool '%s' has unknown type '%s'" % (pool['name'], pool['type'])) ++ if pool["type"] not in _BLIVET_POOL_TYPES: ++ raise BlivetAnsibleError( ++ "Pool '%s' has unknown type '%s'" % (pool["name"], pool["type"]) ++ ) + +- return _BLIVET_POOL_TYPES[pool['type']](blivet_obj, pool) ++ return _BLIVET_POOL_TYPES[pool["type"]](blivet_obj, pool) + + + def manage_volume(b, volume): + """ Schedule actions as needed to manage a single standalone volume. """ + bvolume = _get_blivet_volume(b, volume) + bvolume.manage() +- volume['_device'] = bvolume._volume.get('_device', '') +- volume['_raw_device'] = bvolume._volume.get('_raw_device', '') +- volume['_mount_id'] = bvolume._volume.get('_mount_id', '') ++ volume["_device"] = bvolume._volume.get("_device", "") ++ volume["_raw_device"] = bvolume._volume.get("_raw_device", "") ++ volume["_mount_id"] = bvolume._volume.get("_mount_id", "") + + + def manage_pool(b, pool): + """ Schedule actions as needed to manage a single pool and its volumes. """ + bpool = _get_blivet_pool(b, pool) + bpool.manage() +- for (volume, bvolume) in zip(pool['volumes'], bpool._blivet_volumes): +- volume['_device'] = bvolume._volume.get('_device', '') +- volume['_raw_device'] = bvolume._volume.get('_raw_device', '') +- volume['_mount_id'] = bvolume._volume.get('_mount_id', '') ++ for (volume, bvolume) in zip(pool["volumes"], bpool._blivet_volumes): ++ volume["_device"] = bvolume._volume.get("_device", "") ++ volume["_raw_device"] = bvolume._volume.get("_raw_device", "") ++ volume["_mount_id"] = bvolume._volume.get("_mount_id", "") + + + class FSTab(object): +@@ -1141,7 +1324,7 @@ class FSTab(object): + if self._entries: + self.reset() + +- for line in open('/etc/fstab').readlines(): ++ for line in open("/etc/fstab").readlines(): + if line.lstrip().startswith("#"): + continue + +@@ -1150,23 +1333,27 @@ class FSTab(object): + continue + + device = self._blivet.devicetree.resolve_device(fields[0]) +- self._entries.append(dict(device_id=fields[0], +- device_path=getattr(device, 'path', None), +- fs_type=fields[2], +- mount_point=fields[1], +- mount_options=fields[3])) ++ self._entries.append( ++ dict( ++ device_id=fields[0], ++ device_path=getattr(device, "path", None), ++ fs_type=fields[2], ++ mount_point=fields[1], ++ mount_options=fields[3], ++ ) ++ ) + + + def get_mount_info(pools, volumes, actions, fstab): +- """ Return a list of argument dicts to pass to the mount module to manage mounts. ++ """Return a list of argument dicts to pass to the mount module to manage mounts. + +- The overall approach is to remove existing mounts associated with file systems +- we are removing and those with changed mount points, re-adding them with the +- new mount point later. ++ The overall approach is to remove existing mounts associated with file systems ++ we are removing and those with changed mount points, re-adding them with the ++ new mount point later. + +- Removed mounts go directly into the mount_info list, which is the return value, +- while added/active mounts to a list that gets appended to the mount_info list +- at the end to ensure that removals happen first. 
++ Removed mounts go directly into the mount_info list, which is the return value, ++ while added/active mounts to a list that gets appended to the mount_info list ++ at the end to ensure that removals happen first. + """ + mount_info = list() + mount_vols = list() +@@ -1174,33 +1361,50 @@ def get_mount_info(pools, volumes, actions, fstab): + # account for mounts removed by removing or reformatting volumes + if actions: + for action in actions: +- if action.is_destroy and action.is_format and action.format.type is not None: +- mount = fstab.lookup('device_path', action.device.path) ++ if ( ++ action.is_destroy ++ and action.is_format ++ and action.format.type is not None ++ ): ++ mount = fstab.lookup("device_path", action.device.path) + if mount is not None: +- mount_info.append({"src": mount['device_id'], "path": mount['mount_point'], +- 'state': 'absent', 'fstype': mount['fs_type']}) ++ mount_info.append( ++ { ++ "src": mount["device_id"], ++ "path": mount["mount_point"], ++ "state": "absent", ++ "fstype": mount["fs_type"], ++ } ++ ) + + def handle_new_mount(volume, fstab): + replace = None + mounted = False + +- mount = fstab.lookup('device_path', volume['_device']) +- if (volume['mount_point'] and volume['mount_point'].startswith('/')) \ +- or volume['fs_type'] == 'swap': ++ mount = fstab.lookup("device_path", volume["_device"]) ++ if (volume["mount_point"] and volume["mount_point"].startswith("/")) or volume[ ++ "fs_type" ++ ] == "swap": + mounted = True + + # handle removal of existing mounts of this volume +- if mount and mount['fs_type'] != 'swap' and mount['mount_point'] != volume['mount_point']: +- replace = dict(path=mount['mount_point'], state="absent") +- elif mount and mount['fs_type'] == 'swap': +- replace = dict(src=mount['device_id'], fstype="swap", path="none", state="absent") ++ if ( ++ mount ++ and mount["fs_type"] != "swap" ++ and mount["mount_point"] != volume["mount_point"] ++ ): ++ replace = dict(path=mount["mount_point"], state="absent") ++ elif mount and mount["fs_type"] == "swap": ++ replace = dict( ++ src=mount["device_id"], fstype="swap", path="none", state="absent" ++ ) + + return mounted, replace + + # account for mounts that we set up or are replacing in pools + for pool in pools: +- for volume in pool['volumes']: +- if pool['state'] == 'present' and volume['state'] == 'present': ++ for volume in pool["volumes"]: ++ if pool["state"] == "present" and volume["state"] == "present": + mounted, replace = handle_new_mount(volume, fstab) + if replace: + mount_info.append(replace) +@@ -1209,7 +1413,7 @@ def get_mount_info(pools, volumes, actions, fstab): + + # account for mounts that we set up or are replacing in standalone volumes + for volume in volumes: +- if volume['state'] == 'present': ++ if volume["state"] == "present": + mounted, replace = handle_new_mount(volume, fstab) + if replace: + mount_info.append(replace) +@@ -1217,13 +1421,19 @@ def get_mount_info(pools, volumes, actions, fstab): + mount_vols.append(volume) + + for volume in mount_vols: +- mount_info.append({'src': volume['_mount_id'], +- 'path': volume['mount_point'] if volume['fs_type'] != "swap" else "none", +- 'fstype': volume['fs_type'], +- 'opts': volume['mount_options'], +- 'dump': volume['mount_check'], +- 'passno': volume['mount_passno'], +- 'state': 'mounted' if volume['fs_type'] != "swap" else "present"}) ++ mount_info.append( ++ { ++ "src": volume["_mount_id"], ++ "path": volume["mount_point"] ++ if volume["fs_type"] != "swap" ++ else "none", ++ "fstype": volume["fs_type"], ++ "opts": 
volume["mount_options"], ++ "dump": volume["mount_check"], ++ "passno": volume["mount_passno"], ++ "state": "mounted" if volume["fs_type"] != "swap" else "present", ++ } ++ ) + + return mount_info + +@@ -1231,15 +1441,19 @@ def get_mount_info(pools, volumes, actions, fstab): + def get_crypt_info(actions): + info = list() + for action in actions: +- if not (action.is_format and action.format.type == 'luks'): ++ if not (action.is_format and action.format.type == "luks"): + continue + +- info.append(dict(backing_device=action.device.path, +- name=action.format.map_name, +- password=action.format.key_file or '-', +- state='present' if action.is_create else 'absent')) ++ info.append( ++ dict( ++ backing_device=action.device.path, ++ name=action.format.map_name, ++ password=action.format.key_file or "-", ++ state="present" if action.is_create else "absent", ++ ) ++ ) + +- return sorted(info, key=lambda e: e['state']) ++ return sorted(info, key=lambda e: e["state"]) + + + def get_required_packages(b, pools, volumes): +@@ -1259,66 +1473,70 @@ def get_required_packages(b, pools, volumes): + + + def update_fstab_identifiers(b, pools, volumes): +- """ Update fstab device identifiers. ++ """Update fstab device identifiers. + +- This is to pick up new UUIDs for newly-formatted devices. ++ This is to pick up new UUIDs for newly-formatted devices. + """ + all_volumes = volumes[:] + for pool in pools: +- if not pool['state'] == 'present': ++ if not pool["state"] == "present": + continue + +- all_volumes += pool['volumes'] ++ all_volumes += pool["volumes"] + + for volume in all_volumes: +- if volume['state'] == 'present': +- device = b.devicetree.resolve_device(volume['_mount_id']) +- if device is None and volume['encryption']: +- device = b.devicetree.resolve_device(volume['_raw_device']) ++ if volume["state"] == "present": ++ device = b.devicetree.resolve_device(volume["_mount_id"]) ++ if device is None and volume["encryption"]: ++ device = b.devicetree.resolve_device(volume["_raw_device"]) + if device is not None and not device.isleaf: + device = device.children[0] +- volume['_device'] = device.path ++ volume["_device"] = device.path + + if device is None: +- raise BlivetAnsibleError("failed to look up device for volume %s (%s/%s)" % (volume['name'], volume['_device'], volume['_mount_id'])) +- volume['_mount_id'] = device.fstab_spec +- if device.format.type == 'swap': ++ raise BlivetAnsibleError( ++ "failed to look up device for volume %s (%s/%s)" ++ % (volume["name"], volume["_device"], volume["_mount_id"]) ++ ) ++ volume["_mount_id"] = device.fstab_spec ++ if device.format.type == "swap": + device.format.setup() + + if device.status: +- volume['_kernel_device'] = os.path.realpath(device.path) ++ volume["_kernel_device"] = os.path.realpath(device.path) + if device.raw_device.status: +- volume['_raw_kernel_device'] = os.path.realpath(device.raw_device.path) ++ volume["_raw_kernel_device"] = os.path.realpath(device.raw_device.path) + + + def activate_swaps(b, pools, volumes): + """ Activate all swaps specified as present. 
""" + all_volumes = volumes[:] + for pool in pools: +- if not pool['state'] == 'present': ++ if not pool["state"] == "present": + continue + +- all_volumes += pool['volumes'] ++ all_volumes += pool["volumes"] + + for volume in all_volumes: +- if volume['state'] == 'present': +- device = b.devicetree.resolve_device(volume['_mount_id']) +- if device.format.type == 'swap': ++ if volume["state"] == "present": ++ device = b.devicetree.resolve_device(volume["_mount_id"]) ++ if device.format.type == "swap": + device.format.setup() + + + def run_module(): + # available arguments/parameters that a user can pass + module_args = dict( +- pools=dict(type='list'), +- volumes=dict(type='list'), +- packages_only=dict(type='bool', required=False, default=False), +- disklabel_type=dict(type='str', required=False, default=None), +- safe_mode=dict(type='bool', required=False, default=True), +- pool_defaults=dict(type='dict', required=False), +- volume_defaults=dict(type='dict', required=False), +- use_partitions=dict(type='bool', required=False, default=True), +- diskvolume_mkfs_option_map=dict(type='dict', required=False, default={})) ++ pools=dict(type="list"), ++ volumes=dict(type="list"), ++ packages_only=dict(type="bool", required=False, default=False), ++ disklabel_type=dict(type="str", required=False, default=None), ++ safe_mode=dict(type="bool", required=False, default=True), ++ pool_defaults=dict(type="dict", required=False), ++ volume_defaults=dict(type="dict", required=False), ++ use_partitions=dict(type="bool", required=False, default=True), ++ diskvolume_mkfs_option_map=dict(type="dict", required=False, default={}), ++ ) + + # seed the result dict in the object + result = dict( +@@ -1332,47 +1550,52 @@ def run_module(): + packages=list(), + ) + +- module = AnsibleModule(argument_spec=module_args, +- supports_check_mode=True) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + if not BLIVET_PACKAGE: +- module.fail_json(msg="Failed to import the blivet or blivet3 Python modules", +- exception=inspect.cleandoc(""" ++ module.fail_json( ++ msg="Failed to import the blivet or blivet3 Python modules", ++ exception=inspect.cleandoc( ++ """ + blivet3 exception: + {} + blivet exception: +- {}""").format(LIB_IMP_ERR3, LIB_IMP_ERR)) ++ {}""" ++ ).format(LIB_IMP_ERR3, LIB_IMP_ERR), ++ ) + +- if not module.params['pools'] and not module.params['volumes']: ++ if not module.params["pools"] and not module.params["volumes"]: + module.exit_json(**result) + + global disklabel_type +- disklabel_type = module.params['disklabel_type'] ++ disklabel_type = module.params["disklabel_type"] + + global use_partitions +- use_partitions = module.params['use_partitions'] ++ use_partitions = module.params["use_partitions"] + + global safe_mode +- safe_mode = module.params['safe_mode'] ++ safe_mode = module.params["safe_mode"] + + global diskvolume_mkfs_option_map +- diskvolume_mkfs_option_map = module.params['diskvolume_mkfs_option_map'] ++ diskvolume_mkfs_option_map = module.params["diskvolume_mkfs_option_map"] + + global pool_defaults +- if 'pool_defaults' in module.params: +- pool_defaults = module.params['pool_defaults'] ++ if "pool_defaults" in module.params: ++ pool_defaults = module.params["pool_defaults"] + + global volume_defaults +- if 'volume_defaults' in module.params: +- volume_defaults = module.params['volume_defaults'] ++ if "volume_defaults" in module.params: ++ volume_defaults = module.params["volume_defaults"] + + b = Blivet() + b.reset() + fstab = FSTab(b) + actions = list() + +- 
if module.params['packages_only']: ++ if module.params["packages_only"]: + try: +- result['packages'] = get_required_packages(b, module.params['pools'], module.params['volumes']) ++ result["packages"] = get_required_packages( ++ b, module.params["pools"], module.params["volumes"] ++ ) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + module.exit_json(**result) +@@ -1388,44 +1611,56 @@ def run_module(): + sys_path = action.device.path + if os.path.islink(sys_path): + sys_path = os.readlink(action.device.path) +- trigger(action='change', subsystem='block', name=os.path.basename(sys_path)) ++ trigger(action="change", subsystem="block", name=os.path.basename(sys_path)) + + def action_dict(action): +- return dict(action=action.type_desc_str, +- fs_type=action.format.type if action.is_format else None, +- device=action.device.path) ++ return dict( ++ action=action.type_desc_str, ++ fs_type=action.format.type if action.is_format else None, ++ device=action.device.path, ++ ) + +- duplicates = find_duplicate_names(module.params['pools']) ++ duplicates = find_duplicate_names(module.params["pools"]) + if duplicates: +- module.fail_json(msg="multiple pools with the same name: {0}".format(",".join(duplicates)), +- **result) +- for pool in module.params['pools']: +- duplicates = find_duplicate_names(pool.get('volumes', list())) ++ module.fail_json( ++ msg="multiple pools with the same name: {0}".format(",".join(duplicates)), ++ **result ++ ) ++ for pool in module.params["pools"]: ++ duplicates = find_duplicate_names(pool.get("volumes", list())) + if duplicates: +- module.fail_json(msg="multiple volumes in pool '{0}' with the " +- "same name: {1}".format(pool['name'], ",".join(duplicates)), +- **result) ++ module.fail_json( ++ msg="multiple volumes in pool '{0}' with the " ++ "same name: {1}".format(pool["name"], ",".join(duplicates)), ++ **result ++ ) + try: + manage_pool(b, pool) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + +- duplicates = find_duplicate_names(module.params['volumes']) ++ duplicates = find_duplicate_names(module.params["volumes"]) + if duplicates: +- module.fail_json(msg="multiple volumes with the same name: {0}".format(",".join(duplicates)), +- **result) +- for volume in module.params['volumes']: ++ module.fail_json( ++ msg="multiple volumes with the same name: {0}".format(",".join(duplicates)), ++ **result ++ ) ++ for volume in module.params["volumes"]: + try: + manage_volume(b, volume) + except BlivetAnsibleError as e: + module.fail_json(msg=str(e), **result) + + scheduled = b.devicetree.actions.find() +- result['packages'] = b.packages[:] ++ result["packages"] = b.packages[:] + + for action in scheduled: +- if (action.is_destroy or action.is_resize) and action.is_format and action.format.exists and \ +- (action.format.mountable or action.format.type == "swap"): ++ if ( ++ (action.is_destroy or action.is_resize) ++ and action.is_format ++ and action.format.exists ++ and (action.format.mountable or action.format.type == "swap") ++ ): + action.format.teardown() + + if scheduled: +@@ -1433,21 +1668,27 @@ def run_module(): + callbacks.action_executed.add(record_action) + callbacks.action_executed.add(ensure_udev_update) + try: +- b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode) ++ b.devicetree.actions.process( ++ devices=b.devicetree.devices, dry_run=module.check_mode ++ ) + except Exception as e: +- module.fail_json(msg="Failed to commit changes to disk: %s" % str(e), **result) ++ 
module.fail_json( ++ msg="Failed to commit changes to disk: %s" % str(e), **result ++ ) + finally: +- result['changed'] = True +- result['actions'] = [action_dict(a) for a in actions] ++ result["changed"] = True ++ result["actions"] = [action_dict(a) for a in actions] + +- update_fstab_identifiers(b, module.params['pools'], module.params['volumes']) +- activate_swaps(b, module.params['pools'], module.params['volumes']) ++ update_fstab_identifiers(b, module.params["pools"], module.params["volumes"]) ++ activate_swaps(b, module.params["pools"], module.params["volumes"]) + +- result['mounts'] = get_mount_info(module.params['pools'], module.params['volumes'], actions, fstab) +- result['crypts'] = get_crypt_info(actions) +- result['leaves'] = [d.path for d in b.devicetree.leaves] +- result['pools'] = module.params['pools'] +- result['volumes'] = module.params['volumes'] ++ result["mounts"] = get_mount_info( ++ module.params["pools"], module.params["volumes"], actions, fstab ++ ) ++ result["crypts"] = get_crypt_info(actions) ++ result["leaves"] = [d.path for d in b.devicetree.leaves] ++ result["pools"] = module.params["pools"] ++ result["volumes"] = module.params["volumes"] + + # success - return result + module.exit_json(**result) +@@ -1456,5 +1697,6 @@ def run_module(): + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/blockdev_info.py b/library/blockdev_info.py +index 52ddd78..ca1577f 100644 +--- a/library/blockdev_info.py ++++ b/library/blockdev_info.py +@@ -1,35 +1,41 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: blockdev_info + short_description: Collect info about block devices in the system. + version_added: "2.5" + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." 
+ - "This module collects information about block devices" +-options: ++options: {} + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + - name: Get info about block devices + blockdev_info: + register: blk_info + +-''' ++""" + +-RETURN = ''' ++RETURN = """ + info: + description: dict w/ device path keys and device info dict values ++ returned: success + type: dict +-''' ++""" + + import os + import shlex +@@ -38,7 +44,7 @@ from ansible.module_utils.basic import AnsibleModule + + + LSBLK_DEVICE_TYPES = {"part": "partition"} +-DEV_MD_DIR = '/dev/md' ++DEV_MD_DIR = "/dev/md" + + + def fixup_md_path(path): +@@ -59,7 +65,9 @@ def fixup_md_path(path): + + + def get_block_info(run_cmd): +- buf = run_cmd(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"])[1] ++ buf = run_cmd( ++ ["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"] ++ )[1] + info = dict() + for line in buf.splitlines(): + dev = dict() +@@ -75,7 +83,7 @@ def get_block_info(run_cmd): + + dev[key.lower()] = LSBLK_DEVICE_TYPES.get(value, value) + if dev: +- info[dev['name']] = dev ++ info[dev["name"]] = dev + + return info + +@@ -87,13 +95,10 @@ def run_module(): + info=None, + ) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + try: +- result['info'] = get_block_info(module.run_command) ++ result["info"] = get_block_info(module.run_command) + except Exception: + module.fail_json(msg="Failed to collect block device info.") + +@@ -104,5 +109,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/library/bsize.py b/library/bsize.py +index 40442f5..524b0f9 100644 +--- a/library/bsize.py ++++ b/library/bsize.py +@@ -1,12 +1,16 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: bsize + +@@ -15,6 +19,7 @@ short_description: Module for basic manipulation with byte sizes + version_added: "2.5" + + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "Module accepts byte size strings with the units and produces strings in + form of input accepted by different storage tools" + +@@ -23,67 +28,72 @@ options: + description: + - String containing number and byte units + required: true ++ type: str + + author: +- - Jan Pokorny (japokorn@redhat.com) +-''' ++ - Jan Pokorny (@japokorn) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + # Obtain sizes in format for various tools + - name: Get 10 KiB size + bsize: + size: 10 KiB +-''' ++""" + +-RETURN = ''' ++RETURN = """ + size: + description: Size in binary format units + type: str ++ returned: success + bytes: + description: Size in bytes + type: int ++ returned: success + lvm: + description: Size in binary format. No space after the number, + first letter of unit prefix in lowercase only + type: str ++ returned: success + parted: + description: Size in binary format. 
No space after the number + type: str +-''' ++ returned: success ++""" + + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils.storage_lsr.size import Size + ++ + def run_module(): + # available arguments/parameters that a user can pass + module_args = dict( +- size=dict(type='str', required=True), ++ size=dict(type="str", required=True), + ) + + # seed the result dict in the object +- result = dict( +- changed=False +- ) ++ result = dict(changed=False) + +- module = AnsibleModule(argument_spec=module_args, +- supports_check_mode=True) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + +- size = Size(module.params['size']) ++ size = Size(module.params["size"]) + +- result['size'] = size.get(fmt="%d %sb") +- result['bytes'] = size.bytes +- result['lvm'] = size.get(fmt="%d%sb").lower()[:-2] +- result['parted'] = size.get(fmt="%d%sb") ++ result["size"] = size.get(fmt="%d %sb") ++ result["bytes"] = size.bytes ++ result["lvm"] = size.get(fmt="%d%sb").lower()[:-2] ++ result["parted"] = size.get(fmt="%d%sb") + + # use whatever logic you need to determine whether or not this module + # made any modifications to your target +- result['changed'] = False ++ result["changed"] = False + + # success - return result + module.exit_json(**result) + ++ + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py +index 0a6fc7d..c688170 100644 +--- a/library/find_unused_disk.py ++++ b/library/find_unused_disk.py +@@ -1,10 +1,15 @@ + #!/usr/bin/python + +-DOCUMENTATION = ''' ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ ++DOCUMENTATION = """ + --- + module: find_unused_disk + short_description: Gets unused disks + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - Disks are considered in ascending alphanumeric sorted order. + - Disks that meet all conditions are considered 'empty' and returned (using kernel device name) in a list. + - 1. No known signatures exist on the disk, with the exception of partition tables. +@@ -15,18 +20,18 @@ description: + - Number of returned disks defaults to first 10, but can be specified with 'max_return' argument. + author: Eda Zhou (@edamamez) + options: +- option-name: max_return +- description: Sets the maximum number of unused disks to return. +- default: 10 +- type: int +- +- option-name: min_size +- description: Specifies the minimum disk size to return an unused disk. +- default: 0 +- type: str +-''' +- +-EXAMPLES = ''' ++ max_return: ++ description: Sets the maximum number of unused disks to return. ++ default: 10 ++ type: int ++ ++ min_size: ++ description: Specifies the minimum disk size to return an unused disk. 
++ default: 0 ++ type: str ++""" ++ ++EXAMPLES = """ + - name: test finding first unused device module + hosts: localhost + tasks: +@@ -38,9 +43,9 @@ EXAMPLES = ''' + - name: dump test output + debug: + msg: '{{ testout }}' +-''' ++""" + +-RETURN = ''' ++RETURN = """ + disk_name: + description: Information about unused disks + returned: On success +@@ -50,14 +55,15 @@ disk_name: + description: Unused disk(s) that have been found + returned: On success + type: list +- samples: ["sda1", "dm-0", "dm-3"] +- ["sda"] ++ samples: | ++ ["sda1", "dm-0", "dm-3"] ++ ["sda"] + none: + description: No unused disks were found + returned: On success + type: string + sample: "Unable to find unused disk" +-''' ++""" + + + import os +@@ -68,7 +74,7 @@ from ansible.module_utils.storage_lsr.size import Size + + + SYS_CLASS_BLOCK = "/sys/class/block/" +-IGNORED_DEVICES = [re.compile(r'^/dev/nullb\d+$')] ++IGNORED_DEVICES = [re.compile(r"^/dev/nullb\d+$")] + + + def is_ignored(disk_path): +@@ -78,13 +84,13 @@ def is_ignored(disk_path): + + def no_signature(run_command, disk_path): + """Return true if no known signatures exist on the disk.""" +- signatures = run_command(['blkid', '-p', disk_path]) +- return not 'UUID' in signatures[1] ++ signatures = run_command(["blkid", "-p", disk_path]) ++ return "UUID" not in signatures[1] + + + def no_holders(disk_path): + """Return true if the disk has no holders.""" +- holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + '/holders/') ++ holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + "/holders/") + return len(holders) == 0 + + +@@ -101,36 +107,45 @@ def get_sys_name(disk_path): + if not os.path.islink(disk_path): + return os.path.basename(disk_path) + +- node_dir = '/'.join(disk_path.split('/')[:-1]) +- return os.path.normpath(node_dir + '/' + os.readlink(disk_path)) ++ node_dir = "/".join(disk_path.split("/")[:-1]) ++ return os.path.normpath(node_dir + "/" + os.readlink(disk_path)) + + + def get_partitions(disk_path): + sys_name = get_sys_name(disk_path) + partitions = list() + for filename in os.listdir(SYS_CLASS_BLOCK + sys_name): +- if re.match(sys_name + r'p?\d+$', filename): ++ if re.match(sys_name + r"p?\d+$", filename): + partitions.append(filename) + + return partitions + + + def get_disks(run_command): +- buf = run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"])[1] ++ buf = run_command( ++ ["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"] ++ )[1] + disks = dict() + for line in buf.splitlines(): + if not line: + continue + +- m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"', line) ++ m = re.search( ++ r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"', ++ line, ++ ) + if m is None: + print(line) + continue + +- if m.group('type') != "disk": ++ if m.group("type") != "disk": + continue + +- disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'), "fstype": m.group('fstype')} ++ disks[m.group("path")] = { ++ "type": m.group("type"), ++ "size": m.group("size"), ++ "fstype": m.group("fstype"), ++ } + + return disks + +@@ -138,19 +153,13 @@ def get_disks(run_command): + def run_module(): + """Create the module""" + module_args = dict( +- max_return=dict(type='int', required=False, default=10), +- min_size=dict(type='str', required=False, default=0) ++ max_return=dict(type="int", required=False, default=10), ++ min_size=dict(type="str", required=False, default=0), + ) + +- result = dict( +- changed=False, +- disks=[] +- ) 
++ result = dict(changed=False, disks=[]) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + run_command = module.run_command + +@@ -161,7 +170,7 @@ def run_module(): + if attrs["fstype"]: + continue + +- if Size(attrs["size"]).bytes < Size(module.params['min_size']).bytes: ++ if Size(attrs["size"]).bytes < Size(module.params["min_size"]).bytes: + continue + + if get_partitions(path): +@@ -173,14 +182,14 @@ def run_module(): + if not can_open(path): + continue + +- result['disks'].append(os.path.basename(path)) +- if len(result['disks']) >= module.params['max_return']: ++ result["disks"].append(os.path.basename(path)) ++ if len(result["disks"]) >= module.params["max_return"]: + break + +- if not result['disks']: +- result['disks'] = "Unable to find unused disk" ++ if not result["disks"]: ++ result["disks"] = "Unable to find unused disk" + else: +- result['disks'].sort() ++ result["disks"].sort() + + module.exit_json(**result) + +@@ -190,5 +199,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/library/lvm_gensym.py b/library/lvm_gensym.py +index 49d1822..3e0f613 100644 +--- a/library/lvm_gensym.py ++++ b/library/lvm_gensym.py +@@ -1,66 +1,75 @@ + #!/usr/bin/python + """Generates unique, default names for a volume group and logical volume""" + +-from ansible.module_utils.basic import AnsibleModule +-from ansible.module_utils import facts ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: lvm_gensym + short_description: Generate default names for lvm variables + version_added: "2.4" +-description: +- - "Module accepts two input strings consisting of a file system type and ++description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." 
++ - "Module accepts two input strings consisting of a file system type and + a mount point path, and outputs names based on system information" + options: + fs_type: + description: +- - String describing the desired file system type +- required: true ++ - String describing the desired file system type ++ required: true ++ type: str + mount: + description: +- - String describing the mount point path ++ - String describing the mount point path + required: true +-author: +- - Tim Flannagan (tflannag@redhat.com) +-''' ++ type: str ++author: ++ - Tim Flannagan (@timflannagan) ++""" + +-EXAMPLES = ''' +-- name: Generate names ++EXAMPLES = """ ++- name: Generate names + lvm_gensym: + fs_type: "{{ fs_type }}" + mount: "{{ mount_point }}" + register: lvm_results + when: lvm_vg == "" and mount_point != "" and fs_type != "" +-''' ++""" + +-RETURN = ''' ++RETURN = """ + vg_name: + description: The default generated name for an unspecified volume group + type: str +- ++ returned: success + lv_name: + description: The default generated name for an unspecified logical volume + type: str +-''' ++ returned: success ++""" ++ ++from ansible.module_utils.basic import AnsibleModule ++from ansible.module_utils import facts + + + def get_os_name(): + """Search the host file and return the name in the ID column""" +- for line in open('/etc/os-release').readlines(): +- if not line.find('ID='): ++ for line in open("/etc/os-release").readlines(): ++ if not line.find("ID="): + os_name = line[3:] + break + +- os_name = os_name.replace('\n', '').replace('"', '') ++ os_name = os_name.replace("\n", "").replace('"', "") + return os_name + ++ + def name_is_unique(name, used_names): + """Check if name is contained in the used_names list and return boolean value""" + if name not in used_names: +@@ -68,14 +77,15 @@ def name_is_unique(name, used_names): + + return False + ++ + def get_unique_name_from_base(base_name, used_names): + """Generate a unique name given a base name and a list of used names, and return that unique name""" + counter = 0 + while not name_is_unique(base_name, used_names): + if counter == 0: +- base_name = base_name + '_' + str(counter) ++ base_name = base_name + "_" + str(counter) + else: +- base_name = base_name[:-2] + '_' + str(counter) ++ base_name = base_name[:-2] + "_" + str(counter) + counter += 1 + + return base_name +@@ -83,8 +93,8 @@ def get_unique_name_from_base(base_name, used_names): + + def get_vg_name_base(host_name, os_name): + """Return a base name for a volume group based on the host and os names""" +- if host_name != None and len(host_name) != 0: +- vg_default = os_name + '_' + host_name ++ if host_name is not None and len(host_name) != 0: ++ vg_default = os_name + "_" + host_name + else: + vg_default = os_name + +@@ -93,65 +103,68 @@ def get_vg_name_base(host_name, os_name): + + def get_vg_name(host_name, lvm_facts): + """Generate a base volume group name, verify its uniqueness, and return that unique name""" +- used_vg_names = lvm_facts['vgs'].keys() ++ used_vg_names = lvm_facts["vgs"].keys() + os_name = get_os_name() + name = get_vg_name_base(host_name, os_name) + + return get_unique_name_from_base(name, used_vg_names) + ++ + def get_lv_name_base(fs_type, mount_point): + """Return a logical volume base name using given parameters""" +- if 'swap' in fs_type.lower(): +- lv_default = 'swap' +- elif mount_point.startswith('/'): +- if mount_point == '/': +- lv_default = 'root' ++ if "swap" in fs_type.lower(): ++ lv_default = "swap" ++ elif mount_point.startswith("/"): ++ if mount_point 
== "/": ++ lv_default = "root" + else: +- lv_default = mount_point[1:].replace('/', '_') ++ lv_default = mount_point[1:].replace("/", "_") + else: +- lv_default = 'lv' ++ lv_default = "lv" + + return lv_default + + + def get_lv_name(fs_type, mount_point, lvm_facts): + """Return a unique logical volume name based on specified file system type, mount point, and system facts""" +- used_lv_names = lvm_facts['lvs'].keys() ++ used_lv_names = lvm_facts["lvs"].keys() + name = get_lv_name_base(fs_type, mount_point) + + return get_unique_name_from_base(name, used_lv_names) + ++ + def run_module(): + """Setup and initialize all relevant ansible module data""" + module_args = dict( +- mount=dict(type='str', required=True), +- fs_type=dict(type='str', required=True) ++ mount=dict(type="str", required=True), fs_type=dict(type="str", required=True) + ) + +- result = dict( +- changed=False, +- vg_name='', +- lv_name='' +- ) ++ result = dict(changed=False, vg_name="", lv_name="") + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + +- lvm_facts = facts.ansible_facts(module)['lvm'] +- host_name = facts.ansible_facts(module)['nodename'].lower().replace('.', '_').replace('-', '_') ++ lvm_facts = facts.ansible_facts(module)["lvm"] ++ host_name = ( ++ facts.ansible_facts(module)["nodename"] ++ .lower() ++ .replace(".", "_") ++ .replace("-", "_") ++ ) + +- result['lv_name'] = get_lv_name(module.params['fs_type'], module.params['mount'], lvm_facts) +- result['vg_name'] = get_vg_name(host_name, lvm_facts) ++ result["lv_name"] = get_lv_name( ++ module.params["fs_type"], module.params["mount"], lvm_facts ++ ) ++ result["vg_name"] = get_vg_name(host_name, lvm_facts) + +- if result['lv_name'] != '' and result['vg_name'] != '': ++ if result["lv_name"] != "" and result["vg_name"] != "": + module.exit_json(**result) + else: + module.fail_json(msg="Unable to initialize both group and volume names") + ++ + def main(): + run_module() + +-if __name__ == '__main__': ++ ++if __name__ == "__main__": + main() +diff --git a/library/resolve_blockdev.py b/library/resolve_blockdev.py +index 007bb28..df9dcb1 100644 +--- a/library/resolve_blockdev.py ++++ b/library/resolve_blockdev.py +@@ -1,17 +1,22 @@ + #!/usr/bin/python + ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + ANSIBLE_METADATA = { +- 'metadata_version': '1.1', +- 'status': ['preview'], +- 'supported_by': 'community' ++ "metadata_version": "1.1", ++ "status": ["preview"], ++ "supported_by": "community", + } + +-DOCUMENTATION = ''' ++DOCUMENTATION = """ + --- + module: resolve_blockdev + short_description: Resolve block device specification to device node path. + version_added: "2.5" + description: ++ - "WARNING: Do not use this module directly! It is only for role internal use." + - "This module accepts various forms of block device identifiers and + resolves them to the correct block device node path." 
+ options: +@@ -19,11 +24,12 @@ options: + description: + - String describing a block device + required: true ++ type: str + author: +- - David Lehman (dlehman@redhat.com) +-''' ++ - David Lehman (@dwlehman) ++""" + +-EXAMPLES = ''' ++EXAMPLES = """ + - name: Resolve device by label + resolve_blockdev: + spec: LABEL=MyData +@@ -35,13 +41,14 @@ EXAMPLES = ''' + - name: Resolve device by /dev/disk/by-id symlink name + resolve_blockdev: + spec: wwn-0x5000c5005bc37f3f +-''' ++""" + +-RETURN = ''' ++RETURN = """ + device: + description: Path to block device node + type: str +-''' ++ returned: success ++""" + + import glob + import os +@@ -52,37 +59,42 @@ from ansible.module_utils.basic import AnsibleModule + DEV_MD = "/dev/md" + DEV_MAPPER = "/dev/mapper" + SYS_CLASS_BLOCK = "/sys/class/block" +-SEARCH_DIRS = ['/dev', DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*") +-MD_KERNEL_DEV = re.compile(r'/dev/md\d+(p\d+)?$') ++SEARCH_DIRS = ["/dev", DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*") ++MD_KERNEL_DEV = re.compile(r"/dev/md\d+(p\d+)?$") + + + def resolve_blockdev(spec, run_cmd): + if "=" in spec: + device = run_cmd("blkid -t %s -o device" % spec)[1].strip() +- elif not spec.startswith('/'): ++ elif not spec.startswith("/"): + for devdir in SEARCH_DIRS: + device = "%s/%s" % (devdir, spec) + if os.path.exists(device): + break + else: +- device = '' ++ device = "" + else: + device = spec + + if not device or not os.path.exists(device): +- return '' ++ return "" + + return canonical_device(os.path.realpath(device)) + + + def _get_dm_name_from_kernel_dev(kdev): +- return open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip() ++ return ( ++ open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip() ++ ) + + + def _get_md_name_from_kernel_dev(kdev): + minor = os.minor(os.stat(kdev).st_rdev) +- return next(name for name in os.listdir(DEV_MD) +- if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor) ++ return next( ++ name ++ for name in os.listdir(DEV_MD) ++ if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor ++ ) + + + def canonical_device(device): +@@ -94,26 +106,27 @@ def canonical_device(device): + + + def run_module(): +- module_args = dict( +- spec=dict(type='str') +- ) ++ module_args = dict(spec=dict(type="str")) + + result = dict( + device=None, + ) + +- module = AnsibleModule( +- argument_spec=module_args, +- supports_check_mode=True +- ) ++ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + try: +- result['device'] = resolve_blockdev(module.params['spec'], run_cmd=module.run_command) ++ result["device"] = resolve_blockdev( ++ module.params["spec"], run_cmd=module.run_command ++ ) + except Exception: + pass + +- if not result['device'] or not os.path.exists(result['device']): +- module.fail_json(msg="The {} device spec could not be resolved".format(module.params['spec'])) ++ if not result["device"] or not os.path.exists(result["device"]): ++ module.fail_json( ++ msg="The {0} device spec could not be resolved".format( ++ module.params["spec"] ++ ) ++ ) + + module.exit_json(**result) + +@@ -122,5 +135,5 @@ def main(): + run_module() + + +-if __name__ == '__main__': ++if __name__ == "__main__": + main() +diff --git a/module_utils/storage_lsr/size.py b/module_utils/storage_lsr/size.py +index 16f3d7c..1e91faa 100644 +--- a/module_utils/storage_lsr/size.py ++++ b/module_utils/storage_lsr/size.py +@@ -1,4 +1,6 @@ +-#!/bin/python2 ++from __future__ import absolute_import, division, print_function 
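The size.py patch that follows is largely formatting; the arithmetic stays bytes = value * factor ** exponent, with factor 1000 for decimal prefixes and 1024 for binary ones, and the exponent given by the prefix's position in the tables. A standalone worked example of that rule (helper name is illustrative, not from the module):

BINARY_FACTOR = 2 ** 10   # 1024
DECIMAL_FACTOR = 10 ** 3  # 1000

def to_bytes(value, prefix, binary=True):
    # Position in the prefix table + 1 is the exponent (k/Ki -> 1, M/Mi -> 2, ...).
    prefixes = ["Ki", "Mi", "Gi", "Ti"] if binary else ["k", "M", "G", "T"]
    factor = BINARY_FACTOR if binary else DECIMAL_FACTOR
    return int(value * factor ** (prefixes.index(prefix) + 1))

assert to_bytes(4, "Gi") == 4 * 1024 ** 3             # "4 GiB"
assert to_bytes(4, "G", binary=False) == 4 * 10 ** 9  # "4 GB"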
++ ++__metaclass__ = type + + import re + +@@ -7,15 +9,20 @@ BINARY_FACTOR = 2 ** 10 + + # index of the item in the list determines the exponent for size computation + # e.g. size_in_bytes = value * (DECIMAL_FACTOR ** (index(mega)+1)) = value * (1000 ** (1+1)) +-PREFIXES_DECIMAL = [["k", "M", "G", "T", "P", "E", "Z", "Y"], +- ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"]] +-PREFIXES_BINARY = [["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], +- ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"]] ++# pylint: disable=bad-whitespace ++PREFIXES_DECIMAL = [ ++ ["k", "M", "G", "T", "P", "E", "Z", "Y"], # nopep8 ++ ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"], ++] # nopep8 ++PREFIXES_BINARY = [ ++ ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], # nopep8 ++ ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"], ++] # nopep8 + SUFFIXES = ["bytes", "byte", "B"] + ++ + class Size(object): +- ''' Class for basic manipulation of the sizes in *bytes +- ''' ++ """Class for basic manipulation of the sizes in *bytes""" + + def __init__(self, value): + raw_number, raw_units = self._parse_input(str(value)) +@@ -25,9 +32,9 @@ class Size(object): + self.units = raw_units + + def _parse_input(self, value): +- ''' splits input string into number and unit parts +- returns number part, unit part +- ''' ++ """splits input string into number and unit parts ++ returns number part, unit part ++ """ + m = re.search("^(.*?)([^0-9]*)$", value) + + raw_number = m.group(1).strip() +@@ -39,12 +46,12 @@ class Size(object): + return raw_number, raw_units + + def _parse_units(self, raw_units): +- ''' +- gets string containing size units and +- returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!) +- in the PREFIXES_* list +- If no unit is specified defaults to BINARY and Bytes +- ''' ++ """ ++ gets string containing size units and ++ returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!) 
++ in the PREFIXES_* list ++ If no unit is specified defaults to BINARY and Bytes ++ """ + + prefix = raw_units + no_suffix_flag = True +@@ -54,7 +61,7 @@ class Size(object): + for suffix in SUFFIXES: + if raw_units.lower().endswith(suffix.lower()): + no_suffix_flag = False +- prefix = raw_units[:-len(suffix)] ++ prefix = raw_units[: -len(suffix)] + break + + if prefix == "": +@@ -87,18 +94,18 @@ class Size(object): + if idx < 0 or not valid_suffix: + raise ValueError("Unable to identify unit '%s'" % raw_units) + +- return used_factor, idx+1 ++ return used_factor, idx + 1 + + def _parse_number(self, raw_number): +- ''' parse input string containing number +- return float +- ''' ++ """parse input string containing number ++ return float ++ """ + return float(raw_number) + + def _get_unit(self, factor, exponent, unit_type=0): +- ''' based on decimal or binary factor and exponent +- obtain and return correct unit +- ''' ++ """based on decimal or binary factor and exponent ++ obtain and return correct unit ++ """ + + if unit_type == 0: + suffix = "B" +@@ -112,12 +119,11 @@ class Size(object): + prefix_lst = PREFIXES_DECIMAL[unit_type] + else: + prefix_lst = PREFIXES_BINARY[unit_type] +- return prefix_lst[exponent-1] + suffix ++ return prefix_lst[exponent - 1] + suffix + + @property + def bytes(self): +- ''' returns size value in bytes as int +- ''' ++ """returns size value in bytes as int""" + return int((self.factor ** self.exponent) * self.number) + + def _format(self, format_str, factor, exponent): +@@ -129,20 +135,20 @@ class Size(object): + return result + + def get(self, units="autobin", fmt="%0.1f %sb"): +- ''' returns size value as a string with given units and format ++ """returns size value as a string with given units and format + +- "units" parameter allows to select preferred unit: +- for example "KiB" or "megabytes" +- accepted values are also: +- "autobin" (default) - uses the highest human readable unit (binary) +- "autodec" - uses the highest human readable unit (decimal) ++ "units" parameter allows to select preferred unit: ++ for example "KiB" or "megabytes" ++ accepted values are also: ++ "autobin" (default) - uses the highest human readable unit (binary) ++ "autodec" - uses the highest human readable unit (decimal) + +- "fmt" parameter allows to specify the output format: +- %sb - will be replaced with the short byte size unit (e.g. MiB) +- %lb - will be replaced with the long byte size unit (e.g. kilobytes) +- value can be formatted using standard string replacements (e.g. %d, %f) ++ "fmt" parameter allows to specify the output format: ++ %sb - will be replaced with the short byte size unit (e.g. MiB) ++ %lb - will be replaced with the long byte size unit (e.g. kilobytes) ++ value can be formatted using standard string replacements (e.g. %d, %f) + +- ''' ++ """ + + ftr = BINARY_FACTOR + if units == "autodec": +@@ -155,6 +161,8 @@ class Size(object): + exp += 1 + else: + ftr, exp = self._parse_units(units.strip()) +- value = (float(self.factor ** self.exponent) / float(ftr ** exp)) * self.number ++ value = ( ++ float(self.factor ** self.exponent) / float(ftr ** exp) ++ ) * self.number + + return self._format(fmt, ftr, exp) % value +diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh +deleted file mode 100755 +index 94d102d..0000000 +--- a/tests/setup_module_utils.sh ++++ /dev/null +@@ -1,41 +0,0 @@ +-#!/bin/bash +-# SPDX-License-Identifier: MIT +- +-set -euo pipefail +- +-if [ -n "${DEBUG:-}" ] ; then +- set -x +-fi +- +-if [ ! 
-d "${1:-}" ] ; then +- echo Either ansible is not installed, or there is no ansible/module_utils +- echo in "$1" - Skipping +- exit 0 +-fi +- +-if [ ! -d "${2:-}" ] ; then +- echo Role has no module_utils - Skipping +- exit 0 +-fi +- +-# we need absolute path for $2 +-absmoddir=$( readlink -f "$2" ) +- +-# clean up old links to module_utils +-for item in "$1"/* ; do +- if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then +- case "$lnitem" in +- *"${2}"*) rm -f "$item" ;; +- esac +- fi +-done +- +-# add new links to module_utils +-for item in "$absmoddir"/* ; do +- case "$item" in +- *__pycache__) continue;; +- *.pyc) continue;; +- esac +- bnitem=$( basename "$item" ) +- ln -s "$item" "$1/$bnitem" +-done +diff --git a/tests/test-verify-volume-device.yml b/tests/test-verify-volume-device.yml +index 3fb56a6..c7ba5ec 100644 +--- a/tests/test-verify-volume-device.yml ++++ b/tests/test-verify-volume-device.yml +@@ -23,11 +23,11 @@ + + - name: (1/2) Process volume type (set initial value) + set_fact: +- st_volume_type: "{{ storage_test_volume.type }}" ++ st_volume_type: "{{ storage_test_volume.type }}" + + - name: (2/2) Process volume type (get RAID value) + set_fact: +- st_volume_type: "{{ storage_test_volume.raid_level }}" ++ st_volume_type: "{{ storage_test_volume.raid_level }}" + when: storage_test_volume.type == "raid" + + - name: Verify the volume's device type +diff --git a/tests/test-verify-volume-md.yml b/tests/test-verify-volume-md.yml +index b21d8d2..27e8333 100644 +--- a/tests/test-verify-volume-md.yml ++++ b/tests/test-verify-volume-md.yml +@@ -9,7 +9,7 @@ + register: storage_test_mdadm + changed_when: false + +- # pre-chew regex search patterns ++ # pre-chew regex search patterns + - set_fact: + storage_test_md_active_devices_re: "{{ ('Active Devices : ' ~ storage_test_volume.raid_device_count ~ '\n')|regex_escape() }}" + when: storage_test_volume.raid_device_count is defined +diff --git a/tests/test.yml b/tests/test.yml +index 944b3cd..cb718a7 100644 +--- a/tests/test.yml ++++ b/tests/test.yml +@@ -16,7 +16,7 @@ + mount_point: '/opt/test1' + - name: bar + disks: ['vdc'] +- #state: "absent" ++ # state: "absent" + volumes: + - name: test2 + size: 8g +diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml +index 21a5788..1737036 100644 +--- a/tests/tests_create_lv_size_equal_to_vg.yml ++++ b/tests/tests_create_lv_size_equal_to_vg.yml +@@ -23,13 +23,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ lv_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ lv_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -37,12 +37,12 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: "absent" +- volumes: +- - name: test1 +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: "absent" ++ volumes: ++ - name: test1 ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml +diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml +index 351b022..567f8dd 100644 +--- a/tests/tests_create_partition_volume_then_remove.yml ++++ 
b/tests/tests_create_partition_volume_then_remove.yml +@@ -53,7 +53,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: "{{ unused_disks[0] }}" ++ - name: "{{ unused_disks[0] }}" + type: partition + disks: "{{ unused_disks }}" + state: absent +@@ -70,7 +70,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: "{{ unused_disks[0] }}" ++ - name: "{{ unused_disks[0] }}" + type: partition + disks: "{{ unused_disks }}" + state: absent +diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml +index 854ac0d..2490914 100644 +--- a/tests/tests_existing_lvm_pool.yml ++++ b/tests/tests_existing_lvm_pool.yml +@@ -20,12 +20,12 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: "{{ pool_name }}" +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" ++ storage_pools: ++ - name: "{{ pool_name }}" ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" + + - include_tasks: verify-role-results.yml + +diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml +index fb17c23..8c754a6 100644 +--- a/tests/tests_lvm_auto_size_cap.yml ++++ b/tests/tests_lvm_auto_size_cap.yml +@@ -33,12 +33,12 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ doubled_size.stdout|trim }}" ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ doubled_size.stdout|trim }}" + - name: unreachable task + fail: + msg: UNREACH +@@ -56,11 +56,11 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ test_disk_size }}" ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" + + - include_tasks: verify-role-results.yml + +@@ -69,12 +69,12 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ test_disk_size }}" ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" + + - include_tasks: verify-role-results.yml + +@@ -83,7 +83,7 @@ + name: linux-system-roles.storage + vars: + storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: [] ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: [] +diff --git a/tests/tests_lvm_one_disk_one_volume.yml b/tests/tests_lvm_one_disk_one_volume.yml +index b1096cf..6452f54 100644 +--- a/tests/tests_lvm_one_disk_one_volume.yml ++++ b/tests/tests_lvm_one_disk_one_volume.yml +@@ -19,13 +19,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -33,13 +33,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo 
++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -47,14 +47,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size }}" +- mount_point: "{{ mount_location }}" +- state: absent ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ mount_point: "{{ mount_location }}" ++ state: absent + + - include_tasks: verify-role-results.yml +diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml +index 3139bc7..afe753a 100644 +--- a/tests/tests_misc.yml ++++ b/tests/tests_misc.yml +@@ -197,7 +197,7 @@ + block: + - name: Try to mount swap filesystem to "{{ mount_location }}" + include_role: +- name: linux-system-roles.storage ++ name: linux-system-roles.storage + vars: + storage_volumes: + - name: test1 +diff --git a/tests/tests_null_raid_pool.yml b/tests/tests_null_raid_pool.yml +index 2b7b9f3..5c3c785 100644 +--- a/tests/tests_null_raid_pool.yml ++++ b/tests/tests_null_raid_pool.yml +@@ -31,9 +31,9 @@ + raid_level: "null" + state: present + volumes: +- - name: lv1 +- size: "{{ volume1_size }}" +- mount_point: "{{ mount_location1 }}" ++ - name: lv1 ++ size: "{{ volume1_size }}" ++ mount_point: "{{ mount_location1 }}" + + - name: get existing raids (after run) + command: "cat /proc/mdstat" +@@ -52,12 +52,12 @@ + raid_level: "null" + state: absent + volumes: +- - name: lv1 +- size: "{{ volume1_size }}" +- mount_point: "{{ mount_location1 }}" ++ - name: lv1 ++ size: "{{ volume1_size }}" ++ mount_point: "{{ mount_location1 }}" + + - name: compare mdstat results + assert: + that: +- - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout ++ - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout + msg: "Raid created when it should not be" +diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml +index 209d129..4fd8583 100644 +--- a/tests/tests_resize.yml ++++ b/tests/tests_resize.yml +@@ -29,16 +29,16 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- type: lvm +- volumes: +- - name: test1 +- # resizing is currently supported only for ext2/3/4 +- fs_type: 'ext4' +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: lvm ++ volumes: ++ - name: test1 ++ # resizing is currently supported only for ext2/3/4 ++ fs_type: 'ext4' ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -46,15 +46,15 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- type: lvm +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- fs_type: 'ext4' +- size: "{{ volume_size_after }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ fs_type: 'ext4' ++ size: "{{ volume_size_after }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -194,14 +194,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ 
volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -259,14 +259,14 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml + +@@ -324,13 +324,13 @@ + include_role: + name: linux-system-roles.storage + vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- state: absent +- volumes: +- - name: test1 +- size: "{{ volume_size_before }}" +- mount_point: "{{ mount_location }}" ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: ++ - name: test1 ++ size: "{{ volume_size_before }}" ++ mount_point: "{{ mount_location }}" + + - include_tasks: verify-role-results.yml +diff --git a/tests/unit/bsize_test.py b/tests/unit/bsize_test.py +index f88a9c1..fae0f5f 100644 +--- a/tests/unit/bsize_test.py ++++ b/tests/unit/bsize_test.py +@@ -1,7 +1,12 @@ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import pytest + + from storage_lsr.size import Size + ++ + def test_bsize(): + # check failure on incorrect string + with pytest.raises(ValueError) as e: +diff --git a/tests/unit/gensym_test.py b/tests/unit/gensym_test.py +index 6d164dc..fd00ddf 100644 +--- a/tests/unit/gensym_test.py ++++ b/tests/unit/gensym_test.py +@@ -1,68 +1,115 @@ + #!/usr/bin/python + """This module tests methods defined in the lvm_gensym.py module using the pytest framework""" +-import pytest +- ++from __future__ import absolute_import, division, print_function + +-import lvm_gensym ++__metaclass__ = type + ++import pytest + +-used_lv_names = ['root', 'root_0', 'root_1', 'root_2', 'root_3', 'swap_0', 'swap', 'swap_1'] + +-test_lv_names = [{'fs_type': 'ext', 'mount': '/'}, +- {'fs_type': 'zfs', 'mount': '/home/user'}, +- {'fs_type': 'swap', 'mount': ''} +- ] ++import lvm_gensym + +-used_vg_names = ['linux_host', 'rhel_user0', 'rhel_0_user'] + +-test_vg_names = ['rhel_user', 'rhel_user_0', 'rhel_user_1', +- 'rhel_user_2', 'rhel_user_3', 'linux_user', +- 'fedora_user', 'fedora_user_0', 'fedora_user_1' +- ] ++used_lv_names = [ ++ "root", ++ "root_0", ++ "root_1", ++ "root_2", ++ "root_3", ++ "swap_0", ++ "swap", ++ "swap_1", ++] ++ ++test_lv_names = [ ++ {"fs_type": "ext", "mount": "/"}, ++ {"fs_type": "zfs", "mount": "/home/user"}, ++ {"fs_type": "swap", "mount": ""}, ++] ++ ++used_vg_names = ["linux_host", "rhel_user0", "rhel_0_user"] ++ ++test_vg_names = [ ++ "rhel_user", ++ "rhel_user_0", ++ "rhel_user_1", ++ "rhel_user_2", ++ "rhel_user_3", ++ "linux_user", ++ "fedora_user", ++ "fedora_user_0", ++ "fedora_user_1", ++] ++ ++lvm_facts = { ++ "lvs": { ++ "Home": "", ++ "Swap": "", ++ "Root": "", ++ "Root_0": "", ++ "root": "", ++ "root_0": "", ++ "swap": "", ++ "swap_0": "", ++ "swap_1": "", ++ }, ++ "vgs": {"rhel_user": "", "rhel_user_0": "", "rhel_user_1": ""}, ++} + +-lvm_facts = {'lvs': {'Home': '', 'Swap': '', 'Root': '', +- 'Root_0': '', 'root': '', 'root_0': '', +- 'swap': 
'', 'swap_0': '', 'swap_1': '', +- }, +- 'vgs': {'rhel_user': '', 'rhel_user_0': '', 'rhel_user_1': ''} +- } + + def test_unique_base_name(): + """Test whether the returned name is unique using a supplied list of test names""" +- assert lvm_gensym.get_unique_name_from_base('root', used_lv_names) == 'root_4' +- assert lvm_gensym.get_unique_name_from_base('rhel_user', test_vg_names) == 'rhel_user_4' ++ assert lvm_gensym.get_unique_name_from_base("root", used_lv_names) == "root_4" ++ assert ( ++ lvm_gensym.get_unique_name_from_base("rhel_user", test_vg_names) ++ == "rhel_user_4" ++ ) ++ + + def test_return_val(): + """Verify that a supplied unique name and a list of used names returns True""" + for (index, name) in enumerate(test_vg_names): + assert lvm_gensym.name_is_unique(name[index], used_vg_names) + ++ + def test_get_base_vg_name(): + """Check generated base volume group name against the expected base name""" +- assert lvm_gensym.get_vg_name_base('hostname', 'rhel') == 'rhel_hostname' ++ assert lvm_gensym.get_vg_name_base("hostname", "rhel") == "rhel_hostname" ++ + + @pytest.mark.parametrize("os_name", ["foo", "bar", "baz"]) + def test_vg_eval(monkeypatch, os_name): + """Check generated unique volume group name against the expected name""" ++ + def get_os_name(): + return os_name + + vg_names = [os_name + "_user", os_name + "_user_0", os_name + "_user_1"] + _lvm_facts = dict(vgs=dict.fromkeys(vg_names), lvs=dict()) + monkeypatch.setattr(lvm_gensym, "get_os_name", get_os_name) +- assert lvm_gensym.get_vg_name('user', _lvm_facts) == os_name + '_user_2' +- assert lvm_gensym.get_vg_name('', _lvm_facts) == os_name ++ assert lvm_gensym.get_vg_name("user", _lvm_facts) == os_name + "_user_2" ++ assert lvm_gensym.get_vg_name("", _lvm_facts) == os_name ++ + + def test_lv_eval(): + """Test the generated unique logical volume name against the expected name""" +- expected = ['root_1', 'home_user', 'swap_2'] ++ expected = ["root_1", "home_user", "swap_2"] + + for (ctr, name_inputs) in enumerate(test_lv_names): +- assert lvm_gensym.get_lv_name(name_inputs['fs_type'], name_inputs['mount'], lvm_facts) == expected[ctr] ++ assert ( ++ lvm_gensym.get_lv_name( ++ name_inputs["fs_type"], name_inputs["mount"], lvm_facts ++ ) ++ == expected[ctr] ++ ) ++ + + def test_get_base_lv_name(): + """Test the generated base logical volume name against the expected name""" +- expected = ['root', 'home_user', 'swap'] ++ expected = ["root", "home_user", "swap"] + + for (ctr, names_input) in enumerate(test_lv_names): +- assert lvm_gensym.get_lv_name_base(names_input['fs_type'], names_input['mount']) == expected[ctr] ++ assert ( ++ lvm_gensym.get_lv_name_base(names_input["fs_type"], names_input["mount"]) ++ == expected[ctr] ++ ) +diff --git a/tests/unit/resolve_blockdev_test.py b/tests/unit/resolve_blockdev_test.py +index 0eafe7b..ad50628 100644 +--- a/tests/unit/resolve_blockdev_test.py ++++ b/tests/unit/resolve_blockdev_test.py +@@ -1,3 +1,6 @@ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type + + import os + import pytest +@@ -5,73 +8,80 @@ import pytest + import resolve_blockdev + + +-blkid_data = [('LABEL=target', '/dev/sdx3'), +- ('UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5', '/dev/sdaz'), +- ('LABEL=missing', '')] ++blkid_data = [ ++ ("LABEL=target", "/dev/sdx3"), ++ ("UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5", "/dev/sdaz"), ++ ("LABEL=missing", ""), ++] + +-path_data = ['/dev/md/unreal', +- '/dev/mapper/fakevg-fakelv', +- '/dev/adisk', +- '/dev/disk/by-id/wwn-0x123456789abc'] 
++path_data = [ ++ "/dev/md/unreal", ++ "/dev/mapper/fakevg-fakelv", ++ "/dev/adisk", ++ "/dev/disk/by-id/wwn-0x123456789abc", ++] + +-canonical_paths = {"/dev/sda": "/dev/sda", +- "/dev/dm-3": "/dev/mapper/vg_system-lv_data", +- "/dev/md127": "/dev/md/userdb", +- "/dev/notfound": ""} ++canonical_paths = { ++ "/dev/sda": "/dev/sda", ++ "/dev/dm-3": "/dev/mapper/vg_system-lv_data", ++ "/dev/md127": "/dev/md/userdb", ++ "/dev/notfound": "", ++} + + +-@pytest.mark.parametrize('spec,device', blkid_data) ++@pytest.mark.parametrize("spec,device", blkid_data) + def test_key_value_pair(spec, device, monkeypatch): + def run_cmd(args): + for _spec, _dev in blkid_data: + if _spec in args: + break + else: +- _dev = '' +- return (0, _dev, '') ++ _dev = "" ++ return (0, _dev, "") + +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(spec, run_cmd) == device + + +-@pytest.mark.parametrize('name', [os.path.basename(p) for p in path_data]) ++@pytest.mark.parametrize("name", [os.path.basename(p) for p in path_data]) + def test_device_names(name, monkeypatch): + """ Test return values for basename specs, assuming all paths are real. """ ++ + def path_exists(path): + return next((data for data in path_data if data == path), False) + +- expected = next((data for data in path_data if os.path.basename(data) == name), '') +- monkeypatch.setattr(os.path, 'exists', path_exists) ++ expected = next((data for data in path_data if os.path.basename(data) == name), "") ++ monkeypatch.setattr(os.path, "exists", path_exists) + assert resolve_blockdev.resolve_blockdev(name, None) == expected + + + def test_device_name(monkeypatch): +- assert os.path.exists('/dev/xxx') is False ++ assert os.path.exists("/dev/xxx") is False + +- monkeypatch.setattr(os.path, 'exists', lambda p: True) +- assert resolve_blockdev.resolve_blockdev('xxx', None) == '/dev/xxx' ++ monkeypatch.setattr(os.path, "exists", lambda p: True) ++ assert resolve_blockdev.resolve_blockdev("xxx", None) == "/dev/xxx" + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev('xxx', None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev("xxx", None) == "" + + + def test_full_path(monkeypatch): + path = "/dev/idonotexist" +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(path, None) == path + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev(path, None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev(path, None) == "" + + path = "/dev/disk/by-label/alabel" +- monkeypatch.setattr(os.path, 'exists', lambda p: True) ++ monkeypatch.setattr(os.path, "exists", lambda p: True) + assert resolve_blockdev.resolve_blockdev(path, None) == path + +- monkeypatch.setattr(os.path, 'exists', lambda p: False) +- assert resolve_blockdev.resolve_blockdev(path, None) == '' ++ monkeypatch.setattr(os.path, "exists", lambda p: False) ++ assert resolve_blockdev.resolve_blockdev(path, None) == "" + + +-@pytest.mark.parametrize('device', list(canonical_paths.keys())) ++@pytest.mark.parametrize("device", list(canonical_paths.keys())) + def test_canonical_path(device, monkeypatch): + def _get_name(device): + name = os.path.basename(canonical_paths[device]) +@@ -79,8 
+89,8 @@ def test_canonical_path(device, monkeypatch): + raise Exception("failed to find name") + return name + +- monkeypatch.setattr(resolve_blockdev, '_get_dm_name_from_kernel_dev', _get_name) +- monkeypatch.setattr(resolve_blockdev, '_get_md_name_from_kernel_dev', _get_name) ++ monkeypatch.setattr(resolve_blockdev, "_get_dm_name_from_kernel_dev", _get_name) ++ monkeypatch.setattr(resolve_blockdev, "_get_md_name_from_kernel_dev", _get_name) + + canonical = canonical_paths[device] + if canonical: +diff --git a/tests/unit/test_unused_disk.py b/tests/unit/test_unused_disk.py +index a4339c4..493b4b0 100644 +--- a/tests/unit/test_unused_disk.py ++++ b/tests/unit/test_unused_disk.py +@@ -1,72 +1,91 @@ ++from __future__ import absolute_import, division, print_function ++ ++__metaclass__ = type ++ + import pytest + import find_unused_disk + import os + + +-blkid_data_pttype = [('/dev/sdx', '/dev/sdx: PTTYPE=\"dos\"'), +- ('/dev/sdy', '/dev/sdy: PTTYPE=\"test\"')] ++blkid_data_pttype = [ ++ ("/dev/sdx", '/dev/sdx: PTTYPE="dos"'), ++ ("/dev/sdy", '/dev/sdy: PTTYPE="test"'), ++] + +-blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\"'), +- ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\"'), +- ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\"')] ++blkid_data = [ ++ ("/dev/sdx", 'UUID="hello-1234-56789" TYPE="crypto_LUKS"'), ++ ( ++ "/dev/sdy", ++ 'UUID="this-1s-a-t3st-f0r-ansible" VERSION="LVM2 001" TYPE="LVM2_member" USAGE="raid"', ++ ), ++ ( ++ "/dev/sdz", ++ 'LABEL="/data" UUID="a12bcdef-345g-67h8-90i1-234j56789k10" VERSION="1.0" TYPE="ext4" USAGE="filesystem"', ++ ), ++] + +-holders_data_none = [('/dev/sdx', ''), +- ('/dev/dm-99', '')] ++holders_data_none = [("/dev/sdx", ""), ("/dev/dm-99", "")] + +-holders_data = [('/dev/sdx', 'dm-0'), +- ('/dev/dm-99', 'dm-2 dm-3 dm-4')] ++holders_data = [("/dev/sdx", "dm-0"), ("/dev/dm-99", "dm-2 dm-3 dm-4")] + + +-@pytest.mark.parametrize('disk, blkid', blkid_data_pttype) ++@pytest.mark.parametrize("disk, blkid", blkid_data_pttype) + def test_no_signature_true(disk, blkid): + def run_command(args): +- return [0, blkid, ''] ++ return [0, blkid, ""] ++ + assert find_unused_disk.no_signature(run_command, disk) is True + + +-@pytest.mark.parametrize('disk, blkid', blkid_data) ++@pytest.mark.parametrize("disk, blkid", blkid_data) + def test_no_signature_false(disk, blkid): + def run_command(args): +- return [0, blkid, ''] ++ return [0, blkid, ""] ++ + assert find_unused_disk.no_signature(run_command, disk) is False + + +-@pytest.mark.parametrize('disk, holders', holders_data_none) ++@pytest.mark.parametrize("disk, holders", holders_data_none) + def test_no_holders_true(disk, holders, monkeypatch): + def mock_return(args): + return holders +- monkeypatch.setattr(os, 'listdir', mock_return) ++ ++ monkeypatch.setattr(os, "listdir", mock_return) + assert find_unused_disk.no_holders(disk) is True + + +-@pytest.mark.parametrize('disk, holders', holders_data) ++@pytest.mark.parametrize("disk, holders", holders_data) + def test_no_holders_false(disk, holders, monkeypatch): + def mock_return(args): + return holders +- monkeypatch.setattr(os, 'listdir', mock_return) ++ ++ monkeypatch.setattr(os, "listdir", mock_return) + assert find_unused_disk.no_holders(disk) is False + + + def test_can_open_true(monkeypatch): + def mock_return(args, flag): + return True +- monkeypatch.setattr(os, 'open', mock_return) +- assert 
find_unused_disk.can_open('/hello') is True ++ ++ monkeypatch.setattr(os, "open", mock_return) ++ assert find_unused_disk.can_open("/hello") is True + + + def test_can_open_false(monkeypatch): + def mock_return(args, flag): + raise OSError +- monkeypatch.setattr(os, 'open', mock_return) +- assert find_unused_disk.can_open('/hello') is False ++ ++ monkeypatch.setattr(os, "open", mock_return) ++ assert find_unused_disk.can_open("/hello") is False + + + def test_is_ignored(monkeypatch): + def mock_realpath(path): + return path +- monkeypatch.setattr(os.path, 'realpath', mock_realpath) +- assert find_unused_disk.is_ignored('/dev/sda') is False +- assert find_unused_disk.is_ignored('/dev/vda') is False +- assert find_unused_disk.is_ignored('/dev/mapper/mpatha') is False +- assert find_unused_disk.is_ignored('/dev/md/Volume0') is False +- assert find_unused_disk.is_ignored('/dev/nullb0') is True ++ ++ monkeypatch.setattr(os.path, "realpath", mock_realpath) ++ assert find_unused_disk.is_ignored("/dev/sda") is False ++ assert find_unused_disk.is_ignored("/dev/vda") is False ++ assert find_unused_disk.is_ignored("/dev/mapper/mpatha") is False ++ assert find_unused_disk.is_ignored("/dev/md/Volume0") is False ++ assert find_unused_disk.is_ignored("/dev/nullb0") is True +diff --git a/tox.ini b/tox.ini +index 92482d5..91c22a8 100644 +--- a/tox.ini ++++ b/tox.ini +@@ -13,9 +13,3 @@ configfile = .ansible-lint + setenv = + RUN_PYTEST_SETUP_MODULE_UTILS = true + RUN_PYLINT_SETUP_MODULE_UTILS = true +- +-[testenv:black] +-commands = bash -c 'echo black is currently not enabled - please fix this' +- +-[testenv:flake8] +-commands = bash -c 'echo flake8 is currently not enabled - please fix this' +-- +2.30.2 + diff --git a/SOURCES/storage-no-disks-existing.diff b/SOURCES/storage-no-disks-existing.diff new file mode 100644 index 0000000..68b1e8d --- /dev/null +++ b/SOURCES/storage-no-disks-existing.diff @@ -0,0 +1,142 @@ +diff --git a/library/blivet.py b/library/blivet.py +index eb8bb11..e927121 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -104,6 +104,7 @@ try: + from blivet3.formats import get_format + from blivet3.partitioning import do_partitioning + from blivet3.size import Size ++ from blivet3.udev import trigger + from blivet3.util import set_up_logging + BLIVET_PACKAGE = 'blivet3' + except ImportError: +@@ -116,6 +117,7 @@ except ImportError: + from blivet.formats import get_format + from blivet.partitioning import do_partitioning + from blivet.size import Size ++ from blivet.udev import trigger + from blivet.util import set_up_logging + BLIVET_PACKAGE = 'blivet' + except ImportError: +@@ -821,7 +823,10 @@ class BlivetPool(BlivetBase): + + def _look_up_disks(self): + """ Look up the pool's disks in blivet's device tree. """ +- if not self._pool['disks']: ++ if self._disks: ++ return ++ ++ if not self._device and not self._pool['disks']: + raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) + elif not isinstance(self._pool['disks'], list): + raise BlivetAnsibleError("pool disks must be specified as a list") +@@ -832,7 +837,7 @@ class BlivetPool(BlivetBase): + if device is not None: # XXX fail if any disk isn't resolved? 
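The net effect of the guards added to _look_up_disks() above: a pool that was already looked up as an existing device no longer requires an explicit disks list. A condensed sketch of the resulting control flow (names simplified from blivet.py; not the actual method):

def look_up_disks(pool_spec, existing_device, cached_disks, resolve):
    if cached_disks:  # disks were already resolved once; nothing to do
        return cached_disks
    if not existing_device and not pool_spec.get("disks"):
        raise ValueError("no disks specified for pool '%s'" % pool_spec["name"])
    disks = [d for d in map(resolve, pool_spec.get("disks") or []) if d is not None]
    if pool_spec.get("disks") and not existing_device and not disks:
        raise ValueError("unable to resolve any disks for pool '%s'" % pool_spec["name"])
    return disks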
+ disks.append(device) + +- if self._pool['disks'] and not disks: ++ if self._pool['disks'] and not self._device and not disks: + raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks'])) + + self._disks = disks +@@ -974,9 +979,9 @@ class BlivetPool(BlivetBase): + """ Schedule actions to configure this pool according to the yaml input. """ + global safe_mode + # look up the device +- self._look_up_disks() + self._look_up_device() + self._apply_defaults() ++ self._look_up_disks() + + # schedule destroy if appropriate, including member type change + if not self.ultimately_present: +@@ -999,6 +1004,7 @@ class BlivetPartitionPool(BlivetPool): + return self._device.partitionable + + def _look_up_device(self): ++ self._look_up_disks() + self._device = self._disks[0] + + def _create(self): +@@ -1354,6 +1360,13 @@ def run_module(): + + actions.append(action) + ++ def ensure_udev_update(action): ++ if action.is_create: ++ sys_path = action.device.path ++ if os.path.islink(sys_path): ++ sys_path = os.readlink(action.device.path) ++ trigger(action='change', subsystem='block', name=os.path.basename(sys_path)) ++ + def action_dict(action): + return dict(action=action.type_desc_str, + fs_type=action.format.type if action.is_format else None, +@@ -1395,6 +1408,7 @@ def run_module(): + if scheduled: + # execute the scheduled actions, committing changes to disk + callbacks.action_executed.add(record_action) ++ callbacks.action_executed.add(ensure_udev_update) + try: + b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode) + except Exception as e: +diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml +new file mode 100644 +index 0000000..854ac0d +--- /dev/null ++++ b/tests/tests_existing_lvm_pool.yml +@@ -0,0 +1,54 @@ ++--- ++- hosts: all ++ become: true ++ vars: ++ mount_location: '/opt/test1' ++ volume_group_size: '5g' ++ volume_size: '4g' ++ pool_name: foo ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: "{{ volume_group_size }}" ++ max_return: 1 ++ ++ - name: Create one LVM logical volume under one volume group ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ volume_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Create another volume in the existing pool, identified only by name. ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ volumes: ++ - name: newvol ++ size: '2 GiB' ++ fs_type: ext4 ++ fs_label: newvol ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up. ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: "{{ pool_name }}" ++ state: absent ++ ++ - include_tasks: verify-role-results.yml diff --git a/SOURCES/storage-partition-name.diff b/SOURCES/storage-partition-name.diff new file mode 100644 index 0000000..c206dc0 --- /dev/null +++ b/SOURCES/storage-partition-name.diff @@ -0,0 +1,30 @@ +commit effb7faf20301ddcee8ee36a1b156a0b9f006bb0 +Author: David Lehman +Date: Tue Aug 4 16:00:33 2020 -0400 + + Be smarter in choosing expected partition name. + + BlivetVolume._get_device_id is only used to look up pre-existing + volumes, so we don't have to try too hard to guess it by name. 
+ We can just see if the disk has a single partition and, if so, + return the name of that partition. + + Fixes: #141 + +diff --git a/library/blivet.py b/library/blivet.py +index eb8bb11..0f7ce98 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -554,7 +554,11 @@ class BlivetPartitionVolume(BlivetVolume): + return self._device.raw_device.type == 'partition' + + def _get_device_id(self): +- return self._blivet_pool._disks[0].name + '1' ++ device_id = None ++ if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1: ++ device_id = self._blivet_pool._disks[0].children[0].name ++ ++ return device_id + + def _resize(self): + pass diff --git a/SOURCES/storage-safemode-luks.diff b/SOURCES/storage-safemode-luks.diff deleted file mode 100644 index fd78028..0000000 --- a/SOURCES/storage-safemode-luks.diff +++ /dev/null @@ -1,602 +0,0 @@ -diff --git a/library/blivet.py b/library/blivet.py -index cb48e71..e1903f3 100644 ---- a/library/blivet.py -+++ b/library/blivet.py -@@ -167,11 +167,16 @@ class BlivetBase(object): - raise NotImplementedError() - - def _manage_one_encryption(self, device): -+ global safe_mode - ret = device - # Make sure to handle adjusting both existing stacks and future stacks. - if device == device.raw_device and self._spec_dict['encryption']: - # add luks - luks_name = "luks-%s" % device._name -+ if safe_mode and (device.original_format.type is not None or -+ device.original_format.name != get_format(None).name): -+ raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to adding encryption" % -+ device._name) - if not device.format.exists: - fmt = device.format - else: -@@ -196,6 +201,10 @@ class BlivetBase(object): - ret = luks_device - elif device != device.raw_device and not self._spec_dict['encryption']: - # remove luks -+ if safe_mode and (device.original_format.type is not None or -+ device.original_format.name != get_format(None).name): -+ raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to encryption removal" % -+ device._name) - if not device.format.exists: - fmt = device.format - else: -@@ -823,17 +832,21 @@ class BlivetPool(BlivetBase): - - def manage(self): - """ Schedule actions to configure this pool according to the yaml input. """ -+ global safe_mode - # look up the device - self._look_up_disks() - self._look_up_device() - - # schedule destroy if appropriate, including member type change -- if not self.ultimately_present or self._member_management_is_destructive(): -- if not self.ultimately_present: -- self._manage_volumes() -+ if not self.ultimately_present: -+ self._manage_volumes() - self._destroy() -- if not self.ultimately_present: -- return -+ return -+ elif self._member_management_is_destructive(): -+ if safe_mode: -+ raise BlivetAnsibleError("cannot remove and recreate existing pool '%s' in safe mode" % self._pool['name']) -+ else: -+ self._destroy() - - # schedule create if appropriate - self._create() -diff --git a/tests/create-test-file.yml b/tests/create-test-file.yml -new file mode 100644 -index 0000000..d1091e2 ---- /dev/null -+++ b/tests/create-test-file.yml -@@ -0,0 +1,13 @@ -+# Create a file to be checked that it still exists and no data loss has occured. 
-+# To use: -+# - set testfile to a path under the mountpoint being tested -+# - include this file (create-test-file.yml) before executing the -+# operation to be tested -+# - execute the operation that could potentially result in a loss of -+# data in the filesystem where testfile is located -+# - include verify-data-preservation.yml -+ -+- name: create a file -+ file: -+ path: "{{ testfile }}" -+ state: touch -diff --git a/tests/tests_luks.yml b/tests/tests_luks.yml -index f93efe5..f733714 100644 ---- a/tests/tests_luks.yml -+++ b/tests/tests_luks.yml -@@ -2,8 +2,8 @@ - - hosts: all - become: true - vars: -- storage_safe_mode: false - mount_location: '/opt/test1' -+ testfile: "{{ mount_location }}/quux" - volume_size: '5g' - - tasks: -@@ -64,10 +64,47 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Remove the encryption layer -+ include_role: -+ name: storage -+ vars: -+ storage_volumes: -+ - name: foo -+ type: disk -+ disks: "{{ unused_disks }}" -+ mount_point: "{{ mount_location }}" -+ encryption: false -+ encryption_password: 'yabbadabbadoo' -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to encryption removal') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing filesystem in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Remove the encryption layer - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_volumes: - - name: foo - type: disk -@@ -78,10 +115,47 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Add encryption to the volume -+ include_role: -+ name: storage -+ vars: -+ storage_volumes: -+ - name: foo -+ type: disk -+ disks: "{{ unused_disks }}" -+ mount_point: "{{ mount_location }}" -+ encryption: true -+ encryption_password: 'yabbadabbadoo' -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to adding encryption') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing filesystem in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Add encryption to the volume - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_volumes: - - name: foo - type: disk -@@ -102,6 +176,7 @@ - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: partition -@@ -135,6 +210,7 @@ - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: partition -@@ -149,10 +225,51 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: 
-+ - name: Remove the encryption layer -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: partition -+ disks: "{{ unused_disks }}" -+ volumes: -+ - name: test1 -+ type: partition -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ encryption: false -+ encryption_password: 'yabbadabbadoo' -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to encryption removal') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing filesystem in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Remove the encryption layer - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: partition -@@ -167,6 +284,48 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Add encryption to the volume -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: partition -+ disks: "{{ unused_disks }}" -+ volumes: -+ - name: test1 -+ type: partition -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ encryption: true -+ encryption_password: 'yabbadabbadoo' -+ -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to adding encryption') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing volume in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Test key file handling - block: - - name: Create a key file -@@ -186,6 +345,7 @@ - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: partition -@@ -216,6 +376,7 @@ - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -@@ -248,6 +409,7 @@ - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -@@ -264,10 +426,52 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Remove the encryption layer -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: lvm -+ disks: "{{ unused_disks }}" -+ volumes: -+ - name: test1 -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ encryption: false -+ encryption_password: 'yabbadabbadoo' -+ -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to encryption 
removal') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing volume in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Remove the encryption layer - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -@@ -281,10 +485,52 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Add encryption to the volume -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: lvm -+ disks: "{{ unused_disks }}" -+ volumes: -+ - name: test1 -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ encryption: true -+ encryption_password: 'yabbadabbadoo' -+ -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove existing -+ formatting.*in safe mode due to adding encryption') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing volume in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Add encryption to the volume - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -diff --git a/tests/tests_luks_pool.yml b/tests/tests_luks_pool.yml -index b20b806..f44916f 100644 ---- a/tests/tests_luks_pool.yml -+++ b/tests/tests_luks_pool.yml -@@ -2,9 +2,10 @@ - - hosts: all - become: true - vars: -- storage_safe_mode: false - mount_location: '/opt/test1' - mount_location_2: '/opt/test2' -+ testfile: "{{ mount_location }}/quux" -+ testfile_location_2: "{{ mount_location_2 }}/quux" - volume_size: '5g' - - tasks: -@@ -92,10 +93,50 @@ - state: absent - changed_when: false - -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Remove the encryption layer -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: lvm -+ disks: "{{ unused_disks }}" -+ encryption: false -+ encryption_password: 'yabbadabbadoo' -+ volumes: -+ - name: test1 -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove and recreate existing -+ pool.*in safe mode') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing pool in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ - - name: Remove the encryption layer - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -@@ -109,10 +150,53 @@ - - - include_tasks: verify-role-results.yml - -- - name: Add encryption to the volume -+ - import_tasks: create-test-file.yml -+ -+ - name: Test for correct handling of safe_mode -+ block: -+ - name: Add encryption to the pool -+ include_role: -+ name: storage -+ vars: -+ storage_pools: -+ - name: foo -+ type: lvm -+ disks: "{{ unused_disks }}" -+ encryption: true -+ 
encryption_password: 'yabbadabbadoo' -+ encryption_luks_version: luks1 -+ encryption_key_size: 512 -+ encryption_cipher: 'serpent-xts-plain64' -+ volumes: -+ - name: test1 -+ mount_point: "{{ mount_location }}" -+ size: 4g -+ - name: unreachable task -+ fail: -+ msg: UNREACH -+ rescue: -+ - name: Check that we failed in the role -+ assert: -+ that: -+ - ansible_failed_result.msg != 'UNREACH' -+ msg: "Role has not failed when it should have" -+ -+ - name: Verify the output of the safe_mode test -+ assert: -+ that: "blivet_output.failed and -+ blivet_output.msg -+ |regex_search('cannot remove and recreate existing -+ pool.*in safe mode') -+ and not blivet_output.changed" -+ msg: "Unexpected behavior w/ existing pool in safe mode" -+ -+ - import_tasks: verify-data-preservation.yml -+ -+ - name: Add encryption to the pool - include_role: - name: storage - vars: -+ storage_safe_mode: false - storage_pools: - - name: foo - type: lvm -@@ -129,6 +213,8 @@ - - - include_tasks: verify-role-results.yml - -+ - import_tasks: create-test-file.yml -+ - - name: Change the mountpoint, leaving encryption in place - include_role: - name: storage -@@ -144,6 +230,10 @@ - mount_point: "{{ mount_location_2 }}" - size: 4g - -+ - import_tasks: verify-data-preservation.yml -+ vars: -+ testfile: "{{ testfile_location_2 }}" -+ - - include_tasks: verify-role-results.yml - - - name: Clean up -diff --git a/tests/verify-data-preservation.yml b/tests/verify-data-preservation.yml -new file mode 100644 -index 0000000..eed790f ---- /dev/null -+++ b/tests/verify-data-preservation.yml -@@ -0,0 +1,19 @@ -+# Verify that a file still exists and no data loss has occured. -+# To use: -+# - set testfile to a path under the mountpoint being tested -+# - include create-test-file.yml before executing the operation to be -+# tested -+# - execute the operation that could potentially result in a loss of -+# data in the filesystem where testfile is located -+# - include this file (verify-data-preservation.yml) -+ -+- name: stat the file -+ stat: -+ path: "{{ testfile }}" -+ register: stat_r -+ -+- name: assert file presence -+ assert: -+ that: -+ stat_r.stat.isreg is defined and stat_r.stat.isreg -+ msg: "data lost!" diff --git a/SOURCES/storage-trim-volume-size.diff b/SOURCES/storage-trim-volume-size.diff new file mode 100644 index 0000000..ef947c7 --- /dev/null +++ b/SOURCES/storage-trim-volume-size.diff @@ -0,0 +1,326 @@ +diff --git a/library/blivet.py b/library/blivet.py +index e927121..f59f821 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -130,6 +130,9 @@ if BLIVET_PACKAGE: + set_up_logging() + log = logging.getLogger(BLIVET_PACKAGE + ".ansible") + ++ ++MAX_TRIM_PERCENT = 2 ++ + use_partitions = None # create partitions on pool backing device disks? 
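++# Worked example for MAX_TRIM_PERCENT (an illustrative note, not code from
++# the upstream patch): requesting a 10 GiB volume when at most 9.9 GiB fits
++# gives a trim of (1 - 9.9/10) * 100 = 1%, within the 2% cap, so the size is
++# adjusted down to fit; requesting 12 GiB against 10 GiB of free space gives
++# 16.7% and still raises BlivetAnsibleError.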
+ disklabel_type = None # user-specified disklabel type + safe_mode = None # do not remove any existing devices or formatting +@@ -445,8 +448,16 @@ class BlivetVolume(BlivetBase): + if not self._device.resizable: + return + +- if self._device.format.resizable: +- self._device.format.update_size_info() ++ trim_percent = (1.0 - float(self._device.max_size / size))*100 ++ log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent) ++ if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT: ++ log.info("adjusting %s resize target from %s to %s to fit in free space", ++ self._volume['name'], ++ size, ++ self._device.max_size) ++ size = self._device.max_size ++ if size == self._device.size: ++ return + + if not self._device.min_size <= size <= self._device.max_size: + raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size)) +@@ -610,10 +621,18 @@ class BlivetLVMVolume(BlivetVolume): + raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) + + fmt = self._get_format() ++ trim_percent = (1.0 - float(parent.free_space / size))*100 ++ log.debug("size: %s ; %s", size, trim_percent) + if size > parent.free_space: +- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size, +- parent.name, +- parent.free_space)) ++ if trim_percent > MAX_TRIM_PERCENT: ++ raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" ++ % (size, parent.name, parent.free_space)) ++ else: ++ log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'], ++ size, ++ parent.free_space, ++ parent.name) ++ size = parent.free_space + + try: + device = self._blivet.new_lv(name=self._volume['name'], +diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml +new file mode 100644 +index 0000000..21a5788 +--- /dev/null ++++ b/tests/tests_create_lv_size_equal_to_vg.yml +@@ -0,0 +1,48 @@ ++--- ++- hosts: all ++ become: true ++ vars: ++ storage_safe_mode: false ++ mount_location: '/opt/test1' ++ volume_group_size: '10g' ++ lv_size: '10g' ++ unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' ++ disk_size: '{{ unused_disk_subfact.sectors|int * ++ unused_disk_subfact.sectorsize|int }}' ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: "{{ volume_group_size }}" ++ max_return: 1 ++ ++ - name: Create one lv which size is equal to vg size ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ lv_size }}" ++ mount_point: "{{ mount_location }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: "absent" ++ volumes: ++ - name: test1 ++ mount_point: "{{ mount_location }}" ++ ++ - include_tasks: verify-role-results.yml +diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml +new file mode 100644 +index 0000000..fb17c23 +--- /dev/null ++++ b/tests/tests_lvm_auto_size_cap.yml +@@ -0,0 +1,89 @@ ++--- ++- hosts: all ++ become: true ++ ++ tasks: ++ - include_role: ++ name: linux-system-roles.storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: 10g ++ 
max_return: 1 ++ ++ - command: lsblk -b -l --noheadings -o NAME,SIZE ++ register: storage_test_lsblk ++ ++ - set_fact: ++ test_disk_size: "{{ storage_test_lsblk.stdout_lines|map('regex_search', '^' + unused_disks[0] + '\\s+\\d+$')|select('string')|first|regex_replace('^\\w+\\s+', '') }}" ++ ++ - package: ++ name: bc ++ state: installed ++ ++ - command: ++ cmd: bc ++ stdin: "{{ test_disk_size }} *2" ++ register: doubled_size ++ ++ - name: Test handling of too-large LVM volume size ++ block: ++ - name: Try to create a pool containing one volume twice the size of the backing disk ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ doubled_size.stdout|trim }}" ++ - name: unreachable task ++ fail: ++ msg: UNREACH ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_result.msg != 'UNREACH' ++ - blivet_output.failed and ++ blivet_output.msg|regex_search('specified size for volume.+exceeds available') ++ msg: "Role has not failed when it should have" ++ ++ - name: Create a pool containing one volume the same size as the backing disk ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Repeat the previous invocation to verify idempotence ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: test1 ++ size: "{{ test_disk_size }}" ++ ++ - include_tasks: verify-role-results.yml ++ ++ - name: Clean up ++ include_role: ++ name: linux-system-roles.storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ state: absent ++ volumes: [] +diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml +index 37d41dc..e8dc4f4 100644 +--- a/tests/tests_lvm_errors.yml ++++ b/tests/tests_lvm_errors.yml +@@ -11,8 +11,6 @@ + - '/non/existent/disk' + invalid_size: 'xyz GiB' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * +- unused_disk_subfact.sectorsize|int }}' + + tasks: + - include_role: +@@ -86,39 +84,6 @@ + - ansible_failed_result.msg != 'UNREACH' + msg: "Role has not failed when it should have" + +- # the following does not work properly +- # - name: Verify the output +- # assert: +- # that: "{{ blivet_output.failed and +- # blivet_output.msg|regex_search('invalid size.+for volume') and +- # not blivet_output.changed }}" +- # msg: "Unexpected behavior w/ invalid volume size" +- +- - name: Test for correct handling of too-large volume size. +- block: +- - name: Try to create LVM with a too-large volume size. 
+- include_role: +- name: linux-system-roles.storage +- vars: +- storage_pools: +- - name: foo +- disks: "{{ unused_disks }}" +- volumes: +- - name: test1 +- size: "{{ too_large_size }}" +- mount_point: "{{ mount_location1 }}" +- +- - name: unreachable task +- fail: +- msg: UNREACH +- +- rescue: +- - name: Check that we failed in the role +- assert: +- that: +- - ansible_failed_result.msg != 'UNREACH' +- msg: "Role has not failed when it should have" +- + # the following does not work properly + # - name: Verify the output + # assert: +@@ -138,7 +103,7 @@ + disks: "{{ unused_disks[0] }}" + volumes: + - name: test1 +- size: "{{ too_large_size }}" ++ size: "{{ volume_size }}" + mount_point: "{{ mount_location1 }}" + + - name: unreachable task +@@ -171,7 +136,7 @@ + disks: [] + volumes: + - name: test1 +- size: "{{ too_large_size }}" ++ size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + + - name: unreachable task +diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml +index a69ee98..3139bc7 100644 +--- a/tests/tests_misc.yml ++++ b/tests/tests_misc.yml +@@ -7,7 +7,7 @@ + volume_group_size: '5g' + volume1_size: '4g' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * ++ too_large_size: '{{ (unused_disk_subfact.sectors|int * 1.2) * + unused_disk_subfact.sectorsize|int }}' + + tasks: +diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml +index 9eeb2b9..209d129 100644 +--- a/tests/tests_resize.yml ++++ b/tests/tests_resize.yml +@@ -9,7 +9,7 @@ + invalid_size1: 'xyz GiB' + invalid_size2: 'none' + unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' +- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * ++ too_large_size: '{{ unused_disk_subfact.sectors|int * 1.2 * + unused_disk_subfact.sectorsize|int }}' + disk_size: '{{ unused_disk_subfact.sectors|int * + unused_disk_subfact.sectorsize|int }}' +@@ -122,23 +122,7 @@ + size: "{{ disk_size }}" + mount_point: "{{ mount_location }}" + +- - name: Unreachable task +- fail: +- msg: UNREACH +- +- rescue: +- - name: Check that we failed in the role +- assert: +- that: +- - ansible_failed_result.msg != 'UNREACH' +- msg: "Role has not failed when it should have" +- +- - name: Verify the output +- assert: +- that: "blivet_output.failed and +- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and +- not blivet_output.changed" +- msg: "Unexpected behavior w/ invalid volume size" ++ - include_tasks: verify-role-results.yml + + - name: Test for correct handling of invalid size specification + block: diff --git a/SOURCES/timesync-ansible-test-issues.diff b/SOURCES/timesync-ansible-test-issues.diff new file mode 100644 index 0000000..2ec733a --- /dev/null +++ b/SOURCES/timesync-ansible-test-issues.diff @@ -0,0 +1,22 @@ +From b55af45842482768f29704d90a1e019ffe0f7770 Mon Sep 17 00:00:00 2001 +From: Noriko Hosoi +Date: Tue, 2 Mar 2021 13:39:19 -0800 +Subject: [PATCH] Patch32: timesync-ansible-test-issues.diff + +RHELPLAN-68118 - Collections - Timesync - fixing ansible-test errors +RHELPLAN-68789 - Collections - ignore file for each role +--- + .sanity-ansible-ignore-2.9.txt | 1 + + 1 file changed, 1 insertion(+) + create mode 100644 .sanity-ansible-ignore-2.9.txt + +diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt +new file mode 100644 +index 0000000..e6d5e4d +--- /dev/null ++++ b/.sanity-ansible-ignore-2.9.txt +@@ -0,0 +1 @@ ++plugins/modules/timesync_provider.sh shebang +-- +2.26.2 + diff --git 
a/SPECS/rhel-system-roles.spec b/SPECS/rhel-system-roles.spec index 8eca7b3..27257f3 100644 --- a/SPECS/rhel-system-roles.spec +++ b/SPECS/rhel-system-roles.spec @@ -1,144 +1,247 @@ +%if 0%{?rhel} && ! 0%{?epel} +%bcond_with ansible +%else +%bcond_without ansible +%endif + %if 0%{?rhel} Name: rhel-system-roles %else Name: linux-system-roles %endif +Url: https://github.com/linux-system-roles/ Summary: Set of interfaces for unified system management -Version: 1.0 -Release: 21%{?dist} +Version: 1.0.1 +Release: 1%{?dist} #Group: Development/Libraries License: GPLv3+ and MIT and BSD -%if 0%{?rhel} +%global installbase %{_datadir}/linux-system-roles +%global _pkglicensedir %{_licensedir}/%{name} %global rolealtprefix linux-system-roles. -%endif %global roleprefix %{name}. +%global roleinstprefix %{nil} +%global rolealtrelpath ../../linux-system-roles/ +%if 0%{?rhel} +%global roleinstprefix %{roleprefix} +%global installbase %{_datadir}/ansible/roles +%global rolealtrelpath %{nil} +%endif + +%if 0%{?rhel} +%global collection_namespace redhat +%global collection_name rhel_system_roles +%else +%global collection_namespace fedora +%global collection_name linux_system_roles +%endif +%global subrole_prefix "private_${role}_subrole_" + +%global collection_version %{version} + +# Helper macros originally from macros.ansible by Igor Raits +# Not available on RHEL, so we must define those macros locally here without using ansible-galaxy + +# Not used (yet). Could be made to point to AH in RHEL - but what about CentOS Stream? +#%%{!?ansible_collection_url:%%define ansible_collection_url() https://galaxy.ansible.com/%%{collection_namespace}/%%{collection_name}} + +%{!?ansible_collection_files:%define ansible_collection_files %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace}/} + +%if %{with ansible} +BuildRequires: ansible >= 2.9.10 +%endif + +%if %{undefined ansible_collection_build} +%if %{without ansible} +# Empty command. We don't have ansible-galaxy. +%define ansible_collection_build() : +%else +%define ansible_collection_build() ansible-galaxy collection build +%endif +%endif + +%if %{undefined ansible_collection_install} +%if %{without ansible} +# Simply copy everything instead of galaxy-installing the built artifact. +%define ansible_collection_install() mkdir -p %{buildroot}%{ansible_collection_files}; cp -a . %{buildroot}%{ansible_collection_files}/%{collection_name}/ +%else +%define ansible_collection_install() ansible-galaxy collection install -n -p %{buildroot}%{_datadir}/ansible/collections %{collection_namespace}-%{collection_name}-%{version}.tar.gz +%endif +%endif # For each role, call either defcommit() or deftag(). The other macros # (%%id and %%shortid) can then be used in the same way in both cases. # This way the rest of the spec file does not need to know whether we are # dealing with a tag or a commit.
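# An illustrative expansion (editor's sketch based on the macro bodies below,
# not text from the spec itself): a call such as
# `%%defcommit 3 924650d0cd4117f73a7f0413ab745a8632bc5cec` defines %%ref3 as the
# full commit hash, %%shortcommit3 as its first seven characters, %%extractdir3
# as the unpacked archive directory, and %%repo3 defaulting to %%rolename3; it
# also derives %%archiveurl3 from the repo and ref, and appends the role to
# %%rolenames and %%rolestodir.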
-%define defcommit() %{expand:%%global id%{1} %{2} -%%global shortid%{1} %%(c=%%{id%{1}}; echo ${c:0:7}) +%global archiveext tar.gz +# list of role names +%global rolenames %nil +# list of assignments that can be used to populate a bash associative array variable +%global rolestodir %nil +%define getarchivedir() %(p=%{basename:%{S:%{1}}}; echo ${p%%.%{archiveext}}) + +%define defcommit() %{expand:%%global ref%{1} %{2} +%%global shortcommit%{1} %%(c=%%{ref%{1}}; echo ${c:0:7}) +%%global extractdir%{1} %%{expand:%%getarchivedir %{1}} +%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}} +%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz +%%global rolenames %%{?rolenames} %%{rolename%{1}} +%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}" +%%global rolestodir %%{?rolestodir} %{roletodir%{1}} } -%define deftag() %{expand:%%global id%{1} %{2} -%%global shortid%{1} %{2} +%define deftag() %{expand:%%global ref%{1} %{2} +%%global extractdir%{1} %%{expand:%%getarchivedir %{1}} +%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}} +%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz +%%global rolenames %%{?rolenames} %%{rolename%{1}} +%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}" +%%global rolestodir %%{?rolestodir} %%{roletodir%{1}} } -%defcommit 0 0c2bb286bbc1b73d728226924e0010c0fa1ce30a -%global rolename0 kdump -#%%deftag 0 1.0.0 - #%%defcommit 1 43eec5668425d295dce3801216c19b1916df1f9b %global rolename1 postfix %deftag 1 0.1 -%defcommit 2 6cd1ec8fdebdb92a789b14e5a44fe77f0a3d8ecd +#%%defcommit 2 6cd1ec8fdebdb92a789b14e5a44fe77f0a3d8ecd %global rolename2 selinux -#%%deftag 2 1.0.0 +%deftag 2 1.1.1 %defcommit 3 924650d0cd4117f73a7f0413ab745a8632bc5cec %global rolename3 timesync #%%deftag 3 1.0.0 -%defcommit 5 bf4501bb8770d3ef761e1684011c905f99a9752f +%defcommit 4 77596fdd976c6160d6152c200a5432c609725a14 +%global rolename4 kdump +#%%deftag 4 1.0.0 + +%defcommit 5 bda206d45c87ee8c1a5284de84f5acf5e629de97 %global rolename5 network #%%deftag 5 1.0.0 -%defcommit 6 81f30ab336f4ecc61b4a30ffcb080e17fd35de2e +%defcommit 6 485de47b0dc0787aea077ba448ecb954f53e40c4 %global rolename6 storage -#%%deftag 6 1.0.2 +#%%deftag 6 1.2.2 -%defcommit 7 7f94b49688902eb507e0ebeda1fbf08621bc3c6b +%defcommit 7 e81b2650108727f38b1c856699aad26af0f44a46 %global rolename7 metrics #%%deftag 7 0.1.0 -%defcommit 8 cfa70b6b5910b3198aba2679f8fc36aad45ca45a +#%%defcommit 8 cfa70b6b5910b3198aba2679f8fc36aad45ca45a %global rolename8 tlog -#%%deftag 8 0.2.0 +%deftag 8 1.1.0 -%defcommit 9 901a73a4285469ef50a6cc37135ae55ce9d2e41b +%defcommit 9 4c81fd1380712ab0641b6837f092dd9caeeae0a6 %global rolename9 kernel_settings -#%%deftag 9 0.2.0 +#%%deftag 9 1.0.1 -%defcommit 10 fe3f658e72b2883d2a1460d453105c7a53dd70e8 +%defcommit 10 07e08107e7ccba5822f8a7aaec1a2ff0a221bede %global rolename10 logging #%%deftag 10 0.2.0 -%defcommit 11 4b6cfca4dd24e53a4bc4e07635601d7c104346c1 +%defcommit 11 4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678 %global rolename11 nbde_server -#%%deftag 11 0.1.0 +#%%deftag 11 1.0.1 -%defcommit 12 6306defad146d8274b04f438a04e17e44672f1a6 +%defcommit 12 19f06159582550c8463f7d8492669e26fbdf760b %global rolename12 nbde_client -#%%deftag 12 0.1.0 +#%%deftag 12 1.0.1 -%defcommit 13 fedef6e7844bb623bb54695a602137e332f5509f +%defcommit 13 0376ceece57882ade8ffaf431b7866aae3e7fed1 %global rolename13 certificate -#%%deftag 13 0.1.0 - -Source: 
https://github.com/linux-system-roles/%{rolename0}/archive/%{id0}.tar.gz#/%{rolename0}-%{shortid0}.tar.gz -Source1: https://github.com/linux-system-roles/%{rolename1}/archive/%{id1}.tar.gz#/%{rolename1}-%{shortid1}.tar.gz -Source2: https://github.com/linux-system-roles/%{rolename2}/archive/%{id2}.tar.gz#/%{rolename2}-%{shortid2}.tar.gz -Source3: https://github.com/linux-system-roles/%{rolename3}/archive/%{id3}.tar.gz#/%{rolename3}-%{shortid3}.tar.gz -Source5: https://github.com/linux-system-roles/%{rolename5}/archive/%{id5}.tar.gz#/%{rolename5}-%{shortid5}.tar.gz -Source6: https://github.com/linux-system-roles/%{rolename6}/archive/%{id6}.tar.gz#/%{rolename6}-%{shortid6}.tar.gz -Source7: https://github.com/linux-system-roles/%{rolename7}/archive/%{id7}.tar.gz#/%{rolename7}-%{shortid7}.tar.gz -Source8: https://github.com/linux-system-roles/%{rolename8}/archive/%{id8}.tar.gz#/%{rolename8}-%{shortid8}.tar.gz -Source9: https://github.com/linux-system-roles/%{rolename9}/archive/%{id9}.tar.gz#/%{rolename9}-%{shortid9}.tar.gz -Source10: https://github.com/linux-system-roles/%{rolename10}/archive/%{id10}.tar.gz#/%{rolename10}-%{shortid10}.tar.gz -Source11: https://github.com/linux-system-roles/%{rolename11}/archive/%{id11}.tar.gz#/%{rolename11}-%{shortid11}.tar.gz -Source12: https://github.com/linux-system-roles/%{rolename12}/archive/%{id12}.tar.gz#/%{rolename12}-%{shortid12}.tar.gz -Source13: https://github.com/linux-system-roles/%{rolename13}/archive/%{id13}.tar.gz#/%{rolename13}-%{shortid13}.tar.gz - -Source999: md2html.sh - -%if "%{roleprefix}" != "linux-system-roles." -Patch1: rhel-system-roles-%{rolename1}-prefix.diff -Patch2: rhel-system-roles-%{rolename2}-prefix.diff -Patch3: rhel-system-roles-%{rolename3}-prefix.diff -Patch5: rhel-system-roles-%{rolename5}-prefix.diff -Patch6: rhel-system-roles-%{rolename6}-prefix.diff -# for some roles, the prefix change can be scripted - see below -%endif +#%%deftag 13 1.0.1 + +%defcommit 14 2e2941c5545571fc8bc494099bdf970f498b9d38 +%global rolename14 crypto_policies + +%global forgeorg15 https://github.com/willshersystems/ +%global repo15 ansible-sshd +%global rolename15 sshd +%defcommit 15 e1de59b3c54e9d48a010eeca73755df339c7e628 + +%defcommit 16 21adc637511db86b5ba279a70a7301ef3a170669 +%global rolename16 ssh + +%defcommit 17 779bb78559de58bb5a1f25a4b92039c373ef59a4 +%global rolename17 ha_cluster + +%global mainid 8f069305caa0a142c2c6ac14bd4d331282a1c079 +Source: %{url}auto-maintenance/archive/%{mainid}/auto-maintenance-%{mainid}.tar.gz +Source1: %{archiveurl1} +Source2: %{archiveurl2} +Source3: %{archiveurl3} +Source4: %{archiveurl4} +Source5: %{archiveurl5} +Source6: %{archiveurl6} +Source7: %{archiveurl7} +Source8: %{archiveurl8} +Source9: %{archiveurl9} +Source10: %{archiveurl10} +Source11: %{archiveurl11} +Source12: %{archiveurl12} +Source13: %{archiveurl13} +Source14: %{archiveurl14} +Source15: %{archiveurl15} +Source16: %{archiveurl16} +Source17: %{archiveurl17} + +# Script to convert the collection README to Automation Hub. +# Not used on Fedora. 
+Source998: collection_readme.sh Patch11: rhel-system-roles-postfix-pr5.diff Patch12: postfix-meta-el8.diff -Patch101: rhel-system-roles-kdump-pr22.diff - -Patch102: kdump-tier1-tags.diff -Patch103: kdump-meta-el8.diff Patch21: selinux-tier1-tags.diff +Patch22: selinux-bz-1926947-no-variable-named-present.diff +Patch23: selinux-ansible-test-issues.diff Patch31: timesync-tier1-tags.diff +Patch32: timesync-ansible-test-issues.diff +Patch41: rhel-system-roles-kdump-pr22.diff +Patch42: kdump-tier1-tags.diff +Patch43: kdump-meta-el8.diff +Patch44: kdump-fix-newline.diff + +Patch51: network-epel-minimal.diff +# Not suitable for upstream, since the files need to be executable there Patch52: network-permissions.diff Patch53: network-tier1-tags.diff +Patch55: network-disable-bondtests.diff +Patch56: network-pr353.diff +Patch57: network-ansible-test.diff -Patch61: storage-safemode-luks.diff +Patch62: storage-partition-name.diff +Patch63: storage-no-disks-existing.diff +Patch64: storage-trim-volume-size.diff +Patch65: storage-ansible-test.diff -Patch1001: logging-0001-test-playbooks-enhancement.diff -Patch1002: logging-0002-elasticsearch-output-template.diff -Patch1003: logging-0003-README.diff -Patch1004: logging-0004-yamllint-errors.diff -Patch1005: logging-0005-property-based-filters.diff -Patch1006: logging-0006-property_op.diff -Patch1007: logging-0007-RHELPLAN-56807.diff +Patch71: metrics-mssql-x86.diff +Patch151: sshd-example.diff +Patch152: sshd-work-on-ansible28-jinja27.diff -Url: https://github.com/linux-system-roles/ BuildArch: noarch +# These are needed for md2html.sh to build the documentation BuildRequires: asciidoc BuildRequires: pandoc BuildRequires: highlight +BuildRequires: python3 +BuildRequires: python3-six +BuildRequires: python3dist(ruamel.yaml) Requires: python3-jmespath Obsoletes: rhel-system-roles-techpreview < 1.0-3 +%if %{undefined __ansible_provides} +Provides: ansible-collection(%{collection_namespace}.%{collection_name}) = %{collection_version} +%endif +# be compatible with the usual Fedora Provides: +Provides: ansible-collection-%{collection_namespace}-%{collection_name} = %{version}-%{release} + # We need to put %%description within the if block to avoid empty # lines showing up. %if 0%{?rhel} @@ -154,115 +257,197 @@ of Fedora, Red Hat Enterprise Linux & CentOS. %endif %prep -%setup -qc -a1 -a2 -a3 -a5 -a6 -a7 -a8 -a9 -a10 -a11 -a12 -a13 -cd %{rolename0}-%{id0} -%patch101 -p1 -%patch102 -p1 -%patch103 -p1 -cd .. -cd %{rolename1}-%{id1} -%if "%{roleprefix}" != "linux-system-roles." -%patch1 -p1 -%endif +%setup -q -a1 -a2 -a3 -a4 -a5 -a6 -a7 -a8 -a9 -a10 -a11 -a12 -a13 -a14 -a15 -a16 -a17 -n %{getarchivedir 0} + +declare -A ROLESTODIR=(%{rolestodir}) +for rolename in %{rolenames}; do + mv "${ROLESTODIR[${rolename}]}" ${rolename} +done + +cd %{rolename1} %patch11 -p1 %patch12 -p1 cd .. -cd %{rolename2}-%{id2} -%if "%{roleprefix}" != "linux-system-roles." -%patch2 -p1 -%endif +cd %{rolename2} %patch21 -p1 +%patch22 -p1 +%patch23 -p1 cd .. -cd %{rolename3}-%{id3} -%if "%{roleprefix}" != "linux-system-roles." -%patch3 -p1 -%endif +cd %{rolename3} %patch31 -p1 +%patch32 -p1 cd .. -cd %{rolename5}-%{id5} -%if "%{roleprefix}" != "linux-system-roles." -%patch5 -p1 -%endif +cd %{rolename4} +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +cd .. +cd %{rolename5} +%patch51 -p1 %patch52 -p1 %patch53 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 cd .. -cd %{rolename6}-%{id6} -%if "%{roleprefix}" != "linux-system-roles." 
-%patch6 -p1 -%endif -%patch61 -p1 +cd %{rolename6} +%patch62 -p1 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 cd .. -cd %{rolename10}-%{id10} -%patch1001 -p1 -%patch1002 -p1 -%patch1003 -p1 -%patch1004 -p1 -%patch1005 -p1 -%patch1006 -p1 -%patch1007 -p1 +cd %{rolename7} +%patch71 -p1 +cd .. +cd %{rolename15} +%patch151 -p1 +%patch152 -p1 +sed -r -i -e "s/ansible-sshd/linux-system-roles.sshd/" tests/*.yml examples/*.yml README.md cd .. -# for some roles, the prefix change can be scripted - see below +# Replacing "linux-system-roles.rolename" with "rhel-system-roles.rolename" in each role %if "%{roleprefix}" != "linux-system-roles." -for rolename_id in %{rolename7}-%{id7} %{rolename8}-%{id8} %{rolename9}-%{id9} \ - %{rolename10}-%{id10} %{rolename11}-%{id11} %{rolename12}-%{id12} \ - %{rolename13}-%{id13}; do - # assumes rolename has no dash in it - # note that we have to use double %% - # in order for a single % to be passed to bash - rolename=${rolename_id%%-*} - find $rolename_id -type f -exec \ +for rolename in %{rolenames}; do + find $rolename -type f -exec \ sed "s/linux-system-roles[.]${rolename}\\>/%{roleprefix}${rolename}/g" -i {} \; done %endif +# Removing symlinks in tests/roles +for rolename in %{rolenames}; do + if [ -d ${rolename}/tests/roles ]; then + find ${rolename}/tests/roles -type l -exec rm {} \; + if [ -d ${rolename}/tests/roles/linux-system-roles.${rolename} ]; then + rm -r ${rolename}/tests/roles/linux-system-roles.${rolename} + fi + fi +done +rm %{rolename5}/tests/modules +rm %{rolename5}/tests/module_utils +rm %{rolename5}/tests/playbooks/roles + +# transform ambiguous #!/usr/bin/env python shebangs to python3 to stop brp-mangle-shebangs complaining +find -type f -executable -name '*.py' -exec \ + sed -i -r -e '1s@^(#! */usr/bin/env python)(\s|$)@#\13\2@' '{}' + + %build -sh %{SOURCE999} \ -%{rolename0}-%{id0}/README.md \ -%{rolename1}-%{id1}/README.md \ -%{rolename2}-%{id2}/README.md \ -%{rolename3}-%{id3}/README.md \ -%{rolename5}-%{id5}/README.md \ -%{rolename6}-%{id6}/README.md \ -%{rolename7}-%{id7}/README.md \ -%{rolename8}-%{id8}/README.md \ -%{rolename9}-%{id9}/README.md \ -%{rolename10}-%{id10}/README.md \ -%{rolename11}-%{id11}/README.md \ -%{rolename12}-%{id12}/README.md \ -%{rolename13}-%{id13}/README.md +sh md2html.sh \ +%{rolename1}/README.md \ +%{rolename2}/README.md \ +%{rolename3}/README.md \ +%{rolename4}/README.md \ +%{rolename5}/README.md \ +%{rolename6}/README.md \ +%{rolename7}/README.md \ +%{rolename8}/README.md \ +%{rolename9}/README.md \ +%{rolename10}/README.md \ +%{rolename11}/README.md \ +%{rolename12}/README.md \ +%{rolename13}/README.md \ +%{rolename14}/README.md \ +%{rolename15}/README.md \ +%{rolename16}/README.md \ +%{rolename17}/README.md + +mkdir .collections +%if 0%{?rhel} +# Convert the upstream collection readme to the downstream one +%{SOURCE998} lsr_role2collection/collection_readme.md +%endif +./galaxy_transform.py "%{collection_namespace}" "%{collection_name}" "%{collection_version}" "Red Hat Enterprise Linux System Roles Ansible Collection" > galaxy.yml.tmp +mv galaxy.yml.tmp galaxy.yml + +for role in %{rolenames}; do + python3 lsr_role2collection.py --role "$role" --src-path "$role" \ + --src-owner %{name} --subrole-prefix %{subrole_prefix} --dest-path .collections \ + --readme lsr_role2collection/collection_readme.md \ + --namespace %{collection_namespace} --collection %{collection_name} +done + +rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt +# Merge 
.sanity-ansible-ignore-2.9-ROLENAME.txt into tests/sanity/ignore-2.9.txt +mkdir -p .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity +for role in %{rolenames}; do + if [ -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt ]; + then + cat .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt \ + >> .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt + rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-*-"$role".txt + fi +done + +# removing dot files/dirs +rm -r .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.[A-Za-z]* + +cp -p galaxy.yml lsr_role2collection/.ansible-lint \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name} + +# converting README.md to README.html +sh md2html.sh -l \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename1}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename2}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename3}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename4}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename5}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename6}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename7}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename8}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename9}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename10}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename11}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename12}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename13}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename14}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename15}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename16}/README.md \ + .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename17}/README.md + +cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/ +%ansible_collection_build %install +mkdir -p $RPM_BUILD_ROOT%{installbase} mkdir -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles -cp -pR %{rolename0}-%{id0} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename0} -cp -pR %{rolename1}-%{id1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename1} -cp -pR %{rolename2}-%{id2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename2} -cp -pR %{rolename3}-%{id3} 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename3} -cp -pR %{rolename5}-%{id5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename5} -cp -pR %{rolename6}-%{id6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename6} -cp -pR %{rolename7}-%{id7} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename7} -cp -pR %{rolename8}-%{id8} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename8} -cp -pR %{rolename9}-%{id9} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename9} -cp -pR %{rolename10}-%{id10} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename10} -cp -pR %{rolename11}-%{id11} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename11} -cp -pR %{rolename12}-%{id12} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename12} -cp -pR %{rolename13}-%{id13} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename13} +cp -pR %{rolename1} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename1} +cp -pR %{rolename2} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename2} +cp -pR %{rolename3} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename3} +cp -pR %{rolename4} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename4} +cp -pR %{rolename5} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename5} +cp -pR %{rolename6} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename6} +cp -pR %{rolename7} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename7} +cp -pR %{rolename8} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename8} +cp -pR %{rolename9} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename9} +cp -pR %{rolename10} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename10} +cp -pR %{rolename11} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename11} +cp -pR %{rolename12} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename12} +cp -pR %{rolename13} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename13} +cp -pR %{rolename14} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename14} +cp -pR %{rolename15} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename15} +cp -pR %{rolename16} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename16} +cp -pR %{rolename17} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename17} %if 0%{?rolealtprefix:1} -ln -s %{roleprefix}%{rolename0} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename0} -ln -s %{roleprefix}%{rolename1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename1} -ln -s %{roleprefix}%{rolename2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename2} -ln -s %{roleprefix}%{rolename3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename3} -ln -s %{roleprefix}%{rolename5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename5} -ln -s %{roleprefix}%{rolename6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename6} -ln -s %{roleprefix}%{rolename7} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename7} -ln -s %{roleprefix}%{rolename8} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename8} -ln -s %{roleprefix}%{rolename9} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename9} -ln -s %{roleprefix}%{rolename10} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename10} -ln -s %{roleprefix}%{rolename11} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename11} -ln -s %{roleprefix}%{rolename12} 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename12} -ln -s %{roleprefix}%{rolename13} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename13} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename1} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename2} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename3} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename4} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename4} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename5} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename6} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename7} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename7} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename8} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename8} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename9} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename9} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename10} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename10} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename11} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename11} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename12} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename12} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename13} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename13} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename14} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename14} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename15} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename15} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename16} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename16} +ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename17} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename17} %endif mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/kdump @@ -278,125 +463,209 @@ mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/logging mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_server mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_client mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/certificate - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/COPYING \ +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/crypto_policies +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/sshd +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/ssh +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/ha_cluster +mkdir -p $RPM_BUILD_ROOT%{_pkglicensedir} + +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/kdump +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/kdump.COPYING -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}postfix/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}postfix/README.html \ - 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}postfix/COPYING \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/postfix +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/postfix.COPYING -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/COPYING \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/selinux -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}selinux/selinux-playbook.yml \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/selinux.COPYING +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/selinux-playbook.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/selinux/example-selinux-playbook.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/COPYING \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/timesync -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/examples/multiple-ntp-servers.yml \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/timesync.COPYING +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/examples/multiple-ntp-servers.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}timesync/examples/single-pool.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/examples/single-pool.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-pool-playbook.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/network -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bond_with_vlan.yml \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/network.LICENSE +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bond_with_vlan.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bridge_with_vlan.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bridge_with_vlan.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bridge_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_simple_auto.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_simple_auto.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_simple_auto-playbook.yml -mv 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_with_vlan.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_with_vlan.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_vlan-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/infiniband.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/infiniband.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-infiniband-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/macvlan.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/macvlan.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-macvlan-playbook.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/remove_profile.yml \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove_profile.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-remove_profile-playbook.yml -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/remove_profile.yml -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/down_profile.yml \ +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove_profile.yml +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/down_profile.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-down_profile-playbook.yml -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/down_profile.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/inventory \ +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/down_profile.yml +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/inventory \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-inventory -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtool_features.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_features.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtool_features_default.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_features_default.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features_default-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/bond_simple.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bond_simple.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_simple-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_with_802_1x.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_with_802_1x.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_802_1x-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/wireless_wpa_psk.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/wireless_wpa_psk.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-wireless_wpa_psk-playbook.yml -mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/remove+down_profile.yml \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove+down_profile.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-remove+down_profile-playbook.yml - -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/README.md \ - 
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE \ +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/dummy_simple.yml \ + $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-dummy_simple-playbook.yml +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_coalesce.yml \ + $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_coalesce-playbook.yml +mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/team_simple.yml \ + $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-team_simple-playbook.yml +mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_dns_support.yml \ + $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_dns_support-playbook.yml + +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/storage +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/storage.LICENSE + +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/semaphore +rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/molecule -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/semaphore -rm -r $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/molecule -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/.travis.yml -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/.ansible-lint +rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/.[A-Za-z]* +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/tests/.git* -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/.gitignore -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/tests/.gitignore -rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/roles -rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples +rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/roles +rmdir $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}metrics/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}metrics/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}metrics/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/metrics +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/metrics.LICENSE -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}tlog/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}tlog/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}tlog/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/tlog +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/tlog.LICENSE -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kernel_settings/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kernel_settings/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kernel_settings/LICENSE \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kernel_settings/COPYING \ +cp -p 
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/kernel_settings +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/kernel_settings.LICENSE +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/kernel_settings.COPYING -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}logging/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}logging/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}logging/LICENSE \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}logging/COPYING \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/logging +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/logging.LICENSE +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/COPYING \ + $RPM_BUILD_ROOT%{_pkglicensedir}/logging.COPYING -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_server/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_server/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_server/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_server +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/nbde_server.LICENSE -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_client/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_client/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}nbde_client/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_client +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/nbde_client.LICENSE -cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}certificate/README.md \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}certificate/README.html \ - $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}certificate/LICENSE \ +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/README.html \ $RPM_BUILD_ROOT%{_pkgdocdir}/certificate +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/certificate.LICENSE + +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/README.html \ + $RPM_BUILD_ROOT%{_pkgdocdir}/crypto_policies +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/LICENSE \ + $RPM_BUILD_ROOT%{_pkglicensedir}/crypto_policies.LICENSE + +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/README.md \ + $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/README.html \ + $RPM_BUILD_ROOT%{_pkgdocdir}/sshd +cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/LICENSE \ + 
+
+cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
+%ansible_collection_install
+
+mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection
+mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles
+
+cp -p %{buildroot}%{ansible_collection_files}%{collection_name}/README.md \
+    %{buildroot}%{ansible_collection_files}%{collection_name}/README.html \
+    $RPM_BUILD_ROOT%{_pkgdocdir}/collection
+
+for rolename in %{rolenames}; do
+    if [ -f %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.md ]; then
+        mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename}
+        cp -p %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.md \
+            %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.html \
+            $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename}
+    fi
+done
+
 %files
+%if %{without ansible}
 %dir %{_datadir}/ansible
 %dir %{_datadir}/ansible/roles
+%endif
+%if "%{installbase}" != "%{_datadir}/ansible/roles"
+%dir %{installbase}
+%endif
 %if 0%{?rolealtprefix:1}
 %{_datadir}/ansible/roles/%{rolealtprefix}kdump
 %{_datadir}/ansible/roles/%{rolealtprefix}postfix
@@ -411,82 +680,186 @@ cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}certificate/README.m
 %{_datadir}/ansible/roles/%{rolealtprefix}nbde_server
 %{_datadir}/ansible/roles/%{rolealtprefix}nbde_client
 %{_datadir}/ansible/roles/%{rolealtprefix}certificate
+%{_datadir}/ansible/roles/%{rolealtprefix}crypto_policies
+%{_datadir}/ansible/roles/%{rolealtprefix}sshd
+%{_datadir}/ansible/roles/%{rolealtprefix}ssh
+%{_datadir}/ansible/roles/%{rolealtprefix}ha_cluster
 %endif
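The %if %{without ansible} guard above pairs with the bcond introduced in the 1.0.0-25 changelog entry below: when the package is built without a dependency on Ansible, nothing else owns %{_datadir}/ansible, so the spec must own those directories itself, and %{installbase} is claimed separately whenever it differs from the standard roles path. A minimal sketch of the mechanism, assuming the bcond declaration sits earlier in the spec, outside this hunk:

    # declare the conditional; %%bcond_with makes "without ansible" the default
    %bcond_with ansible
    %if %{without ansible}
    # no ansible package is present to own these directories, so own them here
    %dir %{_datadir}/ansible
    %dir %{_datadir}/ansible/roles
    %endif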
-%{_datadir}/ansible/roles/%{roleprefix}kdump
-%{_datadir}/ansible/roles/%{roleprefix}postfix
-%{_datadir}/ansible/roles/%{roleprefix}selinux
-%{_datadir}/ansible/roles/%{roleprefix}timesync
-%{_datadir}/ansible/roles/%{roleprefix}network
-%{_datadir}/ansible/roles/%{roleprefix}storage
-%{_datadir}/ansible/roles/%{roleprefix}metrics
-%{_datadir}/ansible/roles/%{roleprefix}tlog
-%{_datadir}/ansible/roles/%{roleprefix}kernel_settings
-%{_datadir}/ansible/roles/%{roleprefix}logging
-%{_datadir}/ansible/roles/%{roleprefix}nbde_server
-%{_datadir}/ansible/roles/%{roleprefix}nbde_client
-%{_datadir}/ansible/roles/%{roleprefix}certificate
-%doc %{_pkgdocdir}/*/example-*-playbook.yml
-%doc %{_pkgdocdir}/network/example-inventory
-%doc %{_pkgdocdir}/*/README.md
-%doc %{_pkgdocdir}/*/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}kdump/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}postfix/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}selinux/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}timesync/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}network/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}storage/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}metrics/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}tlog/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}kernel_settings/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}logging/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}nbde_server/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}nbde_client/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}certificate/README.md
-%doc %{_datadir}/ansible/roles/%{roleprefix}kdump/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}postfix/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}selinux/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}timesync/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}network/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}storage/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}metrics/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}tlog/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}kernel_settings/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}logging/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}nbde_server/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}nbde_client/README.html
-%doc %{_datadir}/ansible/roles/%{roleprefix}certificate/README.html
-
-
-%license %{_pkgdocdir}/*/COPYING
-%license %{_pkgdocdir}/*/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}kdump/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}postfix/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}selinux/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}timesync/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}network/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}metrics/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}tlog/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}kernel_settings/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}kernel_settings/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}logging/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}logging/COPYING
-%license %{_datadir}/ansible/roles/%{roleprefix}nbde_server/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}nbde_client/LICENSE
-%license %{_datadir}/ansible/roles/%{roleprefix}certificate/LICENSE
+%{installbase}/%{roleinstprefix}kdump
+%{installbase}/%{roleinstprefix}postfix
+%{installbase}/%{roleinstprefix}selinux
+%{installbase}/%{roleinstprefix}timesync
+%{installbase}/%{roleinstprefix}network
+%{installbase}/%{roleinstprefix}storage
+%{installbase}/%{roleinstprefix}metrics
+%{installbase}/%{roleinstprefix}tlog
+%{installbase}/%{roleinstprefix}kernel_settings
+%{installbase}/%{roleinstprefix}logging
+%{installbase}/%{roleinstprefix}nbde_server
+%{installbase}/%{roleinstprefix}nbde_client
+%{installbase}/%{roleinstprefix}certificate
+%{installbase}/%{roleinstprefix}crypto_policies
+%{installbase}/%{roleinstprefix}sshd
+%{installbase}/%{roleinstprefix}ssh
+%{installbase}/%{roleinstprefix}ha_cluster
+%{_pkgdocdir}/*/example-*-playbook.yml
+%{_pkgdocdir}/network/example-inventory
+%{_pkgdocdir}/*/README.md
+%{_pkgdocdir}/*/README.html
+%{_pkgdocdir}/collection/roles/*/README.md
+%{_pkgdocdir}/collection/roles/*/README.html
+%doc %{installbase}/%{roleinstprefix}*/README.md
+%doc %{installbase}/%{roleinstprefix}*/README.html
+%doc %{ansible_collection_files}/%{collection_name}/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/kdump/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/postfix/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/selinux/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/timesync/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/network/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/storage/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/metrics/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/tlog/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/kernel_settings/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/logging/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/nbde_server/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/nbde_client/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/certificate/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/crypto_policies/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/sshd/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/ssh/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/ha_cluster/README.md
+# can't use a glob for .md files, only for .html: the .md files include READMEs
+# for private subroles, and we don't want to tag those as docs.
+%doc %{ansible_collection_files}/%{collection_name}/README.html
+%doc %{ansible_collection_files}/%{collection_name}/roles/*/README.html
+
+%license %{_pkglicensedir}/*
+%license %{installbase}/%{roleinstprefix}kdump/COPYING
+%license %{installbase}/%{roleinstprefix}postfix/COPYING
+%license %{installbase}/%{roleinstprefix}selinux/COPYING
+%license %{installbase}/%{roleinstprefix}timesync/COPYING
+%license %{installbase}/%{roleinstprefix}network/LICENSE
+%license %{installbase}/%{roleinstprefix}storage/LICENSE
+%license %{installbase}/%{roleinstprefix}metrics/LICENSE
+%license %{installbase}/%{roleinstprefix}tlog/LICENSE
+%license %{installbase}/%{roleinstprefix}kernel_settings/LICENSE
+%license %{installbase}/%{roleinstprefix}kernel_settings/COPYING
+%license %{installbase}/%{roleinstprefix}logging/LICENSE
+%license %{installbase}/%{roleinstprefix}logging/COPYING
+%license %{installbase}/%{roleinstprefix}nbde_server/LICENSE
+%license %{installbase}/%{roleinstprefix}nbde_client/LICENSE
+%license %{installbase}/%{roleinstprefix}certificate/LICENSE
+%license %{installbase}/%{roleinstprefix}crypto_policies/LICENSE
+%license %{installbase}/%{roleinstprefix}sshd/LICENSE
+%license %{installbase}/%{roleinstprefix}ssh/LICENSE
+%license %{installbase}/%{roleinstprefix}ha_cluster/LICENSE
+
+%{ansible_collection_files}
 
 %changelog
-* Tue Nov 24 2020 Noriko Hosoi - 1.0-21
-- logging: Support oVirt input + elasticsearch output.
-  Resolves: rhbz#1889893
-- logging: Fixing a logic bug in elasticsearch output template.
-  Resolves: rhbz#1878857
-- logging: Support property-based filters in the files and forwards outputs.
-  Resolves: rhbz#1889492
-
-* Tue Sep 22 2020 Pavel Cahyna - 1.0-20
-- storage: backport upstream PR #168 to prevent toggling encryption in safe mode,
-  as it is a destructive operation. Resolves rhbz#1881524
+* Wed Mar 17 2021 Noriko Hosoi - 1.0.1-1
+- Fix the description field in galaxy.yml
+- Remove "Technology Preview" from the Collection README
+- Merge the individual roles' ignore files and add the result to the package
+- Add a note to each module doc to indicate it is private
+- Add patches for network and storage role ansible-test fixes,
+  Resolves rhbz#1935451
+- Simplify doc tags in %%files, correcting a forgotten doc tag for ha_cluster
+- Suppress one ansible-lint warning in ha_cluster

+* Tue Feb 23 2021 Fernando Fernandez Mancera - 1.0.0-32
+- Add a patch for the inclusive language leftover on the network role README.md,
+  Resolves rhbz#1931931
+
+* Mon Feb 22 2021 Pavel Cahyna - 1.0.0-31
+- Rebase the certificate role to pick up a test fix, Resolves rhbz#1931568
+- Rebase the logging role to fix the default private key path,
+  upstream PR #218
+
+* Mon Feb 22 2021 Pavel Cahyna - 1.0.0-30
+- Correct a merge botch in the previous build (ssh/README.md is a doc file)
+- Update galaxy.yml even on Fedora; auto-maintenance may not have
+  a consistent version number
+- Update the collection doc transformation to match a modified text
+  and include the Tech Preview note again
+
+* Thu Feb 18 2021 Pavel Cahyna - 1.0.0-29
+- Change the internal role prefix to the more descriptive private_${role}_subrole_
+- Sync spec improvements from Fedora and introduce helper macros;
+  no functional change except for the license files location
+- Disable the mssql metrics test on non-x86_64, where the packages
+  are not available. Upstream PR #73
+
+* Wed Feb 17 2021 Rich Megginson - 1.0.0-28
+- Add patch for sshd https://github.com/willshersystems/ansible-sshd/pull/155
+  for ansible 2.8/jinja 2.7 support in the sshd role
+- Rebase certificate, kernel_settings, nbde_client for jinja27
+- Rebase the logging role, Resolves rhbz#1927943
+- Rebase the storage role, Resolves rhbz#1894651 - interpretation of
+  omitted parameters
+- Apply storage PR #201 to dispense with the need to list all disks
+  in existing pools, Resolves rhbz#1894676
+- Apply storage PR #199 to allow reducing the requested volume sizes
+  if needed to fit, Resolves rhbz#1894647
+- Rebase the network role, Resolves rhbz#1893959, rhbz#1893957
+- Add the ssh client role, Resolves rhbz#1893712
+- Fix a minor issue in selinux - no variable named present,
+  Resolves rhbz#1926947
+- Prefix internal roles with private_, Resolves rhbz#1927417
+- Add the ha_cluster role, Resolves rhbz#1893743
+
+* Thu Feb 11 2021 Pavel Cahyna - 1.0.0-27
+- Rebase the logging role, Resolves rhbz#1889484
+- Fixes to collection docs and galaxy metadata from nhosoi
+- Apply network PR #350, Resolves rhbz#1927392
+
+* Wed Feb 3 2021 Pavel Cahyna - 1.0.0-26
+- Rebase the metrics role, Resolves rhbz#1895188, rhbz#1893908
+
+* Tue Jan 26 2021 Pavel Cahyna - 1.0.0-25
+- Apply storage PR #153 to fix a problem with partition names on NVMe devices,
+  Resolves: rhbz#1865990
+- Remove symlinks to roles under tests
+- Clean up role directories - remove files starting with . in roles' root
+  directories and Git files under tests. Resolves rhbz#1650550
+- Add collection support; make Version semver-compatible: 1.0 -> 1.0.0,
+  Resolves rhbz#1893906
+- Autogenerate the Automation Hub README.md if building for RHEL
+- Renumber sources: Source is now auto-maintenance, since it is the root
+  of the source tree; kdump becomes Source4 (4 was originally firewall)
+- Introduce bcond_with/without ansible to work on Fedora, RHEL and EPEL
+- Rebase the certificate role to include collection-related workarounds,
+  no change in behavior intended
+- Rebase the network role; includes collection-related workarounds
+- Revert an invasive network change to enable EPEL (PR #335) and implement
+  a minimal version
+
+* Fri Jan 15 2021 Pavel Cahyna - 1.0-24
+- Apply PR #63 for kdump to fix a problem in a test introduced by the rebase
+
+* Fri Jan 8 2021 Pavel Cahyna - 1.0-23
+- Add {crypto_policies,sshd}/README.md to docfiles, thanks jjelen
+- Fix the role name in the selinux patch
+- Add an sshd role example and a README fix
+- Fix the role name in sshd role tests and docs
+- Backport network role PR #298 to fix problems often triggered by the CI:
+  "error: down connection failed while waiting", Resolves rhbz#1817242
+- Disable the bond test in downstream CI; it started to break DNS in RHEL 8.4.
+  Related: rhbz#1915017
+
+* Thu Jan 7 2021 Pavel Cahyna - 1.0-22
+- Rebase kdump, certificate, storage, selinux, nbde_client/server, and
+  kernel_settings in preparation for collections.
+  Includes upstream PR #168 for storage to prevent toggling encryption
+  in safe mode, as it is a destructive operation. Resolves rhbz#1881524
+- Introduce & use simpler macros for Sources management,
+  similar to %%forgemeta:
+  https://docs.fedoraproject.org/en-US/packaging-guidelines/SourceURL/
+- Use a script to perform the prefix transformation for all roles, to reduce
+  the number of patches
+- Rebase tlog to add exclude_{users,groups} support, Resolves rhbz#1895472
+- Add the crypto_policies role, Resolves rhbz#1893699
+- Add the sshd role, Resolves rhbz#1893696
 
 * Mon Aug 24 2020 Pavel Cahyna - 1.0-19
 - Rebase network role to latest upstream, resolves rhbz#1800627
@@ -611,7 +984,7 @@ cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}certificate/README.m
 - Fix merge botch
 
 * Mon Mar 19 2018 Troy Dawson - 0.6-3.1
-- Use -a (after cd) instead of -b (before cd) in %setup
+- Use -a (after cd) instead of -b (before cd) in %%setup
 
 * Wed Mar 14 2018 Pavel Cahyna - 0.6-3
 - Minor corrections of the previous change by Till Maas.