diff --git a/.gitignore b/.gitignore
index 48b2ab6..9fa6d86 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,18 +1,18 @@
 SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz
-SOURCES/auto-maintenance-e5ed203b2d7224c0bf0c3fd55452456c8f468cad.tar.gz
-SOURCES/certificate-50041ce55348fcce34aba4cbe3ea160c5d890ab3.tar.gz
-SOURCES/crypto_policies-76b2d5b0460dba22c5d290c1af96e4fdb3434cb9.tar.gz
+SOURCES/auto-maintenance-8f069305caa0a142c2c6ac14bd4d331282a1c079.tar.gz
+SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz
+SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz
 SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz
 SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz
-SOURCES/kernel_settings-e5e5abb35fb695e22ccffa855c98ab882650480e.tar.gz
-SOURCES/logging-4b07edf4e84882c9d0fb979092ba5953aac0b4d5.tar.gz
+SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz
+SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz
 SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz
-SOURCES/nbde_client-3af7452e4861ee2363b29b23bf78bf11e06be142.tar.gz
-SOURCES/nbde_server-1.0.1.tar.gz
+SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz
+SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz
 SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz
 SOURCES/postfix-0.1.tar.gz
 SOURCES/selinux-1.1.1.tar.gz
-SOURCES/ssh-effa0a0d993832dee726290f263a2182cf3eacda.tar.gz
+SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz
 SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz
 SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz
 SOURCES/tlog-1.1.0.tar.gz
diff --git a/.rhel-system-roles.metadata b/.rhel-system-roles.metadata
index dedf84c..e7beb14 100644
--- a/.rhel-system-roles.metadata
+++ b/.rhel-system-roles.metadata
@@ -1,18 +1,18 @@
 77e952b62e634c69e36115845b4f24ee3bfe76b7 SOURCES/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz
-c354a1d24b522a356ef487cf8a3f357ab6213b41 SOURCES/auto-maintenance-e5ed203b2d7224c0bf0c3fd55452456c8f468cad.tar.gz
-20590d1e4ed8df7578926b7aab79e8fa1344be73 SOURCES/certificate-50041ce55348fcce34aba4cbe3ea160c5d890ab3.tar.gz
-513057251590e81b629a69a4ed704b0976b1bc44 SOURCES/crypto_policies-76b2d5b0460dba22c5d290c1af96e4fdb3434cb9.tar.gz
+31d33f92384e423baebb073d3a6e3d271cbef5a5 SOURCES/auto-maintenance-8f069305caa0a142c2c6ac14bd4d331282a1c079.tar.gz
+7017c00e2ceede1f6019ba17a56e0145e6012013 SOURCES/certificate-0376ceece57882ade8ffaf431b7866aae3e7fed1.tar.gz
+469a1a39a19d346c10bf07071a7af52832885047 SOURCES/crypto_policies-2e2941c5545571fc8bc494099bdf970f498b9d38.tar.gz
 838ed06d8d092271fff04bd5e7c16db4661e8567 SOURCES/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz
 fa3d5daf6cf1ceeaa87f58c16e11153cf250e2fa SOURCES/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz
-c81700b81d7acf48b9eadb1ed7a9bf04b994cdb1 SOURCES/kernel_settings-e5e5abb35fb695e22ccffa855c98ab882650480e.tar.gz
-3c94b12780f01bbdb8b77fc3515ccef1200ec1f6 SOURCES/logging-4b07edf4e84882c9d0fb979092ba5953aac0b4d5.tar.gz
+471863c062a32a37a18c0ee1b7f0c50387baec99 SOURCES/kernel_settings-4c81fd1380712ab0641b6837f092dd9caeeae0a6.tar.gz
+60efc730800600f87e386e16730980ea08417d34 SOURCES/logging-07e08107e7ccba5822f8a7aaec1a2ff0a221bede.tar.gz
 821d8ebef2d30a41f0fa65bdc5e550f09b375370 SOURCES/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz
-a59b3d28bf5da0abbb3e2e188b0b65b2a78cd500 SOURCES/nbde_client-3af7452e4861ee2363b29b23bf78bf11e06be142.tar.gz
-c55d45d134042b00ece17f2a21bb945c571310b3 SOURCES/nbde_server-1.0.1.tar.gz
+66b84d088e2c3989f00b3151cc7fdc40f768f9a5 SOURCES/nbde_client-19f06159582550c8463f7d8492669e26fbdf760b.tar.gz
+0e4e133b75e245d17c0c5a1097ab95f047ae6f65 SOURCES/nbde_server-4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678.tar.gz
 c2d1aaca43cbe787ee7b1e41e875a76b8f95831d SOURCES/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz
 66c82331f4ac9598c506c3999965b4d07dbfe49d SOURCES/postfix-0.1.tar.gz
 f2ad38bd93487962de511b1f4bc9dc6607a5ab36 SOURCES/selinux-1.1.1.tar.gz
-b160fd539c99429a33c0d65c818ad0c98c5ca7a4 SOURCES/ssh-effa0a0d993832dee726290f263a2182cf3eacda.tar.gz
+aef51c665e61166e091440862cfa4e6a8fe3c29d SOURCES/ssh-21adc637511db86b5ba279a70a7301ef3a170669.tar.gz
 8b7d7c14e76aa1a872f22d5cd6d3c9a850868ed3 SOURCES/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz
 ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz
 486d7b845348755e7f189afd95f32bbe97c74661 SOURCES/tlog-1.1.0.tar.gz
diff --git a/SOURCES/collection_readme.sh b/SOURCES/collection_readme.sh
index a229311..94e8cae 100755
--- a/SOURCES/collection_readme.sh
+++ b/SOURCES/collection_readme.sh
@@ -5,8 +5,7 @@ set -euxo pipefail
 readme_md=${1:-"lsr_role2collection/collection_readme.md"}
 
 sed -i -e '/## Currently supported distributions/{:1;/## Dependencies/!{N;b 1};s|.*|## Dependencies|}' \
-    -e 's/\(Linux System Roles is a set of roles for managing Linux system components.\)/\1\n\nThis collection is available as a Technology Preview./' \
-    -e 's/Linux/RHEL/g' \
+    -e 's/Linux/Red Hat Enterprise Linux/g' \
     -e 's/Ansible Galaxy/Automation Hub/g' \
     -e 's/fedora\(.\)linux_system_roles/redhat\1rhel_system_roles/g' \
     -e 's/linux-system-roles/rhel-system-roles/g' \
diff --git a/SOURCES/network-ansible-test.diff b/SOURCES/network-ansible-test.diff
new file mode 100644
index 0000000..8f88e21
--- /dev/null
+++ b/SOURCES/network-ansible-test.diff
@@ -0,0 +1,835 @@
+From 7ae16e9ff5291f06ba0d7224a0d6c36b780ea0a2 Mon Sep 17 00:00:00 2001
+From: Rich Megginson <rmeggins@redhat.com>
+Date: Wed, 3 Mar 2021 11:37:56 -0700
+Subject: [PATCH] fix most ansible-test issues, suppress the rest
+
+Automation Hub, and possibly Galaxy in the future, require the
+collection to be screened with `ansible-test sanity` among other
+checks.  The role had a number of issues:
+* Use `AssertionError` instead of `assert`
+* Use of `logging` module not in accordance with standards, but these
+  are ok and the errors were suppressed
+* Several import errors which are ok because they are checked
+  elsewhere
+* Many of the module files use `#!` shebang - not sure why, but
+  the usage is allowed
+* __init__.py in the module_utils directories must be empty, so a
+  new file myerror.py was added to move the code from __init__.py
+* The documentation block in the module was not properly constructed
+  or formatted.
+* shellcheck issues, including removing unused files
+* use `dummy` instead of `_` (underscore) for variables that are
+  unused
+
+add WARNING to module docs - collection users should not use directly
+
+Signed-off-by: Rich Megginson <rmeggins@redhat.com>
+(cherry picked from commit 7459a29e9104bf01987399153baf0a1c1df05929)
+---
+ .github/workflows/tox.yml                     |  4 +-
+ .sanity-ansible-ignore-2.9.txt                | 47 ++++++++++
+ README.md                                     |  2 +-
+ library/network_connections.py                | 88 ++++++++++++-------
+ module_utils/network_lsr/__init__.py          |  7 --
+ .../network_lsr/argument_validator.py         |  9 +-
+ module_utils/network_lsr/ethtool.py           |  6 +-
+ module_utils/network_lsr/myerror.py           | 11 +++
+ module_utils/network_lsr/nm/__init__.py       |  4 +
+ .../network_lsr/nm/active_connection.py       | 35 ++++----
+ module_utils/network_lsr/nm/client.py         |  4 +
+ module_utils/network_lsr/nm/connection.py     | 18 ++--
+ module_utils/network_lsr/nm/error.py          |  4 +
+ module_utils/network_lsr/nm/provider.py       |  8 +-
+ module_utils/network_lsr/nm_provider.py       |  4 +
+ module_utils/network_lsr/utils.py             | 10 ++-
+ tests/ensure_provider_tests.py                |  8 +-
+ tests/get_coverage.sh                         |  6 +-
+ tests/get_total_coverage.sh                   |  2 +-
+ tests/integration/test_ethernet.py            |  4 +-
+ tests/merge_coverage.sh                       |  3 +
+ tests/setup_module_utils.sh                   | 41 ---------
+ tox.ini                                       |  3 -
+ 23 files changed, 199 insertions(+), 129 deletions(-)
+ create mode 100644 .sanity-ansible-ignore-2.9.txt
+ create mode 100644 module_utils/network_lsr/myerror.py
+ delete mode 100755 tests/setup_module_utils.sh
+
+diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml
+index 207bcba..ba0f4c6 100644
+--- a/.github/workflows/tox.yml
++++ b/.github/workflows/tox.yml
+@@ -3,7 +3,7 @@ name: tox
+ on:  # yamllint disable-line rule:truthy
+   - pull_request
+ env:
+-  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0"
++  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0"
+   LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*'
+   LSR_MSCENARIOS: default
+   # LSR_EXTRA_PACKAGES: "libdbus-1-dev libgirepository1.0-dev python3-dev"
+@@ -36,7 +36,7 @@ jobs:
+           toxenvs="py${toxpyver}"
+           case "$toxpyver" in
+           27) toxenvs="${toxenvs},coveralls,flake8,pylint" ;;
+-          36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection" ;;
++          36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection,ansible-test" ;;
+           37) toxenvs="${toxenvs},coveralls" ;;
+           38) toxenvs="${toxenvs},coveralls" ;;
+           esac
+diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt
+new file mode 100644
+index 0000000..439197e
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.9.txt
+@@ -0,0 +1,47 @@
++tests/network/ensure_provider_tests.py compile-2.7!skip
++tests/network/ensure_provider_tests.py compile-3.5!skip
++plugins/module_utils/network_lsr/nm/__init__.py empty-init!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/client.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/connection.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/provider.py import-2.7!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.5!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.6!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.7!skip
++plugins/module_utils/network_lsr/nm/active_connection.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/client.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/connection.py import-3.8!skip
++plugins/module_utils/network_lsr/nm/provider.py import-3.8!skip
++plugins/module_utils/network_lsr/__init__.py shebang!skip
++plugins/module_utils/network_lsr/argument_validator.py shebang!skip
++plugins/module_utils/network_lsr/utils.py shebang!skip
++plugins/module_utils/network_lsr/myerror.py shebang!skip
++tests/network/covstats shebang!skip
++tests/network/ensure_provider_tests.py shebang!skip
++tests/network/get_coverage.sh shebang!skip
++tests/network/get_total_coverage.sh shebang!skip
++tests/network/merge_coverage.sh shebang!skip
++tests/network/ensure_provider_tests.py future-import-boilerplate!skip
++tests/network/integration/conftest.py future-import-boilerplate!skip
++tests/network/integration/test_ethernet.py future-import-boilerplate!skip
++tests/network/unit/test_network_connections.py future-import-boilerplate!skip
++tests/network/unit/test_nm_provider.py future-import-boilerplate!skip
++tests/network/ensure_provider_tests.py metaclass-boilerplate!skip
++tests/network/integration/conftest.py metaclass-boilerplate!skip
++tests/network/integration/test_ethernet.py metaclass-boilerplate!skip
++tests/network/unit/test_network_connections.py metaclass-boilerplate!skip
++tests/network/unit/test_nm_provider.py metaclass-boilerplate!skip
++plugins/modules/network_connections.py validate-modules:missing-examples
++plugins/modules/network_connections.py validate-modules:missing-gplv3-license
++plugins/modules/network_connections.py validate-modules:no-default-for-required-parameter
++plugins/modules/network_connections.py validate-modules:parameter-type-not-in-doc
++plugins/modules/network_connections.py validate-modules:undocumented-parameter
+diff --git a/README.md b/README.md
+index c1462b6..c257c08 100644
+--- a/README.md
++++ b/README.md
+@@ -145,7 +145,7 @@ a consequence, `state: up` always changes the system.
+ 
+ You can deactivate a connection profile, even if is currently not active. As a consequence, `state: down` always changes the system.
+ 
+-Note that if the `state` option is unset, the connection profile’s runtime state will not be changed.
++Note that if the `state` option is unset, the connection profile's runtime state will not be changed.
+ 
+ 
+ ### `persistent_state`
+diff --git a/library/network_connections.py b/library/network_connections.py
+index 3224892..3a6e47f 100644
+--- a/library/network_connections.py
++++ b/library/network_connections.py
+@@ -2,6 +2,30 @@
+ # -*- coding: utf-8 -*-
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
++DOCUMENTATION = """
++---
++module: network_connections
++author: Thomas Haller (@thom311)
++short_description: module for network role to manage connection profiles
++requirements: [pygobject, dbus, NetworkManager]
++version_added: "2.0"
++description:
++  - "WARNING: Do not use this module directly! It is only for role internal use."
++  - |
++    Manage networking profiles (connections) for NetworkManager and
++    initscripts networking providers. Documentation needs to be written. Note
++    that the network_connections module tightly integrates with the network
++    role and currently it is not expected to use this module outside the role.
++    Thus, consult README.md for examples for the role.  The requirements are
++    only for the NetworkManager (nm) provider.
++options: {}
++"""
++
++
+ import errno
+ import functools
+ import os
+@@ -16,7 +40,7 @@ import logging
+ # pylint: disable=import-error, no-name-in-module
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils.network_lsr import ethtool  # noqa:E501
+-from ansible.module_utils.network_lsr import MyError  # noqa:E501
++from ansible.module_utils.network_lsr.myerror import MyError  # noqa:E501
+ 
+ from ansible.module_utils.network_lsr.argument_validator import (  # noqa:E501
+     ArgUtil,
+@@ -30,22 +54,6 @@ from ansible.module_utils.network_lsr import nm_provider  # noqa:E501
+ # pylint: enable=import-error, no-name-in-module
+ 
+ 
+-DOCUMENTATION = """
+----
+-module: network_connections
+-author: "Thomas Haller (thaller@redhat.com)"
+-short_description: module for network role to manage connection profiles
+-requirements: for 'nm' provider requires pygobject, dbus and NetworkManager.
+-version_added: "2.0"
+-description: Manage networking profiles (connections) for NetworkManager and
+-  initscripts networking providers.
+-options: Documentation needs to be written. Note that the network_connections
+-  module tightly integrates with the network role and currently it is not
+-  expected to use this module outside the role. Thus, consult README.md for
+-  examples for the role.
+-"""
+-
+-
+ ###############################################################################
+ PERSISTENT_STATE = "persistent_state"
+ ABSENT_STATE = "absent"
+@@ -772,7 +780,7 @@ class NMUtil:
+         if compare_flags is None:
+             compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP
+ 
+-        return not (not (con_a.compare(con_b, compare_flags)))
++        return con_a.compare(con_b, compare_flags)
+ 
+     def connection_is_active(self, con):
+         NM = Util.NM()
+@@ -1390,7 +1398,7 @@ class RunEnvironment(object):
+     def check_mode_set(self, check_mode, connections=None):
+         c = self._check_mode
+         self._check_mode = check_mode
+-        assert (
++        if not (
+             (c is None and check_mode in [CheckMode.PREPARE])
+             or (
+                 c == CheckMode.PREPARE
+@@ -1399,7 +1407,8 @@ class RunEnvironment(object):
+             or (c == CheckMode.PRE_RUN and check_mode in [CheckMode.REAL_RUN])
+             or (c == CheckMode.REAL_RUN and check_mode in [CheckMode.DONE])
+             or (c == CheckMode.DRY_RUN and check_mode in [CheckMode.DONE])
+-        )
++        ):
++            raise AssertionError("check_mode value is incorrect {0}".format(c))
+         self._check_mode_changed(c, check_mode, connections)
+ 
+ 
+@@ -1461,7 +1470,8 @@ class RunEnvironmentAnsible(RunEnvironment):
+         warn_traceback=False,
+         force_fail=False,
+     ):
+-        assert idx >= -1
++        if not idx >= -1:
++            raise AssertionError("idx {0} is less than -1".format(idx))
+         self._log_idx += 1
+         self.run_results[idx]["log"].append((severity, msg, self._log_idx))
+         if severity == LogLevel.ERROR:
+@@ -1598,14 +1608,15 @@ class Cmd(object):
+     def connections_data(self):
+         c = self._connections_data
+         if c is None:
+-            assert self.check_mode in [
++            if self.check_mode not in [
+                 CheckMode.DRY_RUN,
+                 CheckMode.PRE_RUN,
+                 CheckMode.REAL_RUN,
+-            ]
+-            c = []
+-            for _ in range(0, len(self.connections)):
+-                c.append({"changed": False})
++            ]:
++                raise AssertionError(
++                    "invalid value {0} for self.check_mode".format(self.check_mode)
++                )
++            c = [{"changed": False}] * len(self.connections)
+             self._connections_data = c
+         return c
+ 
+@@ -1614,11 +1625,14 @@ class Cmd(object):
+             c["changed"] = False
+ 
+     def connections_data_set_changed(self, idx, changed=True):
+-        assert self._check_mode in [
++        if self._check_mode not in [
+             CheckMode.PRE_RUN,
+             CheckMode.DRY_RUN,
+             CheckMode.REAL_RUN,
+-        ]
++        ]:
++            raise AssertionError(
++                "invalid value {0} for self._check_mode".format(self._check_mode)
++            )
+         if not changed:
+             return
+         self.connections_data[idx]["changed"] = changed
+@@ -1688,7 +1702,10 @@ class Cmd(object):
+         # modify the connection.
+ 
+         con = self.connections[idx]
+-        assert con["state"] in ["up", "down"]
++        if con["state"] not in ["up", "down"]:
++            raise AssertionError(
++                "connection state {0} not 'up' or 'down'".format(con["state"])
++            )
+ 
+         # also check, if the current profile is 'up' with a 'type' (which
+         # possibly modifies the connection as well)
+@@ -1736,7 +1753,9 @@ class Cmd(object):
+         elif self._check_mode != CheckMode.DONE:
+             c = CheckMode.DONE
+         else:
+-            assert False
++            raise AssertionError(
++                "invalid value {0} for self._check_mode".format(self._check_mode)
++            )
+         self._check_mode = c
+         self.run_env.check_mode_set(c)
+         return c
+@@ -1902,7 +1921,12 @@ class Cmd_nm(Cmd):
+ 
+             name = connection["name"]
+             if not name:
+-                assert connection["persistent_state"] == "absent"
++                if not connection["persistent_state"] == "absent":
++                    raise AssertionError(
++                        "persistent_state must be 'absent' not {0} when there is no connection 'name'".format(
++                            connection["persistent_state"]
++                        )
++                    )
+                 continue
+             if name in names:
+                 exists = names[name]["nm.exists"]
+@@ -1979,7 +2003,7 @@ class Cmd_nm(Cmd):
+                     idx, "ethtool.%s specified but not supported by NM", specified
+                 )
+ 
+-            for option, _ in specified.items():
++            for option in specified.keys():
+                 nm_name = nm_get_name_fcnt(option)
+                 if not nm_name:
+                     self.log_fatal(
+diff --git a/module_utils/network_lsr/__init__.py b/module_utils/network_lsr/__init__.py
+index 22c717c..e69de29 100644
+--- a/module_utils/network_lsr/__init__.py
++++ b/module_utils/network_lsr/__init__.py
+@@ -1,7 +0,0 @@
+-#!/usr/bin/python3 -tt
+-# vim: fileencoding=utf8
+-# SPDX-License-Identifier: BSD-3-Clause
+-
+-
+-class MyError(Exception):
+-    pass
+diff --git a/module_utils/network_lsr/argument_validator.py b/module_utils/network_lsr/argument_validator.py
+index 24ffdc4..f338489 100644
+--- a/module_utils/network_lsr/argument_validator.py
++++ b/module_utils/network_lsr/argument_validator.py
+@@ -2,12 +2,16 @@
+ # vim: fileencoding=utf8
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import posixpath
+ import socket
+ import re
+ 
+ # pylint: disable=import-error, no-name-in-module
+-from ansible.module_utils.network_lsr import MyError  # noqa:E501
++from ansible.module_utils.network_lsr.myerror import MyError  # noqa:E501
+ from ansible.module_utils.network_lsr.utils import Util  # noqa:E501
+ 
+ UINT32_MAX = 0xFFFFFFFF
+@@ -72,7 +76,8 @@ class ArgUtil:
+ 
+ class ValidationError(MyError):
+     def __init__(self, name, message):
+-        Exception.__init__(self, name + ": " + message)
++        # pylint: disable=non-parent-init-called
++        super(ValidationError, self).__init__(name + ": " + message)
+         self.error_message = message
+         self.name = name
+ 
+diff --git a/module_utils/network_lsr/ethtool.py b/module_utils/network_lsr/ethtool.py
+index 21e2152..3246bef 100644
+--- a/module_utils/network_lsr/ethtool.py
++++ b/module_utils/network_lsr/ethtool.py
+@@ -1,5 +1,9 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import array
+ import struct
+ import fcntl
+@@ -46,7 +50,7 @@ def get_perm_addr(ifname):
+             res = ecmd.tobytes()
+         except AttributeError:  # tobytes() is not available in python2
+             res = ecmd.tostring()
+-        _, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res)
++        dummy, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res)
+         perm_addr = Util.mac_ntoa(perm_addr[:size])
+     except IOError:
+         perm_addr = None
+diff --git a/module_utils/network_lsr/myerror.py b/module_utils/network_lsr/myerror.py
+new file mode 100644
+index 0000000..f785265
+--- /dev/null
++++ b/module_utils/network_lsr/myerror.py
+@@ -0,0 +1,11 @@
++#!/usr/bin/python3 -tt
++# vim: fileencoding=utf8
++# SPDX-License-Identifier: BSD-3-Clause
++
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
++
++class MyError(Exception):
++    pass
+diff --git a/module_utils/network_lsr/nm/__init__.py b/module_utils/network_lsr/nm/__init__.py
+index 58fbb5a..74c17cb 100644
+--- a/module_utils/network_lsr/nm/__init__.py
++++ b/module_utils/network_lsr/nm/__init__.py
+@@ -1,5 +1,9 @@
+ # Relative import is not support by ansible 2.8 yet
+ # pylint: disable=import-error, no-name-in-module
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ from ansible.module_utils.network_lsr.nm import provider  # noqa:E501
+ 
+ # pylint: enable=import-error, no-name-in-module
+diff --git a/module_utils/network_lsr/nm/active_connection.py b/module_utils/network_lsr/nm/active_connection.py
+index a6c5a37..432142c 100644
+--- a/module_utils/network_lsr/nm/active_connection.py
++++ b/module_utils/network_lsr/nm/active_connection.py
+@@ -2,6 +2,10 @@
+ 
+ # Handle NM.ActiveConnection
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import logging
+ 
+ # Relative import is not support by ansible 2.8 yet
+@@ -21,19 +25,15 @@ def deactivate_active_connection(nm_ac, timeout, check_mode):
+         return False
+     if not check_mode:
+         main_loop = client.get_mainloop(timeout)
+-        logging.debug(
+-            "Deactivating {id} with timeout {timeout}".format(
+-                id=nm_ac.get_id(), timeout=timeout
+-            )
+-        )
++        logging.debug("Deactivating %s with timeout %s", nm_ac.get_id(), timeout)
+         user_data = main_loop
+         handler_id = nm_ac.connect(
+             NM_AC_STATE_CHANGED_SIGNAL, _nm_ac_state_change_callback, user_data
+         )
+         logging.debug(
+-            "Registered {signal} on client.NM.ActiveConnection {id}".format(
+-                signal=NM_AC_STATE_CHANGED_SIGNAL, id=nm_ac.get_id()
+-            )
++            "Registered %s on client.NM.ActiveConnection %s",
++            NM_AC_STATE_CHANGED_SIGNAL,
++            nm_ac.get_id(),
+         )
+         if nm_ac.props.state != client.NM.ActiveConnectionState.DEACTIVATING:
+             nm_client = client.get_client()
+@@ -44,9 +44,7 @@ def deactivate_active_connection(nm_ac, timeout, check_mode):
+                 _nm_ac_deactivate_call_back,
+                 user_data,
+             )
+-            logging.debug(
+-                "Deactivating client.NM.ActiveConnection {0}".format(nm_ac.get_id())
+-            )
++            logging.debug("Deactivating client.NM.ActiveConnection %s", nm_ac.get_id())
+         main_loop.run()
+     return True
+ 
+@@ -56,14 +54,13 @@ def _nm_ac_state_change_callback(nm_ac, state, reason, user_data):
+     if main_loop.is_cancelled:
+         return
+     logging.debug(
+-        "Got client.NM.ActiveConnection state change: {id}: {state} {reason}".format(
+-            id=nm_ac.get_id(), state=state, reason=reason
+-        )
++        "Got client.NM.ActiveConnection state change: %s: %s %s",
++        nm_ac.get_id(),
++        state,
++        reason,
+     )
+     if nm_ac.props.state == client.NM.ActiveConnectionState.DEACTIVATED:
+-        logging.debug(
+-            "client.NM.ActiveConnection {0} is deactivated".format(nm_ac.get_id())
+-        )
++        logging.debug("client.NM.ActiveConnection %s is deactivated", nm_ac.get_id())
+         main_loop.quit()
+ 
+ 
+@@ -82,9 +79,7 @@ def _nm_ac_deactivate_call_back(nm_client, result, user_data):
+             client.NM.ManagerError.quark(), client.NM.ManagerError.CONNECTIONNOTACTIVE
+         ):
+             logging.info(
+-                "Connection is not active on {0}, no need to deactivate".format(
+-                    nm_ac_id
+-                )
++                "Connection is not active on %s, no need to deactivate", nm_ac_id
+             )
+             if nm_ac:
+                 nm_ac.handler_disconnect(handler_id)
+diff --git a/module_utils/network_lsr/nm/client.py b/module_utils/network_lsr/nm/client.py
+index 4992887..f47cc53 100644
+--- a/module_utils/network_lsr/nm/client.py
++++ b/module_utils/network_lsr/nm/client.py
+@@ -1,5 +1,9 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import logging
+ 
+ # Relative import is not support by ansible 2.8 yet
+diff --git a/module_utils/network_lsr/nm/connection.py b/module_utils/network_lsr/nm/connection.py
+index 6982034..474da8d 100644
+--- a/module_utils/network_lsr/nm/connection.py
++++ b/module_utils/network_lsr/nm/connection.py
+@@ -2,6 +2,10 @@
+ 
+ # Handle NM.RemoteConnection
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import logging
+ 
+ # Relative import is not support by ansible 2.8 yet
+@@ -26,9 +30,10 @@ def delete_remote_connection(nm_profile, timeout, check_mode):
+             user_data,
+         )
+         logging.debug(
+-            "Deleting profile {id}/{uuid} with timeout {timeout}".format(
+-                id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout
+-            )
++            "Deleting profile %s/%s with timeout %s",
++            nm_profile.get_id(),
++            nm_profile.get_uuid(),
++            timeout,
+         )
+         main_loop.run()
+     return True
+@@ -78,9 +83,10 @@ def volatilize_remote_connection(nm_profile, timeout, check_mode):
+             user_data,
+         )
+         logging.debug(
+-            "Volatilizing profile {id}/{uuid} with timeout {timeout}".format(
+-                id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout
+-            )
++            "Volatilizing profile %s/%s with timeout %s",
++            nm_profile.get_id(),
++            nm_profile.get_uuid(),
++            timeout,
+         )
+         main_loop.run()
+     return True
+diff --git a/module_utils/network_lsr/nm/error.py b/module_utils/network_lsr/nm/error.py
+index 42014ec..d87bc72 100644
+--- a/module_utils/network_lsr/nm/error.py
++++ b/module_utils/network_lsr/nm/error.py
+@@ -1,5 +1,9 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ 
+ class LsrNetworkNmError(Exception):
+     pass
+diff --git a/module_utils/network_lsr/nm/provider.py b/module_utils/network_lsr/nm/provider.py
+index 52e7502..567c9d1 100644
+--- a/module_utils/network_lsr/nm/provider.py
++++ b/module_utils/network_lsr/nm/provider.py
+@@ -1,5 +1,9 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import logging
+ 
+ # Relative import is not support by ansible 2.8 yet
+@@ -25,7 +29,7 @@ class NetworkManagerProvider:
+                     nm_ac, timeout, check_mode
+                 )
+         if not changed:
+-            logging.info("No active connection for {0}".format(connection_name))
++            logging.info("No active connection for %s", connection_name)
+ 
+         return changed
+ 
+@@ -49,7 +53,7 @@ class NetworkManagerProvider:
+                         nm_profile, timeout, check_mode
+                     )
+         if not changed:
+-            logging.info("No connection with UUID {0} to volatilize".format(uuid))
++            logging.info("No connection with UUID %s to volatilize", uuid)
+ 
+         return changed
+ 
+diff --git a/module_utils/network_lsr/nm_provider.py b/module_utils/network_lsr/nm_provider.py
+index c75242a..d6168eb 100644
+--- a/module_utils/network_lsr/nm_provider.py
++++ b/module_utils/network_lsr/nm_provider.py
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ """ Support for NetworkManager aka the NM provider """
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ # pylint: disable=import-error, no-name-in-module
+ from ansible.module_utils.network_lsr.utils import Util  # noqa:E501
+ 
+diff --git a/module_utils/network_lsr/utils.py b/module_utils/network_lsr/utils.py
+index 73d9528..bc258fe 100644
+--- a/module_utils/network_lsr/utils.py
++++ b/module_utils/network_lsr/utils.py
+@@ -2,18 +2,23 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # vim: fileencoding=utf8
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import socket
+ import sys
+ import uuid
+ 
+ # pylint: disable=import-error, no-name-in-module
+-from ansible.module_utils.network_lsr import MyError  # noqa:E501
++from ansible.module_utils.network_lsr.myerror import MyError  # noqa:E501
+ 
+ 
+ class Util:
+ 
+     PY3 = sys.version_info[0] == 3
+ 
++    # pylint: disable=undefined-variable
+     STRING_TYPE = str if PY3 else basestring  # noqa:F821
+ 
+     @staticmethod
+@@ -241,7 +246,8 @@ class Util:
+                     n = int(c, 16) * 16
+                     i = 1
+                 else:
+-                    assert i == 1
++                    if not i == 1:
++                        raise AssertionError("i != 1 - value is {0}".format(i))
+                     n = n + int(c, 16)
+                     i = 2
+                     b.append(n)
+diff --git a/tests/ensure_provider_tests.py b/tests/ensure_provider_tests.py
+index 3620729..4e45e6a 100755
+--- a/tests/ensure_provider_tests.py
++++ b/tests/ensure_provider_tests.py
+@@ -73,8 +73,6 @@ NM_ONLY_TESTS = {
+         MINIMUM_VERSION: "'1.25.1'",
+         "comment": "# NetworkManager 1.25.1 introduced ethtool coalesce support",
+     },
+-    "playbooks/tests_802_1x_updated.yml": {},
+-    "playbooks/tests_802_1x.yml": {},
+     "playbooks/tests_reapply.yml": {},
+     # team interface is not supported on Fedora
+     "playbooks/tests_team.yml": {
+@@ -117,9 +115,7 @@ def create_nm_playbook(test_playbook):
+         EXTRA_RUN_CONDITION, ""
+     )
+     if extra_run_condition:
+-        extra_run_condition = "{}{}\n".format(
+-            EXTRA_RUN_CONDITION_PREFIX, extra_run_condition
+-        )
++        extra_run_condition = f"{EXTRA_RUN_CONDITION_PREFIX}{extra_run_condition}\n"
+ 
+     nm_version_check = ""
+     if minimum_nm_version:
+@@ -212,7 +208,7 @@ def main():
+ 
+     if missing:
+         print("ERROR: No NM or initscripts tests found for:\n" + ", \n".join(missing))
+-        print("Try to generate them with '{} generate'".format(sys.argv[0]))
++        print(f"Try to generate them with '{sys.argv[0]} generate'")
+         returncode = 1
+ 
+     return returncode
+diff --git a/tests/get_coverage.sh b/tests/get_coverage.sh
+index 858a8cf..4524fab 100755
+--- a/tests/get_coverage.sh
++++ b/tests/get_coverage.sh
+@@ -19,7 +19,6 @@ shift
+ playbook="${1}"
+ 
+ coverage_data="remote-coveragedata-${host}-${playbook%.yml}"
+-coverage="/root/.local/bin/coverage"
+ 
+ echo "Getting coverage for ${playbook} on ${host}" >&2
+ 
+@@ -32,10 +31,15 @@ call_ansible() {
+ }
+ 
+ remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)"
++# we want to expand ${remote_coverage_dir} here, so tell SC to be quiet
++# https://github.com/koalaman/shellcheck/wiki/SC2064
++# shellcheck disable=SC2064
+ trap "rm -rf '${remote_coverage_dir}'" EXIT
+ ansible-playbook -i "${host}", get_coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}"
+ 
+ #COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage
++# https://github.com/koalaman/shellcheck/wiki/SC2046
++# shellcheck disable=SC2046
+ ./merge_coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _)
+ 
+ cat > tmp_merge_coveragerc <<EOF
+diff --git a/tests/get_total_coverage.sh b/tests/get_total_coverage.sh
+index ca61746..6413b18 100755
+--- a/tests/get_total_coverage.sh
++++ b/tests/get_total_coverage.sh
+@@ -12,7 +12,7 @@ then
+     exit 1
+ fi
+ 
+-rm -f remote-coveragedata* "${coveragedata}"
++rm -f remote-coveragedata* "${coverage_data}"
+ 
+ 
+ # collect pytest coverage
+diff --git a/tests/integration/test_ethernet.py b/tests/integration/test_ethernet.py
+index d104d23..4fc7417 100644
+--- a/tests/integration/test_ethernet.py
++++ b/tests/integration/test_ethernet.py
+@@ -25,10 +25,10 @@ with mock.patch.dict(
+ class PytestRunEnvironment(nc.RunEnvironment):
+     def log(self, connections, idx, severity, msg, **kwargs):
+         if severity == nc.LogLevel.ERROR:
+-            logging.error("Error: {}".format(connections[idx]))
++            logging.error("Error: %s", connections[idx])
+             raise RuntimeError(msg)
+         else:
+-            logging.debug("Log: {}".format(connections[idx]))
++            logging.debug("Log: %s", connections[idx])
+ 
+     def run_command(self, argv, encoding=None):
+         command = subprocess.Popen(
+diff --git a/tests/merge_coverage.sh b/tests/merge_coverage.sh
+index a33e94d..61fcd00 100755
+--- a/tests/merge_coverage.sh
++++ b/tests/merge_coverage.sh
+@@ -23,6 +23,9 @@ export COVERAGE_FILE="${1}"
+ shift
+ 
+ tempdir="$(mktemp -d /tmp/coverage_merge-XXXXXX)"
++# we want to expand ${tempdir} here, so tell SC to be quiet
++# https://github.com/koalaman/shellcheck/wiki/SC2064
++# shellcheck disable=SC2064
+ trap "rm -rf '${tempdir}'" EXIT
+ 
+ cp --backup=numbered -- "${@}" "${tempdir}"
+diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh
+deleted file mode 100755
+index 18d6a00..0000000
+--- a/tests/setup_module_utils.sh
++++ /dev/null
+@@ -1,41 +0,0 @@
+-#!/bin/bash
+-# SPDX-License-Identifier: MIT
+-
+-set -euo pipefail
+-
+-if [ -n "${DEBUG:-}" ] ; then
+-    set -x
+-fi
+-
+-if [ ! -d "${1:-}" ] ; then
+-    echo Either ansible is not installed, or there is no ansible/module_utils
+-    echo in $1 - Skipping
+-    exit 0
+-fi
+-
+-if [ ! -d "${2:-}" ] ; then
+-    echo Role has no module_utils - Skipping
+-    exit 0
+-fi
+-
+-# we need absolute path for $2
+-absmoddir=$( readlink -f "$2" )
+-
+-# clean up old links to module_utils
+-for item in "$1"/* ; do
+-    if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then
+-        case "$lnitem" in
+-            *"${2}"*) rm -f "$item" ;;
+-        esac
+-    fi
+-done
+-
+-# add new links to module_utils
+-for item in "$absmoddir"/* ; do
+-    case "$item" in
+-        *__pycache__) continue;;
+-        *.pyc) continue;;
+-    esac
+-    bnitem=$( basename "$item" )
+-    ln -s "$item" "$1/$bnitem"
+-done
+diff --git a/tox.ini b/tox.ini
+index 6ff26e7..59c58a2 100644
+--- a/tox.ini
++++ b/tox.ini
+@@ -17,6 +17,3 @@ setenv =
+     RUN_PYTEST_EXTRA_ARGS = -v
+     RUN_FLAKE8_EXTRA_ARGS = --exclude tests/ensure_provider_tests.py,scripts/print_all_options.py,tests/network/ensure_provider_tests.py,.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg
+     LSR_PUBLISH_COVERAGE = normal
+-
+-[testenv:shellcheck]
+-commands = bash -c 'echo shellcheck is currently not enabled - please fix this'
+-- 
+2.30.2
+
diff --git a/SOURCES/selinux-ansible-test-issues.diff b/SOURCES/selinux-ansible-test-issues.diff
new file mode 100644
index 0000000..ef16241
--- /dev/null
+++ b/SOURCES/selinux-ansible-test-issues.diff
@@ -0,0 +1,164 @@
+From 9cbbc3f63052bef0b6a697e066e092a5f9722ce8 Mon Sep 17 00:00:00 2001
+From: Noriko Hosoi <nhosoi@redhat.com>
+Date: Mon, 22 Feb 2021 17:11:05 -0800
+Subject: [PATCH] Patch23: selinux-ansible-test-issues.diff
+
+---
+ .sanity-ansible-ignore-2.10.txt |  2 ++
+ .sanity-ansible-ignore-2.9.txt  |  2 ++
+ library/selogin.py              | 26 ++++++++++-----------
+ tests/setup_module_utils.sh     | 41 ---------------------------------
+ 4 files changed, 16 insertions(+), 55 deletions(-)
+ create mode 100644 .sanity-ansible-ignore-2.10.txt
+ create mode 100644 .sanity-ansible-ignore-2.9.txt
+ delete mode 100755 tests/setup_module_utils.sh
+
+diff --git a/.sanity-ansible-ignore-2.10.txt b/.sanity-ansible-ignore-2.10.txt
+new file mode 100644
+index 0000000..5f8ce1e
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.10.txt
+@@ -0,0 +1,2 @@
++plugins/modules/selogin.py no-get-exception
++plugins/modules/selogin.py validate-modules!skip
+diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt
+new file mode 100644
+index 0000000..5f8ce1e
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.9.txt
+@@ -0,0 +1,2 @@
++plugins/modules/selogin.py no-get-exception
++plugins/modules/selogin.py validate-modules!skip
+diff --git a/library/selogin.py b/library/selogin.py
+index b785c27..6e3fd32 100644
+--- a/library/selogin.py
++++ b/library/selogin.py
+@@ -15,6 +15,9 @@
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
+ 
+ ANSIBLE_METADATA = {
+     "status": ["preview"],
+@@ -22,13 +25,14 @@ ANSIBLE_METADATA = {
+     "version": "1.0",
+ }
+ 
+-DOCUMENTATION = """
++DOCUMENTATION = r"""
+ ---
+ module: selogin
+ short_description: Manages linux user to SELinux user mapping
+ description:
+-     - Manages linux user to SELinux user mapping
+-version_added: "1.0"
++    - "WARNING: Do not use this module directly! It is only for role internal use."
++    - Manages linux user to SELinux user mapping
++version_added: '1.0'
+ options:
+   login:
+     description:
+@@ -41,8 +45,7 @@ options:
+     required: true
+     default: null
+   serange:
+-    description:
+-      - >-
++    description: >
+       MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login
+       mapping - defaults to the SELinux user record range.
+     required: false
+@@ -62,8 +65,9 @@ notes:
+    - The changes are persistent across reboots
+    - Not tested on any debian based system
+ requirements: [ 'libselinux-python', 'policycoreutils-python' ]
+-author: Dan Keder
+-author: Petr Lautrbach
++author:
++    - Dan Keder (@dkeder)
++    - Petr Lautrbach (@bachradsusi)
+ """
+ 
+ EXAMPLES = """
+@@ -82,7 +86,7 @@ EXAMPLES = """
+ 
+ # Assign all users in the engineering group to the staff_u user
+ - selogin:
+-    login: %engineering
++    login: "%engineering"
+     seuser: staff_u
+     state: present
+ """
+@@ -198,9 +202,6 @@ def semanage_login_add(module, login, seuser, do_reload, serange="s0", sestore="
+     except KeyError:
+         e = get_exception()
+         module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+-    except OSError:
+-        e = get_exception()
+-        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+     except RuntimeError:
+         e = get_exception()
+         module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+@@ -248,9 +249,6 @@ def semanage_login_del(module, login, seuser, do_reload, sestore=""):
+     except KeyError:
+         e = get_exception()
+         module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+-    except OSError:
+-        e = get_exception()
+-        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+     except RuntimeError:
+         e = get_exception()
+         module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
+diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh
+deleted file mode 100755
+index 94d102d..0000000
+--- a/tests/setup_module_utils.sh
++++ /dev/null
+@@ -1,41 +0,0 @@
+-#!/bin/bash
+-# SPDX-License-Identifier: MIT
+-
+-set -euo pipefail
+-
+-if [ -n "${DEBUG:-}" ] ; then
+-    set -x
+-fi
+-
+-if [ ! -d "${1:-}" ] ; then
+-    echo Either ansible is not installed, or there is no ansible/module_utils
+-    echo in "$1" - Skipping
+-    exit 0
+-fi
+-
+-if [ ! -d "${2:-}" ] ; then
+-    echo Role has no module_utils - Skipping
+-    exit 0
+-fi
+-
+-# we need absolute path for $2
+-absmoddir=$( readlink -f "$2" )
+-
+-# clean up old links to module_utils
+-for item in "$1"/* ; do
+-    if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then
+-        case "$lnitem" in
+-            *"${2}"*) rm -f "$item" ;;
+-        esac
+-    fi
+-done
+-
+-# add new links to module_utils
+-for item in "$absmoddir"/* ; do
+-    case "$item" in
+-        *__pycache__) continue;;
+-        *.pyc) continue;;
+-    esac
+-    bnitem=$( basename "$item" )
+-    ln -s "$item" "$1/$bnitem"
+-done
+-- 
+2.26.2
+
diff --git a/SOURCES/storage-ansible-test.diff b/SOURCES/storage-ansible-test.diff
new file mode 100644
index 0000000..3cb42d8
--- /dev/null
+++ b/SOURCES/storage-ansible-test.diff
@@ -0,0 +1,3663 @@
+From 1d7f9d53c5be6588a7a6c34e4c623b2a8f6fff19 Mon Sep 17 00:00:00 2001
+From: Rich Megginson <rmeggins@redhat.com>
+Date: Wed, 3 Mar 2021 07:55:20 -0700
+Subject: [PATCH] resolve ansible-test issues
+
+This fixes many formatting issues as well to make black, flake8,
+pylint, yamllint, and ansible-lint happier.
+
+(cherry picked from commit bb2a1af5f63d00c3ff178f3b44696189d9adf542)
+---
+ .github/workflows/tox.yml                     |   4 +-
+ .sanity-ansible-ignore-2.9.txt                |  13 +
+ library/blivet.py                             | 968 +++++++++++-------
+ library/blockdev_info.py                      |  45 +-
+ library/bsize.py                              |  56 +-
+ library/find_unused_disk.py                   | 101 +-
+ library/lvm_gensym.py                         | 119 ++-
+ library/resolve_blockdev.py                   |  71 +-
+ module_utils/storage_lsr/size.py              |  86 +-
+ tests/setup_module_utils.sh                   |  41 -
+ tests/test-verify-volume-device.yml           |   4 +-
+ tests/test-verify-volume-md.yml               |   2 +-
+ tests/test.yml                                |   2 +-
+ tests/tests_create_lv_size_equal_to_vg.yml    |  28 +-
+ ...ts_create_partition_volume_then_remove.yml |   4 +-
+ tests/tests_existing_lvm_pool.yml             |  12 +-
+ tests/tests_lvm_auto_size_cap.yml             |  42 +-
+ tests/tests_lvm_one_disk_one_volume.yml       |  46 +-
+ tests/tests_misc.yml                          |   2 +-
+ tests/tests_null_raid_pool.yml                |  14 +-
+ tests/tests_resize.yml                        |  86 +-
+ tests/unit/bsize_test.py                      |   5 +
+ tests/unit/gensym_test.py                     | 103 +-
+ tests/unit/resolve_blockdev_test.py           |  74 +-
+ tests/unit/test_unused_disk.py                |  73 +-
+ tox.ini                                       |   6 -
+ 26 files changed, 1177 insertions(+), 830 deletions(-)
+ create mode 100644 .sanity-ansible-ignore-2.9.txt
+ delete mode 100755 tests/setup_module_utils.sh
+
+diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml
+index eceb71f..ec3ec9f 100644
+--- a/.github/workflows/tox.yml
++++ b/.github/workflows/tox.yml
+@@ -3,7 +3,7 @@ name: tox
+ on:  # yamllint disable-line rule:truthy
+   - pull_request
+ env:
+-  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.2.0"
++  TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.3.0"
+   LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*'
+   LSR_MSCENARIOS: default
+   # LSR_EXTRA_PACKAGES: libdbus-1-dev
+@@ -36,7 +36,7 @@ jobs:
+           toxenvs="py${toxpyver}"
+           case "$toxpyver" in
+           27) toxenvs="${toxenvs},coveralls,flake8,pylint,custom" ;;
+-          36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection" ;;
++          36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,shellcheck,custom,collection,ansible-test" ;;
+           37) toxenvs="${toxenvs},coveralls,custom" ;;
+           38) toxenvs="${toxenvs},coveralls,custom" ;;
+           esac
+diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt
+new file mode 100644
+index 0000000..bf700c6
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.9.txt
+@@ -0,0 +1,13 @@
++plugins/modules/blivet.py import-2.7!skip
++plugins/modules/blivet.py import-3.5!skip
++plugins/modules/blivet.py import-3.6!skip
++plugins/modules/blivet.py import-3.7!skip
++plugins/modules/blivet.py import-3.8!skip
++tests/storage/unit/gensym_test.py shebang!skip
++plugins/modules/blivet.py validate-modules:import-error
++plugins/modules/blivet.py validate-modules:missing-gplv3-license
++plugins/modules/blockdev_info.py validate-modules:missing-gplv3-license
++plugins/modules/bsize.py validate-modules:missing-gplv3-license
++plugins/modules/find_unused_disk.py validate-modules:missing-gplv3-license
++plugins/modules/lvm_gensym.py validate-modules:missing-gplv3-license
++plugins/modules/resolve_blockdev.py validate-modules:missing-gplv3-license
+diff --git a/library/blivet.py b/library/blivet.py
+index 946b640..0e0b30c 100644
+--- a/library/blivet.py
++++ b/library/blivet.py
+@@ -1,12 +1,16 @@
+ #!/usr/bin/python
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ ANSIBLE_METADATA = {
+-    'metadata_version': '1.1',
+-    'status': ['preview'],
+-    'supported_by': 'community'
++    "metadata_version": "1.1",
++    "status": ["preview"],
++    "supported_by": "community",
+ }
+ 
+-DOCUMENTATION = '''
++DOCUMENTATION = """
+ ---
+ module: blivet
+ 
+@@ -15,6 +19,7 @@ short_description: Module for management of linux block device stacks
+ version_added: "2.5"
+ 
+ description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
+     - "Module configures storage pools and volumes to match the state specified
+        in input parameters. It does not do any management of /etc/fstab entries."
+ 
+@@ -30,7 +35,8 @@ options:
+             - boolean indicating whether to create partitions on disks for pool backing devices
+     disklabel_type:
+         description:
+-            - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
++            - |
++              disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+     safe_mode:
+         description:
+             - boolean indicating that we should fail rather than implicitly/automatically
+@@ -41,10 +47,10 @@ options:
+               when creating a disk volume (that is, a whole disk filesystem)
+ 
+ author:
+-    - David Lehman (dlehman@redhat.com)
+-'''
++    - David Lehman (@dwlehman)
++"""
+ 
+-EXAMPLES = '''
++EXAMPLES = """
+ 
+ - name: Manage devices
+   blivet:
+@@ -64,28 +70,40 @@ EXAMPLES = '''
+           mount_point: /whole_disk1
+           fs_type: ext4
+           mount_options: journal_checksum,async,noexec
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ actions:
+     description: list of dicts describing actions taken
+-    type: list of dict
++    returned: success
++    type: list
++    elements: dict
+ leaves:
+     description: list of paths to leaf devices
+-    type: list of str
++    returned: success
++    type: list
++    elements: dict
+ mounts:
+     description: list of dicts describing mounts to set up
+-    type: list of dict
++    returned: success
++    type: list
++    elements: dict
+ crypts:
+     description: list of dicts describing crypttab entries to set up
+-    type: list of dict
++    returned: success
++    type: list
++    elements: dict
+ pools:
+     description: list of dicts describing the pools w/ device path for each volume
+-    type: list of dict
++    returned: success
++    type: list
++    elements: dict
+ volumes:
+     description: list of dicts describing the volumes w/ device path for each
+-    type: list of dict
+-'''
++    returned: success
++    type: list
++    elements: dict
++"""
+ 
+ import logging
+ import os
+@@ -106,7 +124,8 @@ try:
+     from blivet3.size import Size
+     from blivet3.udev import trigger
+     from blivet3.util import set_up_logging
+-    BLIVET_PACKAGE = 'blivet3'
++
++    BLIVET_PACKAGE = "blivet3"
+ except ImportError:
+     LIB_IMP_ERR3 = traceback.format_exc()
+     try:
+@@ -119,7 +138,8 @@ except ImportError:
+         from blivet.size import Size
+         from blivet.udev import trigger
+         from blivet.util import set_up_logging
+-        BLIVET_PACKAGE = 'blivet'
++
++        BLIVET_PACKAGE = "blivet"
+     except ImportError:
+         LIB_IMP_ERR = traceback.format_exc()
+ 
+@@ -135,23 +155,23 @@ MAX_TRIM_PERCENT = 2
+ 
+ use_partitions = None  # create partitions on pool backing device disks?
+ disklabel_type = None  # user-specified disklabel type
+-safe_mode = None       # do not remove any existing devices or formatting
++safe_mode = None  # do not remove any existing devices or formatting
+ pool_defaults = dict()
+ volume_defaults = dict()
+ 
+ 
+ def find_duplicate_names(dicts):
+-    """ Return a list of names that appear more than once in a list of dicts.
++    """Return a list of names that appear more than once in a list of dicts.
+ 
+-        Items can be a list of any dicts with a 'name' key; that's all we're
+-        looking at. """
++    Items can be a list of any dicts with a 'name' key; that's all we're
++    looking at."""
+     names = list()
+     duplicates = list()
+     for item in dicts:
+-        if item['name'] in names and item['name'] not in duplicates:
+-            duplicates.append(item['name'])
++        if item["name"] in names and item["name"] not in duplicates:
++            duplicates.append(item["name"])
+         else:
+-            names.append(item['name'])
++            names.append(item["name"])
+ 
+     return duplicates
+ 
+@@ -177,41 +197,54 @@ class BlivetBase(object):
+         global safe_mode
+         ret = device
+         # Make sure to handle adjusting both existing stacks and future stacks.
+-        if device == device.raw_device and self._spec_dict['encryption']:
++        if device == device.raw_device and self._spec_dict["encryption"]:
+             # add luks
+             luks_name = "luks-%s" % device._name
+-            if safe_mode and (device.original_format.type is not None or
+-                              device.original_format.name != get_format(None).name):
+-                raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to adding encryption" %
+-                                         device._name)
++            if safe_mode and (
++                device.original_format.type is not None
++                or device.original_format.name != get_format(None).name
++            ):
++                raise BlivetAnsibleError(
++                    "cannot remove existing formatting on device '%s' in safe mode due to adding encryption"
++                    % device._name
++                )
+             if not device.format.exists:
+                 fmt = device.format
+             else:
+                 fmt = get_format(None)
+ 
+-            self._blivet.format_device(device,
+-                                       get_format("luks",
+-                                                  name=luks_name,
+-                                                  cipher=self._spec_dict.get('encryption_cipher'),
+-                                                  key_size=self._spec_dict.get('encryption_key_size'),
+-                                                  luks_version=self._spec_dict.get('encryption_luks_version'),
+-                                                  passphrase=self._spec_dict.get('encryption_password') or None,
+-                                                  key_file=self._spec_dict.get('encryption_key') or None))
++            self._blivet.format_device(
++                device,
++                get_format(
++                    "luks",
++                    name=luks_name,
++                    cipher=self._spec_dict.get("encryption_cipher"),
++                    key_size=self._spec_dict.get("encryption_key_size"),
++                    luks_version=self._spec_dict.get("encryption_luks_version"),
++                    passphrase=self._spec_dict.get("encryption_password") or None,
++                    key_file=self._spec_dict.get("encryption_key") or None,
++                ),
++            )
+ 
+             if not device.format.has_key:
+-                raise BlivetAnsibleError("encrypted %s '%s' missing key/password" % (self._type, self._spec_dict['name']))
++                raise BlivetAnsibleError(
++                    "encrypted %s '%s' missing key/password"
++                    % (self._type, self._spec_dict["name"])
++                )
+ 
+-            luks_device = devices.LUKSDevice(luks_name,
+-                                             fmt=fmt,
+-                                             parents=[device])
++            luks_device = devices.LUKSDevice(luks_name, fmt=fmt, parents=[device])
+             self._blivet.create_device(luks_device)
+             ret = luks_device
+-        elif device != device.raw_device and not self._spec_dict['encryption']:
++        elif device != device.raw_device and not self._spec_dict["encryption"]:
+             # remove luks
+-            if safe_mode and (device.original_format.type is not None or
+-                              device.original_format.name != get_format(None).name):
+-                raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to encryption removal" %
+-                                         device._name)
++            if safe_mode and (
++                device.original_format.type is not None
++                or device.original_format.name != get_format(None).name
++            ):
++                raise BlivetAnsibleError(
++                    "cannot remove existing formatting on device '%s' in safe mode due to encryption removal"
++                    % device._name
++                )
+             if not device.format.exists:
+                 fmt = device.format
+             else:
+@@ -240,12 +273,21 @@ class BlivetBase(object):
+         requested_spares = self._spec_dict.get("raid_spare_count")
+ 
+         if requested_actives is not None and requested_spares is not None:
+-            if (requested_actives + requested_spares != len(members) or
+-                    requested_actives < 0 or requested_spares < 0):
+-                raise BlivetAnsibleError("failed to set up '%s': cannot create RAID "
+-                                        "with %s members (%s active and %s spare)"
+-                                        % (self._spec_dict["name"], len(members),
+-                                            requested_actives, requested_spares))
++            if (
++                requested_actives + requested_spares != len(members)
++                or requested_actives < 0
++                or requested_spares < 0
++            ):
++                raise BlivetAnsibleError(
++                    "failed to set up '%s': cannot create RAID "
++                    "with %s members (%s active and %s spare)"
++                    % (
++                        self._spec_dict["name"],
++                        len(members),
++                        requested_actives,
++                        requested_spares,
++                    )
++                )
+ 
+         if requested_actives is not None:
+             active_count = requested_actives
+@@ -264,16 +306,20 @@ class BlivetBase(object):
+             raise BlivetAnsibleError("chunk size must be multiple of 4 KiB")
+ 
+         try:
+-            raid_array = self._blivet.new_mdarray(name=raid_name,
+-                                                  level=self._spec_dict["raid_level"],
+-                                                  member_devices=active_count,
+-                                                  total_devices=len(members),
+-                                                  parents=members,
+-                                                  chunk_size=chunk_size,
+-                                                  metadata_version=self._spec_dict.get("raid_metadata_version"),
+-                                                  fmt=self._get_format())
++            raid_array = self._blivet.new_mdarray(
++                name=raid_name,
++                level=self._spec_dict["raid_level"],
++                member_devices=active_count,
++                total_devices=len(members),
++                parents=members,
++                chunk_size=chunk_size,
++                metadata_version=self._spec_dict.get("raid_metadata_version"),
++                fmt=self._get_format(),
++            )
+         except ValueError as e:
+-            raise BlivetAnsibleError("cannot create RAID '%s': %s" % (raid_name, str(e)))
++            raise BlivetAnsibleError(
++                "cannot create RAID '%s': %s" % (raid_name, str(e))
++            )
+ 
+         return raid_array
+ 
+@@ -298,17 +344,18 @@ class BlivetVolume(BlivetBase):
+         if self.__class__.blivet_device_class is not None:
+             packages.extend(self.__class__.blivet_device_class._packages)
+ 
+-        fmt = get_format(self._volume.get('fs_type'))
++        fmt = get_format(self._volume.get("fs_type"))
+         packages.extend(fmt.packages)
+-        if self._volume.get('encryption'):
+-            packages.extend(get_format('luks').packages)
++        if self._volume.get("encryption"):
++            packages.extend(get_format("luks").packages)
+         return packages
+ 
+     @property
+     def ultimately_present(self):
+         """ Should this volume be present when we are finished? """
+-        return (self._volume.get('state', 'present') == 'present' and
+-                (self._blivet_pool is None or self._blivet_pool.ultimately_present))
++        return self._volume.get("state", "present") == "present" and (
++            self._blivet_pool is None or self._blivet_pool.ultimately_present
++        )
+ 
+     def _type_check(self):  # pylint: disable=no-self-use
+         """ Is self._device of the correct type? """
+@@ -316,7 +363,7 @@ class BlivetVolume(BlivetBase):
+ 
+     def _get_device_id(self):
+         """ Return an identifier by which to try looking the volume up. """
+-        return self._volume['name']
++        return self._volume["name"]
+ 
+     def _look_up_device(self):
+         """ Try to look up this volume in blivet's device tree. """
+@@ -331,14 +378,14 @@ class BlivetVolume(BlivetBase):
+         if device is None:
+             return
+ 
+-        if device.format.type == 'luks':
++        if device.format.type == "luks":
+             # XXX If we have no key we will always re-encrypt.
+-            device.format._key_file = self._volume.get('encryption_key')
+-            device.format.passphrase = self._volume.get('encryption_password')
++            device.format._key_file = self._volume.get("encryption_key")
++            device.format.passphrase = self._volume.get("encryption_password")
+ 
+             # set up the original format as well since it'll get used for processing
+-            device.original_format._key_file = self._volume.get('encryption_key')
+-            device.original_format.passphrase = self._volume.get('encryption_password')
++            device.original_format._key_file = self._volume.get("encryption_key")
++            device.original_format.passphrase = self._volume.get("encryption_password")
+             if device.isleaf:
+                 self._blivet.populate()
+ 
+@@ -361,26 +408,31 @@ class BlivetVolume(BlivetBase):
+         elif encrypted:
+             luks_fmt = self._device.format
+ 
+-        if param_name == 'size':
+-            self._volume['size'] = int(self._device.size.convert_to())
+-        elif param_name == 'fs_type' and (self._device.format.type or self._device.format.name != get_format(None).name):
+-            self._volume['fs_type'] = self._device.format.type
+-        elif param_name == 'fs_label':
+-            self._volume['fs_label'] = getattr(self._device.format, 'label', "") or ""
+-        elif param_name == 'mount_point':
+-            self._volume['mount_point'] = getattr(self._device.format, 'mountpoint', None)
+-        elif param_name == 'disks':
+-            self._volume['disks'] = [d.name for d in self._device.disks]
+-        elif param_name == 'encryption':
+-            self._volume['encryption'] = encrypted
+-        elif param_name == 'encryption_key_size' and encrypted:
+-            self._volume['encryption_key_size'] = luks_fmt.key_size
+-        elif param_name == 'encryption_key_file' and encrypted:
+-            self._volume['encryption_key_file'] = luks_fmt.key_file
+-        elif param_name == 'encryption_cipher' and encrypted:
+-            self._volume['encryption_cipher'] = luks_fmt.cipher
+-        elif param_name == 'encryption_luks_version' and encrypted:
+-            self._volume['encryption_luks_version'] = luks_fmt.luks_version
++        if param_name == "size":
++            self._volume["size"] = int(self._device.size.convert_to())
++        elif param_name == "fs_type" and (
++            self._device.format.type
++            or self._device.format.name != get_format(None).name
++        ):
++            self._volume["fs_type"] = self._device.format.type
++        elif param_name == "fs_label":
++            self._volume["fs_label"] = getattr(self._device.format, "label", "") or ""
++        elif param_name == "mount_point":
++            self._volume["mount_point"] = getattr(
++                self._device.format, "mountpoint", None
++            )
++        elif param_name == "disks":
++            self._volume["disks"] = [d.name for d in self._device.disks]
++        elif param_name == "encryption":
++            self._volume["encryption"] = encrypted
++        elif param_name == "encryption_key_size" and encrypted:
++            self._volume["encryption_key_size"] = luks_fmt.key_size
++        elif param_name == "encryption_key_file" and encrypted:
++            self._volume["encryption_key_file"] = luks_fmt.key_file
++        elif param_name == "encryption_cipher" and encrypted:
++            self._volume["encryption_cipher"] = luks_fmt.cipher
++        elif param_name == "encryption_luks_version" and encrypted:
++            self._volume["encryption_luks_version"] = luks_fmt.luks_version
+         else:
+             return False
+ 
+@@ -392,7 +444,7 @@ class BlivetVolume(BlivetBase):
+             if name in self._volume:
+                 continue
+ 
+-            default = None if default in ('none', 'None', 'null') else default
++            default = None if default in ("none", "None", "null") else default
+ 
+             if self._device:
+                 # Apply values from the device if it already exists.
+@@ -403,12 +455,17 @@ class BlivetVolume(BlivetBase):
+ 
+     def _get_format(self):
+         """ Return a blivet.formats.DeviceFormat instance for this volume. """
+-        fmt = get_format(self._volume['fs_type'],
+-                         mountpoint=self._volume.get('mount_point'),
+-                         label=self._volume['fs_label'],
+-                         create_options=self._volume['fs_create_options'])
++        fmt = get_format(
++            self._volume["fs_type"],
++            mountpoint=self._volume.get("mount_point"),
++            label=self._volume["fs_label"],
++            create_options=self._volume["fs_create_options"],
++        )
+         if not fmt.supported or not fmt.formattable:
+-            raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type'])
++            raise BlivetAnsibleError(
++                "required tools for file system '%s' are missing"
++                % self._volume["fs_type"]
++            )
+ 
+         return fmt
+ 
+@@ -422,9 +479,9 @@ class BlivetVolume(BlivetBase):
+             return
+ 
+         # save device identifiers for use by the role
+-        self._volume['_device'] = self._device.path
+-        self._volume['_raw_device'] = self._device.raw_device.path
+-        self._volume['_mount_id'] = self._device.fstab_spec
++        self._volume["_device"] = self._device.path
++        self._volume["_raw_device"] = self._device.raw_device.path
++        self._volume["_mount_id"] = self._device.fstab_spec
+ 
+         # schedule removal of this device and any descendant devices
+         self._blivet.devicetree.recursive_remove(self._device.raw_device)
+@@ -435,9 +492,12 @@ class BlivetVolume(BlivetBase):
+     def _resize(self):
+         """ Schedule actions as needed to ensure the device has the desired size. """
+         try:
+-            size = Size(self._volume['size'])
++            size = Size(self._volume["size"])
+         except Exception:
+-            raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size']))
++            raise BlivetAnsibleError(
++                "invalid size specification for volume '%s': '%s'"
++                % (self._volume["name"], self._volume["size"])
++            )
+ 
+         if size and self._device.size != size:
+             try:
+@@ -448,28 +508,44 @@ class BlivetVolume(BlivetBase):
+             if not self._device.resizable:
+                 return
+ 
+-            trim_percent = (1.0 - float(self._device.max_size / size))*100
+-            log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent)
++            trim_percent = (1.0 - float(self._device.max_size / size)) * 100
++            log.debug(
++                "resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent
++            )
+             if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT:
+-                log.info("adjusting %s resize target from %s to %s to fit in free space",
+-                         self._volume['name'],
+-                         size,
+-                         self._device.max_size)
++                log.info(
++                    "adjusting %s resize target from %s to %s to fit in free space",
++                    self._volume["name"],
++                    size,
++                    self._device.max_size,
++                )
+                 size = self._device.max_size
+                 if size == self._device.size:
+                     return
+ 
+             if not self._device.min_size <= size <= self._device.max_size:
+-                raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size))
++                raise BlivetAnsibleError(
++                    "volume '%s' cannot be resized to '%s'"
++                    % (self._volume["name"], size)
++                )
+ 
+             try:
+                 self._blivet.resize_device(self._device, size)
+             except ValueError as e:
+-                raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name,
+-                                                                                              self._device.size,
+-                                                                                              size, str(e)))
+-        elif size and self._device.exists and self._device.size != size and not self._device.resizable:
+-            raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s" % (self._device.name, self._device.size, size))
++                raise BlivetAnsibleError(
++                    "volume '%s' cannot be resized from %s to %s: %s"
++                    % (self._device.name, self._device.size, size, str(e))
++                )
++        elif (
++            size
++            and self._device.exists
++            and self._device.size != size
++            and not self._device.resizable
++        ):
++            raise BlivetAnsibleError(
++                "volume '%s' cannot be resized from %s to %s"
++                % (self._device.name, self._device.size, size)
++            )
+ 
+     def _reformat(self):
+         """ Schedule actions as needed to ensure the volume is formatted as specified. """
+@@ -477,10 +553,18 @@ class BlivetVolume(BlivetBase):
+         if self._device.format.type == fmt.type:
+             return
+ 
+-        if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
+-            raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
+-
+-        if self._device.format.status and (self._device.format.mountable or self._device.format.type == "swap"):
++        if safe_mode and (
++            self._device.format.type is not None
++            or self._device.format.name != get_format(None).name
++        ):
++            raise BlivetAnsibleError(
++                "cannot remove existing formatting on volume '%s' in safe mode"
++                % self._volume["name"]
++            )
++
++        if self._device.format.status and (
++            self._device.format.mountable or self._device.format.type == "swap"
++        ):
+             self._device.format.teardown()
+         if not self._device.isleaf:
+             self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+@@ -503,7 +587,9 @@ class BlivetVolume(BlivetBase):
+ 
+         # at this point we should have a blivet.devices.StorageDevice instance
+         if self._device is None:
+-            raise BlivetAnsibleError("failed to look up or create device '%s'" % self._volume['name'])
++            raise BlivetAnsibleError(
++                "failed to look up or create device '%s'" % self._volume["name"]
++            )
+ 
+         self._manage_encryption()
+ 
+@@ -511,24 +597,31 @@ class BlivetVolume(BlivetBase):
+         if self._device.raw_device.exists:
+             self._reformat()
+ 
+-        if self.ultimately_present and self._volume['mount_point'] and not self._device.format.mountable:
+-            raise BlivetAnsibleError("volume '%s' has a mount point but no mountable file system" % self._volume['name'])
++        if (
++            self.ultimately_present
++            and self._volume["mount_point"]
++            and not self._device.format.mountable
++        ):
++            raise BlivetAnsibleError(
++                "volume '%s' has a mount point but no mountable file system"
++                % self._volume["name"]
++            )
+ 
+         # schedule resize if appropriate
+-        if self._device.raw_device.exists and self._volume['size']:
++        if self._device.raw_device.exists and self._volume["size"]:
+             self._resize()
+ 
+         # save device identifiers for use by the role
+-        self._volume['_device'] = self._device.path
+-        self._volume['_raw_device'] = self._device.raw_device.path
+-        self._volume['_mount_id'] = self._device.fstab_spec
++        self._volume["_device"] = self._device.path
++        self._volume["_raw_device"] = self._device.raw_device.path
++        self._volume["_mount_id"] = self._device.fstab_spec
+ 
+ 
+ class BlivetDiskVolume(BlivetVolume):
+     blivet_device_class = devices.DiskDevice
+ 
+     def _get_device_id(self):
+-        return self._volume['disks'][0]
++        return self._volume["disks"][0]
+ 
+     def _type_check(self):
+         return self._device.raw_device.is_disk
+@@ -536,7 +629,7 @@ class BlivetDiskVolume(BlivetVolume):
+     def _get_format(self):
+         fmt = super(BlivetDiskVolume, self)._get_format()
+         # pass -F to mke2fs on whole disks in RHEL7
+-        mkfs_options = diskvolume_mkfs_option_map.get(self._volume['fs_type'])
++        mkfs_options = diskvolume_mkfs_option_map.get(self._volume["fs_type"])
+         if mkfs_options:
+             if fmt.create_options:
+                 fmt.create_options += " "
+@@ -552,23 +645,31 @@ class BlivetDiskVolume(BlivetVolume):
+     def _look_up_device(self):
+         super(BlivetDiskVolume, self)._look_up_device()
+         if not self._get_device_id():
+-            raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name'])
+-        elif not isinstance(self._volume['disks'], list):
++            raise BlivetAnsibleError(
++                "no disks specified for volume '%s'" % self._volume["name"]
++            )
++        elif not isinstance(self._volume["disks"], list):
+             raise BlivetAnsibleError("volume disks must be specified as a list")
+ 
+         if self._device is None:
+-            raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks']))
++            raise BlivetAnsibleError(
++                "unable to resolve disk specified for volume '%s' (%s)"
++                % (self._volume["name"], self._volume["disks"])
++            )
+ 
+ 
+ class BlivetPartitionVolume(BlivetVolume):
+     blivet_device_class = devices.PartitionDevice
+ 
+     def _type_check(self):
+-        return self._device.raw_device.type == 'partition'
++        return self._device.raw_device.type == "partition"
+ 
+     def _get_device_id(self):
+         device_id = None
+-        if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1:
++        if (
++            self._blivet_pool._disks[0].partitioned
++            and len(self._blivet_pool._disks[0].children) == 1
++        ):
+             device_id = self._blivet_pool._disks[0].children[0].name
+ 
+         return device_id
+@@ -583,22 +684,29 @@ class BlivetPartitionVolume(BlivetVolume):
+         if self._blivet_pool:
+             parent = self._blivet_pool._device
+         else:
+-            parent = self._blivet.devicetree.resolve_device(self._volume['pool'])
++            parent = self._blivet.devicetree.resolve_device(self._volume["pool"])
+ 
+         if parent is None:
+-            raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name']))
++            raise BlivetAnsibleError(
++                "failed to find pool '%s' for volume '%s'"
++                % (self._blivet_pool["name"], self._volume["name"])
++            )
+ 
+         size = Size("256 MiB")
+         try:
+-            device = self._blivet.new_partition(parents=[parent], size=size, grow=True, fmt=self._get_format())
++            device = self._blivet.new_partition(
++                parents=[parent], size=size, grow=True, fmt=self._get_format()
++            )
+         except Exception:
+-            raise BlivetAnsibleError("failed set up volume '%s'" % self._volume['name'])
++            raise BlivetAnsibleError("failed set up volume '%s'" % self._volume["name"])
+ 
+         self._blivet.create_device(device)
+         try:
+             do_partitioning(self._blivet)
+         except Exception:
+-            raise BlivetAnsibleError("partition allocation failed for volume '%s'" % self._volume['name'])
++            raise BlivetAnsibleError(
++                "partition allocation failed for volume '%s'" % self._volume["name"]
++            )
+ 
+         self._device = device
+ 
+@@ -609,7 +717,7 @@ class BlivetLVMVolume(BlivetVolume):
+     def _get_device_id(self):
+         if not self._blivet_pool._device:
+             return None
+-        return "%s-%s" % (self._blivet_pool._device.name, self._volume['name'])
++        return "%s-%s" % (self._blivet_pool._device.name, self._volume["name"])
+ 
+     def _create(self):
+         if self._device:
+@@ -617,51 +725,75 @@ class BlivetLVMVolume(BlivetVolume):
+ 
+         parent = self._blivet_pool._device
+         if parent is None:
+-            raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name']))
++            raise BlivetAnsibleError(
++                "failed to find pool '%s' for volume '%s'"
++                % (self._blivet_pool["name"], self._volume["name"])
++            )
+ 
+         try:
+-            size = Size(self._volume['size'])
++            size = Size(self._volume["size"])
+         except Exception:
+-            raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name']))
++            raise BlivetAnsibleError(
++                "invalid size '%s' specified for volume '%s'"
++                % (self._volume["size"], self._volume["name"])
++            )
+ 
+         fmt = self._get_format()
+-        trim_percent = (1.0 - float(parent.free_space / size))*100
++        trim_percent = (1.0 - float(parent.free_space / size)) * 100
+         log.debug("size: %s ; %s", size, trim_percent)
+         if size > parent.free_space:
+             if trim_percent > MAX_TRIM_PERCENT:
+-                raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)"
+-                                         % (size, parent.name, parent.free_space))
++                raise BlivetAnsibleError(
++                    "specified size for volume '%s' exceeds available space in pool '%s' (%s)"
++                    % (size, parent.name, parent.free_space)
++                )
+             else:
+-                log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'],
+-                                                                                    size,
+-                                                                                    parent.free_space,
+-                                                                                    parent.name)
++                log.info(
++                    "adjusting %s size from %s to %s to fit in %s free space",
++                    self._volume["name"],
++                    size,
++                    parent.free_space,
++                    parent.name,
++                )
+                 size = parent.free_space
+ 
+         try:
+-            device = self._blivet.new_lv(name=self._volume['name'],
+-                                         parents=[parent], size=size, fmt=fmt)
++            device = self._blivet.new_lv(
++                name=self._volume["name"], parents=[parent], size=size, fmt=fmt
++            )
+         except Exception as e:
+-            raise BlivetAnsibleError("failed to set up volume '%s': %s" % (self._volume['name'], str(e)))
++            raise BlivetAnsibleError(
++                "failed to set up volume '%s': %s" % (self._volume["name"], str(e))
++            )
+ 
+         self._blivet.create_device(device)
+         self._device = device
+ 
+ 
+ class BlivetMDRaidVolume(BlivetVolume):
+-
+-    def _process_device_numbers(self, members_count, requested_actives, requested_spares):
++    def _process_device_numbers(
++        self, members_count, requested_actives, requested_spares
++    ):
+ 
+         active_count = members_count
+         spare_count = 0
+ 
+         if requested_actives is not None and requested_spares is not None:
+-            if (requested_actives + requested_spares != members_count or
+-                    requested_actives < 0 or requested_spares < 0):
+-                raise BlivetAnsibleError("failed to set up volume '%s': cannot create RAID "
+-                                         "with %s members (%s active and %s spare)"
+-                                         % (self._volume['name'], members_count,
+-                                            requested_actives, requested_spares))
++            if (
++                requested_actives + requested_spares != members_count
++                or requested_actives < 0
++                or requested_spares < 0
++            ):
++                raise BlivetAnsibleError(
++                    "failed to set up volume '%s': cannot create RAID "
++                    "with %s members (%s active and %s spare)"
++                    % (
++                        self._volume["name"],
++                        members_count,
++                        requested_actives,
++                        requested_spares,
++                    )
++                )
+ 
+         if requested_actives is not None:
+             active_count = requested_actives
+@@ -685,7 +817,9 @@ class BlivetMDRaidVolume(BlivetVolume):
+                     self._blivet.format_device(member_disk, label)
+ 
+                     # create new partition
+-                    member = self._blivet.new_partition(parents=[member_disk], grow=True)
++                    member = self._blivet.new_partition(
++                        parents=[member_disk], grow=True
++                    )
+                     self._blivet.create_device(member)
+                     self._blivet.format_device(member, fmt=get_format("mdmember"))
+                     members.append(member)
+@@ -697,16 +831,16 @@ class BlivetMDRaidVolume(BlivetVolume):
+ 
+     def _update_from_device(self, param_name):
+         """ Return True if param_name's value was retrieved from a looked-up device. """
+-        if param_name == 'raid_level':
+-            self._volume['raid_level'] = self._device.level.name
+-        elif param_name == 'raid_chunk_size':
+-            self._volume['raid_chunk_size'] = str(self._device.chunk_size)
+-        elif param_name == 'raid_device_count':
+-            self._volume['raid_device_count'] = self._device.member_devices
+-        elif param_name == 'raid_spare_count':
+-            self._volume['raid_spare_count'] = self._device.spares
+-        elif param_name == 'raid_metadata_version':
+-            self._volume['raid_metadata_version'] = self._device.metadata_version
++        if param_name == "raid_level":
++            self._volume["raid_level"] = self._device.level.name
++        elif param_name == "raid_chunk_size":
++            self._volume["raid_chunk_size"] = str(self._device.chunk_size)
++        elif param_name == "raid_device_count":
++            self._volume["raid_device_count"] = self._device.member_devices
++        elif param_name == "raid_spare_count":
++            self._volume["raid_spare_count"] = self._device.spares
++        elif param_name == "raid_metadata_version":
++            self._volume["raid_metadata_version"] = self._device.metadata_version
+         else:
+             return super(BlivetMDRaidVolume, self)._update_from_device(param_name)
+ 
+@@ -728,7 +862,10 @@ class BlivetMDRaidVolume(BlivetVolume):
+             try:
+                 do_partitioning(self._blivet)
+             except Exception as e:
+-                raise BlivetAnsibleError("failed to allocate partitions for mdraid '%s': %s" % (self._volume['name'], str(e)))
++                raise BlivetAnsibleError(
++                    "failed to allocate partitions for mdraid '%s': %s"
++                    % (self._volume["name"], str(e))
++                )
+ 
+         raid_array = self._new_mdarray(members)
+ 
+@@ -764,16 +901,20 @@ _BLIVET_VOLUME_TYPES = {
+     "disk": BlivetDiskVolume,
+     "lvm": BlivetLVMVolume,
+     "partition": BlivetPartitionVolume,
+-    "raid": BlivetMDRaidVolume
++    "raid": BlivetMDRaidVolume,
+ }
+ 
+ 
+ def _get_blivet_volume(blivet_obj, volume, bpool=None):
+     """ Return a BlivetVolume instance appropriate for the volume dict. """
+     global volume_defaults
+-    volume_type = volume.get('type', bpool._pool['type'] if bpool else volume_defaults['type'])
++    volume_type = volume.get(
++        "type", bpool._pool["type"] if bpool else volume_defaults["type"]
++    )
+     if volume_type not in _BLIVET_VOLUME_TYPES:
+-        raise BlivetAnsibleError("Volume '%s' has unknown type '%s'" % (volume['name'], volume_type))
++        raise BlivetAnsibleError(
++            "Volume '%s' has unknown type '%s'" % (volume["name"], volume_type)
++        )
+ 
+     return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool)
+ 
+@@ -796,19 +937,19 @@ class BlivetPool(BlivetBase):
+         if self.ultimately_present and self.__class__.blivet_device_class is not None:
+             packages.extend(self.__class__.blivet_device_class._packages)
+ 
+-        if self._pool.get('encryption'):
+-            packages.extend(get_format('luks').packages)
++        if self._pool.get("encryption"):
++            packages.extend(get_format("luks").packages)
+ 
+         return packages
+ 
+     @property
+     def ultimately_present(self):
+         """ Should this pool be present when we are finished? """
+-        return self._pool.get('state', 'present') == 'present'
++        return self._pool.get("state", "present") == "present"
+ 
+     @property
+     def _is_raid(self):
+-        return self._pool.get('raid_level') not in [None, "null", ""]
++        return self._pool.get("raid_level") not in [None, "null", ""]
+ 
+     def _member_management_is_destructive(self):
+         return False
+@@ -849,25 +990,30 @@ class BlivetPool(BlivetBase):
+         if self._disks:
+             return
+ 
+-        if not self._device and not self._pool['disks']:
+-            raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name'])
+-        elif not isinstance(self._pool['disks'], list):
++        if not self._device and not self._pool["disks"]:
++            raise BlivetAnsibleError(
++                "no disks specified for pool '%s'" % self._pool["name"]
++            )
++        elif not isinstance(self._pool["disks"], list):
+             raise BlivetAnsibleError("pool disks must be specified as a list")
+ 
+         disks = list()
+-        for spec in self._pool['disks']:
++        for spec in self._pool["disks"]:
+             device = self._blivet.devicetree.resolve_device(spec)
+             if device is not None:  # XXX fail if any disk isn't resolved?
+                 disks.append(device)
+ 
+-        if self._pool['disks'] and not self._device and not disks:
+-            raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks']))
++        if self._pool["disks"] and not self._device and not disks:
++            raise BlivetAnsibleError(
++                "unable to resolve any disks specified for pool '%s' (%s)"
++                % (self._pool["name"], self._pool["disks"])
++            )
+ 
+         self._disks = disks
+ 
+     def _look_up_device(self):
+         """ Look up the pool in blivet's device tree. """
+-        device = self._blivet.devicetree.resolve_device(self._pool['name'])
++        device = self._blivet.devicetree.resolve_device(self._pool["name"])
+         if device is None:
+             return
+ 
+@@ -895,45 +1041,62 @@ class BlivetPool(BlivetBase):
+         """ Return True if param_name's value was retrieved from a looked-up device. """
+         # We wouldn't have the pool device if the member devices weren't unlocked, so we do not
+         # have to consider the case where the devices are unlocked like we do for volumes.
+-        encrypted = bool(self._device.parents) and all("luks" in d.type for d in self._device.parents)
+-        raid = len(self._device.parents) == 1 and hasattr(self._device.parents[0].raw_device, 'level')
++        encrypted = bool(self._device.parents) and all(
++            "luks" in d.type for d in self._device.parents
++        )
++        raid = len(self._device.parents) == 1 and hasattr(
++            self._device.parents[0].raw_device, "level"
++        )
+         log.debug("BlivetPool._update_from_device: %s", self._device)
+ 
+-        if param_name == 'disks':
+-            self._pool['disks'] = [d.name for d in self._device.disks]
+-        elif param_name == 'encryption':
+-            self._pool['encryption'] = encrypted
+-        elif param_name == 'encryption_key_size' and encrypted:
+-            self._pool['encryption_key_size'] = self._device.parents[0].parents[0].format.key_size
+-        elif param_name == 'encryption_key_file' and encrypted:
+-            self._pool['encryption_key_file'] = self._device.parents[0].parents[0].format.key_file
+-        elif param_name == 'encryption_cipher' and encrypted:
+-            self._pool['encryption_cipher'] = self._device.parents[0].parents[0].format.cipher
+-        elif param_name == 'encryption_luks_version' and encrypted:
+-            self._pool['encryption_luks_version'] = self._device.parents[0].parents[0].format.luks_version
+-        elif param_name == 'raid_level' and raid:
+-            self._pool['raid_level'] = self._device.parents[0].raw_device.level.name
+-        elif param_name == 'raid_chunk_size' and raid:
+-            self._pool['raid_chunk_size'] = str(self._device.parents[0].raw_device.chunk_size)
+-        elif param_name == 'raid_device_count' and raid:
+-            self._pool['raid_device_count'] = self._device.parents[0].raw_device.member_devices
+-        elif param_name == 'raid_spare_count' and raid:
+-            self._pool['raid_spare_count'] = self._device.parents[0].raw_device.spares
+-        elif param_name == 'raid_metadata_version' and raid:
+-            self._pool['raid_metadata_version'] = self._device.parents[0].raw_device.metadata_version
++        if param_name == "disks":
++            self._pool["disks"] = [d.name for d in self._device.disks]
++        elif param_name == "encryption":
++            self._pool["encryption"] = encrypted
++        elif param_name == "encryption_key_size" and encrypted:
++            self._pool["encryption_key_size"] = (
++                self._device.parents[0].parents[0].format.key_size
++            )
++        elif param_name == "encryption_key_file" and encrypted:
++            self._pool["encryption_key_file"] = (
++                self._device.parents[0].parents[0].format.key_file
++            )
++        elif param_name == "encryption_cipher" and encrypted:
++            self._pool["encryption_cipher"] = (
++                self._device.parents[0].parents[0].format.cipher
++            )
++        elif param_name == "encryption_luks_version" and encrypted:
++            self._pool["encryption_luks_version"] = (
++                self._device.parents[0].parents[0].format.luks_version
++            )
++        elif param_name == "raid_level" and raid:
++            self._pool["raid_level"] = self._device.parents[0].raw_device.level.name
++        elif param_name == "raid_chunk_size" and raid:
++            self._pool["raid_chunk_size"] = str(
++                self._device.parents[0].raw_device.chunk_size
++            )
++        elif param_name == "raid_device_count" and raid:
++            self._pool["raid_device_count"] = self._device.parents[
++                0
++            ].raw_device.member_devices
++        elif param_name == "raid_spare_count" and raid:
++            self._pool["raid_spare_count"] = self._device.parents[0].raw_device.spares
++        elif param_name == "raid_metadata_version" and raid:
++            self._pool["raid_metadata_version"] = self._device.parents[
++                0
++            ].raw_device.metadata_version
+         else:
+             return False
+ 
+         return True
+ 
+-
+     def _apply_defaults(self):
+         global pool_defaults
+         for name, default in pool_defaults.items():
+             if name in self._pool:
+                 continue
+ 
+-            default = None if default in ('none', 'None', 'null') else default
++            default = None if default in ("none", "None", "null") else default
+ 
+             if self._device:
+                 if not self._update_from_device(name):
+@@ -948,14 +1111,19 @@ class BlivetPool(BlivetBase):
+         for disk in self._disks:
+             if not disk.isleaf or disk.format.type is not None:
+                 if safe_mode:
+-                    raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))
++                    raise BlivetAnsibleError(
++                        "cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode"
++                        % (disk.name, self._pool["name"])
++                    )
+                 else:
+                     self._blivet.devicetree.recursive_remove(disk)
+ 
+             if use_partitions:
+                 label = get_format("disklabel", device=disk.path)
+                 self._blivet.format_device(disk, label)
+-                member = self._blivet.new_partition(parents=[disk], size=Size("256MiB"), grow=True)
++                member = self._blivet.new_partition(
++                    parents=[disk], size=Size("256MiB"), grow=True
++                )
+                 self._blivet.create_device(member)
+             else:
+                 member = disk
+@@ -966,9 +1134,8 @@ class BlivetPool(BlivetBase):
+                 self._blivet.format_device(member, self._get_format())
+             members.append(member)
+ 
+-
+         if self._is_raid:
+-            raid_name = "%s-1" % self._pool['name']
++            raid_name = "%s-1" % self._pool["name"]
+ 
+             raid_array = self._new_mdarray(members, raid_name=raid_name)
+ 
+@@ -981,14 +1148,15 @@ class BlivetPool(BlivetBase):
+             try:
+                 do_partitioning(self._blivet)
+             except Exception:
+-                raise BlivetAnsibleError("failed to allocate partitions for pool '%s'" % self._pool['name'])
++                raise BlivetAnsibleError(
++                    "failed to allocate partitions for pool '%s'" % self._pool["name"]
++                )
+ 
+         return result
+ 
+-
+     def _get_volumes(self):
+         """ Set up BlivetVolume instances for this pool's volumes. """
+-        for volume in self._pool.get('volumes', []):
++        for volume in self._pool.get("volumes", []):
+             bvolume = _get_blivet_volume(self._blivet, volume, self)
+             self._blivet_volumes.append(bvolume)
+ 
+@@ -1013,7 +1181,10 @@ class BlivetPool(BlivetBase):
+             return
+         elif self._member_management_is_destructive():
+             if safe_mode:
+-                raise BlivetAnsibleError("cannot remove and recreate existing pool '%s' in safe mode" % self._pool['name'])
++                raise BlivetAnsibleError(
++                    "cannot remove and recreate existing pool '%s' in safe mode"
++                    % self._pool["name"]
++                )
+             else:
+                 self._destroy()
+ 
+@@ -1031,15 +1202,22 @@ class BlivetPartitionPool(BlivetPool):
+         self._device = self._disks[0]
+ 
+     def _create(self):
+-        if self._device.format.type != "disklabel" or \
+-           (disklabel_type and self._device.format.label_type != disklabel_type):
++        if self._device.format.type != "disklabel" or (
++            disklabel_type and self._device.format.label_type != disklabel_type
++        ):
+             if safe_mode:
+-                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' "
+-                                         "(pool '%s') in safe mode" % (self._device.name, self._pool['name']))
++                raise BlivetAnsibleError(
++                    "cannot remove existing formatting and/or devices on disk '%s' "
++                    "(pool '%s') in safe mode" % (self._device.name, self._pool["name"])
++                )
+             else:
+-                self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
++                self._blivet.devicetree.recursive_remove(
++                    self._device, remove_device=False
++                )
+ 
+-            label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
++            label = get_format(
++                "disklabel", device=self._device.path, label_type=disklabel_type
++            )
+             self._blivet.format_device(self._device, label)
+ 
+ 
+@@ -1053,9 +1231,13 @@ class BlivetLVMPool(BlivetPool):
+         if self._device is None:
+             return False
+ 
+-        if self._pool['encryption'] and not all(m.encrypted for m in self._device.parents):
++        if self._pool["encryption"] and not all(
++            m.encrypted for m in self._device.parents
++        ):
+             return True
+-        elif not self._pool['encryption'] and any(m.encrypted for m in self._device.parents):
++        elif not self._pool["encryption"] and any(
++            m.encrypted for m in self._device.parents
++        ):
+             return True
+ 
+         return False
+@@ -1080,49 +1262,50 @@ class BlivetLVMPool(BlivetPool):
+ 
+         members = self._manage_encryption(self._create_members())
+         try:
+-            pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members)
++            pool_device = self._blivet.new_vg(name=self._pool["name"], parents=members)
+         except Exception as e:
+-            raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e)))
++            raise BlivetAnsibleError(
++                "failed to set up pool '%s': %s" % (self._pool["name"], str(e))
++            )
+ 
+         self._blivet.create_device(pool_device)
+         self._device = pool_device
+ 
+ 
+-_BLIVET_POOL_TYPES = {
+-    "partition": BlivetPartitionPool,
+-    "lvm": BlivetLVMPool
+-}
++_BLIVET_POOL_TYPES = {"partition": BlivetPartitionPool, "lvm": BlivetLVMPool}
+ 
+ 
+ def _get_blivet_pool(blivet_obj, pool):
+     """ Return an appropriate BlivetPool instance for the pool dict. """
+-    if 'type' not in pool:
++    if "type" not in pool:
+         global pool_defaults
+-        pool['type'] = pool_defaults['type']
++        pool["type"] = pool_defaults["type"]
+ 
+-    if pool['type'] not in _BLIVET_POOL_TYPES:
+-        raise BlivetAnsibleError("Pool '%s' has unknown type '%s'" % (pool['name'], pool['type']))
++    if pool["type"] not in _BLIVET_POOL_TYPES:
++        raise BlivetAnsibleError(
++            "Pool '%s' has unknown type '%s'" % (pool["name"], pool["type"])
++        )
+ 
+-    return _BLIVET_POOL_TYPES[pool['type']](blivet_obj, pool)
++    return _BLIVET_POOL_TYPES[pool["type"]](blivet_obj, pool)
+ 
+ 
+ def manage_volume(b, volume):
+     """ Schedule actions as needed to manage a single standalone volume. """
+     bvolume = _get_blivet_volume(b, volume)
+     bvolume.manage()
+-    volume['_device'] = bvolume._volume.get('_device', '')
+-    volume['_raw_device'] = bvolume._volume.get('_raw_device', '')
+-    volume['_mount_id'] = bvolume._volume.get('_mount_id', '')
++    volume["_device"] = bvolume._volume.get("_device", "")
++    volume["_raw_device"] = bvolume._volume.get("_raw_device", "")
++    volume["_mount_id"] = bvolume._volume.get("_mount_id", "")
+ 
+ 
+ def manage_pool(b, pool):
+     """ Schedule actions as needed to manage a single pool and its volumes. """
+     bpool = _get_blivet_pool(b, pool)
+     bpool.manage()
+-    for (volume, bvolume) in zip(pool['volumes'], bpool._blivet_volumes):
+-        volume['_device'] = bvolume._volume.get('_device', '')
+-        volume['_raw_device'] = bvolume._volume.get('_raw_device', '')
+-        volume['_mount_id'] = bvolume._volume.get('_mount_id', '')
++    for (volume, bvolume) in zip(pool["volumes"], bpool._blivet_volumes):
++        volume["_device"] = bvolume._volume.get("_device", "")
++        volume["_raw_device"] = bvolume._volume.get("_raw_device", "")
++        volume["_mount_id"] = bvolume._volume.get("_mount_id", "")
+ 
+ 
+ class FSTab(object):
+@@ -1141,7 +1324,7 @@ class FSTab(object):
+         if self._entries:
+             self.reset()
+ 
+-        for line in open('/etc/fstab').readlines():
++        for line in open("/etc/fstab").readlines():
+             if line.lstrip().startswith("#"):
+                 continue
+ 
+@@ -1150,23 +1333,27 @@ class FSTab(object):
+                 continue
+ 
+             device = self._blivet.devicetree.resolve_device(fields[0])
+-            self._entries.append(dict(device_id=fields[0],
+-                                      device_path=getattr(device, 'path', None),
+-                                      fs_type=fields[2],
+-                                      mount_point=fields[1],
+-                                      mount_options=fields[3]))
++            self._entries.append(
++                dict(
++                    device_id=fields[0],
++                    device_path=getattr(device, "path", None),
++                    fs_type=fields[2],
++                    mount_point=fields[1],
++                    mount_options=fields[3],
++                )
++            )
+ 
+ 
+ def get_mount_info(pools, volumes, actions, fstab):
+-    """ Return a list of argument dicts to pass to the mount module to manage mounts.
++    """Return a list of argument dicts to pass to the mount module to manage mounts.
+ 
+-        The overall approach is to remove existing mounts associated with file systems
+-        we are removing and those with changed mount points, re-adding them with the
+-        new mount point later.
++    The overall approach is to remove existing mounts associated with file systems
++    we are removing and those with changed mount points, re-adding them with the
++    new mount point later.
+ 
+-        Removed mounts go directly into the mount_info list, which is the return value,
+-        while added/active mounts to a list that gets appended to the mount_info list
+-        at the end to ensure that removals happen first.
++    Removed mounts go directly into the mount_info list, which is the return value,
++    while added/active mounts to a list that gets appended to the mount_info list
++    at the end to ensure that removals happen first.
+     """
+     mount_info = list()
+     mount_vols = list()
+@@ -1174,33 +1361,50 @@ def get_mount_info(pools, volumes, actions, fstab):
+     # account for mounts removed by removing or reformatting volumes
+     if actions:
+         for action in actions:
+-            if action.is_destroy and action.is_format and action.format.type is not None:
+-                mount = fstab.lookup('device_path', action.device.path)
++            if (
++                action.is_destroy
++                and action.is_format
++                and action.format.type is not None
++            ):
++                mount = fstab.lookup("device_path", action.device.path)
+                 if mount is not None:
+-                    mount_info.append({"src": mount['device_id'], "path": mount['mount_point'],
+-                                       'state': 'absent', 'fstype': mount['fs_type']})
++                    mount_info.append(
++                        {
++                            "src": mount["device_id"],
++                            "path": mount["mount_point"],
++                            "state": "absent",
++                            "fstype": mount["fs_type"],
++                        }
++                    )
+ 
+     def handle_new_mount(volume, fstab):
+         replace = None
+         mounted = False
+ 
+-        mount = fstab.lookup('device_path', volume['_device'])
+-        if (volume['mount_point'] and volume['mount_point'].startswith('/')) \
+-           or volume['fs_type'] == 'swap':
++        mount = fstab.lookup("device_path", volume["_device"])
++        if (volume["mount_point"] and volume["mount_point"].startswith("/")) or volume[
++            "fs_type"
++        ] == "swap":
+             mounted = True
+ 
+         # handle removal of existing mounts of this volume
+-        if mount and mount['fs_type'] != 'swap' and mount['mount_point'] != volume['mount_point']:
+-            replace = dict(path=mount['mount_point'], state="absent")
+-        elif mount and mount['fs_type'] == 'swap':
+-            replace = dict(src=mount['device_id'], fstype="swap", path="none", state="absent")
++        if (
++            mount
++            and mount["fs_type"] != "swap"
++            and mount["mount_point"] != volume["mount_point"]
++        ):
++            replace = dict(path=mount["mount_point"], state="absent")
++        elif mount and mount["fs_type"] == "swap":
++            replace = dict(
++                src=mount["device_id"], fstype="swap", path="none", state="absent"
++            )
+ 
+         return mounted, replace
+ 
+     # account for mounts that we set up or are replacing in pools
+     for pool in pools:
+-        for volume in pool['volumes']:
+-            if pool['state'] == 'present' and volume['state'] == 'present':
++        for volume in pool["volumes"]:
++            if pool["state"] == "present" and volume["state"] == "present":
+                 mounted, replace = handle_new_mount(volume, fstab)
+                 if replace:
+                     mount_info.append(replace)
+@@ -1209,7 +1413,7 @@ def get_mount_info(pools, volumes, actions, fstab):
+ 
+     # account for mounts that we set up or are replacing in standalone volumes
+     for volume in volumes:
+-        if volume['state'] == 'present':
++        if volume["state"] == "present":
+             mounted, replace = handle_new_mount(volume, fstab)
+             if replace:
+                 mount_info.append(replace)
+@@ -1217,13 +1421,19 @@ def get_mount_info(pools, volumes, actions, fstab):
+                 mount_vols.append(volume)
+ 
+     for volume in mount_vols:
+-        mount_info.append({'src': volume['_mount_id'],
+-                           'path': volume['mount_point'] if volume['fs_type'] != "swap" else "none",
+-                           'fstype': volume['fs_type'],
+-                           'opts': volume['mount_options'],
+-                           'dump': volume['mount_check'],
+-                           'passno': volume['mount_passno'],
+-                           'state': 'mounted' if volume['fs_type'] != "swap" else "present"})
++        mount_info.append(
++            {
++                "src": volume["_mount_id"],
++                "path": volume["mount_point"]
++                if volume["fs_type"] != "swap"
++                else "none",
++                "fstype": volume["fs_type"],
++                "opts": volume["mount_options"],
++                "dump": volume["mount_check"],
++                "passno": volume["mount_passno"],
++                "state": "mounted" if volume["fs_type"] != "swap" else "present",
++            }
++        )
+ 
+     return mount_info
+ 
+@@ -1231,15 +1441,19 @@ def get_mount_info(pools, volumes, actions, fstab):
+ def get_crypt_info(actions):
+     info = list()
+     for action in actions:
+-        if not (action.is_format and action.format.type == 'luks'):
++        if not (action.is_format and action.format.type == "luks"):
+             continue
+ 
+-        info.append(dict(backing_device=action.device.path,
+-                         name=action.format.map_name,
+-                         password=action.format.key_file or '-',
+-                         state='present' if action.is_create else 'absent'))
++        info.append(
++            dict(
++                backing_device=action.device.path,
++                name=action.format.map_name,
++                password=action.format.key_file or "-",
++                state="present" if action.is_create else "absent",
++            )
++        )
+ 
+-    return sorted(info, key=lambda e: e['state'])
++    return sorted(info, key=lambda e: e["state"])
+ 
+ 
+ def get_required_packages(b, pools, volumes):
+@@ -1259,66 +1473,70 @@ def get_required_packages(b, pools, volumes):
+ 
+ 
+ def update_fstab_identifiers(b, pools, volumes):
+-    """ Update fstab device identifiers.
++    """Update fstab device identifiers.
+ 
+-        This is to pick up new UUIDs for newly-formatted devices.
++    This is to pick up new UUIDs for newly-formatted devices.
+     """
+     all_volumes = volumes[:]
+     for pool in pools:
+-        if not pool['state'] == 'present':
++        if not pool["state"] == "present":
+             continue
+ 
+-        all_volumes += pool['volumes']
++        all_volumes += pool["volumes"]
+ 
+     for volume in all_volumes:
+-        if volume['state'] == 'present':
+-            device = b.devicetree.resolve_device(volume['_mount_id'])
+-            if device is None and volume['encryption']:
+-                device = b.devicetree.resolve_device(volume['_raw_device'])
++        if volume["state"] == "present":
++            device = b.devicetree.resolve_device(volume["_mount_id"])
++            if device is None and volume["encryption"]:
++                device = b.devicetree.resolve_device(volume["_raw_device"])
+                 if device is not None and not device.isleaf:
+                     device = device.children[0]
+-                    volume['_device'] = device.path
++                    volume["_device"] = device.path
+ 
+             if device is None:
+-                raise BlivetAnsibleError("failed to look up device for volume %s (%s/%s)" % (volume['name'], volume['_device'], volume['_mount_id']))
+-            volume['_mount_id'] = device.fstab_spec
+-            if device.format.type == 'swap':
++                raise BlivetAnsibleError(
++                    "failed to look up device for volume %s (%s/%s)"
++                    % (volume["name"], volume["_device"], volume["_mount_id"])
++                )
++            volume["_mount_id"] = device.fstab_spec
++            if device.format.type == "swap":
+                 device.format.setup()
+ 
+             if device.status:
+-                volume['_kernel_device'] = os.path.realpath(device.path)
++                volume["_kernel_device"] = os.path.realpath(device.path)
+             if device.raw_device.status:
+-                volume['_raw_kernel_device'] = os.path.realpath(device.raw_device.path)
++                volume["_raw_kernel_device"] = os.path.realpath(device.raw_device.path)
+ 
+ 
+ def activate_swaps(b, pools, volumes):
+     """ Activate all swaps specified as present. """
+     all_volumes = volumes[:]
+     for pool in pools:
+-        if not pool['state'] == 'present':
++        if not pool["state"] == "present":
+             continue
+ 
+-        all_volumes += pool['volumes']
++        all_volumes += pool["volumes"]
+ 
+     for volume in all_volumes:
+-        if volume['state'] == 'present':
+-            device = b.devicetree.resolve_device(volume['_mount_id'])
+-            if device.format.type == 'swap':
++        if volume["state"] == "present":
++            device = b.devicetree.resolve_device(volume["_mount_id"])
++            if device.format.type == "swap":
+                 device.format.setup()
+ 
+ 
+ def run_module():
+     # available arguments/parameters that a user can pass
+     module_args = dict(
+-        pools=dict(type='list'),
+-        volumes=dict(type='list'),
+-        packages_only=dict(type='bool', required=False, default=False),
+-        disklabel_type=dict(type='str', required=False, default=None),
+-        safe_mode=dict(type='bool', required=False, default=True),
+-        pool_defaults=dict(type='dict', required=False),
+-        volume_defaults=dict(type='dict', required=False),
+-        use_partitions=dict(type='bool', required=False, default=True),
+-        diskvolume_mkfs_option_map=dict(type='dict', required=False, default={}))
++        pools=dict(type="list"),
++        volumes=dict(type="list"),
++        packages_only=dict(type="bool", required=False, default=False),
++        disklabel_type=dict(type="str", required=False, default=None),
++        safe_mode=dict(type="bool", required=False, default=True),
++        pool_defaults=dict(type="dict", required=False),
++        volume_defaults=dict(type="dict", required=False),
++        use_partitions=dict(type="bool", required=False, default=True),
++        diskvolume_mkfs_option_map=dict(type="dict", required=False, default={}),
++    )
+ 
+     # seed the result dict in the object
+     result = dict(
+@@ -1332,47 +1550,52 @@ def run_module():
+         packages=list(),
+     )
+ 
+-    module = AnsibleModule(argument_spec=module_args,
+-                           supports_check_mode=True)
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+     if not BLIVET_PACKAGE:
+-        module.fail_json(msg="Failed to import the blivet or blivet3 Python modules",
+-                         exception=inspect.cleandoc("""
++        module.fail_json(
++            msg="Failed to import the blivet or blivet3 Python modules",
++            exception=inspect.cleandoc(
++                """
+                          blivet3 exception:
+                          {}
+                          blivet exception:
+-                         {}""").format(LIB_IMP_ERR3, LIB_IMP_ERR))
++                         {}"""
++            ).format(LIB_IMP_ERR3, LIB_IMP_ERR),
++        )
+ 
+-    if not module.params['pools'] and not module.params['volumes']:
++    if not module.params["pools"] and not module.params["volumes"]:
+         module.exit_json(**result)
+ 
+     global disklabel_type
+-    disklabel_type = module.params['disklabel_type']
++    disklabel_type = module.params["disklabel_type"]
+ 
+     global use_partitions
+-    use_partitions = module.params['use_partitions']
++    use_partitions = module.params["use_partitions"]
+ 
+     global safe_mode
+-    safe_mode = module.params['safe_mode']
++    safe_mode = module.params["safe_mode"]
+ 
+     global diskvolume_mkfs_option_map
+-    diskvolume_mkfs_option_map = module.params['diskvolume_mkfs_option_map']
++    diskvolume_mkfs_option_map = module.params["diskvolume_mkfs_option_map"]
+ 
+     global pool_defaults
+-    if 'pool_defaults' in module.params:
+-        pool_defaults = module.params['pool_defaults']
++    if "pool_defaults" in module.params:
++        pool_defaults = module.params["pool_defaults"]
+ 
+     global volume_defaults
+-    if 'volume_defaults' in module.params:
+-        volume_defaults = module.params['volume_defaults']
++    if "volume_defaults" in module.params:
++        volume_defaults = module.params["volume_defaults"]
+ 
+     b = Blivet()
+     b.reset()
+     fstab = FSTab(b)
+     actions = list()
+ 
+-    if module.params['packages_only']:
++    if module.params["packages_only"]:
+         try:
+-            result['packages'] = get_required_packages(b, module.params['pools'], module.params['volumes'])
++            result["packages"] = get_required_packages(
++                b, module.params["pools"], module.params["volumes"]
++            )
+         except BlivetAnsibleError as e:
+             module.fail_json(msg=str(e), **result)
+         module.exit_json(**result)
+@@ -1388,44 +1611,56 @@ def run_module():
+             sys_path = action.device.path
+             if os.path.islink(sys_path):
+                 sys_path = os.readlink(action.device.path)
+-            trigger(action='change', subsystem='block', name=os.path.basename(sys_path))
++            trigger(action="change", subsystem="block", name=os.path.basename(sys_path))
+ 
+     def action_dict(action):
+-        return dict(action=action.type_desc_str,
+-                    fs_type=action.format.type if action.is_format else None,
+-                    device=action.device.path)
++        return dict(
++            action=action.type_desc_str,
++            fs_type=action.format.type if action.is_format else None,
++            device=action.device.path,
++        )
+ 
+-    duplicates = find_duplicate_names(module.params['pools'])
++    duplicates = find_duplicate_names(module.params["pools"])
+     if duplicates:
+-        module.fail_json(msg="multiple pools with the same name: {0}".format(",".join(duplicates)),
+-                         **result)
+-    for pool in module.params['pools']:
+-        duplicates = find_duplicate_names(pool.get('volumes', list()))
++        module.fail_json(
++            msg="multiple pools with the same name: {0}".format(",".join(duplicates)),
++            **result
++        )
++    for pool in module.params["pools"]:
++        duplicates = find_duplicate_names(pool.get("volumes", list()))
+         if duplicates:
+-            module.fail_json(msg="multiple volumes in pool '{0}' with the "
+-                                 "same name: {1}".format(pool['name'], ",".join(duplicates)),
+-                             **result)
++            module.fail_json(
++                msg="multiple volumes in pool '{0}' with the "
++                "same name: {1}".format(pool["name"], ",".join(duplicates)),
++                **result
++            )
+         try:
+             manage_pool(b, pool)
+         except BlivetAnsibleError as e:
+             module.fail_json(msg=str(e), **result)
+ 
+-    duplicates = find_duplicate_names(module.params['volumes'])
++    duplicates = find_duplicate_names(module.params["volumes"])
+     if duplicates:
+-        module.fail_json(msg="multiple volumes with the same name: {0}".format(",".join(duplicates)),
+-                         **result)
+-    for volume in module.params['volumes']:
++        module.fail_json(
++            msg="multiple volumes with the same name: {0}".format(",".join(duplicates)),
++            **result
++        )
++    for volume in module.params["volumes"]:
+         try:
+             manage_volume(b, volume)
+         except BlivetAnsibleError as e:
+             module.fail_json(msg=str(e), **result)
+ 
+     scheduled = b.devicetree.actions.find()
+-    result['packages'] = b.packages[:]
++    result["packages"] = b.packages[:]
+ 
+     for action in scheduled:
+-        if (action.is_destroy or action.is_resize) and action.is_format and action.format.exists and \
+-           (action.format.mountable or action.format.type == "swap"):
++        if (
++            (action.is_destroy or action.is_resize)
++            and action.is_format
++            and action.format.exists
++            and (action.format.mountable or action.format.type == "swap")
++        ):
+             action.format.teardown()
+ 
+     if scheduled:
+@@ -1433,21 +1668,27 @@ def run_module():
+         callbacks.action_executed.add(record_action)
+         callbacks.action_executed.add(ensure_udev_update)
+         try:
+-            b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
++            b.devicetree.actions.process(
++                devices=b.devicetree.devices, dry_run=module.check_mode
++            )
+         except Exception as e:
+-            module.fail_json(msg="Failed to commit changes to disk: %s" % str(e), **result)
++            module.fail_json(
++                msg="Failed to commit changes to disk: %s" % str(e), **result
++            )
+         finally:
+-            result['changed'] = True
+-            result['actions'] = [action_dict(a) for a in actions]
++            result["changed"] = True
++            result["actions"] = [action_dict(a) for a in actions]
+ 
+-    update_fstab_identifiers(b, module.params['pools'], module.params['volumes'])
+-    activate_swaps(b, module.params['pools'], module.params['volumes'])
++    update_fstab_identifiers(b, module.params["pools"], module.params["volumes"])
++    activate_swaps(b, module.params["pools"], module.params["volumes"])
+ 
+-    result['mounts'] = get_mount_info(module.params['pools'], module.params['volumes'], actions, fstab)
+-    result['crypts'] = get_crypt_info(actions)
+-    result['leaves'] = [d.path for d in b.devicetree.leaves]
+-    result['pools'] = module.params['pools']
+-    result['volumes'] = module.params['volumes']
++    result["mounts"] = get_mount_info(
++        module.params["pools"], module.params["volumes"], actions, fstab
++    )
++    result["crypts"] = get_crypt_info(actions)
++    result["leaves"] = [d.path for d in b.devicetree.leaves]
++    result["pools"] = module.params["pools"]
++    result["volumes"] = module.params["volumes"]
+ 
+     # success - return result
+     module.exit_json(**result)
+@@ -1456,5 +1697,6 @@ def run_module():
+ def main():
+     run_module()
+ 
+-if __name__ == '__main__':
++
++if __name__ == "__main__":
+     main()
+diff --git a/library/blockdev_info.py b/library/blockdev_info.py
+index 52ddd78..ca1577f 100644
+--- a/library/blockdev_info.py
++++ b/library/blockdev_info.py
+@@ -1,35 +1,41 @@
+ #!/usr/bin/python
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ ANSIBLE_METADATA = {
+-    'metadata_version': '1.1',
+-    'status': ['preview'],
+-    'supported_by': 'community'
++    "metadata_version": "1.1",
++    "status": ["preview"],
++    "supported_by": "community",
+ }
+ 
+-DOCUMENTATION = '''
++DOCUMENTATION = """
+ ---
+ module: blockdev_info
+ short_description: Collect info about block devices in the system.
+ version_added: "2.5"
+ description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
+     - "This module collects information about block devices"
+-options:
++options: {}
+ author:
+-    - David Lehman (dlehman@redhat.com)
+-'''
++    - David Lehman (@dwlehman)
++"""
+ 
+-EXAMPLES = '''
++EXAMPLES = """
+ - name: Get info about block devices
+   blockdev_info:
+   register: blk_info
+ 
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ info:
+     description: dict w/ device path keys and device info dict values
++    returned: success
+     type: dict
+-'''
++"""
+ 
+ import os
+ import shlex
+@@ -38,7 +44,7 @@ from ansible.module_utils.basic import AnsibleModule
+ 
+ 
+ LSBLK_DEVICE_TYPES = {"part": "partition"}
+-DEV_MD_DIR = '/dev/md'
++DEV_MD_DIR = "/dev/md"
+ 
+ 
+ def fixup_md_path(path):
+@@ -59,7 +65,9 @@ def fixup_md_path(path):
+ 
+ 
+ def get_block_info(run_cmd):
+-    buf = run_cmd(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"])[1]
++    buf = run_cmd(
++        ["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"]
++    )[1]
+     info = dict()
+     for line in buf.splitlines():
+         dev = dict()
+@@ -75,7 +83,7 @@ def get_block_info(run_cmd):
+ 
+                 dev[key.lower()] = LSBLK_DEVICE_TYPES.get(value, value)
+         if dev:
+-            info[dev['name']] = dev
++            info[dev["name"]] = dev
+ 
+     return info
+ 
+@@ -87,13 +95,10 @@ def run_module():
+         info=None,
+     )
+ 
+-    module = AnsibleModule(
+-        argument_spec=module_args,
+-        supports_check_mode=True
+-    )
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ 
+     try:
+-        result['info'] = get_block_info(module.run_command)
++        result["info"] = get_block_info(module.run_command)
+     except Exception:
+         module.fail_json(msg="Failed to collect block device info.")
+ 
+@@ -104,5 +109,5 @@ def main():
+     run_module()
+ 
+ 
+-if __name__ == '__main__':
++if __name__ == "__main__":
+     main()
+diff --git a/library/bsize.py b/library/bsize.py
+index 40442f5..524b0f9 100644
+--- a/library/bsize.py
++++ b/library/bsize.py
+@@ -1,12 +1,16 @@
+ #!/usr/bin/python
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ ANSIBLE_METADATA = {
+-    'metadata_version': '1.1',
+-    'status': ['preview'],
+-    'supported_by': 'community'
++    "metadata_version": "1.1",
++    "status": ["preview"],
++    "supported_by": "community",
+ }
+ 
+-DOCUMENTATION = '''
++DOCUMENTATION = """
+ ---
+ module: bsize
+ 
+@@ -15,6 +19,7 @@ short_description: Module for basic manipulation with byte sizes
+ version_added: "2.5"
+ 
+ description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
+     - "Module accepts byte size strings with the units and produces strings in
+       form of input accepted by different storage tools"
+ 
+@@ -23,67 +28,72 @@ options:
+         description:
+             - String containing number and byte units
+         required: true
++        type: str
+ 
+ author:
+-    - Jan Pokorny (japokorn@redhat.com)
+-'''
++    - Jan Pokorny (@japokorn)
++"""
+ 
+-EXAMPLES = '''
++EXAMPLES = """
+ # Obtain sizes in format for various tools
+ - name: Get 10 KiB size
+   bsize:
+     size: 10 KiB
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ size:
+     description: Size in binary format units
+     type: str
++    returned: success
+ bytes:
+     description: Size in bytes
+     type: int
++    returned: success
+ lvm:
+     description: Size in binary format. No space after the number,
+                  first letter of unit prefix in lowercase only
+     type: str
++    returned: success
+ parted:
+     description: Size in binary format. No space after the number
+     type: str
+-'''
++    returned: success
++"""
+ 
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils.storage_lsr.size import Size
+ 
++
+ def run_module():
+     # available arguments/parameters that a user can pass
+     module_args = dict(
+-        size=dict(type='str', required=True),
++        size=dict(type="str", required=True),
+     )
+ 
+     # seed the result dict in the object
+-    result = dict(
+-        changed=False
+-    )
++    result = dict(changed=False)
+ 
+-    module = AnsibleModule(argument_spec=module_args,
+-                           supports_check_mode=True)
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ 
+-    size = Size(module.params['size'])
++    size = Size(module.params["size"])
+ 
+-    result['size'] = size.get(fmt="%d %sb")
+-    result['bytes'] = size.bytes
+-    result['lvm'] = size.get(fmt="%d%sb").lower()[:-2]
+-    result['parted'] = size.get(fmt="%d%sb")
++    result["size"] = size.get(fmt="%d %sb")
++    result["bytes"] = size.bytes
++    result["lvm"] = size.get(fmt="%d%sb").lower()[:-2]
++    result["parted"] = size.get(fmt="%d%sb")
+ 
+     # use whatever logic you need to determine whether or not this module
+     # made any modifications to your target
+-    result['changed'] = False
++    result["changed"] = False
+ 
+     # success - return result
+     module.exit_json(**result)
+ 
++
+ def main():
+     run_module()
+ 
+-if __name__ == '__main__':
++
++if __name__ == "__main__":
+     main()
+diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py
+index 0a6fc7d..c688170 100644
+--- a/library/find_unused_disk.py
++++ b/library/find_unused_disk.py
+@@ -1,10 +1,15 @@
+ #!/usr/bin/python
+ 
+-DOCUMENTATION = '''
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
++DOCUMENTATION = """
+ ---
+ module: find_unused_disk
+ short_description: Gets unused disks
+ description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
+     - Disks are considered in ascending alphanumeric sorted order.
+     - Disks that meet all conditions are considered 'empty' and returned (using kernel device name) in a list.
+         - 1. No known signatures exist on the disk, with the exception of partition tables.
+@@ -15,18 +20,18 @@ description:
+     - Number of returned disks defaults to first 10, but can be specified with 'max_return' argument.
+ author: Eda Zhou (@edamamez)
+ options:
+-    option-name: max_return
+-    description: Sets the maximum number of unused disks to return.
+-    default: 10
+-    type: int
+-
+-    option-name: min_size
+-    description: Specifies the minimum disk size to return an unused disk.
+-    default: 0
+-    type: str
+-'''
+-
+-EXAMPLES = '''
++    max_return:
++        description: Sets the maximum number of unused disks to return.
++        default: 10
++        type: int
++
++    min_size:
++        description: Specifies the minimum disk size to return an unused disk.
++        default: 0
++        type: str
++"""
++
++EXAMPLES = """
+ - name: test finding first unused device module
+   hosts: localhost
+   tasks:
+@@ -38,9 +43,9 @@ EXAMPLES = '''
+     - name: dump test output
+       debug:
+         msg: '{{ testout }}'
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ disk_name:
+     description: Information about unused disks
+     returned: On success
+@@ -50,14 +55,15 @@ disk_name:
+             description: Unused disk(s) that have been found
+             returned: On success
+             type: list
+-            samples: ["sda1", "dm-0", "dm-3"]
+-                     ["sda"]
++            samples: |
++              ["sda1", "dm-0", "dm-3"]
++              ["sda"]
+         none:
+             description: No unused disks were found
+             returned: On success
+             type: string
+             sample: "Unable to find unused disk"
+-'''
++"""
+ 
+ 
+ import os
+@@ -68,7 +74,7 @@ from ansible.module_utils.storage_lsr.size import Size
+ 
+ 
+ SYS_CLASS_BLOCK = "/sys/class/block/"
+-IGNORED_DEVICES = [re.compile(r'^/dev/nullb\d+$')]
++IGNORED_DEVICES = [re.compile(r"^/dev/nullb\d+$")]
+ 
+ 
+ def is_ignored(disk_path):
+@@ -78,13 +84,13 @@ def is_ignored(disk_path):
+ 
+ def no_signature(run_command, disk_path):
+     """Return true if no known signatures exist on the disk."""
+-    signatures = run_command(['blkid', '-p', disk_path])
+-    return not 'UUID' in signatures[1]
++    signatures = run_command(["blkid", "-p", disk_path])
++    return "UUID" not in signatures[1]
+ 
+ 
+ def no_holders(disk_path):
+     """Return true if the disk has no holders."""
+-    holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + '/holders/')
++    holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + "/holders/")
+     return len(holders) == 0
+ 
+ 
+@@ -101,36 +107,45 @@ def get_sys_name(disk_path):
+     if not os.path.islink(disk_path):
+         return os.path.basename(disk_path)
+ 
+-    node_dir = '/'.join(disk_path.split('/')[-1])
+-    return os.path.normpath(node_dir + '/' + os.readlink(disk_path))
++    node_dir = "/".join(disk_path.split("/")[-1])
++    return os.path.normpath(node_dir + "/" + os.readlink(disk_path))
+ 
+ 
+ def get_partitions(disk_path):
+     sys_name = get_sys_name(disk_path)
+     partitions = list()
+     for filename in os.listdir(SYS_CLASS_BLOCK + sys_name):
+-        if re.match(sys_name + r'p?\d+$', filename):
++        if re.match(sys_name + r"p?\d+$", filename):
+             partitions.append(filename)
+ 
+     return partitions
+ 
+ 
+ def get_disks(run_command):
+-    buf = run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"])[1]
++    buf = run_command(
++        ["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"]
++    )[1]
+     disks = dict()
+     for line in buf.splitlines():
+         if not line:
+             continue
+ 
+-        m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"', line)
++        m = re.search(
++            r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"',
++            line,
++        )
+         if m is None:
+             print(line)
+             continue
+ 
+-        if m.group('type') != "disk":
++        if m.group("type") != "disk":
+             continue
+ 
+-        disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'), "fstype": m.group('fstype')}
++        disks[m.group("path")] = {
++            "type": m.group("type"),
++            "size": m.group("size"),
++            "fstype": m.group("fstype"),
++        }
+ 
+     return disks
+ 
+@@ -138,19 +153,13 @@ def get_disks(run_command):
+ def run_module():
+     """Create the module"""
+     module_args = dict(
+-        max_return=dict(type='int', required=False, default=10),
+-        min_size=dict(type='str', required=False, default=0)
++        max_return=dict(type="int", required=False, default=10),
++        min_size=dict(type="str", required=False, default=0),
+     )
+ 
+-    result = dict(
+-        changed=False,
+-        disks=[]
+-    )
++    result = dict(changed=False, disks=[])
+ 
+-    module = AnsibleModule(
+-        argument_spec=module_args,
+-        supports_check_mode=True
+-    )
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ 
+     run_command = module.run_command
+ 
+@@ -161,7 +170,7 @@ def run_module():
+         if attrs["fstype"]:
+             continue
+ 
+-        if Size(attrs["size"]).bytes < Size(module.params['min_size']).bytes:
++        if Size(attrs["size"]).bytes < Size(module.params["min_size"]).bytes:
+             continue
+ 
+         if get_partitions(path):
+@@ -173,14 +182,14 @@ def run_module():
+         if not can_open(path):
+             continue
+ 
+-        result['disks'].append(os.path.basename(path))
+-        if len(result['disks']) >= module.params['max_return']:
++        result["disks"].append(os.path.basename(path))
++        if len(result["disks"]) >= module.params["max_return"]:
+             break
+ 
+-    if not result['disks']:
+-        result['disks'] = "Unable to find unused disk"
++    if not result["disks"]:
++        result["disks"] = "Unable to find unused disk"
+     else:
+-        result['disks'].sort()
++        result["disks"].sort()
+ 
+     module.exit_json(**result)
+ 
+@@ -190,5 +199,5 @@ def main():
+     run_module()
+ 
+ 
+-if __name__ == '__main__':
++if __name__ == "__main__":
+     main()
+diff --git a/library/lvm_gensym.py b/library/lvm_gensym.py
+index 49d1822..3e0f613 100644
+--- a/library/lvm_gensym.py
++++ b/library/lvm_gensym.py
+@@ -1,66 +1,75 @@
+ #!/usr/bin/python
+ """Generates unique, default names for a volume group and logical volume"""
+ 
+-from ansible.module_utils.basic import AnsibleModule
+-from ansible.module_utils import facts
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
+ 
+ ANSIBLE_METADATA = {
+-    'metadata_version': '1.1',
+-    'status': ['preview'],
+-    'supported_by': 'community'
++    "metadata_version": "1.1",
++    "status": ["preview"],
++    "supported_by": "community",
+ }
+ 
+-DOCUMENTATION = '''
++DOCUMENTATION = """
+ ---
+ module: lvm_gensym
+ short_description: Generate default names for lvm variables
+ version_added: "2.4"
+-description: 
+-    - "Module accepts two input strings consisting of a file system type and 
++description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
++    - "Module accepts two input strings consisting of a file system type and
+        a mount point path, and outputs names based on system information"
+ options:
+     fs_type:
+         description:
+-            - String describing the desired file system type 
+-        required: true 
++            - String describing the desired file system type
++        required: true
++        type: str
+     mount:
+         description:
+-            - String describing the mount point path 
++            - String describing the mount point path
+         required: true
+-author: 
+-    - Tim Flannagan (tflannag@redhat.com)
+-'''
++        type: str
++author:
++    - Tim Flannagan (@timflannagan)
++"""
+ 
+-EXAMPLES = '''
+-- name: Generate names 
++EXAMPLES = """
++- name: Generate names
+   lvm_gensym:
+     fs_type: "{{ fs_type }}"
+     mount: "{{ mount_point }}"
+   register: lvm_results
+   when: lvm_vg == "" and mount_point != "" and fs_type != ""
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ vg_name:
+     description: The default generated name for an unspecified volume group
+     type: str
+-
++    returned: success
+ lv_name:
+     description: The default generated name for an unspecified logical volume
+     type: str
+-'''
++    returned: success
++"""
++
++from ansible.module_utils.basic import AnsibleModule
++from ansible.module_utils import facts
+ 
+ 
+ def get_os_name():
+     """Search the host file and return the name in the ID column"""
+-    for line in open('/etc/os-release').readlines():
+-        if not line.find('ID='):
++    for line in open("/etc/os-release").readlines():
++        if not line.find("ID="):
+             os_name = line[3:]
+             break
+ 
+-    os_name = os_name.replace('\n', '').replace('"', '')
++    os_name = os_name.replace("\n", "").replace('"', "")
+     return os_name
+ 
++
+ def name_is_unique(name, used_names):
+     """Check if name is contained in the used_names list and return boolean value"""
+     if name not in used_names:
+@@ -68,14 +77,15 @@ def name_is_unique(name, used_names):
+ 
+     return False
+ 
++
+ def get_unique_name_from_base(base_name, used_names):
+     """Generate a unique name given a base name and a list of used names, and return that unique name"""
+     counter = 0
+     while not name_is_unique(base_name, used_names):
+         if counter == 0:
+-            base_name = base_name + '_' + str(counter)
++            base_name = base_name + "_" + str(counter)
+         else:
+-            base_name = base_name[:-2] + '_' + str(counter)
++            base_name = base_name[:-2] + "_" + str(counter)
+         counter += 1
+ 
+     return base_name
+@@ -83,8 +93,8 @@ def get_unique_name_from_base(base_name, used_names):
+ 
+ def get_vg_name_base(host_name, os_name):
+     """Return a base name for a volume group based on the host and os names"""
+-    if host_name != None and len(host_name) != 0:
+-        vg_default = os_name + '_' + host_name
++    if host_name is not None and len(host_name) != 0:
++        vg_default = os_name + "_" + host_name
+     else:
+         vg_default = os_name
+ 
+@@ -93,65 +103,68 @@ def get_vg_name_base(host_name, os_name):
+ 
+ def get_vg_name(host_name, lvm_facts):
+     """Generate a base volume group name, verify its uniqueness, and return that unique name"""
+-    used_vg_names = lvm_facts['vgs'].keys()
++    used_vg_names = lvm_facts["vgs"].keys()
+     os_name = get_os_name()
+     name = get_vg_name_base(host_name, os_name)
+ 
+     return get_unique_name_from_base(name, used_vg_names)
+ 
++
+ def get_lv_name_base(fs_type, mount_point):
+     """Return a logical volume base name using given parameters"""
+-    if 'swap' in fs_type.lower():
+-        lv_default = 'swap'
+-    elif mount_point.startswith('/'):
+-        if mount_point == '/':
+-            lv_default = 'root'
++    if "swap" in fs_type.lower():
++        lv_default = "swap"
++    elif mount_point.startswith("/"):
++        if mount_point == "/":
++            lv_default = "root"
+         else:
+-            lv_default = mount_point[1:].replace('/', '_')
++            lv_default = mount_point[1:].replace("/", "_")
+     else:
+-        lv_default = 'lv'
++        lv_default = "lv"
+ 
+     return lv_default
+ 
+ 
+ def get_lv_name(fs_type, mount_point, lvm_facts):
+     """Return a unique logical volume name based on specified file system type, mount point, and system facts"""
+-    used_lv_names = lvm_facts['lvs'].keys()
++    used_lv_names = lvm_facts["lvs"].keys()
+     name = get_lv_name_base(fs_type, mount_point)
+ 
+     return get_unique_name_from_base(name, used_lv_names)
+ 
++
+ def run_module():
+     """Setup and initialize all relevant ansible module data"""
+     module_args = dict(
+-        mount=dict(type='str', required=True),
+-        fs_type=dict(type='str', required=True)
++        mount=dict(type="str", required=True), fs_type=dict(type="str", required=True)
+     )
+ 
+-    result = dict(
+-        changed=False,
+-        vg_name='',
+-        lv_name=''
+-    )
++    result = dict(changed=False, vg_name="", lv_name="")
+ 
+-    module = AnsibleModule(
+-        argument_spec=module_args,
+-        supports_check_mode=True
+-    )
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ 
+-    lvm_facts = facts.ansible_facts(module)['lvm']
+-    host_name = facts.ansible_facts(module)['nodename'].lower().replace('.', '_').replace('-', '_')
++    lvm_facts = facts.ansible_facts(module)["lvm"]
++    host_name = (
++        facts.ansible_facts(module)["nodename"]
++        .lower()
++        .replace(".", "_")
++        .replace("-", "_")
++    )
+ 
+-    result['lv_name'] = get_lv_name(module.params['fs_type'], module.params['mount'], lvm_facts)
+-    result['vg_name'] = get_vg_name(host_name, lvm_facts)
++    result["lv_name"] = get_lv_name(
++        module.params["fs_type"], module.params["mount"], lvm_facts
++    )
++    result["vg_name"] = get_vg_name(host_name, lvm_facts)
+ 
+-    if result['lv_name'] != '' and result['vg_name'] != '':
++    if result["lv_name"] != "" and result["vg_name"] != "":
+         module.exit_json(**result)
+     else:
+         module.fail_json(msg="Unable to initialize both group and volume names")
+ 
++
+ def main():
+     run_module()
+ 
+-if __name__ == '__main__':
++
++if __name__ == "__main__":
+     main()
+diff --git a/library/resolve_blockdev.py b/library/resolve_blockdev.py
+index 007bb28..df9dcb1 100644
+--- a/library/resolve_blockdev.py
++++ b/library/resolve_blockdev.py
+@@ -1,17 +1,22 @@
+ #!/usr/bin/python
+ 
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ ANSIBLE_METADATA = {
+-    'metadata_version': '1.1',
+-    'status': ['preview'],
+-    'supported_by': 'community'
++    "metadata_version": "1.1",
++    "status": ["preview"],
++    "supported_by": "community",
+ }
+ 
+-DOCUMENTATION = '''
++DOCUMENTATION = """
+ ---
+ module: resolve_blockdev
+ short_description: Resolve block device specification to device node path.
+ version_added: "2.5"
+ description:
++    - "WARNING: Do not use this module directly! It is only for role internal use."
+     - "This module accepts various forms of block device identifiers and
+        resolves them to the correct block device node path."
+ options:
+@@ -19,11 +24,12 @@ options:
+         description:
+             - String describing a block device
+         required: true
++        type: str
+ author:
+-    - David Lehman (dlehman@redhat.com)
+-'''
++    - David Lehman (@dwlehman)
++"""
+ 
+-EXAMPLES = '''
++EXAMPLES = """
+ - name: Resolve device by label
+   resolve_blockdev:
+     spec: LABEL=MyData
+@@ -35,13 +41,14 @@ EXAMPLES = '''
+ - name: Resolve device by /dev/disk/by-id symlink name
+   resolve_blockdev:
+     spec: wwn-0x5000c5005bc37f3f
+-'''
++"""
+ 
+-RETURN = '''
++RETURN = """
+ device:
+     description: Path to block device node
+     type: str
+-'''
++    returned: success
++"""
+ 
+ import glob
+ import os
+@@ -52,37 +59,42 @@ from ansible.module_utils.basic import AnsibleModule
+ DEV_MD = "/dev/md"
+ DEV_MAPPER = "/dev/mapper"
+ SYS_CLASS_BLOCK = "/sys/class/block"
+-SEARCH_DIRS = ['/dev', DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*")
+-MD_KERNEL_DEV = re.compile(r'/dev/md\d+(p\d+)?$')
++SEARCH_DIRS = ["/dev", DEV_MAPPER, DEV_MD] + glob.glob("/dev/disk/by-*")
++MD_KERNEL_DEV = re.compile(r"/dev/md\d+(p\d+)?$")
+ 
+ 
+ def resolve_blockdev(spec, run_cmd):
+     if "=" in spec:
+         device = run_cmd("blkid -t %s -o device" % spec)[1].strip()
+-    elif not spec.startswith('/'):
++    elif not spec.startswith("/"):
+         for devdir in SEARCH_DIRS:
+             device = "%s/%s" % (devdir, spec)
+             if os.path.exists(device):
+                 break
+             else:
+-                device = ''
++                device = ""
+     else:
+         device = spec
+ 
+     if not device or not os.path.exists(device):
+-        return ''
++        return ""
+ 
+     return canonical_device(os.path.realpath(device))
+ 
+ 
+ def _get_dm_name_from_kernel_dev(kdev):
+-    return open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip()
++    return (
++        open("%s/%s/dm/name" % (SYS_CLASS_BLOCK, os.path.basename(kdev))).read().strip()
++    )
+ 
+ 
+ def _get_md_name_from_kernel_dev(kdev):
+     minor = os.minor(os.stat(kdev).st_rdev)
+-    return next(name for name in os.listdir(DEV_MD)
+-                if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor)
++    return next(
++        name
++        for name in os.listdir(DEV_MD)
++        if os.minor(os.stat("%s/%s" % (DEV_MD, name)).st_rdev) == minor
++    )
+ 
+ 
+ def canonical_device(device):
+@@ -94,26 +106,27 @@ def canonical_device(device):
+ 
+ 
+ def run_module():
+-    module_args = dict(
+-        spec=dict(type='str')
+-    )
++    module_args = dict(spec=dict(type="str"))
+ 
+     result = dict(
+         device=None,
+     )
+ 
+-    module = AnsibleModule(
+-        argument_spec=module_args,
+-        supports_check_mode=True
+-    )
++    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ 
+     try:
+-        result['device'] = resolve_blockdev(module.params['spec'], run_cmd=module.run_command)
++        result["device"] = resolve_blockdev(
++            module.params["spec"], run_cmd=module.run_command
++        )
+     except Exception:
+         pass
+ 
+-    if not result['device'] or not os.path.exists(result['device']):
+-        module.fail_json(msg="The {} device spec could not be resolved".format(module.params['spec']))
++    if not result["device"] or not os.path.exists(result["device"]):
++        module.fail_json(
++            msg="The {0} device spec could not be resolved".format(
++                module.params["spec"]
++            )
++        )
+ 
+     module.exit_json(**result)
+ 
+@@ -122,5 +135,5 @@ def main():
+     run_module()
+ 
+ 
+-if __name__ == '__main__':
++if __name__ == "__main__":
+     main()
+diff --git a/module_utils/storage_lsr/size.py b/module_utils/storage_lsr/size.py
+index 16f3d7c..1e91faa 100644
+--- a/module_utils/storage_lsr/size.py
++++ b/module_utils/storage_lsr/size.py
+@@ -1,4 +1,6 @@
+-#!/bin/python2
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
+ 
+ import re
+ 
+@@ -7,15 +9,20 @@ BINARY_FACTOR = 2 ** 10
+ 
+ # index of the item in the list determines the exponent for size computation
+ # e.g. size_in_bytes = value * (DECIMAL_FACTOR ** (index(mega)+1)) = value * (1000 ** (1+1))
+-PREFIXES_DECIMAL = [["k",    "M",    "G",    "T",    "P",    "E",    "Z",     "Y"],
+-                    ["kilo", "mega", "giga", "tera", "peta", "exa",  "zetta", "yotta"]]
+-PREFIXES_BINARY  = [["Ki",   "Mi",   "Gi",   "Ti",   "Pi",   "Ei",   "Zi",    "Yi"],
+-                    ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi",  "yobi"]]
++# pylint: disable=bad-whitespace
++PREFIXES_DECIMAL = [
++    ["k", "M", "G", "T", "P", "E", "Z", "Y"],  # nopep8
++    ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"],
++]  # nopep8
++PREFIXES_BINARY = [
++    ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"],  # nopep8
++    ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"],
++]  # nopep8
+ SUFFIXES = ["bytes", "byte", "B"]
+ 
++
+ class Size(object):
+-    ''' Class for basic manipulation of the sizes in *bytes
+-    '''
++    """Class for basic manipulation of the sizes in *bytes"""
+ 
+     def __init__(self, value):
+         raw_number, raw_units = self._parse_input(str(value))
+@@ -25,9 +32,9 @@ class Size(object):
+         self.units = raw_units
+ 
+     def _parse_input(self, value):
+-        ''' splits input string into number and unit parts
+-            returns number part, unit part
+-        '''
++        """splits input string into number and unit parts
++        returns number part, unit part
++        """
+         m = re.search("^(.*?)([^0-9]*)$", value)
+ 
+         raw_number = m.group(1).strip()
+@@ -39,12 +46,12 @@ class Size(object):
+         return raw_number, raw_units
+ 
+     def _parse_units(self, raw_units):
+-        '''
+-            gets string containing size units and
+-            returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!)
+-            in the PREFIXES_* list
+-            If no unit is specified defaults to BINARY and Bytes
+-        '''
++        """
++        gets string containing size units and
++        returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!)
++        in the PREFIXES_* list
++        If no unit is specified defaults to BINARY and Bytes
++        """
+ 
+         prefix = raw_units
+         no_suffix_flag = True
+@@ -54,7 +61,7 @@ class Size(object):
+         for suffix in SUFFIXES:
+             if raw_units.lower().endswith(suffix.lower()):
+                 no_suffix_flag = False
+-                prefix = raw_units[:-len(suffix)]
++                prefix = raw_units[: -len(suffix)]
+                 break
+ 
+         if prefix == "":
+@@ -87,18 +94,18 @@ class Size(object):
+         if idx < 0 or not valid_suffix:
+             raise ValueError("Unable to identify unit '%s'" % raw_units)
+ 
+-        return used_factor, idx+1
++        return used_factor, idx + 1
+ 
+     def _parse_number(self, raw_number):
+-        ''' parse input string containing number
+-            return float
+-        '''
++        """parse input string containing number
++        return float
++        """
+         return float(raw_number)
+ 
+     def _get_unit(self, factor, exponent, unit_type=0):
+-        ''' based on decimal or binary factor and exponent
+-            obtain and return correct unit
+-        '''
++        """based on decimal or binary factor and exponent
++        obtain and return correct unit
++        """
+ 
+         if unit_type == 0:
+             suffix = "B"
+@@ -112,12 +119,11 @@ class Size(object):
+             prefix_lst = PREFIXES_DECIMAL[unit_type]
+         else:
+             prefix_lst = PREFIXES_BINARY[unit_type]
+-        return prefix_lst[exponent-1] + suffix
++        return prefix_lst[exponent - 1] + suffix
+ 
+     @property
+     def bytes(self):
+-        ''' returns size value in bytes as int
+-        '''
++        """returns size value in bytes as int"""
+         return int((self.factor ** self.exponent) * self.number)
+ 
+     def _format(self, format_str, factor, exponent):
+@@ -129,20 +135,20 @@ class Size(object):
+         return result
+ 
+     def get(self, units="autobin", fmt="%0.1f %sb"):
+-        ''' returns size value as a string with given units and format
++        """returns size value as a string with given units and format
+ 
+-            "units" parameter allows to select preferred unit:
+-                for example "KiB" or "megabytes"
+-                accepted values are also:
+-                "autobin" (default) - uses the highest human readable unit (binary)
+-                "autodec" - uses the highest human readable unit (decimal)
++        "units" parameter allows to select preferred unit:
++            for example "KiB" or "megabytes"
++            accepted values are also:
++            "autobin" (default) - uses the highest human readable unit (binary)
++            "autodec" - uses the highest human readable unit (decimal)
+ 
+-            "fmt" parameter allows to specify the output format:
+-                %sb - will be replaced with the short byte size unit (e.g. MiB)
+-                %lb - will be replaced with the long byte size unit (e.g. kilobytes)
+-                value can be formatted using standard string replacements (e.g. %d, %f)
++        "fmt" parameter allows to specify the output format:
++            %sb - will be replaced with the short byte size unit (e.g. MiB)
++            %lb - will be replaced with the long byte size unit (e.g. kilobytes)
++            value can be formatted using standard string replacements (e.g. %d, %f)
+ 
+-        '''
++        """
+ 
+         ftr = BINARY_FACTOR
+         if units == "autodec":
+@@ -155,6 +161,8 @@ class Size(object):
+                 exp += 1
+         else:
+             ftr, exp = self._parse_units(units.strip())
+-            value = (float(self.factor ** self.exponent) / float(ftr ** exp)) * self.number
++            value = (
++                float(self.factor ** self.exponent) / float(ftr ** exp)
++            ) * self.number
+ 
+         return self._format(fmt, ftr, exp) % value
+diff --git a/tests/setup_module_utils.sh b/tests/setup_module_utils.sh
+deleted file mode 100755
+index 94d102d..0000000
+--- a/tests/setup_module_utils.sh
++++ /dev/null
+@@ -1,41 +0,0 @@
+-#!/bin/bash
+-# SPDX-License-Identifier: MIT
+-
+-set -euo pipefail
+-
+-if [ -n "${DEBUG:-}" ] ; then
+-    set -x
+-fi
+-
+-if [ ! -d "${1:-}" ] ; then
+-    echo Either ansible is not installed, or there is no ansible/module_utils
+-    echo in "$1" - Skipping
+-    exit 0
+-fi
+-
+-if [ ! -d "${2:-}" ] ; then
+-    echo Role has no module_utils - Skipping
+-    exit 0
+-fi
+-
+-# we need absolute path for $2
+-absmoddir=$( readlink -f "$2" )
+-
+-# clean up old links to module_utils
+-for item in "$1"/* ; do
+-    if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then
+-        case "$lnitem" in
+-            *"${2}"*) rm -f "$item" ;;
+-        esac
+-    fi
+-done
+-
+-# add new links to module_utils
+-for item in "$absmoddir"/* ; do
+-    case "$item" in
+-        *__pycache__) continue;;
+-        *.pyc) continue;;
+-    esac
+-    bnitem=$( basename "$item" )
+-    ln -s "$item" "$1/$bnitem"
+-done
+diff --git a/tests/test-verify-volume-device.yml b/tests/test-verify-volume-device.yml
+index 3fb56a6..c7ba5ec 100644
+--- a/tests/test-verify-volume-device.yml
++++ b/tests/test-verify-volume-device.yml
+@@ -23,11 +23,11 @@
+ 
+ - name: (1/2) Process volume type (set initial value)
+   set_fact:
+-      st_volume_type: "{{ storage_test_volume.type }}"
++    st_volume_type: "{{ storage_test_volume.type }}"
+ 
+ - name: (2/2) Process volume type (get RAID value)
+   set_fact:
+-      st_volume_type: "{{ storage_test_volume.raid_level }}"
++    st_volume_type: "{{ storage_test_volume.raid_level }}"
+   when: storage_test_volume.type == "raid"
+ 
+ - name: Verify the volume's device type
+diff --git a/tests/test-verify-volume-md.yml b/tests/test-verify-volume-md.yml
+index b21d8d2..27e8333 100644
+--- a/tests/test-verify-volume-md.yml
++++ b/tests/test-verify-volume-md.yml
+@@ -9,7 +9,7 @@
+       register: storage_test_mdadm
+       changed_when: false
+ 
+-    # pre-chew regex search patterns 
++    # pre-chew regex search patterns
+     - set_fact:
+         storage_test_md_active_devices_re: "{{ ('Active Devices : ' ~ storage_test_volume.raid_device_count ~ '\n')|regex_escape() }}"
+       when: storage_test_volume.raid_device_count is defined
+diff --git a/tests/test.yml b/tests/test.yml
+index 944b3cd..cb718a7 100644
+--- a/tests/test.yml
++++ b/tests/test.yml
+@@ -16,7 +16,7 @@
+               mount_point: '/opt/test1'
+         - name: bar
+           disks: ['vdc']
+-          #state: "absent"
++          # state: "absent"
+           volumes:
+             - name: test2
+               size: 8g
+diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
+index 21a5788..1737036 100644
+--- a/tests/tests_create_lv_size_equal_to_vg.yml
++++ b/tests/tests_create_lv_size_equal_to_vg.yml
+@@ -23,13 +23,13 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ lv_size }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ lv_size }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -37,12 +37,12 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: "absent"
+-              volumes:
+-                - name: test1
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: "absent"
++            volumes:
++              - name: test1
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml
+index 351b022..567f8dd 100644
+--- a/tests/tests_create_partition_volume_then_remove.yml
++++ b/tests/tests_create_partition_volume_then_remove.yml
+@@ -53,7 +53,7 @@
+         name: linux-system-roles.storage
+       vars:
+         storage_pools:
+-          - name:  "{{ unused_disks[0] }}"
++          - name: "{{ unused_disks[0] }}"
+             type: partition
+             disks: "{{ unused_disks }}"
+             state: absent
+@@ -70,7 +70,7 @@
+         name: linux-system-roles.storage
+       vars:
+         storage_pools:
+-          - name:  "{{ unused_disks[0] }}"
++          - name: "{{ unused_disks[0] }}"
+             type: partition
+             disks: "{{ unused_disks }}"
+             state: absent
+diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml
+index 854ac0d..2490914 100644
+--- a/tests/tests_existing_lvm_pool.yml
++++ b/tests/tests_existing_lvm_pool.yml
+@@ -20,12 +20,12 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: "{{ pool_name }}"
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size }}"
++        storage_pools:
++          - name: "{{ pool_name }}"
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ volume_size }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml
+index fb17c23..8c754a6 100644
+--- a/tests/tests_lvm_auto_size_cap.yml
++++ b/tests/tests_lvm_auto_size_cap.yml
+@@ -33,12 +33,12 @@
+             name: linux-system-roles.storage
+           vars:
+             storage_pools:
+-                - name: foo
+-                  type: lvm
+-                  disks: "{{ unused_disks }}"
+-                  volumes:
+-                    - name: test1
+-                      size: "{{ doubled_size.stdout|trim }}"
++              - name: foo
++                type: lvm
++                disks: "{{ unused_disks }}"
++                volumes:
++                  - name: test1
++                    size: "{{ doubled_size.stdout|trim }}"
+         - name: unreachable task
+           fail:
+             msg: UNREACH
+@@ -56,11 +56,11 @@
+         name: linux-system-roles.storage
+       vars:
+         storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ test_disk_size }}"
++          - name: foo
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ test_disk_size }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -69,12 +69,12 @@
+         name: linux-system-roles.storage
+       vars:
+         storage_pools:
+-            - name: foo
+-              type: lvm
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ test_disk_size }}"
++          - name: foo
++            type: lvm
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ test_disk_size }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -83,7 +83,7 @@
+         name: linux-system-roles.storage
+       vars:
+         storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: absent
+-              volumes: []
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: absent
++            volumes: []
+diff --git a/tests/tests_lvm_one_disk_one_volume.yml b/tests/tests_lvm_one_disk_one_volume.yml
+index b1096cf..6452f54 100644
+--- a/tests/tests_lvm_one_disk_one_volume.yml
++++ b/tests/tests_lvm_one_disk_one_volume.yml
+@@ -19,13 +19,13 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ volume_size }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -33,13 +33,13 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                size: "{{ volume_size }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -47,14 +47,14 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: absent
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size }}"
+-                  mount_point: "{{ mount_location }}"
+-                  state: absent
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: absent
++            volumes:
++              - name: test1
++                size: "{{ volume_size }}"
++                mount_point: "{{ mount_location }}"
++                state: absent
+ 
+     - include_tasks: verify-role-results.yml
+diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
+index 3139bc7..afe753a 100644
+--- a/tests/tests_misc.yml
++++ b/tests/tests_misc.yml
+@@ -197,7 +197,7 @@
+       block:
+         - name: Try to mount swap filesystem to "{{  mount_location }}"
+           include_role:
+-              name: linux-system-roles.storage
++            name: linux-system-roles.storage
+           vars:
+             storage_volumes:
+               - name: test1
+diff --git a/tests/tests_null_raid_pool.yml b/tests/tests_null_raid_pool.yml
+index 2b7b9f3..5c3c785 100644
+--- a/tests/tests_null_raid_pool.yml
++++ b/tests/tests_null_raid_pool.yml
+@@ -31,9 +31,9 @@
+             raid_level: "null"
+             state: present
+             volumes:
+-            - name: lv1
+-              size: "{{ volume1_size }}"
+-              mount_point: "{{ mount_location1 }}"
++              - name: lv1
++                size: "{{ volume1_size }}"
++                mount_point: "{{ mount_location1 }}"
+ 
+     - name: get existing raids (after run)
+       command: "cat /proc/mdstat"
+@@ -52,12 +52,12 @@
+             raid_level: "null"
+             state: absent
+             volumes:
+-            - name: lv1
+-              size: "{{ volume1_size }}"
+-              mount_point: "{{ mount_location1 }}"
++              - name: lv1
++                size: "{{ volume1_size }}"
++                mount_point: "{{ mount_location1 }}"
+ 
+     - name: compare mdstat results
+       assert:
+         that:
+-            - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout
++          - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout
+         msg: "Raid created when it should not be"
+diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
+index 209d129..4fd8583 100644
+--- a/tests/tests_resize.yml
++++ b/tests/tests_resize.yml
+@@ -29,16 +29,16 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              type: lvm
+-              volumes:
+-                - name: test1
+-                  # resizing is currently supported only for ext2/3/4
+-                  fs_type: 'ext4'
+-                  size: "{{ volume_size_before }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            type: lvm
++            volumes:
++              - name: test1
++                # resizing is currently supported only for ext2/3/4
++                fs_type: 'ext4'
++                size: "{{ volume_size_before }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -46,15 +46,15 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              type: lvm
+-              disks: "{{ unused_disks }}"
+-              volumes:
+-                - name: test1
+-                  fs_type: 'ext4'
+-                  size: "{{ volume_size_after }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            type: lvm
++            disks: "{{ unused_disks }}"
++            volumes:
++              - name: test1
++                fs_type: 'ext4'
++                size: "{{ volume_size_after }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -194,14 +194,14 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: absent
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size_before }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: absent
++            volumes:
++              - name: test1
++                size: "{{ volume_size_before }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -259,14 +259,14 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: absent
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size_before }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: absent
++            volumes:
++              - name: test1
++                size: "{{ volume_size_before }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+ 
+@@ -324,13 +324,13 @@
+       include_role:
+         name: linux-system-roles.storage
+       vars:
+-          storage_pools:
+-            - name: foo
+-              disks: "{{ unused_disks }}"
+-              state: absent
+-              volumes:
+-                - name: test1
+-                  size: "{{ volume_size_before }}"
+-                  mount_point: "{{ mount_location }}"
++        storage_pools:
++          - name: foo
++            disks: "{{ unused_disks }}"
++            state: absent
++            volumes:
++              - name: test1
++                size: "{{ volume_size_before }}"
++                mount_point: "{{ mount_location }}"
+ 
+     - include_tasks: verify-role-results.yml
+diff --git a/tests/unit/bsize_test.py b/tests/unit/bsize_test.py
+index f88a9c1..fae0f5f 100644
+--- a/tests/unit/bsize_test.py
++++ b/tests/unit/bsize_test.py
+@@ -1,7 +1,12 @@
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import pytest
+ 
+ from storage_lsr.size import Size
+ 
++
+ def test_bsize():
+     # check failure on incorrect string
+     with pytest.raises(ValueError) as e:
+diff --git a/tests/unit/gensym_test.py b/tests/unit/gensym_test.py
+index 6d164dc..fd00ddf 100644
+--- a/tests/unit/gensym_test.py
++++ b/tests/unit/gensym_test.py
+@@ -1,68 +1,115 @@
+ #!/usr/bin/python
+ """This module tests methods defined in the lvm_gensym.py module using the pytest framework"""
+-import pytest
+-
++from __future__ import absolute_import, division, print_function
+ 
+-import lvm_gensym
++__metaclass__ = type
+ 
++import pytest
+ 
+-used_lv_names = ['root', 'root_0', 'root_1', 'root_2', 'root_3', 'swap_0', 'swap', 'swap_1']
+ 
+-test_lv_names = [{'fs_type': 'ext', 'mount': '/'},
+-                 {'fs_type': 'zfs', 'mount': '/home/user'},
+-                 {'fs_type': 'swap', 'mount': ''}
+-                ]
++import lvm_gensym
+ 
+-used_vg_names = ['linux_host', 'rhel_user0', 'rhel_0_user']
+ 
+-test_vg_names = ['rhel_user', 'rhel_user_0', 'rhel_user_1',
+-                 'rhel_user_2', 'rhel_user_3', 'linux_user',
+-                 'fedora_user', 'fedora_user_0', 'fedora_user_1'
+-                ]
++used_lv_names = [
++    "root",
++    "root_0",
++    "root_1",
++    "root_2",
++    "root_3",
++    "swap_0",
++    "swap",
++    "swap_1",
++]
++
++test_lv_names = [
++    {"fs_type": "ext", "mount": "/"},
++    {"fs_type": "zfs", "mount": "/home/user"},
++    {"fs_type": "swap", "mount": ""},
++]
++
++used_vg_names = ["linux_host", "rhel_user0", "rhel_0_user"]
++
++test_vg_names = [
++    "rhel_user",
++    "rhel_user_0",
++    "rhel_user_1",
++    "rhel_user_2",
++    "rhel_user_3",
++    "linux_user",
++    "fedora_user",
++    "fedora_user_0",
++    "fedora_user_1",
++]
++
++lvm_facts = {
++    "lvs": {
++        "Home": "",
++        "Swap": "",
++        "Root": "",
++        "Root_0": "",
++        "root": "",
++        "root_0": "",
++        "swap": "",
++        "swap_0": "",
++        "swap_1": "",
++    },
++    "vgs": {"rhel_user": "", "rhel_user_0": "", "rhel_user_1": ""},
++}
+ 
+-lvm_facts = {'lvs': {'Home': '', 'Swap': '', 'Root': '',
+-                     'Root_0': '', 'root': '', 'root_0': '',
+-                     'swap': '', 'swap_0': '', 'swap_1': '',
+-                    },
+-             'vgs': {'rhel_user': '', 'rhel_user_0': '', 'rhel_user_1': ''}
+-            }
+ 
+ def test_unique_base_name():
+     """Test whether the returned name is unique using a supplied list of test names"""
+-    assert lvm_gensym.get_unique_name_from_base('root', used_lv_names) == 'root_4'
+-    assert lvm_gensym.get_unique_name_from_base('rhel_user', test_vg_names) == 'rhel_user_4'
++    assert lvm_gensym.get_unique_name_from_base("root", used_lv_names) == "root_4"
++    assert (
++        lvm_gensym.get_unique_name_from_base("rhel_user", test_vg_names)
++        == "rhel_user_4"
++    )
++
+ 
+ def test_return_val():
+     """Verify that a supplied unique name and a list of used names returns True"""
+     for (index, name) in enumerate(test_vg_names):
+         assert lvm_gensym.name_is_unique(name[index], used_vg_names)
+ 
++
+ def test_get_base_vg_name():
+     """Check generated base volume group name against the expected base name"""
+-    assert lvm_gensym.get_vg_name_base('hostname', 'rhel') == 'rhel_hostname'
++    assert lvm_gensym.get_vg_name_base("hostname", "rhel") == "rhel_hostname"
++
+ 
+ @pytest.mark.parametrize("os_name", ["foo", "bar", "baz"])
+ def test_vg_eval(monkeypatch, os_name):
+     """Check generated unique volume group name against the expected name"""
++
+     def get_os_name():
+         return os_name
+ 
+     vg_names = [os_name + "_user", os_name + "_user_0", os_name + "_user_1"]
+     _lvm_facts = dict(vgs=dict.fromkeys(vg_names), lvs=dict())
+     monkeypatch.setattr(lvm_gensym, "get_os_name", get_os_name)
+-    assert lvm_gensym.get_vg_name('user', _lvm_facts) == os_name + '_user_2'
+-    assert lvm_gensym.get_vg_name('', _lvm_facts) == os_name
++    assert lvm_gensym.get_vg_name("user", _lvm_facts) == os_name + "_user_2"
++    assert lvm_gensym.get_vg_name("", _lvm_facts) == os_name
++
+ 
+ def test_lv_eval():
+     """Test the generated unique logical volume name against the expected name"""
+-    expected = ['root_1', 'home_user', 'swap_2']
++    expected = ["root_1", "home_user", "swap_2"]
+ 
+     for (ctr, name_inputs) in enumerate(test_lv_names):
+-        assert lvm_gensym.get_lv_name(name_inputs['fs_type'], name_inputs['mount'], lvm_facts) == expected[ctr]
++        assert (
++            lvm_gensym.get_lv_name(
++                name_inputs["fs_type"], name_inputs["mount"], lvm_facts
++            )
++            == expected[ctr]
++        )
++
+ 
+ def test_get_base_lv_name():
+     """Test the generated base logical volume name against the expected name"""
+-    expected = ['root', 'home_user', 'swap']
++    expected = ["root", "home_user", "swap"]
+ 
+     for (ctr, names_input) in enumerate(test_lv_names):
+-        assert lvm_gensym.get_lv_name_base(names_input['fs_type'], names_input['mount']) == expected[ctr]
++        assert (
++            lvm_gensym.get_lv_name_base(names_input["fs_type"], names_input["mount"])
++            == expected[ctr]
++        )
+diff --git a/tests/unit/resolve_blockdev_test.py b/tests/unit/resolve_blockdev_test.py
+index 0eafe7b..ad50628 100644
+--- a/tests/unit/resolve_blockdev_test.py
++++ b/tests/unit/resolve_blockdev_test.py
+@@ -1,3 +1,6 @@
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
+ 
+ import os
+ import pytest
+@@ -5,73 +8,80 @@ import pytest
+ import resolve_blockdev
+ 
+ 
+-blkid_data = [('LABEL=target', '/dev/sdx3'),
+-              ('UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5', '/dev/sdaz'),
+-              ('LABEL=missing', '')]
++blkid_data = [
++    ("LABEL=target", "/dev/sdx3"),
++    ("UUID=6c75fa75-e5ab-4a12-a567-c8aa0b4b60a5", "/dev/sdaz"),
++    ("LABEL=missing", ""),
++]
+ 
+-path_data = ['/dev/md/unreal',
+-             '/dev/mapper/fakevg-fakelv',
+-             '/dev/adisk',
+-             '/dev/disk/by-id/wwn-0x123456789abc']
++path_data = [
++    "/dev/md/unreal",
++    "/dev/mapper/fakevg-fakelv",
++    "/dev/adisk",
++    "/dev/disk/by-id/wwn-0x123456789abc",
++]
+ 
+-canonical_paths = {"/dev/sda": "/dev/sda",
+-                   "/dev/dm-3": "/dev/mapper/vg_system-lv_data",
+-                   "/dev/md127": "/dev/md/userdb",
+-                   "/dev/notfound": ""}
++canonical_paths = {
++    "/dev/sda": "/dev/sda",
++    "/dev/dm-3": "/dev/mapper/vg_system-lv_data",
++    "/dev/md127": "/dev/md/userdb",
++    "/dev/notfound": "",
++}
+ 
+ 
+-@pytest.mark.parametrize('spec,device', blkid_data)
++@pytest.mark.parametrize("spec,device", blkid_data)
+ def test_key_value_pair(spec, device, monkeypatch):
+     def run_cmd(args):
+         for _spec, _dev in blkid_data:
+             if _spec in args:
+                 break
+         else:
+-            _dev = ''
+-        return (0, _dev, '')
++            _dev = ""
++        return (0, _dev, "")
+ 
+-    monkeypatch.setattr(os.path, 'exists', lambda p: True)
++    monkeypatch.setattr(os.path, "exists", lambda p: True)
+     assert resolve_blockdev.resolve_blockdev(spec, run_cmd) == device
+ 
+ 
+-@pytest.mark.parametrize('name', [os.path.basename(p) for p in path_data])
++@pytest.mark.parametrize("name", [os.path.basename(p) for p in path_data])
+ def test_device_names(name, monkeypatch):
+     """ Test return values for basename specs, assuming all paths are real. """
++
+     def path_exists(path):
+         return next((data for data in path_data if data == path), False)
+ 
+-    expected = next((data for data in path_data if os.path.basename(data) == name), '')
+-    monkeypatch.setattr(os.path, 'exists', path_exists)
++    expected = next((data for data in path_data if os.path.basename(data) == name), "")
++    monkeypatch.setattr(os.path, "exists", path_exists)
+     assert resolve_blockdev.resolve_blockdev(name, None) == expected
+ 
+ 
+ def test_device_name(monkeypatch):
+-    assert os.path.exists('/dev/xxx') is False
++    assert os.path.exists("/dev/xxx") is False
+ 
+-    monkeypatch.setattr(os.path, 'exists', lambda p: True)
+-    assert resolve_blockdev.resolve_blockdev('xxx', None) == '/dev/xxx'
++    monkeypatch.setattr(os.path, "exists", lambda p: True)
++    assert resolve_blockdev.resolve_blockdev("xxx", None) == "/dev/xxx"
+ 
+-    monkeypatch.setattr(os.path, 'exists', lambda p: False)
+-    assert resolve_blockdev.resolve_blockdev('xxx', None) == ''
++    monkeypatch.setattr(os.path, "exists", lambda p: False)
++    assert resolve_blockdev.resolve_blockdev("xxx", None) == ""
+ 
+ 
+ def test_full_path(monkeypatch):
+     path = "/dev/idonotexist"
+-    monkeypatch.setattr(os.path, 'exists', lambda p: True)
++    monkeypatch.setattr(os.path, "exists", lambda p: True)
+     assert resolve_blockdev.resolve_blockdev(path, None) == path
+ 
+-    monkeypatch.setattr(os.path, 'exists', lambda p: False)
+-    assert resolve_blockdev.resolve_blockdev(path, None) == ''
++    monkeypatch.setattr(os.path, "exists", lambda p: False)
++    assert resolve_blockdev.resolve_blockdev(path, None) == ""
+ 
+     path = "/dev/disk/by-label/alabel"
+-    monkeypatch.setattr(os.path, 'exists', lambda p: True)
++    monkeypatch.setattr(os.path, "exists", lambda p: True)
+     assert resolve_blockdev.resolve_blockdev(path, None) == path
+ 
+-    monkeypatch.setattr(os.path, 'exists', lambda p: False)
+-    assert resolve_blockdev.resolve_blockdev(path, None) == ''
++    monkeypatch.setattr(os.path, "exists", lambda p: False)
++    assert resolve_blockdev.resolve_blockdev(path, None) == ""
+ 
+ 
+-@pytest.mark.parametrize('device', list(canonical_paths.keys()))
++@pytest.mark.parametrize("device", list(canonical_paths.keys()))
+ def test_canonical_path(device, monkeypatch):
+     def _get_name(device):
+         name = os.path.basename(canonical_paths[device])
+@@ -79,8 +89,8 @@ def test_canonical_path(device, monkeypatch):
+             raise Exception("failed to find name")
+         return name
+ 
+-    monkeypatch.setattr(resolve_blockdev, '_get_dm_name_from_kernel_dev', _get_name)
+-    monkeypatch.setattr(resolve_blockdev, '_get_md_name_from_kernel_dev', _get_name)
++    monkeypatch.setattr(resolve_blockdev, "_get_dm_name_from_kernel_dev", _get_name)
++    monkeypatch.setattr(resolve_blockdev, "_get_md_name_from_kernel_dev", _get_name)
+ 
+     canonical = canonical_paths[device]
+     if canonical:
+diff --git a/tests/unit/test_unused_disk.py b/tests/unit/test_unused_disk.py
+index a4339c4..493b4b0 100644
+--- a/tests/unit/test_unused_disk.py
++++ b/tests/unit/test_unused_disk.py
+@@ -1,72 +1,91 @@
++from __future__ import absolute_import, division, print_function
++
++__metaclass__ = type
++
+ import pytest
+ import find_unused_disk
+ import os
+ 
+ 
+-blkid_data_pttype = [('/dev/sdx', '/dev/sdx: PTTYPE=\"dos\"'),
+-                     ('/dev/sdy', '/dev/sdy: PTTYPE=\"test\"')]
++blkid_data_pttype = [
++    ("/dev/sdx", '/dev/sdx: PTTYPE="dos"'),
++    ("/dev/sdy", '/dev/sdy: PTTYPE="test"'),
++]
+ 
+-blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\"'),
+-              ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\"'),
+-              ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\"')]
++blkid_data = [
++    ("/dev/sdx", 'UUID="hello-1234-56789" TYPE="crypto_LUKS"'),
++    (
++        "/dev/sdy",
++        'UUID="this-1s-a-t3st-f0r-ansible" VERSION="LVM2 001" TYPE="LVM2_member" USAGE="raid"',
++    ),
++    (
++        "/dev/sdz",
++        'LABEL="/data" UUID="a12bcdef-345g-67h8-90i1-234j56789k10" VERSION="1.0" TYPE="ext4" USAGE="filesystem"',
++    ),
++]
+ 
+-holders_data_none = [('/dev/sdx', ''),
+-                     ('/dev/dm-99', '')]
++holders_data_none = [("/dev/sdx", ""), ("/dev/dm-99", "")]
+ 
+-holders_data = [('/dev/sdx', 'dm-0'),
+-                ('/dev/dm-99', 'dm-2 dm-3 dm-4')]
++holders_data = [("/dev/sdx", "dm-0"), ("/dev/dm-99", "dm-2 dm-3 dm-4")]
+ 
+ 
+-@pytest.mark.parametrize('disk, blkid', blkid_data_pttype)
++@pytest.mark.parametrize("disk, blkid", blkid_data_pttype)
+ def test_no_signature_true(disk, blkid):
+     def run_command(args):
+-        return [0, blkid, '']
++        return [0, blkid, ""]
++
+     assert find_unused_disk.no_signature(run_command, disk) is True
+ 
+ 
+-@pytest.mark.parametrize('disk, blkid', blkid_data)
++@pytest.mark.parametrize("disk, blkid", blkid_data)
+ def test_no_signature_false(disk, blkid):
+     def run_command(args):
+-        return [0, blkid, '']
++        return [0, blkid, ""]
++
+     assert find_unused_disk.no_signature(run_command, disk) is False
+ 
+ 
+-@pytest.mark.parametrize('disk, holders', holders_data_none)
++@pytest.mark.parametrize("disk, holders", holders_data_none)
+ def test_no_holders_true(disk, holders, monkeypatch):
+     def mock_return(args):
+         return holders
+-    monkeypatch.setattr(os, 'listdir', mock_return)
++
++    monkeypatch.setattr(os, "listdir", mock_return)
+     assert find_unused_disk.no_holders(disk) is True
+ 
+ 
+-@pytest.mark.parametrize('disk, holders', holders_data)
++@pytest.mark.parametrize("disk, holders", holders_data)
+ def test_no_holders_false(disk, holders, monkeypatch):
+     def mock_return(args):
+         return holders
+-    monkeypatch.setattr(os, 'listdir', mock_return)
++
++    monkeypatch.setattr(os, "listdir", mock_return)
+     assert find_unused_disk.no_holders(disk) is False
+ 
+ 
+ def test_can_open_true(monkeypatch):
+     def mock_return(args, flag):
+         return True
+-    monkeypatch.setattr(os, 'open', mock_return)
+-    assert find_unused_disk.can_open('/hello') is True
++
++    monkeypatch.setattr(os, "open", mock_return)
++    assert find_unused_disk.can_open("/hello") is True
+ 
+ 
+ def test_can_open_false(monkeypatch):
+     def mock_return(args, flag):
+         raise OSError
+-    monkeypatch.setattr(os, 'open', mock_return)
+-    assert find_unused_disk.can_open('/hello') is False
++
++    monkeypatch.setattr(os, "open", mock_return)
++    assert find_unused_disk.can_open("/hello") is False
+ 
+ 
+ def test_is_ignored(monkeypatch):
+     def mock_realpath(path):
+         return path
+-    monkeypatch.setattr(os.path, 'realpath', mock_realpath)
+-    assert find_unused_disk.is_ignored('/dev/sda') is False
+-    assert find_unused_disk.is_ignored('/dev/vda') is False
+-    assert find_unused_disk.is_ignored('/dev/mapper/mpatha') is False
+-    assert find_unused_disk.is_ignored('/dev/md/Volume0') is False
+-    assert find_unused_disk.is_ignored('/dev/nullb0') is True
++
++    monkeypatch.setattr(os.path, "realpath", mock_realpath)
++    assert find_unused_disk.is_ignored("/dev/sda") is False
++    assert find_unused_disk.is_ignored("/dev/vda") is False
++    assert find_unused_disk.is_ignored("/dev/mapper/mpatha") is False
++    assert find_unused_disk.is_ignored("/dev/md/Volume0") is False
++    assert find_unused_disk.is_ignored("/dev/nullb0") is True
+diff --git a/tox.ini b/tox.ini
+index 92482d5..91c22a8 100644
+--- a/tox.ini
++++ b/tox.ini
+@@ -13,9 +13,3 @@ configfile = .ansible-lint
+ setenv =
+     RUN_PYTEST_SETUP_MODULE_UTILS = true
+     RUN_PYLINT_SETUP_MODULE_UTILS = true
+-
+-[testenv:black]
+-commands = bash -c 'echo black is currently not enabled - please fix this'
+-
+-[testenv:flake8]
+-commands = bash -c 'echo flake8 is currently not enabled - please fix this'
+-- 
+2.30.2
+
diff --git a/SOURCES/timesync-ansible-test-issues.diff b/SOURCES/timesync-ansible-test-issues.diff
new file mode 100644
index 0000000..2ec733a
--- /dev/null
+++ b/SOURCES/timesync-ansible-test-issues.diff
@@ -0,0 +1,22 @@
+From b55af45842482768f29704d90a1e019ffe0f7770 Mon Sep 17 00:00:00 2001
+From: Noriko Hosoi <nhosoi@redhat.com>
+Date: Tue, 2 Mar 2021 13:39:19 -0800
+Subject: [PATCH] Patch32: timesync-ansible-test-issues.diff
+
+RHELPLAN-68118 - Collections - Timesync - fixing ansible-test errors
+RHELPLAN-68789 - Collections - ignore file for each role
+---
+ .sanity-ansible-ignore-2.9.txt | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 .sanity-ansible-ignore-2.9.txt
+
+diff --git a/.sanity-ansible-ignore-2.9.txt b/.sanity-ansible-ignore-2.9.txt
+new file mode 100644
+index 0000000..e6d5e4d
+--- /dev/null
++++ b/.sanity-ansible-ignore-2.9.txt
+@@ -0,0 +1 @@
++plugins/modules/timesync_provider.sh shebang
+-- 
+2.26.2
+
diff --git a/SPECS/rhel-system-roles.spec b/SPECS/rhel-system-roles.spec
index bea841e..27257f3 100644
--- a/SPECS/rhel-system-roles.spec
+++ b/SPECS/rhel-system-roles.spec
@@ -11,8 +11,8 @@ Name: linux-system-roles
 %endif
 Url: https://github.com/linux-system-roles/
 Summary: Set of interfaces for unified system management
-Version: 1.0.0
-Release: 32%{?dist}
+Version: 1.0.1
+Release: 1%{?dist}
 
 #Group: Development/Libraries
 License: GPLv3+ and MIT and BSD
@@ -131,27 +131,27 @@ BuildRequires: ansible >= 2.9.10
 %global rolename8 tlog
 %deftag 8 1.1.0
 
-%defcommit 9 e5e5abb35fb695e22ccffa855c98ab882650480e
+%defcommit 9 4c81fd1380712ab0641b6837f092dd9caeeae0a6
 %global rolename9 kernel_settings
 #%%deftag 9 1.0.1
 
-%defcommit 10 4b07edf4e84882c9d0fb979092ba5953aac0b4d5
+%defcommit 10 07e08107e7ccba5822f8a7aaec1a2ff0a221bede
 %global rolename10 logging
 #%%deftag 10 0.2.0
 
-#%%defcommit 11 4b6cfca4dd24e53a4bc4e07635601d7c104346c1
+%defcommit 11 4dfc5e2aca74cb82f2a50eec7e975a2b78ad9678
 %global rolename11 nbde_server
-%deftag 11 1.0.1
+#%%deftag 11 1.0.1
 
-%defcommit 12 3af7452e4861ee2363b29b23bf78bf11e06be142
+%defcommit 12 19f06159582550c8463f7d8492669e26fbdf760b
 %global rolename12 nbde_client
 #%%deftag 12 1.0.1
 
-%defcommit 13 50041ce55348fcce34aba4cbe3ea160c5d890ab3
+%defcommit 13 0376ceece57882ade8ffaf431b7866aae3e7fed1
 %global rolename13 certificate
 #%%deftag 13 1.0.1
 
-%defcommit 14 76b2d5b0460dba22c5d290c1af96e4fdb3434cb9
+%defcommit 14 2e2941c5545571fc8bc494099bdf970f498b9d38
 %global rolename14 crypto_policies
 
 %global forgeorg15 https://github.com/willshersystems/
@@ -159,13 +159,13 @@ BuildRequires: ansible >= 2.9.10
 %global rolename15 sshd
 %defcommit 15 e1de59b3c54e9d48a010eeca73755df339c7e628
 
-%defcommit 16 effa0a0d993832dee726290f263a2182cf3eacda
+%defcommit 16 21adc637511db86b5ba279a70a7301ef3a170669
 %global rolename16 ssh
 
 %defcommit 17 779bb78559de58bb5a1f25a4b92039c373ef59a4
 %global rolename17 ha_cluster
 
-%global mainid e5ed203b2d7224c0bf0c3fd55452456c8f468cad
+%global mainid 8f069305caa0a142c2c6ac14bd4d331282a1c079
 Source: %{url}auto-maintenance/archive/%{mainid}/auto-maintenance-%{mainid}.tar.gz
 Source1: %{archiveurl1}
 Source2: %{archiveurl2}
@@ -194,8 +194,10 @@ Patch12: postfix-meta-el8.diff
 
 Patch21: selinux-tier1-tags.diff
 Patch22: selinux-bz-1926947-no-variable-named-present.diff
+Patch23: selinux-ansible-test-issues.diff
 
 Patch31: timesync-tier1-tags.diff
+Patch32: timesync-ansible-test-issues.diff
 
 Patch41: rhel-system-roles-kdump-pr22.diff
 Patch42: kdump-tier1-tags.diff
@@ -208,10 +210,12 @@ Patch52: network-permissions.diff
 Patch53: network-tier1-tags.diff
 Patch55: network-disable-bondtests.diff
 Patch56: network-pr353.diff
+Patch57: network-ansible-test.diff
 
 Patch62: storage-partition-name.diff
 Patch63: storage-no-disks-existing.diff
 Patch64: storage-trim-volume-size.diff
+Patch65: storage-ansible-test.diff
 
 Patch71: metrics-mssql-x86.diff
 
@@ -267,9 +271,11 @@ cd ..
 cd %{rolename2}
 %patch21 -p1
 %patch22 -p1
+%patch23 -p1
 cd ..
 cd %{rolename3}
 %patch31 -p1
+%patch32 -p1
 cd ..
 cd %{rolename4}
 %patch41 -p1
@@ -283,11 +289,13 @@ cd %{rolename5}
 %patch53 -p1
 %patch55 -p1
 %patch56 -p1
+%patch57 -p1
 cd ..
 cd %{rolename6}
 %patch62 -p1
 %patch63 -p1
 %patch64 -p1
+%patch65 -p1
 cd ..
 cd %{rolename7}
 %patch71 -p1
@@ -348,26 +356,54 @@ mkdir .collections
 # Convert the upstream collection readme to the downstream one
 %{SOURCE998} lsr_role2collection/collection_readme.md
 %endif
-./galaxy_transform.py "%{collection_namespace}" "%{collection_name}" "%{collection_version}" > galaxy.yml.tmp
+./galaxy_transform.py "%{collection_namespace}" "%{collection_name}" "%{collection_version}" "Red Hat Enterprise Linux System Roles Ansible Collection" > galaxy.yml.tmp
 mv galaxy.yml.tmp galaxy.yml
 
-for role in %{rolename1} %{rolename2} %{rolename3} \
-    %{rolename4} %{rolename5} %{rolename6} \
-    %{rolename7} %{rolename8} %{rolename9} \
-    %{rolename10} %{rolename11} %{rolename12} \
-    %{rolename13} %{rolename14} %{rolename15} \
-    %{rolename16} %{rolename17}; do
+for role in %{rolenames}; do
     python3 lsr_role2collection.py --role "$role" --src-path "$role" \
         --src-owner %{name} --subrole-prefix %{subrole_prefix} --dest-path .collections \
         --readme lsr_role2collection/collection_readme.md \
         --namespace %{collection_namespace} --collection %{collection_name}
 done
 
+rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt
+# Merge .sanity-ansible-ignore-2.9-ROLENAME.txt into tests/sanity/ignore-2.9.txt
+mkdir -p .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity
+for role in %{rolenames}; do
+    if [ -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt ];
+    then
+      cat .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-2.9-"$role".txt \
+        >> .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity/ignore-2.9.txt
+      rm -f .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.sanity-ansible-ignore-*-"$role".txt
+    fi
+done
+
+# removing dot files/dirs
+rm -r .collections/ansible_collections/%{collection_namespace}/%{collection_name}/.[A-Za-z]*
+
 cp -p galaxy.yml lsr_role2collection/.ansible-lint \
     .collections/ansible_collections/%{collection_namespace}/%{collection_name}
-mkdir -p .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity
-cp -p lsr_role2collection/ignore-2.9.txt \
-    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity
+
+# converting README.md to README.html
+sh md2html.sh -l \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename1}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename2}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename3}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename4}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename5}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename6}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename7}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename8}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename9}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename10}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename11}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename12}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename13}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename14}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename15}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename16}/README.md \
+    .collections/ansible_collections/%{collection_namespace}/%{collection_name}/roles/%{rolename17}/README.md
 
 cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
 %ansible_collection_build
@@ -605,6 +641,22 @@ rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/examples
 cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
 %ansible_collection_install
 
+mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection
+mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles
+
+cp -p %{buildroot}%{ansible_collection_files}%{collection_name}/README.md \
+    %{buildroot}%{ansible_collection_files}%{collection_name}/README.html \
+    $RPM_BUILD_ROOT%{_pkgdocdir}/collection
+
+for rolename in %{rolenames}; do
+  if [ -f %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.md ]; then
+    mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename}
+    cp -p %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.md \
+        %{buildroot}%{ansible_collection_files}%{collection_name}/roles/${rolename}/README.html \
+        $RPM_BUILD_ROOT%{_pkgdocdir}/collection/roles/${rolename}
+  fi
+done
+
 
 %files
 %if %{without ansible}
@@ -654,39 +706,32 @@ cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
 %{_pkgdocdir}/network/example-inventory
 %{_pkgdocdir}/*/README.md
 %{_pkgdocdir}/*/README.html
-%doc %{installbase}/%{roleinstprefix}kdump/README.md
-%doc %{installbase}/%{roleinstprefix}postfix/README.md
-%doc %{installbase}/%{roleinstprefix}selinux/README.md
-%doc %{installbase}/%{roleinstprefix}timesync/README.md
-%doc %{installbase}/%{roleinstprefix}network/README.md
-%doc %{installbase}/%{roleinstprefix}storage/README.md
-%doc %{installbase}/%{roleinstprefix}metrics/README.md
-%doc %{installbase}/%{roleinstprefix}tlog/README.md
-%doc %{installbase}/%{roleinstprefix}kernel_settings/README.md
-%doc %{installbase}/%{roleinstprefix}logging/README.md
-%doc %{installbase}/%{roleinstprefix}nbde_server/README.md
-%doc %{installbase}/%{roleinstprefix}nbde_client/README.md
-%doc %{installbase}/%{roleinstprefix}certificate/README.md
-%doc %{installbase}/%{roleinstprefix}crypto_policies/README.md
-%doc %{installbase}/%{roleinstprefix}sshd/README.md
-%doc %{installbase}/%{roleinstprefix}ssh/README.md
-%doc %{installbase}/%{roleinstprefix}kdump/README.html
-%doc %{installbase}/%{roleinstprefix}postfix/README.html
-%doc %{installbase}/%{roleinstprefix}selinux/README.html
-%doc %{installbase}/%{roleinstprefix}timesync/README.html
-%doc %{installbase}/%{roleinstprefix}network/README.html
-%doc %{installbase}/%{roleinstprefix}storage/README.html
-%doc %{installbase}/%{roleinstprefix}metrics/README.html
-%doc %{installbase}/%{roleinstprefix}tlog/README.html
-%doc %{installbase}/%{roleinstprefix}kernel_settings/README.html
-%doc %{installbase}/%{roleinstprefix}logging/README.html
-%doc %{installbase}/%{roleinstprefix}nbde_server/README.html
-%doc %{installbase}/%{roleinstprefix}nbde_client/README.html
-%doc %{installbase}/%{roleinstprefix}certificate/README.html
-%doc %{installbase}/%{roleinstprefix}crypto_policies/README.html
-%doc %{installbase}/%{roleinstprefix}sshd/README.html
-%doc %{installbase}/%{roleinstprefix}ssh/README.html
-%doc %{installbase}/%{roleinstprefix}ha_cluster/README.html
+%{_pkgdocdir}/collection/roles/*/README.md
+%{_pkgdocdir}/collection/roles/*/README.html
+%doc %{installbase}/%{roleinstprefix}*/README.md
+%doc %{installbase}/%{roleinstprefix}*/README.html
+%doc %{ansible_collection_files}/%{collection_name}/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/kdump/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/postfix/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/selinux/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/timesync/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/network/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/storage/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/metrics/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/tlog/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/kernel_settings/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/logging/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/nbde_server/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/nbde_client/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/certificate/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/crypto_policies/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/sshd/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/ssh/README.md
+%doc %{ansible_collection_files}/%{collection_name}/roles/ha_cluster/README.md
+# can't use a glob for .md files, only for .html. .md files include READMEs
+# for private subroles, and we don't want to tag those as docs.
+%doc %{ansible_collection_files}/%{collection_name}/README.html
+%doc %{ansible_collection_files}/%{collection_name}/roles/*/README.html
 
 %license %{_pkglicensedir}/*
 %license %{installbase}/%{roleinstprefix}kdump/COPYING
@@ -712,6 +757,16 @@ cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
 %{ansible_collection_files}
 
 %changelog
+* Wed Mar 17 2021 Noriko Hosoi <nhosoi@redhat.com> - 1.0.1-1
+- Fix description field in galaxy.yml
+- Remove "Technology Preview" from Collection README
+- Merge individual ignore files and add them to the package
+- Add a note to each module Doc to indicate it is private
+- Add patches for network and storage role ansible-test fixes
+  Resolves rhbz#1935451
+- Simplify doc tags in %%files, correct a forgotten doc tag for ha_cluster
+- Suppress one ansible-lint warning in ha_cluster
+
 * Tue Feb 23 2021 Fernando Fernandez Mancera <ferferna@redhat.com> - 1.0.0-32
 - Add patch for the inclusive language leftover on network-role README.md,
   Resolves rhbz#1931931