diff --git a/.gitignore b/.gitignore index 823fed3..58508f5 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/libblockdev-2.19.tar.gz +SOURCES/libblockdev-2.24.tar.gz diff --git a/.libblockdev.metadata b/.libblockdev.metadata index 68871ad..babb2b1 100644 --- a/.libblockdev.metadata +++ b/.libblockdev.metadata @@ -1 +1 @@ -849d4ab5a9b78b568c8928337691a1052bd6ab57 SOURCES/libblockdev-2.19.tar.gz +17e84346cae1ba0fa4cedeada2f8aa10b5000bd3 SOURCES/libblockdev-2.24.tar.gz diff --git a/SOURCES/0001-swap-error-codes.patch b/SOURCES/0001-swap-error-codes.patch deleted file mode 100644 index f52aa37..0000000 --- a/SOURCES/0001-swap-error-codes.patch +++ /dev/null @@ -1,407 +0,0 @@ -From 3f9f1fa90a087186dcc96060537543d2685616d8 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Mon, 1 Oct 2018 16:06:42 +0200 -Subject: [PATCH 1/2] Use libblkid to check swap status before swapon - -libblkid probe is more reliable than our custom check. ---- - configure.ac | 2 +- - src/plugins/Makefile.am | 4 +- - src/plugins/swap.c | 149 ++++++++++++++++++++++++++++++++-------- - 3 files changed, 124 insertions(+), 31 deletions(-) - -diff --git a/configure.ac b/configure.ac -index 6285a48..7ac5089 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -227,7 +227,7 @@ AS_IF([test "x$with_fs" != "xno"], - AC_SUBST([PARTED_FS_CFLAGS], [])])], - []) - --AS_IF([test "x$with_fs" != "xno" -o "x$with_crypto" != "xno"], -+AS_IF([test "x$with_fs" != "xno" -o "x$with_crypto" != "xno" -o "x$with_swap" != "xno"], - [LIBBLOCKDEV_PKG_CHECK_MODULES([BLKID], [blkid >= 2.23.0]) - # older versions of libblkid don't support BLKID_SUBLKS_BADCSUM so let's just - # define it as 0 (neutral value for bit combinations of flags) -diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am -index e7b4bf0..6219a40 100644 ---- a/src/plugins/Makefile.am -+++ b/src/plugins/Makefile.am -@@ -149,8 +149,8 @@ libbd_nvdimm_la_SOURCES = nvdimm.c nvdimm.h check_deps.c check_deps.h - endif - - if WITH_SWAP --libbd_swap_la_CFLAGS = $(GLIB_CFLAGS) -Wall -Wextra -Werror --libbd_swap_la_LIBADD = $(GLIB_LIBS) ${builddir}/../utils/libbd_utils.la -+libbd_swap_la_CFLAGS = $(GLIB_CFLAGS) $(BLKID_CFLAGS) -Wall -Wextra -Werror -+libbd_swap_la_LIBADD = $(GLIB_LIBS) $(BLKID_LIBS) ${builddir}/../utils/libbd_utils.la - libbd_swap_la_LDFLAGS = -L${srcdir}/../utils/ -version-info 2:0:0 -Wl,--no-undefined - libbd_swap_la_CPPFLAGS = -I${builddir}/../../include/ - libbd_swap_la_SOURCES = swap.c swap.h check_deps.c check_deps.h -diff --git a/src/plugins/swap.c b/src/plugins/swap.c -index bc52637..bfe653f 100644 ---- a/src/plugins/swap.c -+++ b/src/plugins/swap.c -@@ -21,6 +21,8 @@ - #include - #include - #include -+#include -+#include - #include - - #include "swap.h" -@@ -179,13 +181,14 @@ gboolean bd_swap_mkswap (const gchar *device, const gchar *label, const BDExtraA - * Tech category: %BD_SWAP_TECH_SWAP-%BD_SWAP_TECH_MODE_ACTIVATE_DEACTIVATE - */ - gboolean bd_swap_swapon (const gchar *device, gint priority, GError **error) { -- GIOChannel *dev_file = NULL; -- GIOStatus io_status = G_IO_STATUS_ERROR; -- GError *tmp_error = NULL; -- gsize num_read = 0; -- gchar dev_status[11]; -- dev_status[10] = '\0'; -- gint page_size; -+ blkid_probe probe = NULL; -+ gint fd = 0; -+ gint status = 0; -+ guint n_try = 0; -+ const gchar *value = NULL; -+ gint64 status_len = 0; -+ gint64 swap_pagesize = 0; -+ gint64 sys_pagesize = 0; - gint flags = 0; - gint ret = 0; - guint64 progress_id = 0; -@@ -198,53 +201,143 @@ gboolean bd_swap_swapon (const gchar *device, gint priority, 
GError **error) { - - bd_utils_report_progress (progress_id, 0, "Analysing the swap device"); - /* check the device if it is an activatable swap */ -- dev_file = g_io_channel_new_file (device, "r", error); -- if (!dev_file) { -- /* error is already populated */ -+ probe = blkid_new_probe (); -+ if (!probe) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Failed to create a new probe"); - bd_utils_report_finished (progress_id, (*error)->message); - return FALSE; - } - -- page_size = getpagesize (); -- page_size = MAX (2048, page_size); -- io_status = g_io_channel_seek_position (dev_file, page_size - 10, G_SEEK_SET, &tmp_error); -- if (io_status != G_IO_STATUS_NORMAL) { -+ fd = open (device, O_RDONLY|O_CLOEXEC); -+ if (fd == -1) { - g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -- "Failed to determine device's state: %s", tmp_error->message); -- g_clear_error (&tmp_error); -- g_io_channel_shutdown (dev_file, FALSE, &tmp_error); -+ "Failed to open the device '%s'", device); - bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); - return FALSE; - } - -- io_status = g_io_channel_read_chars (dev_file, dev_status, 10, &num_read, &tmp_error); -- if ((io_status != G_IO_STATUS_NORMAL) || (num_read != 10)) { -+ /* we may need to try mutliple times with some delays in case the device is -+ busy at the very moment */ -+ for (n_try=5, status=-1; (status != 0) && (n_try > 0); n_try--) { -+ status = blkid_probe_set_device (probe, fd, 0, 0); -+ if (status != 0) -+ g_usleep (100 * 1000); /* microseconds */ -+ } -+ if (status != 0) { - g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -- "Failed to determine device's state: %s", tmp_error->message); -- g_clear_error (&tmp_error); -- g_io_channel_shutdown (dev_file, FALSE, &tmp_error); -- g_clear_error (&tmp_error); -+ "Failed to create a probe for the device '%s'", device); - bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); - return FALSE; - } - -- g_io_channel_shutdown (dev_file, FALSE, &tmp_error); -- g_clear_error (&tmp_error); -+ blkid_probe_enable_superblocks (probe, 1); -+ blkid_probe_set_superblocks_flags (probe, BLKID_SUBLKS_TYPE | BLKID_SUBLKS_MAGIC); - -- if (g_str_has_prefix (dev_status, "SWAP-SPACE")) { -+ /* we may need to try mutliple times with some delays in case the device is -+ busy at the very moment */ -+ for (n_try=5, status=-1; !(status == 0 || status == 1) && (n_try > 0); n_try--) { -+ status = blkid_do_safeprobe (probe); -+ if (status < 0) -+ g_usleep (100 * 1000); /* microseconds */ -+ } -+ if (status < 0) { -+ /* -1 or -2 = error during probing*/ -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Failed to probe the device '%s'", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } else if (status == 1) { -+ /* 1 = nothing detected */ -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "No superblock detected on the device '%s'", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } -+ -+ status = blkid_probe_lookup_value (probe, "TYPE", &value, NULL); -+ if (status != 0) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Failed to get format type for the device '%s'", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); 
-+ close (fd); -+ return FALSE; -+ } -+ -+ if (g_strcmp0 (value, "swap") != 0) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Device '%s' is not formatted as swap", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } -+ -+ status = blkid_probe_lookup_value (probe, "SBMAGIC", &value, NULL); -+ if (status != 0) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Failed to get swap status on the device '%s'", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } -+ -+ if (g_strcmp0 (value, "SWAP-SPACE") == 0) { - g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, - "Old swap format, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); - return FALSE; -- } else if (g_str_has_prefix (dev_status, "S1SUSPEND") || g_str_has_prefix (dev_status, "S2SUSPEND")) { -+ } else if (g_strcmp0 (value, "S1SUSPEND") == 0 || g_strcmp0 (value, "S2SUSPEND") == 0) { - g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, - "Suspended system on the swap device, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); - return FALSE; -- } else if (!g_str_has_prefix (dev_status, "SWAPSPACE2")) { -+ } else if (g_strcmp0 (value, "SWAPSPACE2") != 0) { - g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, - "Unknown swap space format, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } -+ -+ status_len = (gint64) strlen (value); -+ -+ status = blkid_probe_lookup_value (probe, "SBMAGIC_OFFSET", &value, NULL); -+ if (status != 0 || !value) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Failed to get swap status on the device '%s'", device); -+ bd_utils_report_finished (progress_id, (*error)->message); -+ blkid_free_probe (probe); -+ close (fd); -+ return FALSE; -+ } -+ -+ swap_pagesize = status_len + g_ascii_strtoll (value, (char **)NULL, 10); -+ -+ blkid_free_probe (probe); -+ close (fd); -+ -+ sys_pagesize = getpagesize (); -+ -+ if (swap_pagesize != sys_pagesize) { -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ "Swap format pagesize (%"G_GINT64_FORMAT") and system pagesize (%"G_GINT64_FORMAT") don't match", -+ swap_pagesize, sys_pagesize); -+ bd_utils_report_finished (progress_id, (*error)->message); - return FALSE; - } - --- -2.17.1 - - -From f6508829e7cac138e4961a1c3ef6170d6f67bfd9 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Oct 2018 08:07:55 +0200 -Subject: [PATCH 2/2] Add error codes and Python exceptions for swapon fails - -We need to be able to tell why swapon failed so our users can -decide what to do. 
---- - src/lib/plugin_apis/swap.api | 4 ++++ - src/plugins/swap.c | 10 +++++----- - src/plugins/swap.h | 4 ++++ - src/python/gi/overrides/BlockDev.py | 19 +++++++++++++++++-- - tests/swap_test.py | 13 +++++++++++++ - 5 files changed, 43 insertions(+), 7 deletions(-) - -diff --git a/src/lib/plugin_apis/swap.api b/src/lib/plugin_apis/swap.api -index d0906fe..3fcc0e5 100644 ---- a/src/lib/plugin_apis/swap.api -+++ b/src/lib/plugin_apis/swap.api -@@ -10,6 +10,10 @@ typedef enum { - BD_SWAP_ERROR_UNKNOWN_STATE, - BD_SWAP_ERROR_ACTIVATE, - BD_SWAP_ERROR_TECH_UNAVAIL, -+ BD_SWAP_ERROR_ACTIVATE_OLD, -+ BD_SWAP_ERROR_ACTIVATE_SUSPEND, -+ BD_SWAP_ERROR_ACTIVATE_UNKNOWN, -+ BD_SWAP_ERROR_ACTIVATE_PAGESIZE, - } BDSwapError; - - typedef enum { -diff --git a/src/plugins/swap.c b/src/plugins/swap.c -index bfe653f..28db6f3 100644 ---- a/src/plugins/swap.c -+++ b/src/plugins/swap.c -@@ -292,21 +292,21 @@ gboolean bd_swap_swapon (const gchar *device, gint priority, GError **error) { - } - - if (g_strcmp0 (value, "SWAP-SPACE") == 0) { -- g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE_OLD, - "Old swap format, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); - blkid_free_probe (probe); - close (fd); - return FALSE; - } else if (g_strcmp0 (value, "S1SUSPEND") == 0 || g_strcmp0 (value, "S2SUSPEND") == 0) { -- g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE_SUSPEND, - "Suspended system on the swap device, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); - blkid_free_probe (probe); - close (fd); - return FALSE; - } else if (g_strcmp0 (value, "SWAPSPACE2") != 0) { -- g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE, -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE_UNKNOWN, - "Unknown swap space format, cannot activate."); - bd_utils_report_finished (progress_id, (*error)->message); - blkid_free_probe (probe); -@@ -318,7 +318,7 @@ gboolean bd_swap_swapon (const gchar *device, gint priority, GError **error) { - - status = blkid_probe_lookup_value (probe, "SBMAGIC_OFFSET", &value, NULL); - if (status != 0 || !value) { -- g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE_PAGESIZE, - "Failed to get swap status on the device '%s'", device); - bd_utils_report_finished (progress_id, (*error)->message); - blkid_free_probe (probe); -@@ -334,7 +334,7 @@ gboolean bd_swap_swapon (const gchar *device, gint priority, GError **error) { - sys_pagesize = getpagesize (); - - if (swap_pagesize != sys_pagesize) { -- g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_UNKNOWN_STATE, -+ g_set_error (error, BD_SWAP_ERROR, BD_SWAP_ERROR_ACTIVATE_PAGESIZE, - "Swap format pagesize (%"G_GINT64_FORMAT") and system pagesize (%"G_GINT64_FORMAT") don't match", - swap_pagesize, sys_pagesize); - bd_utils_report_finished (progress_id, (*error)->message); -diff --git a/src/plugins/swap.h b/src/plugins/swap.h -index a01c873..9947bad 100644 ---- a/src/plugins/swap.h -+++ b/src/plugins/swap.h -@@ -12,6 +12,10 @@ typedef enum { - BD_SWAP_ERROR_UNKNOWN_STATE, - BD_SWAP_ERROR_ACTIVATE, - BD_SWAP_ERROR_TECH_UNAVAIL, -+ BD_SWAP_ERROR_ACTIVATE_OLD, -+ BD_SWAP_ERROR_ACTIVATE_SUSPEND, -+ BD_SWAP_ERROR_ACTIVATE_UNKNOWN, -+ BD_SWAP_ERROR_ACTIVATE_PAGESIZE, - } BDSwapError; - - typedef enum { -diff --git a/src/python/gi/overrides/BlockDev.py 
b/src/python/gi/overrides/BlockDev.py -index c2ef2f4..e608887 100644 ---- a/src/python/gi/overrides/BlockDev.py -+++ b/src/python/gi/overrides/BlockDev.py -@@ -1031,7 +1031,17 @@ __all__.append("MpathError") - - class SwapError(BlockDevError): - pass --__all__.append("SwapError") -+class SwapActivateError(SwapError): -+ pass -+class SwapOldError(SwapActivateError): -+ pass -+class SwapSuspendError(SwapActivateError): -+ pass -+class SwapUnknownError(SwapActivateError): -+ pass -+class SwapPagesizeError(SwapActivateError): -+ pass -+__all__.extend(("SwapError", "SwapActivateError", "SwapOldError", "SwapSuspendError", "SwapUnknownError", "SwapPagesizeError")) - - class KbdError(BlockDevError): - pass -@@ -1070,6 +1080,11 @@ __all__.append("BlockDevNotImplementedError") - not_implemented_rule = XRule(GLib.Error, re.compile(r".*The function '.*' called, but not implemented!"), None, BlockDevNotImplementedError) - - fs_nofs_rule = XRule(GLib.Error, None, 3, FSNoFSError) -+swap_activate_rule = XRule(GLib.Error, None, 1, SwapActivateError) -+swap_old_rule = XRule(GLib.Error, None, 3, SwapOldError) -+swap_suspend_rule = XRule(GLib.Error, None, 4, SwapSuspendError) -+swap_unknown_rule = XRule(GLib.Error, None, 5, SwapUnknownError) -+swap_pagesize_rule = XRule(GLib.Error, None, 6, SwapPagesizeError) - - btrfs = ErrorProxy("btrfs", BlockDev, [(GLib.Error, BtrfsError)], [not_implemented_rule]) - __all__.append("btrfs") -@@ -1092,7 +1107,7 @@ __all__.append("md") - mpath = ErrorProxy("mpath", BlockDev, [(GLib.Error, MpathError)], [not_implemented_rule]) - __all__.append("mpath") - --swap = ErrorProxy("swap", BlockDev, [(GLib.Error, SwapError)], [not_implemented_rule]) -+swap = ErrorProxy("swap", BlockDev, [(GLib.Error, SwapError)], [not_implemented_rule, swap_activate_rule, swap_old_rule, swap_suspend_rule, swap_unknown_rule, swap_pagesize_rule]) - __all__.append("swap") - - kbd = ErrorProxy("kbd", BlockDev, [(GLib.Error, KbdError)], [not_implemented_rule]) -diff --git a/tests/swap_test.py b/tests/swap_test.py -index 05d0c19..395fdf5 100644 ---- a/tests/swap_test.py -+++ b/tests/swap_test.py -@@ -97,6 +97,19 @@ class SwapTestCase(SwapTest): - _ret, out, _err = run_command("blkid -ovalue -sLABEL -p %s" % self.loop_dev) - self.assertEqual(out, "BlockDevSwap") - -+ def test_swapon_pagesize(self): -+ """Verify that activating swap with different pagesize fails""" -+ -+ # create swap with 64k pagesize -+ ret, out, err = run_command("mkswap --pagesize 65536 %s" % self.loop_dev) -+ if ret != 0: -+ self.fail("Failed to prepare swap for pagesize test: %s %s" % (out, err)) -+ -+ # activation should fail because swap has different pagesize -+ with self.assertRaises(BlockDev.SwapPagesizeError): -+ BlockDev.swap.swapon(self.loop_dev) -+ -+ - class SwapUnloadTest(SwapTest): - def setUp(self): - # make sure the library is initialized with all plugins loaded for other --- -2.17.1 - diff --git a/SOURCES/0002-major-minor-macros.patch b/SOURCES/0002-major-minor-macros.patch deleted file mode 100644 index c582a3b..0000000 --- a/SOURCES/0002-major-minor-macros.patch +++ /dev/null @@ -1,53 +0,0 @@ -From f5585f5839b51e734d28059f8a6b6d92ce036d93 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Mon, 17 Dec 2018 10:36:49 +0100 -Subject: [PATCH] Use major/minor macros from sys/sysmacros.h instead of - linux/kdev_t.h - -The macros from linux/kdev_t.h don't work for devices with -minor > 255. 
- -Resolves: rhbz#1644825 ---- - src/plugins/mpath.c | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - -diff --git a/src/plugins/mpath.c b/src/plugins/mpath.c -index 4fb75849..639c00b9 100644 ---- a/src/plugins/mpath.c -+++ b/src/plugins/mpath.c -@@ -18,8 +18,8 @@ - */ - - #include --/* provides MAJOR, MINOR macros */ --#include -+/* provides major and minor macros */ -+#include - #include - #include - #include -@@ -273,8 +273,8 @@ static gboolean map_is_multipath (const gchar *map_name, GError **error) { - static gchar** get_map_deps (const gchar *map_name, guint64 *n_deps, GError **error) { - struct dm_task *task; - struct dm_deps *deps; -- guint64 major = 0; -- guint64 minor = 0; -+ guint64 dev_major = 0; -+ guint64 dev_minor = 0; - guint64 i = 0; - gchar **dep_devs = NULL; - gchar *major_minor = NULL; -@@ -319,9 +319,9 @@ static gchar** get_map_deps (const gchar *map_name, guint64 *n_deps, GError **er - dep_devs = g_new0 (gchar*, deps->count + 1); - - for (i = 0; i < deps->count; i++) { -- major = (guint64) MAJOR(deps->device[i]); -- minor = (guint64) MINOR(deps->device[i]); -- major_minor = g_strdup_printf ("%"G_GUINT64_FORMAT":%"G_GUINT64_FORMAT, major, minor); -+ dev_major = (guint64) major (deps->device[i]); -+ dev_minor = (guint64) minor (deps->device[i]); -+ major_minor = g_strdup_printf ("%"G_GUINT64_FORMAT":%"G_GUINT64_FORMAT, dev_major, dev_minor); - dep_devs[i] = get_device_name (major_minor, error); - if (*error) { - g_prefix_error (error, "Failed to resolve '%s' to device name", diff --git a/SOURCES/0003-gating-tests-changes.patch b/SOURCES/0003-gating-tests-changes.patch deleted file mode 100644 index 5ec9c06..0000000 --- a/SOURCES/0003-gating-tests-changes.patch +++ /dev/null @@ -1,2676 +0,0 @@ -From 3a513b4d7f14406d94614643327ce2d8ab35f7eb Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 14 Mar 2019 09:34:02 +0100 -Subject: [PATCH 01/10] Add a decorator for "tagging" tests - -Tests function (or classes) can be tagged with one or more tags. -This provides easier way to skip slow/unstable/dangerous tests -and/or or some subset of the tests (e.g. run only unstable tests -to check if the test is still failing). 
---- - tests/utils.py | 26 ++++++++++++++++++++++++++ - 1 file changed, 26 insertions(+) - -diff --git a/tests/utils.py b/tests/utils.py -index a9eb430..82b5494 100644 ---- a/tests/utils.py -+++ b/tests/utils.py -@@ -10,6 +10,7 @@ import unittest - import time - import sys - from contextlib import contextmanager -+from enum import Enum - from itertools import chain - - from gi.repository import GLib -@@ -342,6 +343,31 @@ def unstable_test(test): - return decorated_test - - -+class TestTags(Enum): -+ SLOW = 1 # slow tests -+ UNSTABLE = 2 # randomly failing tests -+ UNSAFE = 3 # tests that change system configuration -+ CORE = 4 # tests covering core functionality -+ NOSTORAGE = 5 # tests that don't work with storage -+ EXTRADEPS = 6 # tests that require special configuration and/or device to run -+ REGRESSION = 7 # regression tests -+ -+ -+def tag_test(*tags): -+ def decorator(func): -+ func.slow = TestTags.SLOW in tags -+ func.unstable = TestTags.UNSTABLE in tags -+ func.unsafe = TestTags.UNSAFE in tags -+ func.core = TestTags.CORE in tags -+ func.nostorage = TestTags.NOSTORAGE in tags -+ func.extradeps = TestTags.EXTRADEPS in tags -+ func.regression = TestTags.REGRESSION in tags -+ -+ return func -+ -+ return decorator -+ -+ - def run(cmd_string): - """ - Run the a command with file descriptors closed as lvm is trying to --- -2.20.1 - - -From f3648fed9c98e779c9f265262a4a77a905689fe7 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 14 Mar 2019 09:37:18 +0100 -Subject: [PATCH 02/10] Use test tags for skipping tests - -This loads all individual test functions from the suite and checks -the tags to decide whether the test should be skippend or not. -We are abusing some unittest internal API to do this, but all -private functions works with Python 2 and Python 3 and general -looks like "stable enough" API. 
---- - tests/run_tests.py | 130 ++++++++++++++++++++++++++++++++++++++++++--- - 1 file changed, 124 insertions(+), 6 deletions(-) - -diff --git a/tests/run_tests.py b/tests/run_tests.py -index 6f74430..5301d07 100644 ---- a/tests/run_tests.py -+++ b/tests/run_tests.py -@@ -16,6 +16,82 @@ LIBDIRS = 'src/utils/.libs:src/plugins/.libs:src/plugins/fs/.libs:src/lib/.libs' - GIDIR = 'src/lib' - - -+def _get_tests_from_suite(suite, tests): -+ """ Extract tests from the test suite """ -+ # 'tests' we get from 'unittest.defaultTestLoader.discover' are "wrapped" -+ # in multiple 'unittest.suite.TestSuite' classes/lists so we need to "unpack" -+ # the indivudual test cases -+ for test in suite: -+ if isinstance(test, unittest.suite.TestSuite): -+ _get_tests_from_suite(test, tests) -+ -+ if isinstance(test, unittest.TestCase): -+ tests.append(test) -+ -+ return tests -+ -+ -+def _get_test_tags(test): -+ """ Get test tags for single test case """ -+ -+ tags = [] -+ -+ # test failed to load, usually some ImportError or something really broken -+ # in the test file, just return empty list and let it fail -+ # with python2 the loader will raise an exception directly without returning -+ # a "fake" FailedTest test case -+ if six.PY3 and isinstance(test, unittest.loader._FailedTest): -+ return tags -+ -+ test_fn = getattr(test, test._testMethodName) -+ -+ # it is possible to either tag a test funcion or the class so we need to -+ # check both for the tag -+ if getattr(test_fn, "slow", False) or getattr(test_fn.__self__, "slow", False): -+ tags.append(TestTags.SLOW) -+ if getattr(test_fn, "unstable", False) or getattr(test_fn.__self__, "unstable", False): -+ tags.append(TestTags.UNSTABLE) -+ if getattr(test_fn, "unsafe", False) or getattr(test_fn.__self__, "unsafe", False): -+ tags.append(TestTags.UNSAFE) -+ if getattr(test_fn, "core", False) or getattr(test_fn.__self__, "core", False): -+ tags.append(TestTags.CORE) -+ if getattr(test_fn, "nostorage", False) or getattr(test_fn.__self__, "nostorage", False): -+ tags.append(TestTags.NOSTORAGE) -+ if getattr(test_fn, "extradeps", False) or getattr(test_fn.__self__, "extradeps", False): -+ tags.append(TestTags.EXTRADEPS) -+ if getattr(test_fn, "regression", False) or getattr(test_fn.__self__, "regression", False): -+ tags.append(TestTags.REGRESSION) -+ -+ return tags -+ -+ -+def _print_skip_message(test, skip_tag): -+ -+ # test.id() looks like 'crypto_test.CryptoTestResize.test_luks2_resize' -+ # and we want to print 'test_luks2_resize (crypto_test.CryptoTestResize)' -+ test_desc = test.id().split(".") -+ test_name = test_desc[-1] -+ test_module = ".".join(test_desc[:-1]) -+ -+ if skip_tag == TestTags.SLOW: -+ reason = "skipping slow tests" -+ elif skip_tag == TestTags.UNSTABLE: -+ reason = "skipping unstable tests" -+ elif skip_tag == TestTags.UNSAFE: -+ reason = "skipping test that modifies system configuration" -+ elif skip_tag == TestTags.EXTRADEPS: -+ reason = "skipping test that requires special configuration" -+ elif skip_tag == TestTags.CORE: -+ reason = "skipping non-core test" -+ else: -+ reason = "unknown reason" # just to be sure there is some default value -+ -+ if test._testMethodDoc: -+ print("%s (%s)\n%s ... skipped '%s'" % (test_name, test_module, test._testMethodDoc, reason)) -+ else: -+ print("%s (%s) ... 
skipped '%s'" % (test_name, test_module, reason)) -+ -+ - if __name__ == '__main__': - - testdir = os.path.abspath(os.path.dirname(__file__)) -@@ -45,6 +121,9 @@ if __name__ == '__main__': - argparser.add_argument('-j', '--jenkins', dest='jenkins', - help='run also tests that should run only in a CI environment', - action='store_true') -+ argparser.add_argument('-c', '--core', dest='core', -+ help='run tests that cover basic functionality of the library and regression tests', -+ action='store_true') - argparser.add_argument('-s', '--stop', dest='stop', - help='stop executing after first failed test', - action='store_true') -@@ -57,21 +136,60 @@ if __name__ == '__main__': - if args.jenkins: - os.environ['JENKINS_HOME'] = '' - -+ # read the environmental variables for backwards compatibility -+ if 'JENKINS_HOME' in os.environ: -+ args.jenkins = True -+ if 'SKIP_SLOW' in os.environ: -+ args.fast = True -+ if 'FEELINGLUCKY' in os.environ: -+ args.lucky = True -+ - sys.path.append(testdir) - sys.path.append(projdir) - sys.path.append(os.path.join(projdir, 'src/python')) - - start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - -+ loader = unittest.defaultTestLoader - suite = unittest.TestSuite() -+ - if args.testname: -- loader = unittest.TestLoader() -- tests = loader.loadTestsFromNames(args.testname) -- suite.addTests(tests) -+ test_cases = loader.loadTestsFromNames(args.testname) - else: -- loader = unittest.TestLoader() -- tests = loader.discover(start_dir=testdir, pattern='*_test*.py') -- suite.addTests(tests) -+ test_cases = loader.discover(start_dir=testdir, pattern='*_test*.py') -+ -+ # extract list of test classes so we can check/run them manually one by one -+ tests = [] -+ tests = _get_tests_from_suite(test_cases, tests) -+ -+ # for some reason overrides_hack will fail if we import this at the start -+ # of the file -+ from utils import TestTags -+ -+ for test in tests: -+ # get tags and (possibly) skip the test -+ tags = _get_test_tags(test) -+ -+ if TestTags.SLOW in tags and args.fast: -+ _print_skip_message(test, TestTags.SLOW) -+ continue -+ if TestTags.UNSTABLE in tags and not args.lucky: -+ _print_skip_message(test, TestTags.UNSTABLE) -+ continue -+ if TestTags.UNSAFE in tags or TestTags.EXTRADEPS in tags and not args.jenkins: -+ _print_skip_message(test, TestTags.UNSAFE) -+ continue -+ if TestTags.EXTRADEPS in tags and not args.jenkins: -+ _print_skip_message(test, TestTags.EXTRADEPS) -+ continue -+ -+ if args.core and TestTags.CORE not in tags and TestTags.REGRESSION not in tags: -+ _print_skip_message(test, TestTags.CORE) -+ continue -+ -+ # finally add the test to the suite -+ suite.addTest(test) -+ - result = unittest.TextTestRunner(verbosity=2, failfast=args.stop).run(suite) - - # dump cropped journal to log file --- -2.20.1 - - -From f4dd0bfff61d117096c1802ab793b3ab9a31dae3 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 14 Mar 2019 09:43:37 +0100 -Subject: [PATCH 03/10] Use the new test tags in tests - ---- - tests/btrfs_test.py | 14 ++++++-- - tests/crypto_test.py | 73 +++++++++++++++++++++-------------------- - tests/dm_test.py | 5 ++- - tests/fs_test.py | 18 +++++++--- - tests/kbd_test.py | 24 +++++++------- - tests/library_test.py | 13 +++++--- - tests/loop_test.py | 5 ++- - tests/lvm_dbus_tests.py | 44 ++++++++++++++++--------- - tests/lvm_test.py | 44 ++++++++++++++++--------- - tests/mdraid_test.py | 38 ++++++++++++--------- - tests/mpath_test.py | 5 ++- - tests/nvdimm_test.py | 9 +++-- - tests/overrides_test.py | 5 +++ - 
tests/part_test.py | 11 ++++++- - tests/s390_test.py | 6 +++- - tests/swap_test.py | 7 +++- - tests/utils_test.py | 10 ++++-- - tests/vdo_test.py | 13 ++++++-- - 18 files changed, 227 insertions(+), 117 deletions(-) - -diff --git a/tests/btrfs_test.py b/tests/btrfs_test.py -index ac1594f..eab25db 100644 ---- a/tests/btrfs_test.py -+++ b/tests/btrfs_test.py -@@ -10,7 +10,7 @@ from distutils.version import LooseVersion - from distutils.spawn import find_executable - - import overrides_hack --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, mount, umount, run_command -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, mount, umount, run_command, TestTags, tag_test - from gi.repository import GLib, BlockDev - - TEST_MNT = "/tmp/libblockdev_test_mnt" -@@ -74,6 +74,7 @@ class BtrfsTestCase(unittest.TestCase): - return LooseVersion(m.groups()[0]) - - class BtrfsTestCreateQuerySimple(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_create_and_query_volume(self): - """Verify that btrfs volume creation and querying works""" - -@@ -180,6 +181,7 @@ class BtrfsTestAddRemoveDevice(BtrfsTestCase): - self.assertEqual(len(devs), 1) - - class BtrfsTestCreateDeleteSubvolume(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_create_delete_subvolume(self): - """Verify that it is possible to create/delete subvolume""" - -@@ -306,6 +308,7 @@ class BtrfsTestSetDefaultSubvolumeID(BtrfsTestCase): - self.assertEqual(ret, 5) - - class BtrfsTestListDevices(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_list_devices(self): - """Verify that it is possible to get info about devices""" - -@@ -324,6 +327,7 @@ class BtrfsTestListDevices(BtrfsTestCase): - self.assertTrue(devs[1].used >= 0) - - class BtrfsTestListSubvolumes(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_list_subvolumes(self): - """Verify that it is possible to get info about subvolumes""" - -@@ -344,6 +348,7 @@ class BtrfsTestListSubvolumes(BtrfsTestCase): - self.assertEqual(subvols[0].path, "subvol1") - - class BtrfsTestFilesystemInfo(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_filesystem_info(self): - """Verify that it is possible to get filesystem info""" - -@@ -376,6 +381,7 @@ class BtrfsTestFilesystemInfoNoLabel(BtrfsTestCase): - self.assertTrue(info.used >= 0) - - class BtrfsTestMkfs(BtrfsTestCase): -+ @tag_test(TestTags.CORE) - def test_mkfs(self): - """Verify that it is possible to create a btrfs filesystem""" - -@@ -489,7 +495,6 @@ class BtrfsTooSmallTestCase (BtrfsTestCase): - pass - os.unlink(self.dev_file2) - -- @skip_on("fedora", "25", reason="Min sizes for Btrfs are different on F25") - def test_create_too_small(self): - """Verify that an attempt to create BTRFS on a too small device fails""" - -@@ -527,7 +532,6 @@ class BtrfsJustBigEnoughTestCase (BtrfsTestCase): - pass - os.unlink(self.dev_file2) - -- @skip_on("fedora", "25", reason="Min sizes for Btrfs are different on F25") - def test_create_just_enough(self): - """Verify that creating BTRFS on a just big enough devices works""" - -@@ -541,6 +545,7 @@ class FakeBtrfsUtilsTestCase(BtrfsTestCase): - def setUp(self): - pass - -+ @tag_test(TestTags.NOSTORAGE) - def test_list_subvols_weird_docker_data(self): - """Verify that list_subvolumes works as expected on weird data from one Docker use case""" - -@@ -562,6 +567,7 @@ class BTRFSUnloadTest(BtrfsTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ 
@tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum BTRFS version works as expected""" - -@@ -579,6 +585,7 @@ class BTRFSUnloadTest(BtrfsTestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("btrfs", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_new_version_format(self): - """Verify that checking the minimum BTRFS version works as expected with the new format""" - -@@ -594,6 +601,7 @@ class BTRFSUnloadTest(BtrfsTestCase): - BlockDev.reinit(self.requested_plugins, True, None) - self.assertIn("btrfs", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_btrfs(self): - """Verify that checking btrfs tool availability works as expected""" - -diff --git a/tests/crypto_test.py b/tests/crypto_test.py -index 7320e74..b062505 100644 ---- a/tests/crypto_test.py -+++ b/tests/crypto_test.py -@@ -9,7 +9,7 @@ import locale - import re - import tarfile - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, skip_on, get_avail_locales, requires_locales, run_command, read_file -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, skip_on, get_avail_locales, requires_locales, run_command, read_file, TestTags, tag_test - from gi.repository import BlockDev, GLib - - PASSWD = "myshinylittlepassword" -@@ -92,6 +92,7 @@ class CryptoTestGenerateBackupPassphrase(CryptoTestCase): - # we don't need block devices for this test - pass - -+ @tag_test(TestTags.NOSTORAGE) - def test_generate_backup_passhprase(self): - """Verify that backup passphrase generation works as expected""" - -@@ -101,7 +102,7 @@ class CryptoTestGenerateBackupPassphrase(CryptoTestCase): - six.assertRegex(self, bp, exp) - - class CryptoTestFormat(CryptoTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_luks_format(self): - """Verify that formating device as LUKS works""" - -@@ -121,7 +122,7 @@ class CryptoTestFormat(CryptoTestCase): - succ = BlockDev.crypto_luks_format_blob(self.loop_dev, "aes-cbc-essiv:sha256", 0, [ord(c) for c in PASSWD], 0) - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_format(self): - """Verify that formating device as LUKS 2 works""" -@@ -222,7 +223,7 @@ class CryptoTestFormat(CryptoTestCase): - self.assertEqual(int(m.group(1)), 5) - - class CryptoTestResize(CryptoTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_resize(self): - """Verify that resizing LUKS device works""" - -@@ -244,7 +245,7 @@ class CryptoTestResize(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_resize(self): - """Verify that resizing LUKS 2 device works""" -@@ -304,11 +305,11 @@ class CryptoTestOpenClose(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_luks_open_close(self): - 
self._luks_open_close(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_open_close(self): - self._luks_open_close(self._luks2_format) -@@ -329,11 +330,11 @@ class CryptoTestAddKey(CryptoTestCase): - succ = BlockDev.crypto_luks_add_key_blob(self.loop_dev, [ord(c) for c in PASSWD2], [ord(c) for c in PASSWD3]) - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_add_key(self): - self._add_key(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_add_key(self): - self._add_key(self._luks2_format) -@@ -360,11 +361,11 @@ class CryptoTestRemoveKey(CryptoTestCase): - succ = BlockDev.crypto_luks_remove_key_blob(self.loop_dev, [ord(c) for c in PASSWD2]) - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_remove_key(self): - self._remove_key(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_remove_key(self): - self._remove_key(self._luks2_format) -@@ -380,7 +381,7 @@ class CryptoTestErrorLocale(CryptoTestCase): - if self._orig_loc: - locale.setlocale(locale.LC_ALL, self._orig_loc) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @requires_locales({"cs_CZ.UTF-8"}) - def test_error_locale_key(self): - """Verify that the error msg is locale agnostic""" -@@ -407,11 +408,11 @@ class CryptoTestChangeKey(CryptoTestCase): - succ = BlockDev.crypto_luks_change_key_blob(self.loop_dev, [ord(c) for c in PASSWD2], [ord(c) for c in PASSWD3]) - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_change_key(self): - self._change_key(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_change_key(self): - self._change_key(self._luks2_format) -@@ -432,11 +433,11 @@ class CryptoTestIsLuks(CryptoTestCase): - is_luks = BlockDev.crypto_device_is_luks(self.loop_dev2) - self.assertFalse(is_luks) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_is_luks(self): - self._is_luks(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_is_luks2(self): - self._is_luks(self._luks2_format) -@@ -468,11 +469,11 @@ class CryptoTestLuksStatus(CryptoTestCase): - with self.assertRaises(GLib.GError): - BlockDev.crypto_luks_status("libblockdevTestLUKS") - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_status(self): - self._luks_status(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_status(self): - self._luks_status(self._luks2_format) -@@ -490,18 +491,18 @@ 
class CryptoTestGetUUID(CryptoTestCase): - with self.assertRaises(GLib.GError): - uuid = BlockDev.crypto_luks_uuid(self.loop_dev2) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_get_uuid(self): - self._get_uuid(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_get_uuid(self): - self._get_uuid(self._luks2_format) - - class CryptoTestGetMetadataSize(CryptoTestCase): - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_get_metadata_size(self): - """Verify that getting LUKS 2 device metadata size works""" -@@ -521,7 +522,7 @@ class CryptoTestGetMetadataSize(CryptoTestCase): - offset = int(m.group(1)) - self.assertEquals(meta_size, offset, "LUKS 2 metadata sizes differ") - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_get_metadata_size(self): - """Verify that getting LUKS device metadata size works""" - -@@ -570,11 +571,11 @@ class CryptoTestLuksOpenRW(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_open_rw(self): - self._luks_open_rw(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_open_rw(self): - self._luks_open_rw(self._luks2_format) -@@ -609,7 +610,7 @@ class CryptoTestEscrow(CryptoTestCase): - '-a', '-o', self.public_cert]) - self.addCleanup(os.unlink, self.public_cert) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @skip_on(("centos", "enterprise_linux"), "7", reason="volume_key asks for password in non-interactive mode on this release") - @skip_on("debian", reason="volume_key asks for password in non-interactive mode on this release") - def test_escrow_packet(self): -@@ -654,7 +655,7 @@ class CryptoTestEscrow(CryptoTestCase): - succ = BlockDev.crypto_luks_open(self.loop_dev, 'libblockdevTestLUKS', PASSWD3, None) - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_backup_passphrase(self): - """Verify that a backup passphrase can be created for a device""" - succ = BlockDev.crypto_luks_format(self.loop_dev, None, 0, PASSWD, None, 0) -@@ -735,12 +736,12 @@ class CryptoTestSuspendResume(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_suspend_resume(self): - """Verify that suspending/resuming LUKS device works""" - self._luks_suspend_resume(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_suspend_resume(self): - """Verify that suspending/resuming LUKS 2 device works""" -@@ -781,12 +782,12 @@ class CryptoTestKillSlot(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- 
@unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_kill_slot(self): - """Verify that killing a key slot on LUKS device works""" - self._luks_kill_slot(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_kill_slot(self): - """Verify that killing a key slot on LUKS 2 device works""" -@@ -836,19 +837,19 @@ class CryptoTestHeaderBackupRestore(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_luks_header_backup_restore(self): - """Verify that header backup/restore with LUKS works""" - self._luks_header_backup_restore(self._luks_format) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_header_backup_restore(self): - """Verify that header backup/restore with LUKS2 works""" - self._luks_header_backup_restore(self._luks2_format) - - class CryptoTestInfo(CryptoTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_luks_format(self): - """Verify that we can get information about a LUKS device""" - -@@ -872,7 +873,7 @@ class CryptoTestInfo(CryptoTestCase): - succ = BlockDev.crypto_luks_close("libblockdevTestLUKS") - self.assertTrue(succ) - -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_format(self): - """Verify that we can get information about a LUKS 2 device""" -@@ -903,7 +904,7 @@ class CryptoTestInfo(CryptoTestCase): - self.assertTrue(succ) - - class CryptoTestIntegrity(CryptoTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - @unittest.skipUnless(HAVE_LUKS2, "LUKS 2 not supported") - def test_luks2_integrity(self): - """Verify that we can get create a LUKS 2 device with integrity""" -@@ -983,6 +984,7 @@ class CryptoTestTrueCrypt(CryptoTestCase): - if not succ: - raise RuntimeError("Failed to tear down loop device used for testing") - -+ @tag_test(TestTags.NOSTORAGE) - def test_truecrypt_open_close(self): - """Verify that opening/closing TrueCrypt device works""" - -@@ -1003,6 +1005,7 @@ class CryptoTestTrueCrypt(CryptoTestCase): - self.assertTrue(succ) - self.assertFalse(os.path.exists("/dev/mapper/libblockdevTestTC")) - -+ @tag_test(TestTags.NOSTORAGE) - def test_veracrypt_open_close(self): - """Verify that opening/closing VeraCrypt device works""" - -diff --git a/tests/dm_test.py b/tests/dm_test.py -index 0dd1861..936e305 100644 ---- a/tests/dm_test.py -+++ b/tests/dm_test.py -@@ -2,7 +2,7 @@ import unittest - import os - import overrides_hack - --from utils import run, create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path -+from utils import run, create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -65,6 +65,7 @@ class DevMapperGetSubsystemFromName(DevMapperTestCase): - self.assertEqual(subsystem, "CRYPT") - - class DevMapperCreateRemoveLinear(DevMapperTestCase): -+ @tag_test(TestTags.CORE) - def 
test_create_remove_linear(self): - """Verify that it is possible to create new linear mapping and remove it""" - -@@ -120,6 +121,7 @@ class DMUnloadTest(DevMapperTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum dmsetup version works as expected""" - -@@ -137,6 +139,7 @@ class DMUnloadTest(DevMapperTestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("dm", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_dm(self): - """Verify that checking dmsetup tool availability works as expected""" - -diff --git a/tests/fs_test.py b/tests/fs_test.py -index d3f3353..adcf312 100644 ---- a/tests/fs_test.py -+++ b/tests/fs_test.py -@@ -5,7 +5,7 @@ import subprocess - import tempfile - from contextlib import contextmanager - import utils --from utils import run, create_sparse_tempfile, mount, umount, unstable_test -+from utils import run, create_sparse_tempfile, mount, umount, TestTags, tag_test - import six - import overrides_hack - -@@ -97,6 +97,7 @@ class FSTestCase(unittest.TestCase): - self.fail("Failed to set %s read-write" % device) - - class TestGenericWipe(FSTestCase): -+ @tag_test(TestTags.CORE) - def test_generic_wipe(self): - """Verify that generic signature wipe works as expected""" - -@@ -210,16 +211,19 @@ class ExtTestMkfs(FSTestCase): - - BlockDev.fs_wipe(self.loop_dev, True) - -+ @tag_test(TestTags.CORE) - def test_ext2_mkfs(self): - """Verify that it is possible to create a new ext2 file system""" - self._test_ext_mkfs(mkfs_function=BlockDev.fs_ext2_mkfs, - ext_version="ext2") - -+ @tag_test(TestTags.CORE) - def test_ext3_mkfs(self): - """Verify that it is possible to create a new ext3 file system""" - self._test_ext_mkfs(mkfs_function=BlockDev.fs_ext3_mkfs, - ext_version="ext3") - -+ @tag_test(TestTags.CORE) - def test_ext4_mkfs(self): - """Verify that it is possible to create a new ext4 file system""" - self._test_ext_mkfs(mkfs_function=BlockDev.fs_ext4_mkfs, -@@ -385,16 +389,19 @@ class ExtGetInfo(FSTestCase): - self.assertTrue(fi.uuid) - self.assertTrue(fi.state, "clean") - -+ @tag_test(TestTags.CORE) - def test_ext2_get_info(self): - """Verify that it is possible to get info about an ext2 file system""" - self._test_ext_get_info(mkfs_function=BlockDev.fs_ext2_mkfs, - info_function=BlockDev.fs_ext2_get_info) - -+ @tag_test(TestTags.CORE) - def test_ext3_get_info(self): - """Verify that it is possible to get info about an ext3 file system""" - self._test_ext_get_info(mkfs_function=BlockDev.fs_ext3_mkfs, - info_function=BlockDev.fs_ext3_get_info) - -+ @tag_test(TestTags.CORE) - def test_ext4_get_info(self): - """Verify that it is possible to get info about an ext4 file system""" - self._test_ext_get_info(mkfs_function=BlockDev.fs_ext4_mkfs, -@@ -511,6 +518,7 @@ class ExtResize(FSTestCase): - resize_function=BlockDev.fs_ext4_resize) - - class XfsTestMkfs(FSTestCase): -+ @tag_test(TestTags.CORE) - def test_xfs_mkfs(self): - """Verify that it is possible to create a new xfs file system""" - -@@ -597,6 +605,7 @@ class XfsTestRepair(FSTestCase): - self.assertTrue(succ) - - class XfsGetInfo(FSTestCase): -+ @tag_test(TestTags.CORE) - def test_xfs_get_info(self): - """Verify that it is possible to get info about an xfs file system""" - -@@ -970,6 +979,7 @@ class MountTest(FSTestCase): - if ret != 0: - self.fail("Failed to remove user user '%s': %s" % 
(self.username, err)) - -+ @tag_test(TestTags.CORE) - def test_mount(self): - """ Test basic mounting and unmounting """ - -@@ -1053,7 +1063,7 @@ class MountTest(FSTestCase): - BlockDev.fs_mount(loop_dev, tmp_dir, None, "rw", None) - self.assertFalse(os.path.ismount(tmp_dir)) - -- @unittest.skipUnless("JENKINS_HOME" in os.environ, "skipping test that modifies system configuration") -+ @tag_test(TestTags.UNSAFE) - def test_mount_fstab(self): - """ Test mounting and unmounting devices in /etc/fstab """ - # this test will change /etc/fstab, we want to revert the changes when it finishes -@@ -1088,7 +1098,7 @@ class MountTest(FSTestCase): - self.assertTrue(succ) - self.assertFalse(os.path.ismount(tmp)) - -- @unittest.skipUnless("JENKINS_HOME" in os.environ, "skipping test that modifies system configuration") -+ @tag_test(TestTags.UNSAFE) - def test_mount_fstab_user(self): - """ Test mounting and unmounting devices in /etc/fstab as non-root user """ - # this test will change /etc/fstab, we want to revert the changes when it finishes -@@ -1360,7 +1370,7 @@ class GenericResize(FSTestCase): - fs_info_func=info_prepare, - info_size_func=expected_size) - -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_vfat_generic_resize(self): - """Test generic resize function with a vfat file system""" - self._test_generic_resize(mkfs_function=BlockDev.fs_vfat_mkfs) -diff --git a/tests/kbd_test.py b/tests/kbd_test.py -index b6cfb3c..5e872c4 100644 ---- a/tests/kbd_test.py -+++ b/tests/kbd_test.py -@@ -4,7 +4,7 @@ import re - import time - from contextlib import contextmanager - from distutils.version import LooseVersion --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, wipe_all, fake_path, read_file, skip_on, unstable_test -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, wipe_all, fake_path, read_file, skip_on, TestTags, tag_test - from bytesize import bytesize - import overrides_hack - -@@ -66,7 +66,7 @@ class KbdZRAMTestCase(unittest.TestCase): - - class KbdZRAMDevicesTestCase(KbdZRAMTestCase): - @unittest.skipUnless(_can_load_zram(), "cannot load the 'zram' module") -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_create_destroy_devices(self): - # the easiest case - with _track_module_load(self, "zram", "_loaded_zram_module"): -@@ -113,7 +113,7 @@ class KbdZRAMDevicesTestCase(KbdZRAMTestCase): - time.sleep(1) - - @unittest.skipUnless(_can_load_zram(), "cannot load the 'zram' module") -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_zram_add_remove_device(self): - """Verify that it is possible to add and remove a zram device""" - -@@ -268,6 +268,7 @@ class KbdBcacheNodevTestCase(unittest.TestCase): - BlockDev.reinit(cls.requested_plugins, True, None) - - @skip_on(("centos", "enterprise_linux")) -+ @tag_test(TestTags.NOSTORAGE) - def test_bcache_mode_str_bijection(self): - """Verify that it's possible to transform between cache modes and their string representations""" - -@@ -333,7 +334,7 @@ class KbdBcacheTestCase(unittest.TestCase): - - class KbdTestBcacheCreate(KbdBcacheTestCase): - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_create_destroy(self): - """Verify that it's possible to create and destroy a bcache device""" - -@@ -352,7 +353,7 @@ class KbdTestBcacheCreate(KbdBcacheTestCase): - wipe_all(self.loop_dev, self.loop_dev2) - - @skip_on(("centos", 
"enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_create_destroy_full_path(self): - """Verify that it's possible to create and destroy a bcache device with full device path""" - -@@ -372,7 +373,7 @@ class KbdTestBcacheCreate(KbdBcacheTestCase): - - class KbdTestBcacheAttachDetach(KbdBcacheTestCase): - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_attach_detach(self): - """Verify that it's possible to detach/attach a cache from/to a bcache device""" - -@@ -398,7 +399,7 @@ class KbdTestBcacheAttachDetach(KbdBcacheTestCase): - wipe_all(self.loop_dev, self.loop_dev2) - - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_attach_detach_full_path(self): - """Verify that it's possible to detach/attach a cache from/to a bcache device with full device path""" - -@@ -424,7 +425,7 @@ class KbdTestBcacheAttachDetach(KbdBcacheTestCase): - wipe_all(self.loop_dev, self.loop_dev2) - - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_detach_destroy(self): - """Verify that it's possible to destroy a bcache device with no cache attached""" - -@@ -448,7 +449,7 @@ class KbdTestBcacheAttachDetach(KbdBcacheTestCase): - - class KbdTestBcacheGetSetMode(KbdBcacheTestCase): - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_get_set_mode(self): - """Verify that it is possible to get and set Bcache mode""" - -@@ -505,7 +506,7 @@ class KbdTestBcacheStatusTest(KbdBcacheTestCase): - return sum(int(read_file(os.path.realpath(c) + '/../size')) for c in caches) - - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_status(self): - succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) - self.assertTrue(succ) -@@ -538,7 +539,7 @@ class KbdTestBcacheStatusTest(KbdBcacheTestCase): - - class KbdTestBcacheBackingCacheDevTest(KbdBcacheTestCase): - @skip_on(("centos", "enterprise_linux")) -- @unstable_test -+ @tag_test(TestTags.UNSTABLE) - def test_bcache_backing_cache_dev(self): - """Verify that is is possible to get the backing and cache devices for a Bcache""" - -@@ -566,6 +567,7 @@ class KbdUnloadTest(KbdBcacheTestCase): - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - - @skip_on(("centos", "enterprise_linux")) -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_bcache_progs(self): - """Verify that checking the availability of make-bcache works as expected""" - -diff --git a/tests/library_test.py b/tests/library_test.py -index 159031a..fa33b53 100644 ---- a/tests/library_test.py -+++ b/tests/library_test.py -@@ -2,7 +2,7 @@ import os - import unittest - import re - import overrides_hack --from utils import fake_path -+from utils import fake_path, TestTags, tag_test - - from gi.repository import GLib, BlockDev - -@@ -40,7 +40,7 @@ class LibraryOpsTestCase(unittest.TestCase): - BlockDev.reinit(self.requested_plugins, True, None) - - # recompiles the LVM plugin -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_reload(self): - """Verify that reloading plugins works as expected""" - -@@ -72,7 +72,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - - # recompiles the LVM plugin -- @unittest.skipIf("SKIP_SLOW" in 
os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_force_plugin(self): - """Verify that forcing plugin to be used works as expected""" - -@@ -118,7 +118,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertEqual(BlockDev.lvm_get_max_lv_size(), orig_max_size) - - # recompiles the LVM plugin -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_plugin_priority(self): - """Verify that preferring plugin to be used works as expected""" - -@@ -181,7 +181,7 @@ class LibraryOpsTestCase(unittest.TestCase): - os.system ("rm -f src/plugins/.libs/libbd_lvm2.so") - - # recompiles the LVM plugin -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_plugin_fallback(self): - """Verify that fallback when loading plugins works as expected""" - -@@ -250,6 +250,7 @@ class LibraryOpsTestCase(unittest.TestCase): - - self.log += msg + "\n" - -+ @tag_test(TestTags.CORE) - def test_logging_setup(self): - """Verify that setting up logging works as expected""" - -@@ -280,6 +281,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertIn("stderr[%s]:" % task_id2, self.log) - self.assertIn("...done [%s] (exit code: 0)" % task_id2, self.log) - -+ @tag_test(TestTags.CORE) - def test_require_plugins(self): - """Verify that loading only required plugins works as expected""" - -@@ -290,6 +292,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertEqual(BlockDev.get_available_plugin_names(), ["swap"]) - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - -+ @tag_test(TestTags.CORE) - def test_not_implemented(self): - """Verify that unloaded/unimplemented functions report errors""" - -diff --git a/tests/loop_test.py b/tests/loop_test.py -index 9e9d9ac..5aaf928 100644 ---- a/tests/loop_test.py -+++ b/tests/loop_test.py -@@ -3,7 +3,7 @@ import unittest - import time - import overrides_hack - --from utils import create_sparse_tempfile -+from utils import create_sparse_tempfile, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -31,6 +31,7 @@ class LoopTestCase(unittest.TestCase): - os.unlink(self.dev_file) - - class LoopTestSetupBasic(LoopTestCase): -+ @tag_test(TestTags.CORE) - def testLoop_setup_teardown_basic(self): - """Verify that basic loop_setup and loop_teardown work as expected""" - -@@ -97,6 +98,7 @@ class LoopTestSetupReadOnly(LoopTestCase): - # XXX: any sane way how to test part_probe=True/False? 
- - class LoopTestGetLoopName(LoopTestCase): -+ @tag_test(TestTags.CORE) - def testLoop_get_loop_name(self): - """Verify that loop_get_loop_name works as expected""" - -@@ -107,6 +109,7 @@ class LoopTestGetLoopName(LoopTestCase): - self.assertEqual(ret_loop, self.loop) - - class LoopTestGetBackingFile(LoopTestCase): -+ @tag_test(TestTags.CORE) - def testLoop_get_backing_file(self): - """Verify that loop_get_backing_file works as expected""" - -diff --git a/tests/lvm_dbus_tests.py b/tests/lvm_dbus_tests.py -index 7c3b4cc..625a392 100644 ---- a/tests/lvm_dbus_tests.py -+++ b/tests/lvm_dbus_tests.py -@@ -8,7 +8,7 @@ import re - import subprocess - from itertools import chain - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, skip_on, run_command -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, run_command, TestTags, tag_test - from gi.repository import BlockDev, GLib - - import dbus -@@ -38,6 +38,7 @@ class LvmNoDevTestCase(LVMTestCase): - super(LvmNoDevTestCase, self).__init__(*args, **kwargs) - self._log = "" - -+ @tag_test(TestTags.NOSTORAGE) - def test_is_supported_pe_size(self): - """Verify that lvm_is_supported_pe_size works as expected""" - -@@ -53,12 +54,14 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertFalse(BlockDev.lvm_is_supported_pe_size(65535)) - self.assertFalse(BlockDev.lvm_is_supported_pe_size(32 * 1024**3)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_supported_pe_sizes(self): - """Verify that supported PE sizes are really supported""" - - for size in BlockDev.lvm_get_supported_pe_sizes(): - self.assertTrue(BlockDev.lvm_is_supported_pe_size(size)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_max_lv_size(self): - """Verify that max LV size is correctly determined""" - -@@ -71,6 +74,7 @@ class LvmNoDevTestCase(LVMTestCase): - - self.assertEqual(BlockDev.lvm_get_max_lv_size(), expected) - -+ @tag_test(TestTags.NOSTORAGE) - def test_round_size_to_pe(self): - """Verify that round_size_to_pe works as expected""" - -@@ -95,6 +99,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_round_size_to_pe(biggest_multiple - (2 * 4 * 1024**2) + 1, 4 * 1024**2, False), - biggest_multiple - (2 * 4 * 1024**2)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_lv_physical_size(self): - """Verify that get_lv_physical_size works as expected""" - -@@ -108,6 +113,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_lv_physical_size(11 * 1024**2, 4 * 1024**2), - 12 * 1024**2) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_thpool_padding(self): - """Verify that get_thpool_padding works as expected""" - -@@ -121,6 +127,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_thpool_padding(11 * 1024**2, 4 * 1024**2, True), - expected_padding) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_thpool_meta_size(self): - """Verify that getting recommended thin pool metadata size works as expected""" - -@@ -139,6 +146,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_thpool_meta_size (100 * 1024**2, 128 * 1024, 100), - BlockDev.LVM_MIN_THPOOL_MD_SIZE) - -+ @tag_test(TestTags.NOSTORAGE) - def test_is_valid_thpool_md_size(self): - """Verify that is_valid_thpool_md_size works as expected""" - -@@ -149,6 +157,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertFalse(BlockDev.lvm_is_valid_thpool_md_size(1 * 1024**2)) - self.assertFalse(BlockDev.lvm_is_valid_thpool_md_size(17 * 1024**3)) - -+ @tag_test(TestTags.NOSTORAGE) - def 
test_is_valid_thpool_chunk_size(self): - """Verify that is_valid_thpool_chunk_size works as expected""" - -@@ -167,6 +176,7 @@ class LvmNoDevTestCase(LVMTestCase): - def _store_log(self, lvl, msg): - self._log += str((lvl, msg)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_set_global_config(self): - """Verify that getting and setting global config works as expected""" - -@@ -207,6 +217,7 @@ class LvmNoDevTestCase(LVMTestCase): - succ = BlockDev.lvm_set_global_config(None) - self.assertTrue(succ) - -+ @tag_test(TestTags.NOSTORAGE) - def test_cache_get_default_md_size(self): - """Verify that default cache metadata size is calculated properly""" - -@@ -215,6 +226,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_cache_get_default_md_size(80 * 1024**3), (80 * 1024**3) // 1000) - self.assertEqual(BlockDev.lvm_cache_get_default_md_size(6 * 1024**3), 8 * 1024**2) - -+ @tag_test(TestTags.NOSTORAGE) - def test_cache_mode_bijection(self): - """Verify that cache modes and their string representations map to each other""" - -@@ -275,6 +287,7 @@ class LvmPVonlyTestCase(LVMTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestPVcreateRemove(LvmPVonlyTestCase): -+ @tag_test(TestTags.CORE) - def test_pvcreate_and_pvremove(self): - """Verify that it's possible to create and destroy a PV""" - -@@ -380,6 +393,7 @@ class LvmPVVGTestCase(LvmPVonlyTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestVGcreateRemove(LvmPVVGTestCase): -+ @tag_test(TestTags.CORE) - def test_vgcreate_vgremove(self): - """Verify that it is possible to create and destroy a VG""" - -@@ -406,6 +420,7 @@ class LvmTestVGcreateRemove(LvmPVVGTestCase): - with self.assertRaises(GLib.GError): - BlockDev.lvm_vgremove("testVG", None) - -+@unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestVGrename(LvmPVVGTestCase): - def test_vgrename(self): - """Verify that it is possible to rename a VG""" -@@ -465,7 +480,6 @@ class LvmTestVGactivateDeactivate(LvmPVVGTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestVGextendReduce(LvmPVVGTestCase): -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") - def test_vgextend_vgreduce(self): - """Verify that it is possible to extend/reduce a VG""" - -@@ -571,6 +585,7 @@ class LvmPVVGLVTestCase(LvmPVVGTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestLVcreateRemove(LvmPVVGLVTestCase): -+ @tag_test(TestTags.CORE) - def test_lvcreate_lvremove(self): - """Verify that it's possible to create/destroy an LV""" - -@@ -619,6 +634,7 @@ class LvmTestLVcreateRemove(LvmPVVGLVTestCase): - with self.assertRaises(GLib.GError): - BlockDev.lvm_lvremove("testVG", "testLV", True, None) - -+@unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestLVcreateWithExtra(LvmPVVGLVTestCase): - def __init__(self, *args, **kwargs): - LvmPVVGLVTestCase.__init__(self, *args, **kwargs) -@@ -669,7 +685,6 @@ class LvmTestLVcreateWithExtra(LvmPVVGLVTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestLVcreateType(LvmPVVGLVTestCase): -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") - def test_lvcreate_type(self): - """Verify it's possible to create LVs with various types""" - -@@ -842,7 +857,7 @@ class LvmTestLVrename(LvmPVVGLVTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class 
LvmTestLVsnapshots(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_snapshotcreate_lvorigin_snapshotmerge(self): - """Verify that LV snapshot support works""" - -@@ -957,6 +972,7 @@ class LvmTestLVsAll(LvmPVVGthpoolTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestThpoolCreate(LvmPVVGthpoolTestCase): -+ @tag_test(TestTags.CORE) - def test_thpoolcreate(self): - """Verify that it is possible to create a thin pool""" - -@@ -1056,6 +1072,7 @@ class LvmPVVGLVthLVTestCase(LvmPVVGthpoolTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestThLVcreate(LvmPVVGLVthLVTestCase): -+ @tag_test(TestTags.CORE) - def test_thlvcreate_thpoolname(self): - """Verify that it is possible to create a thin LV and get its pool name""" - -@@ -1142,8 +1159,7 @@ class LvmPVVGLVcachePoolTestCase(LvmPVVGLVTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmPVVGLVcachePoolCreateRemoveTestCase(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_pool_create_remove(self): - """Verify that is it possible to create and remove a cache pool""" - -@@ -1169,8 +1185,7 @@ class LvmPVVGLVcachePoolCreateRemoveTestCase(LvmPVVGLVcachePoolTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmTestCachePoolConvert(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_pool_convert(self): - """Verify that it is possible to create a cache pool by conversion""" - -@@ -1193,8 +1208,7 @@ class LvmTestCachePoolConvert(LvmPVVGLVcachePoolTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmPVVGLVcachePoolAttachDetachTestCase(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_pool_attach_detach(self): - """Verify that is it possible to attach and detach a cache pool""" - -@@ -1235,8 +1249,7 @@ class LvmPVVGLVcachePoolAttachDetachTestCase(LvmPVVGLVcachePoolTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmPVVGcachedLVTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_create_cached_lv(self): - """Verify that it is possible to create a cached LV in a single step""" - -@@ -1256,8 +1269,7 @@ class LvmPVVGcachedLVTestCase(LvmPVVGLVTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_get_pool_name(self): - """Verify that it is possible to get the name of the cache pool""" - -@@ -1283,8 +1295,7 @@ class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class 
LvmPVVGcachedLVstatsTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_get_stats(self): - """Verify that it is possible to get stats for a cached LV""" - -@@ -1323,6 +1334,7 @@ class LVMTechTest(LVMTestCase): - self.addCleanup(BlockDev.switch_init_checks, True) - self.addCleanup(BlockDev.reinit, [self.ps, self.ps2], True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_tech_available(self): - """Verify that checking lvm dbus availability by technology works as expected""" - -diff --git a/tests/lvm_test.py b/tests/lvm_test.py -index 4f1640f..28a4b05 100644 ---- a/tests/lvm_test.py -+++ b/tests/lvm_test.py -@@ -7,7 +7,7 @@ import six - import re - import subprocess - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -27,6 +27,7 @@ class LvmNoDevTestCase(LVMTestCase): - super(LvmNoDevTestCase, self).__init__(*args, **kwargs) - self._log = "" - -+ @tag_test(TestTags.NOSTORAGE) - def test_is_supported_pe_size(self): - """Verify that lvm_is_supported_pe_size works as expected""" - -@@ -42,12 +43,14 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertFalse(BlockDev.lvm_is_supported_pe_size(65535)) - self.assertFalse(BlockDev.lvm_is_supported_pe_size(32 * 1024**3)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_supported_pe_sizes(self): - """Verify that supported PE sizes are really supported""" - - for size in BlockDev.lvm_get_supported_pe_sizes(): - self.assertTrue(BlockDev.lvm_is_supported_pe_size(size)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_max_lv_size(self): - """Verify that max LV size is correctly determined""" - -@@ -60,6 +63,7 @@ class LvmNoDevTestCase(LVMTestCase): - - self.assertEqual(BlockDev.lvm_get_max_lv_size(), expected) - -+ @tag_test(TestTags.NOSTORAGE) - def test_round_size_to_pe(self): - """Verify that round_size_to_pe works as expected""" - -@@ -84,6 +88,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_round_size_to_pe(biggest_multiple - (2 * 4 * 1024**2) + 1, 4 * 1024**2, False), - biggest_multiple - (2 * 4 * 1024**2)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_lv_physical_size(self): - """Verify that get_lv_physical_size works as expected""" - -@@ -97,6 +102,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_lv_physical_size(11 * 1024**2, 4 * 1024**2), - 12 * 1024**2) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_thpool_padding(self): - """Verify that get_thpool_padding works as expected""" - -@@ -110,6 +116,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_thpool_padding(11 * 1024**2, 4 * 1024**2, True), - expected_padding) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_thpool_meta_size(self): - """Verify that getting recommended thin pool metadata size works as expected""" - -@@ -128,6 +135,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_get_thpool_meta_size (100 * 1024**2, 128 * 1024, 100), - BlockDev.LVM_MIN_THPOOL_MD_SIZE) - -+ @tag_test(TestTags.NOSTORAGE) - def test_is_valid_thpool_md_size(self): - """Verify that is_valid_thpool_md_size works as expected""" - -@@ -138,6 +146,7 @@ class LvmNoDevTestCase(LVMTestCase): - 
self.assertFalse(BlockDev.lvm_is_valid_thpool_md_size(1 * 1024**2)) - self.assertFalse(BlockDev.lvm_is_valid_thpool_md_size(17 * 1024**3)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_is_valid_thpool_chunk_size(self): - """Verify that is_valid_thpool_chunk_size works as expected""" - -@@ -156,6 +165,7 @@ class LvmNoDevTestCase(LVMTestCase): - def _store_log(self, lvl, msg): - self._log += str((lvl, msg)) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_set_global_config(self): - """Verify that getting and setting global config works as expected""" - -@@ -192,6 +202,7 @@ class LvmNoDevTestCase(LVMTestCase): - succ = BlockDev.lvm_set_global_config(None) - self.assertTrue(succ) - -+ @tag_test(TestTags.NOSTORAGE) - def test_cache_get_default_md_size(self): - """Verify that default cache metadata size is calculated properly""" - -@@ -200,6 +211,7 @@ class LvmNoDevTestCase(LVMTestCase): - self.assertEqual(BlockDev.lvm_cache_get_default_md_size(80 * 1024**3), (80 * 1024**3) // 1000) - self.assertEqual(BlockDev.lvm_cache_get_default_md_size(6 * 1024**3), 8 * 1024**2) - -+ @tag_test(TestTags.NOSTORAGE) - def test_cache_mode_bijection(self): - """Verify that cache modes and their string representations map to each other""" - -@@ -258,6 +270,7 @@ class LvmPVonlyTestCase(LVMTestCase): - os.unlink(self.dev_file2) - - class LvmTestPVcreateRemove(LvmPVonlyTestCase): -+ @tag_test(TestTags.CORE) - def test_pvcreate_and_pvremove(self): - """Verify that it's possible to create and destroy a PV""" - -@@ -357,7 +370,8 @@ class LvmPVVGTestCase(LvmPVonlyTestCase): - LvmPVonlyTestCase._clean_up(self) - - class LvmTestVGcreateRemove(LvmPVVGTestCase): -- @skip_on("debian", skip_on_arch="i686", reason="vgremove is broken on 32bit Debian") -+ @skip_on("debian", skip_on_version="9", skip_on_arch="i686", reason="vgremove is broken on 32bit Debian stable") -+ @tag_test(TestTags.CORE) - def test_vgcreate_vgremove(self): - """Verify that it is possible to create and destroy a VG""" - -@@ -543,6 +557,7 @@ class LvmPVVGLVTestCase(LvmPVVGTestCase): - LvmPVVGTestCase._clean_up(self) - - class LvmTestLVcreateRemove(LvmPVVGLVTestCase): -+ @tag_test(TestTags.CORE) - def test_lvcreate_lvremove(self): - """Verify that it's possible to create/destroy an LV""" - -@@ -810,7 +825,7 @@ class LvmTestLVrename(LvmPVVGLVTestCase): - BlockDev.lvm_lvrename("testVG", "testLV", "testLV", None) - - class LvmTestLVsnapshots(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_snapshotcreate_lvorigin_snapshotmerge(self): - """Verify that LV snapshot support works""" - -@@ -920,6 +935,7 @@ class LvmTestLVsAll(LvmPVVGthpoolTestCase): - self.assertGreater(len(lvs), 3) - - class LvmTestThpoolCreate(LvmPVVGthpoolTestCase): -+ @tag_test(TestTags.CORE) - def test_thpoolcreate(self): - """Verify that it is possible to create a thin pool""" - -@@ -1016,6 +1032,7 @@ class LvmPVVGLVthLVTestCase(LvmPVVGthpoolTestCase): - LvmPVVGthpoolTestCase._clean_up(self) - - class LvmTestThLVcreate(LvmPVVGLVthLVTestCase): -+ @tag_test(TestTags.CORE) - def test_thlvcreate_thpoolname(self): - """Verify that it is possible to create a thin LV and get its pool name""" - -@@ -1098,8 +1115,7 @@ class LvmPVVGLVcachePoolTestCase(LvmPVVGLVTestCase): - LvmPVVGLVTestCase._clean_up(self) - - class LvmPVVGLVcachePoolCreateRemoveTestCase(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on 
rawhide") -+ @tag_test(TestTags.SLOW) - @skip_on(("centos", "enterprise_linux"), "7") - def test_cache_pool_create_remove(self): - """Verify that is it possible to create and remove a cache pool""" -@@ -1125,8 +1141,7 @@ class LvmPVVGLVcachePoolCreateRemoveTestCase(LvmPVVGLVcachePoolTestCase): - self.assertTrue(succ) - - class LvmTestCachePoolConvert(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_pool_convert(self): - """Verify that it is possible to create a cache pool by conversion""" - -@@ -1149,8 +1164,7 @@ class LvmTestCachePoolConvert(LvmPVVGLVcachePoolTestCase): - - - class LvmPVVGLVcachePoolAttachDetachTestCase(LvmPVVGLVcachePoolTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_pool_attach_detach(self): - """Verify that is it possible to attach and detach a cache pool""" - -@@ -1190,8 +1204,7 @@ class LvmPVVGLVcachePoolAttachDetachTestCase(LvmPVVGLVcachePoolTestCase): - self.assertTrue(any(info.lv_name == "testCache" for info in lvs)) - - class LvmPVVGcachedLVTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_create_cached_lv(self): - """Verify that it is possible to create a cached LV in a single step""" - -@@ -1210,8 +1223,7 @@ class LvmPVVGcachedLVTestCase(LvmPVVGLVTestCase): - self.assertTrue(succ) - - class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_get_pool_name(self): - """Verify that it is possible to get the name of the cache pool""" - -@@ -1236,8 +1248,7 @@ class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): - self.assertEqual(BlockDev.lvm_cache_pool_name("testVG", "testLV"), "testCache") - - class LvmPVVGcachedLVstatsTestCase(LvmPVVGLVTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @skip_on("fedora", "27", reason="LVM is broken in many ways on rawhide") -+ @tag_test(TestTags.SLOW) - def test_cache_get_stats(self): - """Verify that it is possible to get stats for a cached LV""" - -@@ -1271,6 +1282,7 @@ class LVMUnloadTest(LVMTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum LVM version works as expected""" - -@@ -1288,6 +1300,7 @@ class LVMUnloadTest(LVMTestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("lvm", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_lvm(self): - """Verify that checking lvm tool availability works as expected""" - -@@ -1315,6 +1328,7 @@ class LVMTechTest(LVMTestCase): - self.addCleanup(BlockDev.switch_init_checks, True) - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_tech_available(self): - """Verify that checking lvm tool availability by technology works as expected""" - -diff --git a/tests/mdraid_test.py b/tests/mdraid_test.py 
-index ea182b2..ea489db 100644 ---- a/tests/mdraid_test.py -+++ b/tests/mdraid_test.py -@@ -6,7 +6,7 @@ from contextlib import contextmanager - import overrides_hack - import six - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -46,6 +46,7 @@ class MDNoDevTestCase(MDTest): - else: - BlockDev.reinit(cls.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_superblock_size(self): - """Verify that superblock size si calculated properly""" - -@@ -67,6 +68,7 @@ class MDNoDevTestCase(MDTest): - self.assertEqual(BlockDev.md_get_superblock_size(257 * 1024**2, version="unknown version"), - 2 * 1024**2) - -+ @tag_test(TestTags.NOSTORAGE) - def test_canonicalize_uuid(self): - """Verify that UUID canonicalization works as expected""" - -@@ -76,6 +78,7 @@ class MDNoDevTestCase(MDTest): - with six.assertRaisesRegex(self, GLib.GError, r'malformed or invalid'): - BlockDev.md_canonicalize_uuid("malformed-uuid-example") - -+ @tag_test(TestTags.NOSTORAGE) - def test_get_md_uuid(self): - """Verify that getting UUID in MD RAID format works as expected""" - -@@ -162,7 +165,7 @@ class MDTestCase(MDTest): - - - class MDTestCreateDeactivateDestroy(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_create_deactivate_destroy(self): - """Verify that it is possible to create, deactivate and destroy an MD RAID""" - -@@ -192,7 +195,7 @@ class MDTestCreateDeactivateDestroy(MDTestCase): - self.assertTrue(succ) - - class MDTestCreateWithChunkSize(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_create_with_chunk_size(self): - """Verify that it is possible to create and MD RAID with specific chunk size """ - -@@ -216,7 +219,7 @@ class MDTestCreateWithChunkSize(MDTestCase): - self.assertTrue(succ) - - class MDTestActivateDeactivate(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_activate_deactivate(self): - """Verify that it is possible to activate and deactivate an MD RAID""" - -@@ -255,7 +258,7 @@ class MDTestActivateDeactivate(MDTestCase): - self.assertTrue(succ) - - class MDTestActivateWithUUID(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_activate_with_uuid(self): - """Verify that it is possible to activate an MD RAID with UUID""" - -@@ -277,7 +280,7 @@ class MDTestActivateWithUUID(MDTestCase): - succ = BlockDev.md_activate("bd_test_md", [self.loop_dev, self.loop_dev2, self.loop_dev3], md_info.uuid) - - class MDTestActivateByUUID(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_activate_by_uuid(self): - """Verify that it is possible to activate an MD RAID by UUID""" - -@@ -309,7 +312,7 @@ class MDTestActivateByUUID(MDTestCase): - - - class MDTestNominateDenominate(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_nominate_denominate(self): - """Verify that it is possible to nominate and denominate an MD RAID device""" - -@@ -342,9 +345,7 @@ class MDTestNominateDenominate(MDTestCase): - class 
MDTestNominateDenominateActive(MDTestCase): - # slow and leaking an MD array because with a nominated spare device, it - # cannot be deactivated in the end (don't ask me why) -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -- @unittest.skipIf("JENKINS_HOME" in os.environ, "skipping leaky test in jenkins") -- @unittest.skipUnless("FEELINGLUCKY" in os.environ, "skipping, not feeling lucky") -+ @tag_test(TestTags.SLOW, TestTags.UNSAFE, TestTags.UNSTABLE) - def test_nominate_denominate_active(self): - """Verify that nominate and denominate deivice works as expected on (de)activated MD RAID""" - -@@ -371,7 +372,8 @@ class MDTestNominateDenominateActive(MDTestCase): - self.assertTrue(succ) - - class MDTestAddRemove(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) -+ @skip_on("debian", reason="Removing spare disks from an array is broken on Debian") - def test_add_remove(self): - """Verify that it is possible to add a device to and remove from an MD RAID""" - -@@ -431,7 +433,7 @@ class MDTestAddRemove(MDTestCase): - - class MDTestExamineDetail(MDTestCase): - # sleeps to let MD RAID sync things -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_examine_detail(self): - """Verify that it is possible to get info about an MD RAID""" - -@@ -487,7 +489,7 @@ class MDTestExamineDetail(MDTestCase): - self.assertTrue(de_data) - - class MDTestNameNodeBijection(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_name_node_bijection(self): - """Verify that MD RAID node and name match each other""" - -@@ -518,7 +520,7 @@ class MDTestNameNodeBijection(MDTestCase): - self.assertTrue(succ) - - class MDTestSetBitmapLocation(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_set_bitmap_location(self): - """Verify we can change bitmap location for an existing MD array""" - -@@ -567,7 +569,7 @@ class MDTestSetBitmapLocation(MDTestCase): - - - class MDTestRequestSyncAction(MDTestCase): -- @unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") -+ @tag_test(TestTags.SLOW) - def test_request_sync_action(self): - """Verify we can request sync action on an existing MD array""" - -@@ -587,6 +589,7 @@ class MDTestRequestSyncAction(MDTestCase): - - class FakeMDADMutilTest(MDTest): - # no setUp nor tearDown needed, we are gonna use fake utils -+ @tag_test(TestTags.NOSTORAGE) - def test_fw_raid_uppercase_examine(self): - """Verify that md_examine works with output using "RAID" instead of "Raid" and other quirks """ - -@@ -598,6 +601,7 @@ class FakeMDADMutilTest(MDTest): - self.assertEqual(ex_data.uuid, "b42756a2-37e4-3e47-674b-d1dd6e822145") - self.assertEqual(ex_data.device, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_no_metadata_examine(self): - """Verify that md_examine works as expected with no metadata spec""" - -@@ -607,6 +611,7 @@ class FakeMDADMutilTest(MDTest): - - self.assertIs(ex_data.metadata, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_fw_raid_migrating(self): - """Verify that md_examine works when array is migrating ("foo <-- bar" values in output) """ - -@@ -615,6 +620,7 @@ class FakeMDADMutilTest(MDTest): - - self.assertEqual(ex_data.chunk_size, 128 * 1024) - -+ @tag_test(TestTags.NOSTORAGE) - def test_mdadm_name_extra_info(self): - """Verify that md_examine and md_detail work with extra MD RAID name info""" 
- -@@ -632,6 +638,7 @@ class MDUnloadTest(MDTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum mdsetup version works as expected""" - -@@ -649,6 +656,7 @@ class MDUnloadTest(MDTestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("mdraid", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_md(self): - """Verify that checking mdsetup tool availability works as expected""" - -diff --git a/tests/mpath_test.py b/tests/mpath_test.py -index e9d1e42..acd3053 100644 ---- a/tests/mpath_test.py -+++ b/tests/mpath_test.py -@@ -2,7 +2,7 @@ import unittest - import os - import overrides_hack - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, get_version -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, get_version, TestTags, tag_test - from gi.repository import BlockDev, GLib - - class MpathTest(unittest.TestCase): -@@ -56,6 +56,7 @@ class MpathUnloadTest(MpathTest): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum dmsetup version works as expected""" - -@@ -74,6 +75,7 @@ class MpathUnloadTest(MpathTest): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("mpath", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_multipath(self): - """Verify that checking multipath tool availability works as expected""" - -@@ -91,6 +93,7 @@ class MpathUnloadTest(MpathTest): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("mpath", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_mpathconf(self): - """Verify that checking mpathconf tool availability works as expected""" - -diff --git a/tests/nvdimm_test.py b/tests/nvdimm_test.py -index a4e6854..1c2fb3c 100644 ---- a/tests/nvdimm_test.py -+++ b/tests/nvdimm_test.py -@@ -6,7 +6,7 @@ import overrides_hack - - from distutils.version import LooseVersion - --from utils import run_command, read_file, skip_on, fake_path -+from utils import run_command, read_file, skip_on, fake_path, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -80,6 +80,7 @@ class NVDIMMNamespaceTestCase(NVDIMMTestCase): - # even for modes where sector size doesn't make sense - self.assertEqual(bd_info.sector_size, 512) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.CORE) - def test_namespace_info(self): - # get info about our 'testing' namespace - info = BlockDev.nvdimm_namespace_info(self.sys_info["dev"]) -@@ -96,14 +97,15 @@ class NVDIMMNamespaceTestCase(NVDIMMTestCase): - info = BlockDev.nvdimm_namespace_info("definitely-not-a-namespace") - self.assertIsNone(info) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.CORE) - def test_list_namespaces(self): - bd_namespaces = BlockDev.nvdimm_list_namespaces() - self.assertEqual(len(bd_namespaces), 1) - - self._check_namespace_info(bd_namespaces[0]) - -- @unittest.skipUnless("JENKINS_HOME" in os.environ, "skipping test that modifies system configuration") - @skip_on("fedora", "29", reason="Disabling is broken on rawhide and makes the 'fake' NVDIMM unusable.") -+ @tag_test(TestTags.EXTRADEPS, 
TestTags.UNSAFE) - def test_enable_disable(self): - # non-existing/unknow namespace - with self.assertRaises(GLib.GError): -@@ -130,8 +132,8 @@ class NVDIMMNamespaceTestCase(NVDIMMTestCase): - info = BlockDev.nvdimm_namespace_info(self.sys_info["dev"]) - self.assertTrue(info.enabled) - -- @unittest.skipUnless("JENKINS_HOME" in os.environ, "skipping test that modifies system configuration") - @skip_on("fedora", "29", reason="Disabling is broken on rawhide and makes the 'fake' NVDIMM unusable.") -+ @tag_test(TestTags.EXTRADEPS, TestTags.UNSAFE) - def test_namespace_reconfigure(self): - # active namespace -- reconfigure doesn't work without force - with self.assertRaises(GLib.GError): -@@ -188,6 +190,7 @@ class NVDIMMUnloadTest(NVDIMMTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_ndctl(self): - """Verify that checking ndctl tool availability works as expected""" - -diff --git a/tests/overrides_test.py b/tests/overrides_test.py -index 53c65b5..8e7f5a5 100644 ---- a/tests/overrides_test.py -+++ b/tests/overrides_test.py -@@ -3,6 +3,8 @@ import math - import overrides_hack - from gi.repository import BlockDev - -+from utils import TestTags, tag_test -+ - - class OverridesTest(unittest.TestCase): - # all plugins except for 'btrfs', 'fs' and 'mpath' -- these don't have all -@@ -19,6 +21,7 @@ class OverridesTest(unittest.TestCase): - BlockDev.reinit(cls.requested_plugins, True, None) - - class OverridesTestCase(OverridesTest): -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_error_proxy(self): - """Verify that the error proxy works as expected""" - -@@ -68,6 +71,7 @@ class OverridesUnloadTestCase(OverridesTest): - # tests - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_xrules(self): - """Verify that regexp-based transformation rules work as expected""" - -@@ -81,6 +85,7 @@ class OverridesUnloadTestCase(OverridesTest): - # load the plugins back - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_exception_inheritance(self): - # unload all plugins first - self.assertTrue(BlockDev.reinit([], True, None)) -diff --git a/tests/part_test.py b/tests/part_test.py -index adbaa9a..9e58cc6 100644 ---- a/tests/part_test.py -+++ b/tests/part_test.py -@@ -1,6 +1,6 @@ - import unittest - import os --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, skip_on -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, skip_on, TestTags, tag_test - import overrides_hack - - from gi.repository import BlockDev, GLib -@@ -46,6 +46,7 @@ class PartTestCase(unittest.TestCase): - os.unlink(self.dev_file2) - - class PartCreateTableCase(PartTestCase): -+ @tag_test(TestTags.CORE) - def test_create_table(self): - """Verify that it is possible to create a new partition table""" - -@@ -78,6 +79,7 @@ class PartCreateTableCase(PartTestCase): - - - class PartGetDiskSpecCase(PartTestCase): -+ @tag_test(TestTags.CORE) - def test_get_disk_spec(self): - """Verify that it is possible to get information about disk""" - -@@ -115,6 +117,7 @@ class PartGetDiskSpecCase(PartTestCase): - self.assertEqual(ps.flags, 0) - - class PartCreatePartCase(PartTestCase): -+ @tag_test(TestTags.CORE) - def test_create_part_simple(self): - """Verify that it is possible to create a parition""" - -@@ -221,6 +224,7 @@ class 
PartCreatePartCase(PartTestCase): - self.assertEqual(ps.flags, ps3.flags) - - class PartCreatePartFullCase(PartTestCase): -+ @tag_test(TestTags.CORE) - def test_full_device_partition(self): - # we first need a partition table - succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True) -@@ -362,6 +366,7 @@ class PartCreatePartFullCase(PartTestCase): - BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps4.start + ps4.size + 1, - 10 * 1024**2, BlockDev.PartAlign.OPTIMAL) - -+ @tag_test(TestTags.CORE) - def test_create_part_with_extended_logical(self): - """Verify that partition creation works as expected with primary, extended and logical parts""" - -@@ -502,6 +507,7 @@ class PartCreatePartFullCase(PartTestCase): - BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps3.start + ps3.size + 1, - 10 * 1024**2, BlockDev.PartAlign.OPTIMAL) - -+ @tag_test(TestTags.CORE) - def test_create_part_next(self): - """Verify that partition creation works as expected with the NEXT (auto) type""" - -@@ -589,6 +595,7 @@ class PartCreatePartFullCase(PartTestCase): - BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps4.start + ps4.size + 1, - 10 * 1024**2, BlockDev.PartAlign.OPTIMAL) - -+ @tag_test(TestTags.CORE) - def test_create_part_next_gpt(self): - """Verify that partition creation works as expected with the NEXT (auto) type on GPT""" - -@@ -664,6 +671,7 @@ class PartGetDiskPartsCase(PartTestCase): - class PartGetDiskFreeRegions(PartTestCase): - @skip_on(("centos", "enterprise_linux"), "7", reason="libparted provides weird values here") - @skip_on("debian", reason="libparted provides weird values here") -+ @tag_test(TestTags.CORE) - def test_get_disk_free_regions(self): - """Verify that it is possible to get info about free regions on a disk""" - -@@ -1057,6 +1065,7 @@ class PartCreateResizePartCase(PartTestCase): - self.assertGreaterEqual(ps.size, initial_size) # at least the requested size - - class PartCreateDeletePartCase(PartTestCase): -+ @tag_test(TestTags.CORE) - def test_create_delete_part_simple(self): - """Verify that it is possible to create and delete a parition""" - -diff --git a/tests/s390_test.py b/tests/s390_test.py -index 98c6b1b..da23a55 100644 ---- a/tests/s390_test.py -+++ b/tests/s390_test.py -@@ -2,7 +2,7 @@ import unittest - import os - import overrides_hack - --from utils import fake_path -+from utils import fake_path, TestTags, tag_test - from gi.repository import BlockDev, GLib - - @unittest.skipUnless(os.uname()[4].startswith('s390'), "s390x architecture required") -@@ -18,6 +18,7 @@ class S390TestCase(unittest.TestCase): - else: - BlockDev.reinit(cls.requested_plugins, True, None) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.NOSTORAGE) - def test_device_input(self): - """Verify that s390_sanitize_dev_input works as expected""" - dev = "1234" -@@ -42,6 +43,7 @@ class S390TestCase(unittest.TestCase): - dev = "0.0.abcdefgh" - self.assertEqual(BlockDev.s390_sanitize_dev_input(dev), dev) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.NOSTORAGE) - def test_wwpn_input(self): - """Verify that s390_zfcp_sanitize_wwpn_input works as expected""" - # missing "0x" from beginning of wwpn; this should be added by fx -@@ -56,6 +58,7 @@ class S390TestCase(unittest.TestCase): - with self.assertRaises(GLib.GError): - BlockDev.s390_zfcp_sanitize_wwpn_input(wwpn) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.NOSTORAGE) - def test_lun_input(self): - """Verify that s390_zfcp_sanitize_lun_input works as expected""" - 
# user does not prepend lun with "0x"; this should get added -@@ -91,6 +94,7 @@ class S390UnloadTest(unittest.TestCase): - else: - BlockDev.reinit(cls.requested_plugins, True, None) - -+ @tag_test(TestTags.EXTRADEPS, TestTags.NOSTORAGE) - def test_check_no_dasdfmt(self): - """Verify that checking dasdfmt tool availability works as expected""" - -diff --git a/tests/swap_test.py b/tests/swap_test.py -index 05d0c19..66b5eb2 100644 ---- a/tests/swap_test.py -+++ b/tests/swap_test.py -@@ -2,7 +2,7 @@ import unittest - import os - import overrides_hack - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, run_command -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, run_command, run, TestTags, tag_test - from gi.repository import BlockDev, GLib - - -@@ -38,6 +38,7 @@ class SwapTestCase(SwapTest): - pass - os.unlink(self.dev_file) - -+ @tag_test(TestTags.CORE) - def test_all(self): - """Verify that swap_* functions work as expected""" - -@@ -103,6 +104,7 @@ class SwapUnloadTest(SwapTest): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_low_version(self): - """Verify that checking the minimum swap utils versions works as expected""" - -@@ -120,6 +122,7 @@ class SwapUnloadTest(SwapTest): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("swap", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_mkswap(self): - """Verify that checking mkswap and swaplabel tools availability - works as expected -@@ -146,6 +149,7 @@ class SwapUnloadTest(SwapTest): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - self.assertIn("swap", BlockDev.get_available_plugin_names()) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_mkswap_runtime(self): - """Verify that runtime checking mkswap tool availability works as expected""" - -@@ -172,6 +176,7 @@ class SwapTechAvailable(SwapTest): - self.addCleanup(BlockDev.switch_init_checks, True) - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_tech_available(self): - """Verify that runtime checking mkswap and swaplabel tools availability - works as expected -diff --git a/tests/utils_test.py b/tests/utils_test.py -index 02b0203..66d8a32 100644 ---- a/tests/utils_test.py -+++ b/tests/utils_test.py -@@ -2,7 +2,7 @@ import unittest - import re - import os - import overrides_hack --from utils import fake_utils, create_sparse_tempfile, create_lio_device, delete_lio_device, run_command -+from utils import fake_utils, create_sparse_tempfile, create_lio_device, delete_lio_device, run_command, TestTags, tag_test - - from gi.repository import BlockDev, GLib - -@@ -25,6 +25,7 @@ class UtilsExecProgressTest(UtilsTestCase): - self.assertTrue(isinstance(completion, int)) - self.log.append(completion) - -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_initialization(self): - """ Verify that progress report can (de)initialized""" - -@@ -54,6 +55,7 @@ class UtilsExecLoggingTest(UtilsTestCase): - - self.log += msg + "\n" - -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_logging(self): - """Verify that setting up and using exec logging works as expected""" - -@@ -91,6 +93,7 @@ class UtilsExecLoggingTest(UtilsTestCase): - self.assertTrue(succ) - self.assertEqual(old_log, self.log) - -+ @tag_test(TestTags.NOSTORAGE, 
TestTags.CORE) - def test_version_cmp(self): - """Verify that version comparison works as expected""" - -@@ -124,6 +127,7 @@ class UtilsExecLoggingTest(UtilsTestCase): - self.assertEqual(BlockDev.utils_version_cmp("1.1.1", "1.1.1-1"), -1) - self.assertEqual(BlockDev.utils_version_cmp("1.1.2", "1.2"), -1) - -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_util_version(self): - """Verify that checking utility availability works as expected""" - -@@ -167,6 +171,7 @@ class UtilsExecLoggingTest(UtilsTestCase): - self.assertTrue(BlockDev.utils_check_util_version("libblockdev-fake-util-fail", "1.1", "version", "Version:\\s(.*)")) - - class UtilsDevUtilsTestCase(UtilsTestCase): -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_resolve_device(self): - """Verify that resolving device spec works as expected""" - -@@ -199,6 +204,7 @@ class UtilsDevUtilsTestCase(UtilsTestCase): - self.assertEqual(BlockDev.utils_resolve_device(dev_link[5:]), dev) - - class UtilsDevUtilsTestCase(UtilsTestCase): -+ @tag_test(TestTags.NOSTORAGE, TestTags.CORE) - def test_resolve_device(self): - """Verify that resolving device spec works as expected""" - -@@ -248,7 +254,7 @@ class UtilsDevUtilsSymlinksTestCase(UtilsTestCase): - pass - os.unlink(self.dev_file) - -- -+ @tag_test(TestTags.CORE) - def test_get_device_symlinks(self): - """Verify that getting device symlinks works as expected""" - -diff --git a/tests/vdo_test.py b/tests/vdo_test.py -index be8103a..f20ccd5 100644 ---- a/tests/vdo_test.py -+++ b/tests/vdo_test.py -@@ -6,7 +6,7 @@ import unittest - import overrides_hack - import six - --from utils import run_command, read_file, skip_on, fake_path, create_sparse_tempfile, create_lio_device, delete_lio_device -+from utils import run_command, read_file, skip_on, fake_path, create_sparse_tempfile, create_lio_device, delete_lio_device, TestTags, tag_test - from gi.repository import BlockDev, GLib - from bytesize import bytesize - from distutils.spawn import find_executable -@@ -48,7 +48,6 @@ class VDOTestCase(unittest.TestCase): - os.unlink(self.dev_file) - - --@unittest.skipIf("SKIP_SLOW" in os.environ, "skipping slow tests") - class VDOTest(VDOTestCase): - - vdo_name = "bd-test-vdo" -@@ -56,6 +55,7 @@ class VDOTest(VDOTestCase): - def _remove_vdo(self, name): - run_command("vdo remove --force -n %s" % name) - -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_create_remove(self): - """Verify that it is possible to create and remove a VDO volume""" - -@@ -85,6 +85,7 @@ class VDOTest(VDOTestCase): - - self.assertFalse(os.path.exists("/dev/mapper/%s" % self.vdo_name)) - -+ @tag_test(TestTags.SLOW) - def test_enable_disable_compression(self): - """Verify that it is possible to enable/disable compression on an existing VDO volume""" - -@@ -110,6 +111,7 @@ class VDOTest(VDOTestCase): - info = BlockDev.vdo_info(self.vdo_name) - self.assertTrue(info.compression) - -+ @tag_test(TestTags.SLOW) - def test_enable_disable_deduplication(self): - """Verify that it is possible to enable/disable deduplication on an existing VDO volume""" - -@@ -135,6 +137,7 @@ class VDOTest(VDOTestCase): - info = BlockDev.vdo_info(self.vdo_name) - self.assertTrue(info.deduplication) - -+ @tag_test(TestTags.SLOW) - def test_activate_deactivate(self): - """Verify that it is possible to activate/deactivate an existing VDO volume""" - -@@ -172,6 +175,7 @@ class VDOTest(VDOTestCase): - - self.assertTrue(os.path.exists("/dev/mapper/%s" % self.vdo_name)) - -+ @tag_test(TestTags.SLOW) - def test_change_write_policy(self): - - ret = 
BlockDev.vdo_create(self.vdo_name, self.loop_dev, 3 * self.loop_size, 0, -@@ -203,6 +207,7 @@ class VDOTest(VDOTestCase): - - return info["VDOs"][name] - -+ @tag_test(TestTags.SLOW, TestTags.CORE) - def test_get_info(self): - """Verify that it is possible to get information about an existing VDO volume""" - -@@ -229,6 +234,7 @@ class VDOTest(VDOTestCase): - self.assertEqual(bd_info.physical_size, bytesize.Size(sys_info["Physical size"])) - self.assertEqual(bd_info.logical_size, bytesize.Size(sys_info["Logical size"])) - -+ @tag_test(TestTags.SLOW) - def test_grow_logical(self): - """Verify that it is possible to grow logical size of an existing VDO volume""" - -@@ -249,6 +255,7 @@ class VDOTest(VDOTestCase): - - self.assertEqual(info.logical_size, new_size) - -+ @tag_test(TestTags.SLOW, TestTags.UNSTABLE) - def test_grow_physical(self): - """Verify that it is possible to grow physical size of an existing VDO volume""" - -@@ -284,6 +291,7 @@ class VDOTest(VDOTestCase): - self.assertEqual(info_before.logical_size, info_after.logical_size) - self.assertGreater(info_after.physical_size, info_before.physical_size) - -+ @tag_test(TestTags.SLOW) - def test_statistics(self): - """Verify that it is possible to retrieve statistics of an existing VDO volume""" - -@@ -311,6 +319,7 @@ class VDOUnloadTest(VDOTestCase): - # tests - self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) - -+ @tag_test(TestTags.NOSTORAGE) - def test_check_no_vdo(self): - """Verify that checking vdo tool availability works as expected""" - --- -2.20.1 - - -From c709805db97621889c4354f9771db47916dbc2e5 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Apr 2019 10:39:21 +0200 -Subject: [PATCH 04/10] Remove duplicate test case - -UtilsDevUtilsTestCase was defined twice, probably because of some -copy paste or merging mistake. 
---- - tests/utils_test.py | 32 -------------------------------- - 1 file changed, 32 deletions(-) - -diff --git a/tests/utils_test.py b/tests/utils_test.py -index 66d8a32..e268409 100644 ---- a/tests/utils_test.py -+++ b/tests/utils_test.py -@@ -170,38 +170,6 @@ class UtilsExecLoggingTest(UtilsTestCase): - # exit code != 0 - self.assertTrue(BlockDev.utils_check_util_version("libblockdev-fake-util-fail", "1.1", "version", "Version:\\s(.*)")) - --class UtilsDevUtilsTestCase(UtilsTestCase): -- @tag_test(TestTags.NOSTORAGE, TestTags.CORE) -- def test_resolve_device(self): -- """Verify that resolving device spec works as expected""" -- -- with self.assertRaises(GLib.GError): -- BlockDev.utils_resolve_device("no_such_device") -- -- dev = "/dev/libblockdev-test-dev" -- with open(dev, "w"): -- pass -- self.addCleanup(os.unlink, dev) -- -- # full path, no symlink, should just return the same -- self.assertEqual(BlockDev.utils_resolve_device(dev), dev) -- -- # just the name of the device, should return the full path -- self.assertEqual(BlockDev.utils_resolve_device(dev[5:]), dev) -- -- dev_dir = "/dev/libblockdev-test-dir" -- os.mkdir(dev_dir) -- self.addCleanup(os.rmdir, dev_dir) -- -- dev_link = dev_dir + "/test-dev-link" -- os.symlink("../" + dev[5:], dev_link) -- self.addCleanup(os.unlink, dev_link) -- -- # should resolve the symlink -- self.assertEqual(BlockDev.utils_resolve_device(dev_link), dev) -- -- # should resolve the symlink even without the "/dev" prefix -- self.assertEqual(BlockDev.utils_resolve_device(dev_link[5:]), dev) - - class UtilsDevUtilsTestCase(UtilsTestCase): - @tag_test(TestTags.NOSTORAGE, TestTags.CORE) --- -2.20.1 - - -From 09cee5780854f92b28aaeb7c67ea76c6fc853e30 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Apr 2019 11:23:12 +0200 -Subject: [PATCH 05/10] Allow running tests against installed libblockdev - ---- - tests/overrides_hack.py | 5 +- - tests/run_tests.py | 106 +++++++++++++++++++++++----------------- - 2 files changed, 64 insertions(+), 47 deletions(-) - -diff --git a/tests/overrides_hack.py b/tests/overrides_hack.py -index 0f10ee5..509a961 100644 ---- a/tests/overrides_hack.py -+++ b/tests/overrides_hack.py -@@ -1,5 +1,8 @@ -+import os - import gi.overrides --if not gi.overrides.__path__[0].endswith("src/python/gi/overrides"): -+ -+if 'LIBBLOCKDEV_TESTS_SKIP_OVERRIDE' not in os.environ and \ -+ not gi.overrides.__path__[0].endswith("src/python/gi/overrides"): - local_overrides = None - # our overrides don't take precedence, let's fix it - for i, path in enumerate(gi.overrides.__path__): -diff --git a/tests/run_tests.py b/tests/run_tests.py -index 5301d07..b67c2e7 100644 ---- a/tests/run_tests.py -+++ b/tests/run_tests.py -@@ -65,49 +65,8 @@ def _get_test_tags(test): - return tags - - --def _print_skip_message(test, skip_tag): -- -- # test.id() looks like 'crypto_test.CryptoTestResize.test_luks2_resize' -- # and we want to print 'test_luks2_resize (crypto_test.CryptoTestResize)' -- test_desc = test.id().split(".") -- test_name = test_desc[-1] -- test_module = ".".join(test_desc[:-1]) -- -- if skip_tag == TestTags.SLOW: -- reason = "skipping slow tests" -- elif skip_tag == TestTags.UNSTABLE: -- reason = "skipping unstable tests" -- elif skip_tag == TestTags.UNSAFE: -- reason = "skipping test that modifies system configuration" -- elif skip_tag == TestTags.EXTRADEPS: -- reason = "skipping test that requires special configuration" -- elif skip_tag == TestTags.CORE: -- reason = "skipping non-core test" -- else: -- reason = "unknown reason" # just to 
be sure there is some default value -- -- if test._testMethodDoc: -- print("%s (%s)\n%s ... skipped '%s'" % (test_name, test_module, test._testMethodDoc, reason)) -- else: -- print("%s (%s) ... skipped '%s'" % (test_name, test_module, reason)) -- -- --if __name__ == '__main__': -- -- testdir = os.path.abspath(os.path.dirname(__file__)) -- projdir = os.path.abspath(os.path.normpath(os.path.join(testdir, '..'))) -- -- if 'LD_LIBRARY_PATH' not in os.environ and 'GI_TYPELIB_PATH' not in os.environ: -- os.environ['LD_LIBRARY_PATH'] = LIBDIRS -- os.environ['GI_TYPELIB_PATH'] = GIDIR -- os.environ['LIBBLOCKDEV_CONFIG_DIR'] = os.path.join(testdir, 'default_config') -- -- try: -- pyver = 'python3' if six.PY3 else 'python' -- os.execv(sys.executable, [pyver] + sys.argv) -- except OSError as e: -- print('Failed re-exec with a new LD_LIBRARY_PATH and GI_TYPELIB_PATH: %s' % str(e)) -- sys.exit(1) -+def parse_args(): -+ """ Parse cmdline arguments """ - - argparser = argparse.ArgumentParser(description='libblockdev test suite') - argparser.add_argument('testname', nargs='*', help='name of test class or ' -@@ -127,6 +86,9 @@ if __name__ == '__main__': - argparser.add_argument('-s', '--stop', dest='stop', - help='stop executing after first failed test', - action='store_true') -+ argparser.add_argument('-i', '--installed', dest='installed', -+ help='run tests against installed version of libblockdev', -+ action='store_true') - args = argparser.parse_args() - - if args.fast: -@@ -144,9 +106,61 @@ if __name__ == '__main__': - if 'FEELINGLUCKY' in os.environ: - args.lucky = True - -- sys.path.append(testdir) -- sys.path.append(projdir) -- sys.path.append(os.path.join(projdir, 'src/python')) -+ return args -+ -+ -+def _print_skip_message(test, skip_tag): -+ -+ # test.id() looks like 'crypto_test.CryptoTestResize.test_luks2_resize' -+ # and we want to print 'test_luks2_resize (crypto_test.CryptoTestResize)' -+ test_desc = test.id().split(".") -+ test_name = test_desc[-1] -+ test_module = ".".join(test_desc[:-1]) -+ -+ if skip_tag == TestTags.SLOW: -+ reason = "skipping slow tests" -+ elif skip_tag == TestTags.UNSTABLE: -+ reason = "skipping unstable tests" -+ elif skip_tag == TestTags.UNSAFE: -+ reason = "skipping test that modifies system configuration" -+ elif skip_tag == TestTags.EXTRADEPS: -+ reason = "skipping test that requires special configuration" -+ elif skip_tag == TestTags.CORE: -+ reason = "skipping non-core test" -+ else: -+ reason = "unknown reason" # just to be sure there is some default value -+ -+ if test._testMethodDoc: -+ print("%s (%s)\n%s ... skipped '%s'" % (test_name, test_module, test._testMethodDoc, reason)) -+ else: -+ print("%s (%s) ... 
skipped '%s'" % (test_name, test_module, reason)) -+ -+ -+if __name__ == '__main__': -+ -+ testdir = os.path.abspath(os.path.dirname(__file__)) -+ projdir = os.path.abspath(os.path.normpath(os.path.join(testdir, '..'))) -+ -+ args = parse_args() -+ if args.installed: -+ os.environ['LIBBLOCKDEV_TESTS_SKIP_OVERRIDE'] = '' -+ os.environ['LIBBLOCKDEV_CONFIG_DIR'] = '/etc/libblockdev/conf.d/' -+ else: -+ if 'LD_LIBRARY_PATH' not in os.environ and 'GI_TYPELIB_PATH' not in os.environ: -+ os.environ['LD_LIBRARY_PATH'] = LIBDIRS -+ os.environ['GI_TYPELIB_PATH'] = GIDIR -+ os.environ['LIBBLOCKDEV_CONFIG_DIR'] = os.path.join(testdir, 'default_config') -+ -+ try: -+ pyver = 'python3' if six.PY3 else 'python' -+ os.execv(sys.executable, [pyver] + sys.argv) -+ except OSError as e: -+ print('Failed re-exec with a new LD_LIBRARY_PATH and GI_TYPELIB_PATH: %s' % str(e)) -+ sys.exit(1) -+ -+ sys.path.append(testdir) -+ sys.path.append(projdir) -+ sys.path.append(os.path.join(projdir, 'src/python')) - - start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - --- -2.20.1 - - -From df65462618e72602b7760f6c750085094b2bddff Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Apr 2019 11:50:04 +0200 -Subject: [PATCH 06/10] Add a special test tag for library tests that recompile - plugins - -We have some tests that changes lvm plugin and recompile it to -test loading of plugins. We can't run these tests against -installed library, so we need a special tag to skip them. ---- - tests/library_test.py | 8 ++++---- - tests/run_tests.py | 2 ++ - tests/utils.py | 2 ++ - 3 files changed, 8 insertions(+), 4 deletions(-) - -diff --git a/tests/library_test.py b/tests/library_test.py -index fa33b53..e8bb175 100644 ---- a/tests/library_test.py -+++ b/tests/library_test.py -@@ -40,7 +40,7 @@ class LibraryOpsTestCase(unittest.TestCase): - BlockDev.reinit(self.requested_plugins, True, None) - - # recompiles the LVM plugin -- @tag_test(TestTags.SLOW, TestTags.CORE) -+ @tag_test(TestTags.SLOW, TestTags.CORE, TestTags.SOURCEONLY) - def test_reload(self): - """Verify that reloading plugins works as expected""" - -@@ -72,7 +72,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) - - # recompiles the LVM plugin -- @tag_test(TestTags.SLOW) -+ @tag_test(TestTags.SLOW, TestTags.SOURCEONLY) - def test_force_plugin(self): - """Verify that forcing plugin to be used works as expected""" - -@@ -118,7 +118,7 @@ class LibraryOpsTestCase(unittest.TestCase): - self.assertEqual(BlockDev.lvm_get_max_lv_size(), orig_max_size) - - # recompiles the LVM plugin -- @tag_test(TestTags.SLOW) -+ @tag_test(TestTags.SLOW, TestTags.SOURCEONLY) - def test_plugin_priority(self): - """Verify that preferring plugin to be used works as expected""" - -@@ -181,7 +181,7 @@ class LibraryOpsTestCase(unittest.TestCase): - os.system ("rm -f src/plugins/.libs/libbd_lvm2.so") - - # recompiles the LVM plugin -- @tag_test(TestTags.SLOW) -+ @tag_test(TestTags.SLOW, TestTags.SOURCEONLY) - def test_plugin_fallback(self): - """Verify that fallback when loading plugins works as expected""" - -diff --git a/tests/run_tests.py b/tests/run_tests.py -index b67c2e7..7df9e7d 100644 ---- a/tests/run_tests.py -+++ b/tests/run_tests.py -@@ -61,6 +61,8 @@ def _get_test_tags(test): - tags.append(TestTags.EXTRADEPS) - if getattr(test_fn, "regression", False) or getattr(test_fn.__self__, "regression", False): - tags.append(TestTags.REGRESSION) -+ if getattr(test_fn, "sourceonly", False) or 
getattr(test_fn.__self__, "sourceonly", False): -+ tags.append(TestTags.SOURCEONLY) - - return tags - -diff --git a/tests/utils.py b/tests/utils.py -index 82b5494..df8e787 100644 ---- a/tests/utils.py -+++ b/tests/utils.py -@@ -351,6 +351,7 @@ class TestTags(Enum): - NOSTORAGE = 5 # tests that don't work with storage - EXTRADEPS = 6 # tests that require special configuration and/or device to run - REGRESSION = 7 # regression tests -+ SOURCEONLY = 8 # tests that can't run against installed library - - - def tag_test(*tags): -@@ -362,6 +363,7 @@ def tag_test(*tags): - func.nostorage = TestTags.NOSTORAGE in tags - func.extradeps = TestTags.EXTRADEPS in tags - func.regression = TestTags.REGRESSION in tags -+ func.sourceonly = TestTags.SOURCEONLY in tags - - return func - --- -2.20.1 - - -From 74a6630db44bd141b76aec80c4eb81fa15dea593 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Apr 2019 11:55:32 +0200 -Subject: [PATCH 07/10] Skip "source only" tests when running against installed - version - ---- - tests/run_tests.py | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tests/run_tests.py b/tests/run_tests.py -index 7df9e7d..4244c06 100644 ---- a/tests/run_tests.py -+++ b/tests/run_tests.py -@@ -129,6 +129,8 @@ def _print_skip_message(test, skip_tag): - reason = "skipping test that requires special configuration" - elif skip_tag == TestTags.CORE: - reason = "skipping non-core test" -+ elif skip_tag == TestTags.SOURCEONLY: -+ reason = "skipping test that can run only against library compiled from source" - else: - reason = "unknown reason" # just to be sure there is some default value - -@@ -198,6 +200,9 @@ if __name__ == '__main__': - if TestTags.EXTRADEPS in tags and not args.jenkins: - _print_skip_message(test, TestTags.EXTRADEPS) - continue -+ if TestTags.SOURCEONLY in tags and args.installed: -+ _print_skip_message(test, TestTags.SOURCEONLY) -+ continue - - if args.core and TestTags.CORE not in tags and TestTags.REGRESSION not in tags: - _print_skip_message(test, TestTags.CORE) --- -2.20.1 - - -From d19f2508dbfb00473b21ab1bd6f5603aec66bf4e Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 4 Apr 2019 14:26:22 +0200 -Subject: [PATCH 08/10] Force LVM cli plugin in lvm_test - -We can't rely on LVM cli plugin being default in config when -running against installed library. ---- - tests/lvm_test.py | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/tests/lvm_test.py b/tests/lvm_test.py -index 28a4b05..0b2c5ad 100644 ---- a/tests/lvm_test.py -+++ b/tests/lvm_test.py -@@ -12,10 +12,14 @@ from gi.repository import BlockDev, GLib - - - class LVMTestCase(unittest.TestCase): -- requested_plugins = BlockDev.plugin_specs_from_names(("lvm",)) - - @classmethod - def setUpClass(cls): -+ ps = BlockDev.PluginSpec() -+ ps.name = BlockDev.Plugin.LVM -+ ps.so_name = "libbd_lvm.so" -+ cls.requested_plugins = [ps] -+ - if not BlockDev.is_initialized(): - BlockDev.init(cls.requested_plugins, None) - else: --- -2.20.1 - - -From f15a7428382d5ee086ed13755b3ba1f8705b79cf Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 6 Sep 2018 10:27:52 +0200 -Subject: [PATCH 09/10] Fix how we check zram stats from /sys/block/zram0/stat - -There are four new stats since kernel 4.19. Checking if we read -more than 11 values should be enough to be sure that the file -has the stats we want. 
---- - tests/kbd_test.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/kbd_test.py b/tests/kbd_test.py -index 5e872c4..23b63c9 100644 ---- a/tests/kbd_test.py -+++ b/tests/kbd_test.py -@@ -192,7 +192,7 @@ class KbdZRAMStatsTestCase(KbdZRAMTestCase): - - # read 'num_reads' and 'num_writes' from '/sys/block/zram0/stat' - sys_stats = read_file("/sys/block/zram0/stat").strip().split() -- self.assertEqual(len(sys_stats), 11) -+ self.assertGreaterEqual(len(sys_stats), 11) # 15 stats since 4.19 - num_reads = int(sys_stats[0]) - num_writes = int(sys_stats[4]) - self.assertEqual(stats.num_reads, num_reads) --- -2.20.1 - - -From 7e8f6ef031fe3e5e0b117d910cbf8de36f5bd75e Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Wed, 10 Apr 2019 07:51:48 +0200 -Subject: [PATCH 10/10] Mark 'test_set_bitmap_location' as unstable - -This test randomly fails with error message from mdadm: -"mdadm: failed to remove internal bitmap". ---- - tests/mdraid_test.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/mdraid_test.py b/tests/mdraid_test.py -index ea489db..14d0bd4 100644 ---- a/tests/mdraid_test.py -+++ b/tests/mdraid_test.py -@@ -520,7 +520,7 @@ class MDTestNameNodeBijection(MDTestCase): - self.assertTrue(succ) - - class MDTestSetBitmapLocation(MDTestCase): -- @tag_test(TestTags.SLOW) -+ @tag_test(TestTags.SLOW, TestTags.UNSTABLE) - def test_set_bitmap_location(self): - """Verify we can change bitmap location for an existing MD array""" - --- -2.20.1 - diff --git a/SOURCES/0004-memory-leaks.patch b/SOURCES/0004-memory-leaks.patch deleted file mode 100644 index 1c3f087..0000000 --- a/SOURCES/0004-memory-leaks.patch +++ /dev/null @@ -1,914 +0,0 @@ -From 0789fc7f227b557d9633402bf9971ff7a6360447 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Wed, 24 Apr 2019 16:03:31 +0200 -Subject: [PATCH 01/17] lvm: Fix some obvious memory leaks - ---- - src/plugins/lvm.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/src/plugins/lvm.c b/src/plugins/lvm.c -index 87ff5a4..a23f8fd 100644 ---- a/src/plugins/lvm.c -+++ b/src/plugins/lvm.c -@@ -371,6 +371,7 @@ static GHashTable* parse_lvm_vars (const gchar *str, guint *num_items) { - if (g_strv_length (key_val) == 2) { - /* we only want to process valid lines (with the '=' character) */ - g_hash_table_insert (table, key_val[0], key_val[1]); -+ g_free (key_val); - (*num_items)++; - } else - /* invalid line, just free key_val */ -@@ -972,6 +973,7 @@ BDLVMPVdata* bd_lvm_pvinfo (const gchar *device, GError **error) { - if (table) - g_hash_table_destroy (table); - } -+ g_strfreev (lines); - - /* getting here means no usable info was found */ - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, -@@ -1039,6 +1041,7 @@ BDLVMPVdata** bd_lvm_pvs (GError **error) { - if (pvs->len == 0) { - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, - "Failed to parse information about PVs"); -+ g_ptr_array_free (pvs, TRUE); - return NULL; - } - -@@ -1247,6 +1250,7 @@ BDLVMVGdata* bd_lvm_vginfo (const gchar *vg_name, GError **error) { - if (table) - g_hash_table_destroy (table); - } -+ g_strfreev (lines); - - /* getting here means no usable info was found */ - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, -@@ -1312,6 +1316,7 @@ BDLVMVGdata** bd_lvm_vgs (GError **error) { - if (vgs->len == 0) { - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, - "Failed to parse information about VGs"); -+ g_ptr_array_free (vgs, TRUE); - return NULL; - } - -@@ -1641,6 +1646,7 @@ BDLVMLVdata* bd_lvm_lvinfo (const gchar 
*vg_name, const gchar *lv_name, GError * - if (table) - g_hash_table_destroy (table); - } -+ g_strfreev (lines); - - /* getting here means no usable info was found */ - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, -@@ -1713,6 +1719,7 @@ BDLVMLVdata** bd_lvm_lvs (const gchar *vg_name, GError **error) { - if (lvs->len == 0) { - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, - "Failed to parse information about LVs"); -+ g_ptr_array_free (lvs, FALSE); - return NULL; - } - --- -2.21.0 - - -From 552173cfcb77d9ed3476b55e0170627998081912 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Wed, 24 Apr 2019 16:27:07 +0200 -Subject: [PATCH 02/17] lvm: Use g_ptr_array_free() for creating lists - -No need to allocate separate array and copy elements one by one, use -g_ptr_array_free() instead and only add the trailing NULL element as -a regular item. - -This fixes leaks of the array shell. ---- - src/plugins/lvm.c | 88 ++++++++++++++++++++--------------------------- - 1 file changed, 37 insertions(+), 51 deletions(-) - -diff --git a/src/plugins/lvm.c b/src/plugins/lvm.c -index a23f8fd..c2f2bf8 100644 ---- a/src/plugins/lvm.c -+++ b/src/plugins/lvm.c -@@ -1001,24 +1001,25 @@ BDLVMPVdata** bd_lvm_pvs (GError **error) { - gchar **lines = NULL; - gchar **lines_p = NULL; - guint num_items; -- GPtrArray *pvs = g_ptr_array_new (); -+ GPtrArray *pvs; - BDLVMPVdata *pvdata = NULL; -- BDLVMPVdata **ret = NULL; -- guint64 i = 0; - -- success = call_lvm_and_capture_output (args, NULL, &output, error); -+ pvs = g_ptr_array_new (); - -+ success = call_lvm_and_capture_output (args, NULL, &output, error); - if (!success) { - if (g_error_matches (*error, BD_UTILS_EXEC_ERROR, BD_UTILS_EXEC_ERROR_NOOUT)) { - /* no output => no VGs, not an error */ - g_clear_error (error); -- ret = g_new0 (BDLVMPVdata*, 1); -- ret[0] = NULL; -- return ret; -+ /* return an empty list */ -+ g_ptr_array_add (pvs, NULL); -+ return (BDLVMPVdata **) g_ptr_array_free (pvs, FALSE); - } -- else -+ else { - /* the error is already populated from the call */ -+ g_ptr_array_free (pvs, TRUE); - return NULL; -+ } - } - - lines = g_strsplit (output, "\n", 0); -@@ -1045,15 +1046,9 @@ BDLVMPVdata** bd_lvm_pvs (GError **error) { - return NULL; - } - -- /* now create the return value -- NULL-terminated array of BDLVMPVdata */ -- ret = g_new0 (BDLVMPVdata*, pvs->len + 1); -- for (i=0; i < pvs->len; i++) -- ret[i] = (BDLVMPVdata*) g_ptr_array_index (pvs, i); -- ret[i] = NULL; -- -- g_ptr_array_free (pvs, FALSE); -- -- return ret; -+ /* returning NULL-terminated array of BDLVMPVdata */ -+ g_ptr_array_add (pvs, NULL); -+ return (BDLVMPVdata **) g_ptr_array_free (pvs, FALSE); - } - - /** -@@ -1277,23 +1272,25 @@ BDLVMVGdata** bd_lvm_vgs (GError **error) { - gchar **lines = NULL; - gchar **lines_p = NULL; - guint num_items; -- GPtrArray *vgs = g_ptr_array_new (); -+ GPtrArray *vgs; - BDLVMVGdata *vgdata = NULL; -- BDLVMVGdata **ret = NULL; -- guint64 i = 0; -+ -+ vgs = g_ptr_array_new (); - - success = call_lvm_and_capture_output (args, NULL, &output, error); - if (!success) { - if (g_error_matches (*error, BD_UTILS_EXEC_ERROR, BD_UTILS_EXEC_ERROR_NOOUT)) { - /* no output => no VGs, not an error */ - g_clear_error (error); -- ret = g_new0 (BDLVMVGdata*, 1); -- ret[0] = NULL; -- return ret; -+ /* return an empty list */ -+ g_ptr_array_add (vgs, NULL); -+ return (BDLVMVGdata **) g_ptr_array_free (vgs, FALSE); - } -- else -+ else { - /* the error is already populated from the call */ -+ g_ptr_array_free (vgs, TRUE); - return NULL; -+ } - } - - lines 
= g_strsplit (output, "\n", 0); -@@ -1320,15 +1317,9 @@ BDLVMVGdata** bd_lvm_vgs (GError **error) { - return NULL; - } - -- /* now create the return value -- NULL-terminated array of BDLVMVGdata */ -- ret = g_new0 (BDLVMVGdata*, vgs->len + 1); -- for (i=0; i < vgs->len; i++) -- ret[i] = (BDLVMVGdata*) g_ptr_array_index (vgs, i); -- ret[i] = NULL; -- -- g_ptr_array_free (vgs, FALSE); -- -- return ret; -+ /* returning NULL-terminated array of BDLVMVGdata */ -+ g_ptr_array_add (vgs, NULL); -+ return (BDLVMVGdata **) g_ptr_array_free (vgs, FALSE); - } - - /** -@@ -1676,27 +1667,28 @@ BDLVMLVdata** bd_lvm_lvs (const gchar *vg_name, GError **error) { - gchar **lines = NULL; - gchar **lines_p = NULL; - guint num_items; -- GPtrArray *lvs = g_ptr_array_new (); -+ GPtrArray *lvs; - BDLVMLVdata *lvdata = NULL; -- BDLVMLVdata **ret = NULL; -- guint64 i = 0; -+ -+ lvs = g_ptr_array_new (); - - if (vg_name) - args[9] = vg_name; - - success = call_lvm_and_capture_output (args, NULL, &output, error); -- - if (!success) { - if (g_error_matches (*error, BD_UTILS_EXEC_ERROR, BD_UTILS_EXEC_ERROR_NOOUT)) { - /* no output => no LVs, not an error */ - g_clear_error (error); -- ret = g_new0 (BDLVMLVdata*, 1); -- ret[0] = NULL; -- return ret; -+ /* return an empty list */ -+ g_ptr_array_add (lvs, NULL); -+ return (BDLVMLVdata **) g_ptr_array_free (lvs, FALSE); - } -- else -+ else { - /* the error is already populated from the call */ -+ g_ptr_array_free (lvs, TRUE); - return NULL; -+ } - } - - lines = g_strsplit (output, "\n", 0); -@@ -1719,19 +1711,13 @@ BDLVMLVdata** bd_lvm_lvs (const gchar *vg_name, GError **error) { - if (lvs->len == 0) { - g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_PARSE, - "Failed to parse information about LVs"); -- g_ptr_array_free (lvs, FALSE); -+ g_ptr_array_free (lvs, TRUE); - return NULL; - } - -- /* now create the return value -- NULL-terminated array of BDLVMLVdata */ -- ret = g_new0 (BDLVMLVdata*, lvs->len + 1); -- for (i=0; i < lvs->len; i++) -- ret[i] = (BDLVMLVdata*) g_ptr_array_index (lvs, i); -- ret[i] = NULL; -- -- g_ptr_array_free (lvs, FALSE); -- -- return ret; -+ /* returning NULL-terminated array of BDLVMLVdata */ -+ g_ptr_array_add (lvs, NULL); -+ return (BDLVMLVdata **) g_ptr_array_free (lvs, FALSE); - } - - /** --- -2.21.0 - - -From fa48a6e64181e7becadbad8202be6de1829b4b9b Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Wed, 24 Apr 2019 16:31:52 +0200 -Subject: [PATCH 03/17] lvm: Fix leaking BDLVMPVdata.vg_uuid - ---- - src/lib/plugin_apis/lvm.api | 2 ++ - src/plugins/lvm.c | 2 ++ - 2 files changed, 4 insertions(+) - -diff --git a/src/lib/plugin_apis/lvm.api b/src/lib/plugin_apis/lvm.api -index ce47c51..bffe2ce 100644 ---- a/src/lib/plugin_apis/lvm.api -+++ b/src/lib/plugin_apis/lvm.api -@@ -114,6 +114,7 @@ BDLVMPVdata* bd_lvm_pvdata_copy (BDLVMPVdata *data) { - new_data->pv_size = data->pv_size; - new_data->pe_start = data->pe_start; - new_data->vg_name = g_strdup (data->vg_name); -+ new_data->vg_uuid = g_strdup (data->vg_uuid); - new_data->vg_size = data->vg_size; - new_data->vg_free = data->vg_free; - new_data->vg_extent_size = data->vg_extent_size; -@@ -136,6 +137,7 @@ void bd_lvm_pvdata_free (BDLVMPVdata *data) { - g_free (data->pv_name); - g_free (data->pv_uuid); - g_free (data->vg_name); -+ g_free (data->vg_uuid); - g_free (data); - } - -diff --git a/src/plugins/lvm.c b/src/plugins/lvm.c -index c2f2bf8..a6d738d 100644 ---- a/src/plugins/lvm.c -+++ b/src/plugins/lvm.c -@@ -63,6 +63,7 @@ BDLVMPVdata* bd_lvm_pvdata_copy (BDLVMPVdata *data) { - 
new_data->pv_size = data->pv_size; - new_data->pe_start = data->pe_start; - new_data->vg_name = g_strdup (data->vg_name); -+ new_data->vg_uuid = g_strdup (data->vg_uuid); - new_data->vg_size = data->vg_size; - new_data->vg_free = data->vg_free; - new_data->vg_extent_size = data->vg_extent_size; -@@ -80,6 +81,7 @@ void bd_lvm_pvdata_free (BDLVMPVdata *data) { - g_free (data->pv_name); - g_free (data->pv_uuid); - g_free (data->vg_name); -+ g_free (data->vg_uuid); - g_free (data); - } - --- -2.21.0 - - -From 1b44335a35d8d886f3a251f1c51d1f1039651d4e Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Wed, 24 Apr 2019 18:36:21 +0200 -Subject: [PATCH 04/17] exec: Fix some memory leaks - ---- - src/utils/exec.c | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/src/utils/exec.c b/src/utils/exec.c -index 28635a1..b1ca436 100644 ---- a/src/utils/exec.c -+++ b/src/utils/exec.c -@@ -228,8 +228,12 @@ gboolean bd_utils_exec_and_report_status_error (const gchar **argv, const BDExtr - log_out (task_id, stdout_data, stderr_data); - log_done (task_id, *status); - -+ g_free (args); -+ - if (!success) { - /* error is already populated from the call */ -+ g_free (stdout_data); -+ g_free (stderr_data); - return FALSE; - } - -@@ -247,7 +251,6 @@ gboolean bd_utils_exec_and_report_status_error (const gchar **argv, const BDExtr - return FALSE; - } - -- g_free (args); - g_free (stdout_data); - g_free (stderr_data); - return TRUE; -@@ -398,14 +401,17 @@ gboolean bd_utils_exec_and_report_progress (const gchar **argv, const BDExtraArg - G_SPAWN_DEFAULT|G_SPAWN_SEARCH_PATH|G_SPAWN_DO_NOT_REAP_CHILD, - NULL, NULL, &pid, NULL, &out_fd, &err_fd, error); - -- if (!ret) -+ if (!ret) { - /* error is already populated */ -+ g_free (args); - return FALSE; -+ } - - args_str = g_strjoinv (" ", args ? 
(gchar **) args : (gchar **) argv); - msg = g_strdup_printf ("Started '%s'", args_str); - progress_id = bd_utils_report_started (msg); - g_free (args_str); -+ g_free (args); - g_free (msg); - - out_pipe = g_io_channel_unix_new (out_fd); --- -2.21.0 - - -From e943381bf7c1aeb27f6e308f95e3f64436f25247 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 15:39:24 +0200 -Subject: [PATCH 05/17] mdraid: Fix g_strsplit() leaks - ---- - src/plugins/mdraid.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/src/plugins/mdraid.c b/src/plugins/mdraid.c -index 6465dfe..178379e 100644 ---- a/src/plugins/mdraid.c -+++ b/src/plugins/mdraid.c -@@ -255,10 +255,15 @@ static GHashTable* parse_mdadm_vars (const gchar *str, const gchar *item_sep, co - /* mdadm --examine output for a set being migrated */ - vals = g_strsplit (key_val[1], "<--", 2); - g_hash_table_insert (table, g_strstrip (key_val[0]), g_strstrip (vals[0])); -+ g_free (key_val[1]); - g_free (vals[1]); -+ g_free (vals); - } else { - g_hash_table_insert (table, g_strstrip (key_val[0]), g_strstrip (key_val[1])); - } -+ g_free (key_val); -+ } else { -+ g_strfreev (key_val); - } - (*num_items)++; - } else --- -2.21.0 - - -From 21fba5737901b6fa82b3c04bb9058ac650b8e54d Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 15:39:39 +0200 -Subject: [PATCH 06/17] s390: Fix g_strsplit() leaks - ---- - src/plugins/s390.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -diff --git a/src/plugins/s390.c b/src/plugins/s390.c -index dcb5bc9..ac12b04 100644 ---- a/src/plugins/s390.c -+++ b/src/plugins/s390.c -@@ -775,8 +775,6 @@ gboolean bd_s390_zfcp_scsi_offline(const gchar *devno, const gchar *wwpn, const - gchar *hba_path = NULL; - gchar *wwpn_path = NULL; - gchar *lun_path = NULL; -- gchar *host = NULL; -- gchar *fcplun = NULL; - gchar *scsidev = NULL; - gchar *fcpsysfs = NULL; - gchar *scsidel = NULL; -@@ -804,13 +802,11 @@ gboolean bd_s390_zfcp_scsi_offline(const gchar *devno, const gchar *wwpn, const - /* tokenize line and assign certain values we'll need later */ - tokens = g_strsplit (line, delim, 8); - -- host = tokens[1]; -- fcplun = tokens[7]; -- -- scsidev = g_strdup_printf ("%s:%s:%s:%s", host + 4, channel, devid, fcplun); -+ scsidev = g_strdup_printf ("%s:%s:%s:%s", tokens[1] /* host */ + 4, channel, devid, tokens[7] /* fcplun */); - scsidev = g_strchomp (scsidev); - fcpsysfs = g_strdup_printf ("%s/%s", scsidevsysfs, scsidev); - fcpsysfs = g_strchomp (fcpsysfs); -+ g_strfreev (tokens); - - /* get HBA path value (same as device number) */ - hba_path = g_strdup_printf ("%s/hba_id", fcpsysfs); --- -2.21.0 - - -From 9e751457a983a4ba07fcd285b15af29aad051b25 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 16:10:11 +0200 -Subject: [PATCH 07/17] ext: Fix g_strsplit() leaks - ---- - src/plugins/fs/ext.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/src/plugins/fs/ext.c b/src/plugins/fs/ext.c -index 03ac1c5..98f2861 100644 ---- a/src/plugins/fs/ext.c -+++ b/src/plugins/fs/ext.c -@@ -534,6 +534,7 @@ static GHashTable* parse_output_vars (const gchar *str, const gchar *item_sep, c - if (g_strv_length (key_val) == 2) { - /* we only want to process valid lines (with the separator) */ - g_hash_table_insert (table, g_strstrip (key_val[0]), g_strstrip (key_val[1])); -+ g_free (key_val); - (*num_items)++; - } else - /* invalid line, just free key_val */ --- -2.21.0 - - -From bb774818d210f7159ed0b7db11c1a8490ac7ee0f Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 
16:15:36 +0200 -Subject: [PATCH 08/17] ext: Fix g_match_info_fetch() leaks - ---- - src/plugins/fs/ext.c | 16 +++++++++++++--- - 1 file changed, 13 insertions(+), 3 deletions(-) - -diff --git a/src/plugins/fs/ext.c b/src/plugins/fs/ext.c -index 98f2861..91b0ff5 100644 ---- a/src/plugins/fs/ext.c -+++ b/src/plugins/fs/ext.c -@@ -96,13 +96,23 @@ static gint8 filter_line_fsck (const gchar * line, guint8 total_stages, GError * - guint8 stage; - gint64 val_cur; - gint64 val_total; -+ gchar *s; - - /* The output_regex ensures we have a number in these matches, so we can skip - * tests for conversion errors. - */ -- stage = (guint8) g_ascii_strtoull (g_match_info_fetch (match_info, 1), (char **)NULL, 10); -- val_cur = g_ascii_strtoll (g_match_info_fetch (match_info, 2), (char **)NULL, 10); -- val_total = g_ascii_strtoll (g_match_info_fetch (match_info, 3), (char **)NULL, 10); -+ s = g_match_info_fetch (match_info, 1); -+ stage = (guint8) g_ascii_strtoull (s, (char **)NULL, 10); -+ g_free (s); -+ -+ s = g_match_info_fetch (match_info, 2); -+ val_cur = g_ascii_strtoll (s, (char **)NULL, 10); -+ g_free (s); -+ -+ s = g_match_info_fetch (match_info, 3); -+ val_total = g_ascii_strtoll (s, (char **)NULL, 10); -+ g_free (s); -+ - perc = compute_percents (stage, total_stages, val_cur, val_total); - } else { - g_match_info_free (match_info); --- -2.21.0 - - -From 70779be74507b5e8a3d4d6c5a226906186844f0b Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 16:16:06 +0200 -Subject: [PATCH 09/17] kbd: Fix g_match_info_fetch() leaks - ---- - src/plugins/kbd.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/src/plugins/kbd.c b/src/plugins/kbd.c -index 33208c7..a2908ec 100644 ---- a/src/plugins/kbd.c -+++ b/src/plugins/kbd.c -@@ -782,7 +782,11 @@ gboolean bd_kbd_bcache_create (const gchar *backing_device, const gchar *cache_d - for (i=0; lines[i] && n < 2; i++) { - success = g_regex_match (regex, lines[i], 0, &match_info); - if (success) { -- strncpy (device_uuid[n], g_match_info_fetch (match_info, 1), 63); -+ gchar *s; -+ -+ s = g_match_info_fetch (match_info, 1); -+ strncpy (device_uuid[n], s, 63); -+ g_free (s); - device_uuid[n][63] = '\0'; - n++; - g_match_info_free (match_info); --- -2.21.0 - - -From 9c364521d3a7b0b8c04061003f16d73eee8778c8 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 17:16:37 +0200 -Subject: [PATCH 10/17] part: Fix leaking objects - ---- - src/plugins/part.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/src/plugins/part.c b/src/plugins/part.c -index 31c6591..cf62366 100644 ---- a/src/plugins/part.c -+++ b/src/plugins/part.c -@@ -913,6 +913,8 @@ static gboolean resize_part (PedPartition *part, PedDevice *dev, PedDisk *disk, - return FALSE; - } - -+ ped_geometry_destroy (geom); -+ ped_constraint_destroy (constr); - finish_alignment_constraint (disk, orig_flag_state); - return TRUE; - } --- -2.21.0 - - -From 2d24ea310fe65b020d7ef3450057bde1383910aa Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 17:17:01 +0200 -Subject: [PATCH 11/17] ext: Fix leaking string - ---- - src/plugins/fs/ext.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/plugins/fs/ext.c b/src/plugins/fs/ext.c -index 91b0ff5..fbec90a 100644 ---- a/src/plugins/fs/ext.c -+++ b/src/plugins/fs/ext.c -@@ -560,8 +560,10 @@ static BDFSExtInfo* get_ext_info_from_table (GHashTable *table, gboolean free_ta - gchar *value = NULL; - - ret->label = g_strdup ((gchar*) g_hash_table_lookup (table, "Filesystem volume 
name")); -- if ((!ret->label) || (g_strcmp0 (ret->label, "") == 0)) -+ if (!ret->label || g_strcmp0 (ret->label, "") == 0) { -+ g_free (ret->label); - ret->label = g_strdup (""); -+ } - ret->uuid = g_strdup ((gchar*) g_hash_table_lookup (table, "Filesystem UUID")); - ret->state = g_strdup ((gchar*) g_hash_table_lookup (table, "Filesystem state")); - --- -2.21.0 - - -From 957b4b84eeaacab612ea5e267dc37b194f9d65f3 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 9 May 2019 18:23:46 +0200 -Subject: [PATCH 12/17] part: Fix leaking string in args - ---- - src/plugins/part.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/src/plugins/part.c b/src/plugins/part.c -index cf62366..8b2285f 100644 ---- a/src/plugins/part.c -+++ b/src/plugins/part.c -@@ -373,10 +373,9 @@ static gchar* get_part_type_guid_and_gpt_flags (const gchar *device, int part_nu - - args[1] = g_strdup_printf ("-i%d", part_num); - success = bd_utils_exec_and_capture_output (args, NULL, &output, error); -- if (!success) { -- g_free ((gchar *) args[1]); -+ g_free ((gchar *) args[1]); -+ if (!success) - return FALSE; -- } - - lines = g_strsplit (output, "\n", 0); - g_free (output); --- -2.21.0 - - -From 6613cc6b28766607801f95b54bcbc872de02412b Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 16 May 2019 12:46:56 +0200 -Subject: [PATCH 13/17] mdraid: Fix leaking error - ---- - src/plugins/mdraid.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/src/plugins/mdraid.c b/src/plugins/mdraid.c -index 178379e..8f5b2ca 100644 ---- a/src/plugins/mdraid.c -+++ b/src/plugins/mdraid.c -@@ -178,7 +178,7 @@ gboolean bd_md_check_deps (void) { - } - - if (!ret) -- g_warning("Cannot load the MDRAID plugin"); -+ g_warning ("Cannot load the MDRAID plugin"); - - return ret; - } -@@ -357,8 +357,7 @@ static BDMDExamineData* get_examine_data_from_table (GHashTable *table, gboolean - } - - if (bs_error) { -- g_set_error (error, BD_MD_ERROR, BD_MD_ERROR_PARSE, -- "Failed to parse chunk size from mdexamine data: %s", bs_error->msg); -+ g_warning ("get_examine_data_from_table(): Failed to parse chunk size from mdexamine data: %s", bs_error->msg); - bs_clear_error (&bs_error); - } - } else --- -2.21.0 - - -From 9ad488f460f65abded312be4b5cf1f00f9fc8aa5 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 16 May 2019 12:54:09 +0200 -Subject: [PATCH 14/17] mdraid: Mark 'error' arg in - get_examine_data_from_table() as unused - ---- - src/plugins/mdraid.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/plugins/mdraid.c b/src/plugins/mdraid.c -index 8f5b2ca..a333e6f 100644 ---- a/src/plugins/mdraid.c -+++ b/src/plugins/mdraid.c -@@ -275,7 +275,7 @@ static GHashTable* parse_mdadm_vars (const gchar *str, const gchar *item_sep, co - return table; - } - --static BDMDExamineData* get_examine_data_from_table (GHashTable *table, gboolean free_table, GError **error) { -+static BDMDExamineData* get_examine_data_from_table (GHashTable *table, gboolean free_table, G_GNUC_UNUSED GError **error) { - BDMDExamineData *data = g_new0 (BDMDExamineData, 1); - gchar *value = NULL; - gchar *first_space = NULL; --- -2.21.0 - - -From eaa07958b928141783202967cbae0e86fdee488d Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 16 May 2019 12:55:50 +0200 -Subject: [PATCH 15/17] mdraid: Fix leaking BDMDExamineData.metadata - ---- - src/plugins/mdraid.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/src/plugins/mdraid.c b/src/plugins/mdraid.c -index a333e6f..74af744 100644 ---- 
a/src/plugins/mdraid.c -+++ b/src/plugins/mdraid.c -@@ -1049,6 +1049,7 @@ BDMDExamineData* bd_md_examine (const gchar *device, GError **error) { - } - - /* try to get metadata version from the output (may be missing) */ -+ g_free (ret->metadata); - value = (gchar*) g_hash_table_lookup (table, "metadata"); - if (value) - ret->metadata = g_strdup (value); --- -2.21.0 - - -From 6b384841027c22f3fac28dd295e8a6124d4d7498 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Thu, 16 May 2019 17:40:51 +0200 -Subject: [PATCH 16/17] btrfs: Fix number of memory leaks - ---- - src/plugins/btrfs.c | 40 ++++++++++++++++++++++------------------ - 1 file changed, 22 insertions(+), 18 deletions(-) - -diff --git a/src/plugins/btrfs.c b/src/plugins/btrfs.c -index c76ea3f..8a2c81a 100644 ---- a/src/plugins/btrfs.c -+++ b/src/plugins/btrfs.c -@@ -619,9 +619,7 @@ BDBtrfsDeviceInfo** bd_btrfs_list_devices (const gchar *device, GError **error) - "path[ \\t]+(?P\\S+)\n"; - GRegex *regex = NULL; - GMatchInfo *match_info = NULL; -- guint8 i = 0; -- GPtrArray *dev_infos = g_ptr_array_new (); -- BDBtrfsDeviceInfo** ret = NULL; -+ GPtrArray *dev_infos; - - if (!check_deps (&avail_deps, DEPS_BTRFS_MASK, deps, DEPS_LAST, &deps_check_lock, error) || - !check_module_deps (&avail_module_deps, MODULE_DEPS_BTRFS_MASK, module_deps, MODULE_DEPS_LAST, &deps_check_lock, error)) -@@ -635,13 +633,16 @@ BDBtrfsDeviceInfo** bd_btrfs_list_devices (const gchar *device, GError **error) - } - - success = bd_utils_exec_and_capture_output (argv, NULL, &output, error); -- if (!success) -+ if (!success) { -+ g_regex_unref (regex); - /* error is already populated from the previous call */ - return NULL; -+ } - - lines = g_strsplit (output, "\n", 0); - g_free (output); - -+ dev_infos = g_ptr_array_new (); - for (line_p = lines; *line_p; line_p++) { - success = g_regex_match (regex, *line_p, 0, &match_info); - if (!success) { -@@ -654,21 +655,16 @@ BDBtrfsDeviceInfo** bd_btrfs_list_devices (const gchar *device, GError **error) - } - - g_strfreev (lines); -+ g_regex_unref (regex); - - if (dev_infos->len == 0) { - g_set_error (error, BD_BTRFS_ERROR, BD_BTRFS_ERROR_PARSE, "Failed to parse information about devices"); -+ g_ptr_array_free (dev_infos, TRUE); - return NULL; - } - -- /* now create the return value -- NULL-terminated array of BDBtrfsDeviceInfo */ -- ret = g_new0 (BDBtrfsDeviceInfo*, dev_infos->len + 1); -- for (i=0; i < dev_infos->len; i++) -- ret[i] = (BDBtrfsDeviceInfo*) g_ptr_array_index (dev_infos, i); -- ret[i] = NULL; -- -- g_ptr_array_free (dev_infos, FALSE); -- -- return ret; -+ g_ptr_array_add (dev_infos, NULL); -+ return (BDBtrfsDeviceInfo **) g_ptr_array_free (dev_infos, FALSE); - } - - /** -@@ -700,7 +696,7 @@ BDBtrfsSubvolumeInfo** bd_btrfs_list_subvolumes (const gchar *mountpoint, gboole - guint64 i = 0; - guint64 y = 0; - guint64 next_sorted_idx = 0; -- GPtrArray *subvol_infos = g_ptr_array_new (); -+ GPtrArray *subvol_infos; - BDBtrfsSubvolumeInfo* item = NULL; - BDBtrfsSubvolumeInfo* swap_item = NULL; - BDBtrfsSubvolumeInfo** ret = NULL; -@@ -724,11 +720,11 @@ BDBtrfsSubvolumeInfo** bd_btrfs_list_subvolumes (const gchar *mountpoint, gboole - - success = bd_utils_exec_and_capture_output (argv, NULL, &output, error); - if (!success) { -+ g_regex_unref (regex); - if (g_error_matches (*error, BD_UTILS_EXEC_ERROR, BD_UTILS_EXEC_ERROR_NOOUT)) { - /* no output -> no subvolumes */ -- ret = g_new0 (BDBtrfsSubvolumeInfo*, 1); - g_clear_error (error); -- return ret; -+ return g_new0 (BDBtrfsSubvolumeInfo*, 1); - } else { - /* error 
is already populated from the call above or simply no output*/ - return NULL; -@@ -738,6 +734,7 @@ BDBtrfsSubvolumeInfo** bd_btrfs_list_subvolumes (const gchar *mountpoint, gboole - lines = g_strsplit (output, "\n", 0); - g_free (output); - -+ subvol_infos = g_ptr_array_new (); - for (line_p = lines; *line_p; line_p++) { - success = g_regex_match (regex, *line_p, 0, &match_info); - if (!success) { -@@ -750,9 +747,11 @@ BDBtrfsSubvolumeInfo** bd_btrfs_list_subvolumes (const gchar *mountpoint, gboole - } - - g_strfreev (lines); -+ g_regex_unref (regex); - - if (subvol_infos->len == 0) { - g_set_error (error, BD_BTRFS_ERROR, BD_BTRFS_ERROR_PARSE, "Failed to parse information about subvolumes"); -+ g_ptr_array_free (subvol_infos, TRUE); - return NULL; - } - -@@ -828,21 +827,26 @@ BDBtrfsFilesystemInfo* bd_btrfs_filesystem_info (const gchar *device, GError **e - } - - success = bd_utils_exec_and_capture_output (argv, NULL, &output, error); -- if (!success) -+ if (!success) { - /* error is already populated from the call above or just empty - output */ -+ g_regex_unref (regex); - return NULL; -+ } - - success = g_regex_match (regex, output, 0, &match_info); - if (!success) { - g_regex_unref (regex); - g_match_info_free (match_info); -+ g_free (output); - return NULL; - } - -- g_regex_unref (regex); - ret = get_filesystem_info_from_match (match_info); - g_match_info_free (match_info); -+ g_regex_unref (regex); -+ -+ g_free (output); - - return ret; - } --- -2.21.0 - - -From 6f0ec1d90584c59da9bb5f22f692b7d5ebfb2708 Mon Sep 17 00:00:00 2001 -From: Tomas Bzatek -Date: Fri, 17 May 2019 16:09:50 +0200 -Subject: [PATCH 17/17] module: Fix libkmod related leak - ---- - src/utils/module.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - -diff --git a/src/utils/module.c b/src/utils/module.c -index 0709633..cdad960 100644 ---- a/src/utils/module.c -+++ b/src/utils/module.c -@@ -60,7 +60,7 @@ gboolean bd_utils_have_kernel_module (const gchar *module_name, GError **error) - return FALSE; - } - /* prevent libkmod from spamming our STDERR */ -- kmod_set_log_priority(ctx, LOG_CRIT); -+ kmod_set_log_priority (ctx, LOG_CRIT); - - ret = kmod_module_new_from_name (ctx, module_name, &mod); - if (ret < 0) { -@@ -106,7 +106,7 @@ gboolean bd_utils_load_kernel_module (const gchar *module_name, const gchar *opt - return FALSE; - } - /* prevent libkmod from spamming our STDERR */ -- kmod_set_log_priority(ctx, LOG_CRIT); -+ kmod_set_log_priority (ctx, LOG_CRIT); - - ret = kmod_module_new_from_name (ctx, module_name, &mod); - if (ret < 0) { -@@ -169,7 +169,7 @@ gboolean bd_utils_unload_kernel_module (const gchar *module_name, GError **error - return FALSE; - } - /* prevent libkmod from spamming our STDERR */ -- kmod_set_log_priority(ctx, LOG_CRIT); -+ kmod_set_log_priority (ctx, LOG_CRIT); - - ret = kmod_module_new_from_loaded (ctx, &list); - if (ret < 0) { -@@ -187,6 +187,7 @@ gboolean bd_utils_unload_kernel_module (const gchar *module_name, GError **error - else - kmod_module_unref (mod); - } -+ kmod_module_unref_list (list); - - if (!found) { - g_set_error (error, BD_UTILS_MODULE_ERROR, BD_UTILS_MODULE_ERROR_NOEXIST, --- -2.21.0 - diff --git a/SOURCES/0005-swap-status-on-dm.patch b/SOURCES/0005-swap-status-on-dm.patch deleted file mode 100644 index 2f95a3c..0000000 --- a/SOURCES/0005-swap-status-on-dm.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 3d75598da3fda5344934fe9cd86297856f340909 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Tue, 12 Feb 2019 12:21:03 +0100 -Subject: [PATCH] Fix checking swap 
status on lvm/md - -'bd_utils_resolve_device' returns already resolved part, there is -no '../' to remove. - -Resolves: rhbz#1649815 ---- - src/plugins/swap.c | 11 ++--------- - 1 file changed, 2 insertions(+), 9 deletions(-) - -diff --git a/src/plugins/swap.c b/src/plugins/swap.c -index 28db6f3..c8cdb57 100644 ---- a/src/plugins/swap.c -+++ b/src/plugins/swap.c -@@ -400,7 +400,6 @@ gboolean bd_swap_swapoff (const gchar *device, GError **error) { - gboolean bd_swap_swapstatus (const gchar *device, GError **error) { - gchar *file_content; - gchar *real_device = NULL; -- gchar *dev_path = NULL; - gsize length; - gchar *next_line; - gboolean success; -@@ -414,19 +413,15 @@ gboolean bd_swap_swapstatus (const gchar *device, GError **error) { - /* get the real device node for device-mapper devices since the ones - with meaningful names are just dev_paths */ - if (g_str_has_prefix (device, "/dev/mapper/") || g_str_has_prefix (device, "/dev/md/")) { -- dev_path = bd_utils_resolve_device (device, error); -- if (!dev_path) { -+ real_device = bd_utils_resolve_device (device, error); -+ if (!real_device) { - /* the device doesn't exist and thus is not an active swap */ - g_clear_error (error); - return FALSE; - } -- -- /* the dev_path starts with "../" */ -- real_device = g_strdup_printf ("/dev/%s", dev_path + 3); - } - - if (g_str_has_prefix (file_content, real_device ? real_device : device)) { -- g_free (dev_path); - g_free (real_device); - g_free (file_content); - return TRUE; -@@ -435,7 +430,6 @@ gboolean bd_swap_swapstatus (const gchar *device, GError **error) { - next_line = (strchr (file_content, '\n') + 1); - while (next_line && ((gsize)(next_line - file_content) < length)) { - if (g_str_has_prefix (next_line, real_device ? real_device : device)) { -- g_free (dev_path); - g_free (real_device); - g_free (file_content); - return TRUE; -@@ -444,7 +438,6 @@ gboolean bd_swap_swapstatus (const gchar *device, GError **error) { - next_line = (strchr (next_line, '\n') + 1); - } - -- g_free (dev_path); - g_free (real_device); - g_free (file_content); - return FALSE; --- -2.20.1 - diff --git a/SOURCES/0006-use-cryptsetup-to-check-LUKS2-label.patch b/SOURCES/0006-use-cryptsetup-to-check-LUKS2-label.patch deleted file mode 100644 index 49e519a..0000000 --- a/SOURCES/0006-use-cryptsetup-to-check-LUKS2-label.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d6c4429bbb09fae249d7b97b06a9346cdc99f962 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Wed, 19 Dec 2018 09:36:30 +0100 -Subject: [PATCH] Use cryptsetup to check LUKS2 label - -libblkid on CentOS 7.6 doesn't support reading LUKS2 labels ---- - tests/crypto_test.py | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/tests/crypto_test.py b/tests/crypto_test.py -index 7320e74..b8aacee 100644 ---- a/tests/crypto_test.py -+++ b/tests/crypto_test.py -@@ -148,8 +148,11 @@ class CryptoTestFormat(CryptoTestCase): - BlockDev.CryptoLUKSVersion.LUKS2, extra) - self.assertTrue(succ) - -- _ret, label, _err = run_command("lsblk -oLABEL -n %s" % self.loop_dev) -- self.assertEqual(label, "blockdevLUKS") -+ _ret, out, err = run_command("cryptsetup luksDump %s" % self.loop_dev) -+ m = re.search(r"Label:\s*(\S+)\s*", out) -+ if not m or len(m.groups()) != 1: -+ self.fail("Failed to get label information from:\n%s %s" % (out, err)) -+ self.assertEqual(m.group(1), "blockdevLUKS") - - # different key derivation function - pbkdf = BlockDev.CryptoLUKSPBKDF(type="pbkdf2") --- -2.21.0 - diff --git 
a/SOURCES/0007-fix-expected-cache-pool-name-with-newest-LVM.patch b/SOURCES/0007-fix-expected-cache-pool-name-with-newest-LVM.patch deleted file mode 100644 index 1b508c2..0000000 --- a/SOURCES/0007-fix-expected-cache-pool-name-with-newest-LVM.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 23e6f2024c34fc6e1b3a67c416334bba2b55d5a9 Mon Sep 17 00:00:00 2001 -From: Vojtech Trefny -Date: Thu, 31 Oct 2019 12:50:03 +0100 -Subject: [PATCH] Fix expected cache pool name with newest LVM - -LVM now adds "_cpool" suffix to attached pools. ---- - tests/lvm_dbus_tests.py | 17 ++++++++++++++++- - tests/lvm_test.py | 19 +++++++++++++++++-- - 2 files changed, 33 insertions(+), 3 deletions(-) - -diff --git a/tests/lvm_dbus_tests.py b/tests/lvm_dbus_tests.py -index 625a392..f2a17c9 100644 ---- a/tests/lvm_dbus_tests.py -+++ b/tests/lvm_dbus_tests.py -@@ -6,6 +6,7 @@ import overrides_hack - import six - import re - import subprocess -+from distutils.version import LooseVersion - from itertools import chain - - from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, run_command, TestTags, tag_test -@@ -31,6 +32,13 @@ class LVMTestCase(unittest.TestCase): - else: - BlockDev.reinit([cls.ps, cls.ps2], True, None) - -+ def _get_lvm_version(self): -+ _ret, out, _err = run_command("lvm version") -+ m = re.search(r"LVM version:\s+([\d\.]+)", out) -+ if not m or len(m.groups()) != 1: -+ raise RuntimeError("Failed to determine LVM version from: %s" % out) -+ return LooseVersion(m.groups()[0]) -+ - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmNoDevTestCase(LVMTestCase): - -@@ -1291,7 +1299,14 @@ class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): - succ = BlockDev.lvm_cache_attach("testVG", "testLV", "testCache", None) - self.assertTrue(succ) - -- self.assertEqual(BlockDev.lvm_cache_pool_name("testVG", "testLV"), "testCache") -+ lvm_version = self._get_lvm_version() -+ if lvm_version < LooseVersion("2.03.06"): -+ cpool_name = "testCache" -+ else: -+ # since 2.03.06 LVM adds _cpool suffix to the cache pool after attaching it -+ cpool_name = "testCache_cpool" -+ -+ self.assertEqual(BlockDev.lvm_cache_pool_name("testVG", "testLV"), cpool_name) - - @unittest.skipUnless(lvm_dbus_running, "LVM DBus not running") - class LvmPVVGcachedLVstatsTestCase(LvmPVVGLVTestCase): -diff --git a/tests/lvm_test.py b/tests/lvm_test.py -index 0b2c5ad..242ca94 100644 ---- a/tests/lvm_test.py -+++ b/tests/lvm_test.py -@@ -6,8 +6,9 @@ import overrides_hack - import six - import re - import subprocess -+from distutils.version import LooseVersion - --from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, TestTags, tag_test -+from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, fake_utils, fake_path, skip_on, TestTags, tag_test, run_command - from gi.repository import BlockDev, GLib - - -@@ -25,6 +26,13 @@ class LVMTestCase(unittest.TestCase): - else: - BlockDev.reinit(cls.requested_plugins, True, None) - -+ def _get_lvm_version(self): -+ _ret, out, _err = run_command("lvm version") -+ m = re.search(r"LVM version:\s+([\d\.]+)", out) -+ if not m or len(m.groups()) != 1: -+ raise RuntimeError("Failed to determine LVM version from: %s" % out) -+ return LooseVersion(m.groups()[0]) -+ - - class LvmNoDevTestCase(LVMTestCase): - def __init__(self, *args, **kwargs): -@@ -1249,7 +1257,14 @@ class LvmPVVGcachedLVpoolTestCase(LvmPVVGLVTestCase): - succ = BlockDev.lvm_cache_attach("testVG", "testLV", "testCache", None) - 
self.assertTrue(succ) - -- self.assertEqual(BlockDev.lvm_cache_pool_name("testVG", "testLV"), "testCache") -+ lvm_version = self._get_lvm_version() -+ if lvm_version < LooseVersion("2.03.06"): -+ cpool_name = "testCache" -+ else: -+ # since 2.03.06 LVM adds _cpool suffix to the cache pool after attaching it -+ cpool_name = "testCache_cpool" -+ -+ self.assertEqual(BlockDev.lvm_cache_pool_name("testVG", "testLV"), cpool_name) - - class LvmPVVGcachedLVstatsTestCase(LvmPVVGLVTestCase): - @tag_test(TestTags.SLOW) --- -2.21.0 - diff --git a/SPECS/libblockdev.spec b/SPECS/libblockdev.spec index 116d353..96757cf 100644 --- a/SPECS/libblockdev.spec +++ b/SPECS/libblockdev.spec @@ -19,6 +19,7 @@ %define with_gi 1 %define with_escrow 1 %define with_dmraid 0 +%define with_tools 1 # python2 is not available on RHEL > 7 and not needed on Fedora > 29 %if 0%{?rhel} > 7 || 0%{?fedora} > 29 || %{with_python2} == 0 @@ -44,17 +45,13 @@ %define lvm_dbus_copts --without-lvm-dbus %endif -# vdo is not available on Fedora -%if (0%{?fedora}) || %{with_vdo} == 0 -%define with_vdo 0 -%define vdo_copts --without-vdo -%endif - -# vdo is available only on x86_64, aarch64, s390x and ppc64le +# vdo is not available on non-x86_64 on older RHEL +%if (0%{?rhel} && 0%{?rhel} <= 7) %ifnarch x86_64 aarch64 s390x ppc64le %define with_vdo 0 %define vdo_copts --without-vdo %endif +%endif # btrfs is not available on RHEL > 7 %if 0%{?rhel} > 7 || %{with_btrfs} == 0 @@ -62,6 +59,11 @@ %define btrfs_copts --without-btrfs %endif +# dmraid is not available on RHEL > 7 +%if 0%{?rhel} > 7 +%define with_dmraid 0 +%endif + %if %{with_btrfs} != 1 %define btrfs_copts --without-btrfs %endif @@ -112,26 +114,22 @@ %if %{with_vdo} != 1 %define vdo_copts --without-vdo %endif +%if %{with_tools} != 1 +%define tools_copts --without-tools +%endif %if %{with_gi} != 1 %define gi_copts --disable-introspection %endif -%define configure_opts %{?python2_copts} %{?python3_copts} %{?bcache_copts} %{?lvm_dbus_copts} %{?btrfs_copts} %{?crypto_copts} %{?dm_copts} %{?loop_copts} %{?lvm_copts} %{?lvm_dbus_copts} %{?mdraid_copts} %{?mpath_copts} %{?swap_copts} %{?kbd_copts} %{?part_copts} %{?fs_copts} %{?nvdimm_copts} %{?vdo_copts} %{?gi_copts} +%define configure_opts %{?python2_copts} %{?python3_copts} %{?bcache_copts} %{?lvm_dbus_copts} %{?btrfs_copts} %{?crypto_copts} %{?dm_copts} %{?loop_copts} %{?lvm_copts} %{?lvm_dbus_copts} %{?mdraid_copts} %{?mpath_copts} %{?swap_copts} %{?kbd_copts} %{?part_copts} %{?fs_copts} %{?nvdimm_copts} %{?vdo_copts} %{?tools_copts} %{?gi_copts} Name: libblockdev -Version: 2.19 -Release: 12%{?dist} +Version: 2.24 +Release: 1%{?dist} Summary: A library for low-level manipulation with block devices License: LGPLv2+ URL: https://github.com/storaged-project/libblockdev Source0: https://github.com/storaged-project/libblockdev/releases/download/%{version}-%{release}/%{name}-%{version}.tar.gz -Patch0: 0001-swap-error-codes.patch -Patch1: 0002-major-minor-macros.patch -Patch2: 0003-gating-tests-changes.patch -Patch3: 0004-memory-leaks.patch -Patch4: 0005-swap-status-on-dm.patch -Patch5: 0006-use-cryptsetup-to-check-LUKS2-label.patch -Patch6: 0007-fix-expected-cache-pool-name-with-newest-LVM.patch BuildRequires: glib2-devel %if %{with_gi} @@ -139,6 +137,9 @@ BuildRequires: gobject-introspection-devel %endif %if %{with_python2} BuildRequires: python2-devel +%else +# Obsolete the python2 subpackage to avoid errors on upgrade +Obsoletes: python2-blockdev < %{version}-%{release} %endif %if %{with_python3} BuildRequires: python3-devel @@ -147,6 
+148,7 @@ BuildRequires: python3-devel BuildRequires: gtk-doc %endif BuildRequires: glib2-doc +BuildRequires: autoconf-archive Requires: %{name}-utils%{?_isa} = %{version}-%{release} @@ -554,8 +556,17 @@ BuildRequires: libbytesize-devel BuildRequires: libyaml-devel Summary: The vdo plugin for the libblockdev library Requires: %{name}-utils%{?_isa} = %{version}-%{release} + +# weak dependencies doesn't work on older RHEL +%if (0%{?rhel} && 0%{?rhel} <= 7) Requires: vdo Requires: kmod-kvdo +%else +# we want to build the plugin everywhere but the dependencies might not be +# available so just use weak dependency +Recommends: vdo +Recommends: kmod-kvdo +%endif %description vdo The libblockdev library plugin (and in the same time a standalone library) @@ -572,6 +583,20 @@ This package contains header files and pkg-config files needed for development with the libblockdev-vdo plugin/library. %endif +%if %{with_tools} +%package tools +Summary: Various nice tools based on libblockdev +Requires: %{name} +Requires: %{name}-lvm +BuildRequires: libbytesize-devel +%if %{with_lvm_dbus} == 1 +Recommends: %{name}-lvm-dbus +%endif + +%description tools +Various nice storage-related tools based on libblockdev. + +%endif %ifarch s390 s390x %package s390 @@ -660,13 +685,6 @@ A meta-package that pulls all the libblockdev plugins as dependencies. %prep %setup -q -n %{name}-%{version} -%patch0 -p1 -%patch1 -p1 -%patch2 -p1 -%patch3 -p1 -%patch4 -p1 -%patch5 -p1 -%patch6 -p1 %build autoreconf -ivf @@ -952,6 +970,10 @@ find %{buildroot} -type f -name "*.la" | xargs %{__rm} %{_includedir}/blockdev/vdo.h %endif +%if %{with_tools} +%files tools +%{_bindir}/lvm-cache-stats +%endif %ifarch s390 s390x %files s390 @@ -966,13 +988,17 @@ find %{buildroot} -type f -name "*.la" | xargs %{__rm} %files plugins-all %changelog +* Fri May 22 2020 Vojtech Trefny - 2.24-1 +- Rebased to the latest upstream release 2.24 + Resolves: rhbz#1824153 + * Mon Dec 02 2019 Vojtech Trefny - 2.19-12 - Use cryptsetup to check LUKS2 label Resolves: rhbz#1778689 - Fix expected cache pool name with newest LVM Related: rhbz#1778689 -* Mon Jun 06 2019 Vojtech Trefny - 2.19-11 +* Thu Jun 06 2019 Vojtech Trefny - 2.19-11 - Fix checking swap status on lvm/md (vtrefny) Resolves: rhbz#1649815