diff --git a/SOURCES/0012-xfs-grow-support.patch b/SOURCES/0012-xfs-grow-support.patch new file mode 100644 index 0000000..1607c51 --- /dev/null +++ b/SOURCES/0012-xfs-grow-support.patch @@ -0,0 +1,459 @@ +From 433d863cd8a57e5fc30948ff905e6a477ed5f17c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 14 Jul 2020 11:27:08 +0200 +Subject: [PATCH 1/4] Add support for XFS format grow + +--- + blivet/formats/fs.py | 2 ++ + blivet/tasks/availability.py | 1 + + blivet/tasks/fsresize.py | 54 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 57 insertions(+) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index eee15aaa..12cb9885 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -1089,11 +1089,13 @@ class XFS(FS): + _formattable = True + _linux_native = True + _supported = True ++ _resizable = True + _packages = ["xfsprogs"] + _info_class = fsinfo.XFSInfo + _mkfs_class = fsmkfs.XFSMkfs + _readlabel_class = fsreadlabel.XFSReadLabel + _size_info_class = fssize.XFSSize ++ _resize_class = fsresize.XFSResize + _sync_class = fssync.XFSSync + _writelabel_class = fswritelabel.XFSWriteLabel + _writeuuid_class = fswriteuuid.XFSWriteUUID +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index b6b5955a..df62780c 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -455,5 +455,6 @@ TUNE2FS_APP = application_by_version("tune2fs", E2FSPROGS_VERSION) + XFSADMIN_APP = application("xfs_admin") + XFSDB_APP = application("xfs_db") + XFSFREEZE_APP = application("xfs_freeze") ++XFSRESIZE_APP = application("xfs_growfs") + + MOUNT_APP = application("mount") +diff --git a/blivet/tasks/fsresize.py b/blivet/tasks/fsresize.py +index e7e26984..12c0367f 100644 +--- a/blivet/tasks/fsresize.py ++++ b/blivet/tasks/fsresize.py +@@ -20,7 +20,10 @@ + # Red Hat Author(s): Anne Mulhern + + import abc ++import os ++import tempfile + ++from contextlib import contextmanager + from six import add_metaclass + + from ..errors import FSError +@@ -32,6 +35,9 @@ from . import task + from . import fstask + from . import dfresize + ++import logging ++log = logging.getLogger("blivet") ++ + + @add_metaclass(abc.ABCMeta) + class FSResizeTask(fstask.FSTask): +@@ -115,6 +121,54 @@ class NTFSResize(FSResize): + ] + + ++class XFSResize(FSResize): ++ ext = availability.XFSRESIZE_APP ++ unit = B ++ size_fmt = None ++ ++ @contextmanager ++ def _do_temp_mount(self): ++ if self.fs.status: ++ yield ++ else: ++ dev_name = os.path.basename(self.fs.device) ++ tmpdir = tempfile.mkdtemp(prefix="xfs-tempmount-%s" % dev_name) ++ log.debug("mounting XFS on '%s' to '%s' for resize", self.fs.device, tmpdir) ++ try: ++ self.fs.mount(mountpoint=tmpdir) ++ except FSError as e: ++ raise FSError("Failed to mount XFS filesystem for resize: %s" % str(e)) ++ ++ try: ++ yield ++ finally: ++ util.umount(mountpoint=tmpdir) ++ os.rmdir(tmpdir) ++ ++ def _get_block_size(self): ++ if self.fs._current_info: ++ # this should be set by update_size_info() ++ for line in self.fs._current_info.split("\n"): ++ if line.startswith("blocksize ="): ++ return int(line.split("=")[-1]) ++ ++ raise FSError("Failed to get XFS filesystem block size for resize") ++ ++ def size_spec(self): ++ # size for xfs_growfs is in blocks ++ return str(self.fs.target_size.convert_to(self.unit) / self._get_block_size()) ++ ++ @property ++ def args(self): ++ return [self.fs.system_mountpoint, "-D", self.size_spec()] ++ ++ def do_task(self): ++ """ Resizes the XFS format. 
""" ++ ++ with self._do_temp_mount(): ++ super(XFSResize, self).do_task() ++ ++ + class TmpFSResize(FSResize): + + ext = availability.MOUNT_APP +-- +2.26.2 + + +From 56d05334231c30699a9c77dedbc23fdb021b9dee Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 14 Jul 2020 11:27:51 +0200 +Subject: [PATCH 2/4] Add tests for XFS resize + +XFS supports only grow so we can't reuse most of the fstesting +code and we also need to test the resize on partition because +XFS won't allow grow to size bigger than the underlying block +device. +--- + tests/formats_test/fs_test.py | 91 +++++++++++++++++++++++++++++++++ + tests/formats_test/fstesting.py | 33 ++++++------ + 2 files changed, 107 insertions(+), 17 deletions(-) + +diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py +index 15fc0c35..9bc5d20d 100644 +--- a/tests/formats_test/fs_test.py ++++ b/tests/formats_test/fs_test.py +@@ -2,8 +2,13 @@ import os + import tempfile + import unittest + ++import parted ++ + import blivet.formats.fs as fs + from blivet.size import Size, ROUND_DOWN ++from blivet.errors import DeviceFormatError ++from blivet.formats import get_format ++from blivet.devices import PartitionDevice, DiskDevice + + from tests import loopbackedtestcase + +@@ -50,6 +55,92 @@ class ReiserFSTestCase(fstesting.FSAsRoot): + class XFSTestCase(fstesting.FSAsRoot): + _fs_class = fs.XFS + ++ def can_resize(self, an_fs): ++ resize_tasks = (an_fs._resize, an_fs._size_info) ++ return not any(t.availability_errors for t in resize_tasks) ++ ++ def _create_partition(self, disk, size): ++ disk.format = get_format("disklabel", device=disk.path, label_type="msdos") ++ disk.format.create() ++ pstart = disk.format.alignment.grainSize ++ pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize) ++ disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL) ++ disk.format.parted_disk.commit() ++ part = disk.format.parted_disk.getPartitionBySector(pstart) ++ ++ device = PartitionDevice(os.path.basename(part.path)) ++ device.disk = disk ++ device.exists = True ++ device.parted_partition = part ++ ++ return device ++ ++ def _remove_partition(self, partition, disk): ++ disk.format.remove_partition(partition.parted_partition) ++ disk.format.parted_disk.commit() ++ ++ def test_resize(self): ++ an_fs = self._fs_class() ++ if not an_fs.formattable: ++ self.skipTest("can not create filesystem %s" % an_fs.name) ++ an_fs.device = self.loop_devices[0] ++ self.assertIsNone(an_fs.create()) ++ an_fs.update_size_info() ++ ++ self._test_sizes(an_fs) ++ # CHECKME: target size is still 0 after updated_size_info is called. ++ self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size) ++ ++ if not self.can_resize(an_fs): ++ self.assertFalse(an_fs.resizable) ++ # Not resizable, so can not do resizing actions. 
++ with self.assertRaises(DeviceFormatError): ++ an_fs.target_size = Size("64 MiB") ++ with self.assertRaises(DeviceFormatError): ++ an_fs.do_resize() ++ else: ++ disk = DiskDevice(os.path.basename(self.loop_devices[0])) ++ part = self._create_partition(disk, Size("50 MiB")) ++ an_fs = self._fs_class() ++ an_fs.device = part.path ++ self.assertIsNone(an_fs.create()) ++ an_fs.update_size_info() ++ ++ self.assertTrue(an_fs.resizable) ++ ++ # grow the partition so we can grow the filesystem ++ self._remove_partition(part, disk) ++ part = self._create_partition(disk, size=part.size + Size("40 MiB")) ++ ++ # Try a reasonable target size ++ TARGET_SIZE = Size("64 MiB") ++ an_fs.target_size = TARGET_SIZE ++ self.assertEqual(an_fs.target_size, TARGET_SIZE) ++ self.assertNotEqual(an_fs._size, TARGET_SIZE) ++ self.assertIsNone(an_fs.do_resize()) ++ ACTUAL_SIZE = TARGET_SIZE.round_to_nearest(an_fs._resize.unit, rounding=ROUND_DOWN) ++ self.assertEqual(an_fs.size, ACTUAL_SIZE) ++ self.assertEqual(an_fs._size, ACTUAL_SIZE) ++ self._test_sizes(an_fs) ++ ++ self._remove_partition(part, disk) ++ ++ # and no errors should occur when checking ++ self.assertIsNone(an_fs.do_check()) ++ ++ def test_shrink(self): ++ self.skipTest("Not checking resize for this test category.") ++ ++ def test_too_small(self): ++ self.skipTest("Not checking resize for this test category.") ++ ++ def test_no_explicit_target_size2(self): ++ self.skipTest("Not checking resize for this test category.") ++ ++ def test_too_big2(self): ++ # XXX this tests assumes that resizing to max size - 1 B will fail, but xfs_grow won't ++ self.skipTest("Not checking resize for this test category.") ++ + + class HFSTestCase(fstesting.FSAsRoot): + _fs_class = fs.HFS +diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py +index 62f806f9..86b2a116 100644 +--- a/tests/formats_test/fstesting.py ++++ b/tests/formats_test/fstesting.py +@@ -11,16 +11,6 @@ from blivet.size import Size, ROUND_DOWN + from blivet.formats import fs + + +-def can_resize(an_fs): +- """ Returns True if this filesystem has all necessary resizing tools +- available. +- +- :param an_fs: a filesystem object +- """ +- resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize) +- return not any(t.availability_errors for t in resize_tasks) +- +- + @add_metaclass(abc.ABCMeta) + class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + +@@ -32,6 +22,15 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + def __init__(self, methodName='run_test'): + super(FSAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE]) + ++ def can_resize(self, an_fs): ++ """ Returns True if this filesystem has all necessary resizing tools ++ available. ++ ++ :param an_fs: a filesystem object ++ """ ++ resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize) ++ return not any(t.availability_errors for t in resize_tasks) ++ + def _test_sizes(self, an_fs): + """ Test relationships between different size values. + +@@ -190,7 +189,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + # CHECKME: target size is still 0 after updated_size_info is called. + self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size) + +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.assertFalse(an_fs.resizable) + # Not resizable, so can not do resizing actions. + with self.assertRaises(DeviceFormatError): +@@ -221,7 +220,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + # in constructor call behavior would be different. 
+ + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -244,7 +243,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + """ + SIZE = Size("64 MiB") + an_fs = self._fs_class(size=SIZE) +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -264,7 +263,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_shrink(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -296,7 +295,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_small(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create or resize filesystem %s" % an_fs.name) +@@ -315,7 +314,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_big(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -334,7 +333,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_big2(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +-- +2.26.2 + + +From 51acc04f4639f143b55789a06a68aae988a91296 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 15 Jul 2020 12:59:04 +0200 +Subject: [PATCH 3/4] Add support for checking and fixing XFS using xfs_repair + +--- + blivet/formats/fs.py | 1 + + blivet/tasks/availability.py | 1 + + blivet/tasks/fsck.py | 12 ++++++++++++ + tests/formats_test/fs_test.py | 6 +++--- + 4 files changed, 17 insertions(+), 3 deletions(-) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index 12cb9885..06fbdf10 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -1091,6 +1091,7 @@ class XFS(FS): + _supported = True + _resizable = True + _packages = ["xfsprogs"] ++ _fsck_class = fsck.XFSCK + _info_class = fsinfo.XFSInfo + _mkfs_class = fsmkfs.XFSMkfs + _readlabel_class = fsreadlabel.XFSReadLabel +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index df62780c..f3b76650 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -456,5 +456,6 @@ XFSADMIN_APP = application("xfs_admin") + XFSDB_APP = application("xfs_db") + XFSFREEZE_APP = application("xfs_freeze") + XFSRESIZE_APP = application("xfs_growfs") ++XFSREPAIR_APP = application("xfs_repair") + + MOUNT_APP = application("mount") +diff --git a/blivet/tasks/fsck.py b/blivet/tasks/fsck.py +index 5274f13a..8477f5f8 100644 +--- a/blivet/tasks/fsck.py ++++ b/blivet/tasks/fsck.py +@@ -123,6 +123,18 @@ class Ext2FSCK(FSCK): + return "\n".join(msgs) or None + + ++class XFSCK(FSCK): ++ _fsck_errors = {1: "Runtime error 
encountered during repair operation.", ++ 2: "XFS repair was unable to proceed due to a dirty log."} ++ ++ ext = availability.XFSREPAIR_APP ++ options = [] ++ ++ def _error_message(self, rc): ++ msgs = (self._fsck_errors[c] for c in self._fsck_errors.keys() if rc & c) ++ return "\n".join(msgs) or None ++ ++ + class HFSPlusFSCK(FSCK): + _fsck_errors = {3: "Quick check found a dirty filesystem; no repairs done.", + 4: "Root filesystem was dirty. System should be rebooted.", +diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py +index 9bc5d20d..8fb099fd 100644 +--- a/tests/formats_test/fs_test.py ++++ b/tests/formats_test/fs_test.py +@@ -123,10 +123,10 @@ class XFSTestCase(fstesting.FSAsRoot): + self.assertEqual(an_fs._size, ACTUAL_SIZE) + self._test_sizes(an_fs) + +- self._remove_partition(part, disk) ++ # and no errors should occur when checking ++ self.assertIsNone(an_fs.do_check()) + +- # and no errors should occur when checking +- self.assertIsNone(an_fs.do_check()) ++ self._remove_partition(part, disk) + + def test_shrink(self): + self.skipTest("Not checking resize for this test category.") +-- +2.26.2 + + +From 2a6947098e66f880193f3bac2282a6c7857ca5f7 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 16 Jul 2020 09:05:35 +0200 +Subject: [PATCH 4/4] Use xfs_db in read-only mode when getting XFS information + +This way it will also work on mounted filesystems. +--- + blivet/tasks/fsinfo.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/tasks/fsinfo.py b/blivet/tasks/fsinfo.py +index af208f5d..41ff700f 100644 +--- a/blivet/tasks/fsinfo.py ++++ b/blivet/tasks/fsinfo.py +@@ -95,7 +95,7 @@ class ReiserFSInfo(FSInfo): + + class XFSInfo(FSInfo): + ext = availability.XFSDB_APP +- options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize"] ++ options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize", "-r"] + + + class UnimplementedFSInfo(fstask.UnimplementedFSTask): +-- +2.26.2 + diff --git a/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch b/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch new file mode 100644 index 0000000..5b9f0ed --- /dev/null +++ b/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch @@ -0,0 +1,76 @@ +From aa4ce218fe9b4ee3571d872ff1575a499596181c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 29 May 2020 12:14:30 +0200 +Subject: [PATCH 1/2] Do not limit swap to 128 GiB + +The limit was part of change to limit suggested swap size in +kickstart which doesn't use the SwapSpace._max_size so there is no +reason to limit this for manual installations. +16 TiB seems to be max usable swap size based on mkswap code. 
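+
+A minimal sketch of the new behavior, mirroring the unit test added by
+the second patch in this file (the device name "dev" is illustrative):
+
+    from blivet.devices.storage import StorageDevice
+    from blivet.errors import DeviceError
+    from blivet.formats import get_format
+    from blivet.size import Size
+
+    # accepted now that the cap is 16 TiB
+    StorageDevice("dev", size=Size("15 TiB"), fmt=get_format("swap"))
+
+    try:
+        StorageDevice("dev", size=Size("17 TiB"), fmt=get_format("swap"))
+    except DeviceError:
+        pass  # anything above 16 TiB is still rejected at construction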
+ +Resolves: rhbz#1656485 +--- + blivet/formats/swap.py | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/blivet/formats/swap.py b/blivet/formats/swap.py +index 4b8a7edf..3cc59138 100644 +--- a/blivet/formats/swap.py ++++ b/blivet/formats/swap.py +@@ -52,8 +52,7 @@ class SwapSpace(DeviceFormat): + _linux_native = True # for clearpart + _plugin = availability.BLOCKDEV_SWAP_PLUGIN + +- # see rhbz#744129 for details +- _max_size = Size("128 GiB") ++ _max_size = Size("16 TiB") + + config_actions_map = {"label": "write_label"} + +-- +2.26.2 + + +From 93aa6ad87116f1c86616d73dbe561251c4a0c286 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 11 Jun 2020 14:27:44 +0200 +Subject: [PATCH 2/2] Add test for SwapSpace max size + +--- + tests/formats_test/swap_test.py | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + create mode 100644 tests/formats_test/swap_test.py + +diff --git a/tests/formats_test/swap_test.py b/tests/formats_test/swap_test.py +new file mode 100644 +index 00000000..56356144 +--- /dev/null ++++ b/tests/formats_test/swap_test.py +@@ -0,0 +1,24 @@ ++import test_compat # pylint: disable=unused-import ++ ++import six ++import unittest ++ ++from blivet.devices.storage import StorageDevice ++from blivet.errors import DeviceError ++from blivet.formats import get_format ++ ++from blivet.size import Size ++ ++ ++class SwapNodevTestCase(unittest.TestCase): ++ ++ def test_swap_max_size(self): ++ StorageDevice("dev", size=Size("129 GiB"), ++ fmt=get_format("swap")) ++ ++ StorageDevice("dev", size=Size("15 TiB"), ++ fmt=get_format("swap")) ++ ++ with six.assertRaisesRegex(self, DeviceError, "device is too large for new format"): ++ StorageDevice("dev", size=Size("17 TiB"), ++ fmt=get_format("swap")) +-- +2.26.2 + diff --git a/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch b/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch new file mode 100644 index 0000000..1e14de6 --- /dev/null +++ b/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch @@ -0,0 +1,78 @@ +From 4e6a322d32d2a12f8a87ab763a6286cf3d7b5c27 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 8 Sep 2020 13:57:40 +0200 +Subject: [PATCH] Use UnusableConfigurationError for partially hidden multipath + devices + +Follow-up for https://github.com/storaged-project/blivet/pull/883 +to make Anaconda show an error message instead of crashing. 
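+
+A minimal sketch of what callers gain, assuming an already populated
+DeviceTree named "tree" (the setup is illustrative; see the test hunk
+below):
+
+    from blivet.errors import UnusableConfigurationError
+
+    tree.ignored_disks = ["sda"]  # a subset of the multipath members
+    tree.exclusive_disks = []
+    try:
+        tree._hide_ignored_disks()
+    except UnusableConfigurationError as e:
+        # Anaconda can now show e.suggestion instead of crashing
+        print(e.suggestion)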
+ +Resolves: rhbz#1877052 +--- + blivet/devicetree.py | 4 ++-- + blivet/errors.py | 6 ++++++ + tests/devicetree_test.py | 4 ++-- + 3 files changed, 10 insertions(+), 4 deletions(-) + +diff --git a/blivet/devicetree.py b/blivet/devicetree.py +index 2afb0d0e..57a9bbd7 100644 +--- a/blivet/devicetree.py ++++ b/blivet/devicetree.py +@@ -32,7 +32,7 @@ from gi.repository import BlockDev as blockdev + + from .actionlist import ActionList + from .callbacks import callbacks +-from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError ++from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError, InvalidMultideviceSelection + from .deviceaction import ActionDestroyDevice, ActionDestroyFormat + from .devices import BTRFSDevice, NoDevice, PartitionDevice + from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice +@@ -936,7 +936,7 @@ class DeviceTreeBase(object): + if is_ignored: + if len(disk.children) == 1: + if not all(self._is_ignored_disk(d) for d in disk.children[0].parents): +- raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.") ++ raise InvalidMultideviceSelection("Including only a subset of raid/multipath member disks is not allowed.") + + # and also children like fwraid or mpath + self.hide(disk.children[0]) +diff --git a/blivet/errors.py b/blivet/errors.py +index 811abf81..7a93f1ce 100644 +--- a/blivet/errors.py ++++ b/blivet/errors.py +@@ -233,6 +233,12 @@ class DuplicateVGError(UnusableConfigurationError): + "Hint 2: You can get the VG UUIDs by running " + "'pvs -o +vg_uuid'.") + ++ ++class InvalidMultideviceSelection(UnusableConfigurationError): ++ suggestion = N_("All parent devices must be selected when choosing exclusive " ++ "or ignored disks for a multipath or firmware RAID device.") ++ ++ + # DeviceAction + + +diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py +index 6032e7f6..4e47ffc3 100644 +--- a/tests/devicetree_test.py ++++ b/tests/devicetree_test.py +@@ -5,7 +5,7 @@ import six + import unittest + + from blivet.actionlist import ActionList +-from blivet.errors import DeviceTreeError, DuplicateUUIDError ++from blivet.errors import DeviceTreeError, DuplicateUUIDError, InvalidMultideviceSelection + from blivet.deviceaction import ACTION_TYPE_DESTROY, ACTION_OBJECT_DEVICE + from blivet.devicelibs import lvm + from blivet.devices import DiskDevice +@@ -512,5 +512,5 @@ class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase): + self.tree.ignored_disks = ["sda", "sdb"] + self.tree.exclusive_disks = [] + +- with self.assertRaises(DeviceTreeError): ++ with self.assertRaises(InvalidMultideviceSelection): + self.tree._hide_ignored_disks() +-- +2.26.2 + diff --git a/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch b/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch new file mode 100644 index 0000000..24e408e --- /dev/null +++ b/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch @@ -0,0 +1,32 @@ +From 866a48e6c3d8246d2897bb402a191df5f2848aa4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 23 Jun 2020 10:33:33 +0200 +Subject: [PATCH] Fix possible UnicodeDecodeError when reading model from sysfs + +Some Innovation IT NVMe devices have an (invalid) unicode in their +model name. 
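+
+The gist of the fix as a standalone sketch (the device name is
+illustrative):
+
+    model_path = "/sys/class/block/%s/device/model" % "nvme0n1"
+    # errors="replace" substitutes U+FFFD for invalid bytes instead of
+    # raising UnicodeDecodeError
+    model = open(model_path, encoding="utf-8", errors="replace").read()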
+ +Resolves: rhbz#1849326 +--- + blivet/udev.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/blivet/udev.py b/blivet/udev.py +index 41c99496..2c795225 100644 +--- a/blivet/udev.py ++++ b/blivet/udev.py +@@ -185,8 +185,9 @@ def __is_blacklisted_blockdev(dev_name): + if any(re.search(expr, dev_name) for expr in device_name_blacklist): + return True + +- if os.path.exists("/sys/class/block/%s/device/model" % (dev_name,)): +- model = open("/sys/class/block/%s/device/model" % (dev_name,)).read() ++ model_path = "/sys/class/block/%s/device/model" % dev_name ++ if os.path.exists(model_path): ++ model = open(model_path, encoding="utf-8", errors="replace").read() + for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"): + if model.find(bad) != -1: + log.info("ignoring %s with model %s", dev_name, model) +-- +2.26.2 + diff --git a/SOURCES/0016-Basic-LVM-VDO-support.patch b/SOURCES/0016-Basic-LVM-VDO-support.patch new file mode 100644 index 0000000..b52342b --- /dev/null +++ b/SOURCES/0016-Basic-LVM-VDO-support.patch @@ -0,0 +1,415 @@ +From 3f6bbf52442609b8e6e3919a3fdd8c5af64923e6 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:48:41 +0200 +Subject: [PATCH 1/3] Add basic support for LVM VDO devices + +This adds support for LVM VDO devices detection during populate +and allows removing both VDO LVs and VDO pools using actions. +--- + blivet/devices/lvm.py | 150 +++++++++++++++++++++++++++++++- + blivet/populator/helpers/lvm.py | 16 +++- + tests/action_test.py | 39 +++++++++ + tests/devices_test/lvm_test.py | 34 ++++++++ + tests/storagetestcase.py | 11 ++- + 5 files changed, 245 insertions(+), 5 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 97de6acd..d9e24a33 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1789,8 +1789,132 @@ class LVMThinLogicalVolumeMixin(object): + data.pool_name = self.pool.lvname + + ++class LVMVDOPoolMixin(object): ++ def __init__(self): ++ self._lvs = [] ++ ++ @property ++ def is_vdo_pool(self): ++ return self.seg_type == "vdo-pool" ++ ++ @property ++ def type(self): ++ return "lvmvdopool" ++ ++ @property ++ def resizable(self): ++ return False ++ ++ @util.requires_property("is_vdo_pool") ++ def _add_log_vol(self, lv): ++ """ Add an LV to this VDO pool. """ ++ if lv in self._lvs: ++ raise ValueError("lv is already part of this VDO pool") ++ ++ self.vg._add_log_vol(lv) ++ log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name) ++ self._lvs.append(lv) ++ ++ @util.requires_property("is_vdo_pool") ++ def _remove_log_vol(self, lv): ++ """ Remove an LV from this VDO pool. """ ++ if lv not in self._lvs: ++ raise ValueError("specified lv is not part of this VDO pool") ++ ++ self._lvs.remove(lv) ++ self.vg._remove_log_vol(lv) ++ ++ @property ++ @util.requires_property("is_vdo_pool") ++ def lvs(self): ++ """ A list of this VDO pool's LVs """ ++ return self._lvs[:] # we don't want folks changing our list ++ ++ @property ++ def direct(self): ++ """ Is this device directly accessible? """ ++ return False ++ ++ def _create(self): ++ """ Create the device. 
""" ++ raise NotImplementedError ++ ++ ++class LVMVDOLogicalVolumeMixin(object): ++ def __init__(self): ++ pass ++ ++ def _init_check(self): ++ pass ++ ++ def _check_parents(self): ++ """Check that this device has parents as expected""" ++ if isinstance(self.parents, (list, ParentList)): ++ if len(self.parents) != 1: ++ raise ValueError("constructor requires a single vdo-pool LV") ++ ++ container = self.parents[0] ++ else: ++ container = self.parents ++ ++ if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool: ++ raise ValueError("constructor requires a vdo-pool LV") ++ ++ @property ++ def vg_space_used(self): ++ return Size(0) # the pool's size is already accounted for in the vg ++ ++ @property ++ def is_vdo_lv(self): ++ return self.seg_type == "vdo" ++ ++ @property ++ def vg(self): ++ # parents[0] is the pool, not the VG so set the VG here ++ return self.pool.vg ++ ++ @property ++ def type(self): ++ return "vdolv" ++ ++ @property ++ def resizable(self): ++ return False ++ ++ @property ++ @util.requires_property("is_vdo_lv") ++ def pool(self): ++ return self.parents[0] ++ ++ def _create(self): ++ """ Create the device. """ ++ raise NotImplementedError ++ ++ def _destroy(self): ++ # nothing to do here, VDO LV is destroyed automatically together with ++ # the VDO pool ++ pass ++ ++ def remove_hook(self, modparent=True): ++ if modparent: ++ self.pool._remove_log_vol(self) ++ ++ # pylint: disable=bad-super-call ++ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent) ++ ++ def add_hook(self, new=True): ++ # pylint: disable=bad-super-call ++ super(LVMLogicalVolumeBase, self).add_hook(new=new) ++ if new: ++ return ++ ++ if self not in self.pool.lvs: ++ self.pool._add_log_vol(self) ++ ++ + class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin, +- LVMThinPoolMixin, LVMThinLogicalVolumeMixin): ++ LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin, ++ LVMVDOLogicalVolumeMixin): + """ An LVM Logical Volume """ + + # generally resizable, see :property:`resizable` for details +@@ -1879,6 +2003,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, + fmt, exists, sysfs_path, grow, maxsize, + percent, cache_request, pvs, from_lvs) ++ LVMVDOPoolMixin.__init__(self) ++ LVMVDOLogicalVolumeMixin.__init__(self) + + LVMInternalLogicalVolumeMixin._init_check(self) + LVMSnapshotMixin._init_check(self) +@@ -1905,6 +2031,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + ret.append(LVMThinPoolMixin) + if self.is_thin_lv: + ret.append(LVMThinLogicalVolumeMixin) ++ if self.is_vdo_pool: ++ ret.append(LVMVDOPoolMixin) ++ if self.is_vdo_lv: ++ ret.append(LVMVDOLogicalVolumeMixin) + return ret + + def _try_specific_call(self, name, *args, **kwargs): +@@ -2066,6 +2196,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + def display_lv_name(self): + return self.lvname + ++ @property ++ @type_specific ++ def pool(self): ++ return super(LVMLogicalVolumeDevice, self).pool ++ + def _setup(self, orig=False): + """ Open, or set up, a device. 
""" + log_method_call(self, self.name, orig=orig, status=self.status, +@@ -2167,6 +2302,19 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + udev.settle() + blockdev.lvm.lvresize(self.vg.name, self._name, self.size) + ++ @type_specific ++ def _add_log_vol(self, lv): ++ pass ++ ++ @type_specific ++ def _remove_log_vol(self, lv): ++ pass ++ ++ @property ++ @type_specific ++ def lvs(self): ++ return [] ++ + @property + @type_specific + def direct(self): +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index 4b674fac..ff8bf59f 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -211,9 +211,6 @@ class LVMFormatPopulator(FormatPopulator): + origin = self._devicetree.get_device_by_name(origin_device_name) + + lv_kwargs["origin"] = origin +- elif lv_attr[0] == 'v': +- # skip vorigins +- return + elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'): + # an internal LV, add the an instance of the appropriate class + # to internal_lvs for later processing when non-internal LVs are +@@ -237,6 +234,19 @@ class LVMFormatPopulator(FormatPopulator): + origin = self._devicetree.get_device_by_name(origin_device_name) + lv_kwargs["origin"] = origin + ++ lv_parents = [self._devicetree.get_device_by_name(pool_device_name)] ++ elif lv_attr[0] == 'd': ++ # vdo pool ++ # nothing to do here ++ pass ++ elif lv_attr[0] == 'v': ++ if lv_type != "vdo": ++ # skip vorigins ++ return ++ pool_name = blockdev.lvm.vdolvpoolname(vg_name, lv_name) ++ pool_device_name = "%s-%s" % (vg_name, pool_name) ++ add_required_lv(pool_device_name, "failed to look up VDO pool") ++ + lv_parents = [self._devicetree.get_device_by_name(pool_device_name)] + elif lv_name.endswith(']'): + # unrecognized Internal LVM2 device +diff --git a/tests/action_test.py b/tests/action_test.py +index 90c1b312..8f9a7424 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -1252,6 +1252,45 @@ class DeviceActionTestCase(StorageTestCase): + self.assertEqual(set(self.storage.lvs), {pool}) + self.assertEqual(set(pool._internal_lvs), {lv1, lv2}) + ++ def test_lvm_vdo_destroy(self): ++ self.destroy_all_devices() ++ sdc = self.storage.devicetree.get_device_by_name("sdc") ++ sdc1 = self.new_device(device_class=PartitionDevice, name="sdc1", ++ size=Size("50 GiB"), parents=[sdc], ++ fmt=blivet.formats.get_format("lvmpv")) ++ self.schedule_create_device(sdc1) ++ ++ vg = self.new_device(device_class=LVMVolumeGroupDevice, ++ name="vg", parents=[sdc1]) ++ self.schedule_create_device(vg) ++ ++ pool = self.new_device(device_class=LVMLogicalVolumeDevice, ++ name="data", parents=[vg], ++ size=Size("10 GiB"), ++ seg_type="vdo-pool", exists=True) ++ self.storage.devicetree._add_device(pool) ++ lv = self.new_device(device_class=LVMLogicalVolumeDevice, ++ name="meta", parents=[pool], ++ size=Size("50 GiB"), ++ seg_type="vdo", exists=True) ++ self.storage.devicetree._add_device(lv) ++ ++ remove_lv = self.schedule_destroy_device(lv) ++ self.assertListEqual(pool.lvs, []) ++ self.assertNotIn(lv, vg.lvs) ++ ++ # cancelling the action should put lv back to both vg and pool lvs ++ self.storage.devicetree.actions.remove(remove_lv) ++ self.assertListEqual(pool.lvs, [lv]) ++ self.assertIn(lv, vg.lvs) ++ ++ # can't remove non-leaf pool ++ with self.assertRaises(ValueError): ++ self.schedule_destroy_device(pool) ++ ++ self.schedule_destroy_device(lv) ++ self.schedule_destroy_device(pool) ++ + + class ConfigurationActionsTest(unittest.TestCase): + +diff --git 
a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 9e701d18..204cb99a 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -405,6 +405,40 @@ class LVMDeviceTest(unittest.TestCase): + exists=False) + self.assertFalse(vg.is_empty) + ++ def test_lvm_vdo_pool(self): ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("1 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv]) ++ pool = LVMLogicalVolumeDevice("testpool", parents=[vg], size=Size("512 MiB"), ++ seg_type="vdo-pool", exists=True) ++ self.assertTrue(pool.is_vdo_pool) ++ ++ free = vg.free_space ++ lv = LVMLogicalVolumeDevice("testlv", parents=[pool], size=Size("2 GiB"), ++ seg_type="vdo", exists=True) ++ self.assertTrue(lv.is_vdo_lv) ++ self.assertEqual(lv.vg, vg) ++ self.assertEqual(lv.pool, pool) ++ ++ # free space in the vg shouldn't be affected by the vdo lv ++ self.assertEqual(lv.vg_space_used, 0) ++ self.assertEqual(free, vg.free_space) ++ ++ self.assertListEqual(pool.lvs, [lv]) ++ ++ # now try to destroy both the pool and the vdo lv ++ # for the lv this should be a no-op, destroying the pool should destroy both ++ with patch("blivet.devices.lvm.blockdev.lvm") as lvm: ++ lv.destroy() ++ lv.remove_hook() ++ self.assertFalse(lv.exists) ++ self.assertFalse(lvm.lvremove.called) ++ self.assertListEqual(pool.lvs, []) ++ ++ pool.destroy() ++ self.assertFalse(pool.exists) ++ self.assertTrue(lvm.lvremove.called) ++ + + class TypeSpecificCallsTest(unittest.TestCase): + def test_type_specific_calls(self): +diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py +index e581bca6..1844dec5 100644 +--- a/tests/storagetestcase.py ++++ b/tests/storagetestcase.py +@@ -96,7 +96,16 @@ class StorageTestCase(unittest.TestCase): + def new_device(self, *args, **kwargs): + """ Return a new Device instance suitable for testing. 
""" + device_class = kwargs.pop("device_class") +- exists = kwargs.pop("exists", False) ++ ++ # we intentionally don't pass the "exists" kwarg to the constructor ++ # becauses this causes issues with some devices (especially partitions) ++ # but we still need it for some LVs like VDO because we can't create ++ # those so we need to fake their existence even for the constructor ++ if device_class is blivet.devices.LVMLogicalVolumeDevice: ++ exists = kwargs.get("exists", False) ++ else: ++ exists = kwargs.pop("exists", False) ++ + part_type = kwargs.pop("part_type", parted.PARTITION_NORMAL) + device = device_class(*args, **kwargs) + +-- +2.26.2 + + +From f05a66e1bed1ca1f3cd7d7ffecd6693ab4d7f32a Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:52:47 +0200 +Subject: [PATCH 2/3] Fix checking for filesystem support in action_test + +--- + tests/action_test.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/action_test.py b/tests/action_test.py +index 8f9a7424..228eb97a 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -56,7 +56,7 @@ FORMAT_CLASSES = [ + + + @unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test") +-@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") ++@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") + class DeviceActionTestCase(StorageTestCase): + + """ DeviceActionTestSuite """ +-- +2.26.2 + + +From 69bd2e69e21c8779377a6f54b3d83cb35138867a Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:54:03 +0200 +Subject: [PATCH 3/3] Fix LV min size for resize in test_action_dependencies + +We've recently changed min size for all filesystems so we can't +resize the LV to the device minimal size. +This was overlooked in the original change because these tests +were skipped. +--- + tests/action_test.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/action_test.py b/tests/action_test.py +index 228eb97a..77176f46 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -870,7 +870,7 @@ class DeviceActionTestCase(StorageTestCase): + name="testlv2", parents=[testvg]) + testlv2.format = self.new_format("ext4", device=testlv2.path, + exists=True, device_instance=testlv2) +- shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB")) ++ shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB") + Ext4FS._min_size) + shrink_lv2.apply() + + self.assertTrue(grow_lv.requires(shrink_lv2)) +-- +2.26.2 + diff --git a/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch b/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch new file mode 100644 index 0000000..af2c4d8 --- /dev/null +++ b/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch @@ -0,0 +1,30 @@ +From d477f8d076789cbe1c0a85545ea8b5133fdc4bdf Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 18 Sep 2020 13:58:48 +0200 +Subject: [PATCH] Let parted fix fixable issues with partition table + +This will automatically fix issues like GPT partition table not +covering whole device after disk size change. 
+ +Resolves: rhbz#1846869 +--- + blivet/populator/populator.py | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py +index 465c272d..fe566816 100644 +--- a/blivet/populator/populator.py ++++ b/blivet/populator/populator.py +@@ -64,6 +64,9 @@ def parted_exn_handler(exn_type, exn_options, exn_msg): + if exn_type == parted.EXCEPTION_TYPE_ERROR and \ + exn_options == parted.EXCEPTION_OPT_YES_NO: + ret = parted.EXCEPTION_RESOLVE_YES ++ elif exn_type == parted.EXCEPTION_TYPE_WARNING and \ ++ exn_options & parted.EXCEPTION_RESOLVE_FIX: ++ ret = parted.EXCEPTION_RESOLVE_FIX + return ret + + +-- +2.29.2 + diff --git a/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch b/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch new file mode 100644 index 0000000..11b6a40 --- /dev/null +++ b/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch @@ -0,0 +1,112 @@ +From 430cd2cdba8fba434b5bed2d2a7ed97803c62f6d Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 5 Jan 2021 16:56:52 +0100 +Subject: [PATCH 1/3] Fix possible UnicodeDecodeError when reading sysfs + attributes + +This is a follow-up for https://github.com/storaged-project/blivet/pull/861 +where we fixed reading device model in "__is_blacklisted_blockdev" +but we read the device model from other places too so it makes +more sense to "fix" all sysfs attribute reads. +--- + blivet/util.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/util.py b/blivet/util.py +index 2fa9c8fc..48b7818f 100644 +--- a/blivet/util.py ++++ b/blivet/util.py +@@ -379,7 +379,7 @@ def get_sysfs_attr(path, attr, root=None): + log.warning("%s is not a valid attribute", attr) + return None + +- f = open(fullattr, "r") ++ f = open(fullattr, "r", encoding="utf-8", errors="replace") + data = f.read() + f.close() + sdata = "".join(["%02x" % (ord(x),) for x in data]) +-- +2.29.2 + + +From 15350b52f30910d4fadad92da0195710adcb69a0 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 5 Jan 2021 16:59:14 +0100 +Subject: [PATCH 2/3] Use util.get_sysfs_attr in __is_ignored_blockdev to read + device mode + +--- + blivet/udev.py | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/blivet/udev.py b/blivet/udev.py +index 2c795225..25375459 100644 +--- a/blivet/udev.py ++++ b/blivet/udev.py +@@ -185,9 +185,8 @@ def __is_blacklisted_blockdev(dev_name): + if any(re.search(expr, dev_name) for expr in device_name_blacklist): + return True + +- model_path = "/sys/class/block/%s/device/model" % dev_name +- if os.path.exists(model_path): +- model = open(model_path, encoding="utf-8", errors="replace").read() ++ model = util.get_sysfs_attr("/sys/class/block/%s" % dev_name, "device/model") ++ if model: + for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"): + if model.find(bad) != -1: + log.info("ignoring %s with model %s", dev_name, model) +-- +2.29.2 + + +From 64ece8c0dafb550bbde4798a766515fb04f44568 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 6 Jan 2021 12:34:49 +0100 +Subject: [PATCH 3/3] Add test for util.get_sysfs_attr + +--- + tests/util_test.py | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/tests/util_test.py b/tests/util_test.py +index 9a2ff492..853b6166 100644 +--- a/tests/util_test.py ++++ b/tests/util_test.py +@@ -2,7 +2,9 @@ + import test_compat + + from six.moves import mock ++import os + import six ++import tempfile + import unittest + from 
decimal import Decimal + +@@ -157,3 +159,24 @@ class DependencyGuardTestCase(unittest.TestCase): + with mock.patch.object(_requires_something, '_check_avail', return_value=True): + self.assertEqual(self._test_dependency_guard_non_critical(), True) + self.assertEqual(self._test_dependency_guard_critical(), True) ++ ++ ++class GetSysfsAttrTestCase(unittest.TestCase): ++ ++ def test_get_sysfs_attr(self): ++ ++ with tempfile.TemporaryDirectory() as sysfs: ++ model_file = os.path.join(sysfs, "model") ++ with open(model_file, "w") as f: ++ f.write("test model\n") ++ ++ model = util.get_sysfs_attr(sysfs, "model") ++ self.assertEqual(model, "test model") ++ ++ # now with some invalid byte in the model ++ with open(model_file, "wb") as f: ++ f.write(b"test model\xef\n") ++ ++ # the unicode replacement character (U+FFFD) should be used instead ++ model = util.get_sysfs_attr(sysfs, "model") ++ self.assertEqual(model, "test model\ufffd") +-- +2.29.2 + diff --git a/SOURCES/0019-LVM-VDO-support.patch b/SOURCES/0019-LVM-VDO-support.patch new file mode 100644 index 0000000..c79d6c1 --- /dev/null +++ b/SOURCES/0019-LVM-VDO-support.patch @@ -0,0 +1,2027 @@ +From 18f05802f07f580ed31f38931b1103842397d598 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:19:52 +0100 +Subject: [PATCH 01/17] Fix type of LVM VDO logical volumes + +We should use "lvmvdolv" to make it similar to other "lvmXYZ" +types. +--- + blivet/devices/lvm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index d9e24a33..9639256d 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1875,7 +1875,7 @@ def vg(self): + + @property + def type(self): +- return "vdolv" ++ return "lvmvdolv" + + @property + def resizable(self): + +From 7f4815e14075550f55f2afb44bfba461eacea1c4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:21:33 +0100 +Subject: [PATCH 02/17] Add VDO pool data LV to internal LVs during populate + +--- + blivet/devices/lvm.py | 9 ++++++++- + blivet/populator/helpers/lvm.py | 2 +- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 9639256d..d0957d6a 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1119,7 +1119,7 @@ class LVMInternalLVtype(Enum): + + @classmethod + def get_type(cls, lv_attr, lv_name): # pylint: disable=unused-argument +- attr_letters = {cls.data: ("T", "C"), ++ attr_letters = {cls.data: ("T", "C", "D"), + cls.meta: ("e",), + cls.log: ("l", "L"), + cls.image: ("i", "I"), +@@ -1824,6 +1824,13 @@ def _remove_log_vol(self, lv): + self._lvs.remove(lv) + self.vg._remove_log_vol(lv) + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def _vdopool_data_lv(self): ++ if not self._internal_lvs: ++ return None ++ return self._internal_lvs[0] ++ + @property + @util.requires_property("is_vdo_pool") + def lvs(self): +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index ff8bf59f..b1626306 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -211,7 +211,7 @@ def add_lv(lv): + origin = self._devicetree.get_device_by_name(origin_device_name) + + lv_kwargs["origin"] = origin +- elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'): ++ elif lv_attr[0] in 'IrielTCoD' and lv_name.endswith(']'): + # an internal LV, add the an instance of the appropriate class + # to internal_lvs for later processing when non-internal LVs are + # processed + +From 
c164864955e371aef78b5020f28bf0c9d235ac7c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:22:12 +0100 +Subject: [PATCH 03/17] Add availability functions for LVM VDO + +VDO is currently available only on RHEL/CentOS so we need a +separate availability check for LVM VDO devices. +--- + blivet/devices/lvm.py | 6 ++++++ + blivet/tasks/availability.py | 8 ++++++++ + 2 files changed, 14 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index d0957d6a..ffc65dcd 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1790,6 +1790,9 @@ def populate_ksdata(self, data): + + + class LVMVDOPoolMixin(object): ++ ++ _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ + def __init__(self): + self._lvs = [] + +@@ -1848,6 +1851,9 @@ def _create(self): + + + class LVMVDOLogicalVolumeMixin(object): ++ ++ _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ + def __init__(self): + pass + +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index f3b76650..b107428e 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -372,6 +372,13 @@ def available_resource(name): + blockdev.LVMTechMode.MODIFY)}) + BLOCKDEV_LVM_TECH = BlockDevMethod(BLOCKDEV_LVM) + ++BLOCKDEV_LVM_VDO = BlockDevTechInfo(plugin_name="lvm", ++ check_fn=blockdev.lvm_is_tech_avail, ++ technologies={blockdev.LVMTech.VDO: (blockdev.LVMTechMode.CREATE | ++ blockdev.LVMTechMode.REMOVE | ++ blockdev.LVMTechMode.QUERY)}) ++BLOCKDEV_LVM_TECH_VDO = BlockDevMethod(BLOCKDEV_LVM_VDO) ++ + # libblockdev mdraid plugin required technologies and modes + BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE | + blockdev.MDTechMode.DELETE | +@@ -410,6 +417,7 @@ def available_resource(name): + BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID) + BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH) + BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH) ++BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO) + BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH) + BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH) + BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH) + +From d782620129d47a7b79b0e6b80455e6d93f8bcc88 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:27:55 +0100 +Subject: [PATCH 04/17] Read the LVM VDO pool current size from the internal + data LV + +The pool device mapper device size is always 512k when active. +--- + blivet/devices/lvm.py | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index ffc65dcd..73743fa8 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1845,6 +1845,15 @@ def direct(self): + """ Is this device directly accessible? """ + return False + ++ def read_current_size(self): ++ log_method_call(self, exists=self.exists, path=self.path, ++ sysfs_path=self.sysfs_path) ++ if self.size != Size(0): ++ return self.size ++ if self._vdopool_data_lv: ++ return self._vdopool_data_lv.read_current_size() ++ return Size(0) ++ + def _create(self): + """ Create the device. 
""" + raise NotImplementedError + +From 2da48ae84f4eac84e8cf998ee2402249a5a52626 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:29:43 +0100 +Subject: [PATCH 05/17] Add "vdo_lv" property to LVMVDOPoolMixin + +--- + blivet/devices/lvm.py | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 73743fa8..2f93fa22 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1840,6 +1840,13 @@ def lvs(self): + """ A list of this VDO pool's LVs """ + return self._lvs[:] # we don't want folks changing our list + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def vdo_lv(self): ++ if not self._lvs: ++ return None ++ return self._lvs[0] ++ + @property + def direct(self): + """ Is this device directly accessible? """ + +From bbfa2cbdc6cb85d405b895c66eb4867cea4218b4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:30:37 +0100 +Subject: [PATCH 06/17] Add support for creating LVM VDO pools and LVM VDO + volumes + +The pool and the volume are created by one call but these can have +different properties (like size) and are in fact two block devices +when created, we also need to create two devices and add them to +the devicetree. The pool device must be always created first and +the _create function for the VDO volume is a no-op. +--- + blivet/devices/lvm.py | 63 +++++++++++++++++++++++++++++++++++++------ + 1 file changed, 55 insertions(+), 8 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 2f93fa22..0802e2de 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -311,7 +311,7 @@ def _add_log_vol(self, lv): + + # verify we have the space, then add it + # do not verify for growing vg (because of ks) +- if not lv.exists and not self.growable and not lv.is_thin_lv and lv.size > self.free_space: ++ if not lv.exists and not self.growable and not (lv.is_thin_lv or lv.is_vdo_lv) and lv.size > self.free_space: + raise errors.DeviceError("new lv is too large to fit in free space", self.name) + + log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name) +@@ -639,7 +639,7 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + percent=None, cache_request=None, pvs=None, from_lvs=None): + + if not exists: +- if seg_type not in [None, "linear", "thin", "thin-pool", "cache"] + lvm.raid_seg_types: ++ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types: + raise ValueError("Invalid or unsupported segment type: %s" % seg_type) + if seg_type and seg_type in lvm.raid_seg_types and not pvs: + raise ValueError("List of PVs has to be given for every non-linear LV") +@@ -1793,7 +1793,11 @@ class LVMVDOPoolMixin(object): + + _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] + +- def __init__(self): ++ def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None): ++ self.compression = compression ++ self.deduplication = deduplication ++ self.index_memory = index_memory ++ self.write_policy = write_policy + self._lvs = [] + + @property +@@ -1863,7 +1867,19 @@ def read_current_size(self): + + def _create(self): + """ Create the device. 
""" +- raise NotImplementedError ++ ++ if not self.vdo_lv: ++ raise errors.DeviceError("Cannot create new VDO pool without a VDO LV.") ++ ++ if self.write_policy: ++ write_policy = blockdev.lvm_get_vdo_write_policy_str(self.write_policy) ++ else: ++ write_policy = blockdev.LVMVDOWritePolicy.AUTO ++ ++ blockdev.lvm.vdo_pool_create(self.vg.name, self.vdo_lv.lvname, self.lvname, ++ self.size, self.vdo_lv.size, self.index_memory, ++ self.compression, self.deduplication, ++ write_policy) + + + class LVMVDOLogicalVolumeMixin(object): +@@ -1915,9 +1931,26 @@ def resizable(self): + def pool(self): + return self.parents[0] + ++ def _set_size(self, newsize): ++ if not isinstance(newsize, Size): ++ raise AttributeError("new size must of type Size") ++ ++ newsize = self.vg.align(newsize) ++ newsize = self.vg.align(util.numeric_type(newsize)) ++ # just make sure the size is set (no VG size/free space check needed for ++ # a VDO LV) ++ DMDevice._set_size(self, newsize) ++ ++ def _pre_create(self): ++ # skip LVMLogicalVolumeDevice's _pre_create() method as it checks for a ++ # free space in a VG which doesn't make sense for a VDO LV and causes a ++ # bug by limitting the VDO LV's size to VG free space which is nonsense ++ super(LVMLogicalVolumeBase, self)._pre_create() # pylint: disable=bad-super-call ++ + def _create(self): +- """ Create the device. """ +- raise NotImplementedError ++ # nothing to do here, VDO LV is created automatically together with ++ # the VDO pool ++ pass + + def _destroy(self): + # nothing to do here, VDO LV is destroyed automatically together with +@@ -1953,7 +1986,9 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None, + percent=None, cache_request=None, pvs=None, + parent_lv=None, int_type=None, origin=None, vorigin=False, +- metadata_size=None, chunk_size=None, profile=None, from_lvs=None): ++ metadata_size=None, chunk_size=None, profile=None, from_lvs=None, ++ compression=False, deduplication=False, index_memory=0, ++ write_policy=None): + """ + :param name: the device name (generally a device node's basename) + :type name: str +@@ -2012,6 +2047,17 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + :keyword from_lvs: LVs to create the new LV from (in the (data_lv, metadata_lv) order) + :type from_lvs: tuple of :class:`LVMLogicalVolumeDevice` + ++ For VDO pools only: ++ ++ :keyword compression: whether to enable compression on the VDO pool ++ :type compression: bool ++ :keyword dudplication: whether to enable dudplication on the VDO pool ++ :type dudplication: bool ++ :keyword index_memory: amount of index memory (in bytes) or 0 for default ++ :type index_memory: int ++ :keyword write_policy: write policy for the volume or None for default ++ :type write_policy: str ++ + """ + + if isinstance(parents, (list, ParentList)): +@@ -2032,7 +2078,8 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, + fmt, exists, sysfs_path, grow, maxsize, + percent, cache_request, pvs, from_lvs) +- LVMVDOPoolMixin.__init__(self) ++ LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory, ++ write_policy) + LVMVDOLogicalVolumeMixin.__init__(self) + + LVMInternalLogicalVolumeMixin._init_check(self) + +From 2d1593b50dc6232e213b4df86dfbf5cf6d282dcd Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:31:35 +0100 +Subject: [PATCH 07/17] Allow 
creating LVM VDO pools and volumes using + "blivet.new_lv" + +The steps to create the VDO devices would typically look like: + +pool = b.new_lv(vdo_pool=True, parents=[data], size=Size("8 GiB")) +vdolv = b.new_lv(vdo_lv=True, parents=[pool], size=Size("40 GiB")) +b.create_device(pool) +b.create_device(vdolv) +b.do_it() +--- + blivet/blivet.py | 18 ++++++++++++++---- + tests/devices_test/lvm_test.py | 31 +++++++++++++++++++++++++++++++ + 2 files changed, 45 insertions(+), 4 deletions(-) + +diff --git a/blivet/blivet.py b/blivet/blivet.py +index e7dbd37b..754eb152 100644 +--- a/blivet/blivet.py ++++ b/blivet/blivet.py +@@ -573,6 +573,10 @@ def new_lv(self, *args, **kwargs): + :type thin_pool: bool + :keyword thin_volume: whether to create a thin volume + :type thin_volume: bool ++ :keyword vdo_pool: whether to create a vdo pool ++ :type vdo_pool: bool ++ :keyword vdo_lv: whether to create a vdo lv ++ :type vdo_lv: bool + :returns: the new device + :rtype: :class:`~.devices.LVMLogicalVolumeDevice` + +@@ -589,8 +593,10 @@ def new_lv(self, *args, **kwargs): + """ + thin_volume = kwargs.pop("thin_volume", False) + thin_pool = kwargs.pop("thin_pool", False) ++ vdo_pool = kwargs.pop("vdo_pool", False) ++ vdo_lv = kwargs.pop("vdo_lv", False) + parent = kwargs.get("parents", [None])[0] +- if thin_volume and parent: ++ if (thin_volume or vdo_lv) and parent: + # kwargs["parents"] will contain the pool device, so... + vg = parent.vg + else: +@@ -600,6 +606,10 @@ def new_lv(self, *args, **kwargs): + kwargs["seg_type"] = "thin" + if thin_pool: + kwargs["seg_type"] = "thin-pool" ++ if vdo_pool: ++ kwargs["seg_type"] = "vdo-pool" ++ if vdo_lv: ++ kwargs["seg_type"] = "vdo" + + mountpoint = kwargs.pop("mountpoint", None) + if 'fmt_type' in kwargs: +@@ -625,7 +635,7 @@ def new_lv(self, *args, **kwargs): + swap = False + + prefix = "" +- if thin_pool: ++ if thin_pool or vdo_pool: + prefix = "pool" + + name = self.suggest_device_name(parent=vg, +@@ -636,10 +646,10 @@ def new_lv(self, *args, **kwargs): + if "%s-%s" % (vg.name, name) in self.names: + raise ValueError("name already in use") + +- if thin_pool or thin_volume: ++ if thin_pool or thin_volume or vdo_pool or vdo_lv: + cache_req = kwargs.pop("cache_request", None) + if cache_req: +- raise ValueError("Creating cached thin volumes and pools is not supported") ++ raise ValueError("Creating cached thin and VDO volumes and pools is not supported") + + return LVMLogicalVolumeDevice(name, *args, **kwargs) + +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 204cb99a..493d3ba1 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -689,3 +689,34 @@ def test_new_lv_from_non_existing_lvs(self): + with patch.object(pool, "_pre_create"): + pool.create() + self.assertTrue(lvm.thpool_convert.called) ++ ++ def test_new_vdo_pool(self): ++ b = blivet.Blivet() ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ ++ for dev in (pv, vg): ++ b.devicetree._add_device(dev) ++ ++ # check that all the above devices are in the expected places ++ self.assertEqual(set(b.devices), {pv, vg}) ++ self.assertEqual(set(b.vgs), {vg}) ++ ++ self.assertEqual(vg.size, Size("10236 MiB")) ++ ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("8 GiB")) ++ ++ vdolv = b.new_lv(name="vdolv", vdo_lv=True, ++ parents=[vdopool], ++ 
size=blivet.size.Size("40 GiB")) ++ ++ b.create_device(vdopool) ++ b.create_device(vdolv) ++ ++ self.assertEqual(vdopool.children[0], vdolv) ++ self.assertEqual(vdolv.parents[0], vdopool) ++ self.assertListEqual(vg.lvs, [vdopool, vdolv]) + +From 31ec429ad7bd0857a768e2dfebe1de088dafc144 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:32:47 +0100 +Subject: [PATCH 08/17] Add LVM VDO device factory + +--- + blivet/devicefactory.py | 100 +++++++++++++++++++++++++++- + tests/devicefactory_test.py | 128 +++++++++++++++++++++++++++++++++--- + 2 files changed, 218 insertions(+), 10 deletions(-) + +diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py +index 9214ad54..c95037cc 100644 +--- a/blivet/devicefactory.py ++++ b/blivet/devicefactory.py +@@ -27,7 +27,7 @@ + from .devices import BTRFSDevice, DiskDevice + from .devices import LUKSDevice, LVMLogicalVolumeDevice + from .devices import PartitionDevice, MDRaidArrayDevice +-from .devices.lvm import DEFAULT_THPOOL_RESERVE ++from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE + from .formats import get_format + from .devicelibs import btrfs + from .devicelibs import mdraid +@@ -58,6 +58,7 @@ + DEVICE_TYPE_BTRFS = 3 + DEVICE_TYPE_DISK = 4 + DEVICE_TYPE_LVM_THINP = 5 ++DEVICE_TYPE_LVM_VDO = 6 + + + def is_supported_device_type(device_type): +@@ -69,6 +70,9 @@ def is_supported_device_type(device_type): + :returns: True if this device type is supported + :rtype: bool + """ ++ if device_type == DEVICE_TYPE_LVM_VDO: ++ return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available) ++ + devices = [] + if device_type == DEVICE_TYPE_BTRFS: + devices = [BTRFSDevice] +@@ -96,7 +100,7 @@ def get_supported_raid_levels(device_type): + pkg = None + if device_type == DEVICE_TYPE_BTRFS: + pkg = btrfs +- elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP): ++ elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP, DEVICE_TYPE_LVM_VDO): + pkg = lvm + elif device_type == DEVICE_TYPE_MD: + pkg = mdraid +@@ -116,6 +120,8 @@ def get_device_type(device): + "lvmlv": DEVICE_TYPE_LVM, + "lvmthinlv": DEVICE_TYPE_LVM_THINP, + "lvmthinpool": DEVICE_TYPE_LVM, ++ "lvmvdolv": DEVICE_TYPE_LVM_VDO, ++ "lvmvdopool": DEVICE_TYPE_LVM, + "btrfs subvolume": DEVICE_TYPE_BTRFS, + "btrfs volume": DEVICE_TYPE_BTRFS, + "mdarray": DEVICE_TYPE_MD} +@@ -136,6 +142,7 @@ def get_device_factory(blivet, device_type=DEVICE_TYPE_LVM, **kwargs): + DEVICE_TYPE_PARTITION: PartitionFactory, + DEVICE_TYPE_MD: MDFactory, + DEVICE_TYPE_LVM_THINP: LVMThinPFactory, ++ DEVICE_TYPE_LVM_VDO: LVMVDOFactory, + DEVICE_TYPE_DISK: DeviceFactory} + + factory_class = class_table[device_type] +@@ -1738,6 +1745,95 @@ def _get_new_device(self, *args, **kwargs): + return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs) + + ++class LVMVDOFactory(LVMFactory): ++ ++ """ Factory for creating LVM VDO volumes. 
++
++        :keyword pool_name: name for the VDO pool; if not specified, a unique name will be generated
++        :type pool_name: str
++        :keyword virtual_size: size for the VDO volume, usually bigger than the pool size; if not
++                               specified, the physical size (pool size) will be used
++        :type virtual_size: :class:`~.size.Size`
++        :keyword compression: whether to enable compression (defaults to True)
++        :type compression: bool
++        :keyword deduplication: whether to enable deduplication (defaults to True)
++        :type deduplication: bool
++    """
++
++    def __init__(self, storage, **kwargs):
++        self.pool_name = kwargs.pop("pool_name", None)
++        self.virtual_size = kwargs.pop("virtual_size", None)
++        self.compression = kwargs.pop("compression", True)
++        self.deduplication = kwargs.pop("deduplication", True)
++        super(LVMVDOFactory, self).__init__(storage, **kwargs)
++
++    def _get_new_pool(self, *args, **kwargs):
++        kwargs["vdo_pool"] = True
++        return super(LVMVDOFactory, self)._get_new_device(*args, **kwargs)
++
++    def _set_device_size(self):
++        """ Set the size of the factory device. """
++        super(LVMVDOFactory, self)._set_device_size()
++
++        self.device.pool.size = self.size
++        self._reconfigure_container()
++
++        if not self.virtual_size or self.virtual_size < self.size:
++            # virtual_size is not set or is smaller than the current size --> use the pool size
++            self.device.size = self.size
++        else:
++            self.device.size = self.virtual_size
++
++    def _set_pool_name(self):
++        safe_new_name = self.storage.safe_device_name(self.pool_name)
++        if self.device.pool.name != safe_new_name:
++            if not safe_new_name:
++                log.error("not renaming '%s' to invalid name '%s'",
++                          self.device.pool.name, self.pool_name)
++                return
++            if safe_new_name in self.storage.names:
++                log.error("not renaming '%s' to in-use name '%s'",
++                          self.device.pool.name, safe_new_name)
++                return
++
++            log.debug("renaming device '%s' to '%s'",
++                      self.device.pool.name, safe_new_name)
++            self.device.pool.raw_device.name = safe_new_name
++
++    def _set_name(self):
++        super(LVMVDOFactory, self)._set_name()
++        if self.pool_name:
++            self._set_pool_name()
++
++    def _reconfigure_device(self):
++        super(LVMVDOFactory, self)._reconfigure_device()
++
++        self.device.pool.compression = self.compression
++        self.device.pool.deduplication = self.deduplication
++
++    #
++    # methods to configure the factory's device
++    #
++    def _get_new_device(self, *args, **kwargs):
++        """ Create and return the factory device as a StorageDevice. """
++        pool = self._get_new_pool(name=self.pool_name,
++                                  size=self.size,
++                                  parents=[self.vg],
++                                  compression=self.compression,
++                                  deduplication=self.deduplication)
++        self.storage.create_device(pool)
++
++        kwargs["parents"] = [pool]
++        kwargs["vdo_lv"] = True
++
++        if self.virtual_size:
++            vdolv_kwargs = kwargs.copy()
++            vdolv_kwargs["size"] = self.virtual_size
++        else:
++            vdolv_kwargs = kwargs
++        return super(LVMVDOFactory, self)._get_new_device(*args, **vdolv_kwargs)
++
++
+ class MDFactory(DeviceFactory):
+
+     """ Factory for creating MD RAID devices.
""" +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index 08068779..7cdb51c5 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -4,6 +4,9 @@ + from decimal import Decimal + import os + ++import test_compat # pylint: disable=unused-import ++from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error ++ + import blivet + + from blivet import devicefactory +@@ -93,10 +96,12 @@ def _validate_factory_device(self, *args, **kwargs): + self.assertEqual(device.format.label, + kwargs.get('label')) + +- self.assertLessEqual(device.size, kwargs.get("size")) +- self.assertGreaterEqual(device.size, device.format.min_size) +- if device.format.max_size: +- self.assertLessEqual(device.size, device.format.max_size) ++ # sizes with VDO are special, we have a special check in LVMVDOFactoryTestCase._validate_factory_device ++ if device_type != devicefactory.DEVICE_TYPE_LVM_VDO: ++ self.assertLessEqual(device.size, kwargs.get("size")) ++ self.assertGreaterEqual(device.size, device.format.min_size) ++ if device.format.max_size: ++ self.assertLessEqual(device.size, device.format.max_size) + + self.assertEqual(device.encrypted, + kwargs.get("encrypted", False) or +@@ -115,7 +120,11 @@ def test_device_factory(self): + "mountpoint": '/factorytest'} + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) +- self.b.recursive_remove(device) ++ ++ if device.type == "lvmvdolv": ++ self.b.recursive_remove(device.pool) ++ else: ++ self.b.recursive_remove(device) + + if self.encryption_supported: + # Encrypt the leaf device +@@ -157,6 +166,12 @@ def test_device_factory(self): + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + ++ # change size up ++ kwargs["device"] = device ++ kwargs["size"] = Size("900 MiB") ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ + # Change LUKS version + kwargs["luks_version"] = "luks1" + device = self._factory_device(device_type, **kwargs) +@@ -179,7 +194,7 @@ def _get_size_delta(self, devices=None): + """ + return Size("1 MiB") + +- def test_get_free_disk_space(self): ++ def test_get_free_disk_space(self, *args): # pylint: disable=unused-argument + # get_free_disk_space should return the total free space on disks + kwargs = self._get_test_factory_args() + kwargs["size"] = Size("500 MiB") +@@ -206,7 +221,7 @@ def test_get_free_disk_space(self): + sum(d.size for d in self.b.disks) - device_space, + delta=self._get_size_delta(devices=[device])) + +- def test_normalize_size(self): ++ def test_normalize_size(self, *args): # pylint: disable=unused-argument + # _normalize_size should adjust target size to within the format limits + fstype = "ext2" + ext2 = get_format(fstype) +@@ -258,7 +273,7 @@ def test_default_factory_type(self): + factory = devicefactory.get_device_factory(self.b) + self.assertIsInstance(factory, devicefactory.LVMFactory) + +- def test_factory_defaults(self): ++ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + ctor_kwargs = self._get_test_factory_args() + factory = devicefactory.get_device_factory(self.b, self.device_type, **ctor_kwargs) + for setting, value in factory._default_settings.items(): +@@ -522,6 +537,103 @@ def _get_size_delta(self, devices=None): + return delta + + ++class LVMVDOFactoryTestCase(LVMFactoryTestCase): ++ device_class = LVMLogicalVolumeDevice ++ 
device_type = devicefactory.DEVICE_TYPE_LVM_VDO ++ encryption_supported = False ++ ++ def _validate_factory_device(self, *args, **kwargs): ++ super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args, ++ **kwargs) ++ device = args[0] ++ ++ if kwargs.get("encrypted", False): ++ vdolv = device.parents[0] ++ else: ++ vdolv = device ++ ++ self.assertTrue(hasattr(vdolv, "pool")) ++ ++ virtual_size = kwargs.get("virtual_size", 0) ++ if virtual_size: ++ self.assertEqual(vdolv.size, virtual_size) ++ else: ++ self.assertEqual(vdolv.size, vdolv.pool.size) ++ self.assertGreaterEqual(vdolv.size, vdolv.pool.size) ++ ++ compression = kwargs.get("compression", True) ++ self.assertEqual(vdolv.pool.compression, compression) ++ ++ deduplication = kwargs.get("deduplication", True) ++ self.assertEqual(vdolv.pool.deduplication, deduplication) ++ ++ pool_name = kwargs.get("pool_name", None) ++ if pool_name: ++ self.assertEqual(vdolv.pool.lvname, pool_name) ++ ++ return device ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ ++ device_type = self.device_type ++ kwargs = {"disks": self.b.disks, ++ "size": Size("400 MiB"), ++ "fstype": 'ext4', ++ "mountpoint": '/factorytest'} ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ self.b.recursive_remove(device.pool) ++ ++ kwargs = {"disks": self.b.disks, ++ "size": Size("400 MiB"), ++ "fstype": 'ext4', ++ "mountpoint": '/factorytest', ++ "pool_name": "vdopool", ++ "deduplication": True, ++ "compression": True} ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change size without specifying virtual_size: both sizes should grow ++ kwargs["size"] = Size("600 MiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change virtual size ++ kwargs["virtual_size"] = Size("6 GiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change virtual size to smaller than size ++ kwargs["virtual_size"] = Size("500 GiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change deduplication and compression ++ kwargs["deduplication"] = False ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ kwargs["compression"] = False ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # rename the pool ++ kwargs["pool_name"] = "vdopool2" ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ + class MDFactoryTestCase(DeviceFactoryTestCase): + 
device_type = devicefactory.DEVICE_TYPE_MD
+     device_class = MDRaidArrayDevice
+
+From 22ba2b96111d5f153a3b55d3c56d84e597cf9a90 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Mon, 2 Nov 2020 14:33:06 +0100
+Subject: [PATCH 09/17] Add VM test for LVM VDO
+
+---
+ tests/vmtests/blivet_reset_vmtest.py | 15 +++++++++++++++
+ tests/vmtests/runvmtests.py          |  3 ++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/tests/vmtests/blivet_reset_vmtest.py b/tests/vmtests/blivet_reset_vmtest.py
+index 8743d51e..47fc84c4 100644
+--- a/tests/vmtests/blivet_reset_vmtest.py
++++ b/tests/vmtests/blivet_reset_vmtest.py
+@@ -192,6 +192,21 @@ def setUp(self):
+         self.collect_expected_data()
+
+
++class LVMVDOTestCase(BlivetResetTestCase):
++
++    def _set_up_storage(self):
++        if not devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
++            self.skipTest("VDO not supported, skipping")
++
++        self.blivet.factory_device(devicefactory.DEVICE_TYPE_LVM_VDO,
++                                   size=Size("10 GiB"),
++                                   fstype="ext4",
++                                   disks=self.blivet.disks[:],
++                                   name="vdolv",
++                                   pool_name="vdopool",
++                                   virtual_size=Size("40 GiB"))
++
++
+ @unittest.skip("temporarily disabled due to issues with raids with metadata version 0.90")
+ class MDRaid0TestCase(BlivetResetTestCase):
+
+diff --git a/tests/vmtests/runvmtests.py b/tests/vmtests/runvmtests.py
+index 88143d3a..6f20484f 100644
+--- a/tests/vmtests/runvmtests.py
++++ b/tests/vmtests/runvmtests.py
+@@ -12,7 +12,8 @@
+          "tests.vmtests.blivet_reset_vmtest.LVMThinSnapShotTestCase",
+          "tests.vmtests.blivet_reset_vmtest.LVMRaidTestCase",
+          "tests.vmtests.blivet_reset_vmtest.MDRaid0TestCase",
+-         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase"]
++         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase",
++         "tests.vmtests.blivet_reset_vmtest.LVMVDOTestCase"]
+
+ SNAP_NAME = "snapshot"
+
+
+From 52b37bb86e856f1ede71f7cceb7284a639d741f4 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:07:17 +0100
+Subject: [PATCH 10/17] Allow adding nodiscard option when running mkfs
+
+For filesystems that support it we might want to pass the nodiscard
+option to mkfs when creating a format on devices like LVM VDO
+volumes, where discard is very slow and doesn't really make sense
+when running mkfs.
+---
+ blivet/formats/fs.py               | 12 +++++-
+ blivet/tasks/fsmkfs.py             | 59 +++++++++++++++++++++++++++---
+ tests/formats_test/methods_test.py |  3 +-
+ 3 files changed, 66 insertions(+), 8 deletions(-)
+
+diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
+index 4ba83e6d..e61e5b86 100644
+--- a/blivet/formats/fs.py
++++ b/blivet/formats/fs.py
+@@ -132,6 +132,7 @@ def __init__(self, **kwargs):
+         self.mountopts = kwargs.get("mountopts", "")
+         self.label = kwargs.get("label")
+         self.fsprofile = kwargs.get("fsprofile")
++        self._mkfs_nodiscard = kwargs.get("nodiscard", False)
+
+         self._user_mountopts = self.mountopts
+
+@@ -263,6 +264,14 @@ def label_format_ok(self, label):
+     label = property(lambda s: s._get_label(), lambda s, l: s._set_label(l),
+                      doc="this filesystem's label")
+
++    def can_nodiscard(self):
++        """Returns True if this filesystem supports the nodiscard option during
++        creation, otherwise False.
++
++        :rtype: bool
++        """
++        return self._mkfs.can_nodiscard and self._mkfs.available
++
+     def can_set_uuid(self):
+         """Returns True if this filesystem supports setting an UUID during
+         creation, otherwise False.
+@@ -402,7 +411,8 @@ def _create(self, **kwargs):
+         try:
+             self._mkfs.do_task(options=kwargs.get("options"),
+                                label=not self.relabels(),
+-                               set_uuid=self.can_set_uuid())
++                               set_uuid=self.can_set_uuid(),
++                               nodiscard=self.can_nodiscard())
+         except FSWriteLabelError as e:
+             log.warning("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem.", self.label, self.type)
+         except FSWriteUUIDError as e:
+diff --git a/blivet/tasks/fsmkfs.py b/blivet/tasks/fsmkfs.py
+index ad166aa0..c982f7e7 100644
+--- a/blivet/tasks/fsmkfs.py
++++ b/blivet/tasks/fsmkfs.py
+@@ -37,6 +37,7 @@ class FSMkfsTask(fstask.FSTask):
+
+     can_label = abc.abstractproperty(doc="whether this task labels")
+     can_set_uuid = abc.abstractproperty(doc="whether this task can set UUID")
++    can_nodiscard = abc.abstractproperty(doc="whether this task can set the nodiscard option")
+
+
+ @add_metaclass(abc.ABCMeta)
+@@ -48,6 +49,9 @@ class FSMkfs(task.BasicApplication, FSMkfsTask):
+
+     label_option = abc.abstractproperty(
+         doc="Option for setting a filesystem label.")
+
++    nodiscard_option = abc.abstractproperty(
++        doc="Option for setting the nodiscard option for mkfs.")
++
+     args = abc.abstractproperty(doc="options for creating filesystem")
+
+     @abc.abstractmethod
+@@ -80,6 +84,15 @@ def can_set_uuid(self):
+         """
+         return self.get_uuid_args is not None
+
++    @property
++    def can_nodiscard(self):
++        """Whether this task can set the nodiscard option for a filesystem.
++
++        :returns: True if nodiscard can be set
++        :rtype: bool
++        """
++        return self.nodiscard_option is not None
++
+     @property
+     def _label_options(self):
+         """ Any labeling options that a particular filesystem may use.
+@@ -100,6 +113,23 @@ def _label_options(self):
+         else:
+             raise FSWriteLabelError("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem." % (self.fs.label, self.fs.type))
+
++    @property
++    def _nodiscard_option(self):
++        """ Any nodiscard options that a particular filesystem may use.
++
++        :returns: nodiscard options
++        :rtype: list of str
++        """
++        # Do not know how to set nodiscard while formatting.
++        if self.nodiscard_option is None:
++            return []
++
++        # nodiscard option not requested
++        if not self.fs._mkfs_nodiscard:
++            return []
++
++        return self.nodiscard_option
++
+     @property
+     def _uuid_options(self):
+         """Any UUID options that a particular filesystem may use.
+@@ -119,7 +149,7 @@ def _uuid_options(self):
+                                  " is unacceptable for this filesystem."
+                                  % (self.fs.uuid, self.fs.type))
+
+-    def _format_options(self, options=None, label=False, set_uuid=False):
++    def _format_options(self, options=None, label=False, set_uuid=False, nodiscard=False):
+         """Get a list of format options to be used when creating the
+            filesystem.
+
+@@ -135,11 +165,12 @@ def _format_options(self, options=None, label=False, set_uuid=False):
+
+         label_options = self._label_options if label else []
+         uuid_options = self._uuid_options if set_uuid else []
++        nodiscard_option = self._nodiscard_option if nodiscard else []
+         create_options = shlex.split(self.fs.create_options or "")
+         return (options + self.args + label_options + uuid_options +
+-                create_options + [self.fs.device])
++                nodiscard_option + create_options + [self.fs.device])
+
+-    def _mkfs_command(self, options, label, set_uuid):
++    def _mkfs_command(self, options, label, set_uuid, nodiscard):
+         """Return the command to make the filesystem.
+ + :param options: any special options +@@ -148,12 +179,14 @@ def _mkfs_command(self, options, label, set_uuid): + :type label: bool + :param set_uuid: whether to set an UUID + :type set_uuid: bool ++ :param nodiscard: whether to run mkfs with nodiscard option ++ :type nodiscard: bool + :returns: the mkfs command + :rtype: list of str + """ +- return [str(self.ext)] + self._format_options(options, label, set_uuid) ++ return [str(self.ext)] + self._format_options(options, label, set_uuid, nodiscard) + +- def do_task(self, options=None, label=False, set_uuid=False): ++ def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False): + """Create the format on the device and label if possible and desired. + + :param options: any special options, may be None +@@ -168,7 +201,7 @@ def do_task(self, options=None, label=False, set_uuid=False): + raise FSError("\n".join(error_msgs)) + + options = options or [] +- cmd = self._mkfs_command(options, label, set_uuid) ++ cmd = self._mkfs_command(options, label, set_uuid, nodiscard) + try: + ret = util.run_program(cmd) + except OSError as e: +@@ -181,6 +214,7 @@ def do_task(self, options=None, label=False, set_uuid=False): + class BTRFSMkfs(FSMkfs): + ext = availability.MKFS_BTRFS_APP + label_option = None ++ nodiscard_option = ["--nodiscard"] + + def get_uuid_args(self, uuid): + return ["-U", uuid] +@@ -193,6 +227,7 @@ def args(self): + class Ext2FSMkfs(FSMkfs): + ext = availability.MKE2FS_APP + label_option = "-L" ++ nodiscard_option = ["-E", "nodiscard"] + + _opts = [] + +@@ -215,6 +250,7 @@ class Ext4FSMkfs(Ext3FSMkfs): + class FATFSMkfs(FSMkfs): + ext = availability.MKDOSFS_APP + label_option = "-n" ++ nodiscard_option = None + + def get_uuid_args(self, uuid): + return ["-i", uuid.replace('-', '')] +@@ -227,6 +263,7 @@ def args(self): + class GFS2Mkfs(FSMkfs): + ext = availability.MKFS_GFS2_APP + label_option = None ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -237,6 +274,7 @@ def args(self): + class HFSMkfs(FSMkfs): + ext = availability.HFORMAT_APP + label_option = "-l" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -247,6 +285,7 @@ def args(self): + class HFSPlusMkfs(FSMkfs): + ext = availability.MKFS_HFSPLUS_APP + label_option = "-v" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -257,6 +296,7 @@ def args(self): + class JFSMkfs(FSMkfs): + ext = availability.MKFS_JFS_APP + label_option = "-L" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -267,6 +307,7 @@ def args(self): + class NTFSMkfs(FSMkfs): + ext = availability.MKNTFS_APP + label_option = "-L" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -277,6 +318,7 @@ def args(self): + class ReiserFSMkfs(FSMkfs): + ext = availability.MKREISERFS_APP + label_option = "-l" ++ nodiscard_option = None + + def get_uuid_args(self, uuid): + return ["-u", uuid] +@@ -289,6 +331,7 @@ def args(self): + class XFSMkfs(FSMkfs): + ext = availability.MKFS_XFS_APP + label_option = "-L" ++ nodiscard_option = ["-K"] + + def get_uuid_args(self, uuid): + return ["-m", "uuid=" + uuid] +@@ -307,3 +350,7 @@ def can_label(self): + @property + def can_set_uuid(self): + return False ++ ++ @property ++ def can_nodiscard(self): ++ return False +diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py +index 710fa1c5..b2674ea7 100644 +--- a/tests/formats_test/methods_test.py ++++ b/tests/formats_test/methods_test.py +@@ -307,7 +307,8 @@ def _test_create_backend(self): + 
self.format._mkfs.do_task.assert_called_with(
+             options=None,
+             label=not self.format.relabels(),
+-            set_uuid=self.format.can_set_uuid()
++            set_uuid=self.format.can_set_uuid(),
++            nodiscard=self.format.can_nodiscard()
+         )
+
+     def _test_setup_backend(self):
+
+From ac04f74fa9bc8ded3facd302ca74ec033009a0bd Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:19:21 +0100
+Subject: [PATCH 11/17] Add nodiscard option by default when creating VDO
+ logical volumes
+
+The user can override this by passing "nodiscard=False" to the LV
+constructor, but we want nodiscard by default.
+---
+ blivet/blivet.py            | 8 +++++++-
+ blivet/devicefactory.py     | 6 ++++++
+ tests/devicefactory_test.py | 7 +++++++
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/blivet/blivet.py b/blivet/blivet.py
+index 754eb152..e4115691 100644
+--- a/blivet/blivet.py
++++ b/blivet/blivet.py
+@@ -613,9 +613,15 @@ def new_lv(self, *args, **kwargs):
+
+         mountpoint = kwargs.pop("mountpoint", None)
+         if 'fmt_type' in kwargs:
++            fmt_args = kwargs.pop("fmt_args", {})
++            if vdo_lv and "nodiscard" not in fmt_args.keys():
++                # we don't want to run discard on a VDO LV during mkfs, so unless
++                # the user tells us not to, we add the nodiscard option to mkfs
++                fmt_args["nodiscard"] = True
++
+             kwargs["fmt"] = get_format(kwargs.pop("fmt_type"),
+                                        mountpoint=mountpoint,
+-                                       **kwargs.pop("fmt_args", {}))
++                                       **fmt_args)
+
+         name = kwargs.pop("name", None)
+         if name:
+diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
+index c95037cc..085f2fd6 100644
+--- a/blivet/devicefactory.py
++++ b/blivet/devicefactory.py
+@@ -1811,6 +1811,12 @@ def _reconfigure_device(self):
+         self.device.pool.compression = self.compression
+         self.device.pool.deduplication = self.deduplication
+
++    def _set_format(self):
++        super(LVMVDOFactory, self)._set_format()
++
++        # preserve nodiscard mkfs option after changing filesystem
++        self.device.format._mkfs_nodiscard = True
++
+     #
+     # methods to configure the factory's device
+     #
+diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
+index 7cdb51c5..4de1e05b 100644
+--- a/tests/devicefactory_test.py
++++ b/tests/devicefactory_test.py
+@@ -571,6 +571,10 @@ def _validate_factory_device(self, *args, **kwargs):
+         if pool_name:
+             self.assertEqual(vdolv.pool.lvname, pool_name)
+
++        # nodiscard should always be set for the VDO LV format
++        if vdolv.format.type:
++            self.assertTrue(vdolv.format._mkfs_nodiscard)
++
+         return device
+
+     @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+@@ -633,6 +637,9 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
+         device = self._factory_device(device_type, **kwargs)
+         self._validate_factory_device(device, device_type, **kwargs)
+
++        # change fstype
++        kwargs["fstype"] = "xfs"
++
+
+ class MDFactoryTestCase(DeviceFactoryTestCase):
+     device_type = devicefactory.DEVICE_TYPE_MD
+
+From 43f25ce84729c321d1ff2bbba2f50489f6d736b4 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:31:40 +0100
+Subject: [PATCH 12/17] Add LVM VDO example
+
+---
+ examples/lvm_vdo.py | 61 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 61 insertions(+)
+ create mode 100644 examples/lvm_vdo.py
+
+diff --git a/examples/lvm_vdo.py b/examples/lvm_vdo.py
+new file mode 100644
+index 00000000..ad081642
+--- /dev/null
++++ b/examples/lvm_vdo.py
+@@ -0,0 +1,61 @@
++import os
++
++import blivet
++from blivet.size import Size
++from blivet.util import set_up_logging,
create_sparse_tempfile
++
++set_up_logging()
++b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
++
++# create disk image files on which to create new devices
++disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
++b.disk_images["disk1"] = disk1_file
++disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
++b.disk_images["disk2"] = disk2_file
++
++b.reset()
++
++try:
++    disk1 = b.devicetree.get_device_by_name("disk1")
++    disk2 = b.devicetree.get_device_by_name("disk2")
++
++    b.initialize_disk(disk1)
++    b.initialize_disk(disk2)
++
++    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
++    b.create_device(pv)
++    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
++    b.create_device(pv2)
++
++    # allocate the partitions (decide where and on which disks they'll reside)
++    blivet.partitioning.do_partitioning(b)
++
++    vg = b.new_vg(parents=[pv, pv2])
++    b.create_device(vg)
++
++    # create an 80 GiB VDO pool
++    # there can be only one VDO LV on the pool, and the two are created together
++    # by a single LVM call; we have two separate devices because there are two
++    # block devices in the end, which allows controlling the "physical" size of
++    # the pool and the "logical" size of the VDO LV separately (the latter is
++    # usually bigger, accounting for space saved by deduplication/compression)
++    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
++                    deduplication=True, compression=True)
++    b.create_device(pool)
++
++    # create the VDO LV with a 400 GiB "virtual size" and an ext4 filesystem on
++    # the VDO pool
++    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
++                  fmt_type="ext4")
++    b.create_device(lv)
++
++    print(b.devicetree)
++
++    # write the new partitions to disk and format them as specified
++    b.do_it()
++    print(b.devicetree)
++    input("Check the state and hit ENTER to trigger cleanup")
++finally:
++    b.devicetree.teardown_disk_images()
++    os.unlink(disk1_file)
++    os.unlink(disk2_file)
+
+From c487a1e6023b54f5beea8d99ba2f5da5d80590ee Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Wed, 25 Nov 2020 13:30:15 +0100
+Subject: [PATCH 13/17] Add LVM VDO documentation
+
+---
+ doc/lvmvdo.rst | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 86 insertions(+)
+ create mode 100644 doc/lvmvdo.rst
+
+diff --git a/doc/lvmvdo.rst b/doc/lvmvdo.rst
+new file mode 100644
+index 00000000..3965abd3
+--- /dev/null
++++ b/doc/lvmvdo.rst
+@@ -0,0 +1,86 @@
++LVM VDO support
++===============
++
++Support for creating LVM VDO devices has been added in Blivet 3.4.
++
++These devices are similar to LVM thinly provisioned volumes, but there are some special steps
++and limitations when creating them, which this document describes.
++
++LVM VDO in Blivet
++-----------------
++
++LVM VDO devices are represented by two ``LVMLogicalVolumeDevice`` devices:
++
++- the VDO pool logical volume with type 'lvmvdopool'
++- the VDO logical volume with type 'lvmvdolv', which is the child of the VDO pool device
++
++An existing LVM VDO setup in Blivet:
++
++    existing 20 GiB disk vdb (265) with existing msdos disklabel
++      existing 20 GiB partition vdb1 (275) with existing lvmpv
++        existing 20 GiB lvmvg data (284)
++          existing 10 GiB lvmvdopool data-vdopool (288)
++            existing 50 GiB lvmvdolv data-vdolv (295)
++
++When creating an LVM VDO setup using Blivet, these two devices must be created together, as they
++are created by a single LVM command.
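++
++Because LVM VDO support in libblockdev is optional, its availability can be checked before
++attempting to create the devices. A minimal sketch (assuming only the ``devicefactory`` module
++from this patch set; the same check is used by the VM test suite):
++
++    from blivet import devicefactory
++
++    # True only if the libblockdev LVM plugin is loaded with the VDO technology available
++    if devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
++        print("LVM VDO is supported")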
++
++It currently isn't possible to create additional VDO logical volumes in the pool. It is, however,
++possible to create multiple VDO pools in a single volume group.
++
++Deduplication and compression are properties of the VDO pool. The size specified for the VDO pool
++volume will be used as the "physical" size of the pool, and the size specified for the VDO logical
++volume will be used as the "virtual" size of the VDO volume.
++
++When creating a format, it must be created on the VDO logical volume. For filesystems with discard
++support, the nodiscard option will be automatically added when calling the ``mkfs`` command
++(e.g. ``-K`` for ``mkfs.xfs``).
++
++Example of creating an *80 GiB* VDO pool and a *400 GiB* VDO logical volume with an *ext4* format,
++with both deduplication and compression enabled:
++
++    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
++                    deduplication=True, compression=True)
++    b.create_device(pool)
++
++    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
++                  fmt_type="ext4")
++    b.create_device(lv)
++
++When removing existing LVM VDO devices, both devices must be removed from the devicetree, and the
++VDO logical volume must be removed first (``recursive_remove`` can be used to automate these two steps).
++
++Management of existing LVM VDO devices is currently not supported.
++
++
++LVM VDO in Devicefactory
++------------------------
++
++For top-down specified creation using device factories, a new ``LVMVDOFactory`` factory has been
++added. The factory device in this case is the VDO logical volume, which is again automatically
++created together with the VDO pool.
++
++Example of creating a new LVM VDO setup using the ``devicefactory`` module:
++
++    factory = blivet.devicefactory.LVMVDOFactory(b, size=Size("5 GiB"), virtual_size=Size("50 GiB"),
++                                                 disks=disks, fstype="xfs",
++                                                 container_name="data",
++                                                 pool_name="myvdopool",
++                                                 compression=True, deduplication=True)
++    factory.configure()
++    factory.device
++
++    LVMLogicalVolumeDevice instance (0x7f14d17422b0) --
++        name = data-00  status = False  id = 528
++        children = []
++        parents = ['non-existent 5 GiB lvmvdopool data-myvdopool (519)']
++        ...
++
++``size`` in this case sets the pool ("physical") size; the VDO logical volume size can be specified
++with ``virtual_size`` (if not specified, it will be the same as the pool size). A name for the VDO
++volume can be specified using the ``name`` keyword argument. The ``pool_name`` argument is optional,
++and a unique name will be generated if it is omitted. Both ``compression`` and ``deduplication``
++default to ``True`` (enabled) if not specified.
++
++This factory can create only a single VDO logical volume in a single VDO pool, but additional VDO
++pools can be added by repeating the steps used to create the first one.
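++
++A setup created by the factory can be torn down again through the pool. A minimal sketch
++(assuming the ``factory`` instance from the example above; ``recursive_remove`` schedules the
++removal of the VDO logical volume first and then of the pool itself):
++
++    b.recursive_remove(factory.device.pool)
++    b.do_it()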
+ +From c6c776cf137b5c6ae454487df469e9a6dba8a5d1 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 9 Dec 2020 14:06:27 +0100 +Subject: [PATCH 14/17] Set minimum size for LVM VDO pool devices + +--- + blivet/devicefactory.py | 3 +++ + blivet/devices/lvm.py | 26 ++++++++++++++++++++++++++ + tests/devicefactory_test.py | 29 ++++++++++++++++++++--------- + tests/devices_test/lvm_test.py | 6 ++++++ + 4 files changed, 55 insertions(+), 9 deletions(-) + +diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py +index 085f2fd6..5e47eb9a 100644 +--- a/blivet/devicefactory.py ++++ b/blivet/devicefactory.py +@@ -277,6 +277,7 @@ class DeviceFactory(object): + "container_size": SIZE_POLICY_AUTO, + "container_raid_level": None, + "container_encrypted": None} ++ _device_min_size = Size(0) # no limit by default, limited only by filesystem size + + def __init__(self, storage, **kwargs): + """ +@@ -1760,6 +1761,8 @@ class LVMVDOFactory(LVMFactory): + :type deduplication: bool + """ + ++ _device_min_size = LVMVDOPoolMixin._min_size ++ + def __init__(self, storage, **kwargs): + self.pool_name = kwargs.pop("pool_name", None) + self.virtual_size = kwargs.pop("virtual_size", None) +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 0802e2de..785fa2d2 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1792,6 +1792,7 @@ def populate_ksdata(self, data): + class LVMVDOPoolMixin(object): + + _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ _min_size = Size("5 GiB") # 2.5 GiB for index and one 2 GiB slab rounded up to 5 GiB + + def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None): + self.compression = compression +@@ -1800,6 +1801,9 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p + self.write_policy = write_policy + self._lvs = [] + ++ if not self.exists and self.size < self.min_size: ++ raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size)) ++ + @property + def is_vdo_pool(self): + return self.seg_type == "vdo-pool" +@@ -1856,6 +1860,23 @@ def direct(self): + """ Is this device directly accessible? 
""" + return False + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def min_size(self): ++ if self.exists: ++ return self.current_size ++ ++ return self._min_size ++ ++ def _set_size(self, newsize): ++ if not isinstance(newsize, Size): ++ raise AttributeError("new size must of type Size") ++ ++ if newsize < self.min_size: ++ raise ValueError("Requested size %s is smaller than minimum %s" % (newsize, self.min_size)) ++ ++ DMDevice._set_size(self, newsize) ++ + def read_current_size(self): + log_method_call(self, exists=self.exists, path=self.path, + sysfs_path=self.sysfs_path) +@@ -2229,6 +2250,11 @@ def max_size(self): + max_format = self.format.max_size + return min(max_lv, max_format) if max_format else max_lv + ++ @property ++ @type_specific ++ def min_size(self): ++ return super(LVMLogicalVolumeDevice, self).min_size ++ + @property + @type_specific + def vg_space_used(self): +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index 4de1e05b..a1334cda 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -49,13 +49,18 @@ class DeviceFactoryTestCase(unittest.TestCase): + encryption_supported = True + """ whether encryption of this device type is supported by blivet """ + ++ factory_class = None ++ """ devicefactory class used in this test case """ ++ ++ _disk_size = Size("2 GiB") ++ + def setUp(self): + if self.device_type is None: + raise unittest.SkipTest("abstract base class") + + self.b = blivet.Blivet() # don't populate it +- self.disk_files = [create_sparse_tempfile("factorytest", Size("2 GiB")), +- create_sparse_tempfile("factorytest", Size("2 GiB"))] ++ self.disk_files = [create_sparse_tempfile("factorytest", self._disk_size), ++ create_sparse_tempfile("factorytest", self._disk_size)] + for filename in self.disk_files: + disk = DiskFile(filename) + self.b.devicetree._add_device(disk) +@@ -197,7 +202,7 @@ def _get_size_delta(self, devices=None): + def test_get_free_disk_space(self, *args): # pylint: disable=unused-argument + # get_free_disk_space should return the total free space on disks + kwargs = self._get_test_factory_args() +- kwargs["size"] = Size("500 MiB") ++ kwargs["size"] = max(Size("500 MiB"), self.factory_class._device_min_size) + factory = devicefactory.get_device_factory(self.b, + self.device_type, + disks=self.b.disks, +@@ -285,7 +290,7 @@ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + kwargs = self._get_test_factory_args() + kwargs.update({"disks": self.b.disks[:], + "fstype": "swap", +- "size": Size("2GiB"), ++ "size": max(Size("2GiB"), self.factory_class._device_min_size), + "label": "SWAP"}) + device = self._factory_device(self.device_type, **kwargs) + factory = devicefactory.get_device_factory(self.b, self.device_type, +@@ -302,6 +307,7 @@ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + class PartitionFactoryTestCase(DeviceFactoryTestCase): + device_class = PartitionDevice + device_type = devicefactory.DEVICE_TYPE_PARTITION ++ factory_class = devicefactory.PartitionFactory + + def test_bug1178884(self): + # Test a change of format and size where old size is too large for the +@@ -330,6 +336,7 @@ def _get_size_delta(self, devices=None): + class LVMFactoryTestCase(DeviceFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM ++ factory_class = devicefactory.LVMFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMFactoryTestCase, self)._validate_factory_device(*args, **kwargs) 
+@@ -510,6 +517,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM_THINP + encryption_supported = False ++ factory_class = devicefactory.LVMThinPFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMThinPFactoryTestCase, self)._validate_factory_device(*args, +@@ -541,6 +549,8 @@ class LVMVDOFactoryTestCase(LVMFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM_VDO + encryption_supported = False ++ _disk_size = Size("10 GiB") # we need bigger disks for VDO ++ factory_class = devicefactory.LVMVDOFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args, +@@ -585,7 +595,7 @@ def _validate_factory_device(self, *args, **kwargs): + def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ + device_type = self.device_type + kwargs = {"disks": self.b.disks, +- "size": Size("400 MiB"), ++ "size": Size("6 GiB"), + "fstype": 'ext4', + "mountpoint": '/factorytest'} + device = self._factory_device(device_type, **kwargs) +@@ -593,7 +603,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + self.b.recursive_remove(device.pool) + + kwargs = {"disks": self.b.disks, +- "size": Size("400 MiB"), ++ "size": Size("6 GiB"), + "fstype": 'ext4', + "mountpoint": '/factorytest', + "pool_name": "vdopool", +@@ -603,19 +613,19 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + self._validate_factory_device(device, device_type, **kwargs) + + # change size without specifying virtual_size: both sizes should grow +- kwargs["size"] = Size("600 MiB") ++ kwargs["size"] = Size("8 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + + # change virtual size +- kwargs["virtual_size"] = Size("6 GiB") ++ kwargs["virtual_size"] = Size("40 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + + # change virtual size to smaller than size +- kwargs["virtual_size"] = Size("500 GiB") ++ kwargs["virtual_size"] = Size("10 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) +@@ -644,6 +654,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + class MDFactoryTestCase(DeviceFactoryTestCase): + device_type = devicefactory.DEVICE_TYPE_MD + device_class = MDRaidArrayDevice ++ factory_class = devicefactory.MDFactory + + def test_device_factory(self): + # RAID0 across two disks +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 493d3ba1..78b140ba 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -705,6 +705,12 @@ def test_new_vdo_pool(self): + + self.assertEqual(vg.size, Size("10236 MiB")) + ++ with self.assertRaises(ValueError): ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("1 GiB")) ++ + vdopool = b.new_lv(name="vdopool", vdo_pool=True, + parents=[vg], compression=True, + deduplication=True, + +From 197f2877709e702c101ada6b9a055a88f09320c8 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 11 Dec 2020 14:20:48 +0100 
+Subject: [PATCH 15/17] Use better description for libblockdev plugins in
+ tasks.availability
+
+The old names were quite confusing when showing that "lvm" is
+missing when in fact the libblockdev LVM plugin is missing. Also with
+LVM VDO we need to be able to tell the difference between a missing
+LVM plugin and missing LVM VDO support.
+---
+ blivet/tasks/availability.py | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
+index b107428e..52418685 100644
+--- a/blivet/tasks/availability.py
++++ b/blivet/tasks/availability.py
+@@ -236,13 +236,13 @@ def availability_errors(self, resource):
+         :returns: [] if the name of the plugin is loaded
+         :rtype: list of str
+         """
+-        if resource.name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
+-            return ["libblockdev plugin %s not loaded" % resource.name]
++        if self._tech_info.plugin_name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
++            return ["libblockdev plugin %s not loaded" % self._tech_info.plugin_name]
+         else:
+             tech_missing = self._check_technologies()
+             if tech_missing:
+                 return ["libblockdev plugin %s is loaded but some required "
+-                        "technologies are not available:\n%s" % (resource.name, tech_missing)]
++                        "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
+             else:
+                 return []
+
+@@ -411,16 +411,16 @@ def available_resource(name):
+ # we can't just check if the plugin is loaded, we also need to make sure
+ # that all technologies required by us are supported (some may be missing
+ # due to missing dependencies)
+-BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
+-BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
+-BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
+-BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
+-BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
+-BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
+-BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
+-BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
+-BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
+-BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)
++BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("libblockdev btrfs plugin", BLOCKDEV_BTRFS_TECH)
++BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("libblockdev crypto plugin", BLOCKDEV_CRYPTO_TECH)
++BLOCKDEV_DM_PLUGIN = blockdev_plugin("libblockdev dm plugin", BLOCKDEV_DM_TECH)
++BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technology)", BLOCKDEV_DM_TECH_RAID)
++BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
++BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
++BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
++BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
++BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
++BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
+
+ # applications with versions
+ # we need e2fsprogs newer than 1.41 and we are checking the version by running
+
+From 5fc047b48b0de18fa249f102d2a7163ac2d6e6a6 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date:
Fri, 11 Dec 2020 14:24:18 +0100
+Subject: [PATCH 16/17] Fix external dependencies for LVM VDO devices
+
+The external and unavailable dependencies code is mostly supposed
+to work with just class objects and not instances, which is a problem
+for LVM devices: the LVMLogicalVolumeDevice can't depend on
+LVM VDO, and the special LVM VDO device mixin classes don't inherit
+from the Device class, so they are missing some availability
+functions.
+This fix adds the necessary functions to the LVM VDO mixin classes to
+make sure both "unavailable_type_dependencies" and
+"type_external_dependencies" work with LVMVDOLogicalVolumeMixin
+and LVMVDOPoolMixin. When working with an LVMLogicalVolumeDevice
+instance, its dependencies are correctly set based on the type of the
+logical volume.
+---
+ blivet/devicefactory.py        |   7 +--
+ blivet/devices/lvm.py          |  31 ++++++++++
+ tests/action_test.py           |   7 +++
+ tests/devicefactory_test.py    |  32 ++++++++++
+ tests/devices_test/lvm_test.py | 106 +++++++++++++++++++++++++++++++++
+ 5 files changed, 179 insertions(+), 4 deletions(-)
+
+diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
+index 5e47eb9a..b29a107a 100644
+--- a/blivet/devicefactory.py
++++ b/blivet/devicefactory.py
+@@ -27,7 +27,7 @@
+ from .devices import BTRFSDevice, DiskDevice
+ from .devices import LUKSDevice, LVMLogicalVolumeDevice
+ from .devices import PartitionDevice, MDRaidArrayDevice
+-from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
++from .devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin, DEFAULT_THPOOL_RESERVE
+ from .formats import get_format
+ from .devicelibs import btrfs
+ from .devicelibs import mdraid
+@@ -70,9 +70,6 @@ def is_supported_device_type(device_type):
+     :returns: True if this device type is supported
+     :rtype: bool
+     """
+-    if device_type == DEVICE_TYPE_LVM_VDO:
+-        return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
+-
+     devices = []
+     if device_type == DEVICE_TYPE_BTRFS:
+         devices = [BTRFSDevice]
+@@ -84,6 +81,8 @@ def is_supported_device_type(device_type):
+         devices = [PartitionDevice]
+     elif device_type == DEVICE_TYPE_MD:
+         devices = [MDRaidArrayDevice]
++    elif device_type == DEVICE_TYPE_LVM_VDO:
++        devices = [LVMLogicalVolumeDevice, LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]
+
+     return not any(c.unavailable_type_dependencies() for c in devices)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index 785fa2d2..ac900bf3 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -1804,6 +1804,17 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
+         if not self.exists and self.size < self.min_size:
+             raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))
+
++    # these two methods are defined in Device but LVMVDOPoolMixin doesn't inherit from
++    # it and we can't have this code in LVMLogicalVolumeDevice because we need to be able
++    # to get dependencies without creating instance of the class
++    @classmethod
++    def type_external_dependencies(cls):
++        return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
++
++    @classmethod
++    def unavailable_type_dependencies(cls):
++        return set(e for e in cls.type_external_dependencies() if not e.available)
++
+     @property
+     def is_vdo_pool(self):
+         return self.seg_type == "vdo-pool"
+@@ -1926,6 +1937,17 @@ def _check_parents(self):
+         if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
+             raise
ValueError("constructor requires a vdo-pool LV") + ++ # these two methods are defined in Device but LVMVDOLogicalVolumeMixin doesn't inherit ++ # from it and we can't have this code in LVMLogicalVolumeDevice because we need to be ++ # able to get dependencies without creating instance of the class ++ @classmethod ++ def type_external_dependencies(cls): ++ return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies() ++ ++ @classmethod ++ def unavailable_type_dependencies(cls): ++ return set(e for e in cls.type_external_dependencies() if not e.available) ++ + @property + def vg_space_used(self): + return Size(0) # the pool's size is already accounted for in the vg +@@ -2217,6 +2239,15 @@ def _convert_from_lvs(self): + """Convert the LVs to create this LV from into its internal LVs""" + raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type) + ++ @property ++ def external_dependencies(self): ++ deps = super(LVMLogicalVolumeBase, self).external_dependencies ++ if self.is_vdo_pool: ++ deps.update(LVMVDOPoolMixin.type_external_dependencies()) ++ if self.is_vdo_lv: ++ deps.update(LVMVDOLogicalVolumeMixin.type_external_dependencies()) ++ return deps ++ + @property + @type_specific + def vg(self): +diff --git a/tests/action_test.py b/tests/action_test.py +index 77176f46..38a2e872 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -18,6 +18,8 @@ + from blivet.devices import MDRaidArrayDevice + from blivet.devices import LVMVolumeGroupDevice + from blivet.devices import LVMLogicalVolumeDevice ++from blivet.devices.lvm import LVMVDOPoolMixin ++from blivet.devices.lvm import LVMVDOLogicalVolumeMixin + + # format classes + from blivet.formats.fs import Ext2FS +@@ -1252,6 +1254,11 @@ def test_lv_from_lvs_actions(self): + self.assertEqual(set(self.storage.lvs), {pool}) + self.assertEqual(set(pool._internal_lvs), {lv1, lv2}) + ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test") ++@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") ++class DeviceActionLVMVDOTestCase(DeviceActionTestCase): ++ + def test_lvm_vdo_destroy(self): + self.destroy_all_devices() + sdc = self.storage.devicetree.get_device_by_name("sdc") +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index a1334cda..e4210ead 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -592,6 +592,8 @@ def _validate_factory_device(self, *args, **kwargs): + @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) + @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) + @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) + def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ + device_type = self.device_type + kwargs = {"disks": self.b.disks, +@@ -650,6 +652,36 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + # change fstype + kwargs["fstype"] = "xfs" + ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) 
++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_factory_defaults(self, *args): # pylint: disable=unused-argument ++ super(LVMVDOFactoryTestCase, self).test_factory_defaults() ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_get_free_disk_space(self, *args): ++ super(LVMVDOFactoryTestCase, self).test_get_free_disk_space() ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_normalize_size(self, *args): # pylint: disable=unused-argument ++ super(LVMVDOFactoryTestCase, self).test_normalize_size() ++ + + class MDFactoryTestCase(DeviceFactoryTestCase): + device_type = devicefactory.DEVICE_TYPE_MD +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 78b140ba..d938144d 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -10,10 +10,13 @@ + from blivet.devices import StorageDevice + from blivet.devices import LVMLogicalVolumeDevice + from blivet.devices import LVMVolumeGroupDevice ++from blivet.devices.lvm import LVMVDOPoolMixin ++from blivet.devices.lvm import LVMVDOLogicalVolumeMixin + from blivet.devices.lvm import LVMCacheRequest + from blivet.devices.lvm import LVPVSpec, LVMInternalLVtype + from blivet.size import Size + from blivet.devicelibs import raid ++from blivet import devicefactory + from blivet import errors + + DEVICE_CLASSES = [ +@@ -690,6 +693,10 @@ def test_new_lv_from_non_existing_lvs(self): + pool.create() + self.assertTrue(lvm.thpool_convert.called) + ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test") ++class BlivetNewLVMVDODeviceTest(unittest.TestCase): ++ + def test_new_vdo_pool(self): + b = blivet.Blivet() + pv = StorageDevice("pv1", 
fmt=blivet.formats.get_format("lvmpv"), +@@ -726,3 +733,102 @@ def test_new_vdo_pool(self): + self.assertEqual(vdopool.children[0], vdolv) + self.assertEqual(vdolv.parents[0], vdopool) + self.assertListEqual(vg.lvs, [vdopool, vdolv]) ++ ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test") ++class BlivetLVMVDODependenciesTest(unittest.TestCase): ++ def test_vdo_dependencies(self): ++ blivet.tasks.availability.CACHE_AVAILABILITY = False ++ ++ b = blivet.Blivet() ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ ++ for dev in (pv, vg): ++ b.devicetree._add_device(dev) ++ ++ # check that all the above devices are in the expected places ++ self.assertEqual(set(b.devices), {pv, vg}) ++ self.assertEqual(set(b.vgs), {vg}) ++ ++ self.assertEqual(vg.size, Size("10236 MiB")) ++ ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("8 GiB")) ++ ++ vdolv = b.new_lv(name="vdolv", vdo_lv=True, ++ parents=[vdopool], ++ size=blivet.size.Size("40 GiB")) ++ ++ # Dependencies check: for VDO types these should be combination of "normal" ++ # LVM dependencies (LVM libblockdev plugin + kpartx and DM plugin from DMDevice) ++ # and LVM VDO technology from the LVM plugin ++ lvm_vdo_dependencies = ["kpartx", ++ "libblockdev dm plugin", ++ "libblockdev lvm plugin", ++ "libblockdev lvm plugin (vdo technology)"] ++ pool_deps = [d.name for d in vdopool.external_dependencies] ++ six.assertCountEqual(self, pool_deps, lvm_vdo_dependencies) ++ ++ vdolv_deps = [d.name for d in vdolv.external_dependencies] ++ six.assertCountEqual(self, vdolv_deps, lvm_vdo_dependencies) ++ ++ # same dependencies should be returned when checking with class not instance ++ pool_type_deps = [d.name for d in LVMVDOPoolMixin.type_external_dependencies()] ++ six.assertCountEqual(self, pool_type_deps, lvm_vdo_dependencies) ++ ++ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.type_external_dependencies()] ++ six.assertCountEqual(self, vdolv_type_deps, lvm_vdo_dependencies) ++ ++ # just to be sure LVM VDO specific code didn't break "normal" LVs ++ normallv = b.new_lv(name="lvol0", ++ parents=[vg], ++ size=blivet.size.Size("1 GiB")) ++ ++ normalvl_deps = [d.name for d in normallv.external_dependencies] ++ six.assertCountEqual(self, normalvl_deps, ["kpartx", ++ "libblockdev dm plugin", ++ "libblockdev lvm plugin"]) ++ ++ with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ ++ pool_deps = [d.name for d in vdopool.unavailable_dependencies] ++ self.assertEqual(pool_deps, ["VDO unavailability test"]) ++ ++ vdolv_deps = [d.name for d in vdolv.unavailable_dependencies] ++ self.assertEqual(vdolv_deps, ["VDO unavailability test"]) ++ ++ # same dependencies should be returned when checking with class not instance ++ pool_type_deps = [d.name for d in LVMVDOPoolMixin.unavailable_type_dependencies()] ++ six.assertCountEqual(self, pool_type_deps, ["VDO unavailability test"]) ++ ++ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.unavailable_type_dependencies()] ++ 
six.assertCountEqual(self, vdolv_type_deps, ["VDO unavailability test"]) ++ ++ normallv_deps = [d.name for d in normallv.unavailable_dependencies] ++ self.assertEqual(normallv_deps, []) ++ ++ with self.assertRaises(errors.DependencyError): ++ b.create_device(vdopool) ++ b.create_device(vdolv) ++ ++ b.create_device(normallv) ++ ++ def test_vdo_dependencies_devicefactory(self): ++ with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ ++ # shouldn't affect "normal" LVM ++ lvm_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM) ++ self.assertTrue(lvm_supported) ++ ++ vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO) ++ self.assertFalse(vdo_supported) + +From c7fb125ec552ee5070f8180f92fe5545709192ff Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 11 Dec 2020 15:02:05 +0100 +Subject: [PATCH 17/17] Bump required libblockdev version to 2.24 + +LVM VDO support was added in 2.24. +--- + python-blivet.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/python-blivet.spec b/python-blivet.spec +index ffd4210e..58cad0b2 100644 +--- a/python-blivet.spec ++++ b/python-blivet.spec +@@ -36,7 +36,7 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver + %global partedver 1.8.1 + %global pypartedver 3.10.4 + %global utillinuxver 2.15.1 +-%global libblockdevver 2.19 ++%global libblockdevver 2.24 + %global libbytesizever 0.3 + %global pyudevver 0.18 + diff --git a/SOURCES/0020-apply-directory-s-SELinux-context-to-freshly-created.patch b/SOURCES/0020-apply-directory-s-SELinux-context-to-freshly-created.patch new file mode 100644 index 0000000..bdae03b --- /dev/null +++ b/SOURCES/0020-apply-directory-s-SELinux-context-to-freshly-created.patch @@ -0,0 +1,83 @@ +From dabc5f8a29f34aebd9ea61405d822b5d5bef1ec2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Renaud=20M=C3=A9trich?= +Date: Tue, 2 Mar 2021 14:13:29 +0100 +Subject: [PATCH] apply directory's SELinux context to freshly created mount + points +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Signed-off-by: Renaud Métrich + +Resolves: rhbz#1934076 +--- + blivet/formats/fs.py | 3 ++- + blivet/util.py | 9 +++++---- + 2 files changed, 7 insertions(+), 5 deletions(-) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index e61e5b86..a92d3485 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -27,6 +27,7 @@ import os + import tempfile + import uuid as uuid_mod + import random ++import stat + + from parted import fileSystemType, PARTITION_BOOT + +@@ -582,7 +583,7 @@ class FS(DeviceFormat): + mountpoint = kwargs.get("mountpoint") or self.mountpoint + + if self._selinux_supported and flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon: +- ret = util.reset_file_context(mountpoint, chroot) ++ ret = util.reset_file_context(mountpoint, chroot, stat.S_IFDIR) + if not ret: + log.warning("Failed to reset SElinux context for newly mounted filesystem root directory to default.") + +diff --git a/blivet/util.py b/blivet/util.py +index 48b7818f..f5e0cc1a 100644 +--- a/blivet/util.py ++++ b/blivet/util.py +@@ -448,11 +448,11 @@ def get_cow_sysfs_path(dev_path, 
dev_sysfsPath):
+ ##
+
+
+-def match_path_context(path):
++def match_path_context(path, mode=0):
+ """ Return the default SELinux context for the given path. """
+ context = None
+ try:
+- context = selinux.matchpathcon(os.path.normpath(path), 0)[1]
++ context = selinux.matchpathcon(os.path.normpath(path), mode)[1]
+ except OSError as e:
+ log.info("failed to get default SELinux context for %s: %s", path, e)
+
+@@ -491,7 +491,7 @@ def set_file_context(path, context, root=None):
+ return rc
+
+
+-def reset_file_context(path, root=None):
++def reset_file_context(path, root=None, mode=0):
+ """ Restore the SELinux context of a file to its default value.
+
+ Arguments:
+
+@@ -501,12 +501,13 @@ def reset_file_context(path, root=None):
+ Keyword Arguments:
+
+ root an optional chroot string
++ mode an optional file type mode (e.g. stat.S_IFDIR) passed to matchpathcon
+
+ Return Value:
+
+ If successful, returns the file's new/default context.
+ """
+- context = match_path_context(path)
++ context = match_path_context(path, mode)
+ if context:
+ if set_file_context(path, context, root=root):
+ return context
+--
+2.29.2
+
diff --git a/SPECS/python-blivet.spec b/SPECS/python-blivet.spec
index da3276d..ed6240c 100644
--- a/SPECS/python-blivet.spec
+++ b/SPECS/python-blivet.spec
@@ -23,7 +23,7 @@ Version: 3.2.2
 #%%global prerelease .b2
 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 6%{?prerelease}%{?dist}
+Release: 10%{?prerelease}%{?dist}
 Epoch: 1
 License: LGPLv2+
 Group: System Environment/Libraries
@@ -42,6 +42,15 @@ Patch7: 0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch
 Patch8: 0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch
 Patch9: 0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch
 Patch10: 0011-Fix-ignoring-disk-devices-with-parents-or-children.patch
+Patch11: 0012-xfs-grow-support.patch
+Patch12: 0013-Do-not-limit-swap-to-128-GiB.patch
+Patch13: 0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch
+Patch14: 0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch
+Patch15: 0016-Basic-LVM-VDO-support.patch
+Patch16: 0017-Let-parted-fix-fixable-issues-with-partition-table.patch
+Patch17: 0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch
+Patch18: 0019-LVM-VDO-support.patch
+Patch19: 0020-apply-directory-s-SELinux-context-to-freshly-created.patch

 # Versions of required components (done so we make sure the buildrequires
 # match the requires versions of things).
@@ -203,6 +212,32 @@ configuration.
 %endif

 %changelog
+* Mon Mar 08 2021 Vojtech Trefny - 3.2.2-10
+- apply directory's SELinux context to freshly created mount points
+  Resolves: rhbz#1934076
+
+* Tue Feb 9 2021 Vojtech Trefny - 3.2.2-9
+- LVM VDO support
+  Resolves: rhbz#1509337
+
+* Mon Jan 11 2021 Vojtech Trefny - 3.2.2-8
+- Let parted fix fixable issues with partition table
+  Resolves: rhbz#1846869
+- Fix possible UnicodeDecodeError when reading sysfs attributes
+  Resolves: rhbz#1849326
+
+* Wed Nov 18 2020 Vojtech Trefny - 3.2.2-7
+- Add support for XFS format grow
+  Resolves: rhbz#1862349
+- Do not limit swap to 128 GiB
+  Resolves: rhbz#1656485
+- Use UnusableConfigurationError for partially hidden multipath devices
+  Resolves: rhbz#1877052
+- Fix possible UnicodeDecodeError when reading model from sysfs
+  Resolves: rhbz#1849326
+- Add basic support for LVM VDO devices
+  Resolves: rhbz#1828745
+
 * Thu Aug 20 2020 Vojtech Trefny - 3.2.2-6
 - Fix name resolution for MD devices and partitions on them
   Resolves: rhbz#1862904
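
For context on the mode argument threaded through reset_file_context() in the
0020-apply-directory-s-SELinux-context-to-freshly-created.patch above:
selinux.matchpathcon() accepts a file type mode alongside the path, so passing
stat.S_IFDIR restricts the policy lookup to fcontext rules that apply to
directories, while the previous hard-coded 0 matched rules for any file type.
A minimal sketch of that lookup, grounded only in the calls the patch itself
makes; the helper name default_mountpoint_context is invented here for
illustration and appears in no patch:

    import os
    import stat

    import selinux

    def default_mountpoint_context(path):
        # stat.S_IFDIR limits matchpathcon() to directory-specific fcontext
        # rules; index [1] selects the matched context string, exactly as the
        # patched match_path_context() uses it.
        return selinux.matchpathcon(os.path.normpath(path), stat.S_IFDIR)[1]

This is what lets blivet give a freshly created mount point directory the same
default context that restorecon would assign to the directory (rhbz#1934076).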