neil / rpms / python-blivet

Forked from rpms/python-blivet a year ago
Clone
Blob Blame History Raw
From 3f6bbf52442609b8e6e3919a3fdd8c5af64923e6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:48:41 +0200
Subject: [PATCH 1/3] Add basic support for LVM VDO devices

This adds support for LVM VDO devices detection during populate
and allows removing both VDO LVs and VDO pools using actions.
---
 blivet/devices/lvm.py           | 150 +++++++++++++++++++++++++++++++-
 blivet/populator/helpers/lvm.py |  16 +++-
 tests/action_test.py            |  39 +++++++++
 tests/devices_test/lvm_test.py  |  34 ++++++++
 tests/storagetestcase.py        |  11 ++-
 5 files changed, 245 insertions(+), 5 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 97de6acd..d9e24a33 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1789,8 +1789,132 @@ class LVMThinLogicalVolumeMixin(object):
         data.pool_name = self.pool.lvname
 
 
+class LVMVDOPoolMixin(object):
+    def __init__(self):
+        self._lvs = []
+
+    @property
+    def is_vdo_pool(self):
+        return self.seg_type == "vdo-pool"
+
+    @property
+    def type(self):
+        return "lvmvdopool"
+
+    @property
+    def resizable(self):
+        return False
+
+    @util.requires_property("is_vdo_pool")
+    def _add_log_vol(self, lv):
+        """ Add an LV to this VDO pool. """
+        if lv in self._lvs:
+            raise ValueError("lv is already part of this VDO pool")
+
+        self.vg._add_log_vol(lv)
+        log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
+        self._lvs.append(lv)
+
+    @util.requires_property("is_vdo_pool")
+    def _remove_log_vol(self, lv):
+        """ Remove an LV from this VDO pool. """
+        if lv not in self._lvs:
+            raise ValueError("specified lv is not part of this VDO pool")
+
+        self._lvs.remove(lv)
+        self.vg._remove_log_vol(lv)
+
+    @property
+    @util.requires_property("is_vdo_pool")
+    def lvs(self):
+        """ A list of this VDO pool's LVs """
+        return self._lvs[:]     # we don't want folks changing our list
+
+    @property
+    def direct(self):
+        """ Is this device directly accessible? """
+        return False
+
+    def _create(self):
+        """ Create the device. """
+        raise NotImplementedError
+
+
+class LVMVDOLogicalVolumeMixin(object):
+    def __init__(self):
+        pass
+
+    def _init_check(self):
+        pass
+
+    def _check_parents(self):
+        """Check that this device has parents as expected"""
+        if isinstance(self.parents, (list, ParentList)):
+            if len(self.parents) != 1:
+                raise ValueError("constructor requires a single vdo-pool LV")
+
+            container = self.parents[0]
+        else:
+            container = self.parents
+
+        if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
+            raise ValueError("constructor requires a vdo-pool LV")
+
+    @property
+    def vg_space_used(self):
+        return Size(0)    # the pool's size is already accounted for in the vg
+
+    @property
+    def is_vdo_lv(self):
+        return self.seg_type == "vdo"
+
+    @property
+    def vg(self):
+        # parents[0] is the pool, not the VG so set the VG here
+        return self.pool.vg
+
+    @property
+    def type(self):
+        return "vdolv"
+
+    @property
+    def resizable(self):
+        return False
+
+    @property
+    @util.requires_property("is_vdo_lv")
+    def pool(self):
+        return self.parents[0]
+
+    def _create(self):
+        """ Create the device. """
+        raise NotImplementedError
+
+    def _destroy(self):
+        # nothing to do here, VDO LV is destroyed automatically together with
+        # the VDO pool
+        pass
+
+    def remove_hook(self, modparent=True):
+        if modparent:
+            self.pool._remove_log_vol(self)
+
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
+
+    def add_hook(self, new=True):
+        # pylint: disable=bad-super-call
+        super(LVMLogicalVolumeBase, self).add_hook(new=new)
+        if new:
+            return
+
+        if self not in self.pool.lvs:
+            self.pool._add_log_vol(self)
+
+
 class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
-                             LVMThinPoolMixin, LVMThinLogicalVolumeMixin):
+                             LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
+                             LVMVDOLogicalVolumeMixin):
     """ An LVM Logical Volume """
 
     # generally resizable, see :property:`resizable` for details
@@ -1879,6 +2003,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
+        LVMVDOPoolMixin.__init__(self)
+        LVMVDOLogicalVolumeMixin.__init__(self)
 
         LVMInternalLogicalVolumeMixin._init_check(self)
         LVMSnapshotMixin._init_check(self)
@@ -1905,6 +2031,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
             ret.append(LVMThinPoolMixin)
         if self.is_thin_lv:
             ret.append(LVMThinLogicalVolumeMixin)
+        if self.is_vdo_pool:
+            ret.append(LVMVDOPoolMixin)
+        if self.is_vdo_lv:
+            ret.append(LVMVDOLogicalVolumeMixin)
         return ret
 
     def _try_specific_call(self, name, *args, **kwargs):
@@ -2066,6 +2196,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
     def display_lv_name(self):
         return self.lvname
 
+    @property
+    @type_specific
+    def pool(self):
+        return super(LVMLogicalVolumeDevice, self).pool
+
     def _setup(self, orig=False):
         """ Open, or set up, a device. """
         log_method_call(self, self.name, orig=orig, status=self.status,
@@ -2167,6 +2302,19 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
         udev.settle()
         blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
 
+    @type_specific
+    def _add_log_vol(self, lv):
+        pass
+
+    @type_specific
+    def _remove_log_vol(self, lv):
+        pass
+
+    @property
+    @type_specific
+    def lvs(self):
+        return []
+
     @property
     @type_specific
     def direct(self):
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 4b674fac..ff8bf59f 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -211,9 +211,6 @@ class LVMFormatPopulator(FormatPopulator):
                     origin = self._devicetree.get_device_by_name(origin_device_name)
 
                 lv_kwargs["origin"] = origin
-            elif lv_attr[0] == 'v':
-                # skip vorigins
-                return
             elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'):
                 # an internal LV, add the an instance of the appropriate class
                 # to internal_lvs for later processing when non-internal LVs are
@@ -237,6 +234,19 @@ class LVMFormatPopulator(FormatPopulator):
                     origin = self._devicetree.get_device_by_name(origin_device_name)
                     lv_kwargs["origin"] = origin
 
+                lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
+            elif lv_attr[0] == 'd':
+                # vdo pool
+                # nothing to do here
+                pass
+            elif lv_attr[0] == 'v':
+                if lv_type != "vdo":
+                    # skip vorigins
+                    return
+                pool_name = blockdev.lvm.vdolvpoolname(vg_name, lv_name)
+                pool_device_name = "%s-%s" % (vg_name, pool_name)
+                add_required_lv(pool_device_name, "failed to look up VDO pool")
+
                 lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
             elif lv_name.endswith(']'):
                 # unrecognized Internal LVM2 device
diff --git a/tests/action_test.py b/tests/action_test.py
index 90c1b312..8f9a7424 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1252,6 +1252,45 @@ class DeviceActionTestCase(StorageTestCase):
         self.assertEqual(set(self.storage.lvs), {pool})
         self.assertEqual(set(pool._internal_lvs), {lv1, lv2})
 
+    def test_lvm_vdo_destroy(self):
+        self.destroy_all_devices()
+        sdc = self.storage.devicetree.get_device_by_name("sdc")
+        sdc1 = self.new_device(device_class=PartitionDevice, name="sdc1",
+                               size=Size("50 GiB"), parents=[sdc],
+                               fmt=blivet.formats.get_format("lvmpv"))
+        self.schedule_create_device(sdc1)
+
+        vg = self.new_device(device_class=LVMVolumeGroupDevice,
+                             name="vg", parents=[sdc1])
+        self.schedule_create_device(vg)
+
+        pool = self.new_device(device_class=LVMLogicalVolumeDevice,
+                               name="data", parents=[vg],
+                               size=Size("10 GiB"),
+                               seg_type="vdo-pool", exists=True)
+        self.storage.devicetree._add_device(pool)
+        lv = self.new_device(device_class=LVMLogicalVolumeDevice,
+                             name="meta", parents=[pool],
+                             size=Size("50 GiB"),
+                             seg_type="vdo", exists=True)
+        self.storage.devicetree._add_device(lv)
+
+        remove_lv = self.schedule_destroy_device(lv)
+        self.assertListEqual(pool.lvs, [])
+        self.assertNotIn(lv, vg.lvs)
+
+        # cancelling the action should put lv back to both vg and pool lvs
+        self.storage.devicetree.actions.remove(remove_lv)
+        self.assertListEqual(pool.lvs, [lv])
+        self.assertIn(lv, vg.lvs)
+
+        # can't remove non-leaf pool
+        with self.assertRaises(ValueError):
+            self.schedule_destroy_device(pool)
+
+        self.schedule_destroy_device(lv)
+        self.schedule_destroy_device(pool)
+
 
 class ConfigurationActionsTest(unittest.TestCase):
 
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 9e701d18..204cb99a 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -405,6 +405,40 @@ class LVMDeviceTest(unittest.TestCase):
                                exists=False)
         self.assertFalse(vg.is_empty)
 
+    def test_lvm_vdo_pool(self):
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+                           size=Size("1 GiB"), exists=True)
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv])
+        pool = LVMLogicalVolumeDevice("testpool", parents=[vg], size=Size("512 MiB"),
+                                      seg_type="vdo-pool", exists=True)
+        self.assertTrue(pool.is_vdo_pool)
+
+        free = vg.free_space
+        lv = LVMLogicalVolumeDevice("testlv", parents=[pool], size=Size("2 GiB"),
+                                    seg_type="vdo", exists=True)
+        self.assertTrue(lv.is_vdo_lv)
+        self.assertEqual(lv.vg, vg)
+        self.assertEqual(lv.pool, pool)
+
+        # free space in the vg shouldn't be affected by the vdo lv
+        self.assertEqual(lv.vg_space_used, 0)
+        self.assertEqual(free, vg.free_space)
+
+        self.assertListEqual(pool.lvs, [lv])
+
+        # now try to destroy both the pool and the vdo lv
+        # for the lv this should be a no-op, destroying the pool should destroy both
+        with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+            lv.destroy()
+            lv.remove_hook()
+            self.assertFalse(lv.exists)
+            self.assertFalse(lvm.lvremove.called)
+            self.assertListEqual(pool.lvs, [])
+
+            pool.destroy()
+            self.assertFalse(pool.exists)
+            self.assertTrue(lvm.lvremove.called)
+
 
 class TypeSpecificCallsTest(unittest.TestCase):
     def test_type_specific_calls(self):
diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py
index e581bca6..1844dec5 100644
--- a/tests/storagetestcase.py
+++ b/tests/storagetestcase.py
@@ -96,7 +96,16 @@ class StorageTestCase(unittest.TestCase):
     def new_device(self, *args, **kwargs):
         """ Return a new Device instance suitable for testing. """
         device_class = kwargs.pop("device_class")
-        exists = kwargs.pop("exists", False)
+
+        # we intentionally don't pass the "exists" kwarg to the constructor
+        # because this causes issues with some devices (especially partitions)
+        # but we still need it for some LVs like VDO because we can't create
+        # those so we need to fake their existence even for the constructor
+        if device_class is blivet.devices.LVMLogicalVolumeDevice:
+            exists = kwargs.get("exists", False)
+        else:
+            exists = kwargs.pop("exists", False)
+
         part_type = kwargs.pop("part_type", parted.PARTITION_NORMAL)
         device = device_class(*args, **kwargs)
 
-- 
2.26.2


From f05a66e1bed1ca1f3cd7d7ffecd6693ab4d7f32a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:52:47 +0200
Subject: [PATCH 2/3] Fix checking for filesystem support in action_test

---
 tests/action_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/action_test.py b/tests/action_test.py
index 8f9a7424..228eb97a 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -56,7 +56,7 @@ FORMAT_CLASSES = [
 
 
 @unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
-@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
+@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
 class DeviceActionTestCase(StorageTestCase):
 
     """ DeviceActionTestSuite """
-- 
2.26.2


From 69bd2e69e21c8779377a6f54b3d83cb35138867a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:54:03 +0200
Subject: [PATCH 3/3] Fix LV min size for resize in test_action_dependencies

We've recently changed min size for all filesystems so we can't
resize the LV to the device minimal size.
This was overlooked in the original change because these tests
were skipped.
---
 tests/action_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/action_test.py b/tests/action_test.py
index 228eb97a..77176f46 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -870,7 +870,7 @@ class DeviceActionTestCase(StorageTestCase):
                                   name="testlv2", parents=[testvg])
         testlv2.format = self.new_format("ext4", device=testlv2.path,
                                          exists=True, device_instance=testlv2)
-        shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB"))
+        shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB") + Ext4FS._min_size)
         shrink_lv2.apply()
 
         self.assertTrue(grow_lv.requires(shrink_lv2))
-- 
2.26.2