From 18f05802f07f580ed31f38931b1103842397d598 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:19:52 +0100
Subject: [PATCH 01/17] Fix type of LVM VDO logical volumes

We should use "lvmvdolv" to make it similar to other "lvmXYZ"
types.
---
 blivet/devices/lvm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index d9e24a33..9639256d 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1875,7 +1875,7 @@ def vg(self):
 
     @property
     def type(self):
-        return "vdolv"
+        return "lvmvdolv"
 
     @property
     def resizable(self):

From 7f4815e14075550f55f2afb44bfba461eacea1c4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:21:33 +0100
Subject: [PATCH 02/17] Add VDO pool data LV to internal LVs during populate

---
 blivet/devices/lvm.py           | 9 ++++++++-
 blivet/populator/helpers/lvm.py | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9639256d..d0957d6a 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1119,7 +1119,7 @@ class LVMInternalLVtype(Enum):
 
     @classmethod
     def get_type(cls, lv_attr, lv_name):  # pylint: disable=unused-argument
-        attr_letters = {cls.data: ("T", "C"),
+        attr_letters = {cls.data: ("T", "C", "D"),
                         cls.meta: ("e",),
                         cls.log: ("l", "L"),
                         cls.image: ("i", "I"),
@@ -1824,6 +1824,13 @@ def _remove_log_vol(self, lv):
         self._lvs.remove(lv)
         self.vg._remove_log_vol(lv)
 
+    @property
+    @util.requires_property("is_vdo_pool")
+    def _vdopool_data_lv(self):
+        if not self._internal_lvs:
+            return None
+        return self._internal_lvs[0]
+
     @property
     @util.requires_property("is_vdo_pool")
     def lvs(self):
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index ff8bf59f..b1626306 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -211,7 +211,7 @@ def add_lv(lv):
                     origin = self._devicetree.get_device_by_name(origin_device_name)
 
                 lv_kwargs["origin"] = origin
-            elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'):
+            elif lv_attr[0] in 'IrielTCoD' and lv_name.endswith(']'):
                 # an internal LV, add the an instance of the appropriate class
                 # to internal_lvs for later processing when non-internal LVs are
                 # processed

From c164864955e371aef78b5020f28bf0c9d235ac7c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:22:12 +0100
Subject: [PATCH 03/17] Add availability functions for LVM VDO

VDO is currently available only on RHEL/CentOS so we need a
separate availability check for LVM VDO devices.
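
The result can be checked like this (an illustrative sketch only, mirroring
the check added to devicefactory later in this series):

from blivet.devices.lvm import LVMVDOPoolMixin

# True only when the libblockdev LVM plugin and its VDO technology are
# both available on the running system
vdo_available = all(e.available for e in LVMVDOPoolMixin._external_dependencies)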
---
 blivet/devices/lvm.py        | 6 ++++++
 blivet/tasks/availability.py | 8 ++++++++
 2 files changed, 14 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index d0957d6a..ffc65dcd 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1790,6 +1790,9 @@ def populate_ksdata(self, data):
 
 
 class LVMVDOPoolMixin(object):
+
+    _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
+
     def __init__(self):
         self._lvs = []
 
@@ -1848,6 +1851,9 @@ def _create(self):
 
 
 class LVMVDOLogicalVolumeMixin(object):
+
+    _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
+
     def __init__(self):
         pass
 
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index f3b76650..b107428e 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -372,6 +372,13 @@ def available_resource(name):
                                                                            blockdev.LVMTechMode.MODIFY)})
 BLOCKDEV_LVM_TECH = BlockDevMethod(BLOCKDEV_LVM)
 
+BLOCKDEV_LVM_VDO = BlockDevTechInfo(plugin_name="lvm",
+                                    check_fn=blockdev.lvm_is_tech_avail,
+                                    technologies={blockdev.LVMTech.VDO: (blockdev.LVMTechMode.CREATE |
+                                                                         blockdev.LVMTechMode.REMOVE |
+                                                                         blockdev.LVMTechMode.QUERY)})
+BLOCKDEV_LVM_TECH_VDO = BlockDevMethod(BLOCKDEV_LVM_VDO)
+
 # libblockdev mdraid plugin required technologies and modes
 BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
                          blockdev.MDTechMode.DELETE |
@@ -410,6 +417,7 @@ def available_resource(name):
 BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
 BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
 BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
+BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
 BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
 BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
 BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)

From d782620129d47a7b79b0e6b80455e6d93f8bcc88 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:27:55 +0100
Subject: [PATCH 04/17] Read the LVM VDO pool current size from the internal
 data LV

The device-mapper device for the pool is always 512k when active, so we
read the pool's current size from its internal data LV instead.
---
 blivet/devices/lvm.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ffc65dcd..73743fa8 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1845,6 +1845,15 @@ def direct(self):
         """ Is this device directly accessible? """
         return False
 
+    def read_current_size(self):
+        log_method_call(self, exists=self.exists, path=self.path,
+                        sysfs_path=self.sysfs_path)
+        if self.size != Size(0):
+            return self.size
+        if self._vdopool_data_lv:
+            return self._vdopool_data_lv.read_current_size()
+        return Size(0)
+
     def _create(self):
         """ Create the device. """
         raise NotImplementedError

From 2da48ae84f4eac84e8cf998ee2402249a5a52626 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:29:43 +0100
Subject: [PATCH 05/17] Add "vdo_lv" property to LVMVDOPoolMixin

---
 blivet/devices/lvm.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 73743fa8..2f93fa22 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1840,6 +1840,13 @@ def lvs(self):
         """ A list of this VDO pool's LVs """
         return self._lvs[:]     # we don't want folks changing our list
 
+    @property
+    @util.requires_property("is_vdo_pool")
+    def vdo_lv(self):
+        if not self._lvs:
+            return None
+        return self._lvs[0]
+
     @property
     def direct(self):
         """ Is this device directly accessible? """

From bbfa2cbdc6cb85d405b895c66eb4867cea4218b4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:30:37 +0100
Subject: [PATCH 06/17] Add support for creating LVM VDO pools and LVM VDO
 volumes

The pool and the volume are created by a single LVM call, but they can
have different properties (like size) and are in fact two block devices
once created, so we also need to create two devices and add them to the
devicetree. The pool device must always be created first and the
_create function for the VDO volume is a no-op.
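
In blivet terms the creation order looks roughly like this (an illustrative
sketch; see the following patch for the full "new_lv" support):

pool = LVMLogicalVolumeDevice("vdopool", parents=[vg], size=Size("8 GiB"),
                              seg_type="vdo-pool", compression=True,
                              deduplication=True)
vdolv = LVMLogicalVolumeDevice("vdolv", parents=[pool], size=Size("40 GiB"),
                               seg_type="vdo")
# pool._create() calls blockdev.lvm.vdo_pool_create() and creates both block
# devices at once; vdolv._create() is a no-op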
---
 blivet/devices/lvm.py | 63 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 8 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 2f93fa22..0802e2de 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -311,7 +311,7 @@ def _add_log_vol(self, lv):
 
         # verify we have the space, then add it
        # do not verify for growing vg (because of ks)
-        if not lv.exists and not self.growable and not lv.is_thin_lv and lv.size > self.free_space:
+        if not lv.exists and not self.growable and not (lv.is_thin_lv or lv.is_vdo_lv) and lv.size > self.free_space:
             raise errors.DeviceError("new lv is too large to fit in free space", self.name)
 
         log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
@@ -639,7 +639,7 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
                  percent=None, cache_request=None, pvs=None, from_lvs=None):
 
         if not exists:
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache"] + lvm.raid_seg_types:
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
                 raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -1793,7 +1793,11 @@ class LVMVDOPoolMixin(object):
 
     _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
 
-    def __init__(self):
+    def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None):
+        self.compression = compression
+        self.deduplication = deduplication
+        self.index_memory = index_memory
+        self.write_policy = write_policy
         self._lvs = []
 
     @property
@@ -1863,7 +1867,19 @@ def read_current_size(self):
 
     def _create(self):
         """ Create the device. """
-        raise NotImplementedError
+
+        if not self.vdo_lv:
+            raise errors.DeviceError("Cannot create new VDO pool without a VDO LV.")
+
+        if self.write_policy:
+            write_policy = blockdev.lvm_get_vdo_write_policy_str(self.write_policy)
+        else:
+            write_policy = blockdev.LVMVDOWritePolicy.AUTO
+
+        blockdev.lvm.vdo_pool_create(self.vg.name, self.vdo_lv.lvname, self.lvname,
+                                     self.size, self.vdo_lv.size, self.index_memory,
+                                     self.compression, self.deduplication,
+                                     write_policy)
 
 
 class LVMVDOLogicalVolumeMixin(object):
@@ -1915,9 +1931,26 @@ def resizable(self):
     def pool(self):
         return self.parents[0]
 
+    def _set_size(self, newsize):
+        if not isinstance(newsize, Size):
+            raise AttributeError("new size must be of type Size")
+
+        newsize = self.vg.align(newsize)
+        newsize = self.vg.align(util.numeric_type(newsize))
+        # just make sure the size is set (no VG size/free space check needed for
+        # a VDO LV)
+        DMDevice._set_size(self, newsize)
+
+    def _pre_create(self):
+        # skip LVMLogicalVolumeDevice's _pre_create() method as it checks for
+        # free space in a VG which doesn't make sense for a VDO LV and causes a
+        # bug by limiting the VDO LV's size to the VG free space
+        super(LVMLogicalVolumeBase, self)._pre_create()  # pylint: disable=bad-super-call
+
     def _create(self):
-        """ Create the device. """
-        raise NotImplementedError
+        # nothing to do here, VDO LV is created automatically together with
+        # the VDO pool
+        pass
 
     def _destroy(self):
         # nothing to do here, VDO LV is destroyed automatically together with
@@ -1953,7 +1986,9 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
                  fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
                  percent=None, cache_request=None, pvs=None,
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
-                 metadata_size=None, chunk_size=None, profile=None, from_lvs=None):
+                 metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
+                 compression=False, deduplication=False, index_memory=0,
+                 write_policy=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2012,6 +2047,17 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
             :keyword from_lvs: LVs to create the new LV from (in the (data_lv, metadata_lv) order)
             :type from_lvs: tuple of :class:`LVMLogicalVolumeDevice`
 
+            For VDO pools only:
+
+            :keyword compression: whether to enable compression on the VDO pool
+            :type compression: bool
+            :keyword deduplication: whether to enable deduplication on the VDO pool
+            :type deduplication: bool
+            :keyword index_memory: amount of index memory (in bytes) or 0 for default
+            :type index_memory: int
+            :keyword write_policy: write policy for the volume or None for default
+            :type write_policy: str
+
         """
 
         if isinstance(parents, (list, ParentList)):
@@ -2032,7 +2078,8 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
-        LVMVDOPoolMixin.__init__(self)
+        LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
+                                 write_policy)
         LVMVDOLogicalVolumeMixin.__init__(self)
 
         LVMInternalLogicalVolumeMixin._init_check(self)

From 2d1593b50dc6232e213b4df86dfbf5cf6d282dcd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:31:35 +0100
Subject: [PATCH 07/17] Allow creating LVM VDO pools and volumes using
 "blivet.new_lv"

The steps to create the VDO devices would typically look like:

pool = b.new_lv(vdo_pool=True, parents=[data], size=Size("8 GiB"))
vdolv = b.new_lv(vdo_lv=True, parents=[pool], size=Size("40 GiB"))
b.create_device(pool)
b.create_device(vdolv)
b.do_it()
---
 blivet/blivet.py               | 18 ++++++++++++++----
 tests/devices_test/lvm_test.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index e7dbd37b..754eb152 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -573,6 +573,10 @@ def new_lv(self, *args, **kwargs):
             :type thin_pool: bool
             :keyword thin_volume: whether to create a thin volume
             :type thin_volume: bool
+            :keyword vdo_pool: whether to create a vdo pool
+            :type vdo_pool: bool
+            :keyword vdo_lv: whether to create a vdo lv
+            :type vdo_lv: bool
             :returns: the new device
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`
 
@@ -589,8 +593,10 @@ def new_lv(self, *args, **kwargs):
         """
         thin_volume = kwargs.pop("thin_volume", False)
         thin_pool = kwargs.pop("thin_pool", False)
+        vdo_pool = kwargs.pop("vdo_pool", False)
+        vdo_lv = kwargs.pop("vdo_lv", False)
         parent = kwargs.get("parents", [None])[0]
-        if thin_volume and parent:
+        if (thin_volume or vdo_lv) and parent:
             # kwargs["parents"] will contain the pool device, so...
             vg = parent.vg
         else:
@@ -600,6 +606,10 @@ def new_lv(self, *args, **kwargs):
             kwargs["seg_type"] = "thin"
         if thin_pool:
             kwargs["seg_type"] = "thin-pool"
+        if vdo_pool:
+            kwargs["seg_type"] = "vdo-pool"
+        if vdo_lv:
+            kwargs["seg_type"] = "vdo"
 
         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
@@ -625,7 +635,7 @@ def new_lv(self, *args, **kwargs):
                 swap = False
 
             prefix = ""
-            if thin_pool:
+            if thin_pool or vdo_pool:
                 prefix = "pool"
 
             name = self.suggest_device_name(parent=vg,
@@ -636,10 +646,10 @@ def new_lv(self, *args, **kwargs):
         if "%s-%s" % (vg.name, name) in self.names:
             raise ValueError("name already in use")
 
-        if thin_pool or thin_volume:
+        if thin_pool or thin_volume or vdo_pool or vdo_lv:
             cache_req = kwargs.pop("cache_request", None)
             if cache_req:
-                raise ValueError("Creating cached thin volumes and pools is not supported")
+                raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
 
         return LVMLogicalVolumeDevice(name, *args, **kwargs)
 
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 204cb99a..493d3ba1 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -689,3 +689,34 @@ def test_new_lv_from_non_existing_lvs(self):
             with patch.object(pool, "_pre_create"):
                 pool.create()
                 self.assertTrue(lvm.thpool_convert.called)
+
+    def test_new_vdo_pool(self):
+        b = blivet.Blivet()
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+                           size=Size("10 GiB"), exists=True)
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+        for dev in (pv, vg):
+            b.devicetree._add_device(dev)
+
+        # check that all the above devices are in the expected places
+        self.assertEqual(set(b.devices), {pv, vg})
+        self.assertEqual(set(b.vgs), {vg})
+
+        self.assertEqual(vg.size, Size("10236 MiB"))
+
+        vdopool = b.new_lv(name="vdopool", vdo_pool=True,
+                           parents=[vg], compression=True,
+                           deduplication=True,
+                           size=blivet.size.Size("8 GiB"))
+
+        vdolv = b.new_lv(name="vdolv", vdo_lv=True,
+                         parents=[vdopool],
+                         size=blivet.size.Size("40 GiB"))
+
+        b.create_device(vdopool)
+        b.create_device(vdolv)
+
+        self.assertEqual(vdopool.children[0], vdolv)
+        self.assertEqual(vdolv.parents[0], vdopool)
+        self.assertListEqual(vg.lvs, [vdopool, vdolv])

From 31ec429ad7bd0857a768e2dfebe1de088dafc144 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:32:47 +0100
Subject: [PATCH 08/17] Add LVM VDO device factory
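
A typical use of the new factory through the factory_device API might look
like this (an illustrative sketch based on the VM test added later in this
series; the keyword values are only examples):

b.factory_device(devicefactory.DEVICE_TYPE_LVM_VDO,
                 size=Size("10 GiB"),
                 fstype="ext4",
                 disks=b.disks[:],
                 name="vdolv",
                 pool_name="vdopool",
                 virtual_size=Size("40 GiB"))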
---
 blivet/devicefactory.py     | 100 +++++++++++++++++++++++++++-
 tests/devicefactory_test.py | 128 +++++++++++++++++++++++++++++++++---
 2 files changed, 218 insertions(+), 10 deletions(-)

diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 9214ad54..c95037cc 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -27,7 +27,7 @@
 from .devices import BTRFSDevice, DiskDevice
 from .devices import LUKSDevice, LVMLogicalVolumeDevice
 from .devices import PartitionDevice, MDRaidArrayDevice
-from .devices.lvm import DEFAULT_THPOOL_RESERVE
+from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
 from .formats import get_format
 from .devicelibs import btrfs
 from .devicelibs import mdraid
@@ -58,6 +58,7 @@
 DEVICE_TYPE_BTRFS = 3
 DEVICE_TYPE_DISK = 4
 DEVICE_TYPE_LVM_THINP = 5
+DEVICE_TYPE_LVM_VDO = 6
 
 
 def is_supported_device_type(device_type):
@@ -69,6 +70,9 @@ def is_supported_device_type(device_type):
         :returns: True if this device type is supported
         :rtype: bool
     """
+    if device_type == DEVICE_TYPE_LVM_VDO:
+        return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
+
     devices = []
     if device_type == DEVICE_TYPE_BTRFS:
         devices = [BTRFSDevice]
@@ -96,7 +100,7 @@ def get_supported_raid_levels(device_type):
     pkg = None
     if device_type == DEVICE_TYPE_BTRFS:
         pkg = btrfs
-    elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP):
+    elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP, DEVICE_TYPE_LVM_VDO):
         pkg = lvm
     elif device_type == DEVICE_TYPE_MD:
         pkg = mdraid
@@ -116,6 +120,8 @@ def get_device_type(device):
                     "lvmlv": DEVICE_TYPE_LVM,
                     "lvmthinlv": DEVICE_TYPE_LVM_THINP,
                     "lvmthinpool": DEVICE_TYPE_LVM,
+                    "lvmvdolv": DEVICE_TYPE_LVM_VDO,
+                    "lvmvdopool": DEVICE_TYPE_LVM,
                     "btrfs subvolume": DEVICE_TYPE_BTRFS,
                     "btrfs volume": DEVICE_TYPE_BTRFS,
                     "mdarray": DEVICE_TYPE_MD}
@@ -136,6 +142,7 @@ def get_device_factory(blivet, device_type=DEVICE_TYPE_LVM, **kwargs):
                    DEVICE_TYPE_PARTITION: PartitionFactory,
                    DEVICE_TYPE_MD: MDFactory,
                    DEVICE_TYPE_LVM_THINP: LVMThinPFactory,
+                   DEVICE_TYPE_LVM_VDO: LVMVDOFactory,
                    DEVICE_TYPE_DISK: DeviceFactory}
 
     factory_class = class_table[device_type]
@@ -1738,6 +1745,95 @@ def _get_new_device(self, *args, **kwargs):
         return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs)
 
 
+class LVMVDOFactory(LVMFactory):
+
+    """ Factory for creating LVM VDO volumes.
+
+        :keyword pool_name: name for the VDO pool; if not specified, a unique name will be generated
+        :type pool_name: str
+        :keyword virtual_size: size for the VDO volume, usually bigger than the pool size; if not
+                               specified, the physical (pool) size will be used
+        :type virtual_size: :class:`~.size.Size`
+        :keyword compression: whether to enable compression (defaults to True)
+        :type compression: bool
+        :keyword deduplication: whether to enable deduplication (defaults to True)
+        :type deduplication: bool
+    """
+
+    def __init__(self, storage, **kwargs):
+        self.pool_name = kwargs.pop("pool_name", None)
+        self.virtual_size = kwargs.pop("virtual_size", None)
+        self.compression = kwargs.pop("compression", True)
+        self.deduplication = kwargs.pop("deduplication", True)
+        super(LVMVDOFactory, self).__init__(storage, **kwargs)
+
+    def _get_new_pool(self, *args, **kwargs):
+        kwargs["vdo_pool"] = True
+        return super(LVMVDOFactory, self)._get_new_device(*args, **kwargs)
+
+    def _set_device_size(self):
+        """ Set the size of the factory device. """
+        super(LVMVDOFactory, self)._set_device_size()
+
+        self.device.pool.size = self.size
+        self._reconfigure_container()
+
+        if not self.virtual_size or self.virtual_size < self.size:
+            # virtual_size is not set or smaller than current size --> it should be the same as the pool size
+            self.device.size = self.size
+        else:
+            self.device.size = self.virtual_size
+
+    def _set_pool_name(self):
+        safe_new_name = self.storage.safe_device_name(self.pool_name)
+        if self.device.pool.name != safe_new_name:
+            if not safe_new_name:
+                log.error("not renaming '%s' to invalid name '%s'",
+                          self.device.pool.name, self.pool_name)
+                return
+            if safe_new_name in self.storage.names:
+                log.error("not renaming '%s' to in-use name '%s'",
+                          self.device.pool.name, safe_new_name)
+                return
+
+            log.debug("renaming device '%s' to '%s'",
+                      self.device.pool.name, safe_new_name)
+            self.device.pool.raw_device.name = safe_new_name
+
+    def _set_name(self):
+        super(LVMVDOFactory, self)._set_name()
+        if self.pool_name:
+            self._set_pool_name()
+
+    def _reconfigure_device(self):
+        super(LVMVDOFactory, self)._reconfigure_device()
+
+        self.device.pool.compression = self.compression
+        self.device.pool.deduplication = self.deduplication
+
+    #
+    # methods to configure the factory's device
+    #
+    def _get_new_device(self, *args, **kwargs):
+        """ Create and return the factory device as a StorageDevice. """
+        pool = self._get_new_pool(name=self.pool_name,
+                                  size=self.size,
+                                  parents=[self.vg],
+                                  compression=self.compression,
+                                  deduplication=self.deduplication)
+        self.storage.create_device(pool)
+
+        kwargs["parents"] = [pool]
+        kwargs["vdo_lv"] = True
+
+        if self.virtual_size:
+            vdolv_kwargs = kwargs.copy()
+            vdolv_kwargs["size"] = self.virtual_size
+        else:
+            vdolv_kwargs = kwargs
+        return super(LVMVDOFactory, self)._get_new_device(*args, **vdolv_kwargs)
+
+
 class MDFactory(DeviceFactory):
 
     """ Factory for creating MD RAID devices. """
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index 08068779..7cdb51c5 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -4,6 +4,9 @@
 from decimal import Decimal
 import os
 
+import test_compat  # pylint: disable=unused-import
+from six.moves.mock import patch  # pylint: disable=no-name-in-module,import-error
+
 import blivet
 
 from blivet import devicefactory
@@ -93,10 +96,12 @@ def _validate_factory_device(self, *args, **kwargs):
             self.assertEqual(device.format.label,
                              kwargs.get('label'))
 
-        self.assertLessEqual(device.size, kwargs.get("size"))
-        self.assertGreaterEqual(device.size, device.format.min_size)
-        if device.format.max_size:
-            self.assertLessEqual(device.size, device.format.max_size)
+        # sizes with VDO are special, we have a special check in LVMVDOFactoryTestCase._validate_factory_device
+        if device_type != devicefactory.DEVICE_TYPE_LVM_VDO:
+            self.assertLessEqual(device.size, kwargs.get("size"))
+            self.assertGreaterEqual(device.size, device.format.min_size)
+            if device.format.max_size:
+                self.assertLessEqual(device.size, device.format.max_size)
 
         self.assertEqual(device.encrypted,
                          kwargs.get("encrypted", False) or
@@ -115,7 +120,11 @@ def test_device_factory(self):
                   "mountpoint": '/factorytest'}
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)
-        self.b.recursive_remove(device)
+
+        if device.type == "lvmvdolv":
+            self.b.recursive_remove(device.pool)
+        else:
+            self.b.recursive_remove(device)
 
         if self.encryption_supported:
             # Encrypt the leaf device
@@ -157,6 +166,12 @@ def test_device_factory(self):
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)
 
+        # change size up
+        kwargs["device"] = device
+        kwargs["size"] = Size("900 MiB")
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
         # Change LUKS version
         kwargs["luks_version"] = "luks1"
         device = self._factory_device(device_type, **kwargs)
@@ -179,7 +194,7 @@ def _get_size_delta(self, devices=None):
         """
         return Size("1 MiB")
 
-    def test_get_free_disk_space(self):
+    def test_get_free_disk_space(self, *args):  # pylint: disable=unused-argument
         # get_free_disk_space should return the total free space on disks
         kwargs = self._get_test_factory_args()
         kwargs["size"] = Size("500 MiB")
@@ -206,7 +221,7 @@ def test_get_free_disk_space(self):
                                sum(d.size for d in self.b.disks) - device_space,
                                delta=self._get_size_delta(devices=[device]))
 
-    def test_normalize_size(self):
+    def test_normalize_size(self, *args):  # pylint: disable=unused-argument
         # _normalize_size should adjust target size to within the format limits
         fstype = "ext2"
         ext2 = get_format(fstype)
@@ -258,7 +273,7 @@ def test_default_factory_type(self):
         factory = devicefactory.get_device_factory(self.b)
         self.assertIsInstance(factory, devicefactory.LVMFactory)
 
-    def test_factory_defaults(self):
+    def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
         ctor_kwargs = self._get_test_factory_args()
         factory = devicefactory.get_device_factory(self.b, self.device_type, **ctor_kwargs)
         for setting, value in factory._default_settings.items():
@@ -522,6 +537,103 @@ def _get_size_delta(self, devices=None):
         return delta
 
 
+class LVMVDOFactoryTestCase(LVMFactoryTestCase):
+    device_class = LVMLogicalVolumeDevice
+    device_type = devicefactory.DEVICE_TYPE_LVM_VDO
+    encryption_supported = False
+
+    def _validate_factory_device(self, *args, **kwargs):
+        super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args,
+                                                                    **kwargs)
+        device = args[0]
+
+        if kwargs.get("encrypted", False):
+            vdolv = device.parents[0]
+        else:
+            vdolv = device
+
+        self.assertTrue(hasattr(vdolv, "pool"))
+
+        virtual_size = kwargs.get("virtual_size", 0)
+        if virtual_size:
+            self.assertEqual(vdolv.size, virtual_size)
+        else:
+            self.assertEqual(vdolv.size, vdolv.pool.size)
+        self.assertGreaterEqual(vdolv.size, vdolv.pool.size)
+
+        compression = kwargs.get("compression", True)
+        self.assertEqual(vdolv.pool.compression, compression)
+
+        deduplication = kwargs.get("deduplication", True)
+        self.assertEqual(vdolv.pool.deduplication, deduplication)
+
+        pool_name = kwargs.get("pool_name", None)
+        if pool_name:
+            self.assertEqual(vdolv.pool.lvname, pool_name)
+
+        return device
+
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
+    @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
+    @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
+    @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+    def test_device_factory(self, *args):  # pylint: disable=unused-argument,arguments-differ
+        device_type = self.device_type
+        kwargs = {"disks": self.b.disks,
+                  "size": Size("400 MiB"),
+                  "fstype": 'ext4',
+                  "mountpoint": '/factorytest'}
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+        self.b.recursive_remove(device.pool)
+
+        kwargs = {"disks": self.b.disks,
+                  "size": Size("400 MiB"),
+                  "fstype": 'ext4',
+                  "mountpoint": '/factorytest',
+                  "pool_name": "vdopool",
+                  "deduplication": True,
+                  "compression": True}
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change size without specifying virtual_size: both sizes should grow
+        kwargs["size"] = Size("600 MiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change virtual size
+        kwargs["virtual_size"] = Size("6 GiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change virtual size to smaller than size
+        kwargs["virtual_size"] = Size("500 GiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change deduplication and compression
+        kwargs["deduplication"] = False
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        kwargs["compression"] = False
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # rename the pool
+        kwargs["pool_name"] = "vdopool2"
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+
 class MDFactoryTestCase(DeviceFactoryTestCase):
     device_type = devicefactory.DEVICE_TYPE_MD
     device_class = MDRaidArrayDevice

From 22ba2b96111d5f153a3b55d3c56d84e597cf9a90 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:33:06 +0100
Subject: [PATCH 09/17] Add VM test for LVM VDO

---
 tests/vmtests/blivet_reset_vmtest.py | 15 +++++++++++++++
 tests/vmtests/runvmtests.py          |  3 ++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/tests/vmtests/blivet_reset_vmtest.py b/tests/vmtests/blivet_reset_vmtest.py
index 8743d51e..47fc84c4 100644
--- a/tests/vmtests/blivet_reset_vmtest.py
+++ b/tests/vmtests/blivet_reset_vmtest.py
@@ -192,6 +192,21 @@ def setUp(self):
         self.collect_expected_data()
 
 
+class LVMVDOTestCase(BlivetResetTestCase):
+
+    def _set_up_storage(self):
+        if not devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
+            self.skipTest("VDO not supported, skipping")
+
+        self.blivet.factory_device(devicefactory.DEVICE_TYPE_LVM_VDO,
+                                   size=Size("10 GiB"),
+                                   fstype="ext4",
+                                   disks=self.blivet.disks[:],
+                                   name="vdolv",
+                                   pool_name="vdopool",
+                                   virtual_size=Size("40 GiB"))
+
+
 @unittest.skip("temporarily disabled due to issues with raids with metadata version 0.90")
 class MDRaid0TestCase(BlivetResetTestCase):
 
diff --git a/tests/vmtests/runvmtests.py b/tests/vmtests/runvmtests.py
index 88143d3a..6f20484f 100644
--- a/tests/vmtests/runvmtests.py
+++ b/tests/vmtests/runvmtests.py
@@ -12,7 +12,8 @@
          "tests.vmtests.blivet_reset_vmtest.LVMThinSnapShotTestCase",
          "tests.vmtests.blivet_reset_vmtest.LVMRaidTestCase",
          "tests.vmtests.blivet_reset_vmtest.MDRaid0TestCase",
-         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase"]
+         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase",
+         "tests.vmtests.blivet_reset_vmtest.LVMVDOTestCase"]
 
 SNAP_NAME = "snapshot"
 

From 52b37bb86e856f1ede71f7cceb7284a639d741f4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 19 Nov 2020 13:07:17 +0100
Subject: [PATCH 10/17] Allow adding nodiscard option when running mkfs

For filesystems that support it we might want to add a nodiscard
option to mkfs when creating a format on devices like LVM VDO
volumes, where discard is very slow and doesn't really make sense
when running mkfs.
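
With this change the option can be requested through the format kwargs, for
example (an illustrative sketch; the exact flags come from the per-filesystem
nodiscard_option values below):

fmt = blivet.formats.get_format("ext4", nodiscard=True)
# mke2fs is then run with "-E nodiscard", mkfs.xfs with "-K" and
# mkfs.btrfs with "--nodiscard"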
---
 blivet/formats/fs.py               | 12 +++++-
 blivet/tasks/fsmkfs.py             | 59 +++++++++++++++++++++++++++---
 tests/formats_test/methods_test.py |  3 +-
 3 files changed, 66 insertions(+), 8 deletions(-)

diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 4ba83e6d..e61e5b86 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -132,6 +132,7 @@ def __init__(self, **kwargs):
         self.mountopts = kwargs.get("mountopts", "")
         self.label = kwargs.get("label")
         self.fsprofile = kwargs.get("fsprofile")
+        self._mkfs_nodiscard = kwargs.get("nodiscard", False)
 
         self._user_mountopts = self.mountopts
 
@@ -263,6 +264,14 @@ def label_format_ok(self, label):
     label = property(lambda s: s._get_label(), lambda s, l: s._set_label(l),
                      doc="this filesystem's label")
 
+    def can_nodiscard(self):
+        """Returns True if this filesystem supports nodiscard option during
+           creation, otherwise False.
+
+           :rtype: bool
+        """
+        return self._mkfs.can_nodiscard and self._mkfs.available
+
     def can_set_uuid(self):
        """Returns True if this filesystem supports setting an UUID during
           creation, otherwise False.
@@ -402,7 +411,8 @@ def _create(self, **kwargs):
         try:
             self._mkfs.do_task(options=kwargs.get("options"),
                                label=not self.relabels(),
-                               set_uuid=self.can_set_uuid())
+                               set_uuid=self.can_set_uuid(),
+                               nodiscard=self.can_nodiscard())
         except FSWriteLabelError as e:
             log.warning("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem.", self.label, self.type)
         except FSWriteUUIDError as e:
diff --git a/blivet/tasks/fsmkfs.py b/blivet/tasks/fsmkfs.py
index ad166aa0..c982f7e7 100644
--- a/blivet/tasks/fsmkfs.py
+++ b/blivet/tasks/fsmkfs.py
@@ -37,6 +37,7 @@ class FSMkfsTask(fstask.FSTask):
 
     can_label = abc.abstractproperty(doc="whether this task labels")
     can_set_uuid = abc.abstractproperty(doc="whether this task can set UUID")
+    can_nodiscard = abc.abstractproperty(doc="whether this task can set nodiscard option")
 
 
 @add_metaclass(abc.ABCMeta)
@@ -48,6 +49,9 @@ class FSMkfs(task.BasicApplication, FSMkfsTask):
     label_option = abc.abstractproperty(
         doc="Option for setting a filesystem label.")
 
+    nodiscard_option = abc.abstractproperty(
+        doc="Option for setting nodiscard option for mkfs.")
+
     args = abc.abstractproperty(doc="options for creating filesystem")
 
     @abc.abstractmethod
@@ -80,6 +84,15 @@ def can_set_uuid(self):
         """
         return self.get_uuid_args is not None
 
+    @property
+    def can_nodiscard(self):
+        """Whether this task can set nodiscard option for a filesystem.
+
+           :returns: True if nodiscard can be set
+           :rtype: bool
+        """
+        return self.nodiscard_option is not None
+
     @property
     def _label_options(self):
         """ Any labeling options that a particular filesystem may use.
@@ -100,6 +113,23 @@ def _label_options(self):
         else:
             raise FSWriteLabelError("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem." % (self.fs.label, self.fs.type))
 
+    @property
+    def _nodiscard_option(self):
+        """ Any nodiscard options that a particular filesystem may use.
+
+            :returns: nodiscard options
+            :rtype: list of str
+        """
+        # Do not know how to set nodiscard while formatting.
+        if self.nodiscard_option is None:
+            return []
+
+        # nodiscard option not requested
+        if not self.fs._mkfs_nodiscard:
+            return []
+
+        return self.nodiscard_option
+
     @property
     def _uuid_options(self):
         """Any UUID options that a particular filesystem may use.
@@ -119,7 +149,7 @@ def _uuid_options(self):
                                    " is unacceptable for this filesystem."
                                    % (self.fs.uuid, self.fs.type))
 
-    def _format_options(self, options=None, label=False, set_uuid=False):
+    def _format_options(self, options=None, label=False, set_uuid=False, nodiscard=False):
         """Get a list of format options to be used when creating the
            filesystem.
 
@@ -135,11 +165,12 @@ def _format_options(self, options=None, label=False, set_uuid=False):
 
         label_options = self._label_options if label else []
         uuid_options = self._uuid_options if set_uuid else []
+        nodiscard_option = self._nodiscard_option if nodiscard else []
         create_options = shlex.split(self.fs.create_options or "")
         return (options + self.args + label_options + uuid_options +
-                create_options + [self.fs.device])
+                nodiscard_option + create_options + [self.fs.device])
 
-    def _mkfs_command(self, options, label, set_uuid):
+    def _mkfs_command(self, options, label, set_uuid, nodiscard):
         """Return the command to make the filesystem.
 
            :param options: any special options
@@ -148,12 +179,14 @@ def _mkfs_command(self, options, label, set_uuid):
           :type label: bool
           :param set_uuid: whether to set an UUID
           :type set_uuid: bool
+           :param nodiscard: whether to run mkfs with nodiscard option
+           :type nodiscard: bool
           :returns: the mkfs command
           :rtype: list of str
        """
-        return [str(self.ext)] + self._format_options(options, label, set_uuid)
+        return [str(self.ext)] + self._format_options(options, label, set_uuid, nodiscard)
 
-    def do_task(self, options=None, label=False, set_uuid=False):
+    def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False):
         """Create the format on the device and label if possible and desired.
 
            :param options: any special options, may be None
@@ -168,7 +201,7 @@ def do_task(self, options=None, label=False, set_uuid=False):
             raise FSError("\n".join(error_msgs))
 
         options = options or []
-        cmd = self._mkfs_command(options, label, set_uuid)
+        cmd = self._mkfs_command(options, label, set_uuid, nodiscard)
         try:
             ret = util.run_program(cmd)
         except OSError as e:
@@ -181,6 +214,7 @@ def do_task(self, options=None, label=False, set_uuid=False):
 class BTRFSMkfs(FSMkfs):
     ext = availability.MKFS_BTRFS_APP
     label_option = None
+    nodiscard_option = ["--nodiscard"]
 
     def get_uuid_args(self, uuid):
         return ["-U", uuid]
@@ -193,6 +227,7 @@ def args(self):
 class Ext2FSMkfs(FSMkfs):
     ext = availability.MKE2FS_APP
     label_option = "-L"
+    nodiscard_option = ["-E", "nodiscard"]
 
     _opts = []
 
@@ -215,6 +250,7 @@ class Ext4FSMkfs(Ext3FSMkfs):
 class FATFSMkfs(FSMkfs):
     ext = availability.MKDOSFS_APP
     label_option = "-n"
+    nodiscard_option = None
 
     def get_uuid_args(self, uuid):
         return ["-i", uuid.replace('-', '')]
@@ -227,6 +263,7 @@ def args(self):
 class GFS2Mkfs(FSMkfs):
     ext = availability.MKFS_GFS2_APP
     label_option = None
+    nodiscard_option = None
     get_uuid_args = None
 
     @property
@@ -237,6 +274,7 @@ def args(self):
 class HFSMkfs(FSMkfs):
     ext = availability.HFORMAT_APP
     label_option = "-l"
+    nodiscard_option = None
     get_uuid_args = None
 
     @property
@@ -247,6 +285,7 @@ def args(self):
 class HFSPlusMkfs(FSMkfs):
     ext = availability.MKFS_HFSPLUS_APP
     label_option = "-v"
+    nodiscard_option = None
     get_uuid_args = None
 
     @property
@@ -257,6 +296,7 @@ def args(self):
 class JFSMkfs(FSMkfs):
     ext = availability.MKFS_JFS_APP
     label_option = "-L"
+    nodiscard_option = None
     get_uuid_args = None
 
     @property
@@ -267,6 +307,7 @@ def args(self):
 class NTFSMkfs(FSMkfs):
     ext = availability.MKNTFS_APP
     label_option = "-L"
+    nodiscard_option = None
     get_uuid_args = None
 
     @property
@@ -277,6 +318,7 @@ def args(self):
 class ReiserFSMkfs(FSMkfs):
     ext = availability.MKREISERFS_APP
     label_option = "-l"
+    nodiscard_option = None
 
     def get_uuid_args(self, uuid):
         return ["-u", uuid]
@@ -289,6 +331,7 @@ def args(self):
 class XFSMkfs(FSMkfs):
     ext = availability.MKFS_XFS_APP
     label_option = "-L"
+    nodiscard_option = ["-K"]
 
     def get_uuid_args(self, uuid):
         return ["-m", "uuid=" + uuid]
@@ -307,3 +350,7 @@ def can_label(self):
     @property
     def can_set_uuid(self):
         return False
+
+    @property
+    def can_nodiscard(self):
+        return False
diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py
index 710fa1c5..b2674ea7 100644
--- a/tests/formats_test/methods_test.py
+++ b/tests/formats_test/methods_test.py
@@ -307,7 +307,8 @@ def _test_create_backend(self):
             self.format._mkfs.do_task.assert_called_with(
                 options=None,
                 label=not self.format.relabels(),
-                set_uuid=self.format.can_set_uuid()
+                set_uuid=self.format.can_set_uuid(),
+                nodiscard=self.format.can_nodiscard()
             )
 
     def _test_setup_backend(self):

From ac04f74fa9bc8ded3facd302ca74ec033009a0bd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 19 Nov 2020 13:19:21 +0100
Subject: [PATCH 11/17] Add nodiscard option by default when creating VDO
 logical volumes

The user can override this by passing "nodiscard=False" to the LV
constructor, but we want nodiscard by default.
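
To force a full discard during mkfs anyway, something like this should work
(an illustrative sketch; the override travels through the "fmt_args" handling
added below):

vdolv = b.new_lv(vdo_lv=True, parents=[pool], size=Size("40 GiB"),
                 fmt_type="ext4", fmt_args={"nodiscard": False})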
---
 blivet/blivet.py            | 8 +++++++-
 blivet/devicefactory.py     | 6 ++++++
 tests/devicefactory_test.py | 7 +++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index 754eb152..e4115691 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -613,9 +613,15 @@ def new_lv(self, *args, **kwargs):
 
         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
+            fmt_args = kwargs.pop("fmt_args", {})
+            if vdo_lv and "nodiscard" not in fmt_args.keys():
+                # we don't want to run discard on a VDO LV during mkfs, so unless
+                # the user tells us otherwise, we add the nodiscard option to mkfs
+                fmt_args["nodiscard"] = True
+
             kwargs["fmt"] = get_format(kwargs.pop("fmt_type"),
                                        mountpoint=mountpoint,
-                                       **kwargs.pop("fmt_args", {}))
+                                       **fmt_args)
 
         name = kwargs.pop("name", None)
         if name:
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index c95037cc..085f2fd6 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -1811,6 +1811,12 @@ def _reconfigure_device(self):
         self.device.pool.compression = self.compression
         self.device.pool.deduplication = self.deduplication
 
+    def _set_format(self):
+        super(LVMVDOFactory, self)._set_format()
+
+        # preserve nodiscard mkfs option after changing filesystem
+        self.device.format._mkfs_nodiscard = True
+
     #
     # methods to configure the factory's device
     #
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index 7cdb51c5..4de1e05b 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -571,6 +571,10 @@ def _validate_factory_device(self, *args, **kwargs):
         if pool_name:
             self.assertEqual(vdolv.pool.lvname, pool_name)
 
+        # nodiscard should always be set for the VDO LV format
+        if vdolv.format.type:
+            self.assertTrue(vdolv.format._mkfs_nodiscard)
+
         return device
 
     @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -633,6 +637,9 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)
 
+        # change fstype
+        kwargs["fstype"] = "xfs"
+
 
 
 class MDFactoryTestCase(DeviceFactoryTestCase):

From 43f25ce84729c321d1ff2bbba2f50489f6d736b4 Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Thu, 19 Nov 2020 13:31:40 +0100
6677d2
Subject: [PATCH 12/17] Add LVM VDO example
6677d2
6677d2
---
6677d2
 examples/lvm_vdo.py | 61 +++++++++++++++++++++++++++++++++++++++++++++
6677d2
 1 file changed, 61 insertions(+)
6677d2
 create mode 100644 examples/lvm_vdo.py
6677d2
6677d2
diff --git a/examples/lvm_vdo.py b/examples/lvm_vdo.py
6677d2
new file mode 100644
6677d2
index 00000000..ad081642
6677d2
--- /dev/null
6677d2
+++ b/examples/lvm_vdo.py
6677d2
@@ -0,0 +1,61 @@
6677d2
+import os
6677d2
+
6677d2
+import blivet
6677d2
+from blivet.size import Size
6677d2
+from blivet.util import set_up_logging, create_sparse_tempfile
6677d2
+
6677d2
+set_up_logging()
6677d2
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
6677d2
+
6677d2
+# create disk image files on which to create new devices
6677d2
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
6677d2
+b.disk_images["disk1"] = disk1_file
6677d2
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
6677d2
+b.disk_images["disk2"] = disk2_file
6677d2
+
6677d2
+b.reset()
6677d2
+
6677d2
+try:
6677d2
+    disk1 = b.devicetree.get_device_by_name("disk1")
6677d2
+    disk2 = b.devicetree.get_device_by_name("disk2")
6677d2
+
6677d2
+    b.initialize_disk(disk1)
6677d2
+    b.initialize_disk(disk2)
6677d2
+
6677d2
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
6677d2
+    b.create_device(pv)
6677d2
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
6677d2
+    b.create_device(pv2)
6677d2
+
6677d2
+    # allocate the partitions (decide where and on which disks they'll reside)
6677d2
+    blivet.partitioning.do_partitioning(b)
6677d2
+
6677d2
+    vg = b.new_vg(parents=[pv, pv2])
6677d2
+    b.create_device(vg)
6677d2
+
6677d2
+    # create an 80 GiB VDO pool
6677d2
+    # a pool can hold only one VDO LV and the two are created together with a
6677d2
+    # single LVM call; we still have two separate devices because there are two
6677d2
+    # block devices in the end, and it allows controlling the "physical" size of
6677d2
+    # the pool and the "logical" size of the VDO LV separately (the latter is
6677d2
+    # usually bigger, accounting for space saved by deduplication and/or compression)
6677d2
+    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
6677d2
+                    deduplication=True, compression=True)
6677d2
+    b.create_device(pool)
6677d2
+
6677d2
+    # create the VDO LV with a 400 GiB "virtual size" and an ext4 filesystem on
6677d2
+    # the VDO pool
6677d2
+    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
6677d2
+                  fmt_type="ext4")
6677d2
+    b.create_device(lv)
6677d2
+
6677d2
+    print(b.devicetree)
6677d2
+
6677d2
+    # write the new devices to disk and format them as specified
6677d2
+    b.do_it()
6677d2
+    print(b.devicetree)
6677d2
+    input("Check the state and hit ENTER to trigger cleanup")
6677d2
+finally:
6677d2
+    b.devicetree.teardown_disk_images()
6677d2
+    os.unlink(disk1_file)
6677d2
+    os.unlink(disk2_file)
6677d2
6677d2
From c487a1e6023b54f5beea8d99ba2f5da5d80590ee Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Wed, 25 Nov 2020 13:30:15 +0100
6677d2
Subject: [PATCH 13/17] Add LVM VDO documentation
6677d2
6677d2
---
6677d2
 doc/lvmvdo.rst | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++
6677d2
 1 file changed, 86 insertions(+)
6677d2
 create mode 100644 doc/lvmvdo.rst
6677d2
6677d2
diff --git a/doc/lvmvdo.rst b/doc/lvmvdo.rst
6677d2
new file mode 100644
6677d2
index 00000000..3965abd3
6677d2
--- /dev/null
6677d2
+++ b/doc/lvmvdo.rst
6677d2
@@ -0,0 +1,86 @@
6677d2
+LVM VDO support
6677d2
+===============
6677d2
+
6677d2
+Support for creating LVM VDO devices has been added in Blivet 3.4.
6677d2
+
6677d2
+These devices are similar to LVM thinly provisioned volumes, but there are some special steps
6677d2
+and limitations when creating them, which this document describes.
6677d2
+
6677d2
+LVM VDO in Blivet
6677d2
+-----------------
6677d2
+
6677d2
+LVM VDO devices are represented by two ``LVMLogicalVolumeDevice`` devices:
6677d2
+
6677d2
+- VDO Pool logical volume with type 'lvmvdopool'
6677d2
+- VDO logical volume with type 'lvmvdolv' which is the child of the VDO Pool device
6677d2
+
6677d2
+Existing LVM VDO setup in Blivet:
6677d2
+
6677d2
+    existing 20 GiB disk vdb (265) with existing msdos disklabel
6677d2
+      existing 20 GiB partition vdb1 (275) with existing lvmpv
6677d2
+        existing 20 GiB lvmvg data (284)
6677d2
+          existing 10 GiB lvmvdopool data-vdopool (288)
6677d2
+            existing 50 GiB lvmvdolv data-vdolv (295)
6677d2
+
6677d2
+When creating an LVM VDO setup using Blivet, these two devices must be created together, as they
6677d2
+are created by a single LVM command.
6677d2
+
6677d2
+It currently isn't possible to create additional VDO logical volumes in the pool. It is however
6677d2
+possible to create multiple VDO pools in a single volume group.
6677d2
+
6677d2
+Deduplication and compression are properties of the VDO pool. The size specified for the VDO pool
6677d2
+volume is used as the "physical" size of the pool and the size specified for the VDO logical volume
6677d2
+is used as the "virtual" size of the VDO volume.
6677d2
+
6677d2
+When creating a format, it must be created on the VDO logical volume. For filesystems with discard
6677d2
+support, the option to skip discards (e.g. ``-K`` for ``mkfs.xfs``) will be added automatically
6677d2
+when calling the ``mkfs`` command.
6677d2
+
6677d2
+Example of creating an *80 GiB* VDO pool with a *400 GiB* VDO logical volume, an *ext4* format and
6677d2
+both deduplication and compression enabled:
6677d2
+
6677d2
+    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
6677d2
+                    deduplication=True, compression=True)
6677d2
+    b.create_device(pool)
6677d2
+
6677d2
+    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
6677d2
+                  fmt_type="ext4")
6677d2
+    b.create_device(lv)
6677d2
+
6677d2
+When removing existing LVM VDO devices, both devices must be removed from the devicetree and the VDO
6677d2
+logical volume must be removed first (``recursive_remove`` can be used to automate these two steps).
6677d2
+
6677d2
+Management of existing LVM VDO devices is currently not supported.
6677d2
+
6677d2
+
6677d2
+LVM VDO in Devicefactory
6677d2
+------------------------
6677d2
+
6677d2
+For top-down creation using device factories, a new ``LVMVDOFactory`` factory has been
6677d2
+added. The factory device in this case is the VDO logical volume, which is again automatically
6677d2
+created together with the VDO pool.
6677d2
+
6677d2
+Example of creating a new LVM VDO setup using the ``devicefactory`` module:
6677d2
+
6677d2
+    factory = blivet.devicefactory.LVMVDOFactory(b, size=Size("5 GiB"), virtual_size=Size("50 GiB"),
6677d2
+                                                 disks=disks, fstype="xfs",
6677d2
+                                                 container_name="data",
6677d2
+                                                 pool_name="myvdopool",
6677d2
+                                                 compression=True, deduplication=True)
6677d2
+    factory.configure()
6677d2
+    factory.device
6677d2
+
6677d2
+        LVMLogicalVolumeDevice instance (0x7f14d17422b0) --
6677d2
+            name = data-00  status = False  id = 528
6677d2
+            children = []
6677d2
+            parents = ['non-existent 5 GiB lvmvdopool data-myvdopool (519)']
6677d2
+            ...
6677d2
+
6677d2
+``size`` in this case sets the pool (physical) size; the VDO logical volume size can be specified
6677d2
+with ``virtual_size`` (if not specified, it will be the same as the pool size). The name of the VDO
6677d2
+volume can be specified using the ``name`` keyword argument. The ``pool_name`` argument is optional
6677d2
+and a unique name will be generated if omitted. Both ``compression`` and ``deduplication`` default
6677d2
+to ``True`` (enabled) if not specified.
6677d2
+
6677d2
+This factory can create only a single VDO logical volume in a single VDO pool, but additional VDO pools
6677d2
+can be added by repeating the steps used to create the first one.
6677d2
6677d2
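To complement the removal note in the documentation above, a minimal sketch
(assuming a populated Blivet instance "b" that already contains the
"data-vdopool" stack shown in the documentation) of tearing an existing LVM VDO
setup down:

    # recursive_remove schedules removal of the VDO LV before the pool itself
    pool = b.devicetree.get_device_by_name("data-vdopool")
    b.recursive_remove(pool)
    b.do_it()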
From c6c776cf137b5c6ae454487df469e9a6dba8a5d1 Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Wed, 9 Dec 2020 14:06:27 +0100
6677d2
Subject: [PATCH 14/17] Set minimum size for LVM VDO pool devices
6677d2
6677d2
---
6677d2
 blivet/devicefactory.py        |  3 +++
6677d2
 blivet/devices/lvm.py          | 26 ++++++++++++++++++++++++++
6677d2
 tests/devicefactory_test.py    | 29 ++++++++++++++++++++---------
6677d2
 tests/devices_test/lvm_test.py |  6 ++++++
6677d2
 4 files changed, 55 insertions(+), 9 deletions(-)
6677d2
6677d2
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
6677d2
index 085f2fd6..5e47eb9a 100644
6677d2
--- a/blivet/devicefactory.py
6677d2
+++ b/blivet/devicefactory.py
6677d2
@@ -277,6 +277,7 @@ class DeviceFactory(object):
6677d2
                          "container_size": SIZE_POLICY_AUTO,
6677d2
                          "container_raid_level": None,
6677d2
                          "container_encrypted": None}
6677d2
+    _device_min_size = Size(0)  # no limit by default, limited only by filesystem size
6677d2
 
6677d2
     def __init__(self, storage, **kwargs):
6677d2
         """
6677d2
@@ -1760,6 +1761,8 @@ class LVMVDOFactory(LVMFactory):
6677d2
         :type deduplication: bool
6677d2
     """
6677d2
 
6677d2
+    _device_min_size = LVMVDOPoolMixin._min_size
6677d2
+
6677d2
     def __init__(self, storage, **kwargs):
6677d2
         self.pool_name = kwargs.pop("pool_name", None)
6677d2
         self.virtual_size = kwargs.pop("virtual_size", None)
6677d2
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
6677d2
index 0802e2de..785fa2d2 100644
6677d2
--- a/blivet/devices/lvm.py
6677d2
+++ b/blivet/devices/lvm.py
6677d2
@@ -1792,6 +1792,7 @@ def populate_ksdata(self, data):
6677d2
 class LVMVDOPoolMixin(object):
6677d2
 
6677d2
     _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
6677d2
+    _min_size = Size("5 GiB")  # 2.5 GiB for index and one 2 GiB slab rounded up to 5 GiB
6677d2
 
6677d2
     def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None):
6677d2
         self.compression = compression
6677d2
@@ -1800,6 +1801,9 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
6677d2
         self.write_policy = write_policy
6677d2
         self._lvs = []
6677d2
 
6677d2
+        if not self.exists and self.size < self.min_size:
6677d2
+            raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))
6677d2
+
6677d2
     @property
6677d2
     def is_vdo_pool(self):
6677d2
         return self.seg_type == "vdo-pool"
6677d2
@@ -1856,6 +1860,23 @@ def direct(self):
6677d2
         """ Is this device directly accessible? """
6677d2
         return False
6677d2
 
6677d2
+    @property
6677d2
+    @util.requires_property("is_vdo_pool")
6677d2
+    def min_size(self):
6677d2
+        if self.exists:
6677d2
+            return self.current_size
6677d2
+
6677d2
+        return self._min_size
6677d2
+
6677d2
+    def _set_size(self, newsize):
6677d2
+        if not isinstance(newsize, Size):
6677d2
+            raise AttributeError("new size must of type Size")
6677d2
+
6677d2
+        if newsize < self.min_size:
6677d2
+            raise ValueError("Requested size %s is smaller than minimum %s" % (newsize, self.min_size))
6677d2
+
6677d2
+        DMDevice._set_size(self, newsize)
6677d2
+
6677d2
     def read_current_size(self):
6677d2
         log_method_call(self, exists=self.exists, path=self.path,
6677d2
                         sysfs_path=self.sysfs_path)
6677d2
@@ -2229,6 +2250,11 @@ def max_size(self):
6677d2
         max_format = self.format.max_size
6677d2
         return min(max_lv, max_format) if max_format else max_lv
6677d2
 
6677d2
+    @property
6677d2
+    @type_specific
6677d2
+    def min_size(self):
6677d2
+        return super(LVMLogicalVolumeDevice, self).min_size
6677d2
+
6677d2
     @property
6677d2
     @type_specific
6677d2
     def vg_space_used(self):
6677d2
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
6677d2
index 4de1e05b..a1334cda 100644
6677d2
--- a/tests/devicefactory_test.py
6677d2
+++ b/tests/devicefactory_test.py
6677d2
@@ -49,13 +49,18 @@ class DeviceFactoryTestCase(unittest.TestCase):
6677d2
     encryption_supported = True
6677d2
     """ whether encryption of this device type is supported by blivet """
6677d2
 
6677d2
+    factory_class = None
6677d2
+    """ devicefactory class used in this test case """
6677d2
+
6677d2
+    _disk_size = Size("2 GiB")
6677d2
+
6677d2
     def setUp(self):
6677d2
         if self.device_type is None:
6677d2
             raise unittest.SkipTest("abstract base class")
6677d2
 
6677d2
         self.b = blivet.Blivet()  # don't populate it
6677d2
-        self.disk_files = [create_sparse_tempfile("factorytest", Size("2 GiB")),
6677d2
-                           create_sparse_tempfile("factorytest", Size("2 GiB"))]
6677d2
+        self.disk_files = [create_sparse_tempfile("factorytest", self._disk_size),
6677d2
+                           create_sparse_tempfile("factorytest", self._disk_size)]
6677d2
         for filename in self.disk_files:
6677d2
             disk = DiskFile(filename)
6677d2
             self.b.devicetree._add_device(disk)
6677d2
@@ -197,7 +202,7 @@ def _get_size_delta(self, devices=None):
6677d2
     def test_get_free_disk_space(self, *args):  # pylint: disable=unused-argument
6677d2
         # get_free_disk_space should return the total free space on disks
6677d2
         kwargs = self._get_test_factory_args()
6677d2
-        kwargs["size"] = Size("500 MiB")
6677d2
+        kwargs["size"] = max(Size("500 MiB"), self.factory_class._device_min_size)
6677d2
         factory = devicefactory.get_device_factory(self.b,
6677d2
                                                    self.device_type,
6677d2
                                                    disks=self.b.disks,
6677d2
@@ -285,7 +290,7 @@ def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
6677d2
         kwargs = self._get_test_factory_args()
6677d2
         kwargs.update({"disks": self.b.disks[:],
6677d2
                        "fstype": "swap",
6677d2
-                       "size": Size("2GiB"),
6677d2
+                       "size": max(Size("2GiB"), self.factory_class._device_min_size),
6677d2
                        "label": "SWAP"})
6677d2
         device = self._factory_device(self.device_type, **kwargs)
6677d2
         factory = devicefactory.get_device_factory(self.b, self.device_type,
6677d2
@@ -302,6 +307,7 @@ def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
6677d2
 class PartitionFactoryTestCase(DeviceFactoryTestCase):
6677d2
     device_class = PartitionDevice
6677d2
     device_type = devicefactory.DEVICE_TYPE_PARTITION
6677d2
+    factory_class = devicefactory.PartitionFactory
6677d2
 
6677d2
     def test_bug1178884(self):
6677d2
         # Test a change of format and size where old size is too large for the
6677d2
@@ -330,6 +336,7 @@ def _get_size_delta(self, devices=None):
6677d2
 class LVMFactoryTestCase(DeviceFactoryTestCase):
6677d2
     device_class = LVMLogicalVolumeDevice
6677d2
     device_type = devicefactory.DEVICE_TYPE_LVM
6677d2
+    factory_class = devicefactory.LVMFactory
6677d2
 
6677d2
     def _validate_factory_device(self, *args, **kwargs):
6677d2
         super(LVMFactoryTestCase, self)._validate_factory_device(*args, **kwargs)
6677d2
@@ -510,6 +517,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
6677d2
     device_class = LVMLogicalVolumeDevice
6677d2
     device_type = devicefactory.DEVICE_TYPE_LVM_THINP
6677d2
     encryption_supported = False
6677d2
+    factory_class = devicefactory.LVMThinPFactory
6677d2
 
6677d2
     def _validate_factory_device(self, *args, **kwargs):
6677d2
         super(LVMThinPFactoryTestCase, self)._validate_factory_device(*args,
6677d2
@@ -541,6 +549,8 @@ class LVMVDOFactoryTestCase(LVMFactoryTestCase):
6677d2
     device_class = LVMLogicalVolumeDevice
6677d2
     device_type = devicefactory.DEVICE_TYPE_LVM_VDO
6677d2
     encryption_supported = False
6677d2
+    _disk_size = Size("10 GiB")  # we need bigger disks for VDO
6677d2
+    factory_class = devicefactory.LVMVDOFactory
6677d2
 
6677d2
     def _validate_factory_device(self, *args, **kwargs):
6677d2
         super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args,
6677d2
@@ -585,7 +595,7 @@ def _validate_factory_device(self, *args, **kwargs):
6677d2
     def test_device_factory(self, *args):  # pylint: disable=unused-argument,arguments-differ
6677d2
         device_type = self.device_type
6677d2
         kwargs = {"disks": self.b.disks,
6677d2
-                  "size": Size("400 MiB"),
6677d2
+                  "size": Size("6 GiB"),
6677d2
                   "fstype": 'ext4',
6677d2
                   "mountpoint": '/factorytest'}
6677d2
         device = self._factory_device(device_type, **kwargs)
6677d2
@@ -593,7 +603,7 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
6677d2
         self.b.recursive_remove(device.pool)
6677d2
 
6677d2
         kwargs = {"disks": self.b.disks,
6677d2
-                  "size": Size("400 MiB"),
6677d2
+                  "size": Size("6 GiB"),
6677d2
                   "fstype": 'ext4',
6677d2
                   "mountpoint": '/factorytest',
6677d2
                   "pool_name": "vdopool",
6677d2
@@ -603,19 +613,19 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
6677d2
         self._validate_factory_device(device, device_type, **kwargs)
6677d2
 
6677d2
         # change size without specifying virtual_size: both sizes should grow
6677d2
-        kwargs["size"] = Size("600 MiB")
6677d2
+        kwargs["size"] = Size("8 GiB")
6677d2
         kwargs["device"] = device
6677d2
         device = self._factory_device(device_type, **kwargs)
6677d2
         self._validate_factory_device(device, device_type, **kwargs)
6677d2
 
6677d2
         # change virtual size
6677d2
-        kwargs["virtual_size"] = Size("6 GiB")
6677d2
+        kwargs["virtual_size"] = Size("40 GiB")
6677d2
         kwargs["device"] = device
6677d2
         device = self._factory_device(device_type, **kwargs)
6677d2
         self._validate_factory_device(device, device_type, **kwargs)
6677d2
 
6677d2
         # change virtual size to smaller than size
6677d2
-        kwargs["virtual_size"] = Size("500 GiB")
6677d2
+        kwargs["virtual_size"] = Size("10 GiB")
6677d2
         kwargs["device"] = device
6677d2
         device = self._factory_device(device_type, **kwargs)
6677d2
         self._validate_factory_device(device, device_type, **kwargs)
6677d2
@@ -644,6 +654,7 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
6677d2
 class MDFactoryTestCase(DeviceFactoryTestCase):
6677d2
     device_type = devicefactory.DEVICE_TYPE_MD
6677d2
     device_class = MDRaidArrayDevice
6677d2
+    factory_class = devicefactory.MDFactory
6677d2
 
6677d2
     def test_device_factory(self):
6677d2
         # RAID0 across two disks
6677d2
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
6677d2
index 493d3ba1..78b140ba 100644
6677d2
--- a/tests/devices_test/lvm_test.py
6677d2
+++ b/tests/devices_test/lvm_test.py
6677d2
@@ -705,6 +705,12 @@ def test_new_vdo_pool(self):
6677d2
 
6677d2
         self.assertEqual(vg.size, Size("10236 MiB"))
6677d2
 
6677d2
+        with self.assertRaises(ValueError):
6677d2
+            vdopool = b.new_lv(name="vdopool", vdo_pool=True,
6677d2
+                               parents=[vg], compression=True,
6677d2
+                               deduplication=True,
6677d2
+                               size=blivet.size.Size("1 GiB"))
6677d2
+
6677d2
         vdopool = b.new_lv(name="vdopool", vdo_pool=True,
6677d2
                            parents=[vg], compression=True,
6677d2
                            deduplication=True,
6677d2
6677d2
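The new minimum applies only to pools that do not exist yet; a rough sketch of
what the check enforces (assuming a Blivet instance "b" with an existing volume
group "vg" and Size imported from blivet.size):

    # pools smaller than the 5 GiB minimum are rejected when the LV is created
    try:
        b.new_lv(name="vdopool", vdo_pool=True, parents=[vg], size=Size("1 GiB"))
    except ValueError as err:
        print(err)  # e.g. "Requested size 1 GiB is smaller than minimum 5 GiB"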
From 197f2877709e702c101ada6b9a055a88f09320c8 Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Fri, 11 Dec 2020 14:20:48 +0100
6677d2
Subject: [PATCH 15/17] Use better description for libblockdev plugins in
6677d2
 tasks.availability
6677d2
6677d2
The old names were quite confusing, showing that "lvm" is
6677d2
missing when in fact the libblockdev LVM plugin is missing. Also,
6677d2
with LVM VDO we need to be able to tell the difference between a
6677d2
missing LVM plugin and missing LVM VDO support.
6677d2
---
6677d2
 blivet/tasks/availability.py | 26 +++++++++++++-------------
6677d2
 1 file changed, 13 insertions(+), 13 deletions(-)
6677d2
6677d2
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
6677d2
index b107428e..52418685 100644
6677d2
--- a/blivet/tasks/availability.py
6677d2
+++ b/blivet/tasks/availability.py
6677d2
@@ -236,13 +236,13 @@ def availability_errors(self, resource):
6677d2
             :returns: [] if the name of the plugin is loaded
6677d2
             :rtype: list of str
6677d2
         """
6677d2
-        if resource.name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
6677d2
-            return ["libblockdev plugin %s not loaded" % resource.name]
6677d2
+        if self._tech_info.plugin_name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
6677d2
+            return ["libblockdev plugin %s not loaded" % self._tech_info.plugin_name]
6677d2
         else:
6677d2
             tech_missing = self._check_technologies()
6677d2
             if tech_missing:
6677d2
                 return ["libblockdev plugin %s is loaded but some required "
6677d2
-                        "technologies are not available:\n%s" % (resource.name, tech_missing)]
6677d2
+                        "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
6677d2
             else:
6677d2
                 return []
6677d2
 
6677d2
@@ -411,16 +411,16 @@ def available_resource(name):
6677d2
 # we can't just check if the plugin is loaded, we also need to make sure
6677d2
 # that all technologies required by us are supported (some may be missing
6677d2
 # due to missing dependencies)
6677d2
-BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
6677d2
-BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
6677d2
-BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
6677d2
-BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
6677d2
-BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
6677d2
-BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
6677d2
-BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
6677d2
-BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
6677d2
-BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
6677d2
-BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)
6677d2
+BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("libblockdev btrfs plugin", BLOCKDEV_BTRFS_TECH)
6677d2
+BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("libblockdev crypto plugin", BLOCKDEV_CRYPTO_TECH)
6677d2
+BLOCKDEV_DM_PLUGIN = blockdev_plugin("libblockdev dm plugin", BLOCKDEV_DM_TECH)
6677d2
+BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technology)", BLOCKDEV_DM_TECH_RAID)
6677d2
+BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
6677d2
+BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
6677d2
+BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
6677d2
+BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
6677d2
+BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
6677d2
+BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
6677d2
 
6677d2
 # applications with versions
6677d2
 # we need e2fsprogs newer than 1.41 and we are checking the version by running
6677d2
6677d2
From 5fc047b48b0de18fa249f102d2a7163ac2d6e6a6 Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Fri, 11 Dec 2020 14:24:18 +0100
6677d2
Subject: [PATCH 16/17] Fix external dependencies for LVM VDO devices
6677d2
6677d2
The external and unavailable dependencies code is mostly supposed
6677d2
to work with just class objects and not instances, which is a
6677d2
problem for LVM devices: LVMLogicalVolumeDevice can't depend on
6677d2
LVM VDO, and the special LVM VDO device mixin classes don't inherit
6677d2
from the Device class, so they are missing some availability
6677d2
functions.
6677d2
This fix adds the necessary functions to the LVM VDO mixin classes to
6677d2
make sure both "unavailable_type_dependencies" and
6677d2
"type_external_dependencies" work with LVMVDOLogicalVolumeMixin
6677d2
and LVMVDOPoolMixin. When working with an LVMLogicalVolumeDevice
6677d2
instance, its dependencies are correctly set based on the type of the
6677d2
logical volume.
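
A minimal sketch of how the new class-level helpers can be used to check LVM
VDO availability without instantiating any device:

    from blivet.devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin

    # both helpers are classmethods, so no LV instance is needed
    missing = (LVMVDOPoolMixin.unavailable_type_dependencies() |
               LVMVDOLogicalVolumeMixin.unavailable_type_dependencies())
    if missing:
        print("LVM VDO is not usable:", sorted(d.name for d in missing))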
6677d2
---
6677d2
 blivet/devicefactory.py        |   7 +--
6677d2
 blivet/devices/lvm.py          |  31 ++++++++++
6677d2
 tests/action_test.py           |   7 +++
6677d2
 tests/devicefactory_test.py    |  32 ++++++++++
6677d2
 tests/devices_test/lvm_test.py | 106 +++++++++++++++++++++++++++++++++
6677d2
 5 files changed, 179 insertions(+), 4 deletions(-)
6677d2
6677d2
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
6677d2
index 5e47eb9a..b29a107a 100644
6677d2
--- a/blivet/devicefactory.py
6677d2
+++ b/blivet/devicefactory.py
6677d2
@@ -27,7 +27,7 @@
6677d2
 from .devices import BTRFSDevice, DiskDevice
6677d2
 from .devices import LUKSDevice, LVMLogicalVolumeDevice
6677d2
 from .devices import PartitionDevice, MDRaidArrayDevice
6677d2
-from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
6677d2
+from .devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin, DEFAULT_THPOOL_RESERVE
6677d2
 from .formats import get_format
6677d2
 from .devicelibs import btrfs
6677d2
 from .devicelibs import mdraid
6677d2
@@ -70,9 +70,6 @@ def is_supported_device_type(device_type):
6677d2
         :returns: True if this device type is supported
6677d2
         :rtype: bool
6677d2
     """
6677d2
-    if device_type == DEVICE_TYPE_LVM_VDO:
6677d2
-        return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
6677d2
-
6677d2
     devices = []
6677d2
     if device_type == DEVICE_TYPE_BTRFS:
6677d2
         devices = [BTRFSDevice]
6677d2
@@ -84,6 +81,8 @@ def is_supported_device_type(device_type):
6677d2
         devices = [PartitionDevice]
6677d2
     elif device_type == DEVICE_TYPE_MD:
6677d2
         devices = [MDRaidArrayDevice]
6677d2
+    elif device_type == DEVICE_TYPE_LVM_VDO:
6677d2
+        devices = [LVMLogicalVolumeDevice, LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]
6677d2
 
6677d2
     return not any(c.unavailable_type_dependencies() for c in devices)
6677d2
 
6677d2
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
6677d2
index 785fa2d2..ac900bf3 100644
6677d2
--- a/blivet/devices/lvm.py
6677d2
+++ b/blivet/devices/lvm.py
6677d2
@@ -1804,6 +1804,17 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
6677d2
         if not self.exists and self.size < self.min_size:
6677d2
             raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))
6677d2
 
6677d2
+    # these two methods are defined in Device but LVMVDOPoolMixin doesn't inherit from
6677d2
+    # it and we can't have this code in LVMLogicalVolumeDevice because we need to be able
6677d2
+    # to get dependencies without creating an instance of the class
6677d2
+    @classmethod
6677d2
+    def type_external_dependencies(cls):
6677d2
+        return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
6677d2
+
6677d2
+    @classmethod
6677d2
+    def unavailable_type_dependencies(cls):
6677d2
+        return set(e for e in cls.type_external_dependencies() if not e.available)
6677d2
+
6677d2
     @property
6677d2
     def is_vdo_pool(self):
6677d2
         return self.seg_type == "vdo-pool"
6677d2
@@ -1926,6 +1937,17 @@ def _check_parents(self):
6677d2
         if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
6677d2
             raise ValueError("constructor requires a vdo-pool LV")
6677d2
 
6677d2
+    # these two methods are defined in Device but LVMVDOLogicalVolumeMixin doesn't inherit
6677d2
+    # from it and we can't have this code in LVMLogicalVolumeDevice because we need to be
6677d2
+    # able to get dependencies without creating an instance of the class
6677d2
+    @classmethod
6677d2
+    def type_external_dependencies(cls):
6677d2
+        return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
6677d2
+
6677d2
+    @classmethod
6677d2
+    def unavailable_type_dependencies(cls):
6677d2
+        return set(e for e in cls.type_external_dependencies() if not e.available)
6677d2
+
6677d2
     @property
6677d2
     def vg_space_used(self):
6677d2
         return Size(0)    # the pool's size is already accounted for in the vg
6677d2
@@ -2217,6 +2239,15 @@ def _convert_from_lvs(self):
6677d2
         """Convert the LVs to create this LV from into its internal LVs"""
6677d2
         raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
6677d2
 
6677d2
+    @property
6677d2
+    def external_dependencies(self):
6677d2
+        deps = super(LVMLogicalVolumeBase, self).external_dependencies
6677d2
+        if self.is_vdo_pool:
6677d2
+            deps.update(LVMVDOPoolMixin.type_external_dependencies())
6677d2
+        if self.is_vdo_lv:
6677d2
+            deps.update(LVMVDOLogicalVolumeMixin.type_external_dependencies())
6677d2
+        return deps
6677d2
+
6677d2
     @property
6677d2
     @type_specific
6677d2
     def vg(self):
6677d2
diff --git a/tests/action_test.py b/tests/action_test.py
6677d2
index 77176f46..38a2e872 100644
6677d2
--- a/tests/action_test.py
6677d2
+++ b/tests/action_test.py
6677d2
@@ -18,6 +18,8 @@
6677d2
 from blivet.devices import MDRaidArrayDevice
6677d2
 from blivet.devices import LVMVolumeGroupDevice
6677d2
 from blivet.devices import LVMLogicalVolumeDevice
6677d2
+from blivet.devices.lvm import LVMVDOPoolMixin
6677d2
+from blivet.devices.lvm import LVMVDOLogicalVolumeMixin
6677d2
 
6677d2
 # format classes
6677d2
 from blivet.formats.fs import Ext2FS
6677d2
@@ -1252,6 +1254,11 @@ def test_lv_from_lvs_actions(self):
6677d2
         self.assertEqual(set(self.storage.lvs), {pool})
6677d2
         self.assertEqual(set(pool._internal_lvs), {lv1, lv2})
6677d2
 
6677d2
+
6677d2
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test")
6677d2
+@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
6677d2
+class DeviceActionLVMVDOTestCase(DeviceActionTestCase):
6677d2
+
6677d2
     def test_lvm_vdo_destroy(self):
6677d2
         self.destroy_all_devices()
6677d2
         sdc = self.storage.devicetree.get_device_by_name("sdc")
6677d2
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
6677d2
index a1334cda..e4210ead 100644
6677d2
--- a/tests/devicefactory_test.py
6677d2
+++ b/tests/devicefactory_test.py
6677d2
@@ -592,6 +592,8 @@ def _validate_factory_device(self, *args, **kwargs):
6677d2
     @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
6677d2
     @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
6677d2
     @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
6677d2
     def test_device_factory(self, *args):  # pylint: disable=unused-argument,arguments-differ
6677d2
         device_type = self.device_type
6677d2
         kwargs = {"disks": self.b.disks,
6677d2
@@ -650,6 +652,36 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
6677d2
         # change fstype
6677d2
         kwargs["fstype"] = "xfs"
6677d2
 
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
6677d2
+    @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
6677d2
+    @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
6677d2
+    def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
6677d2
+        super(LVMVDOFactoryTestCase, self).test_factory_defaults()
6677d2
+
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
6677d2
+    @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
6677d2
+    @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
6677d2
+    def test_get_free_disk_space(self, *args):
6677d2
+        super(LVMVDOFactoryTestCase, self).test_get_free_disk_space()
6677d2
+
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
6677d2
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
6677d2
+    @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
6677d2
+    @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
6677d2
+    @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
6677d2
+    def test_normalize_size(self, *args):  # pylint: disable=unused-argument
6677d2
+        super(LVMVDOFactoryTestCase, self).test_normalize_size()
6677d2
+
6677d2
 
6677d2
 class MDFactoryTestCase(DeviceFactoryTestCase):
6677d2
     device_type = devicefactory.DEVICE_TYPE_MD
6677d2
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
6677d2
index 78b140ba..d938144d 100644
6677d2
--- a/tests/devices_test/lvm_test.py
6677d2
+++ b/tests/devices_test/lvm_test.py
6677d2
@@ -10,10 +10,13 @@
6677d2
 from blivet.devices import StorageDevice
6677d2
 from blivet.devices import LVMLogicalVolumeDevice
6677d2
 from blivet.devices import LVMVolumeGroupDevice
6677d2
+from blivet.devices.lvm import LVMVDOPoolMixin
6677d2
+from blivet.devices.lvm import LVMVDOLogicalVolumeMixin
6677d2
 from blivet.devices.lvm import LVMCacheRequest
6677d2
 from blivet.devices.lvm import LVPVSpec, LVMInternalLVtype
6677d2
 from blivet.size import Size
6677d2
 from blivet.devicelibs import raid
6677d2
+from blivet import devicefactory
6677d2
 from blivet import errors
6677d2
 
6677d2
 DEVICE_CLASSES = [
6677d2
@@ -690,6 +693,10 @@ def test_new_lv_from_non_existing_lvs(self):
6677d2
                 pool.create()
6677d2
                 self.assertTrue(lvm.thpool_convert.called)
6677d2
 
6677d2
+
6677d2
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test")
6677d2
+class BlivetNewLVMVDODeviceTest(unittest.TestCase):
6677d2
+
6677d2
     def test_new_vdo_pool(self):
6677d2
         b = blivet.Blivet()
6677d2
         pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
6677d2
@@ -726,3 +733,102 @@ def test_new_vdo_pool(self):
6677d2
         self.assertEqual(vdopool.children[0], vdolv)
6677d2
         self.assertEqual(vdolv.parents[0], vdopool)
6677d2
         self.assertListEqual(vg.lvs, [vdopool, vdolv])
6677d2
+
6677d2
+
6677d2
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
6677d2
+class BlivetLVMVDODependenciesTest(unittest.TestCase):
6677d2
+    def test_vdo_dependencies(self):
6677d2
+        blivet.tasks.availability.CACHE_AVAILABILITY = False
6677d2
+
6677d2
+        b = blivet.Blivet()
6677d2
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
6677d2
+                           size=Size("10 GiB"), exists=True)
6677d2
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
6677d2
+
6677d2
+        for dev in (pv, vg):
6677d2
+            b.devicetree._add_device(dev)
6677d2
+
6677d2
+        # check that all the above devices are in the expected places
6677d2
+        self.assertEqual(set(b.devices), {pv, vg})
6677d2
+        self.assertEqual(set(b.vgs), {vg})
6677d2
+
6677d2
+        self.assertEqual(vg.size, Size("10236 MiB"))
6677d2
+
6677d2
+        vdopool = b.new_lv(name="vdopool", vdo_pool=True,
6677d2
+                           parents=[vg], compression=True,
6677d2
+                           deduplication=True,
6677d2
+                           size=blivet.size.Size("8 GiB"))
6677d2
+
6677d2
+        vdolv = b.new_lv(name="vdolv", vdo_lv=True,
6677d2
+                         parents=[vdopool],
6677d2
+                         size=blivet.size.Size("40 GiB"))
6677d2
+
6677d2
+        # Dependencies check: for VDO types these should be a combination of "normal"
6677d2
+        # LVM dependencies (LVM libblockdev plugin + kpartx and DM plugin from DMDevice)
6677d2
+        # and LVM VDO technology from the LVM plugin
6677d2
+        lvm_vdo_dependencies = ["kpartx",
6677d2
+                                "libblockdev dm plugin",
6677d2
+                                "libblockdev lvm plugin",
6677d2
+                                "libblockdev lvm plugin (vdo technology)"]
6677d2
+        pool_deps = [d.name for d in vdopool.external_dependencies]
6677d2
+        six.assertCountEqual(self, pool_deps, lvm_vdo_dependencies)
6677d2
+
6677d2
+        vdolv_deps = [d.name for d in vdolv.external_dependencies]
6677d2
+        six.assertCountEqual(self, vdolv_deps, lvm_vdo_dependencies)
6677d2
+
6677d2
+        # same dependencies should be returned when checking with class not instance
6677d2
+        pool_type_deps = [d.name for d in LVMVDOPoolMixin.type_external_dependencies()]
6677d2
+        six.assertCountEqual(self, pool_type_deps, lvm_vdo_dependencies)
6677d2
+
6677d2
+        vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.type_external_dependencies()]
6677d2
+        six.assertCountEqual(self, vdolv_type_deps, lvm_vdo_dependencies)
6677d2
+
6677d2
+        # just to be sure LVM VDO specific code didn't break "normal" LVs
6677d2
+        normallv = b.new_lv(name="lvol0",
6677d2
+                            parents=[vg],
6677d2
+                            size=blivet.size.Size("1 GiB"))
6677d2
+
6677d2
+        normalvl_deps = [d.name for d in normallv.external_dependencies]
6677d2
+        six.assertCountEqual(self, normalvl_deps, ["kpartx",
6677d2
+                                                   "libblockdev dm plugin",
6677d2
+                                                   "libblockdev lvm plugin"])
6677d2
+
6677d2
+        with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
6677d2
+                   new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
6677d2
+            with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
6677d2
+                       new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
6677d2
+
6677d2
+                pool_deps = [d.name for d in vdopool.unavailable_dependencies]
6677d2
+                self.assertEqual(pool_deps, ["VDO unavailability test"])
6677d2
+
6677d2
+                vdolv_deps = [d.name for d in vdolv.unavailable_dependencies]
6677d2
+                self.assertEqual(vdolv_deps, ["VDO unavailability test"])
6677d2
+
6677d2
+                # same dependencies should be returned when checking with class not instance
6677d2
+                pool_type_deps = [d.name for d in LVMVDOPoolMixin.unavailable_type_dependencies()]
6677d2
+                six.assertCountEqual(self, pool_type_deps, ["VDO unavailability test"])
6677d2
+
6677d2
+                vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.unavailable_type_dependencies()]
6677d2
+                six.assertCountEqual(self, vdolv_type_deps, ["VDO unavailability test"])
6677d2
+
6677d2
+                normallv_deps = [d.name for d in normallv.unavailable_dependencies]
6677d2
+                self.assertEqual(normallv_deps, [])
6677d2
+
6677d2
+                with self.assertRaises(errors.DependencyError):
6677d2
+                    b.create_device(vdopool)
6677d2
+                    b.create_device(vdolv)
6677d2
+
6677d2
+                b.create_device(normallv)
6677d2
+
6677d2
+    def test_vdo_dependencies_devicefactory(self):
6677d2
+        with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
6677d2
+                   new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
6677d2
+            with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
6677d2
+                       new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
6677d2
+
6677d2
+                # shouldn't affect "normal" LVM
6677d2
+                lvm_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM)
6677d2
+                self.assertTrue(lvm_supported)
6677d2
+
6677d2
+                vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
6677d2
+                self.assertFalse(vdo_supported)
6677d2
6677d2
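Since "is_supported_device_type" now consults the VDO mixin dependencies, a
front-end could gate the LVM VDO device type with a check along these lines
(a sketch, not part of the patch):

    from blivet import devicefactory

    # False when the libblockdev LVM plugin or its VDO technology is missing
    vdo_ok = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
    print("LVM VDO supported:", vdo_ok)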
From c7fb125ec552ee5070f8180f92fe5545709192ff Mon Sep 17 00:00:00 2001
6677d2
From: Vojtech Trefny <vtrefny@redhat.com>
6677d2
Date: Fri, 11 Dec 2020 15:02:05 +0100
6677d2
Subject: [PATCH 17/17] Bump required libblockdev version to 2.24
6677d2
6677d2
LVM VDO support was added in 2.24.
6677d2
---
6677d2
 python-blivet.spec | 2 +-
6677d2
 1 file changed, 1 insertion(+), 1 deletion(-)
6677d2
6677d2
diff --git a/python-blivet.spec b/python-blivet.spec
6677d2
index ffd4210e..58cad0b2 100644
6677d2
--- a/python-blivet.spec
6677d2
+++ b/python-blivet.spec
6677d2
@@ -36,7 +36,7 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
6677d2
 %global partedver 1.8.1
6677d2
 %global pypartedver 3.10.4
6677d2
 %global utillinuxver 2.15.1
6677d2
-%global libblockdevver 2.19
6677d2
+%global libblockdevver 2.24
6677d2
 %global libbytesizever 0.3
6677d2
 %global pyudevver 0.18
6677d2