From acb99e74a24fa07863c596fe59d2999adc28c249 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 2 Jun 2022 15:18:19 +0200
Subject: [PATCH] LVM RAID raid0 level support (#272)

* Add workaround for missing LVM raid0 support in blivet

Blivet supports creating LVs with the "raid0" segment type, but
"raid0" is not in its list of supported LVM RAID levels. This will be
fixed in blivet; see https://github.com/storaged-project/blivet/pull/1047
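
With the workaround applied, the role can create an LVM RAID LV with
the "raid0" segment type. A minimal sketch of the role variables (VG,
LV and disk names, size and mount point are placeholders; the new test
below uses the same shape):

    storage_pools:
      - name: vg1                    # placeholder VG name
        type: lvm
        disks: [sda, sdb]            # placeholder disks
        volumes:
          - name: lv1                # placeholder LV name
            size: 2g                 # placeholder size
            raid_level: raid0        # the level this workaround enables
            raid_disks: [sda, sdb]
            mount_point: /mnt/test1  # placeholder mount point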

* Add a test for LVM RAID raid0 level

* README: Remove "striped" from the list of supported RAID for pools

We use MD RAID for RAID on the pool level, and MD RAID does not
support the "striped" level.

* README: Clarify supported volume RAID levels

We support different levels for LVM RAID and MD RAID.
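
Levels such as "striped" and "mirror" are LVM segment types, so they
are only accepted for LVM RAID volumes, not for MD RAID on the pool
level. A minimal sketch (names, size and disks are placeholders):

    storage_pools:
      - name: vg1                   # placeholder VG name
        type: lvm
        disks: [sda, sdb]           # placeholder disks
        volumes:
          - name: lv_striped        # placeholder LV name
            size: 2g                # placeholder size
            raid_level: striped     # accepted for LVM RAID volumes only
            raid_disks: [sda, sdb]
            mount_point: /mnt/test2 # placeholder mount point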

(cherry picked from commit 8b868a348155b08479743945aba88271121ad4b0)
---
 README.md                                    |  7 ++-
 library/blivet.py                            |  7 +++
 tests/tests_create_raid_pool_then_remove.yml | 54 ++++++++++++++++++++
 3 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index f8e3daa..bd123d7 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ device node basename (like `sda` or `mpathb`), /dev/disk/ symlink
 ##### `raid_level`
 When used with `type: lvm` it manages a volume group with a mdraid array of given level
 on it. Input `disks` are in this case used as RAID members.
-Accepted values are: `linear`, `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+Accepted values are: `linear`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
 
 ##### `volumes`
 This is a list of volumes that belong to the current pool. It follows the
@@ -136,7 +136,10 @@ Specifies RAID level. LVM RAID can be created as well.
 "Regular" RAID volume requires type to be `raid`.
 LVM RAID needs that volume has `storage_pools` parent with type `lvm`,
 `raid_disks` need to be specified as well.
-Accepted values are: `linear` (N/A for LVM RAID), `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+Accepted values are:
+* for LVM RAID volume: `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`, `striped`, `mirror`
+* for RAID volume: `linear`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+
 __WARNING__: Changing `raid_level` for a volume is a destructive operation, meaning
              all data on that volume will be lost as part of the process of
              removing old and adding new RAID. RAID reshaping is currently not
diff --git a/library/blivet.py b/library/blivet.py
index 29552fa..33c93b2 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -118,6 +118,7 @@ LIB_IMP_ERR = ""
 try:
     from blivet3 import Blivet
     from blivet3.callbacks import callbacks
+    from blivet3 import devicelibs
     from blivet3 import devices
     from blivet3.deviceaction import ActionConfigureFormat
     from blivet3.flags import flags as blivet_flags
@@ -132,6 +133,7 @@ except ImportError:
     try:
         from blivet import Blivet
         from blivet.callbacks import callbacks
+        from blivet import devicelibs
         from blivet import devices
         from blivet.deviceaction import ActionConfigureFormat
         from blivet.flags import flags as blivet_flags
@@ -152,6 +154,11 @@ if BLIVET_PACKAGE:
     set_up_logging()
     log = logging.getLogger(BLIVET_PACKAGE + ".ansible")
 
+    # XXX add support for LVM RAID raid0 level
+    devicelibs.lvm.raid_levels.add_raid_level(devicelibs.raid.RAID0)
+    if "raid0" not in devicelibs.lvm.raid_seg_types:
+        devicelibs.lvm.raid_seg_types.append("raid0")
+
 
 MAX_TRIM_PERCENT = 2
 
diff --git a/tests/tests_create_raid_pool_then_remove.yml b/tests/tests_create_raid_pool_then_remove.yml
index d81680d..1fb4e15 100644
--- a/tests/tests_create_raid_pool_then_remove.yml
+++ b/tests/tests_create_raid_pool_then_remove.yml
@@ -150,3 +150,57 @@
                 raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
 
     - include_tasks: verify-role-results.yml
+
+    - name: Create a RAID0 lvm raid device
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            state: present
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+                raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+                raid_level: raid0
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Repeat the previous invocation to verify idempotence
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            state: present
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+                raid_level: raid0
+                raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Remove the device created above
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            state: absent
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+                raid_level: raid0
+                raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+
+    - include_tasks: verify-role-results.yml
-- 
2.35.3