From f01d01f267695aa2fed067d9d9d84e19b1ea1aaa Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Mar 30 2021 18:11:24 +0000 Subject: import python-blivet-3.2.2-9.el8 --- diff --git a/SOURCES/0006-Blivet-RHEL-8.3-localization-update.patch b/SOURCES/0006-Blivet-RHEL-8.3-localization-update.patch new file mode 100644 index 0000000..ad1368b --- /dev/null +++ b/SOURCES/0006-Blivet-RHEL-8.3-localization-update.patch @@ -0,0 +1,438 @@ +From 44d7e9669fe55fd4b2b3a6c96f23e2d0669f8dbb Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 9 Jul 2020 13:42:31 +0200 +Subject: [PATCH] Blivet RHEL 8.3 localization update + +Resolves: rhbz#1820565 +--- + po/ja.po | 33 ++++++++++----------- + po/ko.po | 83 ++++++++++++++++++++++++----------------------------- + po/zh_CN.po | 28 +++++++++--------- + 3 files changed, 68 insertions(+), 76 deletions(-) + +diff --git a/po/ja.po b/po/ja.po +index 733e63a0..b4c864c2 100644 +--- a/po/ja.po ++++ b/po/ja.po +@@ -29,17 +29,17 @@ msgid "" + msgstr "" + "Project-Id-Version: PACKAGE VERSION\n" + "Report-Msgid-Bugs-To: \n" +-"POT-Creation-Date: 2020-05-21 12:42+0200\n" +-"PO-Revision-Date: 2018-09-21 01:08+0000\n" +-"Last-Translator: Copied by Zanata \n" +-"Language-Team: Japanese (http://www.transifex.com/projects/p/blivet/language/" +-"ja/)\n" ++"POT-Creation-Date: 2020-01-29 14:04+0100\n" ++"PO-Revision-Date: 2020-07-03 07:42+0000\n" ++"Last-Translator: Ludek Janda \n" ++"Language-Team: Japanese \n" + "Language: ja\n" + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=UTF-8\n" + "Content-Transfer-Encoding: 8bit\n" + "Plural-Forms: nplurals=1; plural=0;\n" +-"X-Generator: Zanata 4.6.2\n" ++"X-Generator: Weblate 4.1.1\n" + + #: ../blivet/errors.py:210 + msgid "" +@@ -47,6 +47,8 @@ msgid "" + "of the UUID value which should be unique. In that case you can either " + "disconnect one of the devices or reformat it." + msgstr "" ++"これは通常、デバイスイメージを複製したことで、一意であるはずのUUID値が重複することが原因です。その場合は、いずれかのデバイスを切断するか、再フォーマッ" ++"トしてください。" + + #: ../blivet/errors.py:217 + msgid "" +@@ -54,9 +56,8 @@ msgid "" + "kernel is reporting partitions on. It is unclear what the exact problem is. " + "Please file a bug at http://bugzilla.redhat.com" + msgstr "" +-"なんらかの理由により、kernel がパーティションを報告しているディスク上でディス" +-"クラベルを見つけられませんでした。何が問題となっているかは不明です。バグを " +-"http://bugzilla.redhat.com に提出してください。" ++"なんらかの理由により、kernel がパーティションを報告しているディスク上でディスクラベルを見つけられませんでした。何が問題となっているかは不明です。" ++"バグを http://bugzilla.redhat.com に提出してください" + + #: ../blivet/errors.py:224 + msgid "" +@@ -84,7 +85,7 @@ msgstr "FCoE は使用できません" + + #: ../blivet/zfcp.py:62 + msgid "You have not specified a device number or the number is invalid" +-msgstr "デバイス番号を指定していないか番号が無効です。" ++msgstr "デバイス番号を指定していないか番号が無効です" + + #: ../blivet/zfcp.py:64 + msgid "You have not specified a worldwide port name or the name is invalid." 
+@@ -202,7 +203,7 @@ msgstr "iSCSI ノードが何も探索できませんでした" + + #: ../blivet/iscsi.py:550 + msgid "No new iSCSI nodes discovered" +-msgstr "新しい iSCSI ノードは見つかりませんでした。" ++msgstr "新しい iSCSI ノードは見つかりませんでした" + + #: ../blivet/iscsi.py:553 + msgid "Could not log in to any of the discovered nodes" +@@ -257,7 +258,7 @@ msgstr "要求を超えたサイズを再利用することができません" + + #: ../blivet/partitioning.py:1419 + msgid "DiskChunk requests must be of type PartitionRequest" +-msgstr "DiskChunk 要求には PartitionResquest タイプが必要です。" ++msgstr "DiskChunk 要求には PartitionResquest タイプが必要です" + + #: ../blivet/partitioning.py:1432 + msgid "partitions allocated outside disklabel limits" +@@ -265,7 +266,7 @@ msgstr "ディスクラベルの範囲外に割り当てられたパーティシ + + #: ../blivet/partitioning.py:1517 + msgid "VGChunk requests must be of type LVRequest" +-msgstr "VGChunk 要求には LVResquest タイプが必要です。" ++msgstr "VGChunk 要求には LVResquest タイプが必要です" + + #. by now we have allocated the PVs so if there isn't enough + #. space in the VG we have a real problem +@@ -368,15 +369,15 @@ msgstr "" + msgid "Cannot remove a member from existing %s array" + msgstr "既存の %s 配列からメンバーを削除できません" + +-#: ../blivet/formats/fs.py:934 ++#: ../blivet/formats/fs.py:932 + msgid "EFI System Partition" + msgstr "EFI システムパーティション" + +-#: ../blivet/formats/fs.py:1139 ++#: ../blivet/formats/fs.py:1137 + msgid "Apple Bootstrap" + msgstr "Apple ブートストラップ" + +-#: ../blivet/formats/fs.py:1175 ++#: ../blivet/formats/fs.py:1173 + msgid "Linux HFS+ ESP" + msgstr "Linux HFS+ ESP" + +diff --git a/po/ko.po b/po/ko.po +index 66789af0..747b00c5 100644 +--- a/po/ko.po ++++ b/po/ko.po +@@ -20,17 +20,17 @@ msgid "" + msgstr "" + "Project-Id-Version: PACKAGE VERSION\n" + "Report-Msgid-Bugs-To: \n" +-"POT-Creation-Date: 2020-05-21 12:42+0200\n" +-"PO-Revision-Date: 2018-09-21 01:08+0000\n" +-"Last-Translator: Copied by Zanata \n" +-"Language-Team: Korean (http://www.transifex.com/projects/p/blivet/language/" +-"ko/)\n" ++"POT-Creation-Date: 2020-01-29 14:04+0100\n" ++"PO-Revision-Date: 2020-07-03 07:42+0000\n" ++"Last-Translator: Ludek Janda \n" ++"Language-Team: Korean \n" + "Language: ko\n" + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=UTF-8\n" + "Content-Transfer-Encoding: 8bit\n" + "Plural-Forms: nplurals=1; plural=0;\n" +-"X-Generator: Zanata 4.6.2\n" ++"X-Generator: Weblate 4.1.1\n" + + #: ../blivet/errors.py:210 + msgid "" +@@ -38,6 +38,8 @@ msgid "" + "of the UUID value which should be unique. In that case you can either " + "disconnect one of the devices or reformat it." + msgstr "" ++"이는 일반적으로 장치 이미지 복제로 인해 고유한 UUID 값이 복제되기 때문에 발생합니다. 이 경우 장치 중 하나를 분리하거나 다시 " ++"포맷할 수 있습니다." + + #: ../blivet/errors.py:217 + msgid "" +@@ -45,9 +47,8 @@ msgid "" + "kernel is reporting partitions on. It is unclear what the exact problem is. " + "Please file a bug at http://bugzilla.redhat.com" + msgstr "" +-"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 " +-"없습니다. 정확한 문제가 무엇인지 알 수 없습니다. http://bugzilla.redhat.com" +-"에 버그 리포트를 제출해 주십시오." ++"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 없습니다. 정확한 문제가 무엇인지 알 수 없습니다. " ++"http://bugzilla.redhat.com에 버그 리포트를 제출해 주십시오" + + #: ../blivet/errors.py:224 + msgid "" +@@ -78,11 +79,11 @@ msgstr "장치 번호를 지정하지 않았거나, 번호가 맞지 않습니 + + #: ../blivet/zfcp.py:64 + msgid "You have not specified a worldwide port name or the name is invalid." +-msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다" ++msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다." + + #: ../blivet/zfcp.py:66 + msgid "You have not specified a FCP LUN or the number is invalid." 
+-msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다" ++msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다." + + #: ../blivet/zfcp.py:91 + #, python-format +@@ -103,7 +104,7 @@ msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s (%(e)s)에 추가할 수 + #: ../blivet/zfcp.py:119 + #, python-format + msgid "WWPN %(wwpn)s not found at zFCP device %(devnum)s." +-msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다. " ++msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다." + + #: ../blivet/zfcp.py:134 + #, python-format +@@ -111,8 +112,7 @@ msgid "" + "Could not add LUN %(fcplun)s to WWPN %(wwpn)s on zFCP device %(devnum)s " + "(%(e)s)." + msgstr "" +-"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 " +-"수 없습니다. " ++"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 수 없습니다." + + #: ../blivet/zfcp.py:140 + #, python-format +@@ -136,18 +136,14 @@ msgstr "" + msgid "" + "Failed LUN %(fcplun)s at WWPN %(wwpn)s on zFCP device %(devnum)s removed " + "again." +-msgstr "" +-"zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭" +-"제되었습니다. " ++msgstr "zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭제되었습니다." + + #: ../blivet/zfcp.py:218 + #, python-format + msgid "" + "Could not correctly delete SCSI device of zFCP %(devnum)s %(wwpn)s " + "%(fcplun)s (%(e)s)." +-msgstr "" +-"zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 " +-"없습니다. " ++msgstr "zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 없습니다." + + #: ../blivet/zfcp.py:227 + #, python-format +@@ -161,41 +157,40 @@ msgstr "" + #: ../blivet/zfcp.py:245 + #, python-format + msgid "Could not remove WWPN %(wwpn)s on zFCP device %(devnum)s (%(e)s)." +-msgstr "" +-"zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다. " ++msgstr "zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다." + + #: ../blivet/zfcp.py:271 + #, python-format + msgid "Could not set zFCP device %(devnum)s offline (%(e)s)." +-msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다. " ++msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다." 
+ + #: ../blivet/iscsi.py:217 + msgid "Unable to change iSCSI initiator name once set" +-msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음 " ++msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음" + + #: ../blivet/iscsi.py:219 + msgid "Must provide an iSCSI initiator name" +-msgstr "iSCSI 개시자 이름을 지정하십시오 " ++msgstr "iSCSI 개시자 이름을 지정하십시오" + + #: ../blivet/iscsi.py:410 + msgid "iSCSI not available" +-msgstr "iSCSI 사용 불가능 " ++msgstr "iSCSI 사용 불가능" + + #: ../blivet/iscsi.py:412 + msgid "No initiator name set" +-msgstr "이니셰이터 이름이 설정되지 않음 " ++msgstr "이니셰이터 이름이 설정되지 않음" + + #: ../blivet/iscsi.py:530 + msgid "No iSCSI nodes discovered" +-msgstr "iSCSI 노드를 찾을 수 없음 " ++msgstr "iSCSI 노드를 찾을 수 없음" + + #: ../blivet/iscsi.py:550 + msgid "No new iSCSI nodes discovered" +-msgstr "새 iSCSI 노드를 찾을 수 없음 " ++msgstr "새 iSCSI 노드를 찾을 수 없음" + + #: ../blivet/iscsi.py:553 + msgid "Could not log in to any of the discovered nodes" +-msgstr "검색된 노드로 로그인할 수 없음 " ++msgstr "검색된 노드로 로그인할 수 없음" + + #: ../blivet/partitioning.py:454 + msgid "unable to allocate aligned partition" +@@ -265,7 +260,7 @@ msgstr "LVM 요청에 필요한 공간이 충분하지 않습니다" + #: ../blivet/deviceaction.py:194 + #, python-format + msgid "Executing %(action)s" +-msgstr "%(action)s 실행 " ++msgstr "%(action)s 실행" + + #: ../blivet/deviceaction.py:322 + msgid "create device" +@@ -286,7 +281,7 @@ msgstr "포맷 생성" + #: ../blivet/deviceaction.py:613 + #, python-format + msgid "Creating %(type)s on %(device)s" +-msgstr "%(device)s에 %(type)s 생성 " ++msgstr "%(device)s에 %(type)s 생성" + + #: ../blivet/deviceaction.py:640 + #, python-format +@@ -327,11 +322,11 @@ msgstr "컨테이너 멤버 삭제" + + #: ../blivet/deviceaction.py:1058 + msgid "configure format" +-msgstr "포맷 설정 " ++msgstr "포맷 설정" + + #: ../blivet/deviceaction.py:1114 + msgid "configure device" +-msgstr "장치 설정 " ++msgstr "장치 설정" + + #: ../blivet/devices/raid.py:58 + #, python-format +@@ -341,32 +336,28 @@ msgid "" + msgid_plural "" + "RAID level %(raid_level)s requires that device have at least %(min_members)d " + "members." +-msgstr[0] "" +-"RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니" +-"다. " ++msgstr[0] "RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니다." + + #: ../blivet/devices/raid.py:79 + #, python-format + msgid "" + "RAID level %(raid_level)s is an invalid value. Must be one of (%(levels)s)." +-msgstr "" +-"RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 " +-"합니다. " ++msgstr "RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 합니다." 
+ + #: ../blivet/devices/raid.py:104 + #, python-format + msgid "Cannot remove a member from existing %s array" +-msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다 " ++msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다" + +-#: ../blivet/formats/fs.py:934 ++#: ../blivet/formats/fs.py:932 + msgid "EFI System Partition" +-msgstr "EFI 시스템 파티션 " ++msgstr "EFI 시스템 파티션" + +-#: ../blivet/formats/fs.py:1139 ++#: ../blivet/formats/fs.py:1137 + msgid "Apple Bootstrap" + msgstr "Apple 부트스트랩" + +-#: ../blivet/formats/fs.py:1175 ++#: ../blivet/formats/fs.py:1173 + msgid "Linux HFS+ ESP" + msgstr "Linux HFS+ ESP" + +@@ -384,7 +375,7 @@ msgstr "암호화됨" + + #: ../blivet/formats/luks.py:388 + msgid "DM Integrity" +-msgstr "DM 무결성 " ++msgstr "DM 무결성" + + #: ../blivet/formats/__init__.py:148 + msgid "Unknown" +diff --git a/po/zh_CN.po b/po/zh_CN.po +index 480801de..2be6d492 100644 +--- a/po/zh_CN.po ++++ b/po/zh_CN.po +@@ -20,24 +20,24 @@ msgid "" + msgstr "" + "Project-Id-Version: PACKAGE VERSION\n" + "Report-Msgid-Bugs-To: \n" +-"POT-Creation-Date: 2020-05-21 12:42+0200\n" +-"PO-Revision-Date: 2018-09-13 02:13+0000\n" +-"Last-Translator: Copied by Zanata \n" +-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/blivet/" +-"language/zh_CN/)\n" ++"POT-Creation-Date: 2020-01-29 14:04+0100\n" ++"PO-Revision-Date: 2020-07-03 07:42+0000\n" ++"Last-Translator: Ludek Janda \n" ++"Language-Team: Chinese (Simplified) \n" + "Language: zh_CN\n" + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=UTF-8\n" + "Content-Transfer-Encoding: 8bit\n" + "Plural-Forms: nplurals=1; plural=0;\n" +-"X-Generator: Zanata 4.6.2\n" ++"X-Generator: Weblate 4.1.1\n" + + #: ../blivet/errors.py:210 + msgid "" + "This is usually caused by cloning the device image resulting in duplication " + "of the UUID value which should be unique. In that case you can either " + "disconnect one of the devices or reformat it." +-msgstr "" ++msgstr "这通常是由于克隆设备镜像导致 UUID 值重复造成的,而 UUID 值应该是唯一的。如果是这种情况,可以断开其中一个设备或重新格式化它。" + + #: ../blivet/errors.py:217 + msgid "" +@@ -45,8 +45,8 @@ msgid "" + "kernel is reporting partitions on. It is unclear what the exact problem is. " + "Please file a bug at http://bugzilla.redhat.com" + msgstr "" +-"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具" +-"体问题所在。请在 http://bugzilla.redhat.com 提交 bug。" ++"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具体问题所在。请在 http://bugzilla.redhat.com 提交 " ++"bug" + + #: ../blivet/errors.py:224 + msgid "" +@@ -170,7 +170,7 @@ msgstr "设定后就无法更改 iSCSI 启动程序名称" + + #: ../blivet/iscsi.py:219 + msgid "Must provide an iSCSI initiator name" +-msgstr "您必须提供一个 iSCSI 启动程序名称。" ++msgstr "您必须提供一个 iSCSI 启动程序名称" + + #: ../blivet/iscsi.py:410 + msgid "iSCSI not available" +@@ -223,7 +223,7 @@ msgstr "" + + #: ../blivet/partitioning.py:962 + msgid "Unable to allocate requested partition scheme." 
+-msgstr "无法分配所需分区方案" ++msgstr "无法分配所需分区方案。" + + #: ../blivet/partitioning.py:997 + msgid "not enough free space after creating extended partition" +@@ -347,15 +347,15 @@ msgstr "" + msgid "Cannot remove a member from existing %s array" + msgstr "无法从存在的 %s 阵列中删除一个成员" + +-#: ../blivet/formats/fs.py:934 ++#: ../blivet/formats/fs.py:932 + msgid "EFI System Partition" + msgstr "EFI 系统分区" + +-#: ../blivet/formats/fs.py:1139 ++#: ../blivet/formats/fs.py:1137 + msgid "Apple Bootstrap" + msgstr "Apple Bootstrap" + +-#: ../blivet/formats/fs.py:1175 ++#: ../blivet/formats/fs.py:1173 + msgid "Linux HFS+ ESP" + msgstr "Linux HFS+ ESP" + +-- +2.25.4 + diff --git a/SOURCES/0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch b/SOURCES/0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch new file mode 100644 index 0000000..c8e1447 --- /dev/null +++ b/SOURCES/0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch @@ -0,0 +1,24 @@ +From 7bc4e324580656585adad0cbe51d60ed3540b766 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 3 Jul 2020 13:04:23 +0200 +Subject: [PATCH] Do not use FSAVAIL and FSUSE% options when running lsblk + +These options were added in util-linux 2.33 which is not available +on older systems so we should not use these. +--- + blivet/blivet.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/blivet.py b/blivet/blivet.py +index fcc2080b..e7dbd37b 100644 +--- a/blivet/blivet.py ++++ b/blivet/blivet.py +@@ -77,7 +77,7 @@ def __init__(self): + self._dump_file = "%s/storage.state" % tempfile.gettempdir() + + try: +- options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,FSAVAIL,FSUSE%,MOUNTPOINT" ++ options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,MOUNTPOINT" + out = capture_output(["lsblk", "--bytes", "-a", "-o", options]) + except Exception: # pylint: disable=broad-except + pass diff --git a/SOURCES/0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch b/SOURCES/0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch new file mode 100644 index 0000000..931fca0 --- /dev/null +++ b/SOURCES/0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch @@ -0,0 +1,39 @@ +From 462099a9137fb7997140360c07665a21615a0fea Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Dan=20Hor=C3=A1k?= +Date: Tue, 7 Jul 2020 13:19:02 +0200 +Subject: [PATCH] set allowed disk labels for s390x as standard ones (msdos + + gpt) plus dasd + +This will solve issues when a SCSI or NVMe disk with GPT partition table +is used with a s390x machine (rhbz#1827066, rhbz#1854110). 
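+
+As an illustration, the effective logic after this change is roughly (a
+simplified sketch, not the verbatim blivet code; only the s390 branch is
+shown):
+
+    label_types = ["msdos", "gpt"]    # standard defaults for all arches
+    if arch.is_s390():
+        label_types += ["dasd"]       # append, so gpt/msdos remain allowed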
+--- + blivet/formats/disklabel.py | 2 +- + tests/formats_test/disklabel_test.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py +index 3dcac12b..53e2c010 100644 +--- a/blivet/formats/disklabel.py ++++ b/blivet/formats/disklabel.py +@@ -230,7 +230,7 @@ def get_platform_label_types(cls): + elif arch.is_efi() and not arch.is_aarch64(): + label_types = ["gpt", "msdos"] + elif arch.is_s390(): +- label_types = ["msdos", "dasd"] ++ label_types += ["dasd"] + + return label_types + +diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py +index 94f3775f..3068dc07 100644 +--- a/tests/formats_test/disklabel_test.py ++++ b/tests/formats_test/disklabel_test.py +@@ -95,7 +95,7 @@ def test_platform_label_types(self, arch): + arch.is_arm.return_value = False + + arch.is_s390.return_value = True +- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"]) ++ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"]) + arch.is_s390.return_value = False + + def test_label_type_size_check(self): diff --git a/SOURCES/0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch b/SOURCES/0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch new file mode 100644 index 0000000..8736460 --- /dev/null +++ b/SOURCES/0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch @@ -0,0 +1,47 @@ +From 7303f4a3f2fe3280339f6303dcff31b6ade12176 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 9 Jul 2020 16:30:55 +0200 +Subject: [PATCH] Do not use BlockDev.utils_have_kernel_module to check for + modules + +The function unfortunately uses only the name when searching for +the module and we need to use aliases for modules like ext2 and +ext3. So we need to use "modprobe --dry-run" instead. 
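+
+A minimal standalone sketch of the new check (kernel_module_available is a
+hypothetical helper shown for illustration; blivet itself wraps the call in
+util.run_program with logging):
+
+    import subprocess
+
+    def kernel_module_available(module):
+        # modprobe resolves module aliases (e.g. ext2/ext3 served by ext4),
+        # unlike BlockDev.utils_have_kernel_module, which matches names only.
+        return subprocess.call(["modprobe", "--dry-run", module]) == 0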
+--- + blivet/formats/fs.py | 12 +++--------- + 1 file changed, 3 insertions(+), 9 deletions(-) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index eee15aaa..bcfbc08e 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -60,12 +60,6 @@ + import logging + log = logging.getLogger("blivet") + +-import gi +-gi.require_version("GLib", "2.0") +-gi.require_version("BlockDev", "2.0") +- +-from gi.repository import GLib +-from gi.repository import BlockDev + + AVAILABLE_FILESYSTEMS = kernel_filesystems + +@@ -462,13 +456,13 @@ def check_module(self): + + for module in self._modules: + try: +- succ = BlockDev.utils_have_kernel_module(module) +- except GLib.GError as e: ++ rc = util.run_program(["modprobe", "--dry-run", module]) ++ except OSError as e: + log.error("Could not check kernel module availability %s: %s", module, e) + self._supported = False + return + +- if not succ: ++ if rc: + log.debug("Kernel module %s not available", module) + self._supported = False + return diff --git a/SOURCES/0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch b/SOURCES/0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch new file mode 100644 index 0000000..e94ba8e --- /dev/null +++ b/SOURCES/0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch @@ -0,0 +1,844 @@ +From 18ce766bc90abdf0d8ca54bdf578463392a52ee9 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 12 Aug 2020 10:57:19 +0200 +Subject: [PATCH 1/2] Fix name resolution for MD devices and partitions on them + +UDev data for both member disks/partitions and partitions on arrays +contain the MD_* properties we must be extra careful when deciding +what name we'll use for the device. + +Resolves: rhbz#1862904 +--- + blivet/udev.py | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/blivet/udev.py b/blivet/udev.py +index 41c99496..c85eb3dc 100644 +--- a/blivet/udev.py ++++ b/blivet/udev.py +@@ -202,9 +202,16 @@ def device_get_name(udev_info): + """ Return the best name for a device based on the udev db data. """ + if "DM_NAME" in udev_info: + name = udev_info["DM_NAME"] +- elif "MD_DEVNAME" in udev_info and os.path.exists(device_get_sysfs_path(udev_info) + "/md"): ++ elif "MD_DEVNAME" in udev_info: + mdname = udev_info["MD_DEVNAME"] +- if device_is_partition(udev_info): ++ if device_is_md(udev_info): ++ # MD RAID array -> use MD_DEVNAME ++ name = mdname ++ elif device_get_format(udev_info) == "linux_raid_member": ++ # MD RAID member -> use SYS_NAME ++ name = udev_info["SYS_NAME"] ++ elif device_is_partition(udev_info): ++ # partition on RAID -> construct name from MD_DEVNAME + partition number + # for partitions on named RAID we want to use the raid name, not + # the node, e.g. "raid1" instead of "md127p1" + partnum = udev_info["ID_PART_ENTRY_NUMBER"] +@@ -213,6 +220,7 @@ def device_get_name(udev_info): + else: + name = mdname + partnum + else: ++ # something else -> default to MD_DEVNAME + name = mdname + else: + name = udev_info["SYS_NAME"] +-- +2.25.4 + + +From dc96961adcb9dd6ef6d09e4daaa0a5eaae1ffe60 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 12 Aug 2020 11:10:03 +0200 +Subject: [PATCH 2/2] Add tests for udev.device_get_name for RAID devices + +This includes sample UDev data for various combinations of RAID +devices configuration. 
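+
+For illustration, the expected name resolution for the sampled devices
+(derived from the data and test assertions below):
+
+    RAID member disk (linux_raid_member)   -> SYS_NAME, e.g. "sda"
+    RAID array with MD_DEVNAME "127"       -> "127"
+    partition 1 on array named "name"      -> "name1"
+    partition 1 on array named "127"       -> "127p1" (numeric names get "p")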
+ +Related: rhbz#1862904 +--- + tests/udev_data/__init__.py | 0 + tests/udev_data/raid_data.py | 705 +++++++++++++++++++++++++++++++++++ + tests/udev_test.py | 46 +++ + 3 files changed, 751 insertions(+) + create mode 100644 tests/udev_data/__init__.py + create mode 100644 tests/udev_data/raid_data.py + +diff --git a/tests/udev_data/__init__.py b/tests/udev_data/__init__.py +new file mode 100644 +index 00000000..e69de29b +diff --git a/tests/udev_data/raid_data.py b/tests/udev_data/raid_data.py +new file mode 100644 +index 00000000..509cbfbd +--- /dev/null ++++ b/tests/udev_data/raid_data.py +@@ -0,0 +1,705 @@ ++# Sample UDev data for various MD RAID devices: ++# - member_boot: data for the member disk or partition after booting the system ++# - member_assemble: data for the member disk or partition after re-assembling stopped array using ++# 'mdadm --assemble --scan' (yes, this is different from member_boot) ++# - raid_device: data for the RAID array device ++# - raid_partition: data for partition on the array ++# ++# We have data for different combinations of member "types", MD metadata versions and named v unnamed ++# RAID devices. ++# The data were gathered on Fedora 32. ++ ++ ++class RaidOnDisk1(): ++ member_name = "sda" ++ raid_name = "127" ++ raid_node = "md127" ++ metadata_version = "1.2" ++ ++ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0', ++ 'DEVNAME': '/dev/sda', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:127', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b', ++ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b', ++ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59', ++ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MD_DEVICE': 'md127', ++ 'MD_DEVNAME': '127', ++ 'MD_FOREIGN': 'no', ++ 'MD_STARTED': 'unsafe', ++ 'MINOR': '0', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdadm-last-resort@md127.timer', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5529231', ++ 'SYS_NAME': 'sda', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'} ++ ++ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0', ++ 'DEVNAME': '/dev/sda', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:127', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b', ++ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b', ++ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59', ++ 'ID_FS_UUID_SUB_ENC': 
'64f96f0b-e97c-9157-d393-1fe457f3dd59', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MINOR': '0', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5529231', ++ 'SYS_NAME': 'sda', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'} ++ ++ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:127 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b /dev/md/127', ++ 'DEVNAME': '/dev/md127', ++ 'DEVPATH': '/devices/virtual/block/md127', ++ 'DEVTYPE': 'disk', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '4eec0361', ++ 'MAJOR': '9', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sda_DEV': '/dev/sda', ++ 'MD_DEVICE_ev_sda_ROLE': '0', ++ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb', ++ 'MD_DEVICE_ev_sdb_ROLE': '1', ++ 'MD_DEVNAME': '127', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:127', ++ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b', ++ 'MINOR': '127', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '603606045', ++ 'SYS_NAME': 'md127', ++ 'SYS_PATH': '/sys/devices/virtual/block/md127'} ++ ++ raid_partition = {'DEVLINKS': '/dev/md/127p1 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b-part1 /dev/disk/by-id/md-name-localhost.localdomain:127-part1', ++ 'DEVNAME': '/dev/md127p1', ++ 'DEVPATH': '/devices/virtual/block/md127/md127p1', ++ 'DEVTYPE': 'partition', ++ 'ID_PART_ENTRY_DISK': '9:127', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '2091008', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '4eec0361-01', ++ 'MAJOR': '259', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sda_DEV': '/dev/sda', ++ 'MD_DEVICE_ev_sda_ROLE': '0', ++ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb', ++ 'MD_DEVICE_ev_sdb_ROLE': '1', ++ 'MD_DEVNAME': '127', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:127', ++ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b', ++ 'MINOR': '2', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '603714783', ++ 'SYS_NAME': 'md127p1', ++ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'} ++ ++ ++class RaidOnDisk2(): ++ member_name = "sdc" ++ raid_name = "name" ++ raid_node = "md127" ++ metadata_version = "1.2" ++ ++ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4', ++ 'DEVNAME': '/dev/sdc', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:name', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f', ++ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f', ++ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c', ++ 'ID_FS_UUID_SUB_ENC': 
'121f2b71-3634-4183-dc9c-08bfceda765c', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MD_DEVICE': 'md127', ++ 'MD_DEVNAME': 'name', ++ 'MD_FOREIGN': 'no', ++ 'MD_STARTED': 'yes', ++ 'MINOR': '32', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '6109555', ++ 'SYS_NAME': 'sdc', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'} ++ ++ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4', ++ 'DEVNAME': '/dev/sdc', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:name', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f', ++ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f', ++ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c', ++ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MINOR': '32', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '6109555', ++ 'SYS_NAME': 'sdc', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'} ++ ++ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:name /dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f /dev/md/name', ++ 'DEVNAME': '/dev/md127', ++ 'DEVPATH': '/devices/virtual/block/md127', ++ 'DEVTYPE': 'disk', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '19e9cb5b', ++ 'MAJOR': '9', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc', ++ 'MD_DEVICE_ev_sdc_ROLE': '0', ++ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd', ++ 'MD_DEVICE_ev_sdd_ROLE': '1', ++ 'MD_DEVNAME': 'name', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:name', ++ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f', ++ 'MINOR': '127', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5844744', ++ 'SYS_NAME': 'md127', ++ 'SYS_PATH': '/sys/devices/virtual/block/md127'} ++ ++ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f-part1 /dev/disk/by-id/md-name-localhost.localdomain:name-part1 /dev/md/name1', ++ 'DEVNAME': '/dev/md127p1', ++ 'DEVPATH': '/devices/virtual/block/md127/md127p1', ++ 'DEVTYPE': 'partition', ++ 'ID_PART_ENTRY_DISK': '9:127', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', 
++ 'ID_PART_ENTRY_SIZE': '2091008', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '19e9cb5b-01', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': 'ec985633', ++ 'MAJOR': '259', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc', ++ 'MD_DEVICE_ev_sdc_ROLE': '0', ++ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd', ++ 'MD_DEVICE_ev_sdd_ROLE': '1', ++ 'MD_DEVNAME': 'name', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:name', ++ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f', ++ 'MINOR': '1', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5928255', ++ 'SYS_NAME': 'md127p1', ++ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'} ++ ++ ++class RaidOnDisk3(): ++ member_name = "sde" ++ raid_name = "125" ++ raid_node = "md125" ++ metadata_version = "0.9" ++ ++ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1', ++ 'DEVNAME': '/dev/sde', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04', ++ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04', ++ 'ID_FS_VERSION': '0.90.0', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MD_DEVICE': 'md125', ++ 'MD_DEVNAME': '125', ++ 'MD_FOREIGN': 'no', ++ 'MD_STARTED': 'unsafe', ++ 'MINOR': '64', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdadm-last-resort@md125.timer', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5538551', ++ 'SYS_NAME': 'sde', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'} ++ ++ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1', ++ 'DEVNAME': '/dev/sde', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde', ++ 'DEVTYPE': 'disk', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04', ++ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04', ++ 'ID_FS_VERSION': '0.90.0', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1', ++ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1', ++ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MINOR': '64', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5538551', ++ 'SYS_NAME': 'sde', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'} ++ ++ raid_device = {'DEVLINKS': '/dev/md/125 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04', ++ 'DEVNAME': '/dev/md125', 
++ 'DEVPATH': '/devices/virtual/block/md125', ++ 'DEVTYPE': 'disk', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': 'e74877cd', ++ 'MAJOR': '9', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sde_DEV': '/dev/sde', ++ 'MD_DEVICE_ev_sde_ROLE': '0', ++ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf', ++ 'MD_DEVICE_ev_sdf_ROLE': '1', ++ 'MD_DEVNAME': '125', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '0.90', ++ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04', ++ 'MINOR': '125', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '5786380', ++ 'SYS_NAME': 'md125', ++ 'SYS_PATH': '/sys/devices/virtual/block/md125'} ++ ++ raid_partition = {'DEVLINKS': '/dev/md/125p1 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04-part1', ++ 'DEVNAME': '/dev/md125p1', ++ 'DEVPATH': '/devices/virtual/block/md125/md125p1', ++ 'DEVTYPE': 'partition', ++ 'ID_PART_ENTRY_DISK': '9:125', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '2094976', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': 'e74877cd-01', ++ 'MAJOR': '259', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sde_DEV': '/dev/sde', ++ 'MD_DEVICE_ev_sde_ROLE': '0', ++ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf', ++ 'MD_DEVICE_ev_sdf_ROLE': '1', ++ 'MD_DEVNAME': '125', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '0.90', ++ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04', ++ 'MINOR': '3', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8808457', ++ 'SYS_NAME': 'md125p1', ++ 'SYS_PATH': '/sys/devices/virtual/block/md125/md125p1'} ++ ++ ++class RaidOnPartition1(): ++ member_name = "sdh3" ++ raid_name = "122" ++ raid_node = "md122" ++ metadata_version = "1.2" ++ ++ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03', ++ 'DEVNAME': '/dev/sdh3', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3', ++ 'DEVTYPE': 'partition', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:122', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212', ++ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212', ++ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d', ++ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PART_ENTRY_DISK': '8:112', ++ 'ID_PART_ENTRY_NUMBER': '3', ++ 'ID_PART_ENTRY_OFFSET': '411648', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '204800', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '73eb11a9-03', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '73eb11a9', ++ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0', ++ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2', ++ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MD_DEVICE': 'md122', ++ 'MD_DEVNAME': '122', ++ 'MD_FOREIGN': 'no', ++ 'MD_STARTED': 'yes', ++ 'MINOR': '115', ++ 'PARTN': '3', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 
'USEC_INITIALIZED': '8920462', ++ 'SYS_NAME': 'sdh3', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'} ++ ++ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03', ++ 'DEVNAME': '/dev/sdh3', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3', ++ 'DEVTYPE': 'partition', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_LABEL': 'localhost.localdomain:122', ++ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212', ++ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212', ++ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d', ++ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d', ++ 'ID_FS_VERSION': '1.2', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PART_ENTRY_DISK': '8:112', ++ 'ID_PART_ENTRY_NUMBER': '3', ++ 'ID_PART_ENTRY_OFFSET': '411648', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '204800', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '73eb11a9-03', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '73eb11a9', ++ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0', ++ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2', ++ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MINOR': '115', ++ 'PARTN': '3', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8920462', ++ 'SYS_NAME': 'sdh3', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'} ++ ++ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212 /dev/disk/by-id/md-name-localhost.localdomain:122 /dev/md/122', ++ 'DEVNAME': '/dev/md122', ++ 'DEVPATH': '/devices/virtual/block/md122', ++ 'DEVTYPE': 'disk', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '6dc80b3b', ++ 'MAJOR': '9', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3', ++ 'MD_DEVICE_ev_sdh3_ROLE': '0', ++ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5', ++ 'MD_DEVICE_ev_sdh5_ROLE': '1', ++ 'MD_DEVNAME': '122', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:122', ++ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212', ++ 'MINOR': '122', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8770105', ++ 'SYS_NAME': 'md122', ++ 'SYS_PATH': '/sys/devices/virtual/block/md122'} ++ ++ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212-part1 /dev/disk/by-id/md-name-localhost.localdomain:122-part1 /dev/md/122p1', ++ 'DEVNAME': '/dev/md122p1', ++ 'DEVPATH': '/devices/virtual/block/md122/md122p1', ++ 'DEVTYPE': 'partition', ++ 'ID_PART_ENTRY_DISK': '9:122', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '200704', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '6dc80b3b-01', ++ 'MAJOR': '259', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3', ++ 'MD_DEVICE_ev_sdh3_ROLE': '0', ++ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5', ++ 'MD_DEVICE_ev_sdh5_ROLE': '1', ++ 
'MD_DEVNAME': '122', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '1.2', ++ 'MD_NAME': 'localhost.localdomain:122', ++ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212', ++ 'MINOR': '6', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '9003885', ++ 'SYS_NAME': 'md122p1', ++ 'SYS_PATH': '/sys/devices/virtual/block/md122/md122p1'} ++ ++ ++class RaidOnPartition2(): ++ member_name = "sdh1" ++ raid_name = "123" ++ raid_node = "md123" ++ metadata_version = "0.9" ++ ++ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01', ++ 'DEVNAME': '/dev/sdh1', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1', ++ 'DEVTYPE': 'partition', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04', ++ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04', ++ 'ID_FS_VERSION': '0.90.0', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PART_ENTRY_DISK': '8:112', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '204800', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '73eb11a9-01', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '73eb11a9', ++ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0', ++ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2', ++ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 'MAJOR': '8', ++ 'MD_DEVICE': 'md123', ++ 'MD_DEVNAME': '123', ++ 'MD_FOREIGN': 'no', ++ 'MD_STARTED': 'unsafe', ++ 'MINOR': '113', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdadm-last-resort@md123.timer', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8778733', ++ 'SYS_NAME': 'sdh1', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'} ++ ++ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01', ++ 'DEVNAME': '/dev/sdh1', ++ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1', ++ 'DEVTYPE': 'partition', ++ 'ID_BUS': 'scsi', ++ 'ID_FS_TYPE': 'linux_raid_member', ++ 'ID_FS_USAGE': 'raid', ++ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04', ++ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04', ++ 'ID_FS_VERSION': '0.90.0', ++ 'ID_MODEL': 'QEMU_HARDDISK', ++ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20', ++ 'ID_PART_ENTRY_DISK': '8:112', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '204800', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '73eb11a9-01', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '73eb11a9', ++ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0', ++ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0', ++ 'ID_REVISION': '2.5+', ++ 'ID_SCSI': '1', ++ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2', ++ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2', ++ 'ID_TYPE': 'disk', ++ 'ID_VENDOR': 'QEMU', ++ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20', ++ 
'MAJOR': '8', ++ 'MINOR': '113', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'TAGS': ':systemd:', ++ 'UDISKS_MD_MEMBER_DEVICES': '2', ++ 'UDISKS_MD_MEMBER_EVENTS': '18', ++ 'UDISKS_MD_MEMBER_LEVEL': 'raid1', ++ 'UDISKS_MD_MEMBER_UPDATE_TIME': '1597143914', ++ 'UDISKS_MD_MEMBER_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04', ++ 'USEC_INITIALIZED': '8778733', ++ 'SYS_NAME': 'sdh1', ++ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'} ++ ++ raid_device = {'DEVLINKS': '/dev/md/123 /dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04', ++ 'DEVNAME': '/dev/md123', ++ 'DEVPATH': '/devices/virtual/block/md123', ++ 'DEVTYPE': 'disk', ++ 'ID_PART_TABLE_TYPE': 'dos', ++ 'ID_PART_TABLE_UUID': '653f84c8', ++ 'MAJOR': '9', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1', ++ 'MD_DEVICE_ev_sdh1_ROLE': '0', ++ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2', ++ 'MD_DEVICE_ev_sdh2_ROLE': '1', ++ 'MD_DEVNAME': '123', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '0.90', ++ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04', ++ 'MINOR': '123', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8760382', ++ 'SYS_NAME': 'md123', ++ 'SYS_PATH': '/sys/devices/virtual/block/md123'} ++ ++ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04-part1 /dev/md/123p1', ++ 'DEVNAME': '/dev/md123p1', ++ 'DEVPATH': '/devices/virtual/block/md123/md123p1', ++ 'DEVTYPE': 'partition', ++ 'ID_PART_ENTRY_DISK': '9:123', ++ 'ID_PART_ENTRY_NUMBER': '1', ++ 'ID_PART_ENTRY_OFFSET': '2048', ++ 'ID_PART_ENTRY_SCHEME': 'dos', ++ 'ID_PART_ENTRY_SIZE': '202624', ++ 'ID_PART_ENTRY_TYPE': '0x83', ++ 'ID_PART_ENTRY_UUID': '653f84c8-01', ++ 'MAJOR': '259', ++ 'MD_DEVICES': '2', ++ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1', ++ 'MD_DEVICE_ev_sdh1_ROLE': '0', ++ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2', ++ 'MD_DEVICE_ev_sdh2_ROLE': '1', ++ 'MD_DEVNAME': '123', ++ 'MD_LEVEL': 'raid1', ++ 'MD_METADATA': '0.90', ++ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04', ++ 'MINOR': '5', ++ 'PARTN': '1', ++ 'SUBSYSTEM': 'block', ++ 'SYSTEMD_WANTS': 'mdmonitor.service', ++ 'TAGS': ':systemd:', ++ 'USEC_INITIALIZED': '8952876', ++ 'SYS_NAME': 'md123p1', ++ 'SYS_PATH': '/sys/devices/virtual/block/md123/md123p1'} +diff --git a/tests/udev_test.py b/tests/udev_test.py +index 653eeb6d..d30a647b 100644 +--- a/tests/udev_test.py ++++ b/tests/udev_test.py +@@ -2,6 +2,8 @@ + import unittest + import mock + ++from udev_data import raid_data ++ + + class UdevTest(unittest.TestCase): + +@@ -77,3 +79,47 @@ class UdevTest(unittest.TestCase): + # Normal MD RAID (w/ at least one non-disk member) + device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list() + self.assertFalse(blivet.udev.device_is_disk(info)) ++ ++ ++class UdevGetNameRaidTest(unittest.TestCase): ++ ++ def _test_raid_name(self, udev_data): ++ import blivet.udev ++ ++ # members don't have the device_get_sysfs_path(info) + "/md" folder ++ with mock.patch("blivet.udev.device_is_md", return_value=False): ++ member_name = blivet.udev.device_get_name(udev_data.member_boot) ++ self.assertEqual(member_name, udev_data.member_name) ++ ++ member_name = blivet.udev.device_get_name(udev_data.member_assemble) ++ self.assertEqual(member_name, udev_data.member_name) ++ ++ with mock.patch("blivet.udev.device_is_md", return_value=True): ++ raid_name = blivet.udev.device_get_name(udev_data.raid_device) ++ self.assertEqual(raid_name, 
udev_data.raid_name) ++ ++ # partitions also don't have the device_get_sysfs_path(info) + "/md" folder ++ with mock.patch("blivet.udev.device_is_md", return_value=False): ++ part_name = blivet.udev.device_get_name(udev_data.raid_partition) ++ expected_name = udev_data.raid_name + "p1" if udev_data.raid_name[-1].isdigit() else udev_data.raid_name + "1" ++ self.assertEqual(part_name, expected_name) ++ ++ def test_raid_name_on_disk_no_name(self): ++ data = raid_data.RaidOnDisk1() ++ self._test_raid_name(data) ++ ++ def test_raid_name_on_disk__with_name(self): ++ data = raid_data.RaidOnDisk2() ++ self._test_raid_name(data) ++ ++ def test_raid_name_on_disk_old_metadata(self): ++ data = raid_data.RaidOnDisk3() ++ self._test_raid_name(data) ++ ++ def test_raid_name_on_part_no_name(self): ++ data = raid_data.RaidOnPartition1() ++ self._test_raid_name(data) ++ ++ def test_raid_name_on_part_old_metadata(self): ++ data = raid_data.RaidOnPartition2() ++ self._test_raid_name(data) +-- +2.25.4 + diff --git a/SOURCES/0011-Fix-ignoring-disk-devices-with-parents-or-children.patch b/SOURCES/0011-Fix-ignoring-disk-devices-with-parents-or-children.patch new file mode 100644 index 0000000..6ce0a64 --- /dev/null +++ b/SOURCES/0011-Fix-ignoring-disk-devices-with-parents-or-children.patch @@ -0,0 +1,269 @@ +From f19140993e94be9e58c8a01c18f1907792f59927 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 5 Aug 2020 13:44:38 +0200 +Subject: [PATCH] Fix ignoring disk devices with parents or children + +For disk-like devices like multipath we should allow to ignore +these by simply ignoring the mpath device or by ignoring all of its +drives. + +- when ignoring the "mpatha" device we should also ignore "sda" and +"sdb" +- when ignoring both "sda" and "sdb" we should also ignore "mpatha" +- when ignoring only "sda" we should not ignore "mpatha" (we don't +want to deal with an "incomplete" multipath device in the tree) + +This is consistent with the existing behaviour when using exclusive +disks (or "ignoredisks --only-use" in kickstart). + +Resolves: rhbz#1866243 +--- + blivet/devicetree.py | 51 ++++++++----- + tests/devicetree_test.py | 157 ++++++++++++++++++++++++++++----------- + 2 files changed, 146 insertions(+), 62 deletions(-) + +diff --git a/blivet/devicetree.py b/blivet/devicetree.py +index 5cc360e1..2afb0d0e 100644 +--- a/blivet/devicetree.py ++++ b/blivet/devicetree.py +@@ -907,31 +907,48 @@ class DeviceTreeBase(object): + hidden.add_hook(new=False) + lvm.lvm_cc_removeFilterRejectRegexp(hidden.name) + ++ def _disk_in_taglist(self, disk, taglist): ++ # Taglist is a list containing mix of disk names and tags into which disk may belong. ++ # Check if it does. Raise ValueError if unknown tag is encountered. ++ if disk.name in taglist: ++ return True ++ tags = [t[1:] for t in taglist if t.startswith("@")] ++ for tag in tags: ++ if tag not in Tags.__members__: ++ raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag) ++ if Tags(tag) in disk.tags: ++ return True ++ return False ++ + def _is_ignored_disk(self, disk): + """ Checks config for lists of exclusive and ignored disks + and returns if the given one should be ignored + """ +- +- def disk_in_taglist(disk, taglist): +- # Taglist is a list containing mix of disk names and tags into which disk may belong. +- # Check if it does. Raise ValueError if unknown tag is encountered. 
+- if disk.name in taglist: +- return True +- tags = [t[1:] for t in taglist if t.startswith("@")] +- for tag in tags: +- if tag not in Tags.__members__: +- raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag) +- if Tags(tag) in disk.tags: +- return True +- return False +- +- return ((self.ignored_disks and disk_in_taglist(disk, self.ignored_disks)) or +- (self.exclusive_disks and not disk_in_taglist(disk, self.exclusive_disks))) ++ return ((self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)) or ++ (self.exclusive_disks and not self._disk_in_taglist(disk, self.exclusive_disks))) + + def _hide_ignored_disks(self): + # hide any subtrees that begin with an ignored disk + for disk in [d for d in self._devices if d.is_disk]: +- if self._is_ignored_disk(disk): ++ is_ignored = self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks) ++ is_exclusive = self.exclusive_disks and self._disk_in_taglist(disk, self.exclusive_disks) ++ ++ if is_ignored: ++ if len(disk.children) == 1: ++ if not all(self._is_ignored_disk(d) for d in disk.children[0].parents): ++ raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.") ++ ++ # and also children like fwraid or mpath ++ self.hide(disk.children[0]) ++ ++ # this disk is ignored: ignore it and all it's potential parents ++ for p in disk.parents: ++ self.hide(p) ++ ++ # and finally hide the disk itself ++ self.hide(disk) ++ ++ if self.exclusive_disks and not is_exclusive: + ignored = True + # If the filter allows all members of a fwraid or mpath, the + # fwraid or mpath itself is implicitly allowed as well. I don't +diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py +index a8f369cf..6032e7f6 100644 +--- a/tests/devicetree_test.py ++++ b/tests/devicetree_test.py +@@ -370,51 +370,6 @@ class DeviceTreeTestCase(unittest.TestCase): + self.assertTrue(sdb in tree.devices) + self.assertTrue(sdc in tree.devices) + +- # now test exclusive_disks special cases for multipath +- sda.format = get_format("multipath_member", exists=True) +- sdb.format = get_format("multipath_member", exists=True) +- sdc.format = get_format("multipath_member", exists=True) +- mpatha = MultipathDevice("mpatha", parents=[sda, sdb, sdc]) +- tree._add_device(mpatha) +- +- tree.ignored_disks = [] +- tree.exclusive_disks = ["mpatha"] +- +- with patch.object(tree, "hide") as hide: +- tree._hide_ignored_disks() +- self.assertFalse(hide.called) +- +- tree._hide_ignored_disks() +- self.assertTrue(sda in tree.devices) +- self.assertTrue(sdb in tree.devices) +- self.assertTrue(sdc in tree.devices) +- self.assertTrue(mpatha in tree.devices) +- +- # all members in exclusive_disks implies the mpath in exclusive_disks +- tree.exclusive_disks = ["sda", "sdb", "sdc"] +- with patch.object(tree, "hide") as hide: +- tree._hide_ignored_disks() +- self.assertFalse(hide.called) +- +- tree._hide_ignored_disks() +- self.assertTrue(sda in tree.devices) +- self.assertTrue(sdb in tree.devices) +- self.assertTrue(sdc in tree.devices) +- self.assertTrue(mpatha in tree.devices) +- +- tree.exclusive_disks = ["sda", "sdb"] +- with patch.object(tree, "hide") as hide: +- tree._hide_ignored_disks() +- hide.assert_any_call(mpatha) +- hide.assert_any_call(sdc) +- +- # verify that hide works as expected +- tree._hide_ignored_disks() +- self.assertTrue(sda in tree.devices) +- self.assertTrue(sdb in tree.devices) +- self.assertFalse(sdc in tree.devices) +- self.assertFalse(mpatha in tree.devices) +- + def test_get_related_disks(self): + 
tree = DeviceTree() + +@@ -447,3 +402,115 @@ class DeviceTreeTestCase(unittest.TestCase): + tree.unhide(sda) + self.assertEqual(tree.get_related_disks(sda), set([sda, sdb])) + self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb])) ++ ++ ++class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase): ++ ++ def setUp(self): ++ self.tree = DeviceTree() ++ ++ self.sda = DiskDevice("sda") ++ self.sdb = DiskDevice("sdb") ++ self.sdc = DiskDevice("sdc") ++ ++ self.tree._add_device(self.sda) ++ self.tree._add_device(self.sdb) ++ self.tree._add_device(self.sdc) ++ ++ self.assertTrue(self.sda in self.tree.devices) ++ self.assertTrue(self.sdb in self.tree.devices) ++ self.assertTrue(self.sdc in self.tree.devices) ++ ++ # now test exclusive_disks special cases for multipath ++ self.sda.format = get_format("multipath_member", exists=True) ++ self.sdb.format = get_format("multipath_member", exists=True) ++ self.sdc.format = get_format("multipath_member", exists=True) ++ self.mpatha = MultipathDevice("mpatha", parents=[self.sda, self.sdb, self.sdc]) ++ self.tree._add_device(self.mpatha) ++ ++ def test_exclusive_disks_multipath_1(self): ++ # multipath is exclusive -> all disks should be exclusive ++ self.tree.ignored_disks = [] ++ self.tree.exclusive_disks = ["mpatha"] ++ ++ with patch.object(self.tree, "hide") as hide: ++ self.tree._hide_ignored_disks() ++ self.assertFalse(hide.called) ++ ++ self.tree._hide_ignored_disks() ++ self.assertTrue(self.sda in self.tree.devices) ++ self.assertTrue(self.sdb in self.tree.devices) ++ self.assertTrue(self.sdc in self.tree.devices) ++ self.assertTrue(self.mpatha in self.tree.devices) ++ ++ def test_exclusive_disks_multipath_2(self): ++ # all disks exclusive -> mpath should also be exclusive ++ self.tree.exclusive_disks = ["sda", "sdb", "sdc"] ++ with patch.object(self.tree, "hide") as hide: ++ self.tree._hide_ignored_disks() ++ self.assertFalse(hide.called) ++ ++ self.tree._hide_ignored_disks() ++ self.assertTrue(self.sda in self.tree.devices) ++ self.assertTrue(self.sdb in self.tree.devices) ++ self.assertTrue(self.sdc in self.tree.devices) ++ self.assertTrue(self.mpatha in self.tree.devices) ++ ++ def test_exclusive_disks_multipath_3(self): ++ # some disks exclusive -> mpath should be hidden ++ self.tree.exclusive_disks = ["sda", "sdb"] ++ with patch.object(self.tree, "hide") as hide: ++ self.tree._hide_ignored_disks() ++ hide.assert_any_call(self.mpatha) ++ hide.assert_any_call(self.sdc) ++ ++ # verify that hide works as expected ++ self.tree._hide_ignored_disks() ++ self.assertTrue(self.sda in self.tree.devices) ++ self.assertTrue(self.sdb in self.tree.devices) ++ self.assertFalse(self.sdc in self.tree.devices) ++ self.assertFalse(self.mpatha in self.tree.devices) ++ ++ def test_ignored_disks_multipath_1(self): ++ # mpatha ignored -> disks should be hidden ++ self.tree.ignored_disks = ["mpatha"] ++ self.tree.exclusive_disks = [] ++ ++ with patch.object(self.tree, "hide") as hide: ++ self.tree._hide_ignored_disks() ++ hide.assert_any_call(self.mpatha) ++ hide.assert_any_call(self.sda) ++ hide.assert_any_call(self.sdb) ++ hide.assert_any_call(self.sdc) ++ ++ self.tree._hide_ignored_disks() ++ self.assertFalse(self.sda in self.tree.devices) ++ self.assertFalse(self.sdb in self.tree.devices) ++ self.assertFalse(self.sdc in self.tree.devices) ++ self.assertFalse(self.mpatha in self.tree.devices) ++ ++ def test_ignored_disks_multipath_2(self): ++ # all disks ignored -> mpath should be hidden ++ self.tree.ignored_disks = ["sda", "sdb", "sdc"] ++ 
self.tree.exclusive_disks = [] ++ ++ with patch.object(self.tree, "hide") as hide: ++ self.tree._hide_ignored_disks() ++ hide.assert_any_call(self.mpatha) ++ hide.assert_any_call(self.sda) ++ hide.assert_any_call(self.sdb) ++ hide.assert_any_call(self.sdc) ++ ++ self.tree._hide_ignored_disks() ++ self.assertFalse(self.sda in self.tree.devices) ++ self.assertFalse(self.sdb in self.tree.devices) ++ self.assertFalse(self.sdc in self.tree.devices) ++ self.assertFalse(self.mpatha in self.tree.devices) ++ ++ def test_ignored_disks_multipath_3(self): ++ # some disks ignored -> error ++ self.tree.ignored_disks = ["sda", "sdb"] ++ self.tree.exclusive_disks = [] ++ ++ with self.assertRaises(DeviceTreeError): ++ self.tree._hide_ignored_disks() +-- +2.25.4 + diff --git a/SOURCES/0012-xfs-grow-support.patch b/SOURCES/0012-xfs-grow-support.patch new file mode 100644 index 0000000..1607c51 --- /dev/null +++ b/SOURCES/0012-xfs-grow-support.patch @@ -0,0 +1,459 @@ +From 433d863cd8a57e5fc30948ff905e6a477ed5f17c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 14 Jul 2020 11:27:08 +0200 +Subject: [PATCH 1/4] Add support for XFS format grow + +--- + blivet/formats/fs.py | 2 ++ + blivet/tasks/availability.py | 1 + + blivet/tasks/fsresize.py | 54 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 57 insertions(+) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index eee15aaa..12cb9885 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -1089,11 +1089,13 @@ class XFS(FS): + _formattable = True + _linux_native = True + _supported = True ++ _resizable = True + _packages = ["xfsprogs"] + _info_class = fsinfo.XFSInfo + _mkfs_class = fsmkfs.XFSMkfs + _readlabel_class = fsreadlabel.XFSReadLabel + _size_info_class = fssize.XFSSize ++ _resize_class = fsresize.XFSResize + _sync_class = fssync.XFSSync + _writelabel_class = fswritelabel.XFSWriteLabel + _writeuuid_class = fswriteuuid.XFSWriteUUID +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index b6b5955a..df62780c 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -455,5 +455,6 @@ TUNE2FS_APP = application_by_version("tune2fs", E2FSPROGS_VERSION) + XFSADMIN_APP = application("xfs_admin") + XFSDB_APP = application("xfs_db") + XFSFREEZE_APP = application("xfs_freeze") ++XFSRESIZE_APP = application("xfs_growfs") + + MOUNT_APP = application("mount") +diff --git a/blivet/tasks/fsresize.py b/blivet/tasks/fsresize.py +index e7e26984..12c0367f 100644 +--- a/blivet/tasks/fsresize.py ++++ b/blivet/tasks/fsresize.py +@@ -20,7 +20,10 @@ + # Red Hat Author(s): Anne Mulhern + + import abc ++import os ++import tempfile + ++from contextlib import contextmanager + from six import add_metaclass + + from ..errors import FSError +@@ -32,6 +35,9 @@ from . import task + from . import fstask + from . 
import dfresize
+
++import logging
++log = logging.getLogger("blivet")
++
+
+ @add_metaclass(abc.ABCMeta)
+ class FSResizeTask(fstask.FSTask):
+@@ -115,6 +121,54 @@ class NTFSResize(FSResize):
+ ]
+
+
++class XFSResize(FSResize):
++ ext = availability.XFSRESIZE_APP
++ unit = B
++ size_fmt = None
++
++ @contextmanager
++ def _do_temp_mount(self):
++ if self.fs.status:
++ yield
++ else:
++ dev_name = os.path.basename(self.fs.device)
++ tmpdir = tempfile.mkdtemp(prefix="xfs-tempmount-%s" % dev_name)
++ log.debug("mounting XFS on '%s' to '%s' for resize", self.fs.device, tmpdir)
++ try:
++ self.fs.mount(mountpoint=tmpdir)
++ except FSError as e:
++ raise FSError("Failed to mount XFS filesystem for resize: %s" % str(e))
++
++ try:
++ yield
++ finally:
++ util.umount(mountpoint=tmpdir)
++ os.rmdir(tmpdir)
++
++ def _get_block_size(self):
++ if self.fs._current_info:
++ # this should be set by update_size_info()
++ for line in self.fs._current_info.split("\n"):
++ if line.startswith("blocksize ="):
++ return int(line.split("=")[-1])
++
++ raise FSError("Failed to get XFS filesystem block size for resize")
++
++ def size_spec(self):
++ # size for xfs_growfs is in blocks
++ return str(self.fs.target_size.convert_to(self.unit) / self._get_block_size())
++
++ @property
++ def args(self):
++ return [self.fs.system_mountpoint, "-D", self.size_spec()]
++
++ def do_task(self):
++ """ Resizes the XFS format. """
++
++ with self._do_temp_mount():
++ super(XFSResize, self).do_task()
++
++
+ class TmpFSResize(FSResize):
+
+ ext = availability.MOUNT_APP
+--
+2.26.2
+
+
+From 56d05334231c30699a9c77dedbc23fdb021b9dee Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 14 Jul 2020 11:27:51 +0200
+Subject: [PATCH 2/4] Add tests for XFS resize
+
+XFS supports only grow so we can't reuse most of the fstesting
+code. We also need to test the resize on a partition because
+XFS won't allow growing to a size bigger than the underlying
+block device.
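+
+For illustration, the grow path exercised by these tests looks roughly like
+this (a sketch, not part of the patch; assumes an existing XFS format on a
+hypothetical /dev/sdc1 and xfsprogs installed):
+
+import blivet.formats.fs as fs
+from blivet.size import Size
+
+an_fs = fs.XFS(device="/dev/sdc1", exists=True)
+an_fs.update_size_info()             # xfs_db reports dblocks and blocksize
+an_fs.target_size = Size("500 MiB")  # grow only, must fit the block device
+an_fs.do_resize()                    # temp-mounts if needed, runs xfs_growfs -D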
+--- + tests/formats_test/fs_test.py | 91 +++++++++++++++++++++++++++++++++ + tests/formats_test/fstesting.py | 33 ++++++------ + 2 files changed, 107 insertions(+), 17 deletions(-) + +diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py +index 15fc0c35..9bc5d20d 100644 +--- a/tests/formats_test/fs_test.py ++++ b/tests/formats_test/fs_test.py +@@ -2,8 +2,13 @@ import os + import tempfile + import unittest + ++import parted ++ + import blivet.formats.fs as fs + from blivet.size import Size, ROUND_DOWN ++from blivet.errors import DeviceFormatError ++from blivet.formats import get_format ++from blivet.devices import PartitionDevice, DiskDevice + + from tests import loopbackedtestcase + +@@ -50,6 +55,92 @@ class ReiserFSTestCase(fstesting.FSAsRoot): + class XFSTestCase(fstesting.FSAsRoot): + _fs_class = fs.XFS + ++ def can_resize(self, an_fs): ++ resize_tasks = (an_fs._resize, an_fs._size_info) ++ return not any(t.availability_errors for t in resize_tasks) ++ ++ def _create_partition(self, disk, size): ++ disk.format = get_format("disklabel", device=disk.path, label_type="msdos") ++ disk.format.create() ++ pstart = disk.format.alignment.grainSize ++ pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize) ++ disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL) ++ disk.format.parted_disk.commit() ++ part = disk.format.parted_disk.getPartitionBySector(pstart) ++ ++ device = PartitionDevice(os.path.basename(part.path)) ++ device.disk = disk ++ device.exists = True ++ device.parted_partition = part ++ ++ return device ++ ++ def _remove_partition(self, partition, disk): ++ disk.format.remove_partition(partition.parted_partition) ++ disk.format.parted_disk.commit() ++ ++ def test_resize(self): ++ an_fs = self._fs_class() ++ if not an_fs.formattable: ++ self.skipTest("can not create filesystem %s" % an_fs.name) ++ an_fs.device = self.loop_devices[0] ++ self.assertIsNone(an_fs.create()) ++ an_fs.update_size_info() ++ ++ self._test_sizes(an_fs) ++ # CHECKME: target size is still 0 after updated_size_info is called. ++ self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size) ++ ++ if not self.can_resize(an_fs): ++ self.assertFalse(an_fs.resizable) ++ # Not resizable, so can not do resizing actions. 
++ with self.assertRaises(DeviceFormatError):
++ an_fs.target_size = Size("64 MiB")
++ with self.assertRaises(DeviceFormatError):
++ an_fs.do_resize()
++ else:
++ disk = DiskDevice(os.path.basename(self.loop_devices[0]))
++ part = self._create_partition(disk, Size("50 MiB"))
++ an_fs = self._fs_class()
++ an_fs.device = part.path
++ self.assertIsNone(an_fs.create())
++ an_fs.update_size_info()
++
++ self.assertTrue(an_fs.resizable)
++
++ # grow the partition so we can grow the filesystem
++ self._remove_partition(part, disk)
++ part = self._create_partition(disk, size=part.size + Size("40 MiB"))
++
++ # Try a reasonable target size
++ TARGET_SIZE = Size("64 MiB")
++ an_fs.target_size = TARGET_SIZE
++ self.assertEqual(an_fs.target_size, TARGET_SIZE)
++ self.assertNotEqual(an_fs._size, TARGET_SIZE)
++ self.assertIsNone(an_fs.do_resize())
++ ACTUAL_SIZE = TARGET_SIZE.round_to_nearest(an_fs._resize.unit, rounding=ROUND_DOWN)
++ self.assertEqual(an_fs.size, ACTUAL_SIZE)
++ self.assertEqual(an_fs._size, ACTUAL_SIZE)
++ self._test_sizes(an_fs)
++
++ self._remove_partition(part, disk)
++
++ # and no errors should occur when checking
++ self.assertIsNone(an_fs.do_check())
++
++ def test_shrink(self):
++ self.skipTest("Not checking resize for this test category.")
++
++ def test_too_small(self):
++ self.skipTest("Not checking resize for this test category.")
++
++ def test_no_explicit_target_size2(self):
++ self.skipTest("Not checking resize for this test category.")
++
++ def test_too_big2(self):
++ # XXX this test assumes that resizing to max size - 1 B will fail, but xfs_growfs won't
++ self.skipTest("Not checking resize for this test category.")
++
+
+ class HFSTestCase(fstesting.FSAsRoot):
+ _fs_class = fs.HFS
+diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
+index 62f806f9..86b2a116 100644
+--- a/tests/formats_test/fstesting.py
++++ b/tests/formats_test/fstesting.py
+@@ -11,16 +11,6 @@ from blivet.size import Size, ROUND_DOWN
+ from blivet.formats import fs
+
+
+-def can_resize(an_fs):
+- """ Returns True if this filesystem has all necessary resizing tools
+- available.
+-
+- :param an_fs: a filesystem object
+- """
+- resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
+- return not any(t.availability_errors for t in resize_tasks)
+-
+-
+ @add_metaclass(abc.ABCMeta)
+ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
+
+@@ -32,6 +22,15 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
+
+ def __init__(self, methodName='run_test'):
+ super(FSAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
+
++ def can_resize(self, an_fs):
++ """ Returns True if this filesystem has all necessary resizing tools
++ available.
++
++ :param an_fs: a filesystem object
++ """
++ resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
++ return not any(t.availability_errors for t in resize_tasks)
++
+ def _test_sizes(self, an_fs):
+ """ Test relationships between different size values.
+
+@@ -190,7 +189,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
+ # CHECKME: target size is still 0 after updated_size_info is called.
+ self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size)
+
+- if not can_resize(an_fs):
++ if not self.can_resize(an_fs):
+ self.assertFalse(an_fs.resizable)
+ # Not resizable, so can not do resizing actions.
+ with self.assertRaises(DeviceFormatError):
+@@ -221,7 +220,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
+ # in constructor call behavior would be different.
+ + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -244,7 +243,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + """ + SIZE = Size("64 MiB") + an_fs = self._fs_class(size=SIZE) +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -264,7 +263,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_shrink(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -296,7 +295,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_small(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create or resize filesystem %s" % an_fs.name) +@@ -315,7 +314,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_big(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +@@ -334,7 +333,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase): + + def test_too_big2(self): + an_fs = self._fs_class() +- if not can_resize(an_fs): ++ if not self.can_resize(an_fs): + self.skipTest("Not checking resize for this test category.") + if not an_fs.formattable: + self.skipTest("can not create filesystem %s" % an_fs.name) +-- +2.26.2 + + +From 51acc04f4639f143b55789a06a68aae988a91296 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 15 Jul 2020 12:59:04 +0200 +Subject: [PATCH 3/4] Add support for checking and fixing XFS using xfs_repair + +--- + blivet/formats/fs.py | 1 + + blivet/tasks/availability.py | 1 + + blivet/tasks/fsck.py | 12 ++++++++++++ + tests/formats_test/fs_test.py | 6 +++--- + 4 files changed, 17 insertions(+), 3 deletions(-) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index 12cb9885..06fbdf10 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -1091,6 +1091,7 @@ class XFS(FS): + _supported = True + _resizable = True + _packages = ["xfsprogs"] ++ _fsck_class = fsck.XFSCK + _info_class = fsinfo.XFSInfo + _mkfs_class = fsmkfs.XFSMkfs + _readlabel_class = fsreadlabel.XFSReadLabel +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index df62780c..f3b76650 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -456,5 +456,6 @@ XFSADMIN_APP = application("xfs_admin") + XFSDB_APP = application("xfs_db") + XFSFREEZE_APP = application("xfs_freeze") + XFSRESIZE_APP = application("xfs_growfs") ++XFSREPAIR_APP = application("xfs_repair") + + MOUNT_APP = application("mount") +diff --git a/blivet/tasks/fsck.py b/blivet/tasks/fsck.py +index 5274f13a..8477f5f8 100644 +--- a/blivet/tasks/fsck.py ++++ b/blivet/tasks/fsck.py +@@ -123,6 +123,18 @@ class Ext2FSCK(FSCK): + return "\n".join(msgs) or None + + ++class XFSCK(FSCK): ++ _fsck_errors = {1: "Runtime error 
encountered during repair operation.",
++ 2: "XFS repair was unable to proceed due to a dirty log."}
++
++ ext = availability.XFSREPAIR_APP
++ options = []
++
++ def _error_message(self, rc):
++ msgs = (self._fsck_errors[c] for c in self._fsck_errors.keys() if rc & c)
++ return "\n".join(msgs) or None
++
++
+ class HFSPlusFSCK(FSCK):
+ _fsck_errors = {3: "Quick check found a dirty filesystem; no repairs done.",
+ 4: "Root filesystem was dirty. System should be rebooted.",
+diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
+index 9bc5d20d..8fb099fd 100644
+--- a/tests/formats_test/fs_test.py
++++ b/tests/formats_test/fs_test.py
+@@ -123,10 +123,10 @@ class XFSTestCase(fstesting.FSAsRoot):
+ self.assertEqual(an_fs._size, ACTUAL_SIZE)
+ self._test_sizes(an_fs)
+
+- self._remove_partition(part, disk)
++ # and no errors should occur when checking
++ self.assertIsNone(an_fs.do_check())
+
+- # and no errors should occur when checking
+- self.assertIsNone(an_fs.do_check())
++ self._remove_partition(part, disk)
+
+ def test_shrink(self):
+ self.skipTest("Not checking resize for this test category.")
+--
+2.26.2
+
+
+From 2a6947098e66f880193f3bac2282a6c7857ca5f7 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 16 Jul 2020 09:05:35 +0200
+Subject: [PATCH 4/4] Use xfs_db in read-only mode when getting XFS information
+
+This way it will also work on mounted filesystems.
+---
+ blivet/tasks/fsinfo.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/blivet/tasks/fsinfo.py b/blivet/tasks/fsinfo.py
+index af208f5d..41ff700f 100644
+--- a/blivet/tasks/fsinfo.py
++++ b/blivet/tasks/fsinfo.py
+@@ -95,7 +95,7 @@ class ReiserFSInfo(FSInfo):
+
+ class XFSInfo(FSInfo):
+ ext = availability.XFSDB_APP
+- options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize"]
++ options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize", "-r"]
+
+
+ class UnimplementedFSInfo(fstask.UnimplementedFSTask):
+--
+2.26.2
+
diff --git a/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch b/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch
new file mode 100644
index 0000000..5b9f0ed
--- /dev/null
+++ b/SOURCES/0013-Do-not-limit-swap-to-128-GiB.patch
@@ -0,0 +1,76 @@
+From aa4ce218fe9b4ee3571d872ff1575a499596181c Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 29 May 2020 12:14:30 +0200
+Subject: [PATCH 1/2] Do not limit swap to 128 GiB
+
+The limit was part of a change to limit the suggested swap size in
+kickstart, which doesn't use SwapSpace._max_size, so there is no
+reason to limit this for manual installations.
+16 TiB seems to be the maximum usable swap size based on the mkswap code.
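+
+A quick sketch of the effect (mirrors the new swap_test.py below; the
+second constructor raises because 17 TiB is above the new 16 TiB cap):
+
+from blivet.devices.storage import StorageDevice
+from blivet.formats import get_format
+from blivet.size import Size
+
+StorageDevice("dev", size=Size("1 TiB"), fmt=get_format("swap"))   # now allowed
+StorageDevice("dev", size=Size("17 TiB"), fmt=get_format("swap"))  # DeviceError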
+ +Resolves: rhbz#1656485 +--- + blivet/formats/swap.py | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/blivet/formats/swap.py b/blivet/formats/swap.py +index 4b8a7edf..3cc59138 100644 +--- a/blivet/formats/swap.py ++++ b/blivet/formats/swap.py +@@ -52,8 +52,7 @@ class SwapSpace(DeviceFormat): + _linux_native = True # for clearpart + _plugin = availability.BLOCKDEV_SWAP_PLUGIN + +- # see rhbz#744129 for details +- _max_size = Size("128 GiB") ++ _max_size = Size("16 TiB") + + config_actions_map = {"label": "write_label"} + +-- +2.26.2 + + +From 93aa6ad87116f1c86616d73dbe561251c4a0c286 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 11 Jun 2020 14:27:44 +0200 +Subject: [PATCH 2/2] Add test for SwapSpace max size + +--- + tests/formats_test/swap_test.py | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + create mode 100644 tests/formats_test/swap_test.py + +diff --git a/tests/formats_test/swap_test.py b/tests/formats_test/swap_test.py +new file mode 100644 +index 00000000..56356144 +--- /dev/null ++++ b/tests/formats_test/swap_test.py +@@ -0,0 +1,24 @@ ++import test_compat # pylint: disable=unused-import ++ ++import six ++import unittest ++ ++from blivet.devices.storage import StorageDevice ++from blivet.errors import DeviceError ++from blivet.formats import get_format ++ ++from blivet.size import Size ++ ++ ++class SwapNodevTestCase(unittest.TestCase): ++ ++ def test_swap_max_size(self): ++ StorageDevice("dev", size=Size("129 GiB"), ++ fmt=get_format("swap")) ++ ++ StorageDevice("dev", size=Size("15 TiB"), ++ fmt=get_format("swap")) ++ ++ with six.assertRaisesRegex(self, DeviceError, "device is too large for new format"): ++ StorageDevice("dev", size=Size("17 TiB"), ++ fmt=get_format("swap")) +-- +2.26.2 + diff --git a/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch b/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch new file mode 100644 index 0000000..1e14de6 --- /dev/null +++ b/SOURCES/0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch @@ -0,0 +1,78 @@ +From 4e6a322d32d2a12f8a87ab763a6286cf3d7b5c27 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 8 Sep 2020 13:57:40 +0200 +Subject: [PATCH] Use UnusableConfigurationError for partially hidden multipath + devices + +Follow-up for https://github.com/storaged-project/blivet/pull/883 +to make Anaconda show an error message instead of crashing. 
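+
+A rough sketch of the intended consumer-side handling (the reset() call
+and the dialog helper are illustrative only, not part of this change):
+
+from blivet.errors import UnusableConfigurationError
+
+try:
+    storage.reset()  # populate runs _hide_ignored_disks()
+except UnusableConfigurationError as e:
+    # e.suggestion is the translatable hint defined on the error class
+    show_error_dialog("%s\n%s" % (e, e.suggestion))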
+ +Resolves: rhbz#1877052 +--- + blivet/devicetree.py | 4 ++-- + blivet/errors.py | 6 ++++++ + tests/devicetree_test.py | 4 ++-- + 3 files changed, 10 insertions(+), 4 deletions(-) + +diff --git a/blivet/devicetree.py b/blivet/devicetree.py +index 2afb0d0e..57a9bbd7 100644 +--- a/blivet/devicetree.py ++++ b/blivet/devicetree.py +@@ -32,7 +32,7 @@ from gi.repository import BlockDev as blockdev + + from .actionlist import ActionList + from .callbacks import callbacks +-from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError ++from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError, InvalidMultideviceSelection + from .deviceaction import ActionDestroyDevice, ActionDestroyFormat + from .devices import BTRFSDevice, NoDevice, PartitionDevice + from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice +@@ -936,7 +936,7 @@ class DeviceTreeBase(object): + if is_ignored: + if len(disk.children) == 1: + if not all(self._is_ignored_disk(d) for d in disk.children[0].parents): +- raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.") ++ raise InvalidMultideviceSelection("Including only a subset of raid/multipath member disks is not allowed.") + + # and also children like fwraid or mpath + self.hide(disk.children[0]) +diff --git a/blivet/errors.py b/blivet/errors.py +index 811abf81..7a93f1ce 100644 +--- a/blivet/errors.py ++++ b/blivet/errors.py +@@ -233,6 +233,12 @@ class DuplicateVGError(UnusableConfigurationError): + "Hint 2: You can get the VG UUIDs by running " + "'pvs -o +vg_uuid'.") + ++ ++class InvalidMultideviceSelection(UnusableConfigurationError): ++ suggestion = N_("All parent devices must be selected when choosing exclusive " ++ "or ignored disks for a multipath or firmware RAID device.") ++ ++ + # DeviceAction + + +diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py +index 6032e7f6..4e47ffc3 100644 +--- a/tests/devicetree_test.py ++++ b/tests/devicetree_test.py +@@ -5,7 +5,7 @@ import six + import unittest + + from blivet.actionlist import ActionList +-from blivet.errors import DeviceTreeError, DuplicateUUIDError ++from blivet.errors import DeviceTreeError, DuplicateUUIDError, InvalidMultideviceSelection + from blivet.deviceaction import ACTION_TYPE_DESTROY, ACTION_OBJECT_DEVICE + from blivet.devicelibs import lvm + from blivet.devices import DiskDevice +@@ -512,5 +512,5 @@ class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase): + self.tree.ignored_disks = ["sda", "sdb"] + self.tree.exclusive_disks = [] + +- with self.assertRaises(DeviceTreeError): ++ with self.assertRaises(InvalidMultideviceSelection): + self.tree._hide_ignored_disks() +-- +2.26.2 + diff --git a/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch b/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch new file mode 100644 index 0000000..24e408e --- /dev/null +++ b/SOURCES/0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch @@ -0,0 +1,32 @@ +From 866a48e6c3d8246d2897bb402a191df5f2848aa4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 23 Jun 2020 10:33:33 +0200 +Subject: [PATCH] Fix possible UnicodeDecodeError when reading model from sysfs + +Some Innovation IT NVMe devices have an (invalid) unicode in their +model name. 
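+
+For reference, errors="replace" substitutes U+FFFD for bytes that are not
+valid UTF-8 instead of raising (the model string below is made up):
+
+b"ACME NVMe\xef".decode("utf-8", errors="replace")  # -> 'ACME NVMe\ufffd'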
+ +Resolves: rhbz#1849326 +--- + blivet/udev.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/blivet/udev.py b/blivet/udev.py +index 41c99496..2c795225 100644 +--- a/blivet/udev.py ++++ b/blivet/udev.py +@@ -185,8 +185,9 @@ def __is_blacklisted_blockdev(dev_name): + if any(re.search(expr, dev_name) for expr in device_name_blacklist): + return True + +- if os.path.exists("/sys/class/block/%s/device/model" % (dev_name,)): +- model = open("/sys/class/block/%s/device/model" % (dev_name,)).read() ++ model_path = "/sys/class/block/%s/device/model" % dev_name ++ if os.path.exists(model_path): ++ model = open(model_path, encoding="utf-8", errors="replace").read() + for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"): + if model.find(bad) != -1: + log.info("ignoring %s with model %s", dev_name, model) +-- +2.26.2 + diff --git a/SOURCES/0016-Basic-LVM-VDO-support.patch b/SOURCES/0016-Basic-LVM-VDO-support.patch new file mode 100644 index 0000000..b52342b --- /dev/null +++ b/SOURCES/0016-Basic-LVM-VDO-support.patch @@ -0,0 +1,415 @@ +From 3f6bbf52442609b8e6e3919a3fdd8c5af64923e6 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:48:41 +0200 +Subject: [PATCH 1/3] Add basic support for LVM VDO devices + +This adds support for LVM VDO devices detection during populate +and allows removing both VDO LVs and VDO pools using actions. +--- + blivet/devices/lvm.py | 150 +++++++++++++++++++++++++++++++- + blivet/populator/helpers/lvm.py | 16 +++- + tests/action_test.py | 39 +++++++++ + tests/devices_test/lvm_test.py | 34 ++++++++ + tests/storagetestcase.py | 11 ++- + 5 files changed, 245 insertions(+), 5 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 97de6acd..d9e24a33 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1789,8 +1789,132 @@ class LVMThinLogicalVolumeMixin(object): + data.pool_name = self.pool.lvname + + ++class LVMVDOPoolMixin(object): ++ def __init__(self): ++ self._lvs = [] ++ ++ @property ++ def is_vdo_pool(self): ++ return self.seg_type == "vdo-pool" ++ ++ @property ++ def type(self): ++ return "lvmvdopool" ++ ++ @property ++ def resizable(self): ++ return False ++ ++ @util.requires_property("is_vdo_pool") ++ def _add_log_vol(self, lv): ++ """ Add an LV to this VDO pool. """ ++ if lv in self._lvs: ++ raise ValueError("lv is already part of this VDO pool") ++ ++ self.vg._add_log_vol(lv) ++ log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name) ++ self._lvs.append(lv) ++ ++ @util.requires_property("is_vdo_pool") ++ def _remove_log_vol(self, lv): ++ """ Remove an LV from this VDO pool. """ ++ if lv not in self._lvs: ++ raise ValueError("specified lv is not part of this VDO pool") ++ ++ self._lvs.remove(lv) ++ self.vg._remove_log_vol(lv) ++ ++ @property ++ @util.requires_property("is_vdo_pool") ++ def lvs(self): ++ """ A list of this VDO pool's LVs """ ++ return self._lvs[:] # we don't want folks changing our list ++ ++ @property ++ def direct(self): ++ """ Is this device directly accessible? """ ++ return False ++ ++ def _create(self): ++ """ Create the device. 
""" ++ raise NotImplementedError ++ ++ ++class LVMVDOLogicalVolumeMixin(object): ++ def __init__(self): ++ pass ++ ++ def _init_check(self): ++ pass ++ ++ def _check_parents(self): ++ """Check that this device has parents as expected""" ++ if isinstance(self.parents, (list, ParentList)): ++ if len(self.parents) != 1: ++ raise ValueError("constructor requires a single vdo-pool LV") ++ ++ container = self.parents[0] ++ else: ++ container = self.parents ++ ++ if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool: ++ raise ValueError("constructor requires a vdo-pool LV") ++ ++ @property ++ def vg_space_used(self): ++ return Size(0) # the pool's size is already accounted for in the vg ++ ++ @property ++ def is_vdo_lv(self): ++ return self.seg_type == "vdo" ++ ++ @property ++ def vg(self): ++ # parents[0] is the pool, not the VG so set the VG here ++ return self.pool.vg ++ ++ @property ++ def type(self): ++ return "vdolv" ++ ++ @property ++ def resizable(self): ++ return False ++ ++ @property ++ @util.requires_property("is_vdo_lv") ++ def pool(self): ++ return self.parents[0] ++ ++ def _create(self): ++ """ Create the device. """ ++ raise NotImplementedError ++ ++ def _destroy(self): ++ # nothing to do here, VDO LV is destroyed automatically together with ++ # the VDO pool ++ pass ++ ++ def remove_hook(self, modparent=True): ++ if modparent: ++ self.pool._remove_log_vol(self) ++ ++ # pylint: disable=bad-super-call ++ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent) ++ ++ def add_hook(self, new=True): ++ # pylint: disable=bad-super-call ++ super(LVMLogicalVolumeBase, self).add_hook(new=new) ++ if new: ++ return ++ ++ if self not in self.pool.lvs: ++ self.pool._add_log_vol(self) ++ ++ + class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin, +- LVMThinPoolMixin, LVMThinLogicalVolumeMixin): ++ LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin, ++ LVMVDOLogicalVolumeMixin): + """ An LVM Logical Volume """ + + # generally resizable, see :property:`resizable` for details +@@ -1879,6 +2003,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, + fmt, exists, sysfs_path, grow, maxsize, + percent, cache_request, pvs, from_lvs) ++ LVMVDOPoolMixin.__init__(self) ++ LVMVDOLogicalVolumeMixin.__init__(self) + + LVMInternalLogicalVolumeMixin._init_check(self) + LVMSnapshotMixin._init_check(self) +@@ -1905,6 +2031,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + ret.append(LVMThinPoolMixin) + if self.is_thin_lv: + ret.append(LVMThinLogicalVolumeMixin) ++ if self.is_vdo_pool: ++ ret.append(LVMVDOPoolMixin) ++ if self.is_vdo_lv: ++ ret.append(LVMVDOLogicalVolumeMixin) + return ret + + def _try_specific_call(self, name, *args, **kwargs): +@@ -2066,6 +2196,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + def display_lv_name(self): + return self.lvname + ++ @property ++ @type_specific ++ def pool(self): ++ return super(LVMLogicalVolumeDevice, self).pool ++ + def _setup(self, orig=False): + """ Open, or set up, a device. 
""" + log_method_call(self, self.name, orig=orig, status=self.status, +@@ -2167,6 +2302,19 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + udev.settle() + blockdev.lvm.lvresize(self.vg.name, self._name, self.size) + ++ @type_specific ++ def _add_log_vol(self, lv): ++ pass ++ ++ @type_specific ++ def _remove_log_vol(self, lv): ++ pass ++ ++ @property ++ @type_specific ++ def lvs(self): ++ return [] ++ + @property + @type_specific + def direct(self): +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index 4b674fac..ff8bf59f 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -211,9 +211,6 @@ class LVMFormatPopulator(FormatPopulator): + origin = self._devicetree.get_device_by_name(origin_device_name) + + lv_kwargs["origin"] = origin +- elif lv_attr[0] == 'v': +- # skip vorigins +- return + elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'): + # an internal LV, add the an instance of the appropriate class + # to internal_lvs for later processing when non-internal LVs are +@@ -237,6 +234,19 @@ class LVMFormatPopulator(FormatPopulator): + origin = self._devicetree.get_device_by_name(origin_device_name) + lv_kwargs["origin"] = origin + ++ lv_parents = [self._devicetree.get_device_by_name(pool_device_name)] ++ elif lv_attr[0] == 'd': ++ # vdo pool ++ # nothing to do here ++ pass ++ elif lv_attr[0] == 'v': ++ if lv_type != "vdo": ++ # skip vorigins ++ return ++ pool_name = blockdev.lvm.vdolvpoolname(vg_name, lv_name) ++ pool_device_name = "%s-%s" % (vg_name, pool_name) ++ add_required_lv(pool_device_name, "failed to look up VDO pool") ++ + lv_parents = [self._devicetree.get_device_by_name(pool_device_name)] + elif lv_name.endswith(']'): + # unrecognized Internal LVM2 device +diff --git a/tests/action_test.py b/tests/action_test.py +index 90c1b312..8f9a7424 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -1252,6 +1252,45 @@ class DeviceActionTestCase(StorageTestCase): + self.assertEqual(set(self.storage.lvs), {pool}) + self.assertEqual(set(pool._internal_lvs), {lv1, lv2}) + ++ def test_lvm_vdo_destroy(self): ++ self.destroy_all_devices() ++ sdc = self.storage.devicetree.get_device_by_name("sdc") ++ sdc1 = self.new_device(device_class=PartitionDevice, name="sdc1", ++ size=Size("50 GiB"), parents=[sdc], ++ fmt=blivet.formats.get_format("lvmpv")) ++ self.schedule_create_device(sdc1) ++ ++ vg = self.new_device(device_class=LVMVolumeGroupDevice, ++ name="vg", parents=[sdc1]) ++ self.schedule_create_device(vg) ++ ++ pool = self.new_device(device_class=LVMLogicalVolumeDevice, ++ name="data", parents=[vg], ++ size=Size("10 GiB"), ++ seg_type="vdo-pool", exists=True) ++ self.storage.devicetree._add_device(pool) ++ lv = self.new_device(device_class=LVMLogicalVolumeDevice, ++ name="meta", parents=[pool], ++ size=Size("50 GiB"), ++ seg_type="vdo", exists=True) ++ self.storage.devicetree._add_device(lv) ++ ++ remove_lv = self.schedule_destroy_device(lv) ++ self.assertListEqual(pool.lvs, []) ++ self.assertNotIn(lv, vg.lvs) ++ ++ # cancelling the action should put lv back to both vg and pool lvs ++ self.storage.devicetree.actions.remove(remove_lv) ++ self.assertListEqual(pool.lvs, [lv]) ++ self.assertIn(lv, vg.lvs) ++ ++ # can't remove non-leaf pool ++ with self.assertRaises(ValueError): ++ self.schedule_destroy_device(pool) ++ ++ self.schedule_destroy_device(lv) ++ self.schedule_destroy_device(pool) ++ + + class ConfigurationActionsTest(unittest.TestCase): + +diff --git 
a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 9e701d18..204cb99a 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -405,6 +405,40 @@ class LVMDeviceTest(unittest.TestCase): + exists=False) + self.assertFalse(vg.is_empty) + ++ def test_lvm_vdo_pool(self): ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("1 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv]) ++ pool = LVMLogicalVolumeDevice("testpool", parents=[vg], size=Size("512 MiB"), ++ seg_type="vdo-pool", exists=True) ++ self.assertTrue(pool.is_vdo_pool) ++ ++ free = vg.free_space ++ lv = LVMLogicalVolumeDevice("testlv", parents=[pool], size=Size("2 GiB"), ++ seg_type="vdo", exists=True) ++ self.assertTrue(lv.is_vdo_lv) ++ self.assertEqual(lv.vg, vg) ++ self.assertEqual(lv.pool, pool) ++ ++ # free space in the vg shouldn't be affected by the vdo lv ++ self.assertEqual(lv.vg_space_used, 0) ++ self.assertEqual(free, vg.free_space) ++ ++ self.assertListEqual(pool.lvs, [lv]) ++ ++ # now try to destroy both the pool and the vdo lv ++ # for the lv this should be a no-op, destroying the pool should destroy both ++ with patch("blivet.devices.lvm.blockdev.lvm") as lvm: ++ lv.destroy() ++ lv.remove_hook() ++ self.assertFalse(lv.exists) ++ self.assertFalse(lvm.lvremove.called) ++ self.assertListEqual(pool.lvs, []) ++ ++ pool.destroy() ++ self.assertFalse(pool.exists) ++ self.assertTrue(lvm.lvremove.called) ++ + + class TypeSpecificCallsTest(unittest.TestCase): + def test_type_specific_calls(self): +diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py +index e581bca6..1844dec5 100644 +--- a/tests/storagetestcase.py ++++ b/tests/storagetestcase.py +@@ -96,7 +96,16 @@ class StorageTestCase(unittest.TestCase): + def new_device(self, *args, **kwargs): + """ Return a new Device instance suitable for testing. 
""" + device_class = kwargs.pop("device_class") +- exists = kwargs.pop("exists", False) ++ ++ # we intentionally don't pass the "exists" kwarg to the constructor ++ # becauses this causes issues with some devices (especially partitions) ++ # but we still need it for some LVs like VDO because we can't create ++ # those so we need to fake their existence even for the constructor ++ if device_class is blivet.devices.LVMLogicalVolumeDevice: ++ exists = kwargs.get("exists", False) ++ else: ++ exists = kwargs.pop("exists", False) ++ + part_type = kwargs.pop("part_type", parted.PARTITION_NORMAL) + device = device_class(*args, **kwargs) + +-- +2.26.2 + + +From f05a66e1bed1ca1f3cd7d7ffecd6693ab4d7f32a Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:52:47 +0200 +Subject: [PATCH 2/3] Fix checking for filesystem support in action_test + +--- + tests/action_test.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/action_test.py b/tests/action_test.py +index 8f9a7424..228eb97a 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -56,7 +56,7 @@ FORMAT_CLASSES = [ + + + @unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test") +-@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") ++@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") + class DeviceActionTestCase(StorageTestCase): + + """ DeviceActionTestSuite """ +-- +2.26.2 + + +From 69bd2e69e21c8779377a6f54b3d83cb35138867a Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 12 May 2020 12:54:03 +0200 +Subject: [PATCH 3/3] Fix LV min size for resize in test_action_dependencies + +We've recently changed min size for all filesystems so we can't +resize the LV to the device minimal size. +This was overlooked in the original change because these tests +were skipped. +--- + tests/action_test.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/action_test.py b/tests/action_test.py +index 228eb97a..77176f46 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -870,7 +870,7 @@ class DeviceActionTestCase(StorageTestCase): + name="testlv2", parents=[testvg]) + testlv2.format = self.new_format("ext4", device=testlv2.path, + exists=True, device_instance=testlv2) +- shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB")) ++ shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB") + Ext4FS._min_size) + shrink_lv2.apply() + + self.assertTrue(grow_lv.requires(shrink_lv2)) +-- +2.26.2 + diff --git a/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch b/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch new file mode 100644 index 0000000..af2c4d8 --- /dev/null +++ b/SOURCES/0017-Let-parted-fix-fixable-issues-with-partition-table.patch @@ -0,0 +1,30 @@ +From d477f8d076789cbe1c0a85545ea8b5133fdc4bdf Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 18 Sep 2020 13:58:48 +0200 +Subject: [PATCH] Let parted fix fixable issues with partition table + +This will automatically fix issues like GPT partition table not +covering whole device after disk size change. 
+ +Resolves: rhbz#1846869 +--- + blivet/populator/populator.py | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py +index 465c272d..fe566816 100644 +--- a/blivet/populator/populator.py ++++ b/blivet/populator/populator.py +@@ -64,6 +64,9 @@ def parted_exn_handler(exn_type, exn_options, exn_msg): + if exn_type == parted.EXCEPTION_TYPE_ERROR and \ + exn_options == parted.EXCEPTION_OPT_YES_NO: + ret = parted.EXCEPTION_RESOLVE_YES ++ elif exn_type == parted.EXCEPTION_TYPE_WARNING and \ ++ exn_options & parted.EXCEPTION_RESOLVE_FIX: ++ ret = parted.EXCEPTION_RESOLVE_FIX + return ret + + +-- +2.29.2 + diff --git a/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch b/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch new file mode 100644 index 0000000..11b6a40 --- /dev/null +++ b/SOURCES/0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch @@ -0,0 +1,112 @@ +From 430cd2cdba8fba434b5bed2d2a7ed97803c62f6d Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 5 Jan 2021 16:56:52 +0100 +Subject: [PATCH 1/3] Fix possible UnicodeDecodeError when reading sysfs + attributes + +This is a follow-up for https://github.com/storaged-project/blivet/pull/861 +where we fixed reading device model in "__is_blacklisted_blockdev" +but we read the device model from other places too so it makes +more sense to "fix" all sysfs attribute reads. +--- + blivet/util.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/util.py b/blivet/util.py +index 2fa9c8fc..48b7818f 100644 +--- a/blivet/util.py ++++ b/blivet/util.py +@@ -379,7 +379,7 @@ def get_sysfs_attr(path, attr, root=None): + log.warning("%s is not a valid attribute", attr) + return None + +- f = open(fullattr, "r") ++ f = open(fullattr, "r", encoding="utf-8", errors="replace") + data = f.read() + f.close() + sdata = "".join(["%02x" % (ord(x),) for x in data]) +-- +2.29.2 + + +From 15350b52f30910d4fadad92da0195710adcb69a0 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 5 Jan 2021 16:59:14 +0100 +Subject: [PATCH 2/3] Use util.get_sysfs_attr in __is_ignored_blockdev to read + device mode + +--- + blivet/udev.py | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/blivet/udev.py b/blivet/udev.py +index 2c795225..25375459 100644 +--- a/blivet/udev.py ++++ b/blivet/udev.py +@@ -185,9 +185,8 @@ def __is_blacklisted_blockdev(dev_name): + if any(re.search(expr, dev_name) for expr in device_name_blacklist): + return True + +- model_path = "/sys/class/block/%s/device/model" % dev_name +- if os.path.exists(model_path): +- model = open(model_path, encoding="utf-8", errors="replace").read() ++ model = util.get_sysfs_attr("/sys/class/block/%s" % dev_name, "device/model") ++ if model: + for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"): + if model.find(bad) != -1: + log.info("ignoring %s with model %s", dev_name, model) +-- +2.29.2 + + +From 64ece8c0dafb550bbde4798a766515fb04f44568 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 6 Jan 2021 12:34:49 +0100 +Subject: [PATCH 3/3] Add test for util.get_sysfs_attr + +--- + tests/util_test.py | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/tests/util_test.py b/tests/util_test.py +index 9a2ff492..853b6166 100644 +--- a/tests/util_test.py ++++ b/tests/util_test.py +@@ -2,7 +2,9 @@ + import test_compat + + from six.moves import mock ++import os + import six ++import tempfile + import unittest + from 
decimal import Decimal + +@@ -157,3 +159,24 @@ class DependencyGuardTestCase(unittest.TestCase): + with mock.patch.object(_requires_something, '_check_avail', return_value=True): + self.assertEqual(self._test_dependency_guard_non_critical(), True) + self.assertEqual(self._test_dependency_guard_critical(), True) ++ ++ ++class GetSysfsAttrTestCase(unittest.TestCase): ++ ++ def test_get_sysfs_attr(self): ++ ++ with tempfile.TemporaryDirectory() as sysfs: ++ model_file = os.path.join(sysfs, "model") ++ with open(model_file, "w") as f: ++ f.write("test model\n") ++ ++ model = util.get_sysfs_attr(sysfs, "model") ++ self.assertEqual(model, "test model") ++ ++ # now with some invalid byte in the model ++ with open(model_file, "wb") as f: ++ f.write(b"test model\xef\n") ++ ++ # the unicode replacement character (U+FFFD) should be used instead ++ model = util.get_sysfs_attr(sysfs, "model") ++ self.assertEqual(model, "test model\ufffd") +-- +2.29.2 + diff --git a/SOURCES/0019-LVM-VDO-support.patch b/SOURCES/0019-LVM-VDO-support.patch new file mode 100644 index 0000000..c79d6c1 --- /dev/null +++ b/SOURCES/0019-LVM-VDO-support.patch @@ -0,0 +1,2027 @@ +From 18f05802f07f580ed31f38931b1103842397d598 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:19:52 +0100 +Subject: [PATCH 01/17] Fix type of LVM VDO logical volumes + +We should use "lvmvdolv" to make it similar to other "lvmXYZ" +types. +--- + blivet/devices/lvm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index d9e24a33..9639256d 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1875,7 +1875,7 @@ def vg(self): + + @property + def type(self): +- return "vdolv" ++ return "lvmvdolv" + + @property + def resizable(self): + +From 7f4815e14075550f55f2afb44bfba461eacea1c4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:21:33 +0100 +Subject: [PATCH 02/17] Add VDO pool data LV to internal LVs during populate + +--- + blivet/devices/lvm.py | 9 ++++++++- + blivet/populator/helpers/lvm.py | 2 +- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 9639256d..d0957d6a 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1119,7 +1119,7 @@ class LVMInternalLVtype(Enum): + + @classmethod + def get_type(cls, lv_attr, lv_name): # pylint: disable=unused-argument +- attr_letters = {cls.data: ("T", "C"), ++ attr_letters = {cls.data: ("T", "C", "D"), + cls.meta: ("e",), + cls.log: ("l", "L"), + cls.image: ("i", "I"), +@@ -1824,6 +1824,13 @@ def _remove_log_vol(self, lv): + self._lvs.remove(lv) + self.vg._remove_log_vol(lv) + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def _vdopool_data_lv(self): ++ if not self._internal_lvs: ++ return None ++ return self._internal_lvs[0] ++ + @property + @util.requires_property("is_vdo_pool") + def lvs(self): +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index ff8bf59f..b1626306 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -211,7 +211,7 @@ def add_lv(lv): + origin = self._devicetree.get_device_by_name(origin_device_name) + + lv_kwargs["origin"] = origin +- elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'): ++ elif lv_attr[0] in 'IrielTCoD' and lv_name.endswith(']'): + # an internal LV, add the an instance of the appropriate class + # to internal_lvs for later processing when non-internal LVs are + # processed + +From 
c164864955e371aef78b5020f28bf0c9d235ac7c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:22:12 +0100 +Subject: [PATCH 03/17] Add availability functions for LVM VDO + +VDO is currently available only on RHEL/CentOS so we need a +separate availability check for LVM VDO devices. +--- + blivet/devices/lvm.py | 6 ++++++ + blivet/tasks/availability.py | 8 ++++++++ + 2 files changed, 14 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index d0957d6a..ffc65dcd 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1790,6 +1790,9 @@ def populate_ksdata(self, data): + + + class LVMVDOPoolMixin(object): ++ ++ _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ + def __init__(self): + self._lvs = [] + +@@ -1848,6 +1851,9 @@ def _create(self): + + + class LVMVDOLogicalVolumeMixin(object): ++ ++ _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ + def __init__(self): + pass + +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index f3b76650..b107428e 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -372,6 +372,13 @@ def available_resource(name): + blockdev.LVMTechMode.MODIFY)}) + BLOCKDEV_LVM_TECH = BlockDevMethod(BLOCKDEV_LVM) + ++BLOCKDEV_LVM_VDO = BlockDevTechInfo(plugin_name="lvm", ++ check_fn=blockdev.lvm_is_tech_avail, ++ technologies={blockdev.LVMTech.VDO: (blockdev.LVMTechMode.CREATE | ++ blockdev.LVMTechMode.REMOVE | ++ blockdev.LVMTechMode.QUERY)}) ++BLOCKDEV_LVM_TECH_VDO = BlockDevMethod(BLOCKDEV_LVM_VDO) ++ + # libblockdev mdraid plugin required technologies and modes + BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE | + blockdev.MDTechMode.DELETE | +@@ -410,6 +417,7 @@ def available_resource(name): + BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID) + BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH) + BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH) ++BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO) + BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH) + BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH) + BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH) + +From d782620129d47a7b79b0e6b80455e6d93f8bcc88 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:27:55 +0100 +Subject: [PATCH 04/17] Read the LVM VDO pool current size from the internal + data LV + +The pool device mapper device size is always 512k when active. +--- + blivet/devices/lvm.py | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index ffc65dcd..73743fa8 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1845,6 +1845,15 @@ def direct(self): + """ Is this device directly accessible? """ + return False + ++ def read_current_size(self): ++ log_method_call(self, exists=self.exists, path=self.path, ++ sysfs_path=self.sysfs_path) ++ if self.size != Size(0): ++ return self.size ++ if self._vdopool_data_lv: ++ return self._vdopool_data_lv.read_current_size() ++ return Size(0) ++ + def _create(self): + """ Create the device. 
""" + raise NotImplementedError + +From 2da48ae84f4eac84e8cf998ee2402249a5a52626 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:29:43 +0100 +Subject: [PATCH 05/17] Add "vdo_lv" property to LVMVDOPoolMixin + +--- + blivet/devices/lvm.py | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 73743fa8..2f93fa22 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1840,6 +1840,13 @@ def lvs(self): + """ A list of this VDO pool's LVs """ + return self._lvs[:] # we don't want folks changing our list + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def vdo_lv(self): ++ if not self._lvs: ++ return None ++ return self._lvs[0] ++ + @property + def direct(self): + """ Is this device directly accessible? """ + +From bbfa2cbdc6cb85d405b895c66eb4867cea4218b4 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:30:37 +0100 +Subject: [PATCH 06/17] Add support for creating LVM VDO pools and LVM VDO + volumes + +The pool and the volume are created by one call but these can have +different properties (like size) and are in fact two block devices +when created, we also need to create two devices and add them to +the devicetree. The pool device must be always created first and +the _create function for the VDO volume is a no-op. +--- + blivet/devices/lvm.py | 63 +++++++++++++++++++++++++++++++++++++------ + 1 file changed, 55 insertions(+), 8 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 2f93fa22..0802e2de 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -311,7 +311,7 @@ def _add_log_vol(self, lv): + + # verify we have the space, then add it + # do not verify for growing vg (because of ks) +- if not lv.exists and not self.growable and not lv.is_thin_lv and lv.size > self.free_space: ++ if not lv.exists and not self.growable and not (lv.is_thin_lv or lv.is_vdo_lv) and lv.size > self.free_space: + raise errors.DeviceError("new lv is too large to fit in free space", self.name) + + log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name) +@@ -639,7 +639,7 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + percent=None, cache_request=None, pvs=None, from_lvs=None): + + if not exists: +- if seg_type not in [None, "linear", "thin", "thin-pool", "cache"] + lvm.raid_seg_types: ++ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types: + raise ValueError("Invalid or unsupported segment type: %s" % seg_type) + if seg_type and seg_type in lvm.raid_seg_types and not pvs: + raise ValueError("List of PVs has to be given for every non-linear LV") +@@ -1793,7 +1793,11 @@ class LVMVDOPoolMixin(object): + + _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] + +- def __init__(self): ++ def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None): ++ self.compression = compression ++ self.deduplication = deduplication ++ self.index_memory = index_memory ++ self.write_policy = write_policy + self._lvs = [] + + @property +@@ -1863,7 +1867,19 @@ def read_current_size(self): + + def _create(self): + """ Create the device. 
""" +- raise NotImplementedError ++ ++ if not self.vdo_lv: ++ raise errors.DeviceError("Cannot create new VDO pool without a VDO LV.") ++ ++ if self.write_policy: ++ write_policy = blockdev.lvm_get_vdo_write_policy_str(self.write_policy) ++ else: ++ write_policy = blockdev.LVMVDOWritePolicy.AUTO ++ ++ blockdev.lvm.vdo_pool_create(self.vg.name, self.vdo_lv.lvname, self.lvname, ++ self.size, self.vdo_lv.size, self.index_memory, ++ self.compression, self.deduplication, ++ write_policy) + + + class LVMVDOLogicalVolumeMixin(object): +@@ -1915,9 +1931,26 @@ def resizable(self): + def pool(self): + return self.parents[0] + ++ def _set_size(self, newsize): ++ if not isinstance(newsize, Size): ++ raise AttributeError("new size must of type Size") ++ ++ newsize = self.vg.align(newsize) ++ newsize = self.vg.align(util.numeric_type(newsize)) ++ # just make sure the size is set (no VG size/free space check needed for ++ # a VDO LV) ++ DMDevice._set_size(self, newsize) ++ ++ def _pre_create(self): ++ # skip LVMLogicalVolumeDevice's _pre_create() method as it checks for a ++ # free space in a VG which doesn't make sense for a VDO LV and causes a ++ # bug by limitting the VDO LV's size to VG free space which is nonsense ++ super(LVMLogicalVolumeBase, self)._pre_create() # pylint: disable=bad-super-call ++ + def _create(self): +- """ Create the device. """ +- raise NotImplementedError ++ # nothing to do here, VDO LV is created automatically together with ++ # the VDO pool ++ pass + + def _destroy(self): + # nothing to do here, VDO LV is destroyed automatically together with +@@ -1953,7 +1986,9 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None, + percent=None, cache_request=None, pvs=None, + parent_lv=None, int_type=None, origin=None, vorigin=False, +- metadata_size=None, chunk_size=None, profile=None, from_lvs=None): ++ metadata_size=None, chunk_size=None, profile=None, from_lvs=None, ++ compression=False, deduplication=False, index_memory=0, ++ write_policy=None): + """ + :param name: the device name (generally a device node's basename) + :type name: str +@@ -2012,6 +2047,17 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + :keyword from_lvs: LVs to create the new LV from (in the (data_lv, metadata_lv) order) + :type from_lvs: tuple of :class:`LVMLogicalVolumeDevice` + ++ For VDO pools only: ++ ++ :keyword compression: whether to enable compression on the VDO pool ++ :type compression: bool ++ :keyword dudplication: whether to enable dudplication on the VDO pool ++ :type dudplication: bool ++ :keyword index_memory: amount of index memory (in bytes) or 0 for default ++ :type index_memory: int ++ :keyword write_policy: write policy for the volume or None for default ++ :type write_policy: str ++ + """ + + if isinstance(parents, (list, ParentList)): +@@ -2032,7 +2078,8 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, + LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, + fmt, exists, sysfs_path, grow, maxsize, + percent, cache_request, pvs, from_lvs) +- LVMVDOPoolMixin.__init__(self) ++ LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory, ++ write_policy) + LVMVDOLogicalVolumeMixin.__init__(self) + + LVMInternalLogicalVolumeMixin._init_check(self) + +From 2d1593b50dc6232e213b4df86dfbf5cf6d282dcd Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:31:35 +0100 +Subject: [PATCH 07/17] Allow 
creating LVM VDO pools and volumes using + "blivet.new_lv" + +The steps to create the VDO devices would typically look like: + +pool = b.new_lv(vdo_pool=True, parents=[data], size=Size("8 GiB")) +vdolv = b.new_lv(vdo_lv=True, parents=[pool], size=Size("40 GiB")) +b.create_device(pool) +b.create_device(vdolv) +b.do_it() +--- + blivet/blivet.py | 18 ++++++++++++++---- + tests/devices_test/lvm_test.py | 31 +++++++++++++++++++++++++++++++ + 2 files changed, 45 insertions(+), 4 deletions(-) + +diff --git a/blivet/blivet.py b/blivet/blivet.py +index e7dbd37b..754eb152 100644 +--- a/blivet/blivet.py ++++ b/blivet/blivet.py +@@ -573,6 +573,10 @@ def new_lv(self, *args, **kwargs): + :type thin_pool: bool + :keyword thin_volume: whether to create a thin volume + :type thin_volume: bool ++ :keyword vdo_pool: whether to create a vdo pool ++ :type vdo_pool: bool ++ :keyword vdo_lv: whether to create a vdo lv ++ :type vdo_lv: bool + :returns: the new device + :rtype: :class:`~.devices.LVMLogicalVolumeDevice` + +@@ -589,8 +593,10 @@ def new_lv(self, *args, **kwargs): + """ + thin_volume = kwargs.pop("thin_volume", False) + thin_pool = kwargs.pop("thin_pool", False) ++ vdo_pool = kwargs.pop("vdo_pool", False) ++ vdo_lv = kwargs.pop("vdo_lv", False) + parent = kwargs.get("parents", [None])[0] +- if thin_volume and parent: ++ if (thin_volume or vdo_lv) and parent: + # kwargs["parents"] will contain the pool device, so... + vg = parent.vg + else: +@@ -600,6 +606,10 @@ def new_lv(self, *args, **kwargs): + kwargs["seg_type"] = "thin" + if thin_pool: + kwargs["seg_type"] = "thin-pool" ++ if vdo_pool: ++ kwargs["seg_type"] = "vdo-pool" ++ if vdo_lv: ++ kwargs["seg_type"] = "vdo" + + mountpoint = kwargs.pop("mountpoint", None) + if 'fmt_type' in kwargs: +@@ -625,7 +635,7 @@ def new_lv(self, *args, **kwargs): + swap = False + + prefix = "" +- if thin_pool: ++ if thin_pool or vdo_pool: + prefix = "pool" + + name = self.suggest_device_name(parent=vg, +@@ -636,10 +646,10 @@ def new_lv(self, *args, **kwargs): + if "%s-%s" % (vg.name, name) in self.names: + raise ValueError("name already in use") + +- if thin_pool or thin_volume: ++ if thin_pool or thin_volume or vdo_pool or vdo_lv: + cache_req = kwargs.pop("cache_request", None) + if cache_req: +- raise ValueError("Creating cached thin volumes and pools is not supported") ++ raise ValueError("Creating cached thin and VDO volumes and pools is not supported") + + return LVMLogicalVolumeDevice(name, *args, **kwargs) + +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 204cb99a..493d3ba1 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -689,3 +689,34 @@ def test_new_lv_from_non_existing_lvs(self): + with patch.object(pool, "_pre_create"): + pool.create() + self.assertTrue(lvm.thpool_convert.called) ++ ++ def test_new_vdo_pool(self): ++ b = blivet.Blivet() ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ ++ for dev in (pv, vg): ++ b.devicetree._add_device(dev) ++ ++ # check that all the above devices are in the expected places ++ self.assertEqual(set(b.devices), {pv, vg}) ++ self.assertEqual(set(b.vgs), {vg}) ++ ++ self.assertEqual(vg.size, Size("10236 MiB")) ++ ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("8 GiB")) ++ ++ vdolv = b.new_lv(name="vdolv", vdo_lv=True, ++ parents=[vdopool], ++ 
size=blivet.size.Size("40 GiB")) ++ ++ b.create_device(vdopool) ++ b.create_device(vdolv) ++ ++ self.assertEqual(vdopool.children[0], vdolv) ++ self.assertEqual(vdolv.parents[0], vdopool) ++ self.assertListEqual(vg.lvs, [vdopool, vdolv]) + +From 31ec429ad7bd0857a768e2dfebe1de088dafc144 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 2 Nov 2020 14:32:47 +0100 +Subject: [PATCH 08/17] Add LVM VDO device factory + +--- + blivet/devicefactory.py | 100 +++++++++++++++++++++++++++- + tests/devicefactory_test.py | 128 +++++++++++++++++++++++++++++++++--- + 2 files changed, 218 insertions(+), 10 deletions(-) + +diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py +index 9214ad54..c95037cc 100644 +--- a/blivet/devicefactory.py ++++ b/blivet/devicefactory.py +@@ -27,7 +27,7 @@ + from .devices import BTRFSDevice, DiskDevice + from .devices import LUKSDevice, LVMLogicalVolumeDevice + from .devices import PartitionDevice, MDRaidArrayDevice +-from .devices.lvm import DEFAULT_THPOOL_RESERVE ++from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE + from .formats import get_format + from .devicelibs import btrfs + from .devicelibs import mdraid +@@ -58,6 +58,7 @@ + DEVICE_TYPE_BTRFS = 3 + DEVICE_TYPE_DISK = 4 + DEVICE_TYPE_LVM_THINP = 5 ++DEVICE_TYPE_LVM_VDO = 6 + + + def is_supported_device_type(device_type): +@@ -69,6 +70,9 @@ def is_supported_device_type(device_type): + :returns: True if this device type is supported + :rtype: bool + """ ++ if device_type == DEVICE_TYPE_LVM_VDO: ++ return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available) ++ + devices = [] + if device_type == DEVICE_TYPE_BTRFS: + devices = [BTRFSDevice] +@@ -96,7 +100,7 @@ def get_supported_raid_levels(device_type): + pkg = None + if device_type == DEVICE_TYPE_BTRFS: + pkg = btrfs +- elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP): ++ elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP, DEVICE_TYPE_LVM_VDO): + pkg = lvm + elif device_type == DEVICE_TYPE_MD: + pkg = mdraid +@@ -116,6 +120,8 @@ def get_device_type(device): + "lvmlv": DEVICE_TYPE_LVM, + "lvmthinlv": DEVICE_TYPE_LVM_THINP, + "lvmthinpool": DEVICE_TYPE_LVM, ++ "lvmvdolv": DEVICE_TYPE_LVM_VDO, ++ "lvmvdopool": DEVICE_TYPE_LVM, + "btrfs subvolume": DEVICE_TYPE_BTRFS, + "btrfs volume": DEVICE_TYPE_BTRFS, + "mdarray": DEVICE_TYPE_MD} +@@ -136,6 +142,7 @@ def get_device_factory(blivet, device_type=DEVICE_TYPE_LVM, **kwargs): + DEVICE_TYPE_PARTITION: PartitionFactory, + DEVICE_TYPE_MD: MDFactory, + DEVICE_TYPE_LVM_THINP: LVMThinPFactory, ++ DEVICE_TYPE_LVM_VDO: LVMVDOFactory, + DEVICE_TYPE_DISK: DeviceFactory} + + factory_class = class_table[device_type] +@@ -1738,6 +1745,95 @@ def _get_new_device(self, *args, **kwargs): + return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs) + + ++class LVMVDOFactory(LVMFactory): ++ ++ """ Factory for creating LVM VDO volumes. 
++
++        :keyword pool_name: name for the VDO pool; if not specified, a unique name will be generated
++        :type pool_name: str
++        :keyword virtual_size: size for the VDO volume, usually bigger than the pool size; if not
++                               specified, the physical size (pool size) will be used
++        :type virtual_size: :class:`~.size.Size`
++        :keyword compression: whether to enable compression (defaults to True)
++        :type compression: bool
++        :keyword deduplication: whether to enable deduplication (defaults to True)
++        :type deduplication: bool
++    """
++
++    def __init__(self, storage, **kwargs):
++        self.pool_name = kwargs.pop("pool_name", None)
++        self.virtual_size = kwargs.pop("virtual_size", None)
++        self.compression = kwargs.pop("compression", True)
++        self.deduplication = kwargs.pop("deduplication", True)
++        super(LVMVDOFactory, self).__init__(storage, **kwargs)
++
++    def _get_new_pool(self, *args, **kwargs):
++        kwargs["vdo_pool"] = True
++        return super(LVMVDOFactory, self)._get_new_device(*args, **kwargs)
++
++    def _set_device_size(self):
++        """ Set the size of the factory device. """
++        super(LVMVDOFactory, self)._set_device_size()
++
++        self.device.pool.size = self.size
++        self._reconfigure_container()
++
++        if not self.virtual_size or self.virtual_size < self.size:
++            # virtual_size is not set or smaller than current size --> it should be the same as the pool size
++            self.device.size = self.size
++        else:
++            self.device.size = self.virtual_size
++
++    def _set_pool_name(self):
++        safe_new_name = self.storage.safe_device_name(self.pool_name)
++        if self.device.pool.name != safe_new_name:
++            if not safe_new_name:
++                log.error("not renaming '%s' to invalid name '%s'",
++                          self.device.pool.name, self.pool_name)
++                return
++            if safe_new_name in self.storage.names:
++                log.error("not renaming '%s' to in-use name '%s'",
++                          self.device.pool.name, safe_new_name)
++                return
++
++            log.debug("renaming device '%s' to '%s'",
++                      self.device.pool.name, safe_new_name)
++            self.device.pool.raw_device.name = safe_new_name
++
++    def _set_name(self):
++        super(LVMVDOFactory, self)._set_name()
++        if self.pool_name:
++            self._set_pool_name()
++
++    def _reconfigure_device(self):
++        super(LVMVDOFactory, self)._reconfigure_device()
++
++        self.device.pool.compression = self.compression
++        self.device.pool.deduplication = self.deduplication
++
++    #
++    # methods to configure the factory's device
++    #
++    def _get_new_device(self, *args, **kwargs):
++        """ Create and return the factory device as a StorageDevice. """
++        pool = self._get_new_pool(name=self.pool_name,
++                                  size=self.size,
++                                  parents=[self.vg],
++                                  compression=self.compression,
++                                  deduplication=self.deduplication)
++        self.storage.create_device(pool)
++
++        kwargs["parents"] = [pool]
++        kwargs["vdo_lv"] = True
++
++        if self.virtual_size:
++            vdolv_kwargs = kwargs.copy()
++            vdolv_kwargs["size"] = self.virtual_size
++        else:
++            vdolv_kwargs = kwargs
++        return super(LVMVDOFactory, self)._get_new_device(*args, **vdolv_kwargs)
++
++
+ class MDFactory(DeviceFactory):
+
+     """ Factory for creating MD RAID devices.
""" +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index 08068779..7cdb51c5 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -4,6 +4,9 @@ + from decimal import Decimal + import os + ++import test_compat # pylint: disable=unused-import ++from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error ++ + import blivet + + from blivet import devicefactory +@@ -93,10 +96,12 @@ def _validate_factory_device(self, *args, **kwargs): + self.assertEqual(device.format.label, + kwargs.get('label')) + +- self.assertLessEqual(device.size, kwargs.get("size")) +- self.assertGreaterEqual(device.size, device.format.min_size) +- if device.format.max_size: +- self.assertLessEqual(device.size, device.format.max_size) ++ # sizes with VDO are special, we have a special check in LVMVDOFactoryTestCase._validate_factory_device ++ if device_type != devicefactory.DEVICE_TYPE_LVM_VDO: ++ self.assertLessEqual(device.size, kwargs.get("size")) ++ self.assertGreaterEqual(device.size, device.format.min_size) ++ if device.format.max_size: ++ self.assertLessEqual(device.size, device.format.max_size) + + self.assertEqual(device.encrypted, + kwargs.get("encrypted", False) or +@@ -115,7 +120,11 @@ def test_device_factory(self): + "mountpoint": '/factorytest'} + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) +- self.b.recursive_remove(device) ++ ++ if device.type == "lvmvdolv": ++ self.b.recursive_remove(device.pool) ++ else: ++ self.b.recursive_remove(device) + + if self.encryption_supported: + # Encrypt the leaf device +@@ -157,6 +166,12 @@ def test_device_factory(self): + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + ++ # change size up ++ kwargs["device"] = device ++ kwargs["size"] = Size("900 MiB") ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ + # Change LUKS version + kwargs["luks_version"] = "luks1" + device = self._factory_device(device_type, **kwargs) +@@ -179,7 +194,7 @@ def _get_size_delta(self, devices=None): + """ + return Size("1 MiB") + +- def test_get_free_disk_space(self): ++ def test_get_free_disk_space(self, *args): # pylint: disable=unused-argument + # get_free_disk_space should return the total free space on disks + kwargs = self._get_test_factory_args() + kwargs["size"] = Size("500 MiB") +@@ -206,7 +221,7 @@ def test_get_free_disk_space(self): + sum(d.size for d in self.b.disks) - device_space, + delta=self._get_size_delta(devices=[device])) + +- def test_normalize_size(self): ++ def test_normalize_size(self, *args): # pylint: disable=unused-argument + # _normalize_size should adjust target size to within the format limits + fstype = "ext2" + ext2 = get_format(fstype) +@@ -258,7 +273,7 @@ def test_default_factory_type(self): + factory = devicefactory.get_device_factory(self.b) + self.assertIsInstance(factory, devicefactory.LVMFactory) + +- def test_factory_defaults(self): ++ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + ctor_kwargs = self._get_test_factory_args() + factory = devicefactory.get_device_factory(self.b, self.device_type, **ctor_kwargs) + for setting, value in factory._default_settings.items(): +@@ -522,6 +537,103 @@ def _get_size_delta(self, devices=None): + return delta + + ++class LVMVDOFactoryTestCase(LVMFactoryTestCase): ++ device_class = LVMLogicalVolumeDevice ++ 
device_type = devicefactory.DEVICE_TYPE_LVM_VDO ++ encryption_supported = False ++ ++ def _validate_factory_device(self, *args, **kwargs): ++ super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args, ++ **kwargs) ++ device = args[0] ++ ++ if kwargs.get("encrypted", False): ++ vdolv = device.parents[0] ++ else: ++ vdolv = device ++ ++ self.assertTrue(hasattr(vdolv, "pool")) ++ ++ virtual_size = kwargs.get("virtual_size", 0) ++ if virtual_size: ++ self.assertEqual(vdolv.size, virtual_size) ++ else: ++ self.assertEqual(vdolv.size, vdolv.pool.size) ++ self.assertGreaterEqual(vdolv.size, vdolv.pool.size) ++ ++ compression = kwargs.get("compression", True) ++ self.assertEqual(vdolv.pool.compression, compression) ++ ++ deduplication = kwargs.get("deduplication", True) ++ self.assertEqual(vdolv.pool.deduplication, deduplication) ++ ++ pool_name = kwargs.get("pool_name", None) ++ if pool_name: ++ self.assertEqual(vdolv.pool.lvname, pool_name) ++ ++ return device ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ ++ device_type = self.device_type ++ kwargs = {"disks": self.b.disks, ++ "size": Size("400 MiB"), ++ "fstype": 'ext4', ++ "mountpoint": '/factorytest'} ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ self.b.recursive_remove(device.pool) ++ ++ kwargs = {"disks": self.b.disks, ++ "size": Size("400 MiB"), ++ "fstype": 'ext4', ++ "mountpoint": '/factorytest', ++ "pool_name": "vdopool", ++ "deduplication": True, ++ "compression": True} ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change size without specifying virtual_size: both sizes should grow ++ kwargs["size"] = Size("600 MiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change virtual size ++ kwargs["virtual_size"] = Size("6 GiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change virtual size to smaller than size ++ kwargs["virtual_size"] = Size("500 GiB") ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # change deduplication and compression ++ kwargs["deduplication"] = False ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ kwargs["compression"] = False ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ # rename the pool ++ kwargs["pool_name"] = "vdopool2" ++ kwargs["device"] = device ++ device = self._factory_device(device_type, **kwargs) ++ self._validate_factory_device(device, device_type, **kwargs) ++ ++ + class MDFactoryTestCase(DeviceFactoryTestCase): + 
device_type = devicefactory.DEVICE_TYPE_MD
+     device_class = MDRaidArrayDevice
+
+From 22ba2b96111d5f153a3b55d3c56d84e597cf9a90 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Mon, 2 Nov 2020 14:33:06 +0100
+Subject: [PATCH 09/17] Add VM test for LVM VDO
+
+---
+ tests/vmtests/blivet_reset_vmtest.py | 15 +++++++++++++++
+ tests/vmtests/runvmtests.py          |  3 ++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/tests/vmtests/blivet_reset_vmtest.py b/tests/vmtests/blivet_reset_vmtest.py
+index 8743d51e..47fc84c4 100644
+--- a/tests/vmtests/blivet_reset_vmtest.py
++++ b/tests/vmtests/blivet_reset_vmtest.py
+@@ -192,6 +192,21 @@ def setUp(self):
+         self.collect_expected_data()
+
+
++class LVMVDOTestCase(BlivetResetTestCase):
++
++    def _set_up_storage(self):
++        if not devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
++            self.skipTest("VDO not supported, skipping")
++
++        self.blivet.factory_device(devicefactory.DEVICE_TYPE_LVM_VDO,
++                                   size=Size("10 GiB"),
++                                   fstype="ext4",
++                                   disks=self.blivet.disks[:],
++                                   name="vdolv",
++                                   pool_name="vdopool",
++                                   virtual_size=Size("40 GiB"))
++
++
+ @unittest.skip("temporarily disabled due to issues with raids with metadata version 0.90")
+ class MDRaid0TestCase(BlivetResetTestCase):
+
+diff --git a/tests/vmtests/runvmtests.py b/tests/vmtests/runvmtests.py
+index 88143d3a..6f20484f 100644
+--- a/tests/vmtests/runvmtests.py
++++ b/tests/vmtests/runvmtests.py
+@@ -12,7 +12,8 @@
+                 "tests.vmtests.blivet_reset_vmtest.LVMThinSnapShotTestCase",
+                 "tests.vmtests.blivet_reset_vmtest.LVMRaidTestCase",
+                 "tests.vmtests.blivet_reset_vmtest.MDRaid0TestCase",
+-                "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase"]
++                "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase",
++                "tests.vmtests.blivet_reset_vmtest.LVMVDOTestCase"]
+
+ SNAP_NAME = "snapshot"
+
+
+From 52b37bb86e856f1ede71f7cceb7284a639d741f4 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:07:17 +0100
+Subject: [PATCH 10/17] Allow adding nodiscard option when running mkfs
+
+For filesystems that support it we might want to add a nodiscard
+option to mkfs when creating a format on devices like LVM VDO
+volumes, where discard is very slow and doesn't really make sense
+when running mkfs.
+---
+ blivet/formats/fs.py               | 12 +++++-
+ blivet/tasks/fsmkfs.py             | 59 +++++++++++++++++++++++++++---
+ tests/formats_test/methods_test.py |  3 +-
+ 3 files changed, 66 insertions(+), 8 deletions(-)
+
+diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
+index 4ba83e6d..e61e5b86 100644
+--- a/blivet/formats/fs.py
++++ b/blivet/formats/fs.py
+@@ -132,6 +132,7 @@ def __init__(self, **kwargs):
+         self.mountopts = kwargs.get("mountopts", "")
+         self.label = kwargs.get("label")
+         self.fsprofile = kwargs.get("fsprofile")
++        self._mkfs_nodiscard = kwargs.get("nodiscard", False)
+
+         self._user_mountopts = self.mountopts
+
+@@ -263,6 +264,14 @@ def label_format_ok(self, label):
+     label = property(lambda s: s._get_label(), lambda s, l: s._set_label(l),
+                      doc="this filesystem's label")
+
++    def can_nodiscard(self):
++        """Returns True if this filesystem supports the nodiscard option during
++        creation, otherwise False.
++
++        :rtype: bool
++        """
++        return self._mkfs.can_nodiscard and self._mkfs.available
++
+     def can_set_uuid(self):
+         """Returns True if this filesystem supports setting an UUID during
+         creation, otherwise False.
+@@ -402,7 +411,8 @@ def _create(self, **kwargs):
+         try:
+             self._mkfs.do_task(options=kwargs.get("options"),
+                                label=not self.relabels(),
+-                               set_uuid=self.can_set_uuid())
++                               set_uuid=self.can_set_uuid(),
++                               nodiscard=self.can_nodiscard())
+         except FSWriteLabelError as e:
+             log.warning("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem.", self.label, self.type)
+         except FSWriteUUIDError as e:
+diff --git a/blivet/tasks/fsmkfs.py b/blivet/tasks/fsmkfs.py
+index ad166aa0..c982f7e7 100644
+--- a/blivet/tasks/fsmkfs.py
++++ b/blivet/tasks/fsmkfs.py
+@@ -37,6 +37,7 @@ class FSMkfsTask(fstask.FSTask):
+
+     can_label = abc.abstractproperty(doc="whether this task labels")
+     can_set_uuid = abc.abstractproperty(doc="whether this task can set UUID")
++    can_nodiscard = abc.abstractproperty(doc="whether this task can set nodiscard option")
+
+
+ @add_metaclass(abc.ABCMeta)
+@@ -48,6 +49,9 @@ class FSMkfs(task.BasicApplication, FSMkfsTask):
+     label_option = abc.abstractproperty(
+         doc="Option for setting a filesystem label.")
+
++    nodiscard_option = abc.abstractproperty(
++        doc="Option for setting the nodiscard option for mkfs.")
++
+     args = abc.abstractproperty(doc="options for creating filesystem")
+
+     @abc.abstractmethod
+@@ -80,6 +84,15 @@ def can_set_uuid(self):
+         """
+         return self.get_uuid_args is not None
+
++    @property
++    def can_nodiscard(self):
++        """Whether this task can set the nodiscard option for a filesystem.
++
++        :returns: True if nodiscard can be set
++        :rtype: bool
++        """
++        return self.nodiscard_option is not None
++
+     @property
+     def _label_options(self):
+         """ Any labeling options that a particular filesystem may use.
+@@ -100,6 +113,23 @@ def _label_options(self):
+         else:
+             raise FSWriteLabelError("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem." % (self.fs.label, self.fs.type))
+
++    @property
++    def _nodiscard_option(self):
++        """ Any nodiscard options that a particular filesystem may use.
++
++        :returns: nodiscard options
++        :rtype: list of str
++        """
++        # Do not know how to set nodiscard while formatting.
++        if self.nodiscard_option is None:
++            return []
++
++        # nodiscard option not requested
++        if not self.fs._mkfs_nodiscard:
++            return []
++
++        return self.nodiscard_option
++
+     @property
+     def _uuid_options(self):
+         """Any UUID options that a particular filesystem may use.
+@@ -119,7 +149,7 @@ def _uuid_options(self):
+                                   " is unacceptable for this filesystem."
+                                   % (self.fs.uuid, self.fs.type))
+
+-    def _format_options(self, options=None, label=False, set_uuid=False):
++    def _format_options(self, options=None, label=False, set_uuid=False, nodiscard=False):
+         """Get a list of format options to be used when creating the
+         filesystem.
+
+@@ -135,11 +165,12 @@ def _format_options(self, options=None, label=False, set_uuid=False):
+
+         label_options = self._label_options if label else []
+         uuid_options = self._uuid_options if set_uuid else []
++        nodiscard_option = self._nodiscard_option if nodiscard else []
+         create_options = shlex.split(self.fs.create_options or "")
+         return (options + self.args + label_options + uuid_options +
+-                create_options + [self.fs.device])
++                nodiscard_option + create_options + [self.fs.device])
+
+-    def _mkfs_command(self, options, label, set_uuid):
++    def _mkfs_command(self, options, label, set_uuid, nodiscard):
+         """Return the command to make the filesystem.
+ + :param options: any special options +@@ -148,12 +179,14 @@ def _mkfs_command(self, options, label, set_uuid): + :type label: bool + :param set_uuid: whether to set an UUID + :type set_uuid: bool ++ :param nodiscard: whether to run mkfs with nodiscard option ++ :type nodiscard: bool + :returns: the mkfs command + :rtype: list of str + """ +- return [str(self.ext)] + self._format_options(options, label, set_uuid) ++ return [str(self.ext)] + self._format_options(options, label, set_uuid, nodiscard) + +- def do_task(self, options=None, label=False, set_uuid=False): ++ def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False): + """Create the format on the device and label if possible and desired. + + :param options: any special options, may be None +@@ -168,7 +201,7 @@ def do_task(self, options=None, label=False, set_uuid=False): + raise FSError("\n".join(error_msgs)) + + options = options or [] +- cmd = self._mkfs_command(options, label, set_uuid) ++ cmd = self._mkfs_command(options, label, set_uuid, nodiscard) + try: + ret = util.run_program(cmd) + except OSError as e: +@@ -181,6 +214,7 @@ def do_task(self, options=None, label=False, set_uuid=False): + class BTRFSMkfs(FSMkfs): + ext = availability.MKFS_BTRFS_APP + label_option = None ++ nodiscard_option = ["--nodiscard"] + + def get_uuid_args(self, uuid): + return ["-U", uuid] +@@ -193,6 +227,7 @@ def args(self): + class Ext2FSMkfs(FSMkfs): + ext = availability.MKE2FS_APP + label_option = "-L" ++ nodiscard_option = ["-E", "nodiscard"] + + _opts = [] + +@@ -215,6 +250,7 @@ class Ext4FSMkfs(Ext3FSMkfs): + class FATFSMkfs(FSMkfs): + ext = availability.MKDOSFS_APP + label_option = "-n" ++ nodiscard_option = None + + def get_uuid_args(self, uuid): + return ["-i", uuid.replace('-', '')] +@@ -227,6 +263,7 @@ def args(self): + class GFS2Mkfs(FSMkfs): + ext = availability.MKFS_GFS2_APP + label_option = None ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -237,6 +274,7 @@ def args(self): + class HFSMkfs(FSMkfs): + ext = availability.HFORMAT_APP + label_option = "-l" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -247,6 +285,7 @@ def args(self): + class HFSPlusMkfs(FSMkfs): + ext = availability.MKFS_HFSPLUS_APP + label_option = "-v" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -257,6 +296,7 @@ def args(self): + class JFSMkfs(FSMkfs): + ext = availability.MKFS_JFS_APP + label_option = "-L" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -267,6 +307,7 @@ def args(self): + class NTFSMkfs(FSMkfs): + ext = availability.MKNTFS_APP + label_option = "-L" ++ nodiscard_option = None + get_uuid_args = None + + @property +@@ -277,6 +318,7 @@ def args(self): + class ReiserFSMkfs(FSMkfs): + ext = availability.MKREISERFS_APP + label_option = "-l" ++ nodiscard_option = None + + def get_uuid_args(self, uuid): + return ["-u", uuid] +@@ -289,6 +331,7 @@ def args(self): + class XFSMkfs(FSMkfs): + ext = availability.MKFS_XFS_APP + label_option = "-L" ++ nodiscard_option = ["-K"] + + def get_uuid_args(self, uuid): + return ["-m", "uuid=" + uuid] +@@ -307,3 +350,7 @@ def can_label(self): + @property + def can_set_uuid(self): + return False ++ ++ @property ++ def can_nodiscard(self): ++ return False +diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py +index 710fa1c5..b2674ea7 100644 +--- a/tests/formats_test/methods_test.py ++++ b/tests/formats_test/methods_test.py +@@ -307,7 +307,8 @@ def _test_create_backend(self): + 
self.format._mkfs.do_task.assert_called_with(
+             options=None,
+             label=not self.format.relabels(),
+-            set_uuid=self.format.can_set_uuid()
++            set_uuid=self.format.can_set_uuid(),
++            nodiscard=self.format.can_nodiscard()
+         )
+
+     def _test_setup_backend(self):
+
+From ac04f74fa9bc8ded3facd302ca74ec033009a0bd Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:19:21 +0100
+Subject: [PATCH 11/17] Add nodiscard option by default when creating VDO
+ logical volumes
+
+The user can override this by passing "nodiscard=False" to the LV
+constructor, but we want nodiscard by default.
+---
+ blivet/blivet.py            | 8 +++++++-
+ blivet/devicefactory.py     | 6 ++++++
+ tests/devicefactory_test.py | 7 +++++++
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/blivet/blivet.py b/blivet/blivet.py
+index 754eb152..e4115691 100644
+--- a/blivet/blivet.py
++++ b/blivet/blivet.py
+@@ -613,9 +613,15 @@ def new_lv(self, *args, **kwargs):
+
+         mountpoint = kwargs.pop("mountpoint", None)
+         if 'fmt_type' in kwargs:
++            fmt_args = kwargs.pop("fmt_args", {})
++            if vdo_lv and "nodiscard" not in fmt_args.keys():
++                # we don't want to run discard on a VDO LV during mkfs, so unless the
++                # user explicitly tells us not to, we add the nodiscard option to mkfs
++                fmt_args["nodiscard"] = True
++
+             kwargs["fmt"] = get_format(kwargs.pop("fmt_type"),
+                                        mountpoint=mountpoint,
+-                                       **kwargs.pop("fmt_args", {}))
++                                       **fmt_args)
+
+         name = kwargs.pop("name", None)
+         if name:
+diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
+index c95037cc..085f2fd6 100644
+--- a/blivet/devicefactory.py
++++ b/blivet/devicefactory.py
+@@ -1811,6 +1811,12 @@ def _reconfigure_device(self):
+         self.device.pool.compression = self.compression
+         self.device.pool.deduplication = self.deduplication
+
++    def _set_format(self):
++        super(LVMVDOFactory, self)._set_format()
++
++        # preserve the nodiscard mkfs option after changing the filesystem
++        self.device.format._mkfs_nodiscard = True
++
+     #
+     # methods to configure the factory's device
+     #
+diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
+index 7cdb51c5..4de1e05b 100644
+--- a/tests/devicefactory_test.py
++++ b/tests/devicefactory_test.py
+@@ -571,6 +571,10 @@ def _validate_factory_device(self, *args, **kwargs):
+         if pool_name:
+             self.assertEqual(vdolv.pool.lvname, pool_name)
+
++        # nodiscard should always be set for the VDO LV format
++        if vdolv.format.type:
++            self.assertTrue(vdolv.format._mkfs_nodiscard)
++
+         return device
+
+     @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+@@ -633,6 +637,9 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
+         device = self._factory_device(device_type, **kwargs)
+         self._validate_factory_device(device, device_type, **kwargs)
+
++        # change fstype
++        kwargs["fstype"] = "xfs"
++
+
+ class MDFactoryTestCase(DeviceFactoryTestCase):
+     device_type = devicefactory.DEVICE_TYPE_MD
+
+From 43f25ce84729c321d1ff2bbba2f50489f6d736b4 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 19 Nov 2020 13:31:40 +0100
+Subject: [PATCH 12/17] Add LVM VDO example
+
+---
+ examples/lvm_vdo.py | 61 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 61 insertions(+)
+ create mode 100644 examples/lvm_vdo.py
+
+diff --git a/examples/lvm_vdo.py b/examples/lvm_vdo.py
+new file mode 100644
+index 00000000..ad081642
+--- /dev/null
++++ b/examples/lvm_vdo.py
+@@ -0,0 +1,61 @@
++import os
++
++import blivet
++from blivet.size import Size
++from blivet.util import set_up_logging, create_sparse_tempfile
++
++set_up_logging()
++b = blivet.Blivet()  # create an instance of Blivet (don't add system devices)
++
++# create a disk image file on which to create new devices
++disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
++b.disk_images["disk1"] = disk1_file
++disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
++b.disk_images["disk2"] = disk2_file
++
++b.reset()
++
++try:
++    disk1 = b.devicetree.get_device_by_name("disk1")
++    disk2 = b.devicetree.get_device_by_name("disk2")
++
++    b.initialize_disk(disk1)
++    b.initialize_disk(disk2)
++
++    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
++    b.create_device(pv)
++    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
++    b.create_device(pv2)
++
++    # allocate the partitions (decide where and on which disks they'll reside)
++    blivet.partitioning.do_partitioning(b)
++
++    vg = b.new_vg(parents=[pv, pv2])
++    b.create_device(vg)
++
++    # create an 80 GiB VDO pool
++    # there can be only one VDO LV on the pool and the two are created together
++    # with one LVM call; we have two separate devices because there are two block
++    # devices in the end, and this allows controlling both the "physical" size of
++    # the pool and the "logical" size of the VDO LV (which is usually bigger,
++    # accounting for the space saved by deduplication and/or compression)
++    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
++                    deduplication=True, compression=True)
++    b.create_device(pool)
++
++    # create the VDO LV with a 400 GiB "virtual size" and an ext4 filesystem on
++    # the VDO pool
++    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
++                  fmt_type="ext4")
++    b.create_device(lv)
++
++    print(b.devicetree)
++
++    # write the new partitions to disk and format them as specified
++    b.do_it()
++    print(b.devicetree)
++    input("Check the state and hit ENTER to trigger cleanup")
++finally:
++    b.devicetree.teardown_disk_images()
++    os.unlink(disk1_file)
++    os.unlink(disk2_file)
+
+From c487a1e6023b54f5beea8d99ba2f5da5d80590ee Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Wed, 25 Nov 2020 13:30:15 +0100
+Subject: [PATCH 13/17] Add LVM VDO documentation
+
+---
+ doc/lvmvdo.rst | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 86 insertions(+)
+ create mode 100644 doc/lvmvdo.rst
+
+diff --git a/doc/lvmvdo.rst b/doc/lvmvdo.rst
+new file mode 100644
+index 00000000..3965abd3
+--- /dev/null
++++ b/doc/lvmvdo.rst
+@@ -0,0 +1,86 @@
++LVM VDO support
++===============
++
++Support for creating LVM VDO devices has been added in Blivet 3.4.
++
++These devices are similar to LVM thinly provisioned volumes, but there are some special steps
++and limitations when creating these devices, which this document describes.
++
++LVM VDO in Blivet
++-----------------
++
++LVM VDO devices are represented by two ``LVMLogicalVolumeDevice`` devices:
++
++- VDO Pool logical volume with type 'lvmvdopool'
++- VDO logical volume with type 'lvmvdolv', which is the child of the VDO Pool device
++
++Existing LVM VDO setup in Blivet:
++
++    existing 20 GiB disk vdb (265) with existing msdos disklabel
++      existing 20 GiB partition vdb1 (275) with existing lvmpv
++        existing 20 GiB lvmvg data (284)
++          existing 10 GiB lvmvdopool data-vdopool (288)
++            existing 50 GiB lvmvdolv data-vdolv (295)
++
++When creating an LVM VDO setup using Blivet, these two devices must be created together, as they
++are created by a single LVM command.
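++
++Internally both devices come from that single ``lvcreate`` call, roughly equivalent to
++``lvcreate --type vdo -n vdolv -L 10G -V 50G data/vdopool`` for the setup above (an
++illustrative invocation based on the ``lvmvdo(7)`` man page, not a command Blivet runs
++verbatim).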
++
++It currently isn't possible to create additional VDO logical volumes in the pool. It is, however,
++possible to create multiple VDO pools in a single volume group.
++
++Deduplication and compression are properties of the VDO pool. The size specified for the VDO pool
++volume will be used as the "physical" size of the pool, and the size specified for the VDO logical
++volume will be used as the "virtual" size of the VDO volume.
++
++When creating a format, it must be created on the VDO logical volume. For filesystems with discard
++support, the nodiscard option will be automatically added when calling the ``mkfs`` command
++(e.g. ``-K`` for ``mkfs.xfs``).
++
++Example of creating an *80 GiB* VDO pool with a *400 GiB* VDO logical volume with an *ext4* format,
++with both deduplication and compression enabled:
++
++    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
++                    deduplication=True, compression=True)
++    b.create_device(pool)
++
++    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
++                  fmt_type="ext4")
++    b.create_device(lv)
++
++When removing existing LVM VDO devices, both devices must be removed from the devicetree, and the
++VDO logical volume must be removed first (``recursive_remove`` can be used to automate these two
++steps).
++
++Management of existing LVM VDO devices is currently not supported.
++
++
++LVM VDO in Devicefactory
++------------------------
++
++For top-down specified creation using device factories, a new ``LVMVDOFactory`` factory has been
++added. The factory device in this case is the VDO logical volume, which is again automatically
++created together with the VDO pool.
++
++Example of creating a new LVM VDO setup using the ``devicefactory`` module:
++
++    factory = blivet.devicefactory.LVMVDOFactory(b, size=Size("5 GiB"), virtual_size=Size("50 GiB"),
++                                                 disks=disks, fstype="xfs",
++                                                 container_name="data",
++                                                 pool_name="myvdopool",
++                                                 compression=True, deduplication=True)
++    factory.configure()
++    factory.device
++
++        LVMLogicalVolumeDevice instance (0x7f14d17422b0) --
++            name = data-00  status = False  id = 528
++            children = []
++            parents = ['non-existent 5 GiB lvmvdopool data-myvdopool (519)']
++            ...
++
++``size`` in this case sets the pool (physical) size; the VDO logical volume size can be specified
++with ``virtual_size`` (if not specified, it will be the same as the pool size). The name for the
++VDO volume can be specified using the ``name`` keyword argument. The ``pool_name`` argument is
++optional, and a unique name will be generated if omitted. Both ``compression`` and
++``deduplication`` default to ``True`` (enabled) if not specified.
++
++This factory can create only a single VDO logical volume in a single VDO pool, but additional VDO
++pools can be added by repeating the steps used to create the first one.
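++
++Whether LVM VDO can be used depends on the libblockdev LVM plugin and its VDO technology being
++available, so it is a good idea to check that the LVM VDO device type is supported before using
++the factory. A minimal sketch (the disk selection and sizes are illustrative):
++
++    import blivet
++    from blivet import devicefactory
++    from blivet.size import Size
++
++    b = blivet.Blivet()
++    b.reset()  # scan the system for devices
++
++    if devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
++        factory = devicefactory.LVMVDOFactory(b, size=Size("5 GiB"),
++                                              virtual_size=Size("50 GiB"),
++                                              disks=b.disks[:], fstype="xfs",
++                                              container_name="data",
++                                              pool_name="myvdopool")
++        factory.configure()
++        b.do_it()  # execute the scheduled actions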
+ +From c6c776cf137b5c6ae454487df469e9a6dba8a5d1 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 9 Dec 2020 14:06:27 +0100 +Subject: [PATCH 14/17] Set minimum size for LVM VDO pool devices + +--- + blivet/devicefactory.py | 3 +++ + blivet/devices/lvm.py | 26 ++++++++++++++++++++++++++ + tests/devicefactory_test.py | 29 ++++++++++++++++++++--------- + tests/devices_test/lvm_test.py | 6 ++++++ + 4 files changed, 55 insertions(+), 9 deletions(-) + +diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py +index 085f2fd6..5e47eb9a 100644 +--- a/blivet/devicefactory.py ++++ b/blivet/devicefactory.py +@@ -277,6 +277,7 @@ class DeviceFactory(object): + "container_size": SIZE_POLICY_AUTO, + "container_raid_level": None, + "container_encrypted": None} ++ _device_min_size = Size(0) # no limit by default, limited only by filesystem size + + def __init__(self, storage, **kwargs): + """ +@@ -1760,6 +1761,8 @@ class LVMVDOFactory(LVMFactory): + :type deduplication: bool + """ + ++ _device_min_size = LVMVDOPoolMixin._min_size ++ + def __init__(self, storage, **kwargs): + self.pool_name = kwargs.pop("pool_name", None) + self.virtual_size = kwargs.pop("virtual_size", None) +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 0802e2de..785fa2d2 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -1792,6 +1792,7 @@ def populate_ksdata(self, data): + class LVMVDOPoolMixin(object): + + _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO] ++ _min_size = Size("5 GiB") # 2.5 GiB for index and one 2 GiB slab rounded up to 5 GiB + + def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None): + self.compression = compression +@@ -1800,6 +1801,9 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p + self.write_policy = write_policy + self._lvs = [] + ++ if not self.exists and self.size < self.min_size: ++ raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size)) ++ + @property + def is_vdo_pool(self): + return self.seg_type == "vdo-pool" +@@ -1856,6 +1860,23 @@ def direct(self): + """ Is this device directly accessible? 
""" + return False + ++ @property ++ @util.requires_property("is_vdo_pool") ++ def min_size(self): ++ if self.exists: ++ return self.current_size ++ ++ return self._min_size ++ ++ def _set_size(self, newsize): ++ if not isinstance(newsize, Size): ++ raise AttributeError("new size must of type Size") ++ ++ if newsize < self.min_size: ++ raise ValueError("Requested size %s is smaller than minimum %s" % (newsize, self.min_size)) ++ ++ DMDevice._set_size(self, newsize) ++ + def read_current_size(self): + log_method_call(self, exists=self.exists, path=self.path, + sysfs_path=self.sysfs_path) +@@ -2229,6 +2250,11 @@ def max_size(self): + max_format = self.format.max_size + return min(max_lv, max_format) if max_format else max_lv + ++ @property ++ @type_specific ++ def min_size(self): ++ return super(LVMLogicalVolumeDevice, self).min_size ++ + @property + @type_specific + def vg_space_used(self): +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index 4de1e05b..a1334cda 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -49,13 +49,18 @@ class DeviceFactoryTestCase(unittest.TestCase): + encryption_supported = True + """ whether encryption of this device type is supported by blivet """ + ++ factory_class = None ++ """ devicefactory class used in this test case """ ++ ++ _disk_size = Size("2 GiB") ++ + def setUp(self): + if self.device_type is None: + raise unittest.SkipTest("abstract base class") + + self.b = blivet.Blivet() # don't populate it +- self.disk_files = [create_sparse_tempfile("factorytest", Size("2 GiB")), +- create_sparse_tempfile("factorytest", Size("2 GiB"))] ++ self.disk_files = [create_sparse_tempfile("factorytest", self._disk_size), ++ create_sparse_tempfile("factorytest", self._disk_size)] + for filename in self.disk_files: + disk = DiskFile(filename) + self.b.devicetree._add_device(disk) +@@ -197,7 +202,7 @@ def _get_size_delta(self, devices=None): + def test_get_free_disk_space(self, *args): # pylint: disable=unused-argument + # get_free_disk_space should return the total free space on disks + kwargs = self._get_test_factory_args() +- kwargs["size"] = Size("500 MiB") ++ kwargs["size"] = max(Size("500 MiB"), self.factory_class._device_min_size) + factory = devicefactory.get_device_factory(self.b, + self.device_type, + disks=self.b.disks, +@@ -285,7 +290,7 @@ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + kwargs = self._get_test_factory_args() + kwargs.update({"disks": self.b.disks[:], + "fstype": "swap", +- "size": Size("2GiB"), ++ "size": max(Size("2GiB"), self.factory_class._device_min_size), + "label": "SWAP"}) + device = self._factory_device(self.device_type, **kwargs) + factory = devicefactory.get_device_factory(self.b, self.device_type, +@@ -302,6 +307,7 @@ def test_factory_defaults(self, *args): # pylint: disable=unused-argument + class PartitionFactoryTestCase(DeviceFactoryTestCase): + device_class = PartitionDevice + device_type = devicefactory.DEVICE_TYPE_PARTITION ++ factory_class = devicefactory.PartitionFactory + + def test_bug1178884(self): + # Test a change of format and size where old size is too large for the +@@ -330,6 +336,7 @@ def _get_size_delta(self, devices=None): + class LVMFactoryTestCase(DeviceFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM ++ factory_class = devicefactory.LVMFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMFactoryTestCase, self)._validate_factory_device(*args, **kwargs) 
+@@ -510,6 +517,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM_THINP + encryption_supported = False ++ factory_class = devicefactory.LVMThinPFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMThinPFactoryTestCase, self)._validate_factory_device(*args, +@@ -541,6 +549,8 @@ class LVMVDOFactoryTestCase(LVMFactoryTestCase): + device_class = LVMLogicalVolumeDevice + device_type = devicefactory.DEVICE_TYPE_LVM_VDO + encryption_supported = False ++ _disk_size = Size("10 GiB") # we need bigger disks for VDO ++ factory_class = devicefactory.LVMVDOFactory + + def _validate_factory_device(self, *args, **kwargs): + super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args, +@@ -585,7 +595,7 @@ def _validate_factory_device(self, *args, **kwargs): + def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ + device_type = self.device_type + kwargs = {"disks": self.b.disks, +- "size": Size("400 MiB"), ++ "size": Size("6 GiB"), + "fstype": 'ext4', + "mountpoint": '/factorytest'} + device = self._factory_device(device_type, **kwargs) +@@ -593,7 +603,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + self.b.recursive_remove(device.pool) + + kwargs = {"disks": self.b.disks, +- "size": Size("400 MiB"), ++ "size": Size("6 GiB"), + "fstype": 'ext4', + "mountpoint": '/factorytest', + "pool_name": "vdopool", +@@ -603,19 +613,19 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + self._validate_factory_device(device, device_type, **kwargs) + + # change size without specifying virtual_size: both sizes should grow +- kwargs["size"] = Size("600 MiB") ++ kwargs["size"] = Size("8 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + + # change virtual size +- kwargs["virtual_size"] = Size("6 GiB") ++ kwargs["virtual_size"] = Size("40 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) + + # change virtual size to smaller than size +- kwargs["virtual_size"] = Size("500 GiB") ++ kwargs["virtual_size"] = Size("10 GiB") + kwargs["device"] = device + device = self._factory_device(device_type, **kwargs) + self._validate_factory_device(device, device_type, **kwargs) +@@ -644,6 +654,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + class MDFactoryTestCase(DeviceFactoryTestCase): + device_type = devicefactory.DEVICE_TYPE_MD + device_class = MDRaidArrayDevice ++ factory_class = devicefactory.MDFactory + + def test_device_factory(self): + # RAID0 across two disks +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 493d3ba1..78b140ba 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -705,6 +705,12 @@ def test_new_vdo_pool(self): + + self.assertEqual(vg.size, Size("10236 MiB")) + ++ with self.assertRaises(ValueError): ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("1 GiB")) ++ + vdopool = b.new_lv(name="vdopool", vdo_pool=True, + parents=[vg], compression=True, + deduplication=True, + +From 197f2877709e702c101ada6b9a055a88f09320c8 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 11 Dec 2020 14:20:48 +0100 
+Subject: [PATCH 15/17] Use better description for libblockdev plugins in
+ tasks.availability
+
+The old names were quite confusing, showing that "lvm" is missing
+when in fact the libblockdev LVM plugin is missing. Also with LVM
+VDO we need to be able to tell the difference between a missing
+LVM plugin and missing LVM VDO support.
+---
+ blivet/tasks/availability.py | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
+index b107428e..52418685 100644
+--- a/blivet/tasks/availability.py
++++ b/blivet/tasks/availability.py
+@@ -236,13 +236,13 @@ def availability_errors(self, resource):
+         :returns: [] if the name of the plugin is loaded
+         :rtype: list of str
+         """
+-        if resource.name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
+-            return ["libblockdev plugin %s not loaded" % resource.name]
++        if self._tech_info.plugin_name not in blockdev.get_available_plugin_names():  # pylint: disable=no-value-for-parameter
++            return ["libblockdev plugin %s not loaded" % self._tech_info.plugin_name]
+         else:
+             tech_missing = self._check_technologies()
+             if tech_missing:
+                 return ["libblockdev plugin %s is loaded but some required "
+-                        "technologies are not available:\n%s" % (resource.name, tech_missing)]
++                        "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
+             else:
+                 return []
+
+@@ -411,16 +411,16 @@ def available_resource(name):
+ # we can't just check if the plugin is loaded, we also need to make sure
+ # that all technologies required by us our supported (some may be missing
+ # due to missing dependencies)
+-BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
+-BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
+-BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
+-BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
+-BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
+-BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
+-BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
+-BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
+-BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
+-BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)
++BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("libblockdev btrfs plugin", BLOCKDEV_BTRFS_TECH)
++BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("libblockdev crypto plugin", BLOCKDEV_CRYPTO_TECH)
++BLOCKDEV_DM_PLUGIN = blockdev_plugin("libblockdev dm plugin", BLOCKDEV_DM_TECH)
++BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technology)", BLOCKDEV_DM_TECH_RAID)
++BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
++BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
++BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
++BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
++BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
++BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
+
+ # applications with versions
+ # we need e2fsprogs newer than 1.41 and we are checking the version by running
+
+From 5fc047b48b0de18fa249f102d2a7163ac2d6e6a6 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 11 Dec 2020 14:24:18 +0100
+Subject: [PATCH 16/17] Fix external dependencies for LVM VDO devices
+
+The external and unavailable dependencies code is mostly supposed
+to work with just class objects and not instances, which is a problem
+for LVM devices: the LVMLogicalVolumeDevice can't depend on LVM VDO,
+and the special LVM VDO device mixin classes don't inherit from the
+Device class, so they are missing some availability functions.
+This fix adds the necessary functions to LVM VDO mixin classes to
+make sure both "unavailable_type_dependencies" and
+"type_external_dependencies" work with LVMVDOLogicalVolumeMixin
+and LVMVDOPoolMixin. When working with an LVMLogicalVolumeDevice
+instance, its dependencies are correctly set based on the type of
+the logical volume.
+---
+ blivet/devicefactory.py        |   7 +--
+ blivet/devices/lvm.py          |  31 ++++++++++
+ tests/action_test.py           |   7 +++
+ tests/devicefactory_test.py    |  32 ++++++++++
+ tests/devices_test/lvm_test.py | 106 +++++++++++++++++++++++++++++++++
+ 5 files changed, 179 insertions(+), 4 deletions(-)
+
+diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
+index 5e47eb9a..b29a107a 100644
+--- a/blivet/devicefactory.py
++++ b/blivet/devicefactory.py
+@@ -27,7 +27,7 @@
+ from .devices import BTRFSDevice, DiskDevice
+ from .devices import LUKSDevice, LVMLogicalVolumeDevice
+ from .devices import PartitionDevice, MDRaidArrayDevice
+-from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
++from .devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin, DEFAULT_THPOOL_RESERVE
+ from .formats import get_format
+ from .devicelibs import btrfs
+ from .devicelibs import mdraid
+@@ -70,9 +70,6 @@ def is_supported_device_type(device_type):
+     :returns: True if this device type is supported
+     :rtype: bool
+     """
+-    if device_type == DEVICE_TYPE_LVM_VDO:
+-        return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
+-
+     devices = []
+     if device_type == DEVICE_TYPE_BTRFS:
+         devices = [BTRFSDevice]
+@@ -84,6 +81,8 @@ def is_supported_device_type(device_type):
+         devices = [PartitionDevice]
+     elif device_type == DEVICE_TYPE_MD:
+         devices = [MDRaidArrayDevice]
++    elif device_type == DEVICE_TYPE_LVM_VDO:
++        devices = [LVMLogicalVolumeDevice, LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]
+
+     return not any(c.unavailable_type_dependencies() for c in devices)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index 785fa2d2..ac900bf3 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -1804,6 +1804,17 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
+         if not self.exists and self.size < self.min_size:
+             raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))
+
++    # these two methods are defined in Device but LVMVDOPoolMixin doesn't inherit from
++    # it and we can't have this code in LVMLogicalVolumeDevice because we need to be able
++    # to get dependencies without creating instance of the class
++    @classmethod
++    def type_external_dependencies(cls):
++        return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
++
++    @classmethod
++    def unavailable_type_dependencies(cls):
++        return set(e for e in cls.type_external_dependencies() if not e.available)
++
+     @property
+     def is_vdo_pool(self):
+         return self.seg_type == "vdo-pool"
+@@ -1926,6 +1937,17 @@ def _check_parents(self):
+         if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
+             raise
ValueError("constructor requires a vdo-pool LV") + ++ # these two methods are defined in Device but LVMVDOLogicalVolumeMixin doesn't inherit ++ # from it and we can't have this code in LVMLogicalVolumeDevice because we need to be ++ # able to get dependencies without creating instance of the class ++ @classmethod ++ def type_external_dependencies(cls): ++ return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies() ++ ++ @classmethod ++ def unavailable_type_dependencies(cls): ++ return set(e for e in cls.type_external_dependencies() if not e.available) ++ + @property + def vg_space_used(self): + return Size(0) # the pool's size is already accounted for in the vg +@@ -2217,6 +2239,15 @@ def _convert_from_lvs(self): + """Convert the LVs to create this LV from into its internal LVs""" + raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type) + ++ @property ++ def external_dependencies(self): ++ deps = super(LVMLogicalVolumeBase, self).external_dependencies ++ if self.is_vdo_pool: ++ deps.update(LVMVDOPoolMixin.type_external_dependencies()) ++ if self.is_vdo_lv: ++ deps.update(LVMVDOLogicalVolumeMixin.type_external_dependencies()) ++ return deps ++ + @property + @type_specific + def vg(self): +diff --git a/tests/action_test.py b/tests/action_test.py +index 77176f46..38a2e872 100644 +--- a/tests/action_test.py ++++ b/tests/action_test.py +@@ -18,6 +18,8 @@ + from blivet.devices import MDRaidArrayDevice + from blivet.devices import LVMVolumeGroupDevice + from blivet.devices import LVMLogicalVolumeDevice ++from blivet.devices.lvm import LVMVDOPoolMixin ++from blivet.devices.lvm import LVMVDOLogicalVolumeMixin + + # format classes + from blivet.formats.fs import Ext2FS +@@ -1252,6 +1254,11 @@ def test_lv_from_lvs_actions(self): + self.assertEqual(set(self.storage.lvs), {pool}) + self.assertEqual(set(pool._internal_lvs), {lv1, lv2}) + ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test") ++@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test") ++class DeviceActionLVMVDOTestCase(DeviceActionTestCase): ++ + def test_lvm_vdo_destroy(self): + self.destroy_all_devices() + sdc = self.storage.devicetree.get_device_by_name("sdc") +diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py +index a1334cda..e4210ead 100644 +--- a/tests/devicefactory_test.py ++++ b/tests/devicefactory_test.py +@@ -592,6 +592,8 @@ def _validate_factory_device(self, *args, **kwargs): + @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) + @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) + @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) + def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ + device_type = self.device_type + kwargs = {"disks": self.b.disks, +@@ -650,6 +652,36 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen + # change fstype + kwargs["fstype"] = "xfs" + ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) 
++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_factory_defaults(self, *args): # pylint: disable=unused-argument ++ super(LVMVDOFactoryTestCase, self).test_factory_defaults() ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_get_free_disk_space(self, *args): ++ super(LVMVDOFactoryTestCase, self).test_get_free_disk_space() ++ ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True) ++ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True) ++ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[]) ++ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set()) ++ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set()) ++ def test_normalize_size(self, *args): # pylint: disable=unused-argument ++ super(LVMVDOFactoryTestCase, self).test_normalize_size() ++ + + class MDFactoryTestCase(DeviceFactoryTestCase): + device_type = devicefactory.DEVICE_TYPE_MD +diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py +index 78b140ba..d938144d 100644 +--- a/tests/devices_test/lvm_test.py ++++ b/tests/devices_test/lvm_test.py +@@ -10,10 +10,13 @@ + from blivet.devices import StorageDevice + from blivet.devices import LVMLogicalVolumeDevice + from blivet.devices import LVMVolumeGroupDevice ++from blivet.devices.lvm import LVMVDOPoolMixin ++from blivet.devices.lvm import LVMVDOLogicalVolumeMixin + from blivet.devices.lvm import LVMCacheRequest + from blivet.devices.lvm import LVPVSpec, LVMInternalLVtype + from blivet.size import Size + from blivet.devicelibs import raid ++from blivet import devicefactory + from blivet import errors + + DEVICE_CLASSES = [ +@@ -690,6 +693,10 @@ def test_new_lv_from_non_existing_lvs(self): + pool.create() + self.assertTrue(lvm.thpool_convert.called) + ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test") ++class BlivetNewLVMVDODeviceTest(unittest.TestCase): ++ + def test_new_vdo_pool(self): + b = blivet.Blivet() + pv = StorageDevice("pv1", 
fmt=blivet.formats.get_format("lvmpv"), +@@ -726,3 +733,102 @@ def test_new_vdo_pool(self): + self.assertEqual(vdopool.children[0], vdolv) + self.assertEqual(vdolv.parents[0], vdopool) + self.assertListEqual(vg.lvs, [vdopool, vdolv]) ++ ++ ++@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test") ++class BlivetLVMVDODependenciesTest(unittest.TestCase): ++ def test_vdo_dependencies(self): ++ blivet.tasks.availability.CACHE_AVAILABILITY = False ++ ++ b = blivet.Blivet() ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ ++ for dev in (pv, vg): ++ b.devicetree._add_device(dev) ++ ++ # check that all the above devices are in the expected places ++ self.assertEqual(set(b.devices), {pv, vg}) ++ self.assertEqual(set(b.vgs), {vg}) ++ ++ self.assertEqual(vg.size, Size("10236 MiB")) ++ ++ vdopool = b.new_lv(name="vdopool", vdo_pool=True, ++ parents=[vg], compression=True, ++ deduplication=True, ++ size=blivet.size.Size("8 GiB")) ++ ++ vdolv = b.new_lv(name="vdolv", vdo_lv=True, ++ parents=[vdopool], ++ size=blivet.size.Size("40 GiB")) ++ ++ # Dependencies check: for VDO types these should be combination of "normal" ++ # LVM dependencies (LVM libblockdev plugin + kpartx and DM plugin from DMDevice) ++ # and LVM VDO technology from the LVM plugin ++ lvm_vdo_dependencies = ["kpartx", ++ "libblockdev dm plugin", ++ "libblockdev lvm plugin", ++ "libblockdev lvm plugin (vdo technology)"] ++ pool_deps = [d.name for d in vdopool.external_dependencies] ++ six.assertCountEqual(self, pool_deps, lvm_vdo_dependencies) ++ ++ vdolv_deps = [d.name for d in vdolv.external_dependencies] ++ six.assertCountEqual(self, vdolv_deps, lvm_vdo_dependencies) ++ ++ # same dependencies should be returned when checking with class not instance ++ pool_type_deps = [d.name for d in LVMVDOPoolMixin.type_external_dependencies()] ++ six.assertCountEqual(self, pool_type_deps, lvm_vdo_dependencies) ++ ++ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.type_external_dependencies()] ++ six.assertCountEqual(self, vdolv_type_deps, lvm_vdo_dependencies) ++ ++ # just to be sure LVM VDO specific code didn't break "normal" LVs ++ normallv = b.new_lv(name="lvol0", ++ parents=[vg], ++ size=blivet.size.Size("1 GiB")) ++ ++ normalvl_deps = [d.name for d in normallv.external_dependencies] ++ six.assertCountEqual(self, normalvl_deps, ["kpartx", ++ "libblockdev dm plugin", ++ "libblockdev lvm plugin"]) ++ ++ with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies", ++ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]): ++ ++ pool_deps = [d.name for d in vdopool.unavailable_dependencies] ++ self.assertEqual(pool_deps, ["VDO unavailability test"]) ++ ++ vdolv_deps = [d.name for d in vdolv.unavailable_dependencies] ++ self.assertEqual(vdolv_deps, ["VDO unavailability test"]) ++ ++ # same dependencies should be returned when checking with class not instance ++ pool_type_deps = [d.name for d in LVMVDOPoolMixin.unavailable_type_dependencies()] ++ six.assertCountEqual(self, pool_type_deps, ["VDO unavailability test"]) ++ ++ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.unavailable_type_dependencies()] ++ 
++
++                normallv_deps = [d.name for d in normallv.unavailable_dependencies]
++                self.assertEqual(normallv_deps, [])
++
++                with self.assertRaises(errors.DependencyError):
++                    b.create_device(vdopool)
++                    b.create_device(vdolv)
++
++                b.create_device(normallv)
++
++    def test_vdo_dependencies_devicefactory(self):
++        with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
++                   new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
++            with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
++                       new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
++
++                # shouldn't affect "normal" LVM
++                lvm_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM)
++                self.assertTrue(lvm_supported)
++
++                vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
++                self.assertFalse(vdo_supported)
+
+From c7fb125ec552ee5070f8180f92fe5545709192ff Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 11 Dec 2020 15:02:05 +0100
+Subject: [PATCH 17/17] Bump required libblockdev version to 2.24
+
+LVM VDO support was added in 2.24.
+---
+ python-blivet.spec | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/python-blivet.spec b/python-blivet.spec
+index ffd4210e..58cad0b2 100644
+--- a/python-blivet.spec
++++ b/python-blivet.spec
+@@ -36,7 +36,7 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
+ %global partedver 1.8.1
+ %global pypartedver 3.10.4
+ %global utillinuxver 2.15.1
+-%global libblockdevver 2.19
++%global libblockdevver 2.24
+ %global libbytesizever 0.3
+ %global pyudevver 0.18
+
diff --git a/SPECS/python-blivet.spec b/SPECS/python-blivet.spec
index 80b222c..a85e8d6 100644
--- a/SPECS/python-blivet.spec
+++ b/SPECS/python-blivet.spec
@@ -23,7 +23,7 @@ Version: 3.2.2
 
 #%%global prerelease .b2
 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 3%{?prerelease}%{?dist}
+Release: 9%{?prerelease}%{?dist}
 Epoch: 1
 License: LGPLv2+
 Group: System Environment/Libraries
@@ -36,6 +36,20 @@ Patch1: 0002-remove-btrfs-plugin.patch
 Patch2: 0003-Skip-test_mounting-for-filesystems-that-are-not-moun.patch
 Patch3: 0004-Add-extra-sleep-after-pvremove-call.patch
 Patch4: 0005-Round-down-to-nearest-MiB-value-when-writing-ks-parittion-info.ks
+Patch5: 0006-Blivet-RHEL-8.3-localization-update.patch
+Patch6: 0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch
+Patch7: 0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch
+Patch8: 0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch
+Patch9: 0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch
+Patch10: 0011-Fix-ignoring-disk-devices-with-parents-or-children.patch
+Patch11: 0012-xfs-grow-support.patch
+Patch12: 0013-Do-not-limit-swap-to-128-GiB.patch
+Patch13: 0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch
+Patch14: 0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch
+Patch15: 0016-Basic-LVM-VDO-support.patch
+Patch16: 0017-Let-parted-fix-fixable-issues-with-partition-table.patch
+Patch17: 0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch
+Patch18: 0019-LVM-VDO-support.patch
 
 # Versions of required components (done so we make sure the buildrequires
 # match the requires versions of things).
@@ -197,6 +211,46 @@ configuration.
 %endif
 
 %changelog
+* Tue Feb 9 2021 Vojtech Trefny - 3.2.2-9
+- LVM VDO support
+  Resolves: rhbz#1509337
+
+* Mon Jan 11 2021 Vojtech Trefny - 3.2.2-8
+- Let parted fix fixable issues with partition table
+  Resolves: rhbz#1846869
+- Fix possible UnicodeDecodeError when reading sysfs attributes
+  Resolves: rhbz#1849326
+
+* Wed Nov 18 2020 Vojtech Trefny - 3.2.2-7
+- Add support for XFS format grow
+  Resolves: rhbz#1862349
+- Do not limit swap to 128 GiB
+  Resolves: rhbz#1656485
+- Use UnusableConfigurationError for partially hidden multipath devices
+  Resolves: rhbz#1877052
+- Fix possible UnicodeDecodeError when reading model from sysfs
+  Resolves: rhbz#1849326
+- Add basic support for LVM VDO devices
+  Resolves: rhbz#1828745
+
+* Thu Aug 20 2020 Vojtech Trefny - 3.2.2-6
+- Fix name resolution for MD devices and partitions on them
+  Resolves: rhbz#1862904
+- Fix ignoring disk devices with parents or children
+  Resolves: rhbz#1866243
+
+* Thu Jul 16 2020 Vojtech Trefny - 3.2.2-5
+- Set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
+  Resolves: rhbz#1855200
+- Do not use BlockDev.utils_have_kernel_module to check for modules
+  Resolves: rhbz#1855344
+
+* Thu Jul 09 2020 Vojtech Trefny - 3.2.2-4
+- Blivet RHEL 8.3 localization update
+  Resolves: rhbz#1820565
+- Do not use FSAVAIL and FSUSE% options when running lsblk
+  Resolves: rhbz#1853624
+
 * Tue Jun 30 2020 Vojtech Trefny - 3.2.2-3
 - Round down to nearest MiB value when writing ks partition info
   Resolves: rhbz#1850670
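
For reference, the end-to-end LVM VDO workflow enabled by 0016-Basic-LVM-VDO-support.patch and 0019-LVM-VDO-support.patch looks roughly like the following blivet session. This is a minimal sketch distilled from the new test cases above, not code from the patches themselves: the VG name "testvg" and all sizes are illustrative, and an actual run needs root privileges plus libblockdev >= 2.24 with the LVM plugin's VDO technology.

import blivet
from blivet import devicefactory
from blivet.size import Size

# bail out early if this storage stack cannot do LVM VDO
if not devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
    raise SystemExit("LVM VDO is not supported on this system")

b = blivet.Blivet()
b.reset()  # scan the system's existing storage

vg = b.devicetree.get_device_by_name("testvg")  # an existing LVM VG

# a VDO pool LV with compression and deduplication enabled ...
vdopool = b.new_lv(name="vdopool", vdo_pool=True, parents=[vg],
                   compression=True, deduplication=True,
                   size=Size("8 GiB"))

# ... and a VDO LV on top of it; with deduplication and compression
# it may advertise more space than the pool physically has
vdolv = b.new_lv(name="vdolv", vdo_lv=True, parents=[vdopool],
                 size=Size("40 GiB"))

b.create_device(vdopool)
b.create_device(vdolv)
b.do_it()  # execute the queued actions

As the new dependency tests exercise, create_device() raises errors.DependencyError for the VDO devices when the "libblockdev lvm plugin (vdo technology)" dependency is unavailable, while plain LVs remain unaffected.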