From 8971e917a7fcec0cefd133fd7a3cf901bf96f225 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jan 21 2020 20:33:45 +0000 Subject: import rhel-system-roles-1.0-10.el8_1 --- diff --git a/.gitignore b/.gitignore index 39c3d7d..37f29fa 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ SOURCES/kdump-0c2bb28.tar.gz SOURCES/network-d5891d4.tar.gz SOURCES/postfix-0.1.tar.gz SOURCES/selinux-6cd1ec8.tar.gz +SOURCES/storage-1594e25.tar.gz SOURCES/timesync-924650d.tar.gz diff --git a/.rhel-system-roles.metadata b/.rhel-system-roles.metadata index 4a8398a..bd9b6a5 100644 --- a/.rhel-system-roles.metadata +++ b/.rhel-system-roles.metadata @@ -2,4 +2,5 @@ 530aaa9302d90c278b9e1c8d8513e516494e3380 SOURCES/network-d5891d4.tar.gz 66c82331f4ac9598c506c3999965b4d07dbfe49d SOURCES/postfix-0.1.tar.gz 246383bd6823533ed3a51a0501b75e38ba852908 SOURCES/selinux-6cd1ec8.tar.gz +aa1c37b04cef831148d9834033fe414156ba62df SOURCES/storage-1594e25.tar.gz ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d.tar.gz diff --git a/SOURCES/rhel-system-roles-storage-prefix.diff b/SOURCES/rhel-system-roles-storage-prefix.diff new file mode 100644 index 0000000..8bd9ea1 --- /dev/null +++ b/SOURCES/rhel-system-roles-storage-prefix.diff @@ -0,0 +1,13 @@ +diff --git a/README.md b/README.md +index c2debc9..d9e40b3 100644 +--- a/README.md ++++ b/README.md +@@ -81,7 +81,7 @@ Example Playbook + - hosts: all + + roles: +- - name: linux-system-roles.storage ++ - name: rhel-system-roles.storage + storage_pools: + - name: app + disks: diff --git a/SOURCES/storage-safemode.diff b/SOURCES/storage-safemode.diff new file mode 100644 index 0000000..90d3c02 --- /dev/null +++ b/SOURCES/storage-safemode.diff @@ -0,0 +1,1021 @@ +diff --git a/README.md b/README.md +index c2debc9..f808adc 100644 +--- a/README.md ++++ b/README.md +@@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount + ##### `mount_options` + The `mount_options` specifies custom mount options as a string, e.g.: 'ro'. + ++#### `storage_safe_mode` ++When true (the default), an error will occur instead of automatically removing existing devices and/or formatting. ++ + + Example Playbook + ---------------- +diff --git a/defaults/main.yml b/defaults/main.yml +index 7b500e5..476616b 100644 +--- a/defaults/main.yml ++++ b/defaults/main.yml +@@ -3,6 +3,7 @@ + storage_provider: "blivet" + storage_use_partitions: null + storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type ++storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting + + storage_pool_defaults: + state: "present" +diff --git a/library/blivet.py b/library/blivet.py +index d416944..1d8cd36 100644 +--- a/library/blivet.py ++++ b/library/blivet.py +@@ -31,6 +31,9 @@ options: + disklabel_type: + description: + - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet ++ safe_mode: ++ description: ++ - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting + + author: + - David Lehman (dlehman@redhat.com) +@@ -112,13 +115,15 @@ if BLIVET_PACKAGE: + + use_partitions = None # create partitions on pool backing device disks? 
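[Editorial note: the storage_safe_mode option introduced by the README and defaults/main.yml hunks above is consumed as an ordinary role variable. A minimal playbook sketch follows, assuming the rhel-system-roles.storage role name from the prefix patch; the pool name 'app' matches the README example, while the disk 'sdb' and the volume details are illustrative, not taken from this patch.]

- hosts: all
  become: true
  roles:
    - name: rhel-system-roles.storage
      # storage_safe_mode defaults to true: the run fails rather than
      # destroying existing formatting or devices. Set it to false only
      # when the role is expected to reformat or remove things.
      storage_safe_mode: false
      storage_pools:
        - name: app            # pool name from the README example
          disks: ['sdb']       # hypothetical unused disk
          volumes:
            - name: data       # hypothetical volume
              size: '1g'
              mount_point: '/opt/app'
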
+ disklabel_type = None # user-specified disklabel type ++safe_mode = None # do not remove any existing devices or formatting ++packages_only = None # only set things up enough to get a list of required packages + + + class BlivetAnsibleError(Exception): + pass + + +-class BlivetVolume: ++class BlivetVolume(object): + def __init__(self, blivet_obj, volume, bpool=None): + self._blivet = blivet_obj + self._volume = volume +@@ -206,11 +211,16 @@ class BlivetVolume: + + def _reformat(self): + """ Schedule actions as needed to ensure the volume is formatted as specified. """ ++ global packages_only ++ + fmt = self._get_format() + if self._device.format.type == fmt.type: + return + +- if self._device.format.status: ++ if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): ++ raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) ++ ++ if self._device.format.status and not packages_only: + self._device.format.teardown() + self._blivet.format_device(self._device, fmt) + +@@ -251,6 +261,19 @@ class BlivetDiskVolume(BlivetVolume): + def _type_check(self): + return self._device.is_disk + ++ def _look_up_device(self): ++ super(BlivetDiskVolume, self)._look_up_device() ++ if not self._get_device_id(): ++ # FAIL: no disks specified for volume ++ raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name']) # sure about this one? ++ elif not isinstance(self._volume['disks'], list): ++ raise BlivetAnsibleError("volume disks must be specified as a list") ++ ++ if self._device is None: ++ # FAIL: failed to find the disk ++ raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks'])) ++ ++ + + class BlivetPartitionVolume(BlivetVolume): + def _type_check(self): +@@ -342,7 +365,7 @@ def _get_blivet_volume(blivet_obj, volume, bpool=None): + return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool) + + +-class BlivetPool: ++class BlivetPool(object): + def __init__(self, blivet_obj, pool): + self._blivet = blivet_obj + self._pool = pool +@@ -424,8 +447,11 @@ class BlivetPool: + """ Schedule actions as needed to ensure pool member devices exist. 
""" + members = list() + for disk in self._disks: +- if not disk.isleaf: +- self._blivet.devicetree.recursive_remove(disk) ++ if not disk.isleaf or disk.format.type is not None: ++ if not safe_mode: ++ self._blivet.devicetree.recursive_remove(disk) ++ else: ++ raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) + + if use_partitions: + label = get_format("disklabel", device=disk.path) +@@ -486,7 +512,10 @@ class BlivetPartitionPool(BlivetPool): + def _create(self): + if self._device.format.type != "disklabel" or \ + self._device.format.label_type != disklabel_type: +- self._blivet.devicetree.recursive_remove(self._device, remove_device=False) ++ if not safe_mode: ++ self._blivet.devicetree.recursive_remove(self._device, remove_device=False) ++ else: ++ raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (self._device.name, self._pool['name'])) + + label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) + self._blivet.format_device(self._device, label) +@@ -520,7 +549,7 @@ class BlivetLVMPool(BlivetPool): + + + _BLIVET_POOL_TYPES = { +- "disk": BlivetPartitionPool, ++ "partition": BlivetPartitionPool, + "lvm": BlivetLVMPool + } + +@@ -550,7 +579,7 @@ def manage_pool(b, pool): + volume['_mount_id'] = bvolume._volume.get('_mount_id', '') + + +-class FSTab: ++class FSTab(object): + def __init__(self, blivet_obj): + self._blivet = blivet_obj + self._entries = list() +@@ -656,6 +685,7 @@ def run_module(): + volumes=dict(type='list'), + packages_only=dict(type='bool', required=False, default=False), + disklabel_type=dict(type='str', required=False, default=None), ++ safe_mode=dict(type='bool', required=False, default=False), + use_partitions=dict(type='bool', required=False, default=True)) + + # seed the result dict in the object +@@ -684,6 +714,12 @@ def run_module(): + global use_partitions + use_partitions = module.params['use_partitions'] + ++ global safe_mode ++ safe_mode = module.params['safe_mode'] ++ ++ global packages_only ++ packages_only = module.params['packages_only'] ++ + b = Blivet() + b.reset() + fstab = FSTab(b) +diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml +index 061195c..65b8580 100644 +--- a/tasks/main-blivet.yml ++++ b/tasks/main-blivet.yml +@@ -38,7 +38,7 @@ + _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]" + _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]" + _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']" +- loop: "{{ _storage_pools|subelements('volumes') }}" ++ loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}" + when: storage_pools is defined + + - name: Apply defaults to pools and volumes [3/6] +@@ -85,6 +85,15 @@ + - debug: + var: _storage_volumes + ++- name: load mount facts ++ setup: ++ gather_subset: '!all,!min,mounts' ++ register: __storage_mounts_before_packages ++ ++# - name: show mounts before get required packages ++# debug: ++# var: __storage_mounts_before_packages ++ + - name: get required packages + blivet: + pools: "{{ _storage_pools }}" +@@ -94,6 +103,30 @@ + packages_only: true + register: package_info + ++- name: load mount facts ++ setup: ++ gather_subset: '!all,!min,mounts' ++ register: __storage_mounts_after_packages ++ ++- name: detect mount alteration by 'get required packages' ++ block: ++ - name: show 
mounts before manage the pools and volumes ++ debug: ++ var: __storage_mounts_before_packages.ansible_facts.ansible_mounts ++ ++ - name: show mounts after manage the pools and volumes ++ debug: ++ var: __storage_mounts_after_packages.ansible_facts.ansible_mounts ++ ++ - name: fail if mounts changed ++ fail: ++ msg: "get required packages changed mounts. Changed status is ++ {{ package_info.changed }}" ++ when: ++ - __storage_mounts_before_packages.ansible_facts.ansible_mounts | ++ count != ++ __storage_mounts_after_packages.ansible_facts.ansible_mounts | count ++ + - name: make sure required packages are installed + package: + name: "{{ package_info.packages }}" +@@ -105,6 +138,7 @@ + volumes: "{{ _storage_volumes }}" + use_partitions: "{{ storage_use_partitions }}" + disklabel_type: "{{ storage_disklabel_type }}" ++ safe_mode: "{{ storage_safe_mode }}" + register: blivet_output + + - debug: +diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml +index 9f4c5d2..79e952a 100644 +--- a/tests/get_unused_disk.yml ++++ b/tests/get_unused_disk.yml +@@ -9,12 +9,10 @@ + unused_disks: "{{ unused_disks_return.disks }}" + when: "'Unable to find unused disk' not in unused_disks_return.disks" + +-- block: +- - name: Exit playbook when there's no unused disks in the system +- debug: +- msg: "Unable to find unused disks. Exiting playbook." +- - meta: end_play +- when: unused_disks is undefined ++- name: Exit playbook when there's not enough unused disks in the system ++ fail: ++ msg: "Unable to find enough unused disks. Exiting playbook." ++ when: unused_disks is undefined or unused_disks|length < disks_needed|default(1) + + - name: Print unused disks + debug: +diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml +index b6aa80b..f7962c6 100644 +--- a/tests/tests_change_disk_fs.yml ++++ b/tests/tests_change_disk_fs.yml +@@ -2,6 +2,7 @@ + - hosts: all + become: true + vars: ++ storage_safe_mode: false + mount_location: '/opt/test' + volume_size: '5g' + fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" +diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml +index cca23eb..b88e768 100644 +--- a/tests/tests_change_fs.yml ++++ b/tests/tests_change_fs.yml +@@ -2,6 +2,7 @@ + - hosts: all + become: true + vars: ++ storage_safe_mode: false + mount_location: '/opt/test1' + volume_size: '5g' + fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}" +diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml +index e4aa76c..eb93c11 100644 +--- a/tests/tests_change_fs_use_partitions.yml ++++ b/tests/tests_change_fs_use_partitions.yml +@@ -2,6 +2,7 @@ + - hosts: all + become: true + vars: ++ storage_safe_mode: false + storage_use_partitions: true + mount_location: '/opt/test1' + volume_size: '5g' +diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml +index b19ae35..c5290eb 100644 +--- a/tests/tests_create_disk_then_remove.yml ++++ b/tests/tests_create_disk_then_remove.yml +@@ -2,6 +2,7 @@ + - hosts: all + become: true + vars: ++ storage_safe_mode: false + mount_location: '/opt/test1' + + tasks: +diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml +index 6b25939..f2c06fb 100644 +--- a/tests/tests_create_lvm_pool_then_remove.yml ++++ b/tests/tests_create_lvm_pool_then_remove.yml +@@ -2,6 +2,7 @@ + - 
hosts: all + become: true + vars: ++ storage_safe_mode: false + mount_location1: '/opt/test1' + mount_location2: '/opt/test2' + volume_group_size: '10g' +diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml +index 40b3e62..ae589d3 100644 +--- a/tests/tests_create_partition_volume_then_remove.yml ++++ b/tests/tests_create_partition_volume_then_remove.yml +@@ -2,6 +2,7 @@ + - hosts: all + become: true + vars: ++ storage_safe_mode: false + mount_location: '/opt/test1' + + tasks: +@@ -18,7 +19,7 @@ + vars: + storage_pools: + - name: "{{ unused_disks[0] }}" +- type: disk ++ type: partition + disks: "{{ unused_disks }}" + volumes: + - name: test1 +@@ -33,7 +34,7 @@ + vars: + storage_pools: + - name: "{{ unused_disks[0] }}" +- type: disk ++ type: partition + disks: "{{ unused_disks }}" + volumes: + - name: test1 +@@ -48,7 +49,7 @@ + vars: + storage_pools: + - name: "{{ unused_disks[0] }}" +- type: disk ++ type: partition + disks: "{{ unused_disks }}" + state: absent + volumes: +@@ -65,7 +66,7 @@ + vars: + storage_pools: + - name: "{{ unused_disks[0] }}" +- type: disk ++ type: partition + disks: "{{ unused_disks }}" + state: absent + volumes: +diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml +index 36eec41..7112f6e 100644 +--- a/tests/tests_disk_errors.yml ++++ b/tests/tests_disk_errors.yml +@@ -3,8 +3,17 @@ + become: true + vars: + mount_location: '/opt/test1' ++ testfile: "{{ mount_location }}/quux" + + tasks: ++ - include_role: ++ name: storage ++ ++ - include_tasks: get_unused_disk.yml ++ vars: ++ min_size: "10g" ++ max_return: 1 ++ + - name: Verify that the play fails with the expected error message + block: + - name: Create a disk volume mounted at "{{ mount_location }}" +@@ -14,11 +23,246 @@ + storage_volumes: + - name: test1 + type: disk +- disks: "['/dev/surelyidonotexist']" ++ disks: ['/dev/surelyidonotexist'] + mount_point: "{{ mount_location }}" + +- - name: Check the error output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly, ++ # blivet_output.failed is false. ++ # - name: Show the error output ++ # debug: ++ # msg: "{{ blivet_output.failed }}" ++ ++ # - name: Check the error output ++ # assert: ++ # that: blivet_output.failed | bool ++ # msg: "Expected error message not found for missing disk" ++ ++ - name: Create a file system on disk ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext4' ++ disks: "{{ unused_disks }}" ++ mount_point: "{{ mount_location }}" ++ ++ - name: create a file ++ file: ++ path: "{{ testfile }}" ++ state: touch ++ ++ - name: Test for correct handling of safe_mode ++ block: ++ - name: Try to replace the file system on disk in safe mode ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext3' ++ disks: "{{ unused_disks }}" ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! 
needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and ++ not blivet_output.changed" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Unmount file system ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext4' ++ disks: "{{ unused_disks }}" ++ mount_point: none ++ ++ - name: Test for correct handling of safe_mode with unmounted filesystem ++ block: ++ - name: Try to replace the file system on disk in safe mode ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext3' ++ disks: "{{ unused_disks }}" ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed }}" +- msg: "Expected error message not found for missing disk" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and ++ not blivet_output.changed" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Remount file system ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext4' ++ disks: "{{ unused_disks }}" ++ mount_point: "{{ mount_location }}" ++ ++ - name: stat the file ++ stat: ++ path: "{{ testfile }}" ++ register: stat_r ++ ++ - name: assert file presence ++ assert: ++ that: ++ stat_r.stat.isreg is defined and stat_r.stat.isreg ++ msg: "data lost!" ++ ++ - name: Test for correct handling of safe_mode ++ block: ++ - name: Try to create a partition pool on the disk already containing a file system in safe_mode ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: partition ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and ++ not blivet_output.changed" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Test for correct handling of safe_mode with existing filesystem ++ block: ++ - name: Try to create LVM pool on disk that already belongs to an existing filesystem ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: lvm ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! 
needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "{{ blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and ++ not blivet_output.changed }}" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: stat the file ++ stat: ++ path: "{{ testfile }}" ++ register: stat_r ++ ++ - name: assert file presence ++ assert: ++ that: ++ stat_r.stat.isreg is defined and stat_r.stat.isreg ++ msg: "data lost!" ++ ++ - name: Create a partition pool on the disk already containing a file system w/o safe_mode ++ include_role: ++ name: storage ++ vars: ++ storage_safe_mode: false ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: partition ++ ++ - name: Verify the output ++ assert: ++ that: not blivet_output.failed ++ msg: "failed to create partition pool over existing file system w/o safe_mode" ++ ++ - name: Clean up ++ include_role: ++ name: storage ++ vars: ++ storage_safe_mode: false ++ storage_pools: ++ - name: foo ++ type: partition ++ disks: "{{ unused_disks }}" ++ state: absent +diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml +index ab23674..e8be153 100644 +--- a/tests/tests_lvm_errors.yml ++++ b/tests/tests_lvm_errors.yml +@@ -33,13 +33,32 @@ + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + +- - name: Verify the output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and +- not blivet_output.changed }}" +- msg: "Unexpected behavior w/ non-existent pool disk" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - debug: ++ # msg: "{{ 'failed: ' + blivet_output.failed | string + ++ # 'msg: ' + blivet_output.msg + ++ # 'changed: ' + blivet_output.changed | string }}" ++ ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ non-existent pool disk" + + - name: Test for correct handling of invalid size specification. + block: +@@ -55,13 +74,27 @@ + size: "{{ invalid_size }}" + mount_point: "{{ mount_location1 }}" + +- - name: Verify the output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('invalid size.+for volume') and +- not blivet_output.changed }}" +- msg: "Unexpected behavior w/ invalid volume size" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('invalid size.+for volume') and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ invalid volume size" + + - name: Test for correct handling of too-large volume size. 
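[Editorial note: the rewritten error tests above drop ignore_errors in favor of a block/rescue pattern — an intentionally unreachable fail task runs after the role call, and the rescue asserts that the failure came from the role itself rather than from that sentinel. A condensed sketch of the pattern, taken from the hunks above; the nonexistent disk and mount point are the ones the test uses.]

- name: Verify that the play fails with the expected error message
  block:
    - name: Try the role with a disk that cannot exist
      include_role:
        name: storage
      vars:
        storage_volumes:
          - name: test1
            type: disk
            disks: ['/dev/surelyidonotexist']
            mount_point: /opt/test1
    - name: UNREACH
      fail:
        msg: "this should be unreachable"
  rescue:
    - name: Check that we failed in the role
      assert:
        that:
          - ansible_failed_task.name != 'UNREACH'
        msg: "Role has not failed when it should have"
      vars:
        # needed so ansible_failed_task can be expanded
        storage_provider: blivet
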
+ block: +@@ -77,13 +110,27 @@ + size: "{{ too_large_size }}" + mount_point: "{{ mount_location1 }}" + +- - name: Verify the output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('size.+exceeds.+space in pool') and +- not blivet_output.changed }}" +- msg: "Unexpected behavior w/ too-large volume size" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('size.+exceeds.+space in pool') and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ too-large volume size" + + - name: Test for correct handling of non-list disk specification. + block: +@@ -99,13 +146,27 @@ + size: "{{ too_large_size }}" + mount_point: "{{ mount_location1 }}" + +- - name: Verify the output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('disk.+list') and +- not blivet_output.changed }}" +- msg: "Unexpected behavior w/ disks not in list form" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('disk.+list') and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ disks not in list form" + + - name: Test for correct handling of missing disk specification. + block: +@@ -121,13 +182,27 @@ + size: "{{ too_large_size }}" + mount_point: "{{ mount_location1 }}" + +- - name: Verify the output ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role + assert: +- that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('no disks.+pool') and +- not blivet_output.changed }}" +- msg: "Unexpected behavior w/ no disks specified" +- ignore_errors: yes ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('no disks.+pool') and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ no disks specified" + + - name: Test for correct handling of LVM volume not defined within a pool. + block: +@@ -142,10 +217,179 @@ + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! 
needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ # the following does not work properly ++ # - name: Verify the output ++ # assert: ++ # that: "{{ blivet_output.failed and ++ # blivet_output.msg|regex_search('failed to find pool .+ for volume') and ++ # not blivet_output.changed }}" ++ # msg: "Unexpected behavior w/ LVM volume defined outside of any pool" ++ ++ - name: Create a pool ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: testpool1 ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: testvol1 ++ fs_type: 'ext4' ++ size: '1g' ++ ++ - name: Test for correct handling of safe_mode ++ block: ++ - name: Try to replace file system in safe mode ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: testpool1 ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: testvol1 ++ fs_type: 'ext3' ++ size: '1g' ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ + - name: Verify the output + assert: + that: "{{ blivet_output.failed and +- blivet_output.msg|regex_search('failed to find pool .+ for volume') and ++ blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" +- msg: "Unexpected behavior w/ LVM volume defined outside of any pool" +- ignore_errors: yes ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Test for correct handling of safe_mode with resize ++ block: ++ - name: Try to resize in safe mode ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: testpool1 ++ type: lvm ++ disks: "{{ unused_disks }}" ++ volumes: ++ - name: testvol1 ++ fs_type: 'ext4' ++ size: '2g' ++ ++ - name: Verify the output ++ assert: ++ that: "{{ not blivet_output.failed and blivet_output.changed }}" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ when: false ++ ++ - name: Test for correct handling of safe_mode with existing pool ++ block: ++ - name: Try to create LVM pool on disks that already belong to an existing pool ++ include_role: ++ name: storage ++ vars: ++ storage_pools: ++ - name: foo ++ disks: "{{ unused_disks }}" ++ type: lvm ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! 
needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "{{ blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and ++ not blivet_output.changed }}" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Test for correct handling of safe_mode ++ block: ++ - name: Try to replace a pool by a file system on disk in safe mode ++ include_role: ++ name: storage ++ vars: ++ storage_volumes: ++ - name: test1 ++ type: disk ++ fs_type: 'ext3' ++ disks: ++ - "{{ unused_disks[0] }}" ++ ++ - name: UNREACH ++ fail: ++ msg: "this should be unreachable" ++ ++ rescue: ++ - name: Check that we failed in the role ++ assert: ++ that: ++ - ansible_failed_task.name != 'UNREACH' ++ msg: "Role has not failed when it should have" ++ vars: ++ # Ugh! needed to expand ansible_failed_task ++ storage_provider: blivet ++ ++ - name: Verify the output ++ assert: ++ that: "blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and ++ not blivet_output.changed" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Verify the output ++ assert: ++ that: "blivet_output.failed and ++ blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and ++ not blivet_output.changed" ++ msg: "Unexpected behavior w/ existing data on specified disks" ++ ++ - name: Clean up ++ include_role: ++ name: storage ++ vars: ++ storage_safe_mode: false ++ storage_pools: ++ - name: testpool1 ++ type: lvm ++ disks: "{{ unused_disks }}" ++ state: absent +diff --git a/tests/tests_lvm_multiple_disks_multiple_volumes.yml b/tests/tests_lvm_multiple_disks_multiple_volumes.yml +index bbc7bb0..ca3968f 100644 +--- a/tests/tests_lvm_multiple_disks_multiple_volumes.yml ++++ b/tests/tests_lvm_multiple_disks_multiple_volumes.yml +@@ -15,13 +15,7 @@ + vars: + min_size: "{{ volume_group_size }}" + max_return: 2 +- +- - block: +- - debug: +- msg: "There needs to be two unused disks in the system to run this playbook." 
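[Editorial note: two behavioral changes from the hunks above combine in practice — pools backed directly by a partitioned disk are now declared with type: partition (the old spelling was disk), and writing over a disk that already carries formatting requires explicitly disabling safe mode. A sketch of the destructive variant, mirroring the tests above.]

- name: Create a partition pool over an existing file system (explicitly unsafe)
  include_role:
    name: storage
  vars:
    # With the default storage_safe_mode of true this would fail:
    # "cannot remove existing formatting and/or devices on disk ... in safe mode"
    storage_safe_mode: false
    storage_pools:
      - name: foo
        type: partition        # renamed from 'disk' by this patch
        disks: "{{ unused_disks }}"
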
+- - name: End playbook if there isn't two disks available +- meta: end_play +- when: unused_disks|length < 2 ++ disks_needed: 2 + + - name: Create a logical volume spanning two physical volumes that changes its mount location + include_role: diff --git a/SPECS/rhel-system-roles.spec b/SPECS/rhel-system-roles.spec index 302d307..e50c1db 100644 --- a/SPECS/rhel-system-roles.spec +++ b/SPECS/rhel-system-roles.spec @@ -5,7 +5,7 @@ Name: linux-system-roles %endif Summary: Set of interfaces for unified system management Version: 1.0 -Release: 7%{?dist} +Release: 10%{?dist} #Group: Development/Libraries License: GPLv3+ and MIT and BSD @@ -46,11 +46,16 @@ License: GPLv3+ and MIT and BSD %global rolename5 network #%%deftag 5 1.0.0 +%defcommit 6 1594e2527c4eca0fa2876d4cc3ff6395ed280b8d +%global rolename6 storage +#%%deftag 6 1.0.2 + Source: https://github.com/linux-system-roles/%{rolename0}/archive/%{id0}.tar.gz#/%{rolename0}-%{shortid0}.tar.gz Source1: https://github.com/linux-system-roles/%{rolename1}/archive/%{id1}.tar.gz#/%{rolename1}-%{shortid1}.tar.gz Source2: https://github.com/linux-system-roles/%{rolename2}/archive/%{id2}.tar.gz#/%{rolename2}-%{shortid2}.tar.gz Source3: https://github.com/linux-system-roles/%{rolename3}/archive/%{id3}.tar.gz#/%{rolename3}-%{shortid3}.tar.gz Source5: https://github.com/linux-system-roles/%{rolename5}/archive/%{id5}.tar.gz#/%{rolename5}-%{shortid5}.tar.gz +Source6: https://github.com/linux-system-roles/%{rolename6}/archive/%{id6}.tar.gz#/%{rolename6}-%{shortid6}.tar.gz Source8: md2html.sh @@ -59,6 +64,7 @@ Patch1: rhel-system-roles-%{rolename1}-prefix.diff Patch2: rhel-system-roles-%{rolename2}-prefix.diff Patch3: rhel-system-roles-%{rolename3}-prefix.diff Patch5: rhel-system-roles-%{rolename5}-prefix.diff +Patch6: rhel-system-roles-%{rolename6}-prefix.diff %endif Patch11: rhel-system-roles-postfix-pr5.diff @@ -74,6 +80,8 @@ Patch52: network-permissions.diff Patch53: network-tier1-tags.diff Patch54: rhel-system-roles-network-pr121.diff +Patch61: storage-safemode.diff + Url: https://github.com/linux-system-roles/ BuildArch: noarch @@ -100,7 +108,7 @@ of Fedora, Red Hat Enterprise Linux & CentOS. %endif %prep -%setup -qc -a1 -a2 -a3 -a5 +%setup -qc -a1 -a2 -a3 -a5 -a6 cd %{rolename0}-%{id0} %patch101 -p1 %patch102 -p1 @@ -131,6 +139,12 @@ cd %{rolename5}-%{id5} %patch53 -p1 %patch54 -p1 cd .. +cd %{rolename6}-%{id6} +%if "%{roleprefix}" != "linux-system-roles." +%patch6 -p1 +%endif +%patch61 -p1 +cd .. 
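[Editorial note: the tests/get_unused_disk.yml change above replaces the old end_play block with a hard failure plus a disks_needed threshold, so multi-disk tests can declare their requirement up front. A sketch of the caller side, as tests_lvm_multiple_disks_multiple_volumes.yml now does.]

- include_tasks: get_unused_disk.yml
  vars:
    min_size: "{{ volume_group_size }}"
    max_return: 2
    disks_needed: 2   # the play now fails unless at least two unused disks are found
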
%build sh %{SOURCE8} \ @@ -138,7 +152,8 @@ sh %{SOURCE8} \ %{rolename1}-%{id1}/README.md \ %{rolename2}-%{id2}/README.md \ %{rolename3}-%{id3}/README.md \ -%{rolename5}-%{id5}/README.md +%{rolename5}-%{id5}/README.md \ +%{rolename6}-%{id6}/README.md %install mkdir -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles @@ -148,6 +163,7 @@ cp -pR %{rolename1}-%{id1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolep cp -pR %{rolename2}-%{id2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename2} cp -pR %{rolename3}-%{id3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename3} cp -pR %{rolename5}-%{id5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename5} +cp -pR %{rolename6}-%{id6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}%{rolename6} %if 0%{?rolealtprefix:1} ln -s %{roleprefix}%{rolename0} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename0} @@ -155,6 +171,7 @@ ln -s %{roleprefix}%{rolename1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{ ln -s %{roleprefix}%{rolename2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename2} ln -s %{roleprefix}%{rolename3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename3} ln -s %{roleprefix}%{rolename5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename5} +ln -s %{roleprefix}%{rolename6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename6} %endif mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/kdump @@ -162,6 +179,7 @@ mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/postfix mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/selinux mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/timesync mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/network +mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/storage cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/README.md \ $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}kdump/README.html \ @@ -217,7 +235,12 @@ mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtoo $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool-features-playbook.yml mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/ethtool-features-default.yml \ $RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool-features-default-playbook.yml - + +cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/README.md \ + $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/README.html \ + $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE \ + $RPM_BUILD_ROOT%{_pkgdocdir}/storage + rm $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/semaphore rm -r $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}*/molecule @@ -238,12 +261,14 @@ rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples %{_datadir}/ansible/roles/%{rolealtprefix}selinux %{_datadir}/ansible/roles/%{rolealtprefix}timesync %{_datadir}/ansible/roles/%{rolealtprefix}network +%{_datadir}/ansible/roles/%{rolealtprefix}storage %endif %{_datadir}/ansible/roles/%{roleprefix}kdump %{_datadir}/ansible/roles/%{roleprefix}postfix %{_datadir}/ansible/roles/%{roleprefix}selinux %{_datadir}/ansible/roles/%{roleprefix}timesync %{_datadir}/ansible/roles/%{roleprefix}network +%{_datadir}/ansible/roles/%{roleprefix}storage %doc %{_pkgdocdir}/*/example-*-playbook.yml %doc %{_pkgdocdir}/network/example-inventory %doc %{_pkgdocdir}/*/README.md @@ -253,11 +278,13 @@ rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples %doc %{_datadir}/ansible/roles/%{roleprefix}selinux/README.md %doc 
%{_datadir}/ansible/roles/%{roleprefix}timesync/README.md %doc %{_datadir}/ansible/roles/%{roleprefix}network/README.md +%doc %{_datadir}/ansible/roles/%{roleprefix}storage/README.md %doc %{_datadir}/ansible/roles/%{roleprefix}kdump/README.html %doc %{_datadir}/ansible/roles/%{roleprefix}postfix/README.html %doc %{_datadir}/ansible/roles/%{roleprefix}selinux/README.html %doc %{_datadir}/ansible/roles/%{roleprefix}timesync/README.html %doc %{_datadir}/ansible/roles/%{roleprefix}network/README.html +%doc %{_datadir}/ansible/roles/%{roleprefix}storage/README.html %license %{_pkgdocdir}/*/COPYING @@ -267,8 +294,16 @@ rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples %license %{_datadir}/ansible/roles/%{roleprefix}selinux/COPYING %license %{_datadir}/ansible/roles/%{roleprefix}timesync/COPYING %license %{_datadir}/ansible/roles/%{roleprefix}network/LICENSE +%license %{_datadir}/ansible/roles/%{roleprefix}storage/LICENSE %changelog +* Mon Oct 21 2019 Pavel Cahyna - 1.0-10 +- Add the storage_safe_mode option, true by default, to prevent accidental + data removal: rhbz#1763242, issue #42, PR #43 and #51. + +* Thu Aug 15 2019 Pavel Cahyna - 1.0-9 +- Add the storage role + * Thu Jun 13 2019 Pavel Cahyna - 1.0-7 - Update tests for the network role - Fix typo in a test for the timesync role
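[Editorial note: at the module level, the changelog's safe-mode fix reduces to the single new safe_mode parameter threaded from the role variable into library/blivet.py. The role invokes the module exactly as in the tasks/main-blivet.yml hunk above.]

- name: manage the pools and volumes to match the specified state
  blivet:
    pools: "{{ _storage_pools }}"
    volumes: "{{ _storage_volumes }}"
    use_partitions: "{{ storage_use_partitions }}"
    disklabel_type: "{{ storage_disklabel_type }}"
    # module default is false; the role default of true makes safety opt-out
    safe_mode: "{{ storage_safe_mode }}"
  register: blivet_output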