diff --git a/README.md b/README.md
index c2debc9..f808adc 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount
 ##### `mount_options`
 The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.
 
+#### `storage_safe_mode`
+When true (the default), an error will occur instead of automatically removing existing devices and/or formatting.
+
 Example Playbook
 ----------------
 
diff --git a/defaults/main.yml b/defaults/main.yml
index 7b500e5..476616b 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,6 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
+storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"
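With the role default flipped to safe mode, consumer playbooks that intend to re-format existing devices must now opt out explicitly. A minimal sketch of that usage (the disk name, pool layout, and sizes here are hypothetical, following the `include_role` pattern used by the tests later in this patch):

```yaml
# Hypothetical opt-out example: with storage_safe_mode left at its default
# (true), re-running this against a disk that already carries formatting
# fails instead of destroying it.
- hosts: all
  become: true
  tasks:
    - name: Create an LVM pool, explicitly allowing removal of existing formatting
      include_role:
        name: storage
      vars:
        storage_safe_mode: false  # role default is true as of this change
        storage_pools:
          - name: app
            type: lvm
            disks: ['/dev/vdb']  # placeholder scratch disk
            volumes:
              - name: data
                size: '5g'
                mount_point: '/opt/app'
```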
diff --git a/library/blivet.py b/library/blivet.py
index d416944..1d8cd36 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -31,6 +31,9 @@ options:
     disklabel_type:
         description:
             - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+    safe_mode:
+        description:
+            - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting
 
 author:
     - David Lehman (dlehman@redhat.com)
@@ -112,13 +115,15 @@ if BLIVET_PACKAGE:
 
 use_partitions = None  # create partitions on pool backing device disks?
 disklabel_type = None  # user-specified disklabel type
+safe_mode = None  # do not remove any existing devices or formatting
+packages_only = None  # only set things up enough to get a list of required packages
 
 
 class BlivetAnsibleError(Exception):
     pass
 
 
-class BlivetVolume:
+class BlivetVolume(object):
     def __init__(self, blivet_obj, volume, bpool=None):
         self._blivet = blivet_obj
         self._volume = volume
@@ -206,11 +211,16 @@ class BlivetVolume:
 
     def _reformat(self):
         """ Schedule actions as needed to ensure the volume is formatted as specified. """
+        global packages_only
+
         fmt = self._get_format()
         if self._device.format.type == fmt.type:
             return
 
-        if self._device.format.status:
+        if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
+            raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
+
+        if self._device.format.status and not packages_only:
             self._device.format.teardown()
         self._blivet.format_device(self._device, fmt)
 
@@ -251,6 +261,19 @@ class BlivetDiskVolume(BlivetVolume):
     def _type_check(self):
         return self._device.is_disk
 
+    def _look_up_device(self):
+        super(BlivetDiskVolume, self)._look_up_device()
+        if not self._get_device_id():
+            # FAIL: no disks specified for volume
+            raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name'])
+        elif not isinstance(self._volume['disks'], list):
+            raise BlivetAnsibleError("volume disks must be specified as a list")
+
+        if self._device is None:
+            # FAIL: failed to find the disk
+            raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks']))
+
+
 class BlivetPartitionVolume(BlivetVolume):
     def _type_check(self):
@@ -342,7 +365,7 @@ def _get_blivet_volume(blivet_obj, volume, bpool=None):
     return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool)
 
 
-class BlivetPool:
+class BlivetPool(object):
     def __init__(self, blivet_obj, pool):
         self._blivet = blivet_obj
         self._pool = pool
@@ -424,8 +447,11 @@ class BlivetPool:
         """ Schedule actions as needed to ensure pool member devices exist. """
         members = list()
         for disk in self._disks:
-            if not disk.isleaf:
-                self._blivet.devicetree.recursive_remove(disk)
+            if not disk.isleaf or disk.format.type is not None:
+                if not safe_mode:
+                    self._blivet.devicetree.recursive_remove(disk)
+                else:
+                    raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))
 
         if use_partitions:
             label = get_format("disklabel", device=disk.path)
@@ -486,7 +512,10 @@ class BlivetPartitionPool(BlivetPool):
     def _create(self):
         if self._device.format.type != "disklabel" or \
            self._device.format.label_type != disklabel_type:
-            self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+            if not safe_mode:
+                self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+            else:
+                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (self._device.name, self._pool['name']))
 
             label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
             self._blivet.format_device(self._device, label)
@@ -520,7 +549,7 @@ class BlivetLVMPool(BlivetPool):
 
 
 _BLIVET_POOL_TYPES = {
-    "disk": BlivetPartitionPool,
+    "partition": BlivetPartitionPool,
     "lvm": BlivetLVMPool
 }
@@ -550,7 +579,7 @@ def manage_pool(b, pool):
         volume['_mount_id'] = bvolume._volume.get('_mount_id', '')
 
 
-class FSTab:
+class FSTab(object):
    def __init__(self, blivet_obj):
        self._blivet = blivet_obj
        self._entries = list()
@@ -656,6 +685,7 @@ def run_module():
         volumes=dict(type='list'),
         packages_only=dict(type='bool', required=False, default=False),
         disklabel_type=dict(type='str', required=False, default=None),
+        safe_mode=dict(type='bool', required=False, default=False),
         use_partitions=dict(type='bool', required=False, default=True))
 
     # seed the result dict in the object
@@ -684,6 +714,12 @@ def run_module():
     global use_partitions
     use_partitions = module.params['use_partitions']
 
+    global safe_mode
+    safe_mode = module.params['safe_mode']
+
+    global packages_only
+    packages_only = module.params['packages_only']
+
     b = Blivet()
     b.reset()
     fstab = FSTab(b)
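The new module parameters are wired up by the role's own tasks (next hunk), not by users directly. Shown in isolation for clarity, this is a sketch of the call the role makes; note that `safe_mode` defaults to `false` at the module boundary and only becomes true via the role default above:

```yaml
# Sketch of the module invocation performed by tasks/main-blivet.yml.
# The _storage_* variables are the role's internal, defaults-merged lists.
- name: manage the pools and volumes to match the specified state
  blivet:
    pools: "{{ _storage_pools }}"
    volumes: "{{ _storage_volumes }}"
    use_partitions: "{{ storage_use_partitions }}"
    disklabel_type: "{{ storage_disklabel_type }}"
    safe_mode: "{{ storage_safe_mode }}"
  register: blivet_output
```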
diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml
index 061195c..65b8580 100644
--- a/tasks/main-blivet.yml
+++ b/tasks/main-blivet.yml
@@ -38,7 +38,7 @@
     _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]"
     _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]"
     _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']"
-  loop: "{{ _storage_pools|subelements('volumes') }}"
+  loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}"
   when: storage_pools is defined
 
- name: Apply defaults to pools and volumes [3/6]
@@ -85,6 +85,15 @@
 - debug:
     var: _storage_volumes
 
+- name: load mount facts
+  setup:
+    gather_subset: '!all,!min,mounts'
+  register: __storage_mounts_before_packages
+
+# - name: show mounts before get required packages
+#   debug:
+#     var: __storage_mounts_before_packages
+
 - name: get required packages
   blivet:
     pools: "{{ _storage_pools }}"
@@ -94,6 +103,30 @@
     packages_only: true
   register: package_info
 
+- name: load mount facts
+  setup:
+    gather_subset: '!all,!min,mounts'
+  register: __storage_mounts_after_packages
+
+- name: detect mount alteration by 'get required packages'
+  block:
+    - name: show mounts before managing the pools and volumes
+      debug:
+        var: __storage_mounts_before_packages.ansible_facts.ansible_mounts
+
+    - name: show mounts after managing the pools and volumes
+      debug:
+        var: __storage_mounts_after_packages.ansible_facts.ansible_mounts
+
+    - name: fail if mounts changed
+      fail:
+        msg: "get required packages changed mounts. Changed status is
+              {{ package_info.changed }}"
+  when:
+    - __storage_mounts_before_packages.ansible_facts.ansible_mounts |
+      count !=
+      __storage_mounts_after_packages.ansible_facts.ansible_mounts | count
+
 - name: make sure required packages are installed
   package:
     name: "{{ package_info.packages }}"
@@ -105,6 +138,7 @@
     volumes: "{{ _storage_volumes }}"
     use_partitions: "{{ storage_use_partitions }}"
     disklabel_type: "{{ storage_disklabel_type }}"
+    safe_mode: "{{ storage_safe_mode }}"
   register: blivet_output
 
 - debug:
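The mount-fact snapshots above use restricted fact gathering so the regression check stays cheap. Standalone, the technique looks like this (the variable name is illustrative):

```yaml
# '!all,!min' turns every fact subset off; 'mounts' re-enables just the
# mount facts, so this is fast enough to run before and after a step.
- name: load mount facts
  setup:
    gather_subset: '!all,!min,mounts'
  register: __mounts_snapshot

- name: show how many filesystems are mounted
  debug:
    msg: "{{ __mounts_snapshot.ansible_facts.ansible_mounts | count }} mounts"
```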
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
index 9f4c5d2..79e952a 100644
--- a/tests/get_unused_disk.yml
+++ b/tests/get_unused_disk.yml
@@ -9,12 +9,10 @@
     unused_disks: "{{ unused_disks_return.disks }}"
   when: "'Unable to find unused disk' not in unused_disks_return.disks"
 
-- block:
-    - name: Exit playbook when there's no unused disks in the system
-      debug:
-        msg: "Unable to find unused disks. Exiting playbook."
-    - meta: end_play
-  when: unused_disks is undefined
+- name: Exit playbook when there's not enough unused disks in the system
+  fail:
+    msg: "Unable to find enough unused disks. Exiting playbook."
+  when: unused_disks is undefined or unused_disks|length < disks_needed|default(1)
 
 - name: Print unused disks
   debug:
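Callers can now state how many scratch disks a test needs, and the helper fails the play when fewer are found; `disks_needed` defaults to 1. Usage, as in the multi-disk test at the end of this patch:

```yaml
# Request two unused disks of at least volume_group_size; the include
# now fails instead of silently proceeding with fewer disks.
- include_tasks: get_unused_disk.yml
  vars:
    min_size: "{{ volume_group_size }}"
    max_return: 2
    disks_needed: 2
```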
mount_location }}" @@ -14,11 +23,246 @@ storage_volumes: - name: test1 type: disk - disks: "['/dev/surelyidonotexist']" + disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - - name: Check the error output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly, + # blivet_output.failed is false. + # - name: Show the error output + # debug: + # msg: "{{ blivet_output.failed }}" + + # - name: Check the error output + # assert: + # that: blivet_output.failed | bool + # msg: "Expected error message not found for missing disk" + + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + + - name: create a file + file: + path: "{{ testfile }}" + state: touch + + - name: Test for correct handling of safe_mode + block: + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Unmount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: none + + - name: Test for correct handling of safe_mode with unmounted filesystem + block: + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed }}" - msg: "Expected error message not found for missing disk" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Remount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" 
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to create a partition pool on the disk already containing a file system in safe_mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: partition
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode with existing filesystem
+      block:
+        - name: Try to create LVM pool on disk that already belongs to an existing filesystem
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: lvm
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: stat the file
+      stat:
+        path: "{{ testfile }}"
+      register: stat_r
+
+    - name: assert file presence
+      assert:
+        that:
+          stat_r.stat.isreg is defined and stat_r.stat.isreg
+        msg: "data lost!"
+
+    - name: Create a partition pool on the disk already containing a file system w/o safe_mode
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            type: partition
+
+    - name: Verify the output
+      assert:
+        that: not blivet_output.failed
+        msg: "failed to create partition pool over existing file system w/o safe_mode"
+
+    - name: Clean up
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            type: partition
+            disks: "{{ unused_disks }}"
+            state: absent
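The error tests above (and those in tests_lvm_errors.yml below) all share one idiom: run the role with input that must fail, follow it with a sentinel `fail` task that should never be reached, and assert in `rescue` that the failure came from the role rather than from the sentinel. Reduced to a skeleton (the role input here is a placeholder):

```yaml
# Skeleton of the failure-assertion idiom used throughout these tests.
- name: Verify that the play fails with the expected error message
  block:
    - name: Run the role with input that is expected to fail
      include_role:
        name: storage
      vars:
        storage_volumes: []  # placeholder for the invalid input under test

    - name: UNREACH
      fail:
        msg: "this should be unreachable"

  rescue:
    - name: Check that we failed in the role
      assert:
        that:
          - ansible_failed_task.name != 'UNREACH'
        msg: "Role has not failed when it should have"
```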
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index ab23674..e8be153 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -33,13 +33,32 @@
                     size: "{{ volume1_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ non-existent pool disk"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - debug:
+      #     msg: "{{ 'failed: ' + blivet_output.failed | string +
+      #              'msg: ' + blivet_output.msg +
+      #              'changed: ' + blivet_output.changed | string }}"
+
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ non-existent pool disk"
 
     - name: Test for correct handling of invalid size specification.
       block:
@@ -55,13 +74,27 @@
                     size: "{{ invalid_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('invalid size.+for volume') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ invalid volume size"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('invalid size.+for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ invalid volume size"
 
     - name: Test for correct handling of too-large volume size.
       block:
@@ -77,13 +110,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ too-large volume size"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ too-large volume size"
 
     - name: Test for correct handling of non-list disk specification.
       block:
@@ -99,13 +146,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('disk.+list') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ disks not in list form"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('disk.+list') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ disks not in list form"
 
     - name: Test for correct handling of missing disk specification.
      block:
@@ -121,13 +182,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('no disks.+pool') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ no disks specified"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('no disks.+pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ no disks specified"
 
     - name: Test for correct handling of LVM volume not defined within a pool.
       block:
@@ -142,10 +217,179 @@
                 size: "{{ volume1_size }}"
                 mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
-          assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('failed to find pool .+ for volume') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
-          ignore_errors: yes
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('failed to find pool .+ for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
+
+    - name: Create a pool
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: testpool1
+            type: lvm
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: testvol1
+                fs_type: 'ext4'
+                size: '1g'
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace file system in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext3'
+                    size: '1g'
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode with resize
+      block:
+        - name: Try to resize in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext4'
+                    size: '2g'
+
+        - name: Verify the output
+          assert:
+            that: "{{ not blivet_output.failed and blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+      when: false
+
+    - name: Test for correct handling of safe_mode with existing pool
+      block:
+        - name: Try to create LVM pool on disks that already belong to an existing pool
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: lvm
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace a pool by a file system on disk in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_volumes:
+              - name: test1
+                type: disk
+                fs_type: 'ext3'
+                disks:
+                  - "{{ unused_disks[0] }}"
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Clean up
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: testpool1
+            type: lvm
+            disks: "{{ unused_disks }}"
+            state: absent
diff --git a/tests/tests_lvm_multiple_disks_multiple_volumes.yml b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
index bbc7bb0..ca3968f 100644
--- a/tests/tests_lvm_multiple_disks_multiple_volumes.yml
+++ b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
@@ -15,13 +15,7 @@
     vars:
       min_size: "{{ volume_group_size }}"
       max_return: 2
-
-  - block:
-      - debug:
-          msg: "There needs to be two unused disks in the system to run this playbook."
-      - name: End playbook if there isn't two disks available
-        meta: end_play
-    when: unused_disks|length < 2
+      disks_needed: 2
 
   - name: Create a logical volume spanning two physical volumes that changes its mount location
     include_role: