diff --git a/README.md b/README.md
index c2debc9..f808adc 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount
 ##### `mount_options`
 The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.
 
+#### `storage_safe_mode`
+When true (the default), an error will occur instead of automatically removing existing devices and/or formatting.
+
 
 Example Playbook
 ----------------
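The README hunk above documents the new `storage_safe_mode` variable. As a minimal sketch of what opting out looks like from a consumer playbook (the pool name, disk name `vdb`, sizes, and mount point below are illustrative, not from this patch): with the new default, a playbook that re-purposes disks carrying existing data must disable safe mode explicitly, otherwise the role fails instead of reformatting.

- hosts: all
  become: true
  vars:
    storage_safe_mode: false  # hypothetical opt-out; restores the old destructive behavior
    storage_pools:
      - name: app             # illustrative pool
        type: lvm
        disks: ['vdb']        # hypothetical disk that may carry existing formatting
        volumes:
          - name: data
            size: '4g'
            mount_point: '/opt/app'
  roles:
    - name: storage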
diff --git a/defaults/main.yml b/defaults/main.yml
index 7b500e5..476616b 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,6 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null  # leave unset to allow the role to select an appropriate label type
+storage_safe_mode: true  # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"
diff --git a/library/blivet.py b/library/blivet.py
index d416944..1d8cd36 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -31,6 +31,9 @@ options:
     disklabel_type:
         description:
            - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+    safe_mode:
+        description:
+            - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting
 
 author:
     - David Lehman (dlehman@redhat.com)
@@ -112,13 +115,15 @@ if BLIVET_PACKAGE:
 
 use_partitions = None  # create partitions on pool backing device disks?
 disklabel_type = None  # user-specified disklabel type
+safe_mode = None  # do not remove any existing devices or formatting
+packages_only = None  # only set things up enough to get a list of required packages
 
 
 class BlivetAnsibleError(Exception):
     pass
 
 
-class BlivetVolume:
+class BlivetVolume(object):
     def __init__(self, blivet_obj, volume, bpool=None):
         self._blivet = blivet_obj
         self._volume = volume
@@ -206,11 +211,16 @@ class BlivetVolume:
 
     def _reformat(self):
         """ Schedule actions as needed to ensure the volume is formatted as specified. """
+        global packages_only
+
         fmt = self._get_format()
         if self._device.format.type == fmt.type:
             return
 
-        if self._device.format.status:
+        if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
+            raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
+
+        if self._device.format.status and not packages_only:
             self._device.format.teardown()
         self._blivet.format_device(self._device, fmt)
 
@@ -251,6 +261,19 @@ class BlivetDiskVolume(BlivetVolume):
     def _type_check(self):
         return self._device.is_disk
 
+    def _look_up_device(self):
+        super(BlivetDiskVolume, self)._look_up_device()
+        if not self._get_device_id():
+            # FAIL: no disks specified for volume
+            raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name'])  # sure about this one?
+        elif not isinstance(self._volume['disks'], list):
+            raise BlivetAnsibleError("volume disks must be specified as a list")
+
+        if self._device is None:
+            # FAIL: failed to find the disk
+            raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks']))
+
+
 
 class BlivetPartitionVolume(BlivetVolume):
     def _type_check(self):
@@ -342,7 +365,7 @@ def _get_blivet_volume(blivet_obj, volume, bpool=None):
     return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool)
 
 
-class BlivetPool:
+class BlivetPool(object):
     def __init__(self, blivet_obj, pool):
         self._blivet = blivet_obj
         self._pool = pool
@@ -424,8 +447,11 @@ class BlivetPool:
         """ Schedule actions as needed to ensure pool member devices exist. """
         members = list()
         for disk in self._disks:
-            if not disk.isleaf:
-                self._blivet.devicetree.recursive_remove(disk)
+            if not disk.isleaf or disk.format.type is not None:
+                if not safe_mode:
+                    self._blivet.devicetree.recursive_remove(disk)
+                else:
+                    raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))
 
             if use_partitions:
                 label = get_format("disklabel", device=disk.path)
@@ -486,7 +512,10 @@ class BlivetPartitionPool(BlivetPool):
     def _create(self):
         if self._device.format.type != "disklabel" or \
            self._device.format.label_type != disklabel_type:
-            self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+            if not safe_mode:
+                self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+            else:
+                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (self._device.name, self._pool['name']))
 
         label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
         self._blivet.format_device(self._device, label)
@@ -520,7 +549,7 @@ class BlivetLVMPool(BlivetPool):
 
 
 _BLIVET_POOL_TYPES = {
-    "disk": BlivetPartitionPool,
+    "partition": BlivetPartitionPool,
     "lvm": BlivetLVMPool
 }
 
@@ -550,7 +579,7 @@ def manage_pool(b, pool):
         volume['_mount_id'] = bvolume._volume.get('_mount_id', '')
 
 
-class FSTab:
+class FSTab(object):
     def __init__(self, blivet_obj):
         self._blivet = blivet_obj
         self._entries = list()
@@ -656,6 +685,7 @@ def run_module():
         volumes=dict(type='list'),
         packages_only=dict(type='bool', required=False, default=False),
         disklabel_type=dict(type='str', required=False, default=None),
+        safe_mode=dict(type='bool', required=False, default=False),
         use_partitions=dict(type='bool', required=False, default=True))
 
     # seed the result dict in the object
@@ -684,6 +714,12 @@ def run_module():
     global use_partitions
     use_partitions = module.params['use_partitions']
 
+    global safe_mode
+    safe_mode = module.params['safe_mode']
+
+    global packages_only
+    packages_only = module.params['packages_only']
+
     b = Blivet()
     b.reset()
     fstab = FSTab(b)
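With the new `argument_spec` entry, the module accepts `safe_mode` directly; the role wires it through in tasks/main-blivet.yml below. A minimal direct invocation might look like the following sketch (the volume spec and disk name `vdb` are illustrative, and the module defaults `safe_mode` to false when called outside the role):

- name: manage storage with the safety net enabled
  blivet:
    pools: []                  # illustrative: no pools in this call
    volumes:
      - name: test1            # illustrative volume spec
        type: disk
        fs_type: 'ext4'
        disks: ['vdb']         # hypothetical disk
    safe_mode: true            # fail rather than remove/reformat existing data
  register: blivet_output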
diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml
index 061195c..65b8580 100644
--- a/tasks/main-blivet.yml
+++ b/tasks/main-blivet.yml
@@ -38,7 +38,7 @@
     _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]"
     _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]"
     _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']"
-  loop: "{{ _storage_pools|subelements('volumes') }}"
+  loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}"
   when: storage_pools is defined
 
 - name: Apply defaults to pools and volumes [3/6]
@@ -85,6 +85,15 @@
 - debug:
     var: _storage_volumes
 
+- name: load mount facts
+  setup:
+    gather_subset: '!all,!min,mounts'
+  register: __storage_mounts_before_packages
+
+# - name: show mounts before get required packages
+#   debug:
+#     var: __storage_mounts_before_packages
+
 - name: get required packages
   blivet:
     pools: "{{ _storage_pools }}"
@@ -94,6 +103,30 @@
     packages_only: true
   register: package_info
 
+- name: load mount facts
+  setup:
+    gather_subset: '!all,!min,mounts'
+  register: __storage_mounts_after_packages
+
+- name: detect mount alteration by 'get required packages'
+  block:
+    - name: show mounts before manage the pools and volumes
+      debug:
+        var: __storage_mounts_before_packages.ansible_facts.ansible_mounts
+
+    - name: show mounts after manage the pools and volumes
+      debug:
+        var: __storage_mounts_after_packages.ansible_facts.ansible_mounts
+
+    - name: fail if mounts changed
+      fail:
+        msg: "get required packages changed mounts. Changed status is
+          {{ package_info.changed }}"
+  when:
+    - __storage_mounts_before_packages.ansible_facts.ansible_mounts |
+      count !=
+      __storage_mounts_after_packages.ansible_facts.ansible_mounts | count
+
 - name: make sure required packages are installed
   package:
     name: "{{ package_info.packages }}"
@@ -105,6 +138,7 @@
     volumes: "{{ _storage_volumes }}"
     use_partitions: "{{ storage_use_partitions }}"
     disklabel_type: "{{ storage_disklabel_type }}"
+    safe_mode: "{{ storage_safe_mode }}"
   register: blivet_output
 
 - debug:
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
index 9f4c5d2..79e952a 100644
--- a/tests/get_unused_disk.yml
+++ b/tests/get_unused_disk.yml
@@ -9,12 +9,10 @@
     unused_disks: "{{ unused_disks_return.disks }}"
   when: "'Unable to find unused disk' not in unused_disks_return.disks"
 
-- block:
-    - name: Exit playbook when there's no unused disks in the system
-      debug:
-        msg: "Unable to find unused disks. Exiting playbook."
-    - meta: end_play
-  when: unused_disks is undefined
+- name: Exit playbook when there's not enough unused disks in the system
+  fail:
+    msg: "Unable to find enough unused disks. Exiting playbook."
+  when: unused_disks is undefined or unused_disks|length < disks_needed|default(1)
 
 - name: Print unused disks
   debug:
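The rewritten guard lets callers demand a minimum number of disks through the new `disks_needed` variable (defaulting to 1), which tests_lvm_multiple_disks_multiple_volumes.yml uses at the end of this patch. A sketch of such a call (the min_size value is illustrative):

- include_tasks: get_unused_disk.yml
  vars:
    min_size: "10g"
    disks_needed: 2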
diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml
index b6aa80b..f7962c6 100644
--- a/tests/tests_change_disk_fs.yml
+++ b/tests/tests_change_disk_fs.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test'
     volume_size: '5g'
     fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}"
diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml
index cca23eb..b88e768 100644
--- a/tests/tests_change_fs.yml
+++ b/tests/tests_change_fs.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
     volume_size: '5g'
     fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}"
diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml
index e4aa76c..eb93c11 100644
--- a/tests/tests_change_fs_use_partitions.yml
+++ b/tests/tests_change_fs_use_partitions.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     storage_use_partitions: true
     mount_location: '/opt/test1'
     volume_size: '5g'
diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml
index b19ae35..c5290eb 100644
--- a/tests/tests_create_disk_then_remove.yml
+++ b/tests/tests_create_disk_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml
index 6b25939..f2c06fb 100644
--- a/tests/tests_create_lvm_pool_then_remove.yml
+++ b/tests/tests_create_lvm_pool_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location1: '/opt/test1'
     mount_location2: '/opt/test2'
     volume_group_size: '10g'
diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml
index 40b3e62..ae589d3 100644
--- a/tests/tests_create_partition_volume_then_remove.yml
+++ b/tests/tests_create_partition_volume_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
@@ -18,7 +19,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             volumes:
               - name: test1
@@ -33,7 +34,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             volumes:
               - name: test1
@@ -48,7 +49,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             state: absent
             volumes:
@@ -65,7 +66,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             state: absent
             volumes:
diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml
index 36eec41..7112f6e 100644
--- a/tests/tests_disk_errors.yml
+++ b/tests/tests_disk_errors.yml
@@ -3,8 +3,17 @@
   become: true
   vars:
     mount_location: '/opt/test1'
+    testfile: "{{ mount_location }}/quux"
 
   tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "10g"
+        max_return: 1
+
     - name: Verify that the play fails with the expected error message
       block:
         - name: Create a disk volume mounted at "{{ mount_location }}"
@@ -14,11 +23,246 @@
             storage_volumes:
               - name: test1
                 type: disk
-                disks: "['/dev/surelyidonotexist']"
+                disks: ['/dev/surelyidonotexist']
                 mount_point: "{{ mount_location }}"
 
-        - name: Check the error output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly,
+      # blivet_output.failed is false.
+      # - name: Show the error output
+      #   debug:
+      #     msg: "{{ blivet_output.failed }}"
+
+      # - name: Check the error output
+      #   assert:
+      #     that: blivet_output.failed | bool
+      #     msg: "Expected error message not found for missing disk"
+
+    - name: Create a file system on disk
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: 'ext4'
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location }}"
+
+    - name: create a file
+      file:
+        path: "{{ testfile }}"
+        state: touch
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace the file system on disk in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_volumes:
+              - name: test1
+                type: disk
+                fs_type: 'ext3'
+                disks: "{{ unused_disks }}"
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Unmount file system
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: 'ext4'
+            disks: "{{ unused_disks }}"
+            mount_point: none
+
+    - name: Test for correct handling of safe_mode with unmounted filesystem
+      block:
+        - name: Try to replace the file system on disk in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_volumes:
+              - name: test1
+                type: disk
+                fs_type: 'ext3'
+                disks: "{{ unused_disks }}"
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed }}"
-            msg: "Expected error message not found for missing disk"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Remount file system
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: 'ext4'
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location }}"
+
+    - name: stat the file
+      stat:
+        path: "{{ testfile }}"
+      register: stat_r
+
+    - name: assert file presence
+      assert:
+        that:
+          stat_r.stat.isreg is defined and stat_r.stat.isreg
+        msg: "data lost!"
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to create a partition pool on the disk already containing a file system in safe_mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: partition
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode with existing filesystem
+      block:
+        - name: Try to create LVM pool on disk that already belongs to an existing filesystem
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: lvm
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: stat the file
+      stat:
+        path: "{{ testfile }}"
+      register: stat_r
+
+    - name: assert file presence
+      assert:
+        that:
+          stat_r.stat.isreg is defined and stat_r.stat.isreg
+        msg: "data lost!"
+
+    - name: Create a partition pool on the disk already containing a file system w/o safe_mode
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            type: partition
+
+    - name: Verify the output
+      assert:
+        that: not blivet_output.failed
+        msg: "failed to create partition pool over existing file system w/o safe_mode"
+
+    - name: Clean up
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            type: partition
+            disks: "{{ unused_disks }}"
+            state: absent
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index ab23674..e8be153 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -33,13 +33,32 @@
                     size: "{{ volume1_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ non-existent pool disk"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - debug:
+      #     msg: "{{ 'failed: ' + blivet_output.failed | string +
+      #              'msg: ' + blivet_output.msg +
+      #              'changed: ' + blivet_output.changed | string }}"
+
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ non-existent pool disk"
 
     - name: Test for correct handling of invalid size specification.
       block:
@@ -55,13 +74,27 @@
                     size: "{{ invalid_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('invalid size.+for volume') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ invalid volume size"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('invalid size.+for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ invalid volume size"
 
     - name: Test for correct handling of too-large volume size.
       block:
@@ -77,13 +110,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ too-large volume size"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ too-large volume size"
 
     - name: Test for correct handling of non-list disk specification.
      block:
@@ -99,13 +146,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('disk.+list') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ disks not in list form"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('disk.+list') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ disks not in list form"
 
     - name: Test for correct handling of missing disk specification.
       block:
@@ -121,13 +182,27 @@
                     size: "{{ too_large_size }}"
                     mount_point: "{{ mount_location1 }}"
 
-        - name: Verify the output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('no disks.+pool') and
-                      not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ no disks specified"
-          ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('no disks.+pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ no disks specified"
 
     - name: Test for correct handling of LVM volume not defined within a pool.
       block:
@@ -142,10 +217,179 @@
                 size: "{{ volume1_size }}"
                 mount_point: "{{ mount_location1 }}"
 
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('failed to find pool .+ for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
+
+    - name: Create a pool
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: testpool1
+            type: lvm
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: testvol1
+                fs_type: 'ext4'
+                size: '1g'
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace file system in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext3'
+                    size: '1g'
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
         - name: Verify the output
           assert:
             that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('failed to find pool .+ for volume') and
+                      blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
                       not blivet_output.changed }}"
-            msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
-          ignore_errors: yes
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode with resize
+      block:
+        - name: Try to resize in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext4'
+                    size: '2g'
+
+        - name: Verify the output
+          assert:
+            that: "{{ not blivet_output.failed and blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+      when: false
+
+    - name: Test for correct handling of safe_mode with existing pool
+      block:
+        - name: Try to create LVM pool on disks that already belong to an existing pool
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: lvm
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace a pool by a file system on disk in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_volumes:
+              - name: test1
+                type: disk
+                fs_type: 'ext3'
+                disks:
+                  - "{{ unused_disks[0] }}"
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+        - name: Verify the output
+          assert:
+            that: "blivet_output.failed and
+                   blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                   not blivet_output.changed"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Clean up
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: testpool1
+            type: lvm
+            disks: "{{ unused_disks }}"
+            state: absent
diff --git a/tests/tests_lvm_multiple_disks_multiple_volumes.yml b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
index bbc7bb0..ca3968f 100644
--- a/tests/tests_lvm_multiple_disks_multiple_volumes.yml
+++ b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
@@ -15,13 +15,7 @@
       vars:
         min_size: "{{ volume_group_size }}"
         max_return: 2
-
-    - block:
-        - debug:
-            msg: "There needs to be two unused disks in the system to run this playbook."
-        - name: End playbook if there isn't two disks available
-          meta: end_play
-      when: unused_disks|length < 2
+        disks_needed: 2
 
     - name: Create a logical volume spanning two physical volumes that changes its mount location
      include_role:
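Taken together, the reworked tests rely on one idiom: run the role inside a block whose final task should be unreachable, then assert in rescue that the failure came from the role itself rather than from the sentinel task. A condensed sketch of that idiom (the volume spec and disk name `vdb` are illustrative):

- hosts: all
  become: true
  tasks:
    - name: Expect the role to fail in safe mode
      block:
        - include_role:
            name: storage
          vars:
            storage_volumes:
              - name: test1       # illustrative volume
                type: disk
                fs_type: 'ext3'
                disks: ['vdb']    # hypothetical disk with existing formatting
        - name: UNREACH
          fail:
            msg: "this should be unreachable"
      rescue:
        - assert:
            that: ansible_failed_task.name != 'UNREACH'
            msg: "Role has not failed when it should have"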