SOURCES/storage-trim-volume-size.diff

diff --git a/library/blivet.py b/library/blivet.py
index e927121..f59f821 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -130,6 +130,9 @@ if BLIVET_PACKAGE:
     set_up_logging()
     log = logging.getLogger(BLIVET_PACKAGE + ".ansible")
 
+
+MAX_TRIM_PERCENT = 2
+
 use_partitions = None  # create partitions on pool backing device disks?
 disklabel_type = None  # user-specified disklabel type
 safe_mode = None       # do not remove any existing devices or formatting
@@ -445,8 +448,16 @@ class BlivetVolume(BlivetBase):
             if not self._device.resizable:
                 return
 
-            if self._device.format.resizable:
-                self._device.format.update_size_info()
+            trim_percent = (1.0 - float(self._device.max_size / size))*100
+            log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent)
+            if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT:
+                log.info("adjusting %s resize target from %s to %s to fit in free space",
+                         self._volume['name'],
+                         size,
+                         self._device.max_size)
+                size = self._device.max_size
+                if size == self._device.size:
+                    return
 
             if not self._device.min_size <= size <= self._device.max_size:
                 raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size))
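Note: the hunk above caps a resize request that slightly exceeds what the
device can grow to. A minimal sketch of the rule, using plain byte counts
instead of blivet Size objects and a hypothetical helper name:

    MAX_TRIM_PERCENT = 2

    def capped_resize_target(requested, max_size):
        """Trim a too-large resize request down to max_size when the
        overshoot is at most MAX_TRIM_PERCENT of the request."""
        trim_percent = (1.0 - max_size / requested) * 100
        if requested > max_size and trim_percent <= MAX_TRIM_PERCENT:
            return max_size  # close enough: quietly trim to what fits
        return requested     # either it already fits, or validation fails

For example, a 10 GiB request against 9.9 GiB of room is trimmed (1% over),
while a 12 GiB request against 10 GiB still hits the "cannot be resized"
error below (16.7% over).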
@@ -610,10 +621,18 @@ class BlivetLVMVolume(BlivetVolume):
             raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name']))
 
         fmt = self._get_format()
+        trim_percent = (1.0 - float(parent.free_space / size))*100
+        log.debug("size: %s ; %s", size, trim_percent)
         if size > parent.free_space:
-            raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size,
-                                                                                                                   parent.name,
-                                                                                                                   parent.free_space))
+            if trim_percent > MAX_TRIM_PERCENT:
+                raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)"
+                                         % (size, parent.name, parent.free_space))
+            else:
+                log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'],
+                                                                                    size,
+                                                                                    parent.free_space,
+                                                                                    parent.name)
+                size = parent.free_space
 
         try:
             device = self._blivet.new_lv(name=self._volume['name'],
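Note: the creation path applies the same MAX_TRIM_PERCENT cap against the
pool's free space before new_lv() is called. A worked example with
illustrative numbers (the 36 MiB of VG overhead is an assumption for the
example, not a value from the patch):

    GiB = 2**30
    MiB = 2**20
    requested = 10 * GiB                  # LV size the user asked for
    free_space = requested - 36 * MiB     # VG free space after metadata
    trim_percent = (1.0 - free_space / requested) * 100   # ~0.35
    assert trim_percent <= 2              # so size is clamped to free_space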
diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
new file mode 100644
index 0000000..21a5788
--- /dev/null
+++ b/tests/tests_create_lv_size_equal_to_vg.yml
@@ -0,0 +1,48 @@
+---
+- hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    mount_location: '/opt/test1'
+    volume_group_size: '10g'
+    lv_size: '10g'
+    unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
+    disk_size: '{{ unused_disk_subfact.sectors|int *
+                   unused_disk_subfact.sectorsize|int }}'
+
+  tasks:
+    - include_role:
+        name: linux-system-roles.storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "{{ volume_group_size }}"
+        max_return: 1
+
+    - name: Create one lv which size is equal to vg size
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+          storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              volumes:
+                - name: test1
+                  size: "{{ lv_size }}"
+                  mount_point: "{{ mount_location }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Clean up
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+          storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              state: "absent"
+              volumes:
+                - name: test1
+                  mount_point: "{{ mount_location }}"
+
+    - include_tasks: verify-role-results.yml
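Note: tests_create_lv_size_equal_to_vg.yml exercises exactly this trimming. A
VG created on a 10g disk has slightly less than 10g of free space (PV
metadata and extent rounding), so an lv_size of '10g' used to be rejected;
with the 2% cap it is now quietly reduced to the VG's free space and the run
is expected to succeed.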
diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml
new file mode 100644
index 0000000..fb17c23
--- /dev/null
+++ b/tests/tests_lvm_auto_size_cap.yml
@@ -0,0 +1,89 @@
+---
+- hosts: all
+  become: true
+
+  tasks:
+    - include_role:
+        name: linux-system-roles.storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: 10g
+        max_return: 1
+
+    - command: lsblk -b -l --noheadings -o NAME,SIZE
+      register: storage_test_lsblk
+
+    - set_fact:
+        test_disk_size: "{{ storage_test_lsblk.stdout_lines|map('regex_search', '^' + unused_disks[0] + '\\s+\\d+$')|select('string')|first|regex_replace('^\\w+\\s+', '') }}"
+
+    - package:
+        name: bc
+        state: installed
+
+    - command:
+        cmd: bc
+        stdin: "{{ test_disk_size }} *2"
+      register: doubled_size
+
+    - name: Test handling of too-large LVM volume size
+      block:
+        - name: Try to create a pool containing one volume twice the size of the backing disk
+          include_role:
+            name: linux-system-roles.storage
+          vars:
+            storage_pools:
+                - name: foo
+                  type: lvm
+                  disks: "{{ unused_disks }}"
+                  volumes:
+                    - name: test1
+                      size: "{{ doubled_size.stdout|trim }}"
+        - name: unreachable task
+          fail:
+            msg: UNREACH
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_result.msg != 'UNREACH'
+              - blivet_output.failed and
+                blivet_output.msg|regex_search('specified size for volume.+exceeds available')
+            msg: "Role has not failed when it should have"
+
+    - name: Create a pool containing one volume the same size as the backing disk
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              volumes:
+                - name: test1
+                  size: "{{ test_disk_size }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Repeat the previous invocation to verify idempotence
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+            - name: foo
+              type: lvm
+              disks: "{{ unused_disks }}"
+              volumes:
+                - name: test1
+                  size: "{{ test_disk_size }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Clean up
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              state: absent
+              volumes: []
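Note: the set_fact above pulls the disk's size in bytes out of the lsblk -b
output by matching the line that begins with the disk name, and the bc task
then doubles it to build a request far beyond the 2% trim window. A minimal
re-creation of that parsing in Python (the sample lsblk output is made up
for illustration):

    import re

    lsblk_stdout = "sda 10737418240\nsda1 10736369664\nsdb 10737418240"
    disk = "sdb"
    line = next(l for l in lsblk_stdout.splitlines()
                if re.search(r"^%s\s+\d+$" % disk, l))
    size_bytes = re.sub(r"^\w+\s+", "", line)   # -> "10737418240"
    doubled = int(size_bytes) * 2               # what the bc task computes

The same product could also be computed inline with Jinja2, e.g.
"{{ test_disk_size|int * 2 }}", without installing bc.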
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index 37d41dc..e8dc4f4 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -11,8 +11,6 @@
       - '/non/existent/disk'
     invalid_size: 'xyz GiB'
     unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
-    too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
-                        unused_disk_subfact.sectorsize|int }}'
 
   tasks:
     - include_role:
@@ -86,39 +84,6 @@
               - ansible_failed_result.msg != 'UNREACH'
             msg: "Role has not failed when it should have"
 
-    # the following does not work properly
-    # - name: Verify the output
-    #   assert:
-    #     that: "{{ blivet_output.failed and
-    #               blivet_output.msg|regex_search('invalid size.+for volume') and
-    #               not blivet_output.changed }}"
-    #     msg: "Unexpected behavior w/ invalid volume size"
-
-    - name: Test for correct handling of too-large volume size.
-      block:
-        - name: Try to create LVM with a too-large volume size.
-          include_role:
-            name: linux-system-roles.storage
-          vars:
-            storage_pools:
-              - name: foo
-                disks: "{{ unused_disks }}"
-                volumes:
-                  - name: test1
-                    size: "{{ too_large_size }}"
-                    mount_point: "{{ mount_location1 }}"
-
-        - name: unreachable task
-          fail:
-            msg: UNREACH
-
-      rescue:
-        - name: Check that we failed in the role
-          assert:
-            that:
-              - ansible_failed_result.msg != 'UNREACH'
-            msg: "Role has not failed when it should have"
-
     # the following does not work properly
     # - name: Verify the output
     #   assert:
@@ -138,7 +103,7 @@
                 disks: "{{ unused_disks[0] }}"
                 volumes:
                   - name: test1
-                    size: "{{ too_large_size }}"
+                    size: "{{ volume_size }}"
                     mount_point: "{{ mount_location1 }}"
 
         - name: unreachable task
@@ -171,7 +136,7 @@
                 disks: []
                 volumes:
                   - name: test1
-                    size: "{{ too_large_size }}"
+                    size: "{{ volume1_size }}"
                     mount_point: "{{ mount_location1 }}"
 
         - name: unreachable task
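Note: the too-large-size failure case is dropped from tests_lvm_errors.yml
because a size only one sector beyond the disk now falls inside the 2% trim
window and no longer fails; that scenario moves to tests_lvm_auto_size_cap.yml
above, which uses a genuinely oversized (doubled) request. The remaining
hunks only swap in valid size variables, since those test cases are about
nonexistent or empty disk lists rather than the size itself.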
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index a69ee98..3139bc7 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -7,7 +7,7 @@
     volume_group_size: '5g'
     volume1_size: '4g'
     unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
-    too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+    too_large_size: '{{ (unused_disk_subfact.sectors|int * 1.2) *
                         unused_disk_subfact.sectorsize|int }}'
 
   tasks:
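Note: in tests_misc.yml and tests_resize.yml, too_large_size changes from
"one sector past the disk" to 120% of the disk because the former overshoot
is far below MAX_TRIM_PERCENT and would now be silently trimmed instead of
failing. Illustrative arithmetic (the disk geometry is made up, and the role
actually compares against pool free space rather than the raw disk size):

    sectors, sector_size = 20971520, 512            # a 10 GiB disk
    disk = sectors * sector_size
    just_over = (sectors + 1) * sector_size
    print((1 - disk / just_over) * 100)             # ~0.000005% over: trimmed
    too_large = sectors * 1.2 * sector_size
    print((1 - disk / too_large) * 100)             # ~16.7% over: still fails

In tests_resize.yml below, resizing the volume to the full disk size is now
expected to succeed (the target gets trimmed to the device's maximum), so
the failure/rescue block is replaced with the standard verify-role-results
check.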
diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
index 9eeb2b9..209d129 100644
--- a/tests/tests_resize.yml
+++ b/tests/tests_resize.yml
@@ -9,7 +9,7 @@
     invalid_size1: 'xyz GiB'
     invalid_size2: 'none'
     unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
-    too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+    too_large_size: '{{ unused_disk_subfact.sectors|int * 1.2 *
                         unused_disk_subfact.sectorsize|int }}'
     disk_size: '{{ unused_disk_subfact.sectors|int *
                    unused_disk_subfact.sectorsize|int }}'
@@ -122,23 +122,7 @@
                     size: "{{ disk_size }}"
                     mount_point: "{{ mount_location }}"
 
-        - name: Unreachable task
-          fail:
-            msg: UNREACH
-
-      rescue:
-        - name: Check that we failed in the role
-          assert:
-            that:
-              - ansible_failed_result.msg != 'UNREACH'
-            msg: "Role has not failed when it should have"
-
-        - name: Verify the output
-          assert:
-            that: "blivet_output.failed and
-                   blivet_output.msg|regex_search('volume.+cannot be resized to.+') and
-                   not blivet_output.changed"
-            msg: "Unexpected behavior w/ invalid volume size"
+    - include_tasks: verify-role-results.yml
 
     - name: Test for correct handling of invalid size specification
       block: