 WHATS_NEW                           |  6 ++++++
 lib/activate/activate.c             | 21 +++++++++++++++++++++
 lib/activate/dev_manager.c          |  2 +-
 lib/metadata/lv.c                   |  4 ++++
 lib/metadata/raid_manip.c           | 30 ++++++++++--------------------
 test/shell/lvconvert-raid.sh        |  6 ++++++
 test/shell/lvconvert-repair-thin.sh |  9 ++++++++-
 tools/lvconvert.c                   |  1 +
 8 files changed, 57 insertions(+), 22 deletions(-)

diff --git a/WHATS_NEW b/WHATS_NEW
index 75c4569..ef5cc83 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,3 +1,9 @@
+Version 2.02.116 - 
+====================================
+  Preserve chunk size with repair and metadata swap of a thin pool.
+  Fix raid --splitmirrors 1 functionality (2.02.112).
+  Fix tree preload to handle splitting raid images.
+
 Version 2.02.115 - 21st January 2015
 ====================================
   Report segment types without monitoring support as undefined.
diff --git a/lib/activate/activate.c b/lib/activate/activate.c
index 424786c..00d3a10 100644
--- a/lib/activate/activate.c
+++ b/lib/activate/activate.c
@@ -1760,6 +1760,22 @@ static int _preload_detached_lv(struct logical_volume *lv, void *data)
 	struct detached_lv_data *detached = data;
 	struct lv_list *lvl_pre;
 
+        /* Check and preload removed raid image leg */
+	if (lv_is_raid_image(lv)) {
+		if ((lvl_pre = find_lv_in_vg_by_lvid(detached->lv_pre->vg, &lv->lvid)) &&
+		    !lv_is_raid_image(lvl_pre->lv) &&
+		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
+			return_0;
+	}
+
+        /* Check and preload removed raid metadata */
+	if (lv_is_raid_metadata(lv)) {
+		if ((lvl_pre = find_lv_in_vg_by_lvid(detached->lv_pre->vg, &lv->lvid)) &&
+		    !lv_is_raid_metadata(lvl_pre->lv) &&
+		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
+			return_0;
+	}
+
 	if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
 		if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) &&
 		    (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
@@ -1863,6 +1879,11 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
 		if (!for_each_sub_lv((struct logical_volume *)ondisk_lv, &_preload_detached_lv, &detached))
 			goto_out;
 
+		/* ATM the cache/thin pool is not scanned in 'for_each_sub_lv()', TODO: explore a better way */
+		if (lv_is_cache(ondisk_lv) &&
+		    !for_each_sub_lv(first_seg(ondisk_lv)->pool_lv, &_preload_detached_lv, &detached))
+			goto_out;
+
 		/*
 		 * Preload any snapshots that are being removed.
 		 */
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index 015af5b..dcb2c5d 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -2295,7 +2295,7 @@ int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
 			 * is used in the CTR table.
 			 */
 			if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
-			    ((seg_lv(seg, s)->status & VISIBLE_LV) &&
+			    (lv_is_visible(seg_lv(seg, s)) &&
 			     !(seg_lv(seg, s)->status & LVM_WRITE))) {
 				/* One each for metadata area and data area */
 				if (!dm_tree_node_add_null_area(node, 0) ||
diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c
index 683ec47..9052e63 100644
--- a/lib/metadata/lv.c
+++ b/lib/metadata/lv.c
@@ -1018,6 +1018,10 @@ const struct logical_volume *lv_lock_holder(const struct logical_volume *lv)
 				return sl->seg->lv;
 			}
 
+	/* RAID changes visibility of split LVs but still references them as leg/meta */
+	if ((lv_is_raid_image(lv) || lv_is_raid_metadata(lv)) && lv_is_visible(lv))
+		return lv;
+
 	/* For other types, by default look for the first user */
 	dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
 		/* FIXME: complete this exception list */
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index d502379..22f71c7 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -1149,12 +1149,6 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
 		return 0;
 	}
 
-	if (!resume_lv(lv->vg->cmd, lv_lock_holder(lv))) {
-		log_error("Failed to resume %s/%s after committing changes",
-			  lv->vg->name, lv->name);
-		return 0;
-	}
-
 	/*
 	 * First activate the newly split LV and LVs on the removal list.
 	 * This is necessary so that there are no name collisions due to
@@ -1164,26 +1158,22 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
 	if (!activate_lv_excl_local(cmd, lvl->lv))
 		return_0;
 
+	dm_list_iterate_items(lvl, &removal_list)
+		if (!activate_lv_excl_local(cmd, lvl->lv))
+			return_0;
+
+	if (!resume_lv(cmd, lv_lock_holder(lv))) {
+		log_error("Failed to resume %s/%s after committing changes",
+			  lv->vg->name, lv->name);
+		return 0;
+	}
+
 	/*
 	 * Since newly split LV is typically already active - we need to call
 	 * suspend() and resume() to also rename it.
 	 *
 	 * TODO: activate should recognize it and avoid these 2 calls
 	 */
-	if (!suspend_lv(cmd, lvl->lv)) {
-		log_error("Failed to suspend %s.", lvl->lv->name);
-		return 0;
-	}
-
-	if (!resume_lv(cmd, lvl->lv)) {
-		log_error("Failed to reactivate %s.", lvl->lv->name);
-		return 0;
-	}
-
-	dm_list_iterate_items(lvl, &removal_list)
-		if (!activate_lv_excl_local(cmd, lvl->lv))
-			return_0;
-
 
 	/*
 	 * Eliminate the residual LVs
diff --git a/test/shell/lvconvert-raid.sh b/test/shell/lvconvert-raid.sh
index 8621311..12e0420 100644
--- a/test/shell/lvconvert-raid.sh
+++ b/test/shell/lvconvert-raid.sh
@@ -121,6 +121,12 @@ check active $vg $lv2
 # FIXME: ensure no residual devices
 lvremove -ff $vg
 
+# 4-way
+lvcreate --type raid1 -m 4 -l 2 -n $lv1 $vg
+aux wait_for_sync $vg $lv1
+lvconvert --yes --splitmirrors 1 --name $lv2 $vg/$lv1 "$dev2"
+lvremove -ff $vg
+
 ###########################################
 # RAID1 split + trackchanges / merge
 ###########################################
diff --git a/test/shell/lvconvert-repair-thin.sh b/test/shell/lvconvert-repair-thin.sh
index 0e9534b..73f061c 100644
--- a/test/shell/lvconvert-repair-thin.sh
+++ b/test/shell/lvconvert-repair-thin.sh
@@ -24,7 +24,9 @@ aux have_thin 1 0 0 || skip
 aux prepare_vg 4
 
 # Create LV
-lvcreate -T -L20 -V10 -n $lv1 $vg/pool  "$dev1" "$dev2"
+# TODO: investigate problem with --zero n and my repairable damage trick
+#lvcreate -T -L20 -V10 -n $lv1 $vg/pool --discards ignore --zero n --chunksize 128 "$dev1" "$dev2"
+lvcreate -T -L20 -V10 -n $lv1 $vg/pool --chunksize 128 --discards ignore "$dev1" "$dev2"
 lvcreate -T -V10 -n $lv2 $vg/pool
 
 mkfs.ext2 "$DM_DEV_DIR/$vg/$lv1"
@@ -71,6 +73,11 @@ lvchange -an $vg
 # Swap repaired metadata back
 lvconvert -y -f --poolmetadata $vg/fixed --thinpool $vg/pool
 
+# Check pool still preserves its original settings
+check lv_field $vg/pool chunksize "128.00k"
+check lv_field $vg/pool discards "ignore"
+check lv_field $vg/pool zero "zero"
+
 # Activate pool - this should now work
 vgchange -ay $vg
 
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index c7acd5d..20f017a 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -2956,6 +2956,7 @@ static int _lvconvert_pool(struct cmd_context *cmd,
 			return 0;
 		}
 
+		lp->passed_args |= PASS_ARG_CHUNK_SIZE | PASS_ARG_DISCARDS | PASS_ARG_ZERO;
 		seg = first_seg(pool_lv);
 
 		/* Normally do NOT change chunk size when swapping */