 WHATS_NEW                                          |   2 +
 WHATS_NEW_DM                                       |   1 +
 lib/metadata/lv.c                                  |  11 +-
 lib/metadata/lv_manip.c                            |  18 +-
 lib/metadata/metadata-exported.h                   |   1 +
 lib/metadata/raid_manip.c                          | 354 +++++++++++----------
 lib/raid/raid.c                                    |  39 +--
 lib/report/report.c                                |  10 +-
 libdm/libdm-targets.c                              |  18 ++
 scripts/fsadm.sh                                   |   1 +
 test/lib/check.sh                                  |   2 +-
 test/shell/fsadm.sh                                |   1 +
 test/shell/lvconvert-raid-status-validation.sh     | 127 ++++++++
 .../shell/lvconvert-raid-takeover-alloc-failure.sh |   9 +-
 test/shell/lvconvert-raid-takeover-thin.sh         |  72 +++++
 tools/lvresize.c                                   |   1 +
 16 files changed, 465 insertions(+), 202 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index b2796f6..e60380a 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,7 @@
 Version 2.02.172 - 
 ===============================
+  Reenable conversion of data and metadata thin-pool volumes to raid.
+  Improve raid status reporting with lvs.
   No longer necessary to '--force' a repair for RAID1
   Linear to RAID1 upconverts now use "recover" sync action, not "resync".
   Improve lvcreate --cachepool arg validation.
diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
index 5718ab7..581cd42 100644
--- a/WHATS_NEW_DM
+++ b/WHATS_NEW_DM
@@ -1,5 +1,6 @@
 Version 1.02.141 - 
 ===============================
+  dm_get_status_raid() better handles some inconsistent md statuses.
   Accept truncated files in calls to dm_stats_update_regions_from_fd().
   Restore Warning by 5% increment when thin-pool is over 80% (1.02.138).
 
diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c
index b24c4aa..c87bb6b 100644
--- a/lib/metadata/lv.c
+++ b/lib/metadata/lv.c
@@ -395,6 +395,15 @@ dm_percent_t lvseg_percent_with_info_and_seg_status(const struct lv_with_info_an
 			}
 		}
 		break;
+	case SEG_STATUS_RAID:
+		switch (type) {
+		case PERCENT_GET_DIRTY:
+			p = dm_make_percent(s->raid->insync_regions, s->raid->total_regions);
+			break;
+		default:
+			p = DM_PERCENT_INVALID;
+		}
+		break;
 	case SEG_STATUS_SNAPSHOT:
 		if (s->snapshot->merge_failed)
 			p = DM_PERCENT_INVALID;
@@ -1087,7 +1096,7 @@ int lv_raid_healthy(const struct logical_volume *lv)
 	}
 
 	if (!seg_is_raid(raid_seg)) {
-		log_error("%s on %s is not a RAID segment",
+		log_error(INTERNAL_ERROR "%s on %s is not a RAID segment.",
 			  raid_seg->lv->name, lv->name);
 		return 0;
 	}
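
The new SEG_STATUS_RAID case above derives the copy/dirty percentage from the in-sync vs. total region counters reported by the dm raid status. A minimal standalone sketch of that fixed-point percentage calculation (the helper name and the parts-per-million scale are illustrative assumptions, not the libdm definitions):

#include <stdint.h>
#include <stdio.h>

/* Assumed fixed-point percent scale: 100% == 1000000 (parts per million). */
#define PCT_100		1000000
#define PCT_INVALID	(-1)

static int32_t make_percent(uint64_t numerator, uint64_t denominator)
{
	if (!denominator)
		return PCT_INVALID;

	/* Scale before dividing to keep integer precision. */
	return (int32_t) ((numerator * PCT_100) / denominator);
}

int main(void)
{
	/* e.g. a raid1 LV with 512 of 2048 regions in sync -> 25.00% */
	int32_t p = make_percent(512, 2048);

	printf("copy%%: %d.%02d%%\n", p / (PCT_100 / 100),
	       (p % (PCT_100 / 100)) / 100);
	return 0;
}
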
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 8f38839..c431868 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -4578,6 +4578,7 @@ enum fsadm_cmd_e { FSADM_CMD_CHECK, FSADM_CMD_RESIZE };
 static int _fsadm_cmd(enum fsadm_cmd_e fcmd,
 		      struct logical_volume *lv,
 		      uint32_t extents,
+		      int yes,
 		      int force,
 		      int *status)
 {
@@ -4585,7 +4586,7 @@ static int _fsadm_cmd(enum fsadm_cmd_e fcmd,
 	struct cmd_context *cmd = vg->cmd;
 	char lv_path[PATH_MAX];
 	char size_buf[SIZE_BUF];
-	const char *argv[FSADM_CMD_MAX_ARGS + 2];
+	const char *argv[FSADM_CMD_MAX_ARGS + 4];
 	unsigned i = 0;
 
 	argv[i++] = find_config_tree_str(cmd, global_fsadm_executable_CFG, NULL);
@@ -4596,6 +4597,9 @@ static int _fsadm_cmd(enum fsadm_cmd_e fcmd,
 	if (verbose_level() >= _LOG_NOTICE)
 		argv[i++] = "--verbose";
 
+	if (yes)
+		argv[i++] = "--yes";
+
 	if (force)
 		argv[i++] = "--force";
 
@@ -5498,7 +5502,7 @@ int lv_resize(struct logical_volume *lv,
 
 	if (lp->resizefs) {
 		if (!lp->nofsck &&
-		    !_fsadm_cmd(FSADM_CMD_CHECK, lv, 0, lp->force, &status)) {
+		    !_fsadm_cmd(FSADM_CMD_CHECK, lv, 0, lp->yes, lp->force, &status)) {
 			if (status != FSADM_CHECK_FAILS_FOR_MOUNTED) {
 				log_error("Filesystem check failed.");
 				return 0;
@@ -5508,7 +5512,7 @@ int lv_resize(struct logical_volume *lv,
 
 		/* FIXME forks here */
 		if ((lp->resize == LV_REDUCE) &&
-		    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->force, NULL)) {
+		    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->yes, lp->force, NULL)) {
 			log_error("Filesystem resize failed.");
 			return 0;
 		}
@@ -5589,7 +5593,7 @@ out:
 				display_lvname(lv));
 
 	if (lp->resizefs && (lp->resize == LV_EXTEND) &&
-	    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->force, NULL))
+	    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->yes, lp->force, NULL))
 		return_0;
 
 	ret = 1;
@@ -6368,6 +6372,12 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
 	if (!vg_write(vg))
 		return_0;
 
+	if (lock_lv != lv) {
+		log_debug_activation("Dropping origin_only for %s as lock holds %s",
+				     display_lvname(lv), display_lvname(lock_lv));
+		origin_only = 0;
+	}
+
 	if (!(origin_only ? suspend_lv_origin(vg->cmd, lock_lv) : suspend_lv(vg->cmd, lock_lv))) {
 		log_error("Failed to lock logical volume %s.",
 			  display_lvname(lock_lv));
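
_fsadm_cmd() above now forwards --yes next to --force, and the argv array was widened to make room for the extra flag. A small self-contained sketch of the same pattern of assembling an exec-style argv with optional switches (the tool name, flags and device path are placeholders, not the fsadm interface):

#include <stdio.h>

#define MAX_ARGS 8

int main(void)
{
	const char *argv[MAX_ARGS + 1];	/* +1 for the terminating NULL */
	unsigned i = 0;
	int verbose = 1, yes = 1, force = 0;

	argv[i++] = "/usr/sbin/some_tool";	/* placeholder executable */
	argv[i++] = "resize";

	/* Optional switches are appended only when requested. */
	if (verbose)
		argv[i++] = "--verbose";
	if (yes)
		argv[i++] = "--yes";
	if (force)
		argv[i++] = "--force";

	argv[i++] = "/dev/vg/lv";		/* placeholder device path */
	argv[i] = NULL;				/* execv()-style terminator */

	for (i = 0; argv[i]; i++)
		printf("%s%s", argv[i], argv[i + 1] ? " " : "\n");

	return 0;
}
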
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index c4bebd0..6c3d8d7 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -649,6 +649,7 @@ struct lvresize_params {
 	int use_policies;
 
 	alloc_policy_t alloc;
+	int yes;
 	int force;
 	int nosync;
 	int nofsck;
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index ade27e6..9e4f3a3 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -75,6 +75,24 @@ static int _rebuild_with_emptymeta_is_supported(struct cmd_context *cmd,
 	return 1;
 }
 
+/* https://bugzilla.redhat.com/1447812 check open count of @lv vs. @open_count */
+static int _check_lv_open_count(struct logical_volume *lv, int open_count) {
+	struct lvinfo info = { 0 };
+
+	if (!lv_info(lv->vg->cmd, lv, 0, &info, 1, 0)) {
+		log_error("lv_info failed: aborting.");
+		return 0;
+	}
+	if (info.open_count != open_count) {
+		log_error("Reshape is only supported when %s is not in use (e.g. unmount filesystem).",
+			  display_lvname(lv));
+		return 0;
+	}
+
+	return 1;
+}
+
+
 /*
  * Ensure region size exceeds the minimum for @lv because
  * MD's bitmap is limited to tracking 2^21 regions.
@@ -442,7 +460,7 @@ static int _raid_remove_top_layer(struct logical_volume *lv,
 
 	if (!(lvl_array = dm_pool_alloc(lv->vg->vgmem, 2 * sizeof(*lvl)))) {
 		log_error("Memory allocation failed.");
-		return_0;
+		return 0;
 	}
 
 	/* Add last metadata area to removal_lvs */
@@ -534,10 +552,20 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
 	fn_on_lv_t fn_pre_on_lv = NULL, fn_post_on_lv;
 	void *fn_pre_data, *fn_post_data = NULL;
 	struct dm_list *removal_lvs;
+	const struct logical_volume *lock_lv = lv_lock_holder(lv);
 
 	va_start(ap, origin_only);
 	removal_lvs = va_arg(ap, struct dm_list *);
 
+	if (lock_lv != lv) {
+		log_debug_activation("Dropping origin_only for %s as lock holds %s",
+				     display_lvname(lv), display_lvname(lock_lv));
+		origin_only = 0;
+	}
+
+	/* TODO/FIXME:  this function should be simplified to just call
+	 * lv_update_and_reload() and cleanup of remaining LVs */
+
 	/* Retrieve post/pre functions and post/pre data reference from variable arguments, if any */
 	if ((fn_post_on_lv = va_arg(ap, fn_on_lv_t))) {
 		fn_post_data = va_arg(ap, void *);
@@ -545,11 +573,13 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
 			fn_pre_data = va_arg(ap, void *);
 	}
 
+	va_end(ap);
+
 	/* Call any fn_pre_on_lv before the first update and reload call (e.g. to rename LVs) */
 	/* returns 1: ok+ask caller to update, 2: metadata commited+ask caller to resume */
 	if (fn_pre_on_lv && !(r = fn_pre_on_lv(lv, fn_pre_data))) {
 		log_error(INTERNAL_ERROR "Pre callout function failed.");
-		goto err;
+		return 0;
 	}
 
 	if (r == 2) {
@@ -557,19 +587,19 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
 		 * Returning 2 from pre function -> lv is suspended and
 		 * metadata got updated, don't need to do it again
 		 */
-		if (!(r = (origin_only ? resume_lv_origin(lv->vg->cmd, lv_lock_holder(lv)) :
-					 resume_lv(lv->vg->cmd, lv_lock_holder(lv))))) {
+		if (!(r = (origin_only ? resume_lv_origin(lv->vg->cmd, lock_lv) :
+					 resume_lv(lv->vg->cmd, lock_lv)))) {
 			log_error("Failed to resume %s.", display_lvname(lv));
-			goto err;
+			return 0;
 		}
 
 	/* Update metadata and reload mappings including flags (e.g. LV_REBUILD, LV_RESHAPE_DELTA_DISKS_PLUS) */
 	} else if (!(r = (origin_only ? lv_update_and_reload_origin(lv) : lv_update_and_reload(lv))))
-		goto err;
+		return_0;
 
 	/* Eliminate any residual LV and don't commit the metadata */
 	if (!(r = _eliminate_extracted_lvs_optional_write_vg(lv->vg, removal_lvs, 0)))
-		goto err;
+		return_0;
 
 	/*
 	 * Now that any 'REBUILD' or 'RESHAPE_DELTA_DISKS' etc.
@@ -582,25 +612,22 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
 	 */
 	log_debug_metadata("Clearing any flags for %s passed to the kernel.", display_lvname(lv));
 	if (!(r = _reset_flags_passed_to_kernel(lv, &flags_reset)))
-		goto err;
+		return_0;
 
 	/* Call any @fn_post_on_lv before the second update call (e.g. to rename LVs back) */
 	if (fn_post_on_lv && !(r = fn_post_on_lv(lv, fn_post_data))) {
 		log_error("Post callout function failed.");
-		goto err;
+		return 0;
 	}
 
 	/* Update and reload to clear out reset flags in the metadata and in the kernel */
 	log_debug_metadata("Updating metadata mappings for %s.", display_lvname(lv));
 	if ((r != 2 || flags_reset) && !(r = (origin_only ? lv_update_and_reload_origin(lv) : lv_update_and_reload(lv)))) {
 		log_error(INTERNAL_ERROR "Update of LV %s failed.", display_lvname(lv));
-		goto err;
+		return 0;
 	}
 
-	r = 1;
-err:
-	va_end(ap);
-	return r;
+	return 1;
 }
 
 /*
@@ -622,6 +649,12 @@ static int _lv_update_and_reload_list(struct logical_volume *lv, int origin_only
 	struct lv_list *lvl;
 	int r;
 
+	if (lock_lv != lv) {
+		log_debug_activation("Dropping origin_only for %s as lock holds %s",
+				     display_lvname(lv), display_lvname(lock_lv));
+		origin_only = 0;
+	}
+
 	log_very_verbose("Updating logical volume %s on disk(s)%s.",
 			 display_lvname(lock_lv), origin_only ? " (origin only)": "");
 
@@ -879,7 +912,7 @@ static int _reorder_raid10_near_seg_areas(struct lv_segment *seg, enum raid0_rai
 		break;
 
 	default:
-		return 0;
+		return_0;
 	}
 
 	/* Sort areas */
@@ -1558,7 +1591,7 @@ static int _lv_alloc_reshape_space(struct logical_volume *lv,
 		lv->size = lv_size_cur;
 		/* pay attention to lv_extend maybe having allocated more because of layout specific rounding */
 		if (!_lv_set_reshape_len(lv, _lv_total_rimage_len(lv) - prev_rimage_len))
-			return 0;
+			return_0;
 	}
 
 	/* Preset data offset in case we fail relocating reshape space below */
@@ -1635,7 +1668,7 @@ static int _lv_free_reshape_space_with_status(struct logical_volume *lv, enum al
 		seg->extents_copied = first_seg(lv)->area_len;
 
 		if (!_lv_set_reshape_len(lv, 0))
-			return 0;
+			return_0;
 
 		/*
 		 * Only in case reshape space was freed at the beginning,
@@ -1689,7 +1722,7 @@ static int _reshaped_state(struct logical_volume *lv, const unsigned dev_count,
 		return_0;
 
 	if (!_get_dev_health(lv, &kernel_devs, devs_health, devs_in_sync, NULL))
-		return 0;
+		return_0;
 
 	if (kernel_devs == dev_count)
 		return 1;
@@ -1740,7 +1773,7 @@ static int _reshape_adjust_to_size(struct logical_volume *lv,
 	uint32_t new_le_count;
 
 	if (!_lv_reshape_get_new_len(lv, old_image_count, new_image_count, &new_le_count))
-		return 0;
+		return_0;
 
 	/* Externally visible LV size w/o reshape space */
 	lv->le_count = seg->len = new_le_count;
@@ -1803,7 +1836,7 @@ static int _raid_reshape_add_images(struct logical_volume *lv,
 	}
 
 	if (!_lv_reshape_get_new_len(lv, old_image_count, new_image_count, &grown_le_count))
-		return 0;
+		return_0;
 
 	current_le_count = lv->le_count - _reshape_len_per_lv(lv);
 	grown_le_count -= _reshape_len_per_dev(seg) * _data_rimages_count(seg, new_image_count);
@@ -1828,7 +1861,7 @@ static int _raid_reshape_add_images(struct logical_volume *lv,
 			   new_image_count - old_image_count, new_image_count - old_image_count > 1 ? "s" : "",
 			   display_lvname(lv));
 	if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, NULL, 0, 0))
-		return 0;
+		return_0;
 
 	/* Reshape adding image component pairs -> change sizes/counters accordingly */
 	if (!_reshape_adjust_to_size(lv, old_image_count, new_image_count)) {
@@ -1839,7 +1872,7 @@ static int _raid_reshape_add_images(struct logical_volume *lv,
 	/* Allocate forward out of place reshape space at the beginning of all data image LVs */
 	log_debug_metadata("(Re)allocating reshape space for %s.", display_lvname(lv));
 	if (!_lv_alloc_reshape_space(lv, alloc_begin, NULL, allocate_pvs))
-		return 0;
+		return_0;
 
 	/*
 	 * Reshape adding image component pairs:
@@ -1914,7 +1947,7 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
 		}
 
 		if (!_lv_reshape_get_new_len(lv, old_image_count, new_image_count, &reduced_le_count))
-			return 0;
+			return_0;
 
 		reduced_le_count -= seg->reshape_len * _data_rimages_count(seg, new_image_count);
 		current_le_count = lv->le_count - seg->reshape_len * _data_rimages_count(seg, old_image_count);
@@ -1936,7 +1969,7 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
 			 new_stripes, display_lvname(lv));
 
 		if (!force) {
-			log_warn("WARNING: Can't remove stripes without --force option.");
+			log_error("Can't remove stripes without --force option.");
 			return 0;
 		}
 
@@ -1952,7 +1985,7 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
 		 * to remove disks from a raid set
 		 */
 		if (!_lv_alloc_reshape_space(lv, alloc_end, NULL, allocate_pvs))
-			return 0;
+			return_0;
 
 		/* Flag all disks past new images as delta disks minus to kernel */
 		for (s = new_image_count; s < old_image_count; s++)
@@ -1998,7 +2031,7 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
 				   old_image_count - new_image_count, old_image_count - new_image_count > 1 ? "s" : "",
 				   display_lvname(lv));
 		if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, removal_lvs, 0, 0))
-			return 0;
+			return_0;
 
 		seg->area_count = new_image_count;
 		break;
@@ -2069,7 +2102,7 @@ static int _raid_reshape_keep_images(struct logical_volume *lv,
 
 	if (alloc_reshape_space &&
 	    !_lv_alloc_reshape_space(lv, where, NULL, allocate_pvs))
-		return 0;
+		return_0;
 
 	seg->segtype = new_segtype;
 
@@ -2084,15 +2117,22 @@ static int _vg_write_lv_suspend_commit_backup(struct volume_group *vg,
 					      struct logical_volume *lv,
 					      int origin_only, int do_backup)
 {
+	const struct logical_volume *lock_lv = lv_lock_holder(lv);
 	int r = 1;
 
+	if (lock_lv != lv) {
+		log_debug_activation("Dropping origin_only for %s as lock holds %s",
+				     display_lvname(lv), display_lvname(lock_lv));
+		origin_only = 0;
+	}
+
 	if (!vg_write(vg)) {
 		log_error("Write of VG %s failed.", vg->name);
 		return_0;
 	}
 
-	if (lv && !(r = (origin_only ? suspend_lv_origin(vg->cmd, lv_lock_holder(lv)) :
-				       suspend_lv(vg->cmd, lv_lock_holder(lv))))) {
+	if (lv && !(r = (origin_only ? suspend_lv_origin(vg->cmd, lock_lv) :
+				       suspend_lv(vg->cmd, lock_lv)))) {
 		log_error("Failed to suspend %s before committing changes.",
 			  display_lvname(lv));
 		vg_revert(lv->vg);
@@ -2133,24 +2173,6 @@ static int _activate_sub_lv_excl_local(struct logical_volume *lv)
 	return 1;
 }
 
-/* Helper: function to activate any sub LVs of @lv exclusively local starting with area indexed by @start_idx */
-static int _activate_sub_lvs_excl_local(struct logical_volume *lv, uint32_t start_idx)
-{
-	uint32_t s;
-	struct lv_segment *seg = first_seg(lv);
-
-	/* seg->area_count may be 0 here! */
-	log_debug_metadata("Activating %u image component%s of LV %s.",
-			   seg->area_count - start_idx, seg->meta_areas ? " pairs" : "s",
-			   display_lvname(lv));
-	for (s = start_idx; s < seg->area_count; s++)
-		if (!_activate_sub_lv_excl_local(seg_lv(seg, s)) ||
-		    (seg->meta_areas && !_activate_sub_lv_excl_local(seg_metalv(seg, s))))
-			return_0;
-
-	return 1;
-}
-
 /* Helper: function to activate any LVs on @lv_list */
 static int _activate_sub_lvs_excl_local_list(struct logical_volume *lv, struct dm_list *lv_list)
 {
@@ -2169,20 +2191,6 @@ static int _activate_sub_lvs_excl_local_list(struct logical_volume *lv, struct d
 	return r;
 }
 
-/* Helper: callback function to activate image component pairs of @lv to update size after reshape space allocation */
-static int _pre_raid_reactivate_legs(struct logical_volume *lv, void *data)
-{
-	if (!_vg_write_lv_suspend_vg_commit(lv, 1))
-		return 0;
-
-	/* Reload any changed image component pairs for out-of-place reshape space */
-	if (!_activate_sub_lvs_excl_local(lv, 0))
-		return 0;
-
-	/* 1: ok+ask caller to update, 2: metadata commited+ask caller to resume */
-	return 2;
-}
-
 /* Helper: callback function to activate any rmetas on @data list */
 __attribute__ ((__unused__))
 static int _pre_raid0_remove_rmeta(struct logical_volume *lv, void *data)
@@ -2190,26 +2198,12 @@ static int _pre_raid0_remove_rmeta(struct logical_volume *lv, void *data)
 	struct dm_list *lv_list = data;
 
 	if (!_vg_write_lv_suspend_vg_commit(lv, 1))
-		return 0;
+		return_0;
 
 	/* 1: ok+ask caller to update, 2: metadata commited+ask caller to resume */
 	return _activate_sub_lvs_excl_local_list(lv, lv_list) ? 2 : 0;
 }
 
-/* Helper: callback dummy needed for takeover+reshape */
-static int _post_raid_reshape(struct logical_volume *lv, void *data)
-{
-	/* 1: ask caller to update, 2: don't ask caller to update */
-	return 1;
-}
-
-/* Helper: callback dummy needed for takeover+reshape */
-static int _post_raid_takeover(struct logical_volume *lv, void *data)
-{
-	/* 1: ask caller to update, 2: don't ask caller to update */
-	return 2;
-}
-
 /*
  * Reshape logical volume @lv by adding/removing stripes
  * (absolute new stripes given in @new_stripes), changing
@@ -2251,7 +2245,7 @@ static int _raid_reshape(struct logical_volume *lv,
 		return_0;
 
 	if (!_check_region_size_constraints(lv, new_segtype, new_region_size, new_stripe_size))
-		return 0;
+		return_0;
 
 	if (!_raid_in_sync(lv)) {
 		log_error("Unable to convert %s while it is not in-sync.",
@@ -2346,30 +2340,18 @@ static int _raid_reshape(struct logical_volume *lv,
 
 	/* Handle disk addition reshaping */
 	if (old_image_count < new_image_count) {
-		/* FIXME: remove once MD kernel rhbz1443999 got fixed. */
-		if (sysconf(_SC_NPROCESSORS_ONLN) < 2) {
-			log_error("Can't add stripes to LV %s on single core.", display_lvname(lv));
-			return 0;
-		}
-
 		if (!_raid_reshape_add_images(lv, new_segtype, yes,
 					      old_image_count, new_image_count,
 					      new_stripes, new_stripe_size, allocate_pvs))
-			return 0;
+			return_0;
 
 	/* Handle disk removal reshaping */
 	} else if (old_image_count > new_image_count) {
-		/* FIXME: remove once MD kernel rhbz1443999 got fixed. */
-		if (sysconf(_SC_NPROCESSORS_ONLN) < 2) {
-			log_error("Can't remove stripes from LV %s on single core.", display_lvname(lv));
-			return 0;
-		}
-
 		if (!_raid_reshape_remove_images(lv, new_segtype, yes, force,
 						 old_image_count, new_image_count,
 						 new_stripes, new_stripe_size,
 						 allocate_pvs, &removal_lvs))
-			return 0;
+			return_0;
 
 	/*
 	 * Handle raid set layout reshaping w/o changing # of legs (allocation algorithm or stripe size change)
@@ -2377,20 +2359,22 @@ static int _raid_reshape(struct logical_volume *lv,
 	 */
 	} else if (!_raid_reshape_keep_images(lv, new_segtype, yes, force, &force_repair,
 					      new_data_copies, new_stripe_size, allocate_pvs))
-		return 0;
+		return_0;
 
 	/* HM FIXME: workaround for not resetting "nosync" flag */
 	init_mirror_in_sync(0);
 
 	seg->region_size = new_region_size;
 
+	/* https://bugzilla.redhat.com/1447812 also check open count */
+	if (!_check_lv_open_count(lv, 1))
+		return_0;
+
 	if (seg->area_count != 2 || old_image_count != seg->area_count) {
-		if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, 0, &removal_lvs,
-							       _post_raid_reshape, NULL,
-							       _pre_raid_reactivate_legs, NULL))
-			return 0;
+		if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, 0, &removal_lvs, NULL))
+			return_0;
 	} if (!_vg_write_commit_backup(lv->vg))
-		return 0;
+		return_0;
 
 	return 1; 
 	/* FIXME force_repair ? _lv_cond_repair(lv) : 1; */
@@ -2565,14 +2549,6 @@ static int _raid_add_images_without_commit(struct logical_volume *lv,
 		return 0;
 	}
 
-	if (lv_is_active(lv_lock_holder(lv)) &&
-	    (old_count == 1) &&
-	    (lv_is_thin_pool_data(lv) || lv_is_thin_pool_metadata(lv))) {
-		log_error("Can't add image to active thin pool LV %s yet. Deactivate first.",
-			  display_lvname(lv));
-		return 0;
-	}
-
 	if (!archive(lv->vg))
 		return_0;
 
@@ -3233,11 +3209,13 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name,
 	}
 
 	/* Split on a 2-legged raid1 LV causes losing all resilience */
-	if (new_count == 1 &&
-	    !yes && yes_no_prompt("Are you sure you want to split %s LV %s losing all resilience? [y/n]: ",
-				  lvseg_name(first_seg(lv)), display_lvname(lv)) == 'n') {
-		log_error("Logical volume %s NOT split.", display_lvname(lv));
-		return 0;
+	if (new_count == 1) {
+		if (!yes && yes_no_prompt("Are you sure you want to split %s LV %s losing all resilience? [y/n]: ",
+					  lvseg_name(first_seg(lv)), display_lvname(lv)) == 'n') {
+			log_error("Logical volume %s NOT split.", display_lvname(lv));
+			return 0;
+		}
+		log_verbose("Losing all resilience for logical volume %s.", display_lvname(lv));
 	}
 
 	/*
@@ -3375,11 +3353,14 @@ int lv_raid_split_and_track(struct logical_volume *lv,
 	}
 
 	/* Split and track changes on a 2-legged raid1 LV causes losing resilience for newly written data. */
-	if (seg->area_count == 2 &&
-	    !yes && yes_no_prompt("Are you sure you want to split and track %s LV %s losing resilience for any newly written data? [y/n]: ",
-				  lvseg_name(seg), display_lvname(lv)) == 'n') {
-		log_error("Logical volume %s NOT split.", display_lvname(lv));
-		return 0;
+	if (seg->area_count == 2) {
+		if (!yes && yes_no_prompt("Are you sure you want to split and track %s LV %s losing resilience for any newly written data? [y/n]: ",
+					  lvseg_name(seg), display_lvname(lv)) == 'n') {
+			log_error("Logical volume %s NOT split.", display_lvname(lv));
+			return 0;
+		}
+		log_verbose("Losing resilience for newly written data on logical volume %s.",
+			    display_lvname(lv));
 	}
 
 	for (s = seg->area_count - 1; s >= 0; --s) {
@@ -3408,7 +3389,7 @@ int lv_raid_split_and_track(struct logical_volume *lv,
 		return_0;
 
 	if (seg->area_count == 2)
-		log_warn("Any newly written data will be non-resilient on LV %s during the split!",
+		log_warn("WARNING: Any newly written data will be non-resilient on LV %s during the split!",
 			 display_lvname(lv));
 
 	log_print_unless_silent("Use 'lvconvert --merge %s' to merge back into %s.",
@@ -3568,7 +3549,7 @@ static int _add_image_component_list(struct lv_segment *seg, int delete_from_lis
 		if (delete_from_list)
 			dm_list_del(&lvl->list);
 		if (!_add_component_lv(seg, lvl->lv, lv_flags, s++))
-			return 0;
+			return_0;
 	}
 
 	return 1;
@@ -3674,7 +3655,7 @@ static int _extract_image_component_sublist(struct lv_segment *seg,
 
 	for (s = idx; s < end; s++) {
 		if (!_extract_image_component_error_seg(seg, type, s, &lvl->lv, error_seg))
-			return 0;
+			return_0;
 
 		dm_list_add(removal_lvs, &lvl->list);
 		lvl++;
@@ -3831,7 +3812,7 @@ static int _raid0_add_or_remove_metadata_lvs(struct logical_volume *lv,
 		new_raid_type_flag = SEG_RAID0;
 	} else {
 		if (!_alloc_and_add_rmeta_devs_for_lv(lv, allocate_pvs))
-			return 0;
+			return_0;
 
 		new_raid_type_flag = SEG_RAID0_META;
 	}
@@ -4002,7 +3983,6 @@ static int _convert_raid1_to_mirror(struct logical_volume *lv,
 				    uint32_t new_image_count,
 				    uint32_t new_region_size,
 				    struct dm_list *allocate_pvs,
-				    int update_and_reload,
 				    struct dm_list *removal_lvs)
 {
 	struct logical_volume *log_lv;
@@ -4042,14 +4022,14 @@ static int _convert_raid1_to_mirror(struct logical_volume *lv,
 	/* Remove rmeta LVs */
 	log_debug_metadata("Extracting and renaming metadata LVs.");
 	if (!_extract_image_component_list(seg, RAID_META, 0, removal_lvs))
-		return 0;
+		return_0;
 
 	seg->meta_areas = NULL;
 
 	/* Rename all data sub LVs from "*_rimage_*" to "*_mimage_*" and set their status */
 	log_debug_metadata("Adjust data LVs of %s.", display_lvname(lv));
 	if (!_adjust_data_lvs(lv, RAID1_TO_MIRROR))
-		return 0;
+		return_0;
 
 	seg->segtype = new_segtype;
 	seg->region_size = new_region_size;
@@ -4059,7 +4039,10 @@ static int _convert_raid1_to_mirror(struct logical_volume *lv,
 	if (!attach_mirror_log(first_seg(lv), log_lv))
 		return_0;
 
-	return update_and_reload ? _lv_update_reload_fns_reset_eliminate_lvs(lv, 0, removal_lvs, NULL) : 1;
+	if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, 0, removal_lvs, NULL))
+		return_0;
+
+	return 1;
 }
 
 /*
@@ -4543,7 +4526,7 @@ static int _process_type_flags(const struct logical_volume *lv, struct possible_
 		    !(t & seg->segtype->flags) &&
 		     ((segtype = get_segtype_from_flag(lv->vg->cmd, t))))
 			if (!tfn(processed_segtypes, data ? : (void *) segtype))
-				return 0;
+				return_0;
 	}
 
 	return 1;
@@ -4794,7 +4777,7 @@ static int _raid1_to_mirrored_wrapper(TAKEOVER_FN_ARGS)
 		return_0;
 
 	return _convert_raid1_to_mirror(lv, new_segtype, new_image_count, new_region_size,
-					allocate_pvs, 1, &removal_lvs);
+					allocate_pvs, &removal_lvs);
 }
 
 /*
@@ -4874,7 +4857,7 @@ static int _clear_meta_lvs(struct logical_volume *lv)
 	dm_list_iterate_items(lvl, &meta_lvs) {
 		lv_set_hidden(lvl->lv);
 		if (!set_lv_segment_area_lv(seg, s++, lvl->lv, 0, RAID_META))
-			return 0;
+			return_0;
 	}
 
 	return 1;
@@ -5028,7 +5011,7 @@ static int _raid45_to_raid54_wrapper(TAKEOVER_FN_ARGS)
 
 	/* Shift parity SubLV pair "PDD..." <-> "DD...P" on raid4 <-> raid5_n conversion */
 	if( !_shift_parity_dev(seg))
-		return 0;
+		return_0;
 
 	/* Don't resync */
 	init_mirror_in_sync(1);
@@ -5068,7 +5051,7 @@ static int _takeover_downconvert_wrapper(TAKEOVER_FN_ARGS)
 	}
 
 	if (!_check_region_size_constraints(lv, new_segtype, new_region_size, new_stripe_size))
-		return 0;
+		return_0;
 
 	if (seg_is_any_raid10(seg) && (seg->area_count % seg->data_copies)) {
 		log_error("Can't convert %s LV %s to %s with odd number of stripes.",
@@ -5113,12 +5096,12 @@ static int _takeover_downconvert_wrapper(TAKEOVER_FN_ARGS)
 	if (seg_is_raid4(seg)) {
 		/* Shift parity SubLV pair "PDD..." -> "DD...P" to be able to remove it off the end */
 		if (!_shift_parity_dev(seg))
-			return 0;
+			return_0;
 
 	} else if (seg_is_raid10_near(seg)) {
 		log_debug_metadata("Reordering areas for raid10 -> raid0 takeover.");
 		if (!_reorder_raid10_near_seg_areas(seg, reorder_from_raid10_near))
-			return 0;
+			return_0;
 	}
 
 	if (segtype_is_any_raid0(new_segtype) &&
@@ -5133,7 +5116,7 @@ static int _takeover_downconvert_wrapper(TAKEOVER_FN_ARGS)
 				   lv_raid_image_count(lv) - new_image_count,
 				   display_lvname(lv));
 		if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, &removal_lvs, 0, 0))
-			return 0;
+			return_0;
 
 		seg->area_count = new_image_count;
 	}
@@ -5283,7 +5266,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 	}
 
 	if (!_check_region_size_constraints(lv, new_segtype, new_region_size, new_stripe_size))
-		return 0;
+		return_0;
 
 	/* Archive metadata */
 	if (!archive(lv->vg))
@@ -5304,7 +5287,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 	if (seg_is_raid0(seg)) {
 		log_debug_metadata("Adding metadata LVs to %s.", display_lvname(lv));
 		if (!_raid0_add_or_remove_metadata_lvs(lv, 0 /* update_and_reload */, allocate_pvs, NULL))
-			return 0;
+			return_0;
 	}
 
 	/* Have to be cleared in conversion from raid0_meta -> raid4 or kernel will reject due to reordering disks */
@@ -5354,7 +5337,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 			if (!_eliminate_extracted_lvs(lv->vg, &removal_lvs)) /* Updates vg */
 				return_0;
 
-			return 0;
+			return_0;
 		}
 
 		seg = first_seg(lv);
@@ -5384,11 +5367,11 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 
 			if (!_raid45_to_raid54_wrapper(lv, raid5_n_segtype, 1 /* yes */, force, seg->area_count,
 						       1 /* data_copies */, 0, 0, 0, allocate_pvs))
-				return 0;
+				return_0;
 
 			if (!_drop_suffix(meta_lv->name, "_extracted") ||
 			    !_drop_suffix(data_lv->name, "_extracted"))
-				return 0;
+				return_0;
 
 			data_lv->status |= RAID_IMAGE;
 			meta_lv->status |= RAID_META;
@@ -5403,7 +5386,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 		} else if (segtype_is_raid5_n(new_segtype) &&
 			   !_raid45_to_raid54_wrapper(lv, raid5_n_segtype, yes, force, seg->area_count,
 						      1 /* data_copies */, 0, 0, 0, allocate_pvs))
-			return 0;
+			return_0;
 	}
 
 	seg->data_copies = new_data_copies;
@@ -5419,7 +5402,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 
 		log_debug_metadata("Reordering areas for raid0 -> raid10_near takeover.");
 		if (!_reorder_raid10_near_seg_areas(seg, reorder_to_raid10_near))
-			return 0;
+			return_0;
 		/* Set rebuild flags accordingly */
 		for (s = 0; s < seg->area_count; s++) {
 			seg_lv(seg, s)->status &= ~LV_REBUILD;
@@ -5436,10 +5419,8 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
 
 	log_debug_metadata("Updating VG metadata and reloading %s LV %s.",
 			   lvseg_name(seg), display_lvname(lv));
-	if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, 0, &removal_lvs,
-						       _post_raid_takeover, NULL,
-						       _pre_raid_reactivate_legs, NULL))
-		return 0;
+	if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, 0, &removal_lvs, NULL))
+		return_0;
 
 	if (segtype_is_raid4(new_segtype) &&
 	    seg->area_count != 2) {
@@ -5975,7 +5956,7 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
 	/* raid1 -> */
 	} else if (seg_is_raid1(seg_from) && !segtype_is_mirror(*segtype)) {
 		if (seg_from->area_count != 2) {
-			log_warn("Convert %s LV %s to 2 images first.",
+			log_error("Convert %s LV %s to 2 images first.",
 				 lvseg_name(seg_from), display_lvname(seg_from->lv));
 			return 0;
 
@@ -5991,8 +5972,8 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
 	} else if (seg_is_raid4(seg_from) || seg_is_any_raid5(seg_from)) {
 		if (segtype_is_raid1(*segtype) &&
 		    seg_from->area_count != 2) {
-			log_warn("Convert %s LV %s to 2 stripes first (i.e. --stripes 1).",
-				 lvseg_name(seg_from), display_lvname(seg_from->lv));
+			log_error("Convert %s LV %s to 2 stripes first (i.e. --stripes 1).",
+				  lvseg_name(seg_from), display_lvname(seg_from->lv));
 			return 0;
 
 		} else if (seg_is_raid4(seg_from) &&
@@ -6007,8 +5988,8 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
 
 		else if (segtype_is_raid10(*segtype)) {
 			if (seg_from->area_count < 3) {
-				log_warn("Convert %s LV %s to minimum 3 stripes first (i.e. --stripes 2).",
-					 lvseg_name(seg_from), display_lvname(seg_from->lv));
+				log_error("Convert %s LV %s to minimum 3 stripes first (i.e. --stripes 2).",
+					  lvseg_name(seg_from), display_lvname(seg_from->lv));
 				return 0;
 			}
 
@@ -6016,8 +5997,8 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
 
 		} else if (segtype_is_any_raid6(*segtype)) {
 			if (seg_from->area_count < 4) {
-				log_warn("Convert %s LV %s to minimum 4 stripes first (i.e. --stripes 3).",
-					 lvseg_name(seg_from), display_lvname(seg_from->lv));
+				log_error("Convert %s LV %s to minimum 4 stripes first (i.e. --stripes 3).",
+					  lvseg_name(seg_from), display_lvname(seg_from->lv));
 				return 0;
 
 			} else if (seg_is_raid4(seg_from) && !segtype_is_raid6_n_6(*segtype))
@@ -6053,12 +6034,12 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
 	/* -> raid1 */
 	} else if (!seg_is_mirror(seg_from) && segtype_is_raid1(*segtype)) {
 		if (!seg_is_raid4(seg_from) && !seg_is_any_raid5(seg_from)) {
-			log_warn("Convert %s LV %s to raid4/raid5 first.",
-				 lvseg_name(seg_from), display_lvname(seg_from->lv));
+			log_error("Convert %s LV %s to raid4/raid5 first.",
+				  lvseg_name(seg_from), display_lvname(seg_from->lv));
 			return 0;
 
 		} else if (seg_from->area_count != 2) {
-			log_warn("Convert %s LV %s to 2 stripes first (i.e. --stripes 1).",
+			log_error("Convert %s LV %s to 2 stripes first (i.e. --stripes 1).",
 				 lvseg_name(seg_from), display_lvname(seg_from->lv));
 			return 0;
 
@@ -6113,7 +6094,7 @@ static int _region_size_change_requested(struct logical_volume *lv, int yes, con
 	}
 
 	if (!_check_region_size_constraints(lv, seg->segtype, region_size, seg->stripe_size))
-		return 0;
+		return_0;
 
 	if (!_raid_in_sync(lv)) {
 		log_error("Unable to change region size on %s LV %s while it is not in-sync.",
@@ -6224,6 +6205,27 @@ static int _conversion_options_allowed(const struct lv_segment *seg_from,
 	return r;
 }
 
+/* https://bugzilla.redhat.com/1447812 try opening LV exclusively */
+static int _lv_open_excl(struct logical_volume *lv, struct device **dev) {
+	char *dev_path;
+	size_t sz = strlen(lv->vg->cmd->dev_dir) + strlen(lv->vg->name) + strlen(lv->name) + 2;
+
+	*dev = NULL;
+	if (!(dev_path = dm_pool_alloc(lv->vg->cmd->mem, sz)))
+		return_0;
+	if (dm_snprintf(dev_path, sz, "%s%s/%s", lv->vg->cmd->dev_dir, lv->vg->name, lv->name) < 0)
+		return_0;
+	if (!(*dev = dev_create_file(dev_path, NULL, NULL, 0)))
+		return_0;
+	if (!dev_open_flags(*dev, O_EXCL, 1, 1)) {
+		log_error("Reshape is only supported when %s is not in use (e.g. unmount filesystem).",
+			  display_lvname(lv));
+		return 0;
+	}
+
+	return 1;
+}
+
 /*
  * lv_raid_convert
  *
@@ -6273,6 +6275,7 @@ int lv_raid_convert(struct logical_volume *lv,
 	uint32_t region_size;
 	uint32_t data_copies = seg->data_copies;
 	uint32_t available_slvs, removed_slvs;
+	struct device *dev;
 	takeover_fn_t takeover_fn;
 
 	/* FIXME If not active, prompt and activate */
@@ -6282,6 +6285,12 @@ int lv_raid_convert(struct logical_volume *lv,
 		log_error("%s must be active to perform this operation.",
 			  display_lvname(lv));
 		return 0;
+	} else if (vg_is_clustered(lv->vg) &&
+		   !lv_is_active_exclusive_locally(lv_lock_holder(lv))) {
+		/* In clustered VGs, the LV must be active on this node exclusively. */
+		log_error("%s must be active exclusive locally to "
+			  "perform this operation.", display_lvname(lv));
+		return 0;
 	}
 
 	new_segtype = new_segtype ? : seg->segtype;
@@ -6318,6 +6327,12 @@ int lv_raid_convert(struct logical_volume *lv,
 					 new_stripes, new_stripe_size_supplied))
 		return _log_possible_conversion_types(lv, new_segtype);
 
+	/* https://bugzilla.redhat.com/1439399 */
+	if (lv_is_origin(lv)) {
+		log_error("Can't convert snapshot origin %s.", display_lvname(lv));
+		return 0;
+	}
+
 	/*
 	 * reshape of capable raid type requested
 	 */
@@ -6325,13 +6340,31 @@ int lv_raid_convert(struct logical_volume *lv,
 	case 0:
 		break;
 	case 1:
+		/* Conversion of reshapable raids in the cluster is not supported yet. */
+		if (locking_is_clustered()) {
+			log_error("Conversion of %s not supported in the cluster.", display_lvname(lv));
+			return 0;
+		}
+
+		/* https://bugzilla.redhat.com/1447812 reject reshape on open LV */
+		if (!_check_lv_open_count(lv, 0))
+			return_0;
+		if (!_lv_open_excl(lv, &dev))
+			return_0;
+		if (!_check_lv_open_count(lv, 1)) {
+			dev_close(dev);
+			return_0;
+		}
+
 		if (!_raid_reshape(lv, new_segtype, yes, force,
 				   data_copies, region_size,
 				   stripes, stripe_size, allocate_pvs)) {
+			dev_close(dev);
 			log_error("Reshape request failed on LV %s.", display_lvname(lv));
 			return 0;
 		}
 
+		dev_close(dev);
 		return 1;
 	case 2:
 		log_error("Invalid conversion request on %s.", display_lvname(lv));
@@ -6344,7 +6377,8 @@ int lv_raid_convert(struct logical_volume *lv,
 
 	/* Prohibit any takeover in case sub LVs to be removed still exist after a previous reshape */
 	if (!_get_available_removed_sublvs(lv, &available_slvs, &removed_slvs))
-		return 0;
+		return_0;
+
 	if (removed_slvs) {
 		log_error("Can't convert %s LV %s to %s containing sub LVs to remove after a reshape.",
 			  lvseg_name(seg), display_lvname(lv), new_segtype->name);
@@ -6387,13 +6421,6 @@ int lv_raid_convert(struct logical_volume *lv,
 		    (segtype_is_striped_target(new_segtype) &&
 		    (new_stripes == 1)) ? SEG_TYPE_NAME_LINEAR : new_segtype->name);
 
-	/* In clustered VGs, the LV must be active on this node exclusively. */
-	if (vg_is_clustered(lv->vg) && !lv_is_active_exclusive_locally(lv)) {
-		log_error("%s must be active exclusive locally to "
-			  "perform this operation.", display_lvname(lv));
-		return 0;
-	}
-
 	/* LV must be in sync. */
 	if (!_raid_in_sync(lv)) {
 		log_error("Unable to convert %s while it is not in-sync.",
@@ -6634,6 +6661,11 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
 	}
 
 	if (!match_count) {
+		if (remove_pvs && !dm_list_empty(remove_pvs)) {
+			log_error("Logical volume %s does not contain devices specified to %s.",
+				  display_lvname(lv), action_str);
+			return 0;
+		}
 		log_print_unless_silent("%s does not contain devices specified to %s.",
 					display_lvname(lv), action_str);
 		return 1;
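
The new _check_lv_open_count()/_lv_open_excl() helpers above reject a reshape while the LV is still in use by combining the device open count with an exclusive open of the LV node. A minimal standalone sketch of the O_EXCL probe on an arbitrary block device (the device path is only an example); the kernel refuses the exclusive open with EBUSY while, for instance, a filesystem is mounted on it:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Try to grab a block device exclusively; the kernel returns EBUSY
 * while another holder (e.g. a mounted filesystem) has it open. */
static int device_is_free(const char *path)
{
	int fd = open(path, O_RDONLY | O_EXCL);

	if (fd < 0) {
		fprintf(stderr, "open(%s, O_EXCL): %s\n", path, strerror(errno));
		return 0;
	}
	(void) close(fd);
	return 1;
}

int main(void)
{
	const char *dev = "/dev/vg/lv";	/* placeholder LV device node */

	if (!device_is_free(dev))
		fprintf(stderr, "%s is in use - unmount the filesystem first.\n", dev);

	return 0;
}
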
diff --git a/lib/raid/raid.c b/lib/raid/raid.c
index 8a53d7e..c5cfb0f 100644
--- a/lib/raid/raid.c
+++ b/lib/raid/raid.c
@@ -358,36 +358,21 @@ static int _raid_target_percent(void **target_state,
 				uint64_t *total_numerator,
 				uint64_t *total_denominator)
 {
-	int i;
-	uint64_t numerator, denominator;
-	char *pos = params;
-	/*
-	 * Status line:
-	 *    <raid_type> <#devs> <status_chars> <synced>/<total>
-	 * Example:
-	 *    raid1 2 AA 1024000/1024000
-	 */
-	for (i = 0; i < 3; i++) {
-		pos = strstr(pos, " ");
-		if (pos)
-			pos++;
-		else
-			break;
-	}
-	if (!pos || (sscanf(pos, FMTu64 "/" FMTu64 "%n", &numerator, &denominator, &i) != 2) ||
-	    !denominator) {
-		log_error("Failed to parse %s status fraction: %s",
-			  (seg) ? seg->segtype->name : "segment", params);
-		return 0;
-	}
+	struct dm_status_raid *sr;
+
+	if (!dm_get_status_raid(mem, params, &sr))
+		return_0;
 
-	*total_numerator += numerator;
-	*total_denominator += denominator;
+	*total_numerator += sr->insync_regions;
+	*total_denominator += sr->total_regions;
 
 	if (seg)
-		seg->extents_copied = (uint64_t) seg->area_len * dm_make_percent(numerator, denominator) / DM_PERCENT_100;
+		seg->extents_copied = (uint64_t) seg->area_len
+			* dm_make_percent(sr->insync_regions , sr->total_regions) / DM_PERCENT_100;
+
+	*percent = dm_make_percent(sr->insync_regions, sr->total_regions);
 
-	*percent = dm_make_percent(numerator, denominator);
+	dm_pool_free(mem, sr);
 
 	return 1;
 }
@@ -475,7 +460,7 @@ static int _raid_target_present(struct cmd_context *cmd,
 		{ 1, 7, 0, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
 		{ 1, 9, 0, RAID_FEATURE_SHRINK, "shrinking" },
 		{ 1, 9, 0, RAID_FEATURE_NEW_DEVICES_ACCEPT_REBUILD, "rebuild+emptymeta" },
-		{ 1, 10, 1, RAID_FEATURE_RESHAPE, "reshaping" },
+		{ 1, 12, 0, RAID_FEATURE_RESHAPE, "reshaping" },
 	};
 
 	static int _raid_checked = 0;
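
_raid_target_percent() above now delegates status parsing to dm_get_status_raid() instead of scanning the raw params string itself. For reference, a small standalone sketch of extracting the "<in-sync>/<total>" fraction from the fourth field of a dm raid status line, as the old parser did (the sample line and helper name are illustrative):

#include <inttypes.h>
#include <stdio.h>

/* Extract the "<in-sync>/<total>" fraction from a dm raid status line such as
 * "raid1 2 AA 1024000/1024000 idle 0 0 -" (the sample layout is illustrative). */
static int parse_sync_fraction(const char *params, uint64_t *insync, uint64_t *total)
{
	unsigned field = 0;
	const char *p = params;

	/* Skip the first three space-separated fields: type, #devices, health chars. */
	while (*p && field < 3)
		if (*p++ == ' ')
			field++;

	return sscanf(p, "%" SCNu64 "/%" SCNu64, insync, total) == 2 && *total;
}

int main(void)
{
	uint64_t num, den;

	if (parse_sync_fraction("raid1 2 AA 1024000/1024000 idle 0 0 -", &num, &den))
		printf("%" PRIu64 " of %" PRIu64 " in sync\n", num, den);

	return 0;
}
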
diff --git a/lib/report/report.c b/lib/report/report.c
index d9880b2..f61776e 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -3079,11 +3079,13 @@ static int _copypercent_disp(struct dm_report *rh,
 	dm_percent_t percent = DM_PERCENT_INVALID;
 
 	/* TODO: just cache passes through lvseg_percent... */
-	if (lv_is_cache(lv) || lv_is_used_cache_pool(lv))
+	if (lv_is_cache(lv) || lv_is_used_cache_pool(lv) ||
+	    (!lv_is_merging_origin(lv) && lv_is_raid(lv) && !seg_is_any_raid0(first_seg(lv))))
 		percent = lvseg_percent_with_info_and_seg_status(lvdm, PERCENT_GET_DIRTY);
-	else if (((lv_is_raid(lv) && !seg_is_any_raid0(first_seg(lv)) &&
-		   lv_raid_percent(lv, &percent)) ||
-		  (lv_is_mirror(lv) &&
+	else if (lv_is_raid(lv) && !seg_is_any_raid0(first_seg(lv)))
+		/* old way for percentage when merging snapshot into raid origin */
+		(void) lv_raid_percent(lv, &percent);
+	else if (((lv_is_mirror(lv) &&
 		   lv_mirror_percent(lv->vg->cmd, lv, 0, &percent, NULL))) &&
 		 (percent != DM_PERCENT_INVALID))
 		percent = copy_percent(lv);
diff --git a/libdm/libdm-targets.c b/libdm/libdm-targets.c
index 1709c2b..6577f07 100644
--- a/libdm/libdm-targets.c
+++ b/libdm/libdm-targets.c
@@ -99,6 +99,7 @@ int dm_get_status_raid(struct dm_pool *mem, const char *params,
 	unsigned num_fields;
 	const char *p, *pp, *msg_fields = "";
 	struct dm_status_raid *s = NULL;
+	unsigned a = 0;
 
 	if ((num_fields = _count_fields(params)) < 4)
 		goto_bad;
@@ -168,6 +169,23 @@ int dm_get_status_raid(struct dm_pool *mem, const char *params,
 out:
 	*status = s;
 
+	if (s->insync_regions == s->total_regions) {
+		/* FIXME: kernel gives misleading info here;
+		 * try to recognize the true state */
+		while (i-- > 0)
+			if (s->dev_health[i] == 'a')
+				a++; /* Count number of 'a' */
+
+		if (a && a < s->dev_count) {
+			/* SOME legs are in 'a' */
+			if (!strcasecmp(s->sync_action, "recover")
+			    || !strcasecmp(s->sync_action, "idle"))
+				/* Kernel may possibly start some action
+				 * in the near future, do not report 100% */
+				s->insync_regions--;
+		}
+	}
+
 	return 1;
 
 bad:
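
The block added above works around the kernel momentarily reporting all regions in sync while some, but not all, legs are still flagged 'a' and the sync action is "recover" or "idle"; in that window the in-sync counter is nudged down so callers never see a premature 100%. A self-contained sketch of the same heuristic over a plain health-character string (the struct and field names are simplified stand-ins for the libdm status fields):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

struct raid_status {		/* simplified stand-in for the dm status fields */
	unsigned dev_count;
	const char *dev_health;	/* one char per leg: 'A' in sync, 'a' alive but not in sync */
	const char *sync_action;
	uint64_t insync_regions;
	uint64_t total_regions;
};

static void fixup_misleading_status(struct raid_status *s)
{
	unsigned i, alive = 0;

	if (s->insync_regions != s->total_regions)
		return;			/* nothing suspicious to fix up */

	for (i = 0; i < s->dev_count; i++)
		if (s->dev_health[i] == 'a')
			alive++;

	/* Some, but not all, legs still 'a' while the kernel claims 100%:
	 * a recover may be about to start, so hold the report just below 100%. */
	if (alive && alive < s->dev_count &&
	    (!strcasecmp(s->sync_action, "recover") || !strcasecmp(s->sync_action, "idle")))
		s->insync_regions--;
}

int main(void)
{
	struct raid_status s = { 2, "Aa", "idle", 4096, 4096 };

	fixup_misleading_status(&s);
	printf("%llu/%llu regions in sync\n",
	       (unsigned long long) s.insync_regions, (unsigned long long) s.total_regions);
	return 0;
}
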
diff --git a/scripts/fsadm.sh b/scripts/fsadm.sh
index ea14efe..adf9b55 100755
--- a/scripts/fsadm.sh
+++ b/scripts/fsadm.sh
@@ -402,6 +402,7 @@ yes_no() {
 	while read -r -s -n 1 ANS ; do
 		case "$ANS" in
 		 "y" | "Y" ) echo y ; return 0 ;;
+		 "n" | "N") break ;;
 		 "" ) if [ -t 1 ] ; then
 			echo y ; return 0
 		      fi ;;
diff --git a/test/lib/check.sh b/test/lib/check.sh
index 64812fb..5eec936 100644
--- a/test/lib/check.sh
+++ b/test/lib/check.sh
@@ -411,7 +411,7 @@ raid_leg_status() {
 
 grep_dmsetup() {
 	dmsetup $1 $2 | tee out
-	grep "${@:3}" out || die "Expected output from dmsetup $1 not found!"
+	grep -q "${@:3}" out || die "Expected output \"${@:3}\" from dmsetup $1 not found!"
 }
 
 #set -x
diff --git a/test/shell/fsadm.sh b/test/shell/fsadm.sh
index ac43900..6eff24a 100644
--- a/test/shell/fsadm.sh
+++ b/test/shell/fsadm.sh
@@ -131,6 +131,7 @@ if check_missing ext3; then
 	not fsadm -y --lvresize resize $vg_lv 4M
 	echo n | not lvresize -L4M -r -n $vg_lv
 	lvresize -L+20M -r -n $vg_lv
+	lvresize -L-10M -r -y $vg_lv
 	umount "$mount_dir"
 	umount "$mount_space_dir"
 	fscheck_ext3
diff --git a/test/shell/lvconvert-raid-status-validation.sh b/test/shell/lvconvert-raid-status-validation.sh
new file mode 100644
index 0000000..d705cc2
--- /dev/null
+++ b/test/shell/lvconvert-raid-status-validation.sh
@@ -0,0 +1,127 @@
+#######################################################################
+# This series of tests is meant to validate the correctness of
+# 'dmsetup status' for RAID LVs - especially during various sync action
+# transitions, like: recover, resync, check, repair, idle, reshape, etc
+#######################################################################
+SKIP_WITH_LVMLOCKD=1
+SKIP_WITH_LVMPOLLD=1
+
+export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-}
+
+. lib/inittest
+
+# check for version 1.9.0
+# - it is the point at which linear->raid1 uses "recover"
+aux have_raid 1 9 0 || skip
+
+aux prepare_pvs 9
+vgcreate -s 2m $vg $(cat DEVICES)
+
+###########################################
+# Upconverted RAID1 should never have all 'a's in status output
+###########################################
+aux delay_dev $dev2 0 50
+lvcreate -aey -l 2 -n $lv1 $vg $dev1
+lvconvert --type raid1 -y -m 1 $vg/$lv1 $dev2
+while ! check in_sync $vg $lv1; do
+        a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1"
+	[ ${a[5]} != "aa" ]
+        sleep .1
+done
+aux enable_dev $dev2
+lvremove -ff $vg
+
+###########################################
+# Upconverted RAID1 should not be at 100% right after upconvert
+###########################################
+aux delay_dev $dev2 0 50
+lvcreate -aey -l 2 -n $lv1 $vg $dev1
+lvconvert --type raid1 -y -m 1 $vg/$lv1 $dev2
+a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1"
+b=( $(echo ${a[6]} | sed s:/:' ':) )
+[ ${b[0]} -ne ${b[1]} ]
+aux enable_dev $dev2
+lvremove -ff $vg
+
+###########################################
+# Catch anything suspicious with linear -> RAID1 upconvert
+###########################################
+aux delay_dev $dev2 0 50
+lvcreate -aey -l 2 -n $lv1 $vg $dev1
+lvconvert --type raid1 -y -m 1 $vg/$lv1 $dev2
+while true; do
+        a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1"
+	b=( $(echo ${a[6]} | sed s:/:' ':) )
+	if [ ${b[0]} -ne ${b[1]} ]; then
+		# If the sync operation ("recover" in this case) is not
+		# finished, then it better be as follows:
+		[ ${a[5]} == "Aa" ]
+		[ ${a[7]} == "recover" ]
+	else
+		# Tough to tell the INVALID case,
+		#   Before starting sync thread: "Aa X/X recover"
+		# from the valid case,
+		#   Just finished sync thread: "Aa X/X recover"
+		# We'll just put "should" for now
+		should [ ${a[5]} == "AA" ]
+		should [ ${a[7]} == "idle" ]
+		break
+	fi
+        sleep .1
+done
+aux enable_dev $dev2
+lvremove -ff $vg
+
+###########################################
+# Catch anything suspicious with RAID1 2-way -> 3-way upconvert
+###########################################
+aux delay_dev $dev3 0 50
+lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg $dev1 $dev2
+lvconvert -y -m +1 $vg/$lv1 $dev3
+while true; do
+        a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1"
+	b=( $(echo ${a[6]} | sed s:/:' ':) )
+	if [ ${b[0]} -ne ${b[1]} ]; then
+		# If the sync operation ("recover" in this case) is not
+		# finished, then it better be as follows:
+		[ ${a[5]} == "AAa" ]
+		[ ${a[7]} == "recover" ]
+	else
+		# Tough to tell the INVALID case,
+		#   Before starting sync thread: "Aa X/X recover"
+		# from the valid case,
+		#   Just finished sync thread: "Aa X/X recover"
+		# We'll just put "should" for now
+		should [ ${a[5]} == "AAA" ]
+		should [ ${a[7]} == "idle" ]
+		break
+	fi
+        sleep .1
+done
+aux enable_dev $dev3
+lvremove -ff $vg
+
+###########################################
+# Catch anything suspicious with RAID1 initial resync
+###########################################
+aux delay_dev $dev2 0 50
+lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg $dev1 $dev2
+while true; do
+        a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1"
+	b=( $(echo ${a[6]} | sed s:/:' ':) )
+	if [ ${b[0]} -ne ${b[1]} ]; then
+		# If the sync operation ("resync" in this case) is not
+		# finished, then it better be as follows:
+		[ ${a[5]} == "aa" ]
+		[ ${a[7]} == "resync" ]
+	else
+		should [ ${a[5]} == "AA" ]
+		should [ ${a[7]} == "idle" ]
+		break
+	fi
+        sleep .1
+done
+aux enable_dev $dev2
+lvremove -ff $vg
+
+vgremove -ff $vg
diff --git a/test/shell/lvconvert-raid-takeover-alloc-failure.sh b/test/shell/lvconvert-raid-takeover-alloc-failure.sh
index 3870c35..cf926a1 100644
--- a/test/shell/lvconvert-raid-takeover-alloc-failure.sh
+++ b/test/shell/lvconvert-raid-takeover-alloc-failure.sh
@@ -48,7 +48,7 @@ function check_no_sub_lvs
 # Check takover upconversion fails allocation errors nicely without leaving image pair remnants behind
 
 # 6-way striped: neither conversion to raid5 nor raid6 possible
-lvcreate --yes --stripes 6 --size 4M --name $lv1 $vg
+lvcreate -aey --yes --stripes 6 --size 4M --name $lv1 $vg
 not lvconvert --yes --type raid4 $vg/$lv1
 check lv_field $vg/$lv1 segtype "striped"
 check_no_sub_lvs $vg $lv1 0 5
@@ -69,7 +69,7 @@ check_sub_lvs $vg $lv1 0 5
 lvremove -y $vg
 
 # 5-way striped: conversion to raid5 possible but not to raid6
-lvcreate --yes --stripes 5 --size 4M --name $lv1 $vg
+lvcreate -aey --stripes 5 --size 4M --name $lv1 $vg
 not lvconvert --yes --type raid6 $vg/$lv1
 check lv_field $vg/$lv1 segtype "striped"
 check_no_sub_lvs $vg $lv1 0 5
@@ -83,7 +83,7 @@ check_sub_lvs $vg $lv1 0 5
 lvremove -y $vg
 
 # 4-way striped: conversion to raid5 and raid6 possible
-lvcreate --yes --stripes 4 --size 4M --name $lv1 $vg
+lvcreate -aey --stripes 4 --size 4M --name $lv1 $vg
 lvconvert --yes --type raid5 $vg/$lv1
 check lv_field $vg/$lv1 segtype "raid5_n"
 check lv_field $vg/$lv1 stripes 5
@@ -92,7 +92,8 @@ check_sub_lvs $vg $lv1 0 4
 check_no_sub_lvs $vg $lv1 5 5
 
 lvremove -y $vg
-lvcreate --yes --stripes 4 --size 4M --name $lv1 $vg
+
+lvcreate -aey --stripes 4 --size 4M --name $lv1 $vg
 lvconvert --yes --type raid6 $vg/$lv1
 check lv_field $vg/$lv1 segtype "raid6_n_6"
 check lv_field $vg/$lv1 stripes 6
diff --git a/test/shell/lvconvert-raid-takeover-thin.sh b/test/shell/lvconvert-raid-takeover-thin.sh
new file mode 100644
index 0000000..647b133
--- /dev/null
+++ b/test/shell/lvconvert-raid-takeover-thin.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# check we may convert thin-pool to raid1/raid10 and back
+# RHBZ#1365286
+
+SKIP_WITH_LVMLOCKD=1
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux have_thin 1 0 0 || skip
+aux have_raid 1 9 0 || skip
+
+aux prepare_vg 6
+
+lvcreate -L4 -i3 -T $vg/pool -V10
+
+for i in 1 2 ; do
+lvconvert --type raid10 -y $vg/pool_tdata
+check grep_dmsetup table $vg-pool_tdata "raid10"
+aux wait_for_sync $vg pool_tdata
+
+lvconvert --type striped -y $vg/pool_tdata
+check grep_dmsetup table $vg-pool_tdata "striped"
+done
+
+lvremove -f $vg
+
+lvcreate -L4  -T $vg/pool -V10 -n $lv1
+
+for j in data meta ; do
+  LV=pool_t${j}
+  for i in 1 2 ; do
+    lvconvert --type raid1 -m1 -y  $vg/$LV
+    check grep_dmsetup table $vg-${LV} "raid1"
+    aux wait_for_sync $vg $LV
+
+    lvconvert --type raid1 -m0 -y  $vg/$LV
+    check grep_dmsetup table ${vg}-${LV} "linear"
+  done
+done
+
+
+#
+# Now the same test again, when the lock-holding LV is not a thin-pool
+# but the thin LV $lv1
+#
+lvchange -an $vg
+lvchange -ay $vg/$lv1
+
+for j in data meta ; do
+  LV=pool_t${j}
+  for i in 1 2 ; do
+    lvconvert --type raid1 -m1 -y  $vg/$LV
+    check grep_dmsetup table $vg-${LV} "raid1"
+    aux wait_for_sync $vg $LV
+
+    lvconvert --type raid1 -m0 -y  $vg/$LV
+    check grep_dmsetup table ${vg}-${LV} "linear"
+  done
+done
+
+vgremove -ff $vg
diff --git a/tools/lvresize.c b/tools/lvresize.c
index 1d59239..9b061ac 100644
--- a/tools/lvresize.c
+++ b/tools/lvresize.c
@@ -147,6 +147,7 @@ static int _lvresize_params(struct cmd_context *cmd, int argc, char **argv,
 	lp->argv = ++argv;
 
 	lp->alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, 0);
+	lp->yes = arg_is_set(cmd, yes_ARG);
 	lp->force = arg_is_set(cmd, force_ARG);
 	lp->nofsck = arg_is_set(cmd, nofsck_ARG);
 	lp->nosync = arg_is_set(cmd, nosync_ARG);