diff --git a/SOURCES/0003-Merge-master-up-to-commit-6eb9eba59bf5.patch b/SOURCES/0003-Merge-master-up-to-commit-6eb9eba59bf5.patch
new file mode 100644
index 0000000..e084090
--- /dev/null
+++ b/SOURCES/0003-Merge-master-up-to-commit-6eb9eba59bf5.patch
@@ -0,0 +1,4660 @@
+From 7e80b28b83fc8829cc7d4532cc77df5d66b54f4e Mon Sep 17 00:00:00 2001
+From: David Teigland <teigland@redhat.com>
+Date: Thu, 28 May 2020 15:51:59 -0500
+Subject: [PATCH] Merge master up to commit 6eb9eba59bf5
+
+---
+ WHATS_NEW                           |   1 +
+ device_mapper/all.h                 |   4 +
+ device_mapper/libdm-deptree.c       |  12 +
+ lib/cache/lvmcache.c                |   4 +
+ lib/device/bcache.c                 |   2 +-
+ lib/device/dev-cache.c              |   1 +
+ lib/device/dev-type.c               |  36 +-
+ lib/metadata/cache_manip.c          |   4 +
+ lib/metadata/integrity_manip.c      |  32 +-
+ lib/metadata/lv.c                   |   3 +
+ lib/metadata/lv_manip.c             |  24 +-
+ lib/metadata/metadata-exported.h    |   5 +-
+ lib/metadata/metadata.c             |   4 +-
+ lib/metadata/snapshot_manip.c       |   2 -
+ lib/metadata/writecache_manip.c     | 366 ++++++++++++++++---
+ lib/report/report.c                 |  16 +
+ lib/writecache/writecache.c         |  49 +++
+ man/lvmcache.7_main                 |  30 +-
+ man/lvs.8_end                       |   4 +
+ scripts/blkdeactivate.sh.in         |   6 +
+ test/shell/cachevol-cachedevice.sh  | 212 +++++++++++
+ test/shell/integrity-blocksize-2.sh | 128 +++++++
+ test/shell/integrity-blocksize-3.sh | 285 +++++++++++++++
+ test/shell/integrity-blocksize.sh   | 108 +++++-
+ test/shell/integrity-large.sh       |   3 +
+ test/shell/integrity-misc.sh        |  27 +-
+ test/shell/integrity.sh             |  46 ++-
+ test/shell/writecache-blocksize.sh  | 342 ++++++++++++++++++
+ test/shell/writecache-large.sh      | 153 ++++++++
+ test/shell/writecache-split.sh      |  34 +-
+ test/shell/writecache.sh            | 315 +++++++++++------
+ tools/args.h                        |  17 +-
+ tools/command-lines.in              | 142 +++++---
+ tools/command.c                     |   3 +
+ tools/lvchange.c                    |  85 +++++
+ tools/lvconvert.c                   | 681 ++++++++++++++++++++++--------------
+ tools/lvcreate.c                    | 153 +++++++-
+ tools/lvmcmdline.c                  |   8 +
+ tools/toollib.c                     | 164 +++++++++
+ tools/toollib.h                     |   3 +
+ tools/tools.h                       |  11 +
+ 41 files changed, 2976 insertions(+), 549 deletions(-)
+ create mode 100644 test/shell/cachevol-cachedevice.sh
+ create mode 100644 test/shell/integrity-blocksize-2.sh
+ create mode 100644 test/shell/integrity-blocksize-3.sh
+ create mode 100644 test/shell/writecache-blocksize.sh
+ create mode 100644 test/shell/writecache-large.sh
+
+diff --git a/WHATS_NEW b/WHATS_NEW
+index c0267b7..c6dad99 100644
+--- a/WHATS_NEW
++++ b/WHATS_NEW
+@@ -1,5 +1,6 @@
+ Version 2.03.10 - 
+ =================================
++  Fix running out of free buffers for async writing for larger writes.
+   Add integrity with raid capability.
+   Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
+ 
+diff --git a/device_mapper/all.h b/device_mapper/all.h
+index f00b6a5..c3c6219 100644
+--- a/device_mapper/all.h
++++ b/device_mapper/all.h
+@@ -951,6 +951,8 @@ struct writecache_settings {
+ 	uint64_t autocommit_time; /* in milliseconds */
+ 	uint32_t fua;
+ 	uint32_t nofua;
++	uint32_t cleaner;
++	uint32_t max_age;
+ 
+ 	/*
+ 	 * Allow an unrecognized key and its val to be passed to the kernel for
+@@ -970,6 +972,8 @@ struct writecache_settings {
+ 	unsigned autocommit_time_set:1;
+ 	unsigned fua_set:1;
+ 	unsigned nofua_set:1;
++	unsigned cleaner_set:1;
++	unsigned max_age_set:1;
+ };
+ 
+ int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
+diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c
+index 9ba24cb..2722a2c 100644
+--- a/device_mapper/libdm-deptree.c
++++ b/device_mapper/libdm-deptree.c
+@@ -2670,6 +2670,10 @@ static int _writecache_emit_segment_line(struct dm_task *dmt,
+ 		count += 1;
+ 	if (seg->writecache_settings.nofua_set)
+ 		count += 1;
++	if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner)
++		count += 1;
++	if (seg->writecache_settings.max_age_set)
++		count += 2;
+ 	if (seg->writecache_settings.new_key)
+ 		count += 2;
+ 
+@@ -2713,6 +2717,14 @@ static int _writecache_emit_segment_line(struct dm_task *dmt,
+ 		EMIT_PARAMS(pos, " nofua");
+ 	}
+ 
++	if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) {
++		EMIT_PARAMS(pos, " cleaner");
++	}
++
++	if (seg->writecache_settings.max_age_set) {
++		EMIT_PARAMS(pos, " max_age %u", seg->writecache_settings.max_age);
++	}
++
+ 	if (seg->writecache_settings.new_key) {
+ 		EMIT_PARAMS(pos, " %s %s",
+ 			seg->writecache_settings.new_key,
+diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c
+index 6cb5ff0..b1d05fb 100644
+--- a/lib/cache/lvmcache.c
++++ b/lib/cache/lvmcache.c
+@@ -84,6 +84,7 @@ static DM_LIST_INIT(_unused_duplicates);
+ static DM_LIST_INIT(_prev_unused_duplicate_devs);
+ static int _vgs_locked = 0;
+ static int _found_duplicate_vgnames = 0;
++static int _outdated_warning = 0;
+ 
+ int lvmcache_init(struct cmd_context *cmd)
+ {
+@@ -1776,6 +1777,9 @@ int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted)
+ 		log_warn("WARNING: outdated PV %s seqno %u has been removed in current VG %s seqno %u.",
+ 			 dev_name(info->dev), info->summary_seqno, vg->name, vginfo->seqno);
+ 
++		if (!_outdated_warning++)
++			log_warn("See vgck --updatemetadata to clear outdated metadata.");
++
+ 		_drop_vginfo(info, vginfo); /* remove from vginfo->infos */
+ 		dm_list_add(&vginfo->outdated_infos, &info->list);
+ 	}
+diff --git a/lib/device/bcache.c b/lib/device/bcache.c
+index a7d8055..7e7e185 100644
+--- a/lib/device/bcache.c
++++ b/lib/device/bcache.c
+@@ -950,7 +950,7 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b
+ 	struct block *b;
+ 
+ 	b = _alloc_block(cache);
+-	while (!b && !dm_list_empty(&cache->clean)) {
++	while (!b) {
+ 		b = _find_unused_clean_block(cache);
+ 		if (!b) {
+ 			if (can_wait) {
+diff --git a/lib/device/dev-cache.c b/lib/device/dev-cache.c
+index 6af559c..c3f7c49 100644
+--- a/lib/device/dev-cache.c
++++ b/lib/device/dev-cache.c
+@@ -65,6 +65,7 @@ static int _insert(const char *path, const struct stat *info,
+ static void _dev_init(struct device *dev)
+ {
+ 	dev->fd = -1;
++	dev->bcache_fd = -1;
+ 	dev->read_ahead = -1;
+ 
+ 	dev->ext.enabled = 0;
+diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c
+index deb5d6a..896821d 100644
+--- a/lib/device/dev-type.c
++++ b/lib/device/dev-type.c
+@@ -649,37 +649,23 @@ out:
+ #ifdef BLKID_WIPING_SUPPORT
+ int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
+ {
+-	blkid_probe probe = NULL;
+-	const char *block_size_str = NULL;
+-	uint64_t block_size_val;
+-	int r = 0;
++	char *block_size_str = NULL;
+ 
+-	*fs_block_size = 0;
+-
+-	if (!(probe = blkid_new_probe_from_filename(dev_name(dev)))) {
+-		log_error("Failed to create a new blkid probe for device %s.", dev_name(dev));
+-		goto out;
++	if ((block_size_str = blkid_get_tag_value(NULL, "BLOCK_SIZE", dev_name(dev)))) {
++		*fs_block_size = (uint32_t)atoi(block_size_str);
++		free(block_size_str);
++		log_debug("Found blkid BLOCK_SIZE %u for fs on %s", *fs_block_size, dev_name(dev));
++		return 1;
++	} else {
++		log_debug("No blkid BLOCK_SIZE for fs on %s", dev_name(dev));
++		*fs_block_size = 0;
++		return 0;
+ 	}
+-
+-	blkid_probe_enable_partitions(probe, 1);
+-
+-	(void) blkid_probe_lookup_value(probe, "BLOCK_SIZE", &block_size_str, NULL);
+-
+-	if (!block_size_str)
+-		goto out;
+-
+-	block_size_val = strtoull(block_size_str, NULL, 10);
+-
+-	*fs_block_size = (uint32_t)block_size_val;
+-	r = 1;
+-out:
+-	if (probe)
+-		blkid_free_probe(probe);
+-	return r;
+ }
+ #else
+ int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
+ {
++	log_debug("Disabled blkid BLOCK_SIZE for fs.");
+ 	*fs_block_size = 0;
+ 	return 0;
+ }
+diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
+index 49b3850..a786e8b 100644
+--- a/lib/metadata/cache_manip.c
++++ b/lib/metadata/cache_manip.c
+@@ -1094,6 +1094,10 @@ int cache_vol_set_params(struct cmd_context *cmd,
+ 	if (!meta_size) {
+ 		meta_size = _cache_min_metadata_size(pool_lv->size, chunk_size);
+ 
++		/* fix bad value from _cache_min_metadata_size */
++		if (meta_size > (pool_lv->size / 2))
++			meta_size = pool_lv->size / 2;
++
+ 		if (meta_size < min_meta_size)
+ 			meta_size = min_meta_size;
+ 
+diff --git a/lib/metadata/integrity_manip.c b/lib/metadata/integrity_manip.c
+index 7942be0..3344527 100644
+--- a/lib/metadata/integrity_manip.c
++++ b/lib/metadata/integrity_manip.c
+@@ -278,7 +278,7 @@ int lv_remove_integrity_from_raid(struct logical_volume *lv)
+ 	return 1;
+ }
+ 
+-static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv,
++static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv, int is_active,
+ 				     struct integrity_settings *settings,
+ 				     int lbs_4k, int lbs_512, int pbs_4k, int pbs_512)
+ {
+@@ -375,7 +375,13 @@ static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_vol
+ 		}
+ 
+ 		if (!settings->block_size) {
+-			if (fs_block_size <= 4096)
++			if (is_active && lbs_512) {
++				/* increasing the lbs from 512 to 4k under an active LV could cause problems
++				   for an application that expects a given io size/alignment is possible. */
++				settings->block_size = 512;
++				if (fs_block_size > 512)
++					log_print("Limiting integrity block size to 512 because the LV is active.");
++			} else if (fs_block_size <= 4096)
+ 				settings->block_size = fs_block_size;
+ 			else
+ 				settings->block_size = 4096; /* dm-integrity max is 4096 */
+@@ -587,13 +593,33 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting
+ 		}
+ 	}
+ 
++	if (!is_active) {
++		/* checking block size of fs on the lv requires the lv to be active */
++		if (!activate_lv(cmd, lv)) {
++			log_error("Failed to activate LV to check block size %s", display_lvname(lv));
++			goto bad;
++		}
++		if (!sync_local_dev_names(cmd))
++			stack;
++	}
++
+ 	/*
+ 	 * Set settings->block_size which will be copied to segment settings below.
+ 	 * integrity block size chosen based on device logical block size and
+ 	 * file system block size.
+ 	 */
+-	if (!_set_integrity_block_size(cmd, lv, settings, lbs_4k, lbs_512, pbs_4k, pbs_512))
++	if (!_set_integrity_block_size(cmd, lv, is_active, settings, lbs_4k, lbs_512, pbs_4k, pbs_512)) {
++		if (!is_active && !deactivate_lv(cmd, lv))
++			stack;
+ 		goto_bad;
++	}
++
++	if (!is_active) {
++		if (!deactivate_lv(cmd, lv)) {
++			log_error("Failed to deactivate LV after checking block size %s", display_lvname(lv));
++			goto bad;
++		}
++	}
+ 
+ 	/*
+ 	 * For each rimage, move its segments to a new rimage_iorig and give
+diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c
+index 4ee58b4..fac47e5 100644
+--- a/lib/metadata/lv.c
++++ b/lib/metadata/lv.c
+@@ -1412,6 +1412,9 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
+ 	} else if (lvdm->seg_status.type == SEG_STATUS_THIN) {
+ 		if (lvdm->seg_status.thin->fail)
+ 			repstr[8] = 'F';
++	} else if (lvdm->seg_status.type == SEG_STATUS_WRITECACHE) {
++		if (lvdm->seg_status.writecache->error)
++			repstr[8] = 'E';
+ 	} else if (lvdm->seg_status.type == SEG_STATUS_UNKNOWN)
+ 		repstr[8] = 'X'; /* Unknown */
+ 
+diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
+index 1311f70..1642b90 100644
+--- a/lib/metadata/lv_manip.c
++++ b/lib/metadata/lv_manip.c
+@@ -5066,6 +5066,7 @@ static int _lvresize_check(struct logical_volume *lv,
+ 			   struct lvresize_params *lp)
+ {
+ 	struct volume_group *vg = lv->vg;
++	struct lv_segment *seg = first_seg(lv);
+ 
+ 	if (lv_is_external_origin(lv)) {
+ 		/*
+@@ -5089,6 +5090,12 @@ static int _lvresize_check(struct logical_volume *lv,
+ 		return 0;
+ 	}
+ 
++	if (seg && (seg_is_raid4(seg) || seg_is_any_raid5(seg)) && seg->area_count < 3) {
++		log_error("Cannot resize %s LV %s. Convert to more stripes first.",
++			  lvseg_name(seg), display_lvname(lv));
++		return 0;
++	}
++
+ 	if (lv_is_raid(lv) &&
+ 	    lp->resize == LV_REDUCE) {
+ 		unsigned attrs;
+@@ -6568,7 +6575,20 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
+ 		}
+ 	}
+ 
+-	if (lv_is_used_cache_pool(lv) || lv_is_cache_vol(lv)) {
++	if (lv_is_cache_vol(lv)) {
++		if ((cache_seg = get_only_segment_using_this_lv(lv))) {
++			/* When used with cache, lvremove on cachevol also removes the cache! */
++		       	if (seg_is_cache(cache_seg)) {
++				if (!lv_cache_remove(cache_seg->lv))
++					return_0;
++			} else if (seg_is_writecache(cache_seg)) {
++				log_error("Detach cachevol before removing.");
++				return 0;
++			}
++		}
++	}
++
++	if (lv_is_used_cache_pool(lv)) {
+ 		/* Cache pool removal drops cache layer
+ 		 * If the cache pool is not linked, we can simply remove it. */
+ 		if (!(cache_seg = get_only_segment_using_this_lv(lv)))
+@@ -6832,7 +6852,7 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
+ 	}
+ 
+ 	if (!(origin_only ? suspend_lv_origin(vg->cmd, lock_lv) : suspend_lv(vg->cmd, lock_lv))) {
+-		log_error("Failed to lock logical volume %s.",
++		log_error("Failed to suspend logical volume %s.",
+ 			  display_lvname(lock_lv));
+ 		vg_revert(vg);
+ 	} else if (!(r = vg_commit(vg)))
+diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
+index 083f74a..0cc5f37 100644
+--- a/lib/metadata/metadata-exported.h
++++ b/lib/metadata/metadata-exported.h
+@@ -89,8 +89,7 @@
+ #define PARTIAL_LV		UINT64_C(0x0000000001000000)	/* LV - derived flag, not
+ 							   written out in metadata*/
+ 
+-//#define POSTORDER_FLAG	UINT64_C(0x0000000002000000) /* Not real flags, reserved for
+-//#define POSTORDER_OPEN_FLAG	UINT64_C(0x0000000004000000)    temporary use inside vg_read_internal. */
++#define WRITECACHE_ORIGIN	UINT64_C(0x0000000002000000)
+ #define INTEGRITY_METADATA	UINT64_C(0x0000000004000000)    /* LV - Internal use only */
+ #define VIRTUAL_ORIGIN		UINT64_C(0x0000000008000000)	/* LV - internal use only */
+ 
+@@ -955,6 +954,7 @@ struct lvcreate_params {
+ 	int thin_chunk_size_calc_policy;
+ 	unsigned suppress_zero_warn : 1;
+ 	unsigned needs_lockd_init : 1;
++	unsigned ignore_type : 1;
+ 
+ 	const char *vg_name; /* only-used when VG is not yet opened (in /tools) */
+ 	const char *lv_name; /* all */
+@@ -1097,6 +1097,7 @@ int lv_is_cow(const struct logical_volume *lv);
+ int lv_is_cache_origin(const struct logical_volume *lv);
+ int lv_is_writecache_origin(const struct logical_volume *lv);
+ int lv_is_writecache_cachevol(const struct logical_volume *lv);
++int writecache_settings_to_str_list(struct writecache_settings *settings, struct dm_list *result, struct dm_pool *mem);
+ 
+ int lv_is_integrity_origin(const struct logical_volume *lv);
+ 
+diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
+index 4b8dce9..c0d4206 100644
+--- a/lib/metadata/metadata.c
++++ b/lib/metadata/metadata.c
+@@ -4875,8 +4875,10 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
+ 		}
+ 	}
+ 
+-	if (found_old_metadata)
++	if (found_old_metadata) {
+ 		log_warn("WARNING: Inconsistent metadata found for VG %s.", vgname);
++		log_warn("See vgck --updatemetadata to correct inconsistency.");
++	}
+ 
+ 	vg = NULL;
+ 
+diff --git a/lib/metadata/snapshot_manip.c b/lib/metadata/snapshot_manip.c
+index 3faea0e..0f48e62 100644
+--- a/lib/metadata/snapshot_manip.c
++++ b/lib/metadata/snapshot_manip.c
+@@ -389,8 +389,6 @@ int validate_snapshot_origin(const struct logical_volume *origin_lv)
+ 		err = "raid subvolumes";
+ 	} else if (lv_is_raid(origin_lv) && lv_raid_has_integrity((struct logical_volume *)origin_lv)) {
+ 		err = "raid with integrity";
+-	} else if (lv_is_writecache(origin_lv)) {
+-		err = "writecache";
+ 	}
+ 
+ 	if (err) {
+diff --git a/lib/metadata/writecache_manip.c b/lib/metadata/writecache_manip.c
+index 31d069e..7ad8c75 100644
+--- a/lib/metadata/writecache_manip.c
++++ b/lib/metadata/writecache_manip.c
+@@ -19,13 +19,24 @@
+ #include "lib/commands/toolcontext.h"
+ #include "lib/display/display.h"
+ #include "lib/metadata/segtype.h"
++#include "lib/metadata/lv_alloc.h"
+ #include "lib/activate/activate.h"
+ #include "lib/config/defaults.h"
++#include "lib/datastruct/str_list.h"
+ 
+ int lv_is_writecache_origin(const struct logical_volume *lv)
+ {
+ 	struct lv_segment *seg;
+ 
++	/*
++	 * This flag is needed when removing writecache from an origin
++	 * in which case the lv connections have been destroyed and
++	 * identifying a writecache origin by these connections doesn't
++	 * work.
++	 */
++	if (lv->status & WRITECACHE_ORIGIN)
++		return 1;
++
+ 	/* Make sure there's exactly one segment in segs_using_this_lv! */
+ 	if (dm_list_empty(&lv->segs_using_this_lv) ||
+ 	    (dm_list_size(&lv->segs_using_this_lv) > 1))
+@@ -48,46 +59,6 @@ int lv_is_writecache_cachevol(const struct logical_volume *lv)
+ 	return 0;
+ }
+ 
+-static int _lv_writecache_detach(struct cmd_context *cmd, struct logical_volume *lv,
+-				 struct logical_volume *lv_fast)
+-{
+-	struct lv_segment *seg = first_seg(lv);
+-	struct logical_volume *origin;
+-
+-	if (!seg_is_writecache(seg)) {
+-		log_error("LV %s segment is not writecache.", display_lvname(lv));
+-		return 0;
+-	}
+-
+-	if (!seg->writecache) {
+-		log_error("LV %s writecache segment has no writecache.", display_lvname(lv));
+-		return 0;
+-	}
+-
+-	if (!(origin = seg_lv(seg, 0))) {
+-		log_error("LV %s writecache segment has no origin", display_lvname(lv));
+-		return 0;
+-	}
+-
+-	if (!remove_seg_from_segs_using_this_lv(seg->writecache, seg))
+-		return_0;
+-
+-	lv_set_visible(seg->writecache);
+-
+-	lv->status &= ~WRITECACHE;
+-	seg->writecache = NULL;
+-
+-	lv_fast->status &= ~LV_CACHE_VOL;
+-
+-	if (!remove_layer_from_lv(lv, origin))
+-		return_0;
+-
+-	if (!lv_remove(origin))
+-		return_0;
+-
+-	return 1;
+-}
+-
+ static int _get_writecache_kernel_error(struct cmd_context *cmd,
+ 					struct logical_volume *lv,
+ 					uint32_t *kernel_error)
+@@ -131,13 +102,64 @@ fail:
+ 	return 0;
+ }
+ 
+-int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush)
++static void _rename_detached_cvol(struct cmd_context *cmd, struct logical_volume *lv_fast)
++{
++	struct volume_group *vg = lv_fast->vg;
++	char cvol_name[NAME_LEN];
++	char *suffix, *cvol_name_dup;
++
++	/*
++	 * Rename lv_fast back to its original name, without the _cvol
++	 * suffix that was added when lv_fast was attached for caching.
++	 * If the name is in use, generate new lvol%d.
++	 * Failing to rename is not really a problem, so we intentionally
++	 * do not consider some things here as errors.
++	 */
++	if (!dm_strncpy(cvol_name, lv_fast->name, sizeof(cvol_name)) ||
++	    !(suffix  = strstr(cvol_name, "_cvol"))) {
++		log_debug("LV %s has no suffix for cachevol (skipping rename).",
++			display_lvname(lv_fast));
++		return;
++	}
++
++	*suffix = 0;
++	if (lv_name_is_used_in_vg(vg, cvol_name, NULL) &&
++	    !generate_lv_name(vg, "lvol%d", cvol_name, sizeof(cvol_name))) {
++		log_warn("Failed to generate new unique name for unused LV %s", lv_fast->name);
++		return;
++	}
++
++	if (!(cvol_name_dup = dm_pool_strdup(vg->vgmem, cvol_name))) {
++		stack;
++		return;
++	}
++
++	lv_fast->name = cvol_name_dup;
++}
++
++static int _lv_detach_writecache_cachevol_inactive(struct logical_volume *lv, int noflush)
+ {
+ 	struct cmd_context *cmd = lv->vg->cmd;
++	struct volume_group *vg = lv->vg;
+ 	struct logical_volume *lv_fast;
++	struct logical_volume *lv_wcorig;
++	struct lv_segment *seg = first_seg(lv);
+ 	uint32_t kernel_error = 0;
+ 
+-	lv_fast = first_seg(lv)->writecache;
++	if (!seg_is_writecache(seg)) {
++		log_error("LV %s segment is not writecache.", display_lvname(lv));
++		return 0;
++	}
++
++	if (!(lv_fast = seg->writecache)) {
++		log_error("LV %s writecache segment has no writecache.", display_lvname(lv));
++		return 0;
++	}
++
++	if (!(lv_wcorig = seg_lv(seg, 0))) {
++		log_error("LV %s writecache segment has no origin", display_lvname(lv));
++		return 0;
++	}
+ 
+ 	if (noflush)
+ 		goto detach;
+@@ -157,6 +179,8 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush)
+ 
+ 	if (!sync_local_dev_names(cmd)) {
+ 		log_error("Failed to sync local devices before detaching writecache.");
++		if (!deactivate_lv(cmd, lv))
++			log_error("Failed to deactivate %s.", display_lvname(lv));
+ 		return 0;
+ 	}
+ 
+@@ -176,7 +200,8 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush)
+ 
+ 	if (kernel_error) {
+ 		log_error("Failed to flush writecache (error %u) for %s.", kernel_error, display_lvname(lv));
+-		deactivate_lv(cmd, lv);
++		if (!deactivate_lv(cmd, lv))
++			log_error("Failed to deactivate %s.", display_lvname(lv));
+ 		return 0;
+ 	}
+ 
+@@ -188,11 +213,262 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush)
+ 	lv->status &= ~LV_TEMPORARY;
+ 
+  detach:
+-	if (!_lv_writecache_detach(cmd, lv, lv_fast)) {
+-		log_error("Failed to detach writecache from %s", display_lvname(lv));
++	if (!remove_seg_from_segs_using_this_lv(lv_fast, seg))
++		return_0;
++
++	lv->status &= ~WRITECACHE;
++	seg->writecache = NULL;
++
++	if (!remove_layer_from_lv(lv, lv_wcorig))
++		return_0;
++
++	if (!lv_remove(lv_wcorig))
++		return_0;
++
++	lv_set_visible(lv_fast);
++	lv_fast->status &= ~LV_CACHE_VOL;
++
++	_rename_detached_cvol(cmd, lv_fast);
++
++	if (!vg_write(vg) || !vg_commit(vg))
++		return_0;
++
++	return 1;
++}
++
++static int _lv_detach_writecache_cachevol_active(struct logical_volume *lv, int noflush)
++{
++	struct cmd_context *cmd = lv->vg->cmd;
++	struct volume_group *vg = lv->vg;
++	struct logical_volume *lv_fast;
++	struct logical_volume *lv_wcorig;
++	struct logical_volume *lv_old;
++	struct lv_segment *seg = first_seg(lv);
++	uint32_t kernel_error = 0;
++
++	if (!seg_is_writecache(seg)) {
++		log_error("LV %s segment is not writecache.", display_lvname(lv));
++		return 0;
++	}
++
++	if (!(lv_fast = seg->writecache)) {
++		log_error("LV %s writecache segment has no writecache.", display_lvname(lv));
+ 		return 0;
+ 	}
+ 
++	if (!(lv_wcorig = seg_lv(seg, 0))) {
++		log_error("LV %s writecache segment has no origin", display_lvname(lv));
++		return 0;
++	}
++
++	if (noflush)
++		goto detach;
++
++	if (!lv_writecache_message(lv, "flush_on_suspend")) {
++		log_error("Failed to set flush_on_suspend in writecache detach %s.", display_lvname(lv));
++		return 0;
++	}
++
++ detach:
++	if (!remove_seg_from_segs_using_this_lv(lv_fast, seg)) {
++		log_error("Failed to remove seg in writecache detach.");
++		return 0;
++	}
++
++	lv->status &= ~WRITECACHE;
++	seg->writecache = NULL;
++
++	if (!remove_layer_from_lv(lv, lv_wcorig)) {
++		log_error("Failed to remove lv layer in writecache detach.");
++		return 0;
++	}
++
++	/*
++	 * vg_write(), suspend_lv(), vg_commit(), resume_lv().
++	 * usually done by lv_update_and_reload for an active lv,
++	 * but in this case we need to check for writecache errors
++	 * after suspend.
++	 */
++
++	if (!vg_write(vg)) {
++		log_error("Failed to write VG in writecache detach.");
++		return 0;
++	}
++
++	/*
++	 * The version of LV before removal of writecache.  When need to
++	 * check for kernel errors based on the old version of LV which
++	 * is still present in the kernel.
++	 */
++	if (!(lv_old = (struct logical_volume *)lv_committed(lv))) {
++		log_error("Failed to get lv_committed in writecache detach.");
++		return 0;
++	}
++
++	/*
++	 * suspend does not use 'lv' as we know it here, but grabs the
++	 * old (precommitted) version of 'lv' using lv_committed(),
++	 * which is from vg->vg_comitted.
++	 */
++	log_debug("Suspending writecache to detach %s", display_lvname(lv));
++
++	if (!suspend_lv(cmd, lv)) {
++		log_error("Failed to suspend LV in writecache detach.");
++		vg_revert(vg);
++		return 0;
++	}
++
++	log_debug("Checking writecache errors to detach.");
++
++	if (!_get_writecache_kernel_error(cmd, lv_old, &kernel_error)) {
++		log_error("Failed to get writecache error status for %s.", display_lvname(lv_old));
++		return 0;
++	}
++
++	if (kernel_error) {
++		log_error("Failed to flush writecache (error %u) for %s.", kernel_error, display_lvname(lv));
++		return 0;
++	}
++
++	if (!vg_commit(vg)) {
++		log_error("Failed to commit VG in writecache detach.");
++		return 0;
++	}
++
++	/*
++	 * Since vg_commit has happened, vg->vg_committed is now the
++	 * newest copy of lv, so resume uses the 'lv' that we know
++	 * here.
++	 */
++	log_debug("Resuming after writecache detached %s", display_lvname(lv));
++
++	if (!resume_lv(cmd, lv)) {
++		log_error("Failed to resume LV in writecache detach.");
++		return 0;
++	}
++
++	log_debug("Deactivating previous cachevol %s", display_lvname(lv_fast));
++
++	if (!deactivate_lv(cmd, lv_fast))
++		log_error("Failed to deactivate previous cachevol in writecache detach.");
++
++	/*
++	 * Needed for lv_is_writecache_origin to know lv_wcorig was
++	 * a writecache origin, which is needed so that the -real
++	 * dm uuid suffix is applied, which is needed for deactivate to
++	 * work. This is a hacky roundabout way of setting the -real
++	 * uuid suffix (it would be nice to have a deactivate command
++	 * that accepts a dm uuid.)
++	 */
++	lv_wcorig->status |= WRITECACHE_ORIGIN;
++
++	log_debug("Deactivating previous wcorig %s", display_lvname(lv_wcorig));
++
++	if (!lv_deactivate(cmd, NULL, lv_wcorig))
++		log_error("Failed to deactivate previous wcorig LV in writecache detach.");
++
++	log_debug("Removing previous wcorig %s", display_lvname(lv_wcorig));
++
++	if (!lv_remove(lv_wcorig)) {
++		log_error("Failed to remove previous wcorig LV in writecache detach.");
++		return 0;
++	}
++
++	lv_set_visible(lv_fast);
++	lv_fast->status &= ~LV_CACHE_VOL;
++
++	_rename_detached_cvol(cmd, lv_fast);
++
++	if (!vg_write(vg) || !vg_commit(vg)) {
++		log_error("Failed to write and commit VG in writecache detach.");
++		return 0;
++	}
++
++	return 1;
++}
++
++int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush)
++{
++	if (lv_is_active(lv))
++		return _lv_detach_writecache_cachevol_active(lv, noflush);
++	else
++		return _lv_detach_writecache_cachevol_inactive(lv, noflush);
++}
++
++static int _writecache_setting_str_list_add(const char *field, uint64_t val, char *val_str, struct dm_list *result, struct dm_pool *mem)
++{
++	char buf[128];
++	char *list_item;
++	int len;
++
++	if (val_str) {
++		if (dm_snprintf(buf, sizeof(buf), "%s=%s", field, val_str) < 0)
++			return_0;
++	} else {
++		if (dm_snprintf(buf, sizeof(buf), "%s=%llu", field, (unsigned long long)val) < 0)
++			return_0;
++	}
++
++	len = strlen(buf) + 1;
++
++	if (!(list_item = dm_pool_zalloc(mem, len)))
++		return_0;
++
++	memcpy(list_item, buf, len);
++
++	if (!str_list_add_no_dup_check(mem, result, list_item))
++		return_0;
++
++	return 1;
++}
++
++int writecache_settings_to_str_list(struct writecache_settings *settings, struct dm_list *result, struct dm_pool *mem)
++{
++	int errors = 0;
++
++	if (settings->high_watermark_set)
++		if (!_writecache_setting_str_list_add("high_watermark", settings->high_watermark, NULL, result, mem))
++			errors++;
++
++	if (settings->low_watermark_set)
++		if (!_writecache_setting_str_list_add("low_watermark", settings->low_watermark, NULL, result, mem))
++			errors++;
++
++	if (settings->writeback_jobs_set)
++		if (!_writecache_setting_str_list_add("writeback_jobs", settings->writeback_jobs, NULL, result, mem))
++			errors++;
++
++	if (settings->autocommit_blocks_set)
++		if (!_writecache_setting_str_list_add("autocommit_blocks", settings->autocommit_blocks, NULL, result, mem))
++			errors++;
++
++	if (settings->autocommit_time_set)
++		if (!_writecache_setting_str_list_add("autocommit_time", settings->autocommit_time, NULL, result, mem))
++			errors++;
++
++	if (settings->fua_set)
++		if (!_writecache_setting_str_list_add("fua", (uint64_t)settings->fua, NULL, result, mem))
++			errors++;
++
++	if (settings->nofua_set)
++		if (!_writecache_setting_str_list_add("nofua", (uint64_t)settings->nofua, NULL, result, mem))
++			errors++;
++
++	if (settings->cleaner_set && settings->cleaner)
++		if (!_writecache_setting_str_list_add("cleaner", (uint64_t)settings->cleaner, NULL, result, mem))
++			errors++;
++
++	if (settings->max_age_set)
++		if (!_writecache_setting_str_list_add("max_age", (uint64_t)settings->max_age, NULL, result, mem))
++			errors++;
++
++	if (settings->new_key && settings->new_val)
++		if (!_writecache_setting_str_list_add(settings->new_key, 0, settings->new_val, result, mem))
++			errors++;
++
++	if (errors)
++		log_warn("Failed to create list of writecache settings.");
++
+ 	return 1;
+ }
+ 
+diff --git a/lib/report/report.c b/lib/report/report.c
+index 170df69..74ec74c 100644
+--- a/lib/report/report.c
++++ b/lib/report/report.c
+@@ -1430,6 +1430,16 @@ static int _cache_settings_disp(struct dm_report *rh, struct dm_pool *mem,
+ 	struct _str_list_append_baton baton;
+ 	struct dm_list dummy_list; /* dummy list to display "nothing" */
+ 
++	if (seg_is_writecache(seg)) {
++		if (!(result = str_list_create(mem)))
++			return_0;
++
++		if (!writecache_settings_to_str_list((struct writecache_settings *)&seg->writecache_settings, result, mem))
++			return_0;
++
++		return _field_set_string_list(rh, field, result, private, 0, NULL);
++	}
++
+ 	if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv))
+ 		setting_seg = seg;
+ 
+@@ -3802,6 +3812,12 @@ static int _lvhealthstatus_disp(struct dm_report *rh, struct dm_pool *mem,
+ 			health = "failed";
+ 		else if (lvdm->seg_status.cache->read_only)
+ 			health = "metadata_read_only";
++	} else if (lv_is_writecache(lv) && (lvdm->seg_status.type != SEG_STATUS_NONE)) {
++		if (lvdm->seg_status.type != SEG_STATUS_WRITECACHE)
++			return _field_set_value(field, GET_FIRST_RESERVED_NAME(health_undef),
++						GET_FIELD_RESERVED_VALUE(health_undef));
++		if (lvdm->seg_status.writecache->error)
++			health = "error";
+ 	} else if (lv_is_thin_pool(lv) && (lvdm->seg_status.type != SEG_STATUS_NONE)) {
+ 		if (lvdm->seg_status.type != SEG_STATUS_THIN_POOL)
+ 			return _field_set_value(field, GET_FIRST_RESERVED_NAME(health_undef),
+diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c
+index 130922a..c7aea28 100644
+--- a/lib/writecache/writecache.c
++++ b/lib/writecache/writecache.c
+@@ -26,6 +26,9 @@
+ #include "lib/metadata/lv_alloc.h"
+ #include "lib/config/defaults.h"
+ 
++static int _writecache_cleaner_supported;
++static int _writecache_max_age_supported;
++
+ #define SEG_LOG_ERROR(t, p...) \
+         log_error(t " segment %s of logical volume %s.", ## p,	\
+                   dm_config_parent_name(sn), seg->lv->name), 0;
+@@ -120,6 +123,18 @@ static int _writecache_text_import(struct lv_segment *seg,
+ 		seg->writecache_settings.nofua_set = 1;
+ 	}
+ 
++	if (dm_config_has_node(sn, "cleaner")) {
++		if (!dm_config_get_uint32(sn, "cleaner", &seg->writecache_settings.cleaner))
++			return SEG_LOG_ERROR("Unknown writecache_setting in");
++		seg->writecache_settings.cleaner_set = 1;
++	}
++
++	if (dm_config_has_node(sn, "max_age")) {
++		if (!dm_config_get_uint32(sn, "max_age", &seg->writecache_settings.max_age))
++			return SEG_LOG_ERROR("Unknown writecache_setting in");
++		seg->writecache_settings.max_age_set = 1;
++	}
++
+ 	if (dm_config_has_node(sn, "writecache_setting_key")) {
+ 		const char *key;
+ 		const char *val;
+@@ -184,6 +199,14 @@ static int _writecache_text_export(const struct lv_segment *seg,
+ 	        outf(f, "nofua = %u", seg->writecache_settings.nofua);
+ 	}
+ 
++	if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) {
++	        outf(f, "cleaner = %u", seg->writecache_settings.cleaner);
++	}
++
++	if (seg->writecache_settings.max_age_set) {
++	        outf(f, "max_age = %u", seg->writecache_settings.max_age);
++	}
++
+ 	if (seg->writecache_settings.new_key && seg->writecache_settings.new_val) {
+ 	        outf(f, "writecache_setting_key = \"%s\"",
+ 	                seg->writecache_settings.new_key);
+@@ -208,6 +231,7 @@ static int _target_present(struct cmd_context *cmd,
+ {
+ 	static int _writecache_checked = 0;
+ 	static int _writecache_present = 0;
++	uint32_t maj, min, patchlevel;
+ 
+ 	if (!activation())
+ 		return 0;
+@@ -215,6 +239,19 @@ static int _target_present(struct cmd_context *cmd,
+ 	if (!_writecache_checked) {
+ 		_writecache_checked = 1;
+ 		_writecache_present =  target_present(cmd, TARGET_NAME_WRITECACHE, 1);
++
++		if (!target_version(TARGET_NAME_WRITECACHE, &maj, &min, &patchlevel))
++			return_0;
++
++		if (maj < 1) {
++			log_error("writecache target version older than minimum 1.0.0");
++			return 0;
++		}
++
++		if (min >= 2) {
++			_writecache_cleaner_supported = 1;
++			_writecache_max_age_supported = 1;
++		}
+ 	}
+ 
+ 	return _writecache_present;
+@@ -257,6 +294,18 @@ static int _writecache_add_target_line(struct dev_manager *dm,
+ 		return 0;
+ 	}
+ 
++	if (!_writecache_cleaner_supported && seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) {
++		log_warn("WARNING: ignoring writecache setting \"cleaner\" which is not supported by kernel for LV %s.", seg->lv->name);
++		seg->writecache_settings.cleaner = 0;
++		seg->writecache_settings.cleaner_set = 0;
++	}
++
++	if (!_writecache_max_age_supported && seg->writecache_settings.max_age_set) {
++		log_warn("WARNING: ignoring writecache setting \"max_age\" which is not supported by kernel for LV %s.", seg->lv->name);
++		seg->writecache_settings.max_age = 0;
++		seg->writecache_settings.max_age_set = 0;
++	}
++
+ 	if ((pmem = lv_on_pmem(seg->writecache)) < 0)
+ 		return_0;
+ 
+diff --git a/man/lvmcache.7_main b/man/lvmcache.7_main
+index 425904e..244ea1e 100644
+--- a/man/lvmcache.7_main
++++ b/man/lvmcache.7_main
+@@ -34,8 +34,6 @@ LVM refers to this using the LV type \fBwritecache\fP.
+ 
+ .SH USAGE
+ 
+-Both kinds of caching use similar lvm commands:
+-
+ .B 1. Identify main LV that needs caching
+ 
+ The main LV may already exist, and is located on larger, slower devices.
+@@ -133,6 +131,22 @@ attached.
+   main vg -wi------- linear /dev/slow_hhd
+ .fi
+ 
++.SS Create a new LV with caching.
++
++A new LV can be created with caching attached at the time of creation
++using the following command:
++
++.nf
++$ lvcreate --type cache|writecache -n Name -L Size
++	--cachedevice /dev/fast_ssd vg /dev/slow_hhd
++.fi
++
++The main LV is created with the specified Name and Size from the slow_hhd.
++A hidden fast LV is created on the fast_ssd and is then attached to the
++new main LV.  If the fast_ssd is unused, the entire disk will be used as
++the cache unless the --cachesize option is used to specify a size for the
++fast LV.  The --cachedevice option can be repeated to use multiple disks
++for the fast LV.
+ 
+ .SH OPTIONS
+ 
+@@ -162,6 +176,18 @@ that cannot be used directly.  If a standard LV is passed with this
+ option, lvm will first convert it to a cache pool by combining it with
+ another LV to use for metadata.  This option can be used with dm-cache.
+ 
++.B --cachedevice
++.I PV
++.br
++
++This option can be used in place of --cachevol, in which case a cachevol
++LV will be created using the specified device.  This option can be
++repeated to create a cachevol using multiple devices, or a tag name can be
++specified in which case the cachevol will be created using any of the
++devices with the given tag.  If a named cache device is unused, the entire
++device will be used to create the cachevol.  To create a cachevol of a
++specific size from the cache devices, include the --cachesize option.
++
+ \&
+ 
+ .SS dm-cache block size
+diff --git a/man/lvs.8_end b/man/lvs.8_end
+index 6efc9cb..5a4ecc8 100644
+--- a/man/lvs.8_end
++++ b/man/lvs.8_end
+@@ -74,5 +74,9 @@ Related to Thin Logical Volumes: (F)ailed.
+ .br
+ (F)ailed is set when related thin pool enters Failed state and no further I/O
+ is permitted at all.
++.IP
++Related to writecache logical volumes: (E)rror.
++.br
++(E)rror is set when dm-writecache reports an error.
+ .IP 10 3
+ s(k)ip activation: this volume is flagged to be skipped during activation.
+diff --git a/scripts/blkdeactivate.sh.in b/scripts/blkdeactivate.sh.in
+index 57b3e58..7c517b8 100644
+--- a/scripts/blkdeactivate.sh.in
++++ b/scripts/blkdeactivate.sh.in
+@@ -330,6 +330,12 @@ deactivate_vdo() {
+         test -b "$DEV_DIR/mapper/$xname" || return 0
+         test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1
+ 
++	# Skip VDO device deactivation if VDO tools missing.
++	test "$VDO_AVAILABLE" -eq 0 && {
++		add_device_to_skip_list
++		return 1
++	}
++
+         deactivate_holders "$DEV_DIR/mapper/$xname" || return 1
+ 
+         echo -n "  [VDO]: deactivating VDO volume $xname... "
+diff --git a/test/shell/cachevol-cachedevice.sh b/test/shell/cachevol-cachedevice.sh
+new file mode 100644
+index 0000000..11a37d9
+--- /dev/null
++++ b/test/shell/cachevol-cachedevice.sh
+@@ -0,0 +1,212 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++aux have_cache 1 10 0 || skip
++aux have_writecache 1 0 0 || skip
++
++aux prepare_devs 4 64
++
++vgcreate $SHARED $vg "$dev1" "$dev2"
++
++## cache
++
++# use existing cachevol
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvcreate --type cache -n $lv2 -L40M --cachevol $lv1 -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype cache
++check lv_field $vg/${lv1}_cvol segtype linear -a
++lvremove -y $vg/$lv2
++
++# use entire cachedevice for cachevol
++lvcreate --type cache -n $lv2 -L40M --cachedevice "$dev1" -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype cache
++check lv_field $vg/${lv2}_cache_cvol segtype linear -a
++lvremove -y $vg/$lv2
++
++# use part of cachedevice for cachevol
++lvcreate --type cache -n $lv2 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype cache
++check lv_field $vg/${lv2}_cache_cvol segtype linear -a
++lvcreate --type cache -n $lv3 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2"
++check lv_field $vg/$lv3 segtype cache
++check lv_field $vg/${lv3}_cache_cvol segtype linear -a
++lvremove -y $vg/$lv2
++lvremove -y $vg/$lv3
++
++## writecache
++
++# use existing cachevol
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvcreate --type writecache -n $lv2 -L40M --cachevol $lv1 -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype writecache
++check lv_field $vg/${lv1}_cvol segtype linear -a
++lvremove -y $vg/$lv2
++
++# use entire cachedevice for cachevol
++lvcreate --type writecache -n $lv2 -L40M --cachedevice "$dev1" -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype writecache
++check lv_field $vg/${lv2}_cache_cvol segtype linear -a
++lvremove -y $vg/$lv2
++
++# use part of cachedevice for cachevol
++lvcreate --type writecache -n $lv2 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2"
++check lv_field $vg/$lv2 segtype writecache
++check lv_field $vg/${lv2}_cache_cvol segtype linear -a
++lvcreate --type writecache -n $lv3 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2"
++check lv_field $vg/$lv3 segtype writecache
++check lv_field $vg/${lv3}_cache_cvol segtype linear -a
++lvremove -y $vg/$lv2
++lvremove -y $vg/$lv3
++
++## multiple cachedevs
++
++vgextend $vg "$dev3" "$dev4"
++
++lvcreate --type writecache -n $lv2 -L100M --cachedevice "$dev1" --cachedevice "$dev3" -y $vg "$dev2" "$dev4"
++check lv_field $vg/${lv2}_cache_cvol lv_size "120.00m"
++lvremove -y $vg/$lv2
++
++lvcreate --type writecache -n $lv2 -L100M --cachedevice "$dev1" --cachedevice "$dev3" --cachesize 80M -y $vg "$dev2" "$dev4"
++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m"
++lvremove -y $vg/$lv2
++
++pvchange --addtag slow "$dev2"
++pvchange --addtag slow "$dev4"
++pvchange --addtag fast "$dev1"
++pvchange --addtag fast "$dev3"
++
++lvcreate --type writecache -n $lv2 -L100M --cachedevice @fast --cachesize 80M -y $vg @slow
++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m"
++lvremove -y $vg/$lv2
++
++lvcreate --type cache -n $lv2 -L100M --cachedevice @fast --cachesize 80M -y $vg @slow
++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m"
++lvremove -y $vg/$lv2
++
++## error cases
++
++# cachevol doesn't exist
++not lvcreate --type cache -n $lv2 -l8 --cachevol asdf -y $vg "$dev2"
++not lvs $vg/$lv1
++not lvs $vg/$lv2
++
++# cachedevice doesn't exist
++not lvcreate --type cache -n $lv2 -l8 --cachedevice asdf -y $vg "$dev2"
++not lvs $vg/$lv1
++not lvs $vg/$lv2
++
++# cachevol doesn't exist
++not lvcreate --type writecache -n $lv2 -l8 --cachevol asdf -y $vg "$dev2"
++not lvs $vg/$lv1
++not lvs $vg/$lv2
++
++# cachedevice doesn't exist
++not lvcreate --type writecache -n $lv2 -l8 --cachedevice asdf -y $vg "$dev2"
++not lvs $vg/$lv1
++not lvs $vg/$lv2
++
++# when cachedevice is already being used, cachesize is required to use a part of it
++lvcreate -n asdf -l1 $vg "$dev1"
++not lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" -y $vg "$dev2"
++not lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" --cachedevice "$dev3" -y $vg "$dev2"
++not lvs $vg/$lv1
++not lvs $vg/$lv2
++lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" --cachesize 8M -y $vg "$dev2"
++lvs $vg/$lv2
++check lv_field $vg/${lv2}_cache_cvol lv_size "8.00m"
++lvremove -y $vg/$lv2
++
++vgremove -ff $vg
++
++# lvconvert single step cachevol creation and attachment
++# . cache and writecache
++# . one or two cachedevices
++# . with or without --cachesize
++# . using tags for devices
++
++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4"
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type cache --cachedevice "$dev2" $vg/$lv1
++check lv_field $vg/$lv1 segtype cache
++check lv_field $vg/${lv1}_cache_cvol segtype linear -a
++check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type cache --cachedevice "$dev2" --cachedevice "$dev3" $vg/$lv1
++check lv_field $vg/$lv1 segtype cache
++check lv_field $vg/${lv1}_cache_cvol lv_size "120.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type cache --cachedevice "$dev2" --cachedevice "$dev3" --cachesize 8M $vg/$lv1
++check lv_field $vg/$lv1 segtype cache
++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type writecache --cachedevice "$dev2" $vg/$lv1
++check lv_field $vg/$lv1 segtype writecache
++check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type writecache --cachedevice "$dev2" --cachedevice "$dev3" $vg/$lv1
++check lv_field $vg/$lv1 segtype writecache
++check lv_field $vg/${lv1}_cache_cvol lv_size "120.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg "$dev1"
++lvconvert -y --type writecache --cachedevice "$dev2" --cachedevice "$dev3" --cachesize 8M $vg/$lv1
++check lv_field $vg/$lv1 segtype writecache
++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++pvchange --addtag slow "$dev1"
++pvchange --addtag fast "$dev2"
++pvchange --addtag fast "$dev3"
++
++lvcreate -n $lv1 -l8 -an $vg @slow
++lvconvert -y --type cache --cachedevice @fast --cachesize 8M $vg/$lv1
++check lv_field $vg/$lv1 segtype cache
++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++lvcreate -n $lv1 -l8 -an $vg @slow
++lvconvert -y --type writecache --cachedevice @fast --cachesize 8M $vg/$lv1
++check lv_field $vg/$lv1 segtype writecache
++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m"
++lvchange -ay $vg/$lv1
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++vgremove -ff $vg
++
+diff --git a/test/shell/integrity-blocksize-2.sh b/test/shell/integrity-blocksize-2.sh
+new file mode 100644
+index 0000000..5e0fd9a
+--- /dev/null
++++ b/test/shell/integrity-blocksize-2.sh
+@@ -0,0 +1,128 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++aux have_integrity 1 5 0 || skip
++
++mnt="mnt"
++mkdir -p $mnt
++
++_sync_percent() {
++        local checklv=$1
++        get lv_field "$checklv" sync_percent | cut -d. -f1
++}
++
++_wait_recalc() {
++        local checklv=$1
++
++        for i in $(seq 1 10) ; do
++                sync=$(_sync_percent "$checklv")
++                echo "sync_percent is $sync"
++
++                if test "$sync" = "100"; then
++                        return
++                fi
++
++                sleep 1
++        done
++
++        # TODO: There is some strange bug, first leg of RAID with integrity
++        # enabled never gets in sync. I saw this in BB, but not when executing
++        # the commands manually
++        if test -z "$sync"; then
++                echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed"
++                dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
++                exit
++        fi
++        echo "timeout waiting for recalc"
++        return 1
++}
++
++# prepare_devs uses ramdisk backing which has 512 LBS and 4K PBS
++# This should cause mkfs.xfs to use 4K sector size,
++# and integrity to use 4K block size
++aux prepare_devs 2 64
++
++vgcreate $vg "$dev1" "$dev2"
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# add integrity while LV is inactive
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvconvert --raidintegrity y $vg/$lv1
++lvchange -ay $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs unmounted
++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvchange -ay $vg
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs mounted
++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++vgremove -ff $vg
++
+diff --git a/test/shell/integrity-blocksize-3.sh b/test/shell/integrity-blocksize-3.sh
+new file mode 100644
+index 0000000..4aea972
+--- /dev/null
++++ b/test/shell/integrity-blocksize-3.sh
+@@ -0,0 +1,285 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++aux have_integrity 1 5 0 || skip
++
++mnt="mnt"
++mkdir -p $mnt
++
++_sync_percent() {
++        local checklv=$1
++        get lv_field "$checklv" sync_percent | cut -d. -f1
++}
++
++_wait_recalc() {
++        local checklv=$1
++
++        for i in $(seq 1 10) ; do
++                sync=$(_sync_percent "$checklv")
++                echo "sync_percent is $sync"
++
++                if test "$sync" = "100"; then
++                        return
++                fi
++
++                sleep 1
++        done
++
++        # TODO: There is some strange bug, first leg of RAID with integrity
++        # enabled never gets in sync. I saw this in BB, but not when executing
++        # the commands manually
++        if test -z "$sync"; then
++                echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed"
++                dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
++                exit
++        fi
++        echo "timeout waiting for recalc"
++        return 1
++}
++
++# scsi_debug devices with 512 LBS 512 PBS
++aux prepare_scsi_debug_dev 256
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512"
++aux prepare_devs 2 64
++
++vgcreate $vg "$dev1" "$dev2"
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# add integrity while LV is inactive
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvconvert --raidintegrity y $vg/$lv1
++lvchange -ay $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs unmounted
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvchange -ay $vg
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs mounted
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++vgremove -ff $vg
++aux cleanup_scsi_debug_dev
++sleep 1
++
++# scsi_debug devices with 4K LBS and 4K PBS
++aux prepare_scsi_debug_dev 256 sector_size=4096
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "4096"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096"
++aux prepare_devs 2 64
++
++vgcreate $vg "$dev1" "$dev2"
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# add integrity while LV is inactive
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvconvert --raidintegrity y $vg/$lv1
++lvchange -ay $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs unmounted
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvchange -ay $vg
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs mounted
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++vgremove -ff $vg
++aux cleanup_scsi_debug_dev
++sleep 1
++
++# scsi_debug devices with 512 LBS and 4K PBS
++aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096"
++aux prepare_devs 2 64
++
++vgcreate $vg "$dev1" "$dev2"
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# add integrity while LV is inactive
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvconvert --raidintegrity y $vg/$lv1
++lvchange -ay $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs unmounted
++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++umount $mnt
++lvchange -an $vg
++lvchange -ay $vg
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++# add integrity while LV is active, fs mounted
++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV
++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++echo "hello world" > $mnt/hello
++lvconvert --raidintegrity y $vg/$lv1
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++lvs -a -o+devices $vg
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -ay $vg/$lv1
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++cat $mnt/hello | grep "hello world"
++umount $mnt
++lvchange -an $vg/$lv1
++lvremove $vg/$lv1
++
++vgremove -ff $vg
++aux cleanup_scsi_debug_dev
++sleep 1
++
+diff --git a/test/shell/integrity-blocksize.sh b/test/shell/integrity-blocksize.sh
+index 444e3db..eb6a364 100644
+--- a/test/shell/integrity-blocksize.sh
++++ b/test/shell/integrity-blocksize.sh
+@@ -48,9 +48,24 @@ aux extend_filter "a|$LOOP4|"
+ 
+ aux lvmconf 'devices/scan = "/dev"'
+ 
++mnt="mnt"
++mkdir -p $mnt
++
+ vgcreate $vg1 $LOOP1 $LOOP2
+ vgcreate $vg2 $LOOP3 $LOOP4
+ 
++# LOOP1/LOOP2 have LBS 512 and PBS 512
++# LOOP3/LOOP4 have LBS 4K and PBS 4K
++
++blockdev --getss $LOOP1
++blockdev --getpbsz $LOOP1
++blockdev --getss $LOOP2
++blockdev --getpbsz $LOOP2
++blockdev --getss $LOOP3
++blockdev --getpbsz $LOOP3
++blockdev --getss $LOOP4
++blockdev --getpbsz $LOOP4
++
+ # lvcreate on dev512, result 512
+ lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg1
+ pvck --dump metadata $LOOP1 | grep 'block_size = 512'
+@@ -105,7 +120,11 @@ lvremove -y $vg2/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
+ aux wipefs_a /dev/$vg1/$lv1
+ mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\"
+ lvconvert --raidintegrity y $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP1 | grep 'block_size = 512'
+ lvremove -y $vg1/$lv1
+ 
+@@ -113,15 +132,37 @@ lvremove -y $vg1/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
+ aux wipefs_a /dev/$vg2/$lv1
+ mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1"
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
+ lvconvert --raidintegrity y $vg2/$lv1
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
+ lvremove -y $vg2/$lv1
+ 
+-# lvconvert on dev512, ext4 1024, result 1024 
++# lvconvert on dev512, ext4 1024, result 1024 (LV active when adding)
++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
++aux wipefs_a /dev/$vg1/$lv1
++mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
++lvconvert --raidintegrity y $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
++pvck --dump metadata $LOOP1 | grep 'block_size = 512'
++lvremove -y $vg1/$lv1
++
++# lvconvert on dev512, ext4 1024, result 1024 (LV inactive when adding)
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
+ aux wipefs_a /dev/$vg1/$lv1
+ mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
++lvchange -an $vg1/$lv1
+ lvconvert --raidintegrity y $vg1/$lv1
++lvchange -ay $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP1 | grep 'block_size = 1024'
+ lvremove -y $vg1/$lv1
+ 
+@@ -129,7 +170,11 @@ lvremove -y $vg1/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
+ aux wipefs_a /dev/$vg2/$lv1
+ mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1"
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
+ lvconvert --raidintegrity y $vg2/$lv1
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
+ lvremove -y $vg2/$lv1
+ 
+@@ -137,7 +182,11 @@ lvremove -y $vg2/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
+ aux wipefs_a /dev/$vg1/$lv1
+ mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\"
+ lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP1 | grep 'block_size = 512'
+ lvremove -y $vg1/$lv1
+ 
+@@ -145,7 +194,14 @@ lvremove -y $vg1/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
+ aux wipefs_a /dev/$vg1/$lv1
+ mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\"
++lvchange -an $vg1/$lv1
++# lv needs to be inactive to increase LBS from 512
+ lvconvert --raidintegrity y --raidintegrityblocksize 1024 $vg1/$lv1
++lvchange -ay $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP1 | grep 'block_size = 1024'
+ lvremove -y $vg1/$lv1
+ 
+@@ -153,7 +209,11 @@ lvremove -y $vg1/$lv1
+ lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
+ aux wipefs_a /dev/$vg1/$lv1
+ mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1"
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
+ lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++umount $mnt
+ pvck --dump metadata $LOOP1 | grep 'block_size = 512'
+ lvremove -y $vg1/$lv1
+ 
+@@ -164,10 +224,48 @@ mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1"
+ not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1
+ lvremove -y $vg2/$lv1
+ 
+-# FIXME: need to use scsi_debug to create devs with LBS 512 PBS 4k
+-# FIXME: lvconvert, fsunknown, LBS 512, PBS 4k: result 512
+-# FIXME: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512
+-# FIXME: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k
++# TODO: need to use scsi_debug to create devs with LBS 512 PBS 4k
++# TODO: lvconvert, fsunknown, LBS 512, PBS 4k: result 512
++# TODO: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512
++# TODO: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k
++
++# lvconvert on dev512, xfs 512, result 512, (detect fs with LV inactive)
++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
++aux wipefs_a /dev/$vg1/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1"
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++echo "test" > $mnt/test
++umount $mnt
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\"
++lvchange -an $vg1/$lv1
++lvconvert --raidintegrity y $vg1/$lv1
++lvchange -ay $vg1/$lv1
++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt
++cat $mnt/test
++umount $mnt
++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\"
++pvck --dump metadata $LOOP1 | grep 'block_size = 512'
++lvchange -an $vg1/$lv1
++lvremove -y $vg1/$lv1
++
++# lvconvert on dev4k, xfs 4096, result 4096 (detect fs with LV inactive)
++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
++aux wipefs_a /dev/$vg2/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1"
++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt
++echo "test" > $mnt/test
++umount $mnt
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
++lvchange -an $vg2/$lv1
++lvconvert --raidintegrity y $vg2/$lv1
++lvchange -ay $vg2/$lv1
++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt
++cat $mnt/test
++umount $mnt
++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\"
++pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
++lvchange -an $vg2/$lv1
++lvremove -y $vg2/$lv1
+ 
+ vgremove -ff $vg1
+ vgremove -ff $vg2
+diff --git a/test/shell/integrity-large.sh b/test/shell/integrity-large.sh
+index 5aba80e..1df9e6b 100644
+--- a/test/shell/integrity-large.sh
++++ b/test/shell/integrity-large.sh
+@@ -115,7 +115,10 @@ lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
+ lvchange -an $vg/$lv1
+ lvchange -ay $vg/$lv1
+ _add_data_to_lv
++# lv needs to be inactive when adding integrity to increase LBS from 512 and get a ribs of 4k
++lvchange -an $vg/$lv1
+ lvconvert --raidintegrity y $vg/$lv1
++lvchange -ay $vg/$lv1
+ _wait_recalc $vg/${lv1}_rimage_0
+ _wait_recalc $vg/${lv1}_rimage_1
+ lvs -a -o+devices $vg
+diff --git a/test/shell/integrity-misc.sh b/test/shell/integrity-misc.sh
+index 0d05689..2dae25f 100644
+--- a/test/shell/integrity-misc.sh
++++ b/test/shell/integrity-misc.sh
+@@ -95,7 +95,7 @@ _sync_percent() {
+ 	get lv_field "$checklv" sync_percent | cut -d. -f1
+ }
+ 
+-_wait_recalc() {
++_wait_sync() {
+ 	local checklv=$1
+ 
+ 	for i in $(seq 1 10) ; do
+@@ -124,8 +124,9 @@ _wait_recalc() {
+ # lvrename
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
+-_wait_recalc $vg/${lv1}_rimage_0
+-_wait_recalc $vg/${lv1}_rimage_1
++_wait_sync $vg/${lv1}_rimage_0
++_wait_sync $vg/${lv1}_rimage_1
++_wait_sync $vg/$lv1
+ _add_new_data_to_mnt
+ umount $mnt
+ lvrename $vg/$lv1 $vg/$lv2
+@@ -141,8 +142,9 @@ vgremove -ff $vg
+ # lv must be active
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
+-_wait_recalc $vg/${lv1}_rimage_0
+-_wait_recalc $vg/${lv1}_rimage_1
++_wait_sync $vg/${lv1}_rimage_0
++_wait_sync $vg/${lv1}_rimage_1
++_wait_sync $vg/$lv1
+ _add_new_data_to_mnt
+ lvconvert --replace "$dev1" $vg/$lv1 "$dev3"
+ lvs -a -o+devices $vg > out
+@@ -162,8 +164,9 @@ vgremove -ff $vg
+ # same as prev but with bitmap mode
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
+-_wait_recalc $vg/${lv1}_rimage_0
+-_wait_recalc $vg/${lv1}_rimage_1
++_wait_sync $vg/${lv1}_rimage_0
++_wait_sync $vg/${lv1}_rimage_1
++_wait_sync $vg/$lv1
+ _add_new_data_to_mnt
+ lvconvert --replace "$dev1" $vg/$lv1 "$dev3"
+ lvs -a -o+devices $vg > out
+@@ -185,8 +188,9 @@ vgremove -ff $vg
+ # (like lvconvert --replace does for a dev that's not missing).
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
+-_wait_recalc $vg/${lv1}_rimage_0
+-_wait_recalc $vg/${lv1}_rimage_1
++_wait_sync $vg/${lv1}_rimage_0
++_wait_sync $vg/${lv1}_rimage_1
++_wait_sync $vg/$lv1
+ _add_new_data_to_mnt
+ aux disable_dev "$dev2"
+ lvs -a -o+devices $vg > out
+@@ -213,8 +217,9 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
+-_wait_recalc $vg/${lv1}_rimage_0
+-_wait_recalc $vg/${lv1}_rimage_1
++_wait_sync $vg/${lv1}_rimage_0
++_wait_sync $vg/${lv1}_rimage_1
++_wait_sync $vg/$lv1
+ _add_new_data_to_mnt
+ umount $mnt
+ lvchange -an $vg/$lv1
+diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh
+index 77e9430..0143129 100644
+--- a/test/shell/integrity.sh
++++ b/test/shell/integrity.sh
+@@ -78,14 +78,14 @@ _test_fs_with_error() {
+ 	dd if=$mnt/fileA of=tmp bs=1k
+ 	ls -l tmp
+ 	stat -c %s tmp
+-	diff fileA tmp
++	cmp -b fileA tmp
+ 	rm tmp
+ 
+ 	# read partial fileB which was corrupted
+ 	not dd if=$mnt/fileB of=tmp bs=1k
+ 	ls -l tmp
+ 	stat -c %s tmp | grep 12288
+-	not diff fileB tmp
++	not cmp -b fileB tmp
+ 	rm tmp
+ 
+ 	umount $mnt
+@@ -118,14 +118,14 @@ _test_fs_with_raid() {
+ 	dd if=$mnt/fileA of=tmp bs=1k
+ 	ls -l tmp
+ 	stat -c %s tmp | grep 16384
+-	diff fileA tmp
++	cmp -b fileA tmp
+ 	rm tmp
+ 
+ 	# read complete fileB, corruption is corrected by raid
+ 	dd if=$mnt/fileB of=tmp bs=1k
+ 	ls -l tmp
+ 	stat -c %s tmp | grep 16384
+-	diff fileB tmp
++	cmp -b fileB tmp
+ 	rm tmp
+ 
+ 	umount $mnt
+@@ -161,15 +161,15 @@ _add_more_data_to_mnt() {
+ }
+ 
+ _verify_data_on_mnt() {
+-	diff randA $mnt/randA
+-	diff randB $mnt/randB
+-	diff randC $mnt/randC
+-	diff fileA $mnt/1/fileA
+-	diff fileB $mnt/1/fileB
+-	diff fileC $mnt/1/fileC
+-	diff fileA $mnt/2/fileA
+-	diff fileB $mnt/2/fileB
+-	diff fileC $mnt/2/fileC
++	cmp -b randA $mnt/randA
++	cmp -b randB $mnt/randB
++	cmp -b randC $mnt/randC
++	cmp -b fileA $mnt/1/fileA
++	cmp -b fileB $mnt/1/fileB
++	cmp -b fileC $mnt/1/fileC
++	cmp -b fileA $mnt/2/fileA
++	cmp -b fileB $mnt/2/fileB
++	cmp -b fileC $mnt/2/fileC
+ }
+ 
+ _verify_data_on_lv() {
+@@ -221,6 +221,8 @@ _wait_recalc() {
+ 
+ _prepare_vg
+ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+@@ -229,6 +231,9 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++_wait_recalc $vg/${lv1}_rimage_2
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+@@ -237,6 +242,9 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++_wait_recalc $vg/${lv1}_rimage_2
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+@@ -245,6 +253,9 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++_wait_recalc $vg/${lv1}_rimage_2
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+@@ -253,6 +264,11 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++_wait_recalc $vg/${lv1}_rimage_2
++_wait_recalc $vg/${lv1}_rimage_3
++_wait_recalc $vg/${lv1}_rimage_4
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+@@ -261,6 +277,10 @@ vgremove -ff $vg
+ 
+ _prepare_vg
+ lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
++_wait_recalc $vg/${lv1}_rimage_0
++_wait_recalc $vg/${lv1}_rimage_1
++_wait_recalc $vg/${lv1}_rimage_2
++_wait_recalc $vg/${lv1}_rimage_3
+ _test_fs_with_raid
+ lvchange -an $vg/$lv1
+ lvconvert --raidintegrity n $vg/$lv1
+diff --git a/test/shell/writecache-blocksize.sh b/test/shell/writecache-blocksize.sh
+new file mode 100644
+index 0000000..1300176
+--- /dev/null
++++ b/test/shell/writecache-blocksize.sh
+@@ -0,0 +1,342 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++# Test writecache usage
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++aux have_writecache 1 0 0 || skip
++which mkfs.xfs || skip
++
++# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE
++aux prepare_devs 1
++vgcreate $vg "$dev1"
++lvcreate -n $lv1 -l8 $vg
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++blkid "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip
++lvchange -an $vg
++vgremove -ff $vg
++aux cleanup_scsi_debug_dev
++
++mnt="mnt"
++mkdir -p $mnt
++
++for i in `seq 1 16384`; do echo -n "A" >> fileA; done
++for i in `seq 1 16384`; do echo -n "B" >> fileB; done
++for i in `seq 1 16384`; do echo -n "C" >> fileC; done
++
++# generate random data
++dd if=/dev/urandom of=randA bs=512K count=2
++dd if=/dev/urandom of=randB bs=512K count=3
++dd if=/dev/urandom of=randC bs=512K count=4
++
++_add_new_data_to_mnt() {
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++
++	# add original data
++	cp randA $mnt
++	cp randB $mnt
++	cp randC $mnt
++	mkdir $mnt/1
++	cp fileA $mnt/1
++	cp fileB $mnt/1
++	cp fileC $mnt/1
++	mkdir $mnt/2
++	cp fileA $mnt/2
++	cp fileB $mnt/2
++	cp fileC $mnt/2
++	sync
++}
++
++_add_more_data_to_mnt() {
++	mkdir $mnt/more
++	cp fileA $mnt/more
++	cp fileB $mnt/more
++	cp fileC $mnt/more
++	cp randA $mnt/more
++	cp randB $mnt/more
++	cp randC $mnt/more
++	sync
++}
++
++_verify_data_on_mnt() {
++	diff randA $mnt/randA
++	diff randB $mnt/randB
++	diff randC $mnt/randC
++	diff fileA $mnt/1/fileA
++	diff fileB $mnt/1/fileB
++	diff fileC $mnt/1/fileC
++	diff fileA $mnt/2/fileA
++	diff fileB $mnt/2/fileB
++	diff fileC $mnt/2/fileC
++}
++
++_verify_more_data_on_mnt() {
++	diff randA $mnt/more/randA
++	diff randB $mnt/more/randB
++	diff randC $mnt/more/randC
++	diff fileA $mnt/more/fileA
++	diff fileB $mnt/more/fileB
++	diff fileC $mnt/more/fileC
++}
++
++_verify_data_on_lv() {
++	lvchange -ay $vg/$lv1
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++	_verify_data_on_mnt
++	rm $mnt/randA
++	rm $mnt/randB
++	rm $mnt/randC
++	rm -rf $mnt/1
++	rm -rf $mnt/2
++	umount $mnt
++	lvchange -an $vg/$lv1
++}
++
++# the default is brd ram devs with 512 LBS 4K PBS
++aux prepare_devs 2 64
++
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# lbs 512, pbs 4k, xfs 4k, wc 4k
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=4096 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 4096 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++# lbs 512, pbs 4k, xfs -s 512, wc 512
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -f -s size=512 "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=512 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 512 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++aux cleanup_scsi_debug_dev
++sleep 1
++
++
++# scsi_debug devices with 512 LBS 512 PBS
++aux prepare_scsi_debug_dev 256
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512"
++aux prepare_devs 2 64
++
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# lbs 512, pbs 512, xfs 512, wc 512
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=512 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 512 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++# lbs 512, pbs 512, xfs -s 4096, wc 4096
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -s size=4096 -f "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=4096 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 4096 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++aux cleanup_scsi_debug_dev
++sleep 1
++
++
++# scsi_debug devices with 512 LBS and 4K PBS
++aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096"
++aux prepare_devs 2 64
++
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# lbs 512, pbs 4k, xfs 4k, wc 4k
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=4096 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 4096 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++aux cleanup_scsi_debug_dev
++sleep 1
++
++
++# scsi_debug devices with 4K LBS and 4K PBS
++aux prepare_scsi_debug_dev 256 sector_size=4096
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "4096"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096"
++aux prepare_devs 2 64
++
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++# lbs 4k, pbs 4k, xfs 4k, wc 4k
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++lvchange -ay $vg/$lv1
++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep sectsz=4096 out
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
++grep 4096 out
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
++lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++vgremove $vg
++
++aux cleanup_scsi_debug_dev
++
++
+diff --git a/test/shell/writecache-large.sh b/test/shell/writecache-large.sh
+new file mode 100644
+index 0000000..b52eaf6
+--- /dev/null
++++ b/test/shell/writecache-large.sh
+@@ -0,0 +1,153 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++# Test writecache usage
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++aux have_writecache 1 0 0 || skip
++which mkfs.xfs || skip
++
++# scsi_debug devices with 512 LBS 512 PBS
++aux prepare_scsi_debug_dev 1200
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512"
++
++aux prepare_devs 2 600
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++mnt="mnt"
++mkdir -p $mnt
++
++for i in `seq 1 16384`; do echo -n "A" >> fileA; done
++for i in `seq 1 16384`; do echo -n "B" >> fileB; done
++for i in `seq 1 16384`; do echo -n "C" >> fileC; done
++
++# generate random data
++dd if=/dev/urandom of=randA bs=512K count=2
++dd if=/dev/urandom of=randB bs=512K count=3
++dd if=/dev/urandom of=randC bs=512K count=4
++
++_add_new_data_to_mnt() {
++	mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++
++	# add original data
++	cp randA $mnt
++	cp randB $mnt
++	cp randC $mnt
++	mkdir $mnt/1
++	cp fileA $mnt/1
++	cp fileB $mnt/1
++	cp fileC $mnt/1
++	mkdir $mnt/2
++	cp fileA $mnt/2
++	cp fileB $mnt/2
++	cp fileC $mnt/2
++	sync
++}
++
++_add_more_data_to_mnt() {
++	mkdir $mnt/more
++	cp fileA $mnt/more
++	cp fileB $mnt/more
++	cp fileC $mnt/more
++	cp randA $mnt/more
++	cp randB $mnt/more
++	cp randC $mnt/more
++	sync
++}
++
++_verify_data_on_mnt() {
++	diff randA $mnt/randA
++	diff randB $mnt/randB
++	diff randC $mnt/randC
++	diff fileA $mnt/1/fileA
++	diff fileB $mnt/1/fileB
++	diff fileC $mnt/1/fileC
++	diff fileA $mnt/2/fileA
++	diff fileB $mnt/2/fileB
++	diff fileC $mnt/2/fileC
++}
++
++_verify_more_data_on_mnt() {
++	diff randA $mnt/more/randA
++	diff randB $mnt/more/randB
++	diff randC $mnt/more/randC
++	diff fileA $mnt/more/fileA
++	diff fileB $mnt/more/fileB
++	diff fileC $mnt/more/fileC
++}
++
++_verify_data_on_lv() {
++	lvchange -ay $vg/$lv1
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++	_verify_data_on_mnt
++	rm $mnt/randA
++	rm $mnt/randB
++	rm $mnt/randC
++	rm -rf $mnt/1
++	rm -rf $mnt/2
++	umount $mnt
++	lvchange -an $vg/$lv1
++}
++
++vgcreate $SHARED $vg "$dev1"
++vgextend $vg "$dev2"
++
++# Use a large enough size so that the cleaner will not
++# finish immediately when detaching, and will require
++# a secondary check from command top level.
++
++lvcreate -n $lv1 -L 560M -an $vg "$dev1"
++lvcreate -n $lv2 -L 500M -an $vg "$dev2"
++
++lvchange -ay $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" 
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" 
++
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++dmsetup table $vg-$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" 
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" 
++
++_add_new_data_to_mnt
++_add_more_data_to_mnt
++_verify_data_on_mnt
++
++dd if=/dev/zero of=$mnt/big1 bs=1M count=100 oflag=sync
++dd if=/dev/zero of=$mnt/big2 bs=1M count=100 oflag=sync
++dd if=/dev/zero of=$mnt/big3 bs=1M count=100 oflag=sync
++dd if=/dev/zero of=$mnt/big4 bs=1M count=100 oflag=sync
++
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++dmsetup table $vg-$lv1
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++dd if=$mnt/big4 of=/dev/null bs=1M count=100
++umount $mnt
++lvchange -an $vg/$lv1
++_verify_data_on_lv
++lvchange -an $vg/$lv2
++lvremove $vg/$lv1
++lvremove $vg/$lv2
++
++vgremove -ff $vg
++
+diff --git a/test/shell/writecache-split.sh b/test/shell/writecache-split.sh
+index 0f2dc47..e615e2a 100644
+--- a/test/shell/writecache-split.sh
++++ b/test/shell/writecache-split.sh
+@@ -20,29 +20,21 @@ mkfs_mount_umount()
+ {
+         lvt=$1
+ 
+-        lvchange -ay $vg/$lvt
+-
+         mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lvt"
+         mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
+         cp pattern1 "$mount_dir/pattern1"
+         dd if=/dev/zero of="$mount_dir/zeros2M" bs=1M count=32 conv=fdatasync
+         umount "$mount_dir"
+-
+-        lvchange -an $vg/$lvt
+ }
+ 
+ mount_umount()
+ {
+         lvt=$1
+ 
+-        lvchange -ay $vg/$lvt
+-
+         mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
+         diff pattern1 "$mount_dir/pattern1"
+         dd if="$mount_dir/zeros2M" of=/dev/null bs=1M count=32
+         umount "$mount_dir"
+-
+-        lvchange -an $vg/$lvt
+ }
+ 
+ aux have_writecache 1 0 0 || skip
+@@ -62,18 +54,38 @@ lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev4"
+ lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+ 
+ #
+-# split when no devs are missing
++# split while inactive
+ #
+ 
+ lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
+ 
++lvchange -ay $vg/$lv1
+ mkfs_mount_umount $lv1
++lvchange -an $vg/$lv1
+ 
+ lvconvert --splitcache $vg/$lv1
+ lvs -o segtype $vg/$lv1 | grep linear
+ lvs -o segtype $vg/$lv2 | grep linear
+ 
++lvchange -ay $vg/$lv1
+ mount_umount $lv1
++lvchange -an $vg/$lv1
++
++#
++# split while active
++#
++
++lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
++
++lvchange -ay $vg/$lv1
++mkfs_mount_umount $lv1
++
++lvconvert --splitcache $vg/$lv1
++lvs -o segtype $vg/$lv1 | grep linear
++lvs -o segtype $vg/$lv2 | grep linear
++
++mount_umount $lv1
++lvchange -an $vg/$lv1
+ 
+ #
+ # split while cachevol is missing
+@@ -81,7 +93,9 @@ mount_umount $lv1
+ 
+ lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
+ 
++lvchange -ay $vg/$lv1
+ mkfs_mount_umount $lv1
++lvchange -an $vg/$lv1
+ 
+ aux disable_dev "$dev2"
+ 
+@@ -108,7 +122,9 @@ lvcreate -n $lv2 -l 14 -an $vg "$dev2" "$dev3"
+ 
+ lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
+ 
++lvchange -ay $vg/$lv1
+ mkfs_mount_umount $lv1
++lvchange -an $vg/$lv1
+ 
+ aux disable_dev "$dev3"
+ 
+diff --git a/test/shell/writecache.sh b/test/shell/writecache.sh
+index 8852e93..39ef319 100644
+--- a/test/shell/writecache.sh
++++ b/test/shell/writecache.sh
+@@ -19,152 +19,251 @@ SKIP_WITH_LVMPOLLD=1
+ aux have_writecache 1 0 0 || skip
+ which mkfs.xfs || skip
+ 
+-mount_dir="mnt"
+-mkdir -p $mount_dir
++# scsi_debug devices with 512 LBS 512 PBS
++aux prepare_scsi_debug_dev 256
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512"
++aux prepare_devs 2 64
++
++# scsi_debug devices with 512 LBS and 4K PBS
++#aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
++#check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512"
++#check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096"
++#aux prepare_devs 2 64
++
++# loop devs with 512 LBS and 512 PBS
++#dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null
++#dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null
++#LOOP1=$(losetup -f loopa --show)
++#LOOP2=$(losetup -f loopb --show)
++#aux extend_filter "a|$LOOP1|"
++#aux extend_filter "a|$LOOP2|"
++#aux lvmconf 'devices/scan = "/dev"'
++#dev1=$LOOP1
++#dev2=$LOOP2
++
++# loop devs with 4096 LBS and 4096 PBS
++#dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null
++#dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null
++#LOOP1=$(losetup -f loopa --sector-size 4096 --show)
++#LOOP2=$(losetup -f loopb --sector-size 4096 --show)
++#aux extend_filter "a|$LOOP1|"
++#aux extend_filter "a|$LOOP2|"
++#aux lvmconf 'devices/scan = "/dev"'
++#dev1=$LOOP1
++#dev2=$LOOP2
++
++# the default is brd ram devs with 512 LBS 4K PBS
++# aux prepare_devs 2 64
++
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
++
++
++mnt="mnt"
++mkdir -p $mnt
++
++for i in `seq 1 16384`; do echo -n "A" >> fileA; done
++for i in `seq 1 16384`; do echo -n "B" >> fileB; done
++for i in `seq 1 16384`; do echo -n "C" >> fileC; done
+ 
+ # generate random data
+-dd if=/dev/urandom of=pattern1 bs=512K count=1
++dd if=/dev/urandom of=randA bs=512K count=2
++dd if=/dev/urandom of=randB bs=512K count=3
++dd if=/dev/urandom of=randC bs=512K count=4
++
++_add_new_data_to_mnt() {
++	mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
++
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++
++	# add original data
++	cp randA $mnt
++	cp randB $mnt
++	cp randC $mnt
++	mkdir $mnt/1
++	cp fileA $mnt/1
++	cp fileB $mnt/1
++	cp fileC $mnt/1
++	mkdir $mnt/2
++	cp fileA $mnt/2
++	cp fileB $mnt/2
++	cp fileC $mnt/2
++	sync
++}
++
++_add_more_data_to_mnt() {
++	mkdir $mnt/more
++	cp fileA $mnt/more
++	cp fileB $mnt/more
++	cp fileC $mnt/more
++	cp randA $mnt/more
++	cp randB $mnt/more
++	cp randC $mnt/more
++	sync
++}
++
++_verify_data_on_mnt() {
++	diff randA $mnt/randA
++	diff randB $mnt/randB
++	diff randC $mnt/randC
++	diff fileA $mnt/1/fileA
++	diff fileB $mnt/1/fileB
++	diff fileC $mnt/1/fileC
++	diff fileA $mnt/2/fileA
++	diff fileB $mnt/2/fileB
++	diff fileC $mnt/2/fileC
++}
++
++_verify_more_data_on_mnt() {
++	diff randA $mnt/more/randA
++	diff randB $mnt/more/randB
++	diff randC $mnt/more/randC
++	diff fileA $mnt/more/fileA
++	diff fileB $mnt/more/fileB
++	diff fileC $mnt/more/fileC
++}
++
++_verify_data_on_lv() {
++	lvchange -ay $vg/$lv1
++	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++	_verify_data_on_mnt
++	rm $mnt/randA
++	rm $mnt/randB
++	rm $mnt/randC
++	rm -rf $mnt/1
++	rm -rf $mnt/2
++	umount $mnt
++	lvchange -an $vg/$lv1
++}
+ 
+-aux prepare_devs 2 64
+ 
+ vgcreate $SHARED $vg "$dev1"
+-
+ vgextend $vg "$dev2"
+ 
+-lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+-
+-lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++blockdev --getss "$dev1"
++blockdev --getpbsz "$dev1"
++blockdev --getss "$dev2"
++blockdev --getpbsz "$dev2"
+ 
+-# test1: create fs on LV before writecache is attached
++# Test attach while inactive, detach while inactive
++# create fs on LV before writecache is attached
+ 
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+ lvchange -ay $vg/$lv1
+-
+-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-cp pattern1 $mount_dir/pattern1
+-
+-umount $mount_dir
++_add_new_data_to_mnt
++umount $mnt
+ lvchange -an $vg/$lv1
+-
+ lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
+-
+ check lv_field $vg/$lv1 segtype writecache
+-
+ lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out
+ grep linear out
+-
+ lvchange -ay $vg/$lv1
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-diff pattern1 $mount_dir/pattern1
+-
+-cp pattern1 $mount_dir/pattern1b
+-
+-ls -l $mount_dir
+-
+-umount $mount_dir
+-
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++_add_more_data_to_mnt
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
+ lvchange -an $vg/$lv1
+-
+ lvconvert --splitcache $vg/$lv1
+-
+ check lv_field $vg/$lv1 segtype linear
+ check lv_field $vg/$lv2 segtype linear
+-
+ lvchange -ay $vg/$lv1
+-lvchange -ay $vg/$lv2
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-ls -l $mount_dir
+-
+-diff pattern1 $mount_dir/pattern1
+-diff pattern1 $mount_dir/pattern1b
+-
+-umount $mount_dir
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+ lvchange -an $vg/$lv1
++_verify_data_on_lv
+ lvchange -an $vg/$lv2
++lvremove $vg/$lv1
++lvremove $vg/$lv2
+ 
+-# test2: create fs on LV after writecache is attached
++# Test attach while inactive, detach while inactive
++# create fs on LV after writecache is attached
+ 
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+ lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
+-
+ check lv_field $vg/$lv1 segtype writecache
+-
+ lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out
+ grep linear out
+-
+ lvchange -ay $vg/$lv1
+-
+-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-cp pattern1 $mount_dir/pattern1
+-ls -l $mount_dir
+-
+-umount $mount_dir
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_new_data_to_mnt
++umount $mnt
+ lvchange -an $vg/$lv1
+-
+ lvconvert --splitcache $vg/$lv1
+-
+-check lv_field $vg/$lv1 segtype linear
+-check lv_field $vg/$lv2 segtype linear
+-
+ lvchange -ay $vg/$lv1
+-lvchange -ay $vg/$lv2
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-ls -l $mount_dir
+-
+-diff pattern1 $mount_dir/pattern1
+-
+-umount $mount_dir
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++mount "$DM_DEV_DIR/$vg/$lv1" $mnt
++_add_more_data_to_mnt
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
+ lvchange -an $vg/$lv1
+-lvchange -an $vg/$lv2
+-
+-
+-# test3: attach writecache to an active LV
+-
+-lvchange -ay $vg/$lv1
+-
+-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+-
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-cp pattern1 $mount_dir/pattern1
+-ls -l $mount_dir
+-
+-# TODO BZ 1808012 - can not convert active volume to writecache:
+-not lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
+-
+-if false; then
+-check lv_field $vg/$lv1 segtype writecache
+-
+-lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out
+-grep linear out
+-
+-cp pattern1 $mount_dir/pattern1.after
++_verify_data_on_lv
++lvremove $vg/$lv1
++lvremove $vg/$lv2
+ 
+-diff pattern1 $mount_dir/pattern1
+-diff pattern1 $mount_dir/pattern1.after
++# Test attach while active, detach while active
+ 
+-umount $mount_dir
+-lvchange -an $vg/$lv1
++lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+ lvchange -ay $vg/$lv1
+-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+-
+-diff pattern1 $mount_dir/pattern1
+-diff pattern1 $mount_dir/pattern1.after
+-fi
+-
+-umount $mount_dir
++_add_new_data_to_mnt
++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_add_more_data_to_mnt
++_verify_data_on_mnt
++lvconvert --splitcache $vg/$lv1
++check lv_field $vg/$lv1 segtype linear
++check lv_field $vg/$lv2 segtype linear
++blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++_verify_data_on_mnt
++_verify_more_data_on_mnt
++umount $mnt
+ lvchange -an $vg/$lv1
++lvchange -an $vg/$lv2
++_verify_data_on_lv
+ lvremove $vg/$lv1
++lvremove $vg/$lv2
+ 
++# FIXME: test depends on unpushed commit
++# that enables two stage flush using cleaner
++#
++# Test attach while active, detach while active,
++# skip cleaner so flush message is used instead
++# 
++# lvcreate -n $lv1 -l 8 -an $vg "$dev1"
++# lvcreate -n $lv2 -l 4 -an $vg "$dev2"
++# lvchange -ay $vg/$lv1
++# _add_new_data_to_mnt
++# lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
++# blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++# blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++# _add_more_data_to_mnt
++# _verify_data_on_mnt
++# lvconvert --splitcache --cachesettings cleaner=0 $vg/$lv1
++# check lv_field $vg/$lv1 segtype linear
++# check lv_field $vg/$lv2 segtype linear
++# blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
++# blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
++# _verify_data_on_mnt
++# _verify_more_data_on_mnt
++# umount $mnt
++# lvchange -an $vg/$lv1
++# lvchange -an $vg/$lv2
++# _verify_data_on_lv
++# lvremove $vg/$lv1
++# lvremove $vg/$lv2
++ 
+ vgremove -ff $vg
+-
++ 
+diff --git a/tools/args.h b/tools/args.h
+index d1f604b..3a7e5d4 100644
+--- a/tools/args.h
++++ b/tools/args.h
+@@ -126,6 +126,12 @@ arg(cachepool_ARG, '\0', "cachepool", lv_VAL, 0, 0,
+ arg(cachevol_ARG, '\0', "cachevol", lv_VAL, 0, 0,
+     "The name of a cache volume.\n")
+ 
++arg(cachedevice_ARG, '\0', "cachedevice", pv_VAL, ARG_GROUPABLE, 0,
++    "The name of a device to use for a cache.\n")
++
++arg(cachesize_ARG, '\0', "cachesize", sizemb_VAL, 0, 0,
++    "The size of cache to use.\n")
++
+ arg(commandprofile_ARG, '\0', "commandprofile", string_VAL, 0, 0,
+     "The command profile to use for command configuration.\n"
+     "See \\fBlvm.conf\\fP(5) for more information about profiles.\n")
+@@ -1428,7 +1434,16 @@ arg(thin_ARG, 'T', "thin", 0, 0, 0,
+     "See \\fBlvmthin\\fP(7) for more information about LVM thin provisioning.\n")
+ 
+ arg(updatemetadata_ARG, '\0', "updatemetadata", 0, 0, 0,
+-    "Update VG metadata to correct problems.\n")
++    "Update VG metadata to correct problems.\n"
++    "If VG metadata was updated while a PV was missing, and the PV\n"
++    "reappears with an old version of metadata, then this option\n"
++    "(or any other command that writes metadata) will update the\n"
++    "metadata on the previously missing PV. If a PV was removed\n"
++    "from a VG while it was missing, and the PV reappears, using\n"
++    "this option will clear the outdated metadata from the previously\n"
++    "missing PV. If metadata text is damaged on one PV, using this\n"
++    "option will replace the damaged metadata text. For more severe\n"
++    "damage, e.g. with headers, see \\fBpvck\\fP(8).\n")
+ 
+ arg(uuid_ARG, 'u', "uuid", 0, 0, 0,
+     "#pvchange\n"
+diff --git a/tools/command-lines.in b/tools/command-lines.in
+index ed3d041..0051b77 100644
+--- a/tools/command-lines.in
++++ b/tools/command-lines.in
+@@ -247,7 +247,7 @@ RULE: --profile not --detachprofile
+ RULE: --metadataprofile not --detachprofile
+ RULE: --minrecoveryrate --maxrecoveryrate and LV_raid
+ RULE: --writebehind --writemostly and LV_raid1
+-RULE: --cachemode --cachepolicy --cachesettings and LV_cache LV_cachepool
++RULE: --cachemode --cachepolicy --cachesettings and LV_cache LV_cachepool LV_writecache
+ RULE: --errorwhenfull --discards --zero and LV_thinpool
+ RULE: --permission not lv_is_external_origin lv_is_raid_metadata lv_is_raid_image LV_thinpool
+ RULE: --alloc --contiguous --metadataprofile --permission --persistent --profile --readahead not lv_is_thick_origin
+@@ -497,6 +497,20 @@ FLAGS: SECONDARY_SYNTAX
+ 
+ ---
+ 
++lvconvert --type writecache --cachedevice PV LV
++OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String
++ID: lvconvert_to_writecache_with_device
++DESC: Add a writecache to an LV, using a specified cache device.
++RULE: all and lv_is_visible
++
++lvconvert --type cache --cachedevice PV LV
++OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String
++ID: lvconvert_to_cache_with_device
++DESC: Add a cache to an LV, using a specified cache device.
++RULE: all and lv_is_visible
++
++---
++
+ lvconvert --type thin-pool LV_linear_striped_raid_cache
+ OO: --stripes_long Number, --stripesize SizeKB,
+ --discards Discards, OO_LVCONVERT_POOL, OO_LVCONVERT
+@@ -1205,87 +1219,107 @@ lvcreate --type cache --size SizeMB --cachepool LV_cachepool VG
+ OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
+ --stripes Number, --stripesize SizeKB
+ OP: PV ...
+-ID: lvcreate_cache_vol_with_new_origin
+-DESC: Create a cache LV, first creating a new origin LV,
+-DESC: then combining it with the existing cache pool named
+-DESC: by the --cachepool arg.
++ID: lvcreate_and_attach_cachepool
++DESC: Create a new LV, then attach the specified cachepool
++DESC: which converts the new LV to type cache.
+ 
+ # alternate form of lvcreate --type cache
++# (omits the --type cache option which is inferred)
+ lvcreate --size SizeMB --cachepool LV_cachepool VG
+ OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE,
+ --stripes Number, --stripesize SizeKB
+ OP: PV ...
+-ID: lvcreate_cache_vol_with_new_origin
+-DESC: Create a cache LV, first creating a new origin LV,
+-DESC: then combining it with the existing cache pool named
+-DESC: by the --cachepool arg (variant, infers --type cache).
++ID: lvcreate_and_attach_cachepool_v2
++DESC: Create a new LV, then attach the specified cachepool
++DESC: which converts the new LV to type cache
++DESC: (variant, infers --type cache.)
+ FLAGS: SECONDARY_SYNTAX
+ 
+ # alternate form of lvcreate --type cache
++# (moves cachepool from option arg to position arg,
++# dropping the normal VG position arg)
+ lvcreate --type cache --size SizeMB LV_cachepool
+ OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
+ --stripes Number, --stripesize SizeKB
+ OP: PV ...
+-ID: lvcreate_cache_vol_with_new_origin
+-DESC: Create a cache LV, first creating a new origin LV,
+-DESC: then combining it with the existing cache pool named
+-DESC: in the first arg (variant, also use --cachepool).
++ID: lvcreate_and_attach_cachepool_v3
++DESC: Create a new LV, then attach the specified cachepool
++DESC: which converts the new LV to type cache.
++DESC: (variant, also use --cachepool).
+ FLAGS: SECONDARY_SYNTAX
+ 
+-# This is a ridiculously crazy command which nobody could
+-# understand.  It should be be eliminated.  It does two different
+-# things depending on whether LV in pos 1 is a cachepool LV
+-# or not.  Both variations are unnecessary.
+-#
+-# 1. If LV is a cachepool, then it's an alternate form of
+-#    an already complicated command above.
+-#
+-# # alternate form for lvcreate_cache_vol_with_new_origin
+-# lvcreate --cache --size SizeMB LV_cachepool
+-# OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE, --stripes Number, --stripesize SizeKB
+-# OP: PV ...
+-# ID: lvcreate_cache_vol_with_new_origin
+-# DESC: Create a cache LV, first creating a new origin LV,
+-# DESC: then combining it with the existing cache pool named
+-# DESC: in the first arg (variant, infers --type cache,
+-# DESC: also use --cachepool).
+-#
+-# 2. If LV is not a cachepool, then it's a disguised lvconvert.
+-#
+-# # FIXME: this should be done by lvconvert, and this command removed
+-# lvcreate --type cache --size SizeMB LV
+-# OO: OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
+-# OP: PV ...
+-# ID: lvcreate_convert_to_cache_vol_with_cachepool
+-# DESC: Convert the specified LV to type cache after creating a new
+-# DESC: cache pool LV to use (use lvconvert).
++# This command has two different meanings which ought to
++# have separate command defs, but since the syntax is the
++# same for both they have to share one command def with
++# an ambiguous meaning.  Which command is performed depends
++# on whether the LV in the first arg position is a
++# cachepool or not (we can't have two different command
++# defs that differ only in the type of LV in the arg position
++# because when parsing commands we don't know the LV type.)
++#
++# 1. An alternate form of lvcreate_and_attach_cachepool_v3
++#    this syntax:         lvcreate --cache --size SizeMB LV_cachepool
++#    is alternative for:  lvcreate --type cache --size SizeMB LV_cachepool
++#
++# 2. An alternative to using lvconvert to convert LV to type cache,
++#    but in this case the cachepool is created internally and
++#    then attached to the LV arg.
+ #
+ # Note that stripes are accepted by the first and not by the
+ # second, but it's not possible to validate this until after
+ # the LV type is known.
+-#
+-# So, to define this syntax we have to combine both of
+-# those variants, each crazy on it's own, into one
+-# ridiculous command.
+ 
+-# def1: alternate form of lvcreate --type cache, or
+-# def2: it should be done by lvconvert.
+ lvcreate --cache --size SizeMB LV
+ OO: OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE,
+ --stripes Number, --stripesize SizeKB
+ OP: PV ...
+-ID: lvcreate_cache_vol_with_new_origin_or_convert_to_cache_vol_with_cachepool
+-DESC: When LV is a cache pool, create a cache LV,
+-DESC: first creating a new origin LV, then combining it with
+-DESC: the existing cache pool named in the first arg
+-DESC: (variant, infers --type cache, also use --cachepool).
+-DESC: When LV is not a cache pool, convert the specified LV
+-DESC: to type cache after creating a new cache pool LV to use
+-DESC: (use lvconvert).
++ID: lvcreate_new_plus_old_cachepool_or_lvconvert_old_plus_new_cachepool
++DESC: When the LV arg is a cachepool, then create a new LV and
++DESC: attach the cachepool arg to it.
++DESC: (variant, use --type cache and --cachepool.)
++DESC: When the LV arg is not a cachepool, then create a new cachepool
++DESC: and attach it to the LV arg (alternative, use lvconvert.)
+ FLAGS: SECONDARY_SYNTAX
+ 
+ ---
+ 
++# These all create a new origin LV, then forwards to lvconvert
++# which combines it with a cachevol (which already exists or
++# which needs to be created from cachedevice), converting
++# the new LV to type cache or writecache.
++
++lvcreate --type cache --size SizeMB --cachevol LV VG
++OO: OO_LVCREATE, OO_LVCREATE_CACHE, --stripes Number, --stripesize SizeKB
++OP: PV ...
++ID: lvcreate_and_attach_cachevol_for_cache
++DESC: Create a new LV, then attach the specified cachevol
++DESC: which converts the new LV to type cache.
++
++lvcreate --type cache --size SizeMB --cachedevice PV VG
++OO: OO_LVCREATE, OO_LVCREATE_CACHE, --cachesize SizeMB, --stripes Number, --stripesize SizeKB
++OP: PV ...
++ID: lvcreate_and_attach_cachedevice_for_cache
++DESC: Create a new LV, then attach a cachevol created from
++DESC: the specified cache device, which converts the
++DESC: new LV to type cache.
++
++lvcreate --type writecache --size SizeMB --cachevol LV VG
++OO: OO_LVCREATE, --cachesettings String, --stripes Number, --stripesize SizeKB
++OP: PV ...
++ID: lvcreate_and_attach_cachevol_for_writecache
++DESC: Create a new LV, then attach the specified cachevol
++DESC: which converts the new LV to type writecache.
++
++lvcreate --type writecache --size SizeMB --cachedevice PV VG
++OO: OO_LVCREATE, --cachesize SizeMB, --cachesettings String, --stripes Number, --stripesize SizeKB
++OP: PV ...
++ID: lvcreate_and_attach_cachedevice_for_writecache
++DESC: Create a new LV, then attach a cachevol created from
++DESC: the specified cache device, which converts the
++DESC: new LV to type writecache.
++
++---
++
+ lvdisplay
+ OO: --aligned, --all, --binary, --colon, --columns,
+ --configreport ConfigReport, --foreign, --history, --ignorelockingfailure,
+diff --git a/tools/command.c b/tools/command.c
+index 511dda1..2d01849 100644
+--- a/tools/command.c
++++ b/tools/command.c
+@@ -1420,6 +1420,9 @@ int define_commands(struct cmd_context *cmdtool, const char *run_name)
+ 		if (line[0] == '\n')
+ 			break;
+ 
++		if (!strcmp(line, "---") || !strcmp(line, "--"))
++			continue;
++
+ 		if ((n = strchr(line, '\n')))
+ 			*n = '\0';
+ 
+diff --git a/tools/lvchange.c b/tools/lvchange.c
+index 2d5bb32..c0adadf 100644
+--- a/tools/lvchange.c
++++ b/tools/lvchange.c
+@@ -606,6 +606,88 @@ static int _lvchange_persistent(struct cmd_context *cmd,
+ 	return 1;
+ }
+ 
++static int _lvchange_writecache(struct cmd_context *cmd,
++			   struct logical_volume *lv,
++			   uint32_t *mr)
++{
++	struct writecache_settings settings = { 0 };
++	uint32_t block_size_sectors = 0;
++	struct lv_segment *seg = first_seg(lv);
++	int set_count = 0;
++
++	if (!get_writecache_settings(cmd, &settings, &block_size_sectors))
++		return_0;
++
++	if (block_size_sectors && (seg->writecache_block_size != (block_size_sectors * 512))) {
++		log_error("Cannot change existing block size %u bytes.", seg->writecache_block_size);
++		return 0;
++	}
++
++	if (settings.high_watermark_set) {
++		seg->writecache_settings.high_watermark_set = settings.high_watermark_set;
++		seg->writecache_settings.high_watermark = settings.high_watermark;
++		set_count++;
++	}
++	if (settings.low_watermark_set) {
++		seg->writecache_settings.low_watermark_set = settings.low_watermark_set;
++		seg->writecache_settings.low_watermark = settings.low_watermark;
++		set_count++;
++	}
++	if (settings.writeback_jobs_set) {
++		seg->writecache_settings.writeback_jobs_set = settings.writeback_jobs_set;
++		seg->writecache_settings.writeback_jobs = settings.writeback_jobs;
++		set_count++;
++	}
++	if (settings.autocommit_blocks_set) {
++		seg->writecache_settings.autocommit_blocks_set = settings.autocommit_blocks_set;
++		seg->writecache_settings.autocommit_blocks = settings.autocommit_blocks;
++		set_count++;
++	}
++	if (settings.autocommit_time_set) {
++		seg->writecache_settings.autocommit_time_set = settings.autocommit_time_set;
++		seg->writecache_settings.autocommit_time = settings.autocommit_time;
++		set_count++;
++	}
++	if (settings.fua_set) {
++		seg->writecache_settings.fua_set = settings.fua_set;
++		seg->writecache_settings.fua = settings.fua;
++		set_count++;
++	}
++	if (settings.nofua_set) {
++		seg->writecache_settings.nofua_set = settings.nofua_set;
++		seg->writecache_settings.nofua = settings.nofua;
++		set_count++;
++	}
++	if (settings.cleaner_set) {
++		seg->writecache_settings.cleaner_set = settings.cleaner_set;
++		seg->writecache_settings.cleaner = settings.cleaner;
++		set_count++;
++	}
++	if (settings.max_age_set) {
++		seg->writecache_settings.max_age_set = settings.max_age_set;
++		seg->writecache_settings.max_age = settings.max_age;
++		set_count++;
++	}
++
++	if (!set_count) {
++		/*
++		 * Empty settings can be used to clear all current settings,
++		 * lvchange --cachesettings "" vg/lv
++		 */
++		if (!arg_count(cmd, yes_ARG) &&
++		    yes_no_prompt("Clear all writecache settings? ") == 'n') {
++			log_print("No settings changed.");
++			return 1;
++		}
++		memset(&seg->writecache_settings, 0, sizeof(struct writecache_settings));
++	}
++
++	/* Request caller to commit and reload metadata */
++	*mr |= MR_RELOAD;
++
++	return 1;
++}
++
+ static int _lvchange_cache(struct cmd_context *cmd,
+ 			   struct logical_volume *lv,
+ 			   uint32_t *mr)
+@@ -619,6 +701,9 @@ static int _lvchange_cache(struct cmd_context *cmd,
+ 	int r = 0, is_clean;
+ 	uint32_t chunk_size = 0; /* FYI: lvchange does NOT support its change */
+ 
++	if (lv_is_writecache(lv))
++		return _lvchange_writecache(cmd, lv, mr);
++
+ 	seg = first_seg(lv);
+ 
+ 	if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv))
+diff --git a/tools/lvconvert.c b/tools/lvconvert.c
+index 8652252..0155fdb 100644
+--- a/tools/lvconvert.c
++++ b/tools/lvconvert.c
+@@ -1319,6 +1319,7 @@ static int _raid4_conversion_supported(struct logical_volume *lv, struct lvconve
+ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp)
+ {
+ 	int image_count = 0;
++	int images_reduced = 0;
+ 	struct cmd_context *cmd = lv->vg->cmd;
+ 	struct lv_segment *seg = first_seg(lv);
+ 
+@@ -1357,6 +1358,8 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
+ 		else
+ 			image_count = lp->mirrors + 1;
+ 
++		images_reduced = (image_count < lv_raid_image_count(lv));
++
+ 		if (image_count < 1) {
+ 			log_error("Unable to %s images by specified amount.",
+ 				  lp->keep_mimages ? "split" : "reduce");
+@@ -1400,7 +1403,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
+ 							lp->region_size : seg->region_size , lp->pvh))
+ 				return_0;
+ 
+-			if (lv_raid_has_integrity(lv)) {
++			if (lv_raid_has_integrity(lv) && !images_reduced) {
+ 				struct integrity_settings *isettings = NULL;
+ 				if (!lv_get_raid_integrity_settings(lv, &isettings))
+ 					return_0;
+@@ -4245,51 +4248,191 @@ int lvconvert_to_pool_cmd(struct cmd_context *cmd, int argc, char **argv)
+ 			       NULL, NULL, &_lvconvert_to_pool_single);
+ }
+ 
+-static int _lvconvert_cachevol_attach_single(struct cmd_context *cmd,
+-					  struct logical_volume *lv,
+-					  struct processing_handle *handle)
++#define MAX_CACHEDEVS 8
++
++static int _lv_create_cachevol(struct cmd_context *cmd,
++			       struct volume_group *vg,
++			       struct logical_volume *lv,
++			       struct logical_volume **cachevol_lv)
+ {
+-	struct volume_group *vg = lv->vg;
+-	struct logical_volume *cachevol_lv;
+-	const char *cachevol_name;
++	char cvname[NAME_LEN];
++	struct dm_list *use_pvh;
++	struct pv_list *pvl;
++	char *dev_name;
++	struct device *dev_fast;
++	char *dev_argv[MAX_CACHEDEVS];
++	int dev_argc = 0;
++	uint64_t cache_size_sectors = 0;
++	uint64_t full_size_sectors = 0;
++	uint64_t pv_size_sectors;
++	struct logical_volume *cachevol;
++	struct arg_value_group_list *group;
++	struct lvcreate_params lp = {
++		.activate = CHANGE_AN,
++		.alloc = ALLOC_INHERIT,
++		.major = -1,
++		.minor = -1,
++		.permission = LVM_READ | LVM_WRITE,
++		.pvh = &vg->pvs,
++		.read_ahead = DM_READ_AHEAD_NONE,
++		.stripes = 1,
++		.vg_name = vg->name,
++		.zero = 0,
++		.wipe_signatures = 0,
++		.suppress_zero_warn = 1,
++	};
+ 
+-	if (!(cachevol_name = arg_str_value(cmd, cachevol_ARG, NULL)))
+-		goto_out;
++	/*
++	 * If cache size is not set, and all cachedevice's are unused,
++	 * then the cache size is the sum of all cachedevice sizes.
++	 */
++	cache_size_sectors = arg_uint64_value(cmd, cachesize_ARG, 0);
+ 
+-	if (!validate_lvname_param(cmd, &vg->name, &cachevol_name))
+-		goto_out;
++	dm_list_iterate_items(group, &cmd->arg_value_groups) {
++		if (!grouped_arg_is_set(group->arg_values, cachedevice_ARG))
++			continue;
+ 
+-	if (!(cachevol_lv = find_lv(vg, cachevol_name))) {
+-		log_error("Cache single %s not found.", cachevol_name);
+-		goto out;
++		if (!(dev_name = (char *)grouped_arg_str_value(group->arg_values, cachedevice_ARG, NULL)))
++			break;
++
++		if (dev_name[0] == '@') {
++			if (!cache_size_sectors) {
++				log_error("With tag as cachedevice, --cachesize is required.");
++				return 0;
++			}
++			goto add_dev_arg;
++		}
++
++		if (!(dev_fast = dev_cache_get(cmd, dev_name, cmd->filter))) {
++			log_error("Device %s not found.", dev_name);
++			return 0;
++		}
++
++		if (!(pvl = find_pv_in_vg(vg, dev_name))) {
++			log_error("PV %s not found in VG.", dev_name);
++			return 0;
++		}
++
++		/*
++		 * If the dev is used in the VG, then require a cachesize to allocate
++		 * from it.  If it is not used in the VG, then prompt asking if the
++		 * entire dev should be used.
++		 */
++		if (!cache_size_sectors && pvl->pv->pe_alloc_count) {
++			log_error("PV %s is in use, --cachesize is required.", dev_name);
++			return 0;
++		}
++
++		if (!cache_size_sectors) {
++			pv_size_sectors = (pvl->pv->pe_count * vg->extent_size);
++
++			if (!arg_is_set(cmd, yes_ARG) &&
++			    yes_no_prompt("Use all %s from %s for cache? [y/n]: ",
++					  display_size(cmd, pv_size_sectors), dev_name) == 'n') {
++				log_print("Use --cachesize SizeMB to use a part of the cachedevice.");
++				log_error("Conversion aborted.");
++				return 0;
++			}
++			full_size_sectors += pv_size_sectors;
++		}
++ add_dev_arg:
++		if (dev_argc >= MAX_CACHEDEVS) {
++			log_error("Cannot allocate from more than %u cache devices.", MAX_CACHEDEVS);
++			return 0;
++		}
++
++		dev_argv[dev_argc++] = dev_name;
+ 	}
+ 
+-	if (lv_is_cache_vol(cachevol_lv)) {
+-		log_error("LV %s is already used as a cachevol.", display_lvname(cachevol_lv));
+-		goto out;
++	if (!cache_size_sectors)
++		cache_size_sectors = full_size_sectors;
++
++	if (!dev_argc) {
++		log_error("No cachedevice specified to create a cachevol.");
++		return 0;
+ 	}
+ 
+-	/* Ensure the LV is not active elsewhere. */
+-	if (!lockd_lv(cmd, lv, "ex", 0))
+-		goto_out;
++	if (!(use_pvh = create_pv_list(cmd->mem, vg, dev_argc, dev_argv, 1))) {
++		log_error("cachedevice not found in VG %s.", dev_name);
++		return 0;
++	}
+ 
+-	if (!dm_list_empty(&cachevol_lv->segs_using_this_lv)) {
+-		log_error("LV %s is already in use.", display_lvname(cachevol_lv));
+-		goto out;
++	if (dm_snprintf(cvname, NAME_LEN, "%s_cache", lv->name) < 0) {
++		log_error("Failed to create cachevol LV name.");
++		return 0;
+ 	}
+ 
+-	if (!arg_is_set(cmd, yes_ARG) &&
+-	    yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(cachevol_lv)) == 'n') {
+-		log_error("Conversion aborted.");
+-		goto out;
++	lp.lv_name = cvname;
++	lp.pvh = use_pvh;
++	lp.extents = cache_size_sectors / vg->extent_size;
++
++	log_print("Creating cachevol LV %s with size %s.",
++		  cvname, display_size(cmd, cache_size_sectors));
++
++	dm_list_init(&lp.tags);
++
++	if (!(lp.segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
++		return_0;
++
++	if (!(cachevol = lv_create_single(vg, &lp))) {
++		log_error("Failed to create cachevol LV");
++		return 0;
++	}
++
++	*cachevol_lv = cachevol;
++	return 1;
++}
++
++int lvconvert_cachevol_attach_single(struct cmd_context *cmd,
++				     struct logical_volume *lv,
++				     struct processing_handle *handle)
++{
++	struct volume_group *vg = lv->vg;
++	struct logical_volume *lv_fast;
++	const char *fast_name;
++
++	/*
++	 * User specifies an existing cachevol to use or a cachedevice
++	 * to create a cachevol from.
++	 */
++	if ((fast_name = arg_str_value(cmd, cachevol_ARG, NULL))) {
++		if (!validate_lvname_param(cmd, &vg->name, &fast_name))
++			goto_bad;
++
++		if (!(lv_fast = find_lv(vg, fast_name))) {
++			log_error("LV %s not found.", fast_name);
++			goto bad;
++		}
++
++		if (lv_is_cache_vol(lv_fast)) {
++			log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast));
++			goto bad;
++		}
++
++		if (!dm_list_empty(&lv_fast->segs_using_this_lv)) {
++			log_error("LV %s is already in use.", display_lvname(lv_fast));
++			goto bad;
++		}
++
++		if (!arg_is_set(cmd, yes_ARG) &&
++		    yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(lv_fast)) == 'n') {
++			log_error("Conversion aborted.");
++			goto bad;
++		}
++
++		if (!lockd_lv(cmd, lv_fast, "ex", 0))
++			goto_bad;
++	} else {
++		if (!_lv_create_cachevol(cmd, vg, lv, &lv_fast))
++			goto_bad;
+ 	}
+ 
+ 	/* Ensure the LV is not active elsewhere. */
+-	if (!lockd_lv(cmd, cachevol_lv, "ex", LDLV_PERSISTENT))
+-		goto_out;
++	if (!lockd_lv(cmd, lv, "ex", 0))
++		goto_bad;
+ 
+-	if (!wipe_cache_pool(cachevol_lv))
+-		goto_out;
++	if (!wipe_cache_pool(lv_fast))
++		goto_bad;
+ 
+ 	/* When the lv arg is a thinpool, redirect command to data sub lv. */
+ 
+@@ -4299,17 +4442,17 @@ static int _lvconvert_cachevol_attach_single(struct cmd_context *cmd,
+ 	}
+ 
+ 	if (_raid_split_image_conversion(lv))
+-		goto_out;
++		goto_bad;
+ 
+ 	/* Attach the cache to the main LV. */
+ 
+-	if (!_cache_vol_attach(cmd, lv, cachevol_lv))
+-		goto_out;
++	if (!_cache_vol_attach(cmd, lv, lv_fast))
++		goto_bad;
+ 
+ 	log_print_unless_silent("Logical volume %s is now cached.", display_lvname(lv));
+ 
+ 	return ECMD_PROCESSED;
+- out:
++ bad:
+ 	return ECMD_FAILED;
+ }
+ 
+@@ -5308,19 +5451,8 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd,
+ 					struct logical_volume *lv,
+ 					struct logical_volume *lv_fast)
+ {
+-	char cvol_name[NAME_LEN];
+-	char *c;
+ 	int noflush = 0;
+ 
+-	/*
+-	 * LV must be inactive externally before detaching cache.
+-	 */
+-
+-	if (lv_info(cmd, lv, 1, NULL, 0, 0)) {
+-		log_error("LV %s must be inactive to detach writecache.", display_lvname(lv));
+-		return 0;
+-	}
+-
+ 	if (!archive(lv->vg))
+ 		return_0;
+ 
+@@ -5344,36 +5476,23 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd,
+ 		noflush = 1;
+ 	}
+ 
+-	if (!lv_detach_writecache_cachevol(lv, noflush))
+-		return_0;
+-
+ 	/*
+-	 * Rename lv_fast back to its original name, without the _cvol
+-	 * suffix that was added when lv_fast was attached for caching.
++	 * TODO: send a message to writecache in the kernel to start writing
++	 * back cache data to the origin.  Then release the vg lock and monitor
++	 * the progress of that writeback.  When it's complete we can reacquire
++	 * the vg lock, rescan the vg (ensure it hasn't changed), and do the
++	 * detach which should be quick since the writeback is complete.  If
++	 * this command is canceled while monitoring writeback, it should just
++	 * be rerun.  The LV will continue to have the writecache until this
++	 * command is run to completion.
+ 	 */
+-	if (!dm_strncpy(cvol_name, lv_fast->name, sizeof(cvol_name)) ||
+-	    !(c = strstr(cvol_name, "_cvol"))) {
+-		log_debug("LV %s has no suffix for cachevol (skipping rename).",
+-			display_lvname(lv_fast));
+-	} else {
+-		*c = 0;
+-		/* If the name is in use, generate new lvol%d */
+-		if (lv_name_is_used_in_vg(lv->vg, cvol_name, NULL) &&
+-		    !generate_lv_name(lv->vg, "lvol%d", cvol_name, sizeof(cvol_name))) {
+-			log_error("Failed to generate unique name for unused logical volume.");
+-			return 0;
+-		}
+-
+-		if (!lv_rename_update(cmd, lv_fast, cvol_name, 0))
+-			return_0;
+-	}
+ 
+-	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
++	if (!lv_detach_writecache_cachevol(lv, noflush))
+ 		return_0;
+ 
+ 	backup(lv->vg);
+ 
+-	log_print_unless_silent("Logical volume %s write cache has been detached.",
++	log_print_unless_silent("Logical volume %s writecache has been detached.",
+ 				display_lvname(lv));
+ 	return 1;
+ }
+@@ -5410,157 +5529,6 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv)
+ 	return ret;
+ }
+ 
+-static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings,
+-				       char *key, char *val, uint32_t *block_size_sectors)
+-{
+-	/* special case: block_size is not a setting but is set with the --cachesettings option */
+-	if (!strncmp(key, "block_size", strlen("block_size"))) {
+-		uint32_t block_size = 0;
+-		if (sscanf(val, "%u", &block_size) != 1)
+-			goto_bad;
+-		if (block_size == 512)
+-			*block_size_sectors = 1;
+-		else if (block_size == 4096)
+-			*block_size_sectors = 8;
+-		else
+-			goto_bad;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "high_watermark", strlen("high_watermark"))) {
+-		if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1)
+-			goto_bad;
+-		if (settings->high_watermark > 100)
+-			goto_bad;
+-		settings->high_watermark_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "low_watermark", strlen("low_watermark"))) {
+-		if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1)
+-			goto_bad;
+-		if (settings->low_watermark > 100)
+-			goto_bad;
+-		settings->low_watermark_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) {
+-		if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1)
+-			goto_bad;
+-		settings->writeback_jobs_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) {
+-		if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1)
+-			goto_bad;
+-		settings->autocommit_blocks_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) {
+-		if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1)
+-			goto_bad;
+-		settings->autocommit_time_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "fua", strlen("fua"))) {
+-		if (settings->nofua_set) {
+-			log_error("Setting fua and nofua cannot both be set.");
+-			return 0;
+-		}
+-		if (sscanf(val, "%u", &settings->fua) != 1)
+-			goto_bad;
+-		settings->fua_set = 1;
+-		return 1;
+-	}
+-
+-	if (!strncmp(key, "nofua", strlen("nofua"))) {
+-		if (settings->fua_set) {
+-			log_error("Setting fua and nofua cannot both be set.");
+-			return 0;
+-		}
+-		if (sscanf(val, "%u", &settings->nofua) != 1)
+-			goto_bad;
+-		settings->nofua_set = 1;
+-		return 1;
+-	}
+-
+-	if (settings->new_key) {
+-		log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key);
+-		return 0;
+-	}
+-
+-	log_warn("Unrecognized writecache setting \"%s\" may cause activation failure.", key);
+-	if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') {
+-		log_error("Aborting writecache conversion.");
+-		return 0;
+-	}
+-
+-	log_warn("Using unrecognized writecache setting: %s = %s.", key, val);
+-
+-	settings->new_key = dm_pool_strdup(cmd->mem, key);
+-	settings->new_val = dm_pool_strdup(cmd->mem, val);
+-	return 1;
+-
+- bad:
+-	log_error("Invalid setting: %s", key);
+-	return 0;
+-}
+-
+-static int _get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings,
+-				    uint32_t *block_size_sectors)
+-{
+-	struct arg_value_group_list *group;
+-	const char *str;
+-	char key[64];
+-	char val[64];
+-	int num;
+-	int pos;
+-
+-	/*
+-	 * "grouped" means that multiple --cachesettings options can be used.
+-	 * Each option is also allowed to contain multiple key = val pairs.
+-	 */
+-
+-	dm_list_iterate_items(group, &cmd->arg_value_groups) {
+-		if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
+-			continue;
+-
+-		if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL)))
+-			break;
+-
+-		pos = 0;
+-
+-		while (pos < strlen(str)) {
+-			/* scan for "key1=val1 key2 = val2  key3= val3" */
+-
+-			memset(key, 0, sizeof(key));
+-			memset(val, 0, sizeof(val));
+-
+-			if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) {
+-				log_error("Invalid setting at: %s", str+pos);
+-				return 0;
+-			}
+-
+-			pos += num;
+-
+-			if (!_get_one_writecache_setting(cmd, settings, key, val, block_size_sectors))
+-				return_0;
+-		}
+-	}
+-
+-	if (settings->high_watermark_set && settings->low_watermark_set &&
+-	    (settings->high_watermark <= settings->low_watermark)) {
+-		log_error("High watermark must be greater than low watermark.");
+-		return 0;
+-	}
+-
+-	return 1;
+-}
+-
+ static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd,
+ 					    struct logical_volume *lv,
+ 					    struct logical_volume *lv_fast,
+@@ -5605,9 +5573,171 @@ static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd,
+ 	return lv_wcorig;
+ }
+ 
+-#define DEFAULT_WRITECACHE_BLOCK_SIZE_SECTORS 8 /* 4K */
++/*
++ * Currently only supports writecache block sizes 512 and 4096.
++ * This could be expanded later.
++ */
++static int _set_writecache_block_size(struct cmd_context *cmd,
++				      struct logical_volume *lv,
++				      uint32_t *block_size_sectors)
++{
++	char pathname[PATH_MAX];
++	struct device *fs_dev;
++	struct dm_list pvs;
++	struct pv_list *pvl;
++	uint32_t fs_block_size = 0;
++	uint32_t block_size_setting = 0;
++	uint32_t block_size = 0;
++	int lbs_unknown = 0, lbs_4k = 0, lbs_512 = 0;
++	int pbs_unknown = 0, pbs_4k = 0, pbs_512 = 0;
++	int rv;
++
++	/* This is set if the user specified a writecache block size on the command line. */
++	if (*block_size_sectors)
++		block_size_setting = *block_size_sectors * 512;
++
++	dm_list_init(&pvs);
++
++	if (!get_pv_list_for_lv(cmd->mem, lv, &pvs)) {
++		log_error("Failed to build list of PVs for %s.", display_lvname(lv));
++		goto_bad;
++	}
++
++	dm_list_iterate_items(pvl, &pvs) {
++		unsigned int pbs = 0;
++		unsigned int lbs = 0;
++
++		if (!dev_get_direct_block_sizes(pvl->pv->dev, &pbs, &lbs)) {
++			lbs_unknown++;
++			pbs_unknown++;
++			continue;
++		}
++
++		if (lbs == 4096)
++			lbs_4k++;
++		else if (lbs == 512)
++			lbs_512++;
++		else
++			lbs_unknown++;
++
++		if (pbs == 4096)
++			pbs_4k++;
++		else if (pbs == 512)
++			pbs_512++;
++		else
++			pbs_unknown++;
++	}
++
++	if (lbs_4k && lbs_512) {
++		log_error("Writecache requires consistent logical block size for LV devices.");
++		goto_bad;
++	}
++
++	if (lbs_4k && block_size_setting && (block_size_setting < 4096)) {
++		log_error("Writecache block size %u not allowed with device logical block size 4096.",
++			  block_size_setting);
++		goto_bad;
++	}
++
++	if (dm_snprintf(pathname, sizeof(pathname), "%s/%s/%s", cmd->dev_dir,
++			lv->vg->name, lv->name) < 0) {
++		log_error("Path name too long to get LV block size %s", display_lvname(lv));
++		goto_bad;
++	}
++
++	if (!sync_local_dev_names(cmd))
++		stack;
++
++	if (!(fs_dev = dev_cache_get(cmd, pathname, NULL))) {
++		log_error("Device for LV not found to check block size %s", pathname);
++		goto_bad;
++	}
++
++	/*
++	 * get_fs_block_size() returns the libblkid BLOCK_SIZE value,
++	 * where libblkid has fs-specific code to set BLOCK_SIZE to the
++	 * value we need here.
++	 *
++	 * The term "block size" here may not equate directly to what the fs
++	 * calls the block size, e.g. xfs calls this the sector size (and
++	 * something different the block size); while ext4 does call this
++	 * value the block size, but it's possible values are not the same
++	 * as xfs's, and do not seem to relate directly to the device LBS.
++	 *
++	 * With 512 LBS and 4K PBS, mkfs.xfs will use xfs sector size 4K.
++	 */
++	rv = get_fs_block_size(fs_dev, &fs_block_size);
++	if (!rv || !fs_block_size) {
++		if (lbs_4k && pbs_4k && !pbs_512) {
++			block_size = 4096;
++		} else if (lbs_512 && pbs_512 && !pbs_4k) {
++			block_size = 512;
++		} else if (lbs_512 && pbs_4k) {
++			if (block_size_setting == 4096)
++				block_size = 4096;
++			else
++				block_size = 512;
++		} else {
++			block_size = 512;
++		}
++
++		if (block_size_setting && (block_size_setting != block_size)) {
++			log_error("Cannot use writecache block size %u with unknown file system block size, logical block size %u, physical block size %u.",
++				  block_size_setting, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512);
++			goto bad;
++		}
++
++		if (block_size != 512) {
++			log_warn("WARNING: unable to detect a file system block size on %s", display_lvname(lv));
++			log_warn("WARNING: using a writecache block size larger than the file system block size may corrupt the file system.");
++			if (!arg_is_set(cmd, yes_ARG) &&
++			    yes_no_prompt("Use writecache block size %u? [y/n]: ", block_size) == 'n')  {
++				log_error("Conversion aborted.");
++				goto bad;
++			}
++		}
++
++		log_print("Using writecache block size %u for unknown file system block size, logical block size %u, physical block size %u.",
++			 block_size, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512);
++		goto out;
++	}
++
++	if (!block_size_setting) {
++		/* User did not specify a block size, so choose according to fs block size. */
++		if (fs_block_size == 4096)
++			block_size = 4096;
++		else if (fs_block_size == 512)
++			block_size = 512;
++		else if (fs_block_size > 4096)
++			block_size = 4096;
++		else if (fs_block_size < 4096)
++			block_size = 512;
++		else
++			goto_bad;
++	} else {
++		if (block_size_setting <= fs_block_size)
++			block_size = block_size_setting;
++		else {
++			log_error("Writecache block size %u cannot be larger than file system block size %u.",
++				  block_size_setting, fs_block_size);
++			goto_bad;
++		}
++	}
++
++out:
++	if (block_size == 512)
++		*block_size_sectors = 1;
++	else if (block_size == 4096)
++		*block_size_sectors = 8;
++	else
++		goto_bad;
++
++	return 1;
++bad:
++	return 0;
++}
+ 
+-static int _lvconvert_writecache_attach_single(struct cmd_context *cmd,
++int lvconvert_writecache_attach_single(struct cmd_context *cmd,
+ 					struct logical_volume *lv,
+ 					struct processing_handle *handle)
+ {
+@@ -5616,68 +5746,91 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd,
+ 	struct logical_volume *lv_fast;
+ 	struct writecache_settings settings;
+ 	const char *fast_name;
+-	uint32_t block_size_sectors;
++	uint32_t block_size_sectors = 0;
+ 	char *lockd_fast_args = NULL;
+ 	char *lockd_fast_name = NULL;
+ 	struct id lockd_fast_id;
+ 	char cvol_name[NAME_LEN];
++	int is_active;
+ 
+-	fast_name = arg_str_value(cmd, cachevol_ARG, "");
++	/*
++	 * User specifies an existing cachevol to use or a cachedevice
++	 * to create a cachevol from.
++	 */
++	if ((fast_name = arg_str_value(cmd, cachevol_ARG, NULL))) {
++		if (!validate_lvname_param(cmd, &vg->name, &fast_name))
++			goto_bad;
+ 
+-	if (!(lv_fast = find_lv(vg, fast_name))) {
+-		log_error("LV %s not found.", fast_name);
+-		goto bad;
+-	}
++		if (!(lv_fast = find_lv(vg, fast_name))) {
++			log_error("LV %s not found.", fast_name);
++			goto bad;
++		}
+ 
+-	if (lv_fast == lv) {
+-		log_error("Invalid cachevol LV.");
+-		goto bad;
+-	}
++		if (lv_fast == lv) {
++			log_error("Invalid cachevol LV.");
++			goto bad;
++		}
+ 
+-	if (!seg_is_linear(first_seg(lv_fast))) {
+-		log_error("LV %s must be linear to use as a writecache.", display_lvname(lv_fast));
+-		goto bad;
+-	}
++		if (lv_is_cache_vol(lv_fast)) {
++			log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast));
++			goto bad;
++		}
+ 
+-	if (lv_is_cache_vol(lv_fast)) {
+-		log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast));
+-		goto bad;
+-	}
++		if (!seg_is_linear(first_seg(lv_fast))) {
++			log_error("LV %s must be linear to use as a writecache.", display_lvname(lv_fast));
++			goto bad;
++		}
+ 
+-	/*
+-	 * To permit this we need to check the block size of the fs using lv
+-	 * (recently in libblkid) so that we can use a matching writecache
+-	 * block size.  We also want to do that if the lv is inactive.
+-	 */
+-	if (lv_is_active(lv)) {
+-		log_error("LV %s must be inactive to attach writecache.", display_lvname(lv));
+-		goto bad;
+-	}
++		/* fast LV shouldn't generally be active by itself, but just in case. */
++		if (lv_is_active(lv_fast)) {
++			log_error("LV %s must be inactive to attach.", display_lvname(lv_fast));
++			goto bad;
++		}
+ 
+-	/* fast LV shouldn't generally be active by itself, but just in case. */
+-	if (lv_info(cmd, lv_fast, 1, NULL, 0, 0)) {
+-		log_error("LV %s must be inactive to attach.", display_lvname(lv_fast));
+-		goto bad;
++		if (!arg_is_set(cmd, yes_ARG) &&
++		     yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(lv_fast)) == 'n') {
++			log_error("Conversion aborted.");
++			goto bad;
++		}
++	} else {
++		if (!_lv_create_cachevol(cmd, vg, lv, &lv_fast))
++			goto_bad;
+ 	}
+ 
++	is_active = lv_is_active(lv);
++
+ 	memset(&settings, 0, sizeof(settings));
+-	block_size_sectors = DEFAULT_WRITECACHE_BLOCK_SIZE_SECTORS;
+ 
+-	if (!_get_writecache_settings(cmd, &settings, &block_size_sectors)) {
++	if (!get_writecache_settings(cmd, &settings, &block_size_sectors)) {
+ 		log_error("Invalid writecache settings.");
+ 		goto bad;
+ 	}
+ 
+-	if (!arg_is_set(cmd, yes_ARG) &&
+-	    yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(lv_fast)) == 'n') {
+-		log_error("Conversion aborted.");
+-		goto bad;
++	if (!is_active) {
++		/* checking block size of fs on the lv requires the lv to be active */
++		if (!activate_lv(cmd, lv)) {
++			log_error("Failed to activate LV to check block size %s", display_lvname(lv));
++			goto bad;
++		}
+ 	}
+ 
+-	/* Ensure the two LVs are not active elsewhere. */
++	if (!_set_writecache_block_size(cmd, lv, &block_size_sectors)) {
++		if (!is_active && !deactivate_lv(cmd, lv))
++			stack;
++		goto_bad;
++	}
++
++	if (!is_active) {
++		if (!deactivate_lv(cmd, lv)) {
++			log_error("Failed to deactivate LV after checking block size %s", display_lvname(lv));
++			goto bad;
++		}
++	}
++
++	/* Ensure the LV is not active elsewhere. */
+ 	if (!lockd_lv(cmd, lv, "ex", 0))
+ 		goto_bad;
+-	if (!lockd_lv(cmd, lv_fast, "ex", 0))
++	if (fast_name && !lockd_lv(cmd, lv_fast, "ex", 0))
+ 		goto_bad;
+ 
+ 	if (!archive(vg))
+@@ -5744,7 +5897,7 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd,
+ 			log_error("Failed to unlock fast LV %s/%s", vg->name, lockd_fast_name);
+ 	}
+ 
+-	log_print_unless_silent("Logical volume %s now has write cache.",
++	log_print_unless_silent("Logical volume %s now has writecache.",
+ 				display_lvname(lv));
+ 	return ECMD_PROCESSED;
+ bad:
+@@ -5768,7 +5921,7 @@ int lvconvert_to_writecache_cmd(struct cmd_context *cmd, int argc, char **argv)
+ 	cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS;
+ 
+ 	ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL,
+-			      &_lvconvert_writecache_attach_single);
++			      &lvconvert_writecache_attach_single);
+ 
+ 	destroy_processing_handle(cmd, handle);
+ 
+@@ -5791,7 +5944,7 @@ int lvconvert_to_cache_with_cachevol_cmd(struct cmd_context *cmd, int argc, char
+ 	cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS;
+ 
+ 	ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL,
+-			      &_lvconvert_cachevol_attach_single);
++			      &lvconvert_cachevol_attach_single);
+ 
+ 	destroy_processing_handle(cmd, handle);
+ 
+diff --git a/tools/lvcreate.c b/tools/lvcreate.c
+index 5c978b3..3357a08 100644
+--- a/tools/lvcreate.c
++++ b/tools/lvcreate.c
+@@ -766,7 +766,9 @@ static int _lvcreate_params(struct cmd_context *cmd,
+ 	 *
+ 	 * Ordering of following type tests is IMPORTANT
+ 	 */
+-	if ((segtype_str = arg_str_value(cmd, type_ARG, NULL))) {
++	if (lp->ignore_type) {
++		segtype_str = SEG_TYPE_NAME_STRIPED;
++	} else if ((segtype_str = arg_str_value(cmd, type_ARG, NULL))) {
+ 		lp->type = 1;
+ 		if (!strcmp(segtype_str, "linear")) {
+ 			segtype_str = "striped";
+@@ -1799,3 +1801,152 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
+ 	destroy_processing_handle(cmd, handle);
+ 	return ret;
+ }
++
++static int _lvcreate_and_attach_writecache_single(struct cmd_context *cmd,
++		const char *vg_name, struct volume_group *vg, struct processing_handle *handle)
++{
++	struct processing_params *pp = (struct processing_params *) handle->custom_handle;
++	struct lvcreate_params *lp = pp->lp;
++	struct logical_volume *lv;
++	int ret;
++
++	ret = _lvcreate_single(cmd, vg_name, vg, handle);
++
++	if (ret == ECMD_FAILED)
++		return ret;
++
++	if (!(lv = find_lv(vg, lp->lv_name))) {
++		log_error("Failed to find LV %s to add writecache.", lp->lv_name);
++		return ECMD_FAILED;
++	}
++
++	ret = lvconvert_writecache_attach_single(cmd, lv, handle);
++
++	if (ret == ECMD_FAILED) {
++		log_error("Removing new LV after failing to add writecache.");
++		if (!deactivate_lv(cmd, lv))
++			log_error("Failed to deactivate new LV %s.", display_lvname(lv));
++		if (!lv_remove_with_dependencies(cmd, lv, 1, 0))
++			log_error("Failed to remove new LV %s.", display_lvname(lv));
++		return ECMD_FAILED;
++	}
++
++	return ECMD_PROCESSED;
++}
++
++int lvcreate_and_attach_writecache_cmd(struct cmd_context *cmd, int argc, char **argv)
++{
++	struct processing_handle *handle = NULL;
++	struct processing_params pp;
++	struct lvcreate_params lp = {
++		.major = -1,
++		.minor = -1,
++	};
++	struct lvcreate_cmdline_params lcp = { 0 };
++	int ret;
++
++	/*
++	 * Tell lvcreate to ignore --type since we are using lvcreate
++	 * to create a linear LV and using lvconvert to add cache.
++	 * (Would be better if lvcreate code was split up so we could
++	 * call a specific function that just created a linear/striped LV.)
++	 */
++	lp.ignore_type = 1;
++
++	if (!_lvcreate_params(cmd, argc, argv, &lp, &lcp)) {
++		stack;
++		return EINVALID_CMD_LINE;
++	}
++
++	pp.lp = &lp;
++	pp.lcp = &lcp;
++
++        if (!(handle = init_processing_handle(cmd, NULL))) {
++		log_error("Failed to initialize processing handle.");
++		return ECMD_FAILED;
++	}
++
++	handle->custom_handle = &pp;
++
++	ret = process_each_vg(cmd, 0, NULL, lp.vg_name, NULL, READ_FOR_UPDATE, 0, handle,
++			      &_lvcreate_and_attach_writecache_single);
++
++	_destroy_lvcreate_params(&lp);
++	destroy_processing_handle(cmd, handle);
++	return ret;
++}
++
++static int _lvcreate_and_attach_cache_single(struct cmd_context *cmd,
++		const char *vg_name, struct volume_group *vg, struct processing_handle *handle)
++{
++	struct processing_params *pp = (struct processing_params *) handle->custom_handle;
++	struct lvcreate_params *lp = pp->lp;
++	struct logical_volume *lv;
++	int ret;
++
++	ret = _lvcreate_single(cmd, vg_name, vg, handle);
++
++	if (ret == ECMD_FAILED)
++		return ret;
++
++	if (!(lv = find_lv(vg, lp->lv_name))) {
++		log_error("Failed to find LV %s to add cache.", lp->lv_name);
++		return ECMD_FAILED;
++	}
++
++	ret = lvconvert_cachevol_attach_single(cmd, lv, handle);
++
++	if (ret == ECMD_FAILED) {
++		log_error("Removing new LV after failing to add cache.");
++		if (!deactivate_lv(cmd, lv))
++			log_error("Failed to deactivate new LV %s.", display_lvname(lv));
++		if (!lv_remove_with_dependencies(cmd, lv, 1, 0))
++			log_error("Failed to remove new LV %s.", display_lvname(lv));
++		return ECMD_FAILED;
++	}
++
++	return ECMD_PROCESSED;
++}
++
++int lvcreate_and_attach_cache_cmd(struct cmd_context *cmd, int argc, char **argv)
++{
++	struct processing_handle *handle = NULL;
++	struct processing_params pp;
++	struct lvcreate_params lp = {
++		.major = -1,
++		.minor = -1,
++	};
++	struct lvcreate_cmdline_params lcp = { 0 };
++	int ret;
++
++	/*
++	 * Tell lvcreate to ignore --type since we are using lvcreate
++	 * to create a linear LV and using lvconvert to add cache.
++	 * (Would be better if lvcreate code was split up so we could
++	 * call a specific function that just created a linear/striped LV.)
++	 */
++	lp.ignore_type = 1;
++
++	if (!_lvcreate_params(cmd, argc, argv, &lp, &lcp)) {
++		stack;
++		return EINVALID_CMD_LINE;
++	}
++
++	pp.lp = &lp;
++	pp.lcp = &lcp;
++
++	if (!(handle = init_processing_handle(cmd, NULL))) {
++		log_error("Failed to initialize processing handle.");
++		return ECMD_FAILED;
++	}
++
++	handle->custom_handle = &pp;
++
++	ret = process_each_vg(cmd, 0, NULL, lp.vg_name, NULL, READ_FOR_UPDATE, 0, handle,
++			      &_lvcreate_and_attach_cache_single);
++
++	_destroy_lvcreate_params(&lp);
++	destroy_processing_handle(cmd, handle);
++	return ret;
++}
++
+diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c
+index d87a8f0..7cf4e3f 100644
+--- a/tools/lvmcmdline.c
++++ b/tools/lvmcmdline.c
+@@ -124,8 +124,10 @@ static const struct command_function _command_functions[CMD_COUNT] = {
+ 	{ lvconvert_to_cachepool_CMD,			lvconvert_to_pool_cmd },
+ 	{ lvconvert_to_thin_with_external_CMD,		lvconvert_to_thin_with_external_cmd },
+ 	{ lvconvert_to_cache_with_cachevol_CMD,		lvconvert_to_cache_with_cachevol_cmd },
++	{ lvconvert_to_cache_with_device_CMD,		lvconvert_to_cache_with_cachevol_cmd },
+ 	{ lvconvert_to_cache_with_cachepool_CMD,	lvconvert_to_cache_with_cachepool_cmd },
+ 	{ lvconvert_to_writecache_CMD,			lvconvert_to_writecache_cmd },
++	{ lvconvert_to_writecache_with_device_CMD,	lvconvert_to_writecache_cmd },
+ 	{ lvconvert_swap_pool_metadata_CMD,		lvconvert_swap_pool_metadata_cmd },
+ 	{ lvconvert_to_thinpool_or_swap_metadata_CMD,   lvconvert_to_pool_or_swap_metadata_cmd },
+ 	{ lvconvert_to_cachepool_or_swap_metadata_CMD,  lvconvert_to_pool_or_swap_metadata_cmd },
+@@ -152,6 +154,12 @@ static const struct command_function _command_functions[CMD_COUNT] = {
+ 	/* lvconvert for integrity */
+ 	{ lvconvert_integrity_CMD, lvconvert_integrity_cmd },
+ 
++	/* lvcreate */
++	{ lvcreate_and_attach_cachevol_for_cache_CMD,		lvcreate_and_attach_cache_cmd },
++	{ lvcreate_and_attach_cachedevice_for_cache_CMD,	lvcreate_and_attach_cache_cmd },
++	{ lvcreate_and_attach_cachevol_for_writecache_CMD,	lvcreate_and_attach_writecache_cmd },
++	{ lvcreate_and_attach_cachedevice_for_writecache_CMD,	lvcreate_and_attach_writecache_cmd },
++
+ 	{ pvscan_display_CMD, pvscan_display_cmd },
+ 	{ pvscan_cache_CMD, pvscan_cache_cmd },
+ };
+diff --git a/tools/toollib.c b/tools/toollib.c
+index 89b6374..eb0de55 100644
+--- a/tools/toollib.c
++++ b/tools/toollib.c
+@@ -1184,6 +1184,170 @@ out:
+ 	return ok;
+ }
+ 
++static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings,
++				       char *key, char *val, uint32_t *block_size_sectors)
++{
++	/* special case: block_size is not a setting but is set with the --cachesettings option */
++	if (!strncmp(key, "block_size", strlen("block_size"))) {
++		uint32_t block_size = 0;
++		if (sscanf(val, "%u", &block_size) != 1)
++			goto_bad;
++		if (block_size == 512)
++			*block_size_sectors = 1;
++		else if (block_size == 4096)
++			*block_size_sectors = 8;
++		else
++			goto_bad;
++		return 1;
++	}
++
++	if (!strncmp(key, "high_watermark", strlen("high_watermark"))) {
++		if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1)
++			goto_bad;
++		if (settings->high_watermark > 100)
++			goto_bad;
++		settings->high_watermark_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "low_watermark", strlen("low_watermark"))) {
++		if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1)
++			goto_bad;
++		if (settings->low_watermark > 100)
++			goto_bad;
++		settings->low_watermark_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) {
++		if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1)
++			goto_bad;
++		settings->writeback_jobs_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) {
++		if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1)
++			goto_bad;
++		settings->autocommit_blocks_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) {
++		if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1)
++			goto_bad;
++		settings->autocommit_time_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "fua", strlen("fua"))) {
++		if (settings->nofua_set) {
++			log_error("Setting fua and nofua cannot both be set.");
++			return 0;
++		}
++		if (sscanf(val, "%u", &settings->fua) != 1)
++			goto_bad;
++		settings->fua_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "nofua", strlen("nofua"))) {
++		if (settings->fua_set) {
++			log_error("Setting fua and nofua cannot both be set.");
++			return 0;
++		}
++		if (sscanf(val, "%u", &settings->nofua) != 1)
++			goto_bad;
++		settings->nofua_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "cleaner", strlen("cleaner"))) {
++		if (sscanf(val, "%u", &settings->cleaner) != 1)
++			goto_bad;
++		settings->cleaner_set = 1;
++		return 1;
++	}
++
++	if (!strncmp(key, "max_age", strlen("max_age"))) {
++		if (sscanf(val, "%u", &settings->max_age) != 1)
++			goto_bad;
++		settings->max_age_set = 1;
++		return 1;
++	}
++
++	if (settings->new_key) {
++		log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key);
++		return 0;
++	}
++
++	log_warn("Unrecognized writecache setting \"%s\" may cause activation failure.", key);
++	if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') {
++		log_error("Aborting writecache conversion.");
++		return 0;
++	}
++
++	log_warn("Using unrecognized writecache setting: %s = %s.", key, val);
++
++	settings->new_key = dm_pool_strdup(cmd->mem, key);
++	settings->new_val = dm_pool_strdup(cmd->mem, val);
++	return 1;
++
++ bad:
++	log_error("Invalid setting: %s", key);
++	return 0;
++}
++
++int get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings,
++			    uint32_t *block_size_sectors)
++{
++	struct arg_value_group_list *group;
++	const char *str;
++	char key[64];
++	char val[64];
++	int num;
++	int pos;
++
++	/*
++	 * "grouped" means that multiple --cachesettings options can be used.
++	 * Each option is also allowed to contain multiple key = val pairs.
++	 */
++
++	dm_list_iterate_items(group, &cmd->arg_value_groups) {
++		if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
++			continue;
++
++		if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL)))
++			break;
++
++		pos = 0;
++
++		while (pos < strlen(str)) {
++			/* scan for "key1=val1 key2 = val2  key3= val3" */
++
++			memset(key, 0, sizeof(key));
++			memset(val, 0, sizeof(val));
++
++			if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) {
++				log_error("Invalid setting at: %s", str+pos);
++				return 0;
++			}
++
++			pos += num;
++
++			if (!_get_one_writecache_setting(cmd, settings, key, val, block_size_sectors))
++				return_0;
++		}
++	}
++
++	if (settings->high_watermark_set && settings->low_watermark_set &&
++	    (settings->high_watermark <= settings->low_watermark)) {
++		log_error("High watermark must be greater than low watermark.");
++		return 0;
++	}
++
++	return 1;
++}
+ 
+ /* FIXME move to lib */
+ static int _pv_change_tag(struct physical_volume *pv, const char *tag, int addtag)
+diff --git a/tools/toollib.h b/tools/toollib.h
+index 53a5e5b..f3a60fb 100644
+--- a/tools/toollib.h
++++ b/tools/toollib.h
+@@ -217,6 +217,9 @@ int get_cache_params(struct cmd_context *cmd,
+ 		     const char **name,
+ 		     struct dm_config_tree **settings);
+ 
++int get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings,
++                            uint32_t *block_size_sectors);
++
+ int change_tag(struct cmd_context *cmd, struct volume_group *vg,
+ 	       struct logical_volume *lv, struct physical_volume *pv, int arg);
+ 
+diff --git a/tools/tools.h b/tools/tools.h
+index 7f2434d..c3d780d 100644
+--- a/tools/tools.h
++++ b/tools/tools.h
+@@ -278,7 +278,18 @@ int lvconvert_to_vdopool_param_cmd(struct cmd_context *cmd, int argc, char **arg
+ 
+ int lvconvert_integrity_cmd(struct cmd_context *cmd, int argc, char **argv);
+ 
++int lvcreate_and_attach_writecache_cmd(struct cmd_context *cmd, int argc, char **argv);
++int lvcreate_and_attach_cache_cmd(struct cmd_context *cmd, int argc, char **argv);
++
+ int pvscan_display_cmd(struct cmd_context *cmd, int argc, char **argv);
+ int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv);
+ 
++
++int lvconvert_writecache_attach_single(struct cmd_context *cmd,
++                                        struct logical_volume *lv,
++                                        struct processing_handle *handle);
++int lvconvert_cachevol_attach_single(struct cmd_context *cmd,
++                                     struct logical_volume *lv,
++                                     struct processing_handle *handle);
++
+ #endif
+-- 
+1.8.3.1
+
diff --git a/SPECS/lvm2.spec b/SPECS/lvm2.spec
index 24967ad..19a9537 100644
--- a/SPECS/lvm2.spec
+++ b/SPECS/lvm2.spec
@@ -58,7 +58,7 @@ Name: lvm2
 Epoch: %{rhel}
 %endif
 Version: 2.03.09
-Release: 2%{?dist}
+Release: 3%{?dist}
 License: GPLv2
 URL: http://sourceware.org/lvm2
 Source0: ftp://sourceware.org/pub/lvm2/releases/LVM2.%{version}.tgz
@@ -76,6 +76,7 @@ Patch10: lvm2-2_03_10-WHATS_NEW-integrity-with-raid.patch
 Patch11: lvm2-2_03_10-build-make-generate.patch
 Patch12: 0001-Merge-master-up-to-commit-53803821de16.patch
 Patch13: 0002-Merge-master-up-to-commit-be61bd6ff5c6.patch
+Patch14: 0003-Merge-master-up-to-commit-6eb9eba59bf5.patch
 
 BuildRequires: gcc
 %if %{enable_testsuite}
@@ -145,6 +146,7 @@ or more physical volumes and creating one or more logical volumes
 %patch11 -p1 -b .backup11
 %patch12 -p1 -b .backup12
 %patch13 -p1 -b .backup13
+%patch14 -p1 -b .backup14
 
 %build
 %global _default_pid_dir /run
@@ -749,6 +751,9 @@ An extensive functional testsuite for LVM2.
 %endif
 
 %changelog
+* Mon Jun 29 2020 Marian Csontos <mcsontos@redhat.com> - 2.03.09-3
+- Merge fixes from upstream.
+
 * Thu May 21 2020 Marian Csontos <mcsontos@redhat.com> - 2.03.09-2
 - Merge fixes from upstream.