diff --git a/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch b/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch
new file mode 100644
index 0000000..b1ca921
--- /dev/null
+++ b/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch
@@ -0,0 +1,173 @@
+From 12041b03584bb2fa36f797ece4b0f9a41760a303 Mon Sep 17 00:00:00 2001
+From: David Teigland <teigland@redhat.com>
+Date: Wed, 24 Jul 2019 11:32:13 -0500
+Subject: [PATCH 2/4] Fix rounding writes up to sector size
+
+Do this at two levels, although one would be enough to
+fix the problem seen recently:
+
+- Ignore any reported sector size other than 512 or 4096.
+  If either sector size (physical or logical) is reported
+  as 512, then use 512.  If neither are reported as 512,
+  and one or the other is reported as 4096, then use 4096.
+  If neither is reported as either 512 or 4096, then use 512.
+
+- When rounding up a limited write in bcache to be a multiple
+  of the sector size, check that the resulting write size is
+  not larger than the bcache block itself.  (This shouldn't
+  happen if the sector size is 512 or 4096.)
+
+(cherry picked from commit 7550665ba49ac7d497d5b212e14b69298ef01361)
+
+Conflicts:
+	lib/device/dev-io.c
+
+(cherry picked from commit 44c460954be5c63cf5338bd9151344fe2626565f)
+---
+ lib/device/bcache.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 87 insertions(+), 2 deletions(-)
+
+diff --git a/lib/device/bcache.c b/lib/device/bcache.c
+index b64707e..77d1543 100644
+--- a/lib/device/bcache.c
++++ b/lib/device/bcache.c
+@@ -169,6 +169,7 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
+ 	sector_t offset;
+ 	sector_t nbytes;
+ 	sector_t limit_nbytes;
++	sector_t orig_nbytes;
+ 	sector_t extra_nbytes = 0;
+ 
+ 	if (((uintptr_t) data) & e->page_mask) {
+@@ -191,11 +192,41 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
+ 			return false;
+ 		}
+ 
++		/*
++		 * If the bcache block offset+len goes beyond where lvm is
++		 * intending to write, then reduce the len being written
++		 * (which is the bcache block size) so we don't write past
++		 * the limit set by lvm.  If after applying the limit, the
++		 * resulting size is not a multiple of the sector size (512
++		 * or 4096) then extend the reduced size to be a multiple of
++		 * the sector size (we don't want to write partial sectors.)
++		 */
+ 		if (offset + nbytes > _last_byte_offset) {
+ 			limit_nbytes = _last_byte_offset - offset;
+-			if (limit_nbytes % _last_byte_sector_size)
++
++			if (limit_nbytes % _last_byte_sector_size) {
+ 				extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
+ 
++				/*
++				 * adding extra_nbytes to the reduced nbytes (limit_nbytes)
++				 * should make the final write size a multiple of the
++				 * sector size.  This should never result in a final size
++				 * larger than the bcache block size (as long as the bcache
++				 * block size is a multiple of the sector size).
++				 */
++				if (limit_nbytes + extra_nbytes > nbytes) {
++					log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
++						 (unsigned long long)offset,
++						 (unsigned long long)nbytes,
++						 (unsigned long long)limit_nbytes,
++						 (unsigned long long)extra_nbytes,
++						 (unsigned long long)_last_byte_sector_size);
++					extra_nbytes = 0;
++				}
++			}
++
++			orig_nbytes = nbytes;
++
+ 			if (extra_nbytes) {
+ 				log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
+ 					  (unsigned long long)offset,
+@@ -210,6 +241,22 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
+ 					  (unsigned long long)limit_nbytes);
+ 				nbytes = limit_nbytes;
+ 			}
++
++			/*
++			 * This shouldn't happen, the reduced+extended
++			 * nbytes value should never be larger than the
++			 * bcache block size.
++			 */
++			if (nbytes > orig_nbytes) {
++				log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
++					  (unsigned long long)offset,
++					  (unsigned long long)orig_nbytes,
++					  (unsigned long long)nbytes,
++					  (unsigned long long)limit_nbytes,
++					  (unsigned long long)extra_nbytes,
++					  (unsigned long long)_last_byte_sector_size);
++				return false;
++			}
+ 		}
+ 	}
+ 
+@@ -403,6 +450,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
+ 		uint64_t nbytes = len;
+ 		sector_t limit_nbytes = 0;
+ 		sector_t extra_nbytes = 0;
++		sector_t orig_nbytes = 0;
+ 
+ 		if (offset > _last_byte_offset) {
+ 			log_error("Limit write at %llu len %llu beyond last byte %llu",
+@@ -415,9 +463,30 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
+ 
+ 		if (offset + nbytes > _last_byte_offset) {
+ 			limit_nbytes = _last_byte_offset - offset;
+-			if (limit_nbytes % _last_byte_sector_size)
++
++			if (limit_nbytes % _last_byte_sector_size) {
+ 				extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
+ 
++				/*
++				 * adding extra_nbytes to the reduced nbytes (limit_nbytes)
++				 * should make the final write size a multiple of the
++				 * sector size.  This should never result in a final size
++				 * larger than the bcache block size (as long as the bcache
++				 * block size is a multiple of the sector size).
++				 */
++				if (limit_nbytes + extra_nbytes > nbytes) {
++					log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
++						 (unsigned long long)offset,
++						 (unsigned long long)nbytes,
++						 (unsigned long long)limit_nbytes,
++						 (unsigned long long)extra_nbytes,
++						 (unsigned long long)_last_byte_sector_size);
++					extra_nbytes = 0;
++				}
++			}
++
++			orig_nbytes = nbytes;
++
+ 			if (extra_nbytes) {
+ 				log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
+ 					  (unsigned long long)offset,
+@@ -432,6 +501,22 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
+ 					  (unsigned long long)limit_nbytes);
+ 				nbytes = limit_nbytes;
+ 			}
++
++			/*
++			 * This shouldn't happen, the reduced+extended
++			 * nbytes value should never be larger than the
++			 * bcache block size.
++			 */
++			if (nbytes > orig_nbytes) {
++				log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
++					  (unsigned long long)offset,
++					  (unsigned long long)orig_nbytes,
++					  (unsigned long long)nbytes,
++					  (unsigned long long)limit_nbytes,
++					  (unsigned long long)extra_nbytes,
++					  (unsigned long long)_last_byte_sector_size);
++				return false;
++			}
+ 		}
+ 
+ 		where = offset;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch b/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch
new file mode 100644
index 0000000..50267c5
--- /dev/null
+++ b/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch
@@ -0,0 +1,25 @@
+From 165dfc7cf803e5e00d7239e2521582a9c9838178 Mon Sep 17 00:00:00 2001
+From: Marian Csontos <mcsontos@redhat.com>
+Date: Wed, 4 Mar 2020 13:22:10 +0100
+Subject: [PATCH 3/4] bcache: Fix memory leak in error path
+
+(cherry picked from commit deaf304ee6d88cd47632a345b92b3949cd06d752)
+---
+ lib/device/bcache.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/device/bcache.c b/lib/device/bcache.c
+index 77d1543..a74b6b3 100644
+--- a/lib/device/bcache.c
++++ b/lib/device/bcache.c
+@@ -515,6 +515,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
+ 					  (unsigned long long)limit_nbytes,
+ 					  (unsigned long long)extra_nbytes,
+ 					  (unsigned long long)_last_byte_sector_size);
++				free(io);
+ 				return false;
+ 			}
+ 		}
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch b/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch
new file mode 100644
index 0000000..5569cb1
--- /dev/null
+++ b/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch
@@ -0,0 +1,328 @@
+From 8aac4049c270ae8beb741a2cd80084810945a718 Mon Sep 17 00:00:00 2001
+From: David Teigland <teigland@redhat.com>
+Date: Tue, 3 Sep 2019 15:14:08 -0500
+Subject: [PATCH 1/4] pvscan: fix activation of incomplete VGs
+
+For a long time there has been a bug in the activation
+done by the initial pvscan (which scans all devs to
+initialize the lvmetad cache.)  It was attempting to
+activate all VGs, even those that were not complete.
+
+lvmetad tells pvscan when a VG is complete, and pvscan
+needs to use this information to decide which VGs to
+activate.
+
+When there are problems that prevent lvmetad from being
+used (e.g. lvmetad is disabled or not running), pvscan
+activation cannot use lvmetad to determine when a VG
+is complete, so it now checks if devices are present
+for all PVs in the VG before activating.
+
+(The recent commit "pvscan: avoid redundant activation"
+could make this bug more apparent because redundant
+activations can cover up the effect of activating an
+incomplete VG and missing some LV activations.)
+
+(cherry picked from commit 6b12930860a993624d6325aec2e9c561f4412aa9)
+---
+ lib/cache/lvmetad.c   | 15 ++++++++----
+ lib/cache/lvmetad.h   |  2 +-
+ tools/lvmcmdline.c    |  2 +-
+ tools/lvscan.c        |  2 +-
+ tools/pvscan.c        | 65 ++++++++++++++++++++++++++++++++++++++++++++++-----
+ tools/vgcfgrestore.c  |  2 +-
+ tools/vgimport.c      |  2 +-
+ tools/vgimportclone.c |  2 +-
+ tools/vgscan.c        |  2 +-
+ 9 files changed, 77 insertions(+), 17 deletions(-)
+
+diff --git a/lib/cache/lvmetad.c b/lib/cache/lvmetad.c
+index 1eda567..e659711 100644
+--- a/lib/cache/lvmetad.c
++++ b/lib/cache/lvmetad.c
+@@ -1704,6 +1704,13 @@ int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct devi
+ 		changed = daemon_reply_int(reply, "changed", 0);
+ 	}
+ 
++	if (vg && vg->system_id && vg->system_id[0] &&
++	    cmd->system_id && cmd->system_id[0] &&
++	    strcmp(vg->system_id, cmd->system_id)) {
++		log_debug_lvmetad("Ignore foreign VG %s on %s", vg->name , dev_name(dev));
++		goto out;
++	}
++
+ 	/*
+ 	 * If lvmetad now sees all PVs in the VG, it returned the
+ 	 * "complete" status string.  Add this VG name to the list
+@@ -1734,7 +1741,7 @@ int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct devi
+ 				log_error("str_list_add failed");
+ 		}
+ 	}
+-
++out:
+ 	daemon_reply_destroy(reply);
+ 
+ 	return result;
+@@ -2347,7 +2354,7 @@ bad:
+  * generally revert disk scanning and not use lvmetad.
+  */
+ 
+-int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait)
++int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait, struct dm_list *found_vgnames)
+ {
+ 	struct device_list *devl, *devl2;
+ 	struct dm_list scan_devs;
+@@ -2429,7 +2436,7 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait)
+ 
+ 		dm_list_del(&devl->list);
+ 
+-		ret = lvmetad_pvscan_single(cmd, devl->dev, NULL, NULL);
++		ret = lvmetad_pvscan_single(cmd, devl->dev, found_vgnames, NULL);
+ 
+ 		label_scan_invalidate(devl->dev);
+ 
+@@ -2774,7 +2781,7 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force)
+ 	 * we rescanned for the token, and the time we acquired the global
+ 	 * lock.)
+ 	 */
+-	if (!lvmetad_pvscan_all_devs(cmd, 1)) {
++	if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) {
+ 		log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 		lvmetad_make_unused(cmd);
+ 		return;
+diff --git a/lib/cache/lvmetad.h b/lib/cache/lvmetad.h
+index 73c2645..55ce16a 100644
+--- a/lib/cache/lvmetad.h
++++ b/lib/cache/lvmetad.h
+@@ -151,7 +151,7 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
+ 			  struct dm_list *found_vgnames,
+ 			  struct dm_list *changed_vgnames);
+ 
+-int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait);
++int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait, struct dm_list *found_vgnames);
+ 
+ int lvmetad_vg_clear_outdated_pvs(struct volume_group *vg);
+ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force);
+diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c
+index f82827d..75a0401 100644
+--- a/tools/lvmcmdline.c
++++ b/tools/lvmcmdline.c
+@@ -2991,7 +2991,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
+ 	 */
+ 	if (lvmetad_used() && !_cmd_no_lvmetad_autoscan(cmd)) {
+ 		if (cmd->include_foreign_vgs || !lvmetad_token_matches(cmd)) {
+-			if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, cmd->include_foreign_vgs ? 1 : 0)) {
++			if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, cmd->include_foreign_vgs ? 1 : 0, NULL)) {
+ 				log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 				lvmetad_make_unused(cmd);
+ 			}
+diff --git a/tools/lvscan.c b/tools/lvscan.c
+index c38208a..34e9f31 100644
+--- a/tools/lvscan.c
++++ b/tools/lvscan.c
+@@ -103,7 +103,7 @@ int lvscan(struct cmd_context *cmd, int argc, char **argv)
+ 
+ 	/* Needed because this command has NO_LVMETAD_AUTOSCAN. */
+ 	if (lvmetad_used() && (!lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) {
+-		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) {
++		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, NULL)) {
+ 			log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 			lvmetad_make_unused(cmd);
+ 		}
+diff --git a/tools/pvscan.c b/tools/pvscan.c
+index e5afe0c..9e76f52 100644
+--- a/tools/pvscan.c
++++ b/tools/pvscan.c
+@@ -38,6 +38,7 @@ struct pvscan_params {
+ 
+ struct pvscan_aa_params {
+ 	int refresh_all;
++	int all_vgs;
+ 	unsigned int activate_errors;
+ 	struct dm_list changed_vgnames;
+ };
+@@ -223,6 +224,28 @@ void online_vg_file_remove(const char *vgname)
+ 	unlink(path);
+ }
+ 
++static void _online_files_remove(const char *dirpath)
++{
++	char path[PATH_MAX];
++	DIR *dir;
++	struct dirent *de;
++
++	if (!(dir = opendir(dirpath)))
++		return;
++
++	while ((de = readdir(dir))) {
++		if (de->d_name[0] == '.')
++			continue;
++
++		memset(path, 0, sizeof(path));
++		snprintf(path, sizeof(path), "%s/%s", dirpath, de->d_name);
++		if (unlink(path))
++			log_sys_debug("unlink", path);
++	}
++	if (closedir(dir))
++		log_sys_debug("closedir", dirpath);
++}
++
+ /*
+  * pvscan --cache does not perform any lvmlockd locking, and
+  * pvscan --cache -aay skips autoactivation in lockd VGs.
+@@ -271,6 +294,8 @@ static int _pvscan_autoactivate_single(struct cmd_context *cmd, const char *vg_n
+ 				       struct volume_group *vg, struct processing_handle *handle)
+ {
+ 	struct pvscan_aa_params *pp = (struct pvscan_aa_params *)handle->custom_handle;
++	struct pv_list *pvl;
++	int incomplete = 0;
+ 
+ 	if (vg_is_clustered(vg))
+ 		return ECMD_PROCESSED;
+@@ -281,6 +306,24 @@ static int _pvscan_autoactivate_single(struct cmd_context *cmd, const char *vg_n
+ 	if (is_lockd_type(vg->lock_type))
+ 		return ECMD_PROCESSED;
+ 
++	/*
++	 * This all_vgs case only happens in fallback cases when there's some
++	 * problem preventing the use of lvmetad.  When lvmetad can be properly
++	 * used, the found_vgnames list should have the names of complete VGs
++	 * that should be activated.
++	 */
++	if (pp->all_vgs) {
++		dm_list_iterate_items(pvl, &vg->pvs) {
++			if (!pvl->pv->dev)
++				incomplete++;
++		}
++
++		if (incomplete) {
++			log_print("pvscan[%d] VG %s incomplete (need %d).", getpid(), vg->name, incomplete);
++			return ECMD_PROCESSED;
++		}
++	}
++
+ 	log_debug("pvscan autoactivating VG %s.", vg_name);
+ 
+ #if 0
+@@ -377,6 +420,7 @@ static int _pvscan_autoactivate(struct cmd_context *cmd, struct pvscan_aa_params
+ 	if (all_vgs) {
+ 		cmd->cname->flags |= ALL_VGS_IS_DEFAULT;
+ 		pp->refresh_all = 1;
++		pp->all_vgs = 1;
+ 	}
+ 
+ 	ret = process_each_vg(cmd, 0, NULL, NULL, vgnames, 0, 0, handle, _pvscan_autoactivate_single);
+@@ -463,17 +507,23 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv)
+ 	 * Scan all devices when no args are given.
+ 	 */
+ 	if (!argc && !devno_args) {
++		_online_files_remove(_vgs_online_dir);
++
+ 		log_verbose("Scanning all devices.");
+ 
+-		if (!lvmetad_pvscan_all_devs(cmd, 1)) {
++		if (!lvmetad_pvscan_all_devs(cmd, 1, &found_vgnames)) {
+ 			log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 			lvmetad_make_unused(cmd);
++			all_vgs = 1;
+ 		}
+ 		if (lvmetad_used() && lvmetad_is_disabled(cmd, &reason)) {
+ 			log_warn("WARNING: Not using lvmetad because %s.", reason);
+ 			lvmetad_make_unused(cmd);
++			all_vgs = 1;
+ 		}
+-		all_vgs = 1;
++
++		if (!all_vgs && do_activate)
++			log_print("pvscan[%d] activating all complete VGs (no args)", getpid());
+ 		goto activate;
+ 	}
+ 
+@@ -485,7 +535,7 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv)
+ 	 * never scan any devices other than those specified.
+ 	 */
+ 	if (!lvmetad_token_matches(cmd)) {
+-		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) {
++		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, &found_vgnames)) {
+ 			log_warn("WARNING: Not updating lvmetad because cache update failed.");
+ 			ret = ECMD_FAILED;
+ 			goto out;
+@@ -493,9 +543,12 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv)
+ 		if (lvmetad_used() && lvmetad_is_disabled(cmd, &reason)) {
+ 			log_warn("WARNING: Not using lvmetad because %s.", reason);
+ 			lvmetad_make_unused(cmd);
++			all_vgs = 1;
++			log_print("pvscan[%d] activating all directly (lvmetad disabled from scan) %s", getpid(), dev_arg ?: "");
+ 		}
+-		log_print("pvscan[%d] activating all directly (lvmetad token) %s", getpid(), dev_arg ?: "");
+-		all_vgs = 1;
++
++		if (!all_vgs)
++			log_print("pvscan[%d] activating all complete VGs for init", getpid());
+ 		goto activate;
+ 	}
+ 
+@@ -808,7 +861,7 @@ int pvscan(struct cmd_context *cmd, int argc, char **argv)
+ 
+ 	/* Needed because this command has NO_LVMETAD_AUTOSCAN. */
+ 	if (lvmetad_used() && (!lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) {
+-		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) {
++		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, NULL)) {
+ 			log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 			lvmetad_make_unused(cmd);
+ 		}
+diff --git a/tools/vgcfgrestore.c b/tools/vgcfgrestore.c
+index 48a2fa4..e7f9848 100644
+--- a/tools/vgcfgrestore.c
++++ b/tools/vgcfgrestore.c
+@@ -177,7 +177,7 @@ rescan:
+ 		}
+ 		if (!refresh_filters(cmd))
+ 			stack;
+-		if (!lvmetad_pvscan_all_devs(cmd, 1)) {
++		if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) {
+ 			log_warn("WARNING: Failed to scan devices.");
+ 			log_warn("WARNING: Update lvmetad with pvscan --cache.");
+ 			goto out;
+diff --git a/tools/vgimport.c b/tools/vgimport.c
+index ea50198..d4455ec 100644
+--- a/tools/vgimport.c
++++ b/tools/vgimport.c
+@@ -96,7 +96,7 @@ int vgimport(struct cmd_context *cmd, int argc, char **argv)
+ 	 * import it.
+ 	 */
+ 	if (lvmetad_used()) {
+-		if (!lvmetad_pvscan_all_devs(cmd, 1)) {
++		if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) {
+ 			log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 			lvmetad_make_unused(cmd);
+ 		}
+diff --git a/tools/vgimportclone.c b/tools/vgimportclone.c
+index c4c5d4c..ac3766b 100644
+--- a/tools/vgimportclone.c
++++ b/tools/vgimportclone.c
+@@ -377,7 +377,7 @@ out:
+ 		if (!refresh_filters(cmd))
+ 			stack;
+ 
+-		if (!lvmetad_pvscan_all_devs(cmd, 1)) {
++		if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) {
+ 			log_warn("WARNING: Failed to scan devices.");
+ 			log_warn("WARNING: Update lvmetad with pvscan --cache.");
+ 		}
+diff --git a/tools/vgscan.c b/tools/vgscan.c
+index 1ec9083..7a63996 100644
+--- a/tools/vgscan.c
++++ b/tools/vgscan.c
+@@ -101,7 +101,7 @@ int vgscan(struct cmd_context *cmd, int argc, char **argv)
+ 		log_verbose("Ignoring vgscan --cache command because lvmetad is not in use.");
+ 
+ 	if (lvmetad_used() && (arg_is_set(cmd, cache_long_ARG) || !lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) {
+-		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, arg_is_set(cmd, cache_long_ARG))) {
++		if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, arg_is_set(cmd, cache_long_ARG), NULL)) {
+ 			log_warn("WARNING: Not using lvmetad because cache update failed.");
+ 			lvmetad_make_unused(cmd);
+ 		}
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch b/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch
new file mode 100644
index 0000000..d6fcba1
--- /dev/null
+++ b/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch
@@ -0,0 +1,34 @@
+From 16a03878cd39cb1fb0c052a41901b6660f9f674c Mon Sep 17 00:00:00 2001
+From: Zdenek Kabelac <zkabelac@redhat.com>
+Date: Wed, 8 Apr 2020 11:46:42 +0200
+Subject: [PATCH 4/4] lvconvert: no validation for thin-pools not used by lvm2
+
+lvm2 supports thin-pool to be later used by other tools doing
+virtual volumes themselves (i.e. docker) - in this case we
+shall not validate transaction Id - as this is used by
+other tools and lvm2 keeps value 0 - so the transactionId
+validation needs to be skipped in this case.
+
+(cherry picked from commit 1316cafbe988307264e4f87dbcffaf56bc2ab388)
+(cherry picked from commit ca84deb23f0cfb51dbeba0ffe44f757345e6f8a0)
+---
+ tools/lvconvert.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/lvconvert.c b/tools/lvconvert.c
+index 799e746..bf14eec 100644
+--- a/tools/lvconvert.c
++++ b/tools/lvconvert.c
+@@ -2330,7 +2330,8 @@ static int _lvconvert_thin_pool_repair(struct cmd_context *cmd,
+ 		goto deactivate_mlv;
+ 	}
+ 
+-	if (thin_dump[0]) {
++	/* Check matching transactionId when thin-pool is used by lvm2 (transactionId != 0) */
++	if (first_seg(pool_lv)->transaction_id && thin_dump[0]) {
+ 		argv[0] = thin_dump;
+ 		argv[1] = pms_path;
+ 		argv[2] = NULL;
+-- 
+1.8.3.1
+
diff --git a/SPECS/lvm2.spec b/SPECS/lvm2.spec
index ffcc22d..b114b2a 100644
--- a/SPECS/lvm2.spec
+++ b/SPECS/lvm2.spec
@@ -67,7 +67,7 @@ Summary: Userland logical volume management tools
 Name: lvm2
 Epoch: 7
 Version: 2.02.186
-Release: 7%{?dist}%{?scratch}.1
+Release: 7%{?dist}%{?scratch}.2
 License: GPLv2
 Group: System Environment/Base
 URL: http://sources.redhat.com/lvm2
@@ -116,6 +116,12 @@ Patch26: lvm2-2_02_187-raid-more-limitted-prohibition-of-stacked-raid-usage.patc
 Patch27: lvm2-2_02_187-raid-better-place-for-blocking-reshapes.patch
 # BZ 1812441:
 Patch28: lvm2-2_02_187-pvs-fix-locking_type-4.patch
+# BZ 1814004:
+Patch29: lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch
+Patch30: lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch
+Patch31: lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch
+# BZ 1822539:
+Patch32: lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch
 
 BuildRequires: libselinux-devel >= %{libselinux_version}, libsepol-devel
 BuildRequires: libblkid-devel >= %{util_linux_version}
@@ -197,6 +203,10 @@ or more physical volumes and creating one or more logical volumes
 %patch26 -p1 -b .raid_more_limitted_prohibition_of_stacked_raid_usage
 %patch27 -p1 -b .raid_better_place_for_blocking_reshapes
 %patch28 -p1 -b .pvs_fix_locking_type_4
+%patch29 -p1 -b .pvscan_fix_activation_of_incomplete_VGs
+%patch30 -p1 -b .Fix_rounding_writes_up_to_sector_size
+%patch31 -p1 -b .bcache_Fix_memory_leak_in_error_path
+%patch32 -p1 -b .lvconvert_no_validation_for_thin_pools_not_used_by_lvm
 
 %build
 %global _default_pid_dir /run
@@ -944,7 +954,7 @@ the device-mapper event library.
 %package -n %{boom_pkgname}
 Summary: %{boom_summary}
 Version: %{boom_version}
-Release: %{boom_release}%{?dist}%{?scratch}.1
+Release: %{boom_release}%{?dist}%{?scratch}.2
 License: GPLv2
 Group: System Environment/Base
 BuildArch: noarch
@@ -975,6 +985,10 @@ This package provides the python2 version of boom.
 %endif
 
 %changelog
+* Thu Apr 16 2020 Marian Csontos <mcsontos@redhat.com> - 7:2.02.186-7.el7_8.2
- No validation for thin pools not used by lvm2.
- Fix activation of incomplete VGs.
+
 * Thu Mar 12 2020 Marian Csontos <mcsontos@redhat.com> - 7:2.02.186-7.el7_8.1
 - Fix failing pvs with locking_type 4.