From 99b646d87469b5ca0e93fad6b77f51a00fbbd2b7 Mon Sep 17 00:00:00 2001
From: Marian Csontos <mcsontos@redhat.com>
Date: Wed, 12 Aug 2020 18:47:15 +0200
Subject: [PATCH] Revert "debug: missing stacktrace"

This reverts commit d0faad0db38fe733cae42d7df136d7ed4f7bcba6.

Revert "raid: no wiping when zeroing raid metadata device"

This reverts commit 9b9bf8786fb423a4430cc676301edadf2310098d.

Revert "lvconvert: more support for yes conversion"

This reverts commit b7f3667ce20b731bbda9b1d61df49abbcd1bd20e.

Revert "wipe_lv: always zero at least 4K"

This reverts commit fe78cd4082cb9af10580180d61898fcef93dc624.

Revert "tests: check pool metadata are zeroed"

This reverts commit 3f32f9811e01c8953d201c7c9b563561ad856130.

Revert "tests: failure of zeroing fails command"

This reverts commit 094d6f80ddb6d8a1c64977dfaae4073827063fe3.

Revert "make: make generate"

This reverts commit 88b92d4225b90db82047f3bac55d8059918e9c1b.

Conflicts:
	man/lvconvert.8_pregen

Revert "pool: zero metadata"

This reverts commit bc39d5bec6fea787a8d8d16fa484084b7d2a7c29.

Conflicts:
	WHATS_NEW

Revert "wipe_lv: make error a fatal event"

This reverts commit edbc5a62b26806e5c4de59b5292609e955303576.

Conflicts:
	WHATS_NEW

build: make generate
---
 WHATS_NEW                               |  2 -
 conf/example.conf.in                    |  6 +--
 lib/config/config_settings.h            |  5 +--
 lib/config/defaults.h                   |  1 -
 lib/metadata/lv_manip.c                 | 78 ++++++++++++---------------------
 lib/metadata/metadata-exported.h        |  2 -
 lib/metadata/pool_manip.c               |  6 +--
 test/lib/aux.sh                         |  1 -
 test/shell/lvcreate-signature-wiping.sh |  7 ---
 test/shell/lvcreate-thin.sh             | 21 ---------
 tools/lvconvert.c                       | 12 ++---
 11 files changed, 36 insertions(+), 105 deletions(-)

diff --git a/WHATS_NEW b/WHATS_NEW
index ac99e97..6a098b5 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -6,8 +6,6 @@ Version 2.03.10 -
   warning.
   Fix conversion to raid from striped lagging type.
   Fix conversion to 'mirrored' mirror log with larger regionsize.
-  Zero pool metadata on allocation (disable with allocation/zero_metadata=0).
-  Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn).
   Fix running out of free buffers for async writing for larger writes.
   Add integrity with raid capability.
   Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
diff --git a/conf/example.conf.in b/conf/example.conf.in
index d5807e6..88858fc 100644
--- a/conf/example.conf.in
+++ b/conf/example.conf.in
@@ -489,7 +489,7 @@ allocation {
 	# This configuration option does not have a default value defined.
 
 	# Configuration option allocation/thin_pool_metadata_require_separate_pvs.
-	# Thin pool metadata and data will always use different PVs.
+	# Thin pool metdata and data will always use different PVs.
 	thin_pool_metadata_require_separate_pvs = 0
 
 	# Configuration option allocation/thin_pool_zero.
@@ -527,10 +527,6 @@ allocation {
 	# This configuration option has an automatic default value.
 	# thin_pool_chunk_size_policy = "generic"
 
-	# Configuration option allocation/zero_metadata.
-	# Zero whole metadata area before use with thin or cache pool.
-	zero_metadata = 1
-
 	# Configuration option allocation/thin_pool_chunk_size.
 	# The minimal chunk size in KiB for thin pool volumes.
 	# Larger chunk sizes may improve performance for plain thin volumes,
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index b38ca11..dce9705 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -626,7 +626,7 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF
 	"Using cache pool with more chunks may degrade cache performance.\n")
 
 cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
-	"Thin pool metadata and data will always use different PVs.\n")
+	"Thin pool metdata and data will always use different PVs.\n")
 
 cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL,
 	"Thin pool data chunks are zeroed before they are first used.\n"
@@ -657,9 +657,6 @@ cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", a
 	"    512KiB.\n"
 	"#\n")
 
-cfg(allocation_zero_metadata_CFG, "zero_metadata", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_ZERO_METADATA, vsn(2, 3, 10), NULL, 0, NULL,
-	"Zero whole metadata area before use with thin or cache pool.\n")
-
 cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), 0, NULL,
 	"The minimal chunk size in KiB for thin pool volumes.\n"
 	"Larger chunk sizes may improve performance for plain thin volumes,\n"
diff --git a/lib/config/defaults.h b/lib/config/defaults.h
index 708a575..be4f5ff 100644
--- a/lib/config/defaults.h
+++ b/lib/config/defaults.h
@@ -129,7 +129,6 @@
 #define DEFAULT_THIN_POOL_DISCARDS "passdown"
 #define DEFAULT_THIN_POOL_ZERO 1
 #define DEFAULT_POOL_METADATA_SPARE 1 /* thin + cache */
-#define DEFAULT_ZERO_METADATA 1		/* thin + cache */
 
 #ifdef CACHE_CHECK_NEEDS_CHECK
 #  define DEFAULT_CACHE_CHECK_OPTION1 "-q"
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index f0ba3f0..1642b90 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -7576,22 +7576,20 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp)
 	struct device *dev;
 	char name[PATH_MAX];
 	uint64_t zero_sectors;
-	int zero_metadata = wp.is_metadata ?
-		find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL) : 0;
 
-	if (!wp.do_zero && !wp.do_wipe_signatures && !wp.is_metadata)
+	if (!wp.do_zero && !wp.do_wipe_signatures)
 		/* nothing to do */
 		return 1;
 
 	if (!lv_is_active(lv)) {
-		log_error("Volume %s is not active locally (volume_list activation filter?).",
-			  display_lvname(lv));
+		log_error("Volume \"%s/%s\" is not active locally (volume_list activation filter?).",
+			  lv->vg->name, lv->name);
 		return 0;
 	}
 
 	/* Wait until devices are available */
 	if (!sync_local_dev_names(lv->vg->cmd)) {
-		log_error("Failed to sync local devices before wiping volume %s.",
+		log_error("Failed to sync local devices before wiping LV %s.",
 			  display_lvname(lv));
 		return 0;
 	}
@@ -7615,59 +7613,40 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp)
 	}
 
 	if (!label_scan_open_rw(dev)) {
-		log_error("Failed to open %s for wiping and zeroing.", display_lvname(lv));
-		return 0;
+		log_error("Failed to open %s/%s for wiping and zeroing.", lv->vg->name, lv->name);
+		goto out;
 	}
 
 	if (wp.do_wipe_signatures) {
-		log_verbose("Wiping known signatures on logical volume %s.",
-			    display_lvname(lv));
+		log_verbose("Wiping known signatures on logical volume \"%s/%s\"",
+			    lv->vg->name, lv->name);
 		if (!wipe_known_signatures(lv->vg->cmd, dev, name, 0,
 					   TYPE_DM_SNAPSHOT_COW,
-					   wp.yes, wp.force, NULL)) {
-			log_error("Filed to wipe signatures of logical volume %s.",
-				  display_lvname(lv));
-			return 0;
-		}
+					   wp.yes, wp.force, NULL))
+			stack;
 	}
 
-	if (wp.do_zero || wp.is_metadata) {
-		zero_metadata = !wp.is_metadata ? 0 :
-			find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL);
-		if (zero_metadata) {
-			log_debug("Metadata logical volume %s will be fully zeroed.",
-				  display_lvname(lv));
-			zero_sectors = lv->size;
-		} else {
-			if (wp.is_metadata) /* Verbosely notify metadata will not be fully zeroed */
-				log_verbose("Metadata logical volume %s not fully zeroed and may contain stale data.",
-					    display_lvname(lv));
-			zero_sectors = UINT64_C(4096) >> SECTOR_SHIFT;
-			if (wp.zero_sectors > zero_sectors)
-				zero_sectors = wp.zero_sectors;
+	if (wp.do_zero) {
+		zero_sectors = wp.zero_sectors ? : UINT64_C(4096) >> SECTOR_SHIFT;
 
-			if (zero_sectors > lv->size)
-				zero_sectors = lv->size;
-		}
+		if (zero_sectors > lv->size)
+			zero_sectors = lv->size;
 
-		log_verbose("Initializing %s of logical volume %s with value %d.",
+		log_verbose("Initializing %s of logical volume \"%s/%s\" with value %d.",
 			    display_size(lv->vg->cmd, zero_sectors),
-			    display_lvname(lv), wp.zero_value);
-
-		if ((!wp.is_metadata &&
-		     wp.zero_value && !dev_set_bytes(dev, UINT64_C(0),
-						     (size_t) zero_sectors << SECTOR_SHIFT,
-						     (uint8_t)wp.zero_value)) ||
-		    !dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) {
-			log_error("Failed to initialize %s of logical volume %s with value %d.",
-				  display_size(lv->vg->cmd, zero_sectors),
-				  display_lvname(lv), wp.zero_value);
-			return 0;
+			    lv->vg->name, lv->name, wp.zero_value);
+
+		if (!wp.zero_value) {
+			if (!dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT))
+				stack;
+		} else {
+			if (!dev_set_bytes(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT, (uint8_t)wp.zero_value))
+				stack;
 		}
 	}
 
 	label_scan_invalidate(dev);
-
+out:
 	lv->status &= ~LV_NOSCAN;
 
 	return 1;
@@ -7731,10 +7710,12 @@ int activate_and_wipe_lvlist(struct dm_list *lv_list, int commit)
 		}
 
 	dm_list_iterate_items(lvl, lv_list) {
+		log_verbose("Wiping metadata area %s.", display_lvname(lvl->lv));
 		/* Wipe any know signatures */
-		if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_zero = 1 /* TODO: is_metadata = 1 */ })) {
+		if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_wipe_signatures = 1, .do_zero = 1, .zero_sectors = 1 })) {
+			log_error("Failed to wipe %s.", display_lvname(lvl->lv));
 			r = 0;
-			goto_out;
+			goto out;
 		}
 	}
 out:
@@ -8479,8 +8460,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
 				     .do_zero = lp->zero,
 				     .do_wipe_signatures = lp->wipe_signatures,
 				     .yes = lp->yes,
-				     .force = lp->force,
-				     .is_metadata = lp->is_metadata,
+				     .force = lp->force
 			     })) {
 			log_error("Aborting. Failed to wipe %s.", lp->snapshot
 				  ? "snapshot exception store" : "start of new LV");
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 06ea757..0cc5f37 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -803,7 +803,6 @@ struct wipe_params {
 	int do_wipe_signatures;	/* should we wipe known signatures found on LV? */
 	int yes;		/* answer yes automatically to all questions */
 	force_t force;		/* force mode */
-	int is_metadata;	/* wipe volume is metadata LV */
 };
 
 /* Zero out LV and/or wipe signatures */
@@ -956,7 +955,6 @@ struct lvcreate_params {
 	unsigned suppress_zero_warn : 1;
 	unsigned needs_lockd_init : 1;
 	unsigned ignore_type : 1;
-	unsigned is_metadata : 1; /* created LV will be used as metadata LV (and can be zeroed) */
 
 	const char *vg_name; /* only-used when VG is not yet opened (in /tools) */
 	const char *lv_name; /* all */
diff --git a/lib/metadata/pool_manip.c b/lib/metadata/pool_manip.c
index 23b5b63..bed51f1 100644
--- a/lib/metadata/pool_manip.c
+++ b/lib/metadata/pool_manip.c
@@ -545,8 +545,8 @@ int create_pool(struct logical_volume *pool_lv,
 				  display_lvname(pool_lv));
 			goto bad;
 		}
-		/* Clear pool metadata device. */
-		if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .is_metadata = 1 }))) {
+		/* Clear 4KB of pool metadata device. */
+		if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .do_zero = 1 }))) {
 			log_error("Aborting. Failed to wipe pool metadata %s.",
 				  display_lvname(pool_lv));
 		}
@@ -627,7 +627,6 @@ struct logical_volume *alloc_pool_metadata(struct logical_volume *pool_lv,
 		.tags = DM_LIST_HEAD_INIT(lvc.tags),
 		.temporary = 1,
 		.zero = 1,
-		.is_metadata = 1,
 	};
 
 	if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
@@ -664,7 +663,6 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg
 		.tags = DM_LIST_HEAD_INIT(lp.tags),
 		.temporary = 1,
 		.zero = 1,
-		.is_metadata = 1,
 	};
 
 	if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED)))
diff --git a/test/lib/aux.sh b/test/lib/aux.sh
index 17e7935..e40da95 100644
--- a/test/lib/aux.sh
+++ b/test/lib/aux.sh
@@ -1234,7 +1234,6 @@ activation/verify_udev_operations = $LVM_VERIFY_UDEV
 activation/raid_region_size = 512
 allocation/wipe_signatures_when_zeroing_new_lvs = 0
 allocation/vdo_slab_size_mb = 128
-allocation/zero_metadata = 0
 backup/archive = 0
 backup/backup = 0
 devices/cache_dir = "$TESTDIR/etc"
diff --git a/test/shell/lvcreate-signature-wiping.sh b/test/shell/lvcreate-signature-wiping.sh
index 18d7a2f..73fea54 100644
--- a/test/shell/lvcreate-signature-wiping.sh
+++ b/test/shell/lvcreate-signature-wiping.sh
@@ -42,13 +42,6 @@ init_lv_
 test_blkid_ || skip
 lvremove -f $vg/$lv1
 
-# Zeroing stops the command when there is a failure (write error in this case)
-aux error_dev "$dev1" "$(get first_extent_sector "$dev1"):2"
-not lvcreate -l1 -n $lv1 $vg 2>&1 | tee out
-grep "Failed to initialize" out
-aux enable_dev "$dev1"
-
-
 aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 0"
 
 lvcreate -y -Zn -l1 -n $lv1 $vg 2>&1 | tee out
diff --git a/test/shell/lvcreate-thin.sh b/test/shell/lvcreate-thin.sh
index c073eaf..9ca7f11 100644
--- a/test/shell/lvcreate-thin.sh
+++ b/test/shell/lvcreate-thin.sh
@@ -248,25 +248,4 @@ not lvcreate -s $vg/lv1 -L4M -V2G --name $vg/lv4
 not lvcreate -T mirpool -L4M --alloc anywhere -m1 $vg
 not lvcreate --thinpool mirpool -L4M --alloc anywhere -m1 $vg
 
-
-# Check pool metadata volume is zeroed, when zero_metadata is enabled.
-# 1st. ensure 8megs of both PVs will have some non-0 data
-lvcreate -L8m -n $lv1 $vg "$dev1"
-lvextend -L+8m $vg/$lv1 "$dev2"
-dd if=/dev/urandom of="$DM_DEV_DIR/$vg/$lv1" bs=1M count=16 oflag=direct conv=fdatasync
-lvremove -ff $vg/$lv1
-
-lvcreate -l1 --poolmetadatasize 4m --conf 'allocation/zero_metadata=1' -vvvv -T $vg/pool
-lvchange -an $vg
-# component activation to check device was zeroed
-lvchange -y -ay $vg/pool_tmeta
-dd if="$DM_DEV_DIR/$vg/pool_tmeta" of=file bs=1M count=3 skip=1 iflag=direct conv=fdatasync
-
-md5sum -b file | tee out
-# md5sum of 3M of zeros
-grep d1dd210d6b1312cb342b56d02bd5e651 out
-lvchange -an $vg
-lvremove -ff $vg
-
-
 vgremove -ff $vg
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index 524ed5a..6324ed7 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -3286,11 +3286,7 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
 			}
 			metadata_lv->status &= ~LV_ACTIVATION_SKIP;
 
-			if (!wipe_lv(metadata_lv, (struct wipe_params) {
-						  .do_wipe_signatures = 1,
-						  .is_metadata = 1,
-						  .yes = arg_count(cmd, yes_ARG),
-						  .force = arg_count(cmd, force_ARG) } )) {
+			if (!wipe_lv(metadata_lv, (struct wipe_params) { .do_zero = 1 })) {
 				log_error("Aborting. Failed to wipe metadata lv.");
 				goto bad;
 			}
@@ -5527,8 +5523,7 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv)
 	struct wipe_params wp = {
 		.do_wipe_signatures = 1, /* optional, to print warning if clobbering something */
 		.do_zero = 1,            /* required for dm-writecache to work */
-		.yes = arg_count(cmd, yes_ARG),
-		.force = arg_count(cmd, force_ARG)
+		.zero_sectors = 1
 	};
 	int ret;
 
@@ -5545,8 +5540,7 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv)
 		return 0;
 	}
 
-	if (!(ret = wipe_lv(lv, wp)))
-		stack;
+	ret = wipe_lv(lv, wp);
 
 	if (!deactivate_lv(cmd, lv)) {
 		log_error("Failed to deactivate LV %s for zeroing.", display_lvname(lv));
-- 
1.8.3.1