From f8f52d1238953180bbf7685437f58f20405eb541 Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Apr 23 2019 12:36:58 +0000
Subject: import lvm2-2.02.180-10.el7_6.7

---

diff --git a/SOURCES/lvm2-2_02_182-bcache-reduce-MAX_IO-to-256.patch b/SOURCES/lvm2-2_02_182-bcache-reduce-MAX_IO-to-256.patch
new file mode 100644
index 0000000..3577f9f
--- /dev/null
+++ b/SOURCES/lvm2-2_02_182-bcache-reduce-MAX_IO-to-256.patch
@@ -0,0 +1,37 @@
+ WHATS_NEW           |  1 +
+ lib/device/bcache.c | 10 +++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/WHATS_NEW b/WHATS_NEW
+index 47db4a3..8063364 100644
+--- a/WHATS_NEW
++++ b/WHATS_NEW
+@@ -10,6 +10,7 @@ Version 2.02.182 -
+ Fix change of monitoring in clustered volumes.
+ Fix lvconvert striped/raid0/raid0_meta -> raid6 regression.
+ Add After=rbdmap.service to {lvm2-activation-net,blk-availability}.service.
++ Reduce max concurrent aios to avoid EMFILE with many devices.
+ Fix lvconvert conversion attempts to linear.
+ Fix lvconvert raid0/raid0_meta -> striped regression.
+ Fix lvconvert --splitmirror for mirror type (2.02.178).
+diff --git a/lib/device/bcache.c b/lib/device/bcache.c
+index 6886b74..7384a32 100644
+--- a/lib/device/bcache.c
++++ b/lib/device/bcache.c
+@@ -253,7 +253,15 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
+ return true;
+ }
+
+-#define MAX_IO 1024
++/*
++ * MAX_IO is returned to the layer above via bcache_max_prefetches() which
++ * tells the caller how many devices to submit io for concurrently. There will
++ * be an open file descriptor for each of these, so keep it low enough to avoid
++ * reaching the default max open file limit (1024) when there are over 1024
++ * devices being scanned.
++ */
++
++#define MAX_IO 256
+ #define MAX_EVENT 64
+
+ static bool _async_wait(struct io_engine *ioe, io_complete_fn fn)
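The new comment spells out the sizing constraint: every queued prefetch pins an open file descriptor, so the cap has to stay comfortably below the default per-process fd limit of 1024. A minimal standalone sketch of that reasoning (illustrative names, not the lvm2 bcache API):

/*
 * Sketch only: shows why a prefetch cap must leave room under RLIMIT_NOFILE
 * when thousands of devices are scanned.  PREFETCH_CAP mirrors the reduced
 * MAX_IO above; the helper name is hypothetical.
 */
#include <stdio.h>
#include <sys/resource.h>

#define PREFETCH_CAP 256

static unsigned max_concurrent_devs(unsigned ndevs)
{
	struct rlimit rl;
	unsigned limit = PREFETCH_CAP;

	/* Never keep more devices open for prefetch than half the fd limit. */
	if (!getrlimit(RLIMIT_NOFILE, &rl) && rl.rlim_cur / 2 < limit)
		limit = (unsigned) (rl.rlim_cur / 2);

	return ndevs < limit ? ndevs : limit;
}

int main(void)
{
	printf("prefetch %u of 4000 devices at a time\n",
	       max_concurrent_devs(4000));
	return 0;
}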
diff --git a/SOURCES/lvm2-2_02_184-apply-obtain_device_list_from_udev-to-all-libudev-us.patch b/SOURCES/lvm2-2_02_184-apply-obtain_device_list_from_udev-to-all-libudev-us.patch
new file mode 100644
index 0000000..57befdb
--- /dev/null
+++ b/SOURCES/lvm2-2_02_184-apply-obtain_device_list_from_udev-to-all-libudev-us.patch
@@ -0,0 +1,27 @@
+ lib/device/dev-type.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c
+index 331fe07..60b0d78 100644
+--- a/lib/device/dev-type.c
++++ b/lib/device/dev-type.c
+@@ -1062,6 +1062,9 @@ int udev_dev_is_mpath_component(struct device *dev)
+ const char *value;
+ int ret = 0;
+
++ if (!obtain_device_list_from_udev())
++ return 0;
++
+ if (!(udev_device = _udev_get_dev(dev)))
+ return 0;
+
+@@ -1091,6 +1094,9 @@ int udev_dev_is_md_component(struct device *dev)
+ const char *value;
+ int ret = 0;
+
++ if (!obtain_device_list_from_udev())
++ return 0;
++
+ if (!(udev_device = _udev_get_dev(dev)))
+ return 0;
+
diff --git a/SOURCES/lvm2-2_02_184-config-add-new-setting-io_memory_size.patch b/SOURCES/lvm2-2_02_184-config-add-new-setting-io_memory_size.patch
new file mode 100644
index 0000000..3d876ab
--- /dev/null
+++ b/SOURCES/lvm2-2_02_184-config-add-new-setting-io_memory_size.patch
@@ -0,0 +1,160 @@
+ lib/commands/toolcontext.c   |  2 ++
+ lib/config/config_settings.h |  8 ++++++++
+ lib/config/defaults.h        |  2 ++
+ lib/label/label.c            | 40 ++++++++++++++++++++--------------------
+ lib/misc/lvm-globals.c       | 10 ++++++++++
+ lib/misc/lvm-globals.h       |  3 +++
+ 6 files changed, 45 insertions(+), 20 deletions(-)
+
+diff --git a/lib/commands/toolcontext.c b/lib/commands/toolcontext.c
+index 4d3f744..95b2317 100644
+--- a/lib/commands/toolcontext.c
++++ b/lib/commands/toolcontext.c
+@@ -693,6 +693,8 @@ static int _process_config(struct cmd_context *cmd)
+ if (!_init_system_id(cmd))
+ return_0;
+
++ init_io_memory_size(find_config_tree_int(cmd, global_io_memory_size_CFG, NULL));
++
+ return 1;
+ }
+
+diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
+index 2de3fd4..c3e9600 100644
+--- a/lib/config/config_settings.h
++++ b/lib/config/config_settings.h
+@@ -1138,6 +1138,14 @@ cfg(global_notify_dbus_CFG, "notify_dbus", global_CFG_SECTION, 0, CFG_TYPE_BOOL,
+ "When enabled, an LVM command that changes PVs, changes VG metadata,\n"
+ "or changes the activation state of an LV will send a notification.\n")
+
++cfg(global_io_memory_size_CFG, "io_memory_size", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_IO_MEMORY_SIZE_KB, vsn(2, 2, 184), NULL, 0, NULL,
++ "The amount of memory in KiB that LVM allocates to perform disk io.\n"
++ "LVM performance may benefit from more io memory when there are many\n"
++ "disks or VG metadata is large. Increasing this size may be necessary\n"
++ "when a single copy of VG metadata is larger than the current setting.\n"
++ "This value should usually not be decreased from the default; setting\n"
++ "it too low can result in lvm failing to read VGs.\n")
++
+ cfg(activation_udev_sync_CFG, "udev_sync", activation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_UDEV_SYNC, vsn(2, 2, 51), NULL, 0, NULL,
+ "Use udev notifications to synchronize udev and LVM.\n"
+ "The --nodevsync option overrides this setting.\n"
+diff --git a/lib/config/defaults.h b/lib/config/defaults.h
+index b3e6c34..690bf56 100644
+--- a/lib/config/defaults.h
++++ b/lib/config/defaults.h
+@@ -267,4 +267,6 @@
+ #define DEFAULT_THIN_POOL_AUTOEXTEND_THRESHOLD 100
+ #define DEFAULT_THIN_POOL_AUTOEXTEND_PERCENT 20
+
++#define DEFAULT_IO_MEMORY_SIZE_KB 4096
++
+ #endif /* _LVM_DEFAULTS_H */
+diff --git a/lib/label/label.c b/lib/label/label.c
+index 03726d0..4ec7d9b 100644
+--- a/lib/label/label.c
++++ b/lib/label/label.c
+@@ -772,33 +772,33 @@ out:
+ }
+
+ /*
+- * How many blocks to set up in bcache? Is 1024 a good max?
++ * num_devs is the number of devices the caller is going to scan.
++ * When 0 the caller doesn't know, and we use the default cache size.
++ * When non-zero, allocate at least num_devs bcache blocks.
++ * num_devs doesn't really tell us how many bcache blocks we'll use
++ * because it includes lvm devs and non-lvm devs, and each lvm dev
++ * will often use a number of bcache blocks.
+ *
+- * Currently, we tell bcache to set up N blocks where N
+- * is the number of devices that are going to be scanned.
+- * Reasons why this number may not be be a good choice:
+- *
+- * - there may be a lot of non-lvm devices, which
+- * would make this number larger than necessary
+- *
+- * - each lvm device may use more than one cache
+- * block if the metadata is large enough or it
+- * uses more than one metadata area, which
+- * would make this number smaller than it
+- * should be for the best performance.
+- *
+- * This is even more tricky to estimate when lvmetad
+- * is used, because it's hard to predict how many
+- * devs might need to be scanned when using lvmetad.
+- * This currently just sets up bcache with MIN blocks.
++ * We don't know ahead of time if we will find some VG metadata
++ * that is larger than the total size of the bcache, which would
++ * prevent us from reading/writing the VG since we do not dynamically
++ * increase the bcache size when we find it's too small. In these
++ * cases the user would need to set io_memory_size to be larger
++ * than the max VG metadata size (lvm does not impose any limit on
++ * the metadata size.)
+ */
+
+-#define MIN_BCACHE_BLOCKS 32
++#define MIN_BCACHE_BLOCKS 32 /* 4MB, currently matches DEFAULT_IO_MEMORY_SIZE_KB */
+ #define MAX_BCACHE_BLOCKS 1024
+
+-static int _setup_bcache(int cache_blocks)
++static int _setup_bcache(int num_devs)
+ {
+ struct io_engine *ioe = NULL;
++ int iomem_kb = io_memory_size();
++ int block_size_kb = (BCACHE_BLOCK_SIZE_IN_SECTORS * 512) / 1024;
++ int cache_blocks;
++
++ cache_blocks = iomem_kb / block_size_kb;
+
+ if (cache_blocks < MIN_BCACHE_BLOCKS)
+ cache_blocks = MIN_BCACHE_BLOCKS;
+diff --git a/lib/misc/lvm-globals.c b/lib/misc/lvm-globals.c
+index 82c5706..3bd5cac 100644
+--- a/lib/misc/lvm-globals.c
++++ b/lib/misc/lvm-globals.c
+@@ -54,6 +54,7 @@ static char _sysfs_dir_path[PATH_MAX] = "";
+ static int _dev_disable_after_error_count = DEFAULT_DISABLE_AFTER_ERROR_COUNT;
+ static uint64_t _pv_min_size = (DEFAULT_PV_MIN_SIZE_KB * 1024L >> SECTOR_SHIFT);
+ static const char *_unknown_device_name = DEFAULT_UNKNOWN_DEVICE_NAME;
++static int _io_memory_size_kb = DEFAULT_IO_MEMORY_SIZE_KB;
+
+ void init_verbose(int level)
+ {
+@@ -387,3 +388,12 @@ void init_unknown_device_name(const char *name)
+ _unknown_device_name = name;
+ }
+
++int io_memory_size(void)
++{
++ return _io_memory_size_kb;
++}
++
++void init_io_memory_size(int val)
++{
++ _io_memory_size_kb = val;
++}
+diff --git a/lib/misc/lvm-globals.h b/lib/misc/lvm-globals.h
+index f985cfa..3007cc5 100644
+--- a/lib/misc/lvm-globals.h
++++ b/lib/misc/lvm-globals.h
+@@ -53,6 +53,7 @@ void init_pv_min_size(uint64_t sectors);
+ void init_activation_checks(int checks);
+ void init_retry_deactivation(int retry);
+ void init_unknown_device_name(const char *name);
++void init_io_memory_size(int val);
+
+ void set_cmd_name(const char *cmd_name);
+ const char *get_cmd_name(void);
+@@ -86,6 +87,7 @@ uint64_t pv_min_size(void);
+ int activation_checks(void);
+ int retry_deactivation(void);
+ const char *unknown_device_name(void);
++int io_memory_size(void);
+
+ #define DMEVENTD_MONITOR_IGNORE -1
+ int dmeventd_monitor_mode(void);
+@@ -93,4 +95,5 @@ int dmeventd_monitor_mode(void);
+
+ #define NO_DEV_ERROR_COUNT_LIMIT 0
+ int dev_disable_after_error_count(void);
+
++
+ #endif
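With the patch above, the bcache size is no longer derived from the number of devices; it is derived from io_memory_size. The arithmetic reduces to a few lines, shown here as a standalone sketch (the 128 KiB block size is an assumption implied by the "32 blocks = 4MB" comment in the patch; the helper name is illustrative):

/* Standalone sketch of the _setup_bcache() block-count calculation. */
#include <stdio.h>

#define BCACHE_BLOCK_SIZE_IN_SECTORS 256	/* assumed: 128 KiB blocks */
#define MIN_BCACHE_BLOCKS 32
#define MAX_BCACHE_BLOCKS 1024

static int blocks_for_io_memory(int iomem_kb)
{
	int block_size_kb = (BCACHE_BLOCK_SIZE_IN_SECTORS * 512) / 1024;
	int cache_blocks = iomem_kb / block_size_kb;

	if (cache_blocks < MIN_BCACHE_BLOCKS)
		cache_blocks = MIN_BCACHE_BLOCKS;
	if (cache_blocks > MAX_BCACHE_BLOCKS)
		cache_blocks = MAX_BCACHE_BLOCKS;
	return cache_blocks;
}

int main(void)
{
	/* 4096 KiB default -> 32 blocks; 262144 KiB (256 MiB) -> capped at 1024. */
	printf("%d %d\n", blocks_for_io_memory(4096), blocks_for_io_memory(262144));
	return 0;
}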
diff --git a/SOURCES/lvm2-2_02_184-dm-migration_threshold-for-old-linked-tools.patch b/SOURCES/lvm2-2_02_184-dm-migration_threshold-for-old-linked-tools.patch
new file mode 100644
index 0000000..ee0ccdc
--- /dev/null
+++ b/SOURCES/lvm2-2_02_184-dm-migration_threshold-for-old-linked-tools.patch
@@ -0,0 +1,73 @@
+ WHATS_NEW_DM          |  4 ++++
+ libdm/libdm-deptree.c | 20 +++++++++++++++++---
+ 2 files changed, 21 insertions(+), 3 deletions(-)
+
+diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
+index bbd1057..68b7c0d 100644
+--- a/WHATS_NEW_DM
++++ b/WHATS_NEW_DM
+@@ -1,3 +1,7 @@
++Version 1.02.156 -
++=====================================
++ Ensure migration_threshold for cache is at least 8 chunks.
++
+ Version 1.02.154 -
+ ==============================
+ Fix dmstats report printing no output.
+diff --git a/libdm/libdm-deptree.c b/libdm/libdm-deptree.c
+index ab05456..0728611 100644
+--- a/libdm/libdm-deptree.c
++++ b/libdm/libdm-deptree.c
+@@ -192,6 +192,7 @@ struct load_segment {
+ uint64_t transaction_id; /* Thin_pool */
+ uint64_t low_water_mark; /* Thin_pool */
+ uint32_t data_block_size; /* Thin_pool + cache */
++ uint32_t migration_threshold; /* Cache */
+ unsigned skip_block_zeroing; /* Thin_pool */
+ unsigned ignore_discard; /* Thin_pool target vsn 1.1 */
+ unsigned no_discard_passdown; /* Thin_pool target vsn 1.1 */
+@@ -2462,10 +2463,14 @@ static int _cache_emit_segment_line(struct dm_task *dmt,
+
+ EMIT_PARAMS(pos, " %s", name);
+
+- EMIT_PARAMS(pos, " %u", seg->policy_argc * 2);
++ /* Do not pass migration_threshold 2048 which is default */
++ EMIT_PARAMS(pos, " %u", (seg->policy_argc + (seg->migration_threshold != 2048) ? 1 : 0) * 2);
++ if (seg->migration_threshold != 2048)
++ EMIT_PARAMS(pos, " migration_threshold %u", seg->migration_threshold);
+ if (seg->policy_settings)
+ for (cn = seg->policy_settings->child; cn; cn = cn->sib)
+- EMIT_PARAMS(pos, " %s %" PRIu64, cn->key, cn->v->v.i);
++ if (cn->v) /* Skip deleted entry */
++ EMIT_PARAMS(pos, " %s %" PRIu64, cn->key, cn->v->v.i);
+
+ return 1;
+ }
+@@ -3373,6 +3378,7 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
+ seg->data_block_size = data_block_size;
+ seg->flags = feature_flags;
+ seg->policy_name = policy_name;
++ seg->migration_threshold = 2048; /* Default migration threshold 1MiB */
+
+ /* FIXME: better validation missing */
+ if (policy_settings) {
+@@ -3385,10 +3391,18 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
+ log_error("Cache policy parameter %s is without integer value.", cn->key);
+ return 0;
+ }
+- seg->policy_argc++;
++ if (strcmp(cn->key, "migration_threshold") == 0) {
++ seg->migration_threshold = cn->v->v.i;
++ cn->v = NULL; /* skip this entry */
++ } else
++ seg->policy_argc++;
+ }
+ }
+
++ /* Always some throughput available for cache to proceed */
++ if (seg->migration_threshold < data_block_size * 8)
++ seg->migration_threshold = data_block_size * 8;
++
+ return 1;
+ }
+
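The intent of the libdm change above: only pass migration_threshold to the kernel when it differs from the 2048-sector (1 MiB) default, and never let it fall below 8 data chunks so the cache always has some migration bandwidth. A reduced sketch of that policy in plain C (illustrative names, not the libdevmapper interfaces):

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_MIGRATION_THRESHOLD 2048	/* sectors, i.e. 1 MiB */

static uint32_t clamp_migration_threshold(uint32_t requested,
					  uint32_t data_block_size)
{
	/* 0 means the caller did not set a policy value. */
	uint32_t threshold = requested ? requested : DEFAULT_MIGRATION_THRESHOLD;

	/* Keep some throughput available so the cache can always proceed. */
	if (threshold < data_block_size * 8)
		threshold = data_block_size * 8;

	return threshold;
}

int main(void)
{
	/* 512 KiB chunks (1024 sectors): the 2048 default is too low, clamp to 8192. */
	uint32_t t = clamp_migration_threshold(0, 1024);

	if (t != DEFAULT_MIGRATION_THRESHOLD)
		printf("migration_threshold %u\n", t);	/* would be emitted in the table line */
	return 0;
}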
diff --git a/SOURCES/lvm2-2_02_184-io-increase-the-default-io-memory-from-4-to-8-MiB.patch b/SOURCES/lvm2-2_02_184-io-increase-the-default-io-memory-from-4-to-8-MiB.patch
new file mode 100644
index 0000000..db6a9ba
--- /dev/null
+++ b/SOURCES/lvm2-2_02_184-io-increase-the-default-io-memory-from-4-to-8-MiB.patch
@@ -0,0 +1,29 @@
+ lib/config/defaults.h | 2 +-
+ lib/label/label.c     | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/config/defaults.h b/lib/config/defaults.h
+index 690bf56..ad20e0f 100644
+--- a/lib/config/defaults.h
++++ b/lib/config/defaults.h
+@@ -267,6 +267,6 @@
+ #define DEFAULT_THIN_POOL_AUTOEXTEND_THRESHOLD 100
+ #define DEFAULT_THIN_POOL_AUTOEXTEND_PERCENT 20
+
+-#define DEFAULT_IO_MEMORY_SIZE_KB 4096
++#define DEFAULT_IO_MEMORY_SIZE_KB 8192
+
+ #endif /* _LVM_DEFAULTS_H */
+diff --git a/lib/label/label.c b/lib/label/label.c
+index 4fdbbb7..00dadfd 100644
+--- a/lib/label/label.c
++++ b/lib/label/label.c
+@@ -791,7 +791,7 @@ out:
+ * the metadata size.)
+ */
+
+-#define MIN_BCACHE_BLOCKS 32 /* 4MB, currently matches DEFAULT_IO_MEMORY_SIZE_KB */
++#define MIN_BCACHE_BLOCKS 32 /* 4MB */
+ #define MAX_BCACHE_BLOCKS 1024
+
+ static int _setup_bcache(int num_devs)
diff --git a/SOURCES/lvm2-2_02_184-io-warn-when-metadata-size-approaches-io-memory-size.patch b/SOURCES/lvm2-2_02_184-io-warn-when-metadata-size-approaches-io-memory-size.patch
new file mode 100644
index 0000000..6df9bd9
--- /dev/null
+++ b/SOURCES/lvm2-2_02_184-io-warn-when-metadata-size-approaches-io-memory-size.patch
@@ -0,0 +1,136 @@
+ lib/cache/lvmcache.c          | 15 +++++++++++++++
+ lib/cache/lvmcache.h          |  3 +++
+ lib/format_text/format-text.c |  4 ++++
+ lib/label/label.c             | 41 +++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 63 insertions(+)
+
+diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c
+index f55a14c..62f1d3c 100644
+--- a/lib/cache/lvmcache.c
++++ b/lib/cache/lvmcache.c
+@@ -3048,3 +3048,18 @@ int lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const ch
+ return 1;
+ }
+
++static uint64_t _max_metadata_size;
++
++void lvmcache_save_metadata_size(uint64_t val)
++{
++ if (!_max_metadata_size)
++ _max_metadata_size = val;
++ else if (_max_metadata_size < val)
++ _max_metadata_size = val;
++}
++
++uint64_t lvmcache_max_metadata_size(void)
++{
++ return _max_metadata_size;
++}
++
+diff --git a/lib/cache/lvmcache.h b/lib/cache/lvmcache.h
+index bf976e9..f436785 100644
+--- a/lib/cache/lvmcache.h
++++ b/lib/cache/lvmcache.h
+@@ -225,4 +225,7 @@ struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted);
+ struct volume_group *lvmcache_get_saved_vg_latest(const char *vgid);
+ void lvmcache_drop_saved_vgid(const char *vgid);
+
++uint64_t lvmcache_max_metadata_size(void);
++void lvmcache_save_metadata_size(uint64_t val);
++
+ #endif
+diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c
+index 36afba1..200b011 100644
+--- a/lib/format_text/format-text.c
++++ b/lib/format_text/format-text.c
+@@ -1294,6 +1294,10 @@ int read_metadata_location_summary(const struct format_type *fmt,
+ */
+ vgsummary->mda_checksum = rlocn->checksum;
+ vgsummary->mda_size = rlocn->size;
++
++ /* Keep track of largest metadata size we find. */
++ lvmcache_save_metadata_size(rlocn->size);
++
+ lvmcache_lookup_mda(vgsummary);
+
+ if (!text_read_metadata_summary(fmt, dev_area->dev, MDA_CONTENT_REASON(primary_mda),
+diff --git a/lib/label/label.c b/lib/label/label.c
+index 4ec7d9b..4fdbbb7 100644
+--- a/lib/label/label.c
++++ b/lib/label/label.c
+@@ -21,6 +21,7 @@
+ #include "bcache.h"
+ #include "toolcontext.h"
+ #include "activate.h"
++#include "metadata.h"
+
+ #include
+ #include
+@@ -29,6 +30,8 @@
+
+ int use_full_md_check;
+
++static uint64_t _current_bcache_size_bytes;
++
+ /* FIXME Allow for larger labels? Restricted to single sector currently */
+
+ /*
+@@ -806,6 +809,8 @@ static int _setup_bcache(int num_devs)
+ if (cache_blocks > MAX_BCACHE_BLOCKS)
+ cache_blocks = MAX_BCACHE_BLOCKS;
+
++ _current_bcache_size_bytes = cache_blocks * BCACHE_BLOCK_SIZE_IN_SECTORS * 512;
++
+ if (use_aio()) {
+ if (!(ioe = create_async_io_engine())) {
+ log_warn("Failed to set up async io, using sync io.");
+@@ -839,6 +844,7 @@ int label_scan(struct cmd_context *cmd)
+ struct dev_iter *iter;
+ struct device_list *devl, *devl2;
+ struct device *dev;
++ uint64_t max_metadata_size_bytes;
+
+ log_debug_devs("Finding devices to scan");
+
+@@ -909,6 +915,41 @@ int label_scan(struct cmd_context *cmd)
+
+ _scan_list(cmd, cmd->full_filter, &all_devs, NULL);
+
++ /*
++ * Metadata could be larger than total size of bcache, and bcache
++ * cannot currently be resized during the command. If this is the
++ * case (or within reach), warn that io_memory_size needs to be
++ * set larger.
++ *
++ * Even if bcache out of space did not cause a failure during scan, it
++ * may cause a failure during the next vg_read phase or during vg_write.
++ *
++ * If there was an error during scan, we could recreate bcache here
++ * with a larger size and then restart label_scan. But, this does not
++ * address the problem of writing new metadata that exceeds the bcache
++ * size and failing, which would often be hit first, i.e. we'll fail
++ * to write new metadata exceeding the max size before we have a chance
++ * to read any metadata with that size, unless we find an existing vg
++ * that has been previously created with the larger size.
++ *
++ * If the largest metadata is within 1MB of the bcache size, then start
++ * warning.
++ */
++ max_metadata_size_bytes = lvmcache_max_metadata_size();
++
++ if (max_metadata_size_bytes + (1024 * 1024) > _current_bcache_size_bytes) {
++ /* we want bcache to be 1MB larger than the max metadata seen */
++ uint64_t want_size_kb = (max_metadata_size_bytes / 1024) + 1024;
++ uint64_t remainder;
++ if ((remainder = (want_size_kb % 1024)))
++ want_size_kb = want_size_kb + 1024 - remainder;
++
++ log_warn("WARNING: metadata may not be usable with current io_memory_size %d KiB",
++ io_memory_size());
++ log_warn("WARNING: increase lvm.conf io_memory_size to at least %llu KiB",
++ (unsigned long long)want_size_kb);
++ }
++
+ dm_list_iterate_items_safe(devl, devl2, &all_devs) {
+ dm_list_del(&devl->list);
+ dm_free(devl);
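The suggested value in the warning above is simply the largest metadata area seen, plus 1 MiB of headroom, rounded up to a whole MiB. The same arithmetic as a standalone sketch (helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t suggested_io_memory_kb(uint64_t max_metadata_bytes)
{
	/* 1 MiB of headroom, then round up to a whole MiB, as in label_scan(). */
	uint64_t want_size_kb = (max_metadata_bytes / 1024) + 1024;
	uint64_t remainder = want_size_kb % 1024;

	if (remainder)
		want_size_kb += 1024 - remainder;

	return want_size_kb;
}

int main(void)
{
	/* 9.5 MiB of metadata -> suggest at least 11264 KiB (11 MiB). */
	printf("%llu KiB\n",
	       (unsigned long long)suggested_io_memory_kb(9961472));
	return 0;
}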
diff --git a/SOURCES/lvm2-2_02_185-bcache-Fix-memory-leak.patch b/SOURCES/lvm2-2_02_185-bcache-Fix-memory-leak.patch
new file mode 100644
index 0000000..acdfabe
--- /dev/null
+++ b/SOURCES/lvm2-2_02_185-bcache-Fix-memory-leak.patch
@@ -0,0 +1,15 @@
+ lib/device/bcache.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/device/bcache.c b/lib/device/bcache.c
+index 7384a32..f64931f 100644
+--- a/lib/device/bcache.c
++++ b/lib/device/bcache.c
+@@ -411,6 +411,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
+ (unsigned long long)offset,
+ (unsigned long long)nbytes,
+ (unsigned long long)_last_byte_offset);
++ free(io);
+ return false;
+ }
+
diff --git a/SOURCES/lvm2-build-make-generate.patch b/SOURCES/lvm2-build-make-generate.patch
new file mode 100644
index 0000000..3a292d2
--- /dev/null
+++ b/SOURCES/lvm2-build-make-generate.patch
@@ -0,0 +1,24 @@
+ conf/example.conf.in | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/conf/example.conf.in b/conf/example.conf.in
+index b37e0b2..fec779b 100644
+--- a/conf/example.conf.in
++++ b/conf/example.conf.in
+@@ -1125,6 +1125,16 @@ global {
+ # When enabled, an LVM command that changes PVs, changes VG metadata,
+ # or changes the activation state of an LV will send a notification.
+ notify_dbus = 1
++
++ # Configuration option global/io_memory_size.
++ # The amount of memory in KiB that LVM allocates to perform disk io.
++ # LVM performance may benefit from more io memory when there are many
++ # disks or VG metadata is large. Increasing this size may be necessary
++ # when a single copy of VG metadata is larger than the current setting.
++ # This value should usually not be decreased from the default; setting
++ # it too low can result in lvm failing to read VGs.
++ # This configuration option has an automatic default value.
++ # io_memory_size = 8192
+ }
+
+ # Configuration section activation.
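The next patch lowers the vsn() values of the backported settings from 2.2.183/2.2.184 to 2.2.180. The introduction version recorded for a setting is compared against version filters (for example lvmconfig --atversion), so a setting backported into 2.02.180 should not claim a newer release than the binary that ships it. A minimal illustration of such a version comparison (the encoding below is assumed, not the exact lvm2 macro):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical encoding of an LVM release as a single comparable number. */
static uint32_t vsn(uint32_t major, uint32_t minor, uint32_t patchlevel)
{
	return (major << 16) + (minor << 8) + patchlevel;
}

int main(void)
{
	uint32_t running = vsn(2, 2, 180);	/* the lvm2 2.02.180 package built here */

	/* A setting tagged vsn(2, 2, 184) looks "newer" than the running code
	 * and would be filtered out of version-limited listings. */
	printf("vsn(2,2,184) visible at 2.02.180: %s\n",
	       vsn(2, 2, 184) <= running ? "yes" : "no");
	printf("vsn(2,2,180) visible at 2.02.180: %s\n",
	       vsn(2, 2, 180) <= running ? "yes" : "no");
	return 0;
}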
diff --git a/SOURCES/lvm2-rhel-config-Change-version-for-backported-config-options.patch b/SOURCES/lvm2-rhel-config-Change-version-for-backported-config-options.patch
new file mode 100644
index 0000000..d1359b7
--- /dev/null
+++ b/SOURCES/lvm2-rhel-config-Change-version-for-backported-config-options.patch
@@ -0,0 +1,25 @@
+ lib/config/config_settings.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
+index c3e9600..5fddff0 100644
+--- a/lib/config/config_settings.h
++++ b/lib/config/config_settings.h
+@@ -947,7 +947,7 @@ cfg(global_lvdisplay_shows_full_device_path_CFG, "lvdisplay_shows_full_device_pa
+ "Previously this was always shown as /dev/vgname/lvname even when that\n"
+ "was never a valid path in the /dev filesystem.\n")
+
+-cfg(global_use_aio_CFG, "use_aio", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_AIO, vsn(2, 2, 183), NULL, 0, NULL,
++cfg(global_use_aio_CFG, "use_aio", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_AIO, vsn(2, 2, 180), NULL, 0, NULL,
+ "Use async I/O when reading and writing devices.\n")
+
+ cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_USE_LVMETAD, vsn(2, 2, 93), "@DEFAULT_USE_LVMETAD@", 0, NULL,
+@@ -1138,7 +1138,7 @@ cfg(global_notify_dbus_CFG, "notify_dbus", global_CFG_SECTION, 0, CFG_TYPE_BOOL,
+ "When enabled, an LVM command that changes PVs, changes VG metadata,\n"
+ "or changes the activation state of an LV will send a notification.\n")
+
+-cfg(global_io_memory_size_CFG, "io_memory_size", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_IO_MEMORY_SIZE_KB, vsn(2, 2, 184), NULL, 0, NULL,
++cfg(global_io_memory_size_CFG, "io_memory_size", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_IO_MEMORY_SIZE_KB, vsn(2, 2, 180), NULL, 0, NULL,
+ "The amount of memory in KiB that LVM allocates to perform disk io.\n"
+ "LVM performance may benefit from more io memory when there are many\n"
+ "disks or VG metadata is large. Increasing this size may be necessary\n"
diff --git a/SPECS/lvm2.spec b/SPECS/lvm2.spec
index 85346e9..11d237a 100644
--- a/SPECS/lvm2.spec
+++ b/SPECS/lvm2.spec
@@ -67,7 +67,7 @@ Summary: Userland logical volume management tools
 Name: lvm2
 Epoch: 7
 Version: 2.02.180
-Release: 10%{?dist}.3%{?scratch}
+Release: 10%{?dist}.7%{?scratch}
 License: GPLv2
 Group: System Environment/Base
 URL: http://sources.redhat.com/lvm2
@@ -120,6 +120,22 @@ Patch40: lvm2-2_02_183-pvscan-lvmetad-use-udev-info-to-improve-md-component.patc
 # Overhead:
 Patch41: lvm2-2_02_183-build-make-generate.patch
 Patch42: lvm2-2_02_183-WHATS_NEW.patch
+# BZ 1688316:
+Patch43: lvm2-2_02_184-apply-obtain_device_list_from_udev-to-all-libudev-us.patch
+# BZ 1695879
+Patch44: lvm2-2_02_182-bcache-reduce-MAX_IO-to-256.patch
+# Mem leak found by Coverity:
+Patch45: lvm2-2_02_185-bcache-Fix-memory-leak.patch
+# BZ 1696742
+Patch46: lvm2-2_02_184-config-add-new-setting-io_memory_size.patch
+Patch47: lvm2-2_02_184-io-warn-when-metadata-size-approaches-io-memory-size.patch
+Patch48: lvm2-2_02_184-io-increase-the-default-io-memory-from-4-to-8-MiB.patch
+# BZ 1696740
+Patch49: lvm2-2_02_184-dm-migration_threshold-for-old-linked-tools.patch
+# Internals:
+Patch50: lvm2-rhel-config-Change-version-for-backported-config-options.patch
+Patch51: lvm2-build-make-generate.patch
+
 BuildRequires: libselinux-devel >= %{libselinux_version}, libsepol-devel
 BuildRequires: libblkid-devel >= %{util_linux_version}
@@ -215,6 +231,15 @@ or more physical volumes and creating one or more logical volumes
 %patch40 -p1 -b .pvscan_lvmetad_use_udev_info_to_improve_md_component
 %patch41 -p1 -b .build_make_generate2
 %patch42 -p1 -b .WHATS_NEW2
+%patch43 -p1 -b .apply_obtain_device_list_from_udev
+%patch44 -p1 -b .bcache_reduce_MAX_IO_to_256
+%patch45 -p1 -b .bcache_Fix_memory_leak
+%patch46 -p1 -b .config_add_new_setting_io_memory_size
+%patch47 -p1 -b .io_warn_when_metadata_size_approaches_io_memory_size
+%patch48 -p1 -b .io_increase_the_default_io_memory_from_4_to_8_MiB
+%patch49 -p1 -b .dm_migration_threshold_for_old_linked_tools
+%patch50 -p1 -b .rhel_config
+%patch51 -p1 -b .build_make_generate3
 
 %build
 %global _default_pid_dir /run
@@ -962,7 +987,7 @@ the device-mapper event library.
 %package -n %{boom_pkgname}
 Summary: %{boom_summary}
 Version: %{boom_version}
-Release: %{boom_release}%{?dist}.2%{?scratch}
+Release: %{boom_release}%{?dist}.7%{?scratch}
 License: GPLv2
 Group: System Environment/Base
 BuildArch: noarch
@@ -993,6 +1018,20 @@ This package provides the python2 version of boom.
 %endif
 
 %changelog
+* Tue Apr 09 2019 Marian Csontos - 7:2.02.180-10.el7_6.7
+- Add io_memory_size configuration option.
+- Warn when metadata approaches io_memory_size.
+- Ensure migration_threshold for cache is at least 8 chunks.
+
+* Thu Apr 04 2019 Marian Csontos - 7:2.02.180-10.el7_6.6
+- Reduce max concurrent aios to avoid EMFILE with many devices.
+
+* Mon Mar 18 2019 Marian Csontos - 7:2.02.180-10.el7_6.5
+- boom: Bump release.
+
+* Mon Mar 18 2019 Marian Csontos - 7:2.02.180-10.el7_6.4
+- Apply obtain_device_list_from_udev to all libudev calls.
+
 * Mon Dec 17 2018 Marian Csontos - 7:2.02.180-10.el7_6.3
 - Fix component detection for MD RAID version 1.0 and 0.90.
 - Use sync io if async io_setup fails, or when use_aio=0 is set in config.