From 3c4a430d20c055e6474295f4dd83c8bc6585b7f7 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Sep 29 2020 07:01:18 +0000 Subject: import lvm2-2.02.187-6.el7 --- diff --git a/.gitignore b/.gitignore index f69a918..6df63a2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/LVM2.2.02.186.tgz -SOURCES/boom-0.9.tar.gz +SOURCES/LVM2.2.02.187.tgz +SOURCES/boom-1.2.tar.gz diff --git a/.lvm2.metadata b/.lvm2.metadata index e9512fd..76461fd 100644 --- a/.lvm2.metadata +++ b/.lvm2.metadata @@ -1,2 +1,2 @@ -7a3834ca1ddaa7c4edc3863f18ec604f45722c65 SOURCES/LVM2.2.02.186.tgz -dd96613e238f342641b5be8977ee8598662e8ab9 SOURCES/boom-0.9.tar.gz +2a846b1a766aad5e04e2835a510c84ecc7ceb28d SOURCES/LVM2.2.02.187.tgz +67070610fcc9a8ee2c4fcdb165947b1d30599342 SOURCES/boom-1.2.tar.gz diff --git a/SOURCES/boom-etc-Remove-executable-permission-from-etc-default-bo.patch b/SOURCES/boom-etc-Remove-executable-permission-from-etc-default-bo.patch new file mode 100644 index 0000000..d454da1 --- /dev/null +++ b/SOURCES/boom-etc-Remove-executable-permission-from-etc-default-bo.patch @@ -0,0 +1,22 @@ + etc/default/boom | 0 + tests/bootloader_configs/boom_off/etc/default/boom | 0 + tests/bootloader_configs/boom_on/etc/default/boom | 0 + tests/bootloader_configs/no_grub_d/etc/default/boom | 0 + 4 files changed, 0 insertions(+), 0 deletions(-) + mode change 100755 => 100644 etc/default/boom + mode change 100755 => 100644 tests/bootloader_configs/boom_off/etc/default/boom + mode change 100755 => 100644 tests/bootloader_configs/boom_on/etc/default/boom + mode change 100755 => 100644 tests/bootloader_configs/no_grub_d/etc/default/boom + +diff --git a/etc/default/boom b/etc/default/boom +old mode 100755 +new mode 100644 +diff --git a/tests/bootloader_configs/boom_off/etc/default/boom b/tests/bootloader_configs/boom_off/etc/default/boom +old mode 100755 +new mode 100644 +diff --git a/tests/bootloader_configs/boom_on/etc/default/boom b/tests/bootloader_configs/boom_on/etc/default/boom +old mode 100755 +new mode 100644 +diff --git a/tests/bootloader_configs/no_grub_d/etc/default/boom b/tests/bootloader_configs/no_grub_d/etc/default/boom +old mode 100755 +new mode 100644 diff --git a/SOURCES/boom-man-Fix-line-starting-with.patch b/SOURCES/boom-man-Fix-line-starting-with.patch new file mode 100644 index 0000000..f747576 --- /dev/null +++ b/SOURCES/boom-man-Fix-line-starting-with.patch @@ -0,0 +1,18 @@ + man/man8/boom.8 | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/man/man8/boom.8 b/man/man8/boom.8 +index 7b862e7..9eee048 100644 +--- a/man/man8/boom.8 ++++ b/man/man8/boom.8 +@@ -1168,8 +1168,8 @@ describing the properties of the configured host profiles. + + The list of fields to display is given with \fB--options\fP as a comma + separated list of field names. To obtain a list of available fields run +-'\fBboom host list -o help\fP'. If the list of fields begins with the +-'\fB+\fP' character the specified fields are appended to the default ++\&'\fBboom host list -o help\fP'. If the list of fields begins with the ++\&'\fB+\fP' character the specified fields are appended to the default + field list. Otherwise the given list of fields replaces the default set + of report fields. 
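The first patch deleted below, lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch (removed here, presumably because the rebase to 2.02.187 already carries it), describes two rules in its commit message: treat any reported sector size other than 512 or 4096 as 512, and round a length-limited write up to a whole sector only when that does not push it past the bcache block. The following is a minimal standalone sketch of those two rules, not code from the patch; the helper names effective_sector_size and round_limited_write are hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

/* Pick the effective sector size: prefer 512, then 4096, else fall back to 512. */
static unsigned effective_sector_size(unsigned physical, unsigned logical)
{
	if (physical == 512 || logical == 512)
		return 512;
	if (physical == 4096 || logical == 4096)
		return 4096;
	return 512;
}

/* Round a length-limited write up to a whole sector, but never past the block. */
static uint64_t round_limited_write(uint64_t limit_nbytes, uint64_t block_nbytes,
				    unsigned sector_size)
{
	uint64_t extra_nbytes = 0;

	if (limit_nbytes % sector_size)
		extra_nbytes = sector_size - (limit_nbytes % sector_size);

	if (limit_nbytes + extra_nbytes > block_nbytes)
		extra_nbytes = 0;	/* the "Skip extending write" case in the patch */

	return limit_nbytes + extra_nbytes;
}

int main(void)
{
	unsigned ss = effective_sector_size(4096, 4096);

	/* 1000 bytes left before the limit, 128KiB bcache block: write 4096 bytes. */
	printf("%u %llu\n", ss,
	       (unsigned long long) round_limited_write(1000, 131072, ss));
	return 0;
}
```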
+ diff --git a/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch b/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch deleted file mode 100644 index b1ca921..0000000 --- a/SOURCES/lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 12041b03584bb2fa36f797ece4b0f9a41760a303 Mon Sep 17 00:00:00 2001 -From: David Teigland -Date: Wed, 24 Jul 2019 11:32:13 -0500 -Subject: [PATCH 2/4] Fix rounding writes up to sector size - -Do this at two levels, although one would be enough to -fix the problem seen recently: - -- Ignore any reported sector size other than 512 of 4096. - If either sector size (physical or logical) is reported - as 512, then use 512. If neither are reported as 512, - and one or the other is reported as 4096, then use 4096. - If neither is reported as either 512 or 4096, then use 512. - -- When rounding up a limited write in bcache to be a multiple - of the sector size, check that the resulting write size is - not larger than the bcache block itself. (This shouldn't - happen if the sector size is 512 or 4096.) - -(cherry picked from commit 7550665ba49ac7d497d5b212e14b69298ef01361) - -Conflicts: - lib/device/dev-io.c - -(cherry picked from commit 44c460954be5c63cf5338bd9151344fe2626565f) ---- - lib/device/bcache.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 87 insertions(+), 2 deletions(-) - -diff --git a/lib/device/bcache.c b/lib/device/bcache.c -index b64707e..77d1543 100644 ---- a/lib/device/bcache.c -+++ b/lib/device/bcache.c -@@ -169,6 +169,7 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - sector_t offset; - sector_t nbytes; - sector_t limit_nbytes; -+ sector_t orig_nbytes; - sector_t extra_nbytes = 0; - - if (((uintptr_t) data) & e->page_mask) { -@@ -191,11 +192,41 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - return false; - } - -+ /* -+ * If the bcache block offset+len goes beyond where lvm is -+ * intending to write, then reduce the len being written -+ * (which is the bcache block size) so we don't write past -+ * the limit set by lvm. If after applying the limit, the -+ * resulting size is not a multiple of the sector size (512 -+ * or 4096) then extend the reduced size to be a multiple of -+ * the sector size (we don't want to write partial sectors.) -+ */ - if (offset + nbytes > _last_byte_offset) { - limit_nbytes = _last_byte_offset - offset; -- if (limit_nbytes % _last_byte_sector_size) -+ -+ if (limit_nbytes % _last_byte_sector_size) { - extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size); - -+ /* -+ * adding extra_nbytes to the reduced nbytes (limit_nbytes) -+ * should make the final write size a multiple of the -+ * sector size. This should never result in a final size -+ * larger than the bcache block size (as long as the bcache -+ * block size is a multiple of the sector size). 
-+ */ -+ if (limit_nbytes + extra_nbytes > nbytes) { -+ log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu", -+ (unsigned long long)offset, -+ (unsigned long long)nbytes, -+ (unsigned long long)limit_nbytes, -+ (unsigned long long)extra_nbytes, -+ (unsigned long long)_last_byte_sector_size); -+ extra_nbytes = 0; -+ } -+ } -+ -+ orig_nbytes = nbytes; -+ - if (extra_nbytes) { - log_debug("Limit write at %llu len %llu to len %llu rounded to %llu", - (unsigned long long)offset, -@@ -210,6 +241,22 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - (unsigned long long)limit_nbytes); - nbytes = limit_nbytes; - } -+ -+ /* -+ * This shouldn't happen, the reduced+extended -+ * nbytes value should never be larger than the -+ * bcache block size. -+ */ -+ if (nbytes > orig_nbytes) { -+ log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu", -+ (unsigned long long)offset, -+ (unsigned long long)orig_nbytes, -+ (unsigned long long)nbytes, -+ (unsigned long long)limit_nbytes, -+ (unsigned long long)extra_nbytes, -+ (unsigned long long)_last_byte_sector_size); -+ return false; -+ } - } - } - -@@ -403,6 +450,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - uint64_t nbytes = len; - sector_t limit_nbytes = 0; - sector_t extra_nbytes = 0; -+ sector_t orig_nbytes = 0; - - if (offset > _last_byte_offset) { - log_error("Limit write at %llu len %llu beyond last byte %llu", -@@ -415,9 +463,30 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - - if (offset + nbytes > _last_byte_offset) { - limit_nbytes = _last_byte_offset - offset; -- if (limit_nbytes % _last_byte_sector_size) -+ -+ if (limit_nbytes % _last_byte_sector_size) { - extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size); - -+ /* -+ * adding extra_nbytes to the reduced nbytes (limit_nbytes) -+ * should make the final write size a multiple of the -+ * sector size. This should never result in a final size -+ * larger than the bcache block size (as long as the bcache -+ * block size is a multiple of the sector size). -+ */ -+ if (limit_nbytes + extra_nbytes > nbytes) { -+ log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu", -+ (unsigned long long)offset, -+ (unsigned long long)nbytes, -+ (unsigned long long)limit_nbytes, -+ (unsigned long long)extra_nbytes, -+ (unsigned long long)_last_byte_sector_size); -+ extra_nbytes = 0; -+ } -+ } -+ -+ orig_nbytes = nbytes; -+ - if (extra_nbytes) { - log_debug("Limit write at %llu len %llu to len %llu rounded to %llu", - (unsigned long long)offset, -@@ -432,6 +501,22 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - (unsigned long long)limit_nbytes); - nbytes = limit_nbytes; - } -+ -+ /* -+ * This shouldn't happen, the reduced+extended -+ * nbytes value should never be larger than the -+ * bcache block size. 
-+ */ -+ if (nbytes > orig_nbytes) { -+ log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu", -+ (unsigned long long)offset, -+ (unsigned long long)orig_nbytes, -+ (unsigned long long)nbytes, -+ (unsigned long long)limit_nbytes, -+ (unsigned long long)extra_nbytes, -+ (unsigned long long)_last_byte_sector_size); -+ return false; -+ } - } - - where = offset; --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_02_187-WHATS_NEW-vgcreate-vgextend-logical-block-size.patch b/SOURCES/lvm2-2_02_187-WHATS_NEW-vgcreate-vgextend-logical-block-size.patch deleted file mode 100644 index 0246389..0000000 --- a/SOURCES/lvm2-2_02_187-WHATS_NEW-vgcreate-vgextend-logical-block-size.patch +++ /dev/null @@ -1,15 +0,0 @@ - WHATS_NEW | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/WHATS_NEW b/WHATS_NEW -index cf2ec3e..d99f183 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -1,3 +1,7 @@ -+Version 2.02.187 - -+=================================== -+ Prevent creating VGs with PVs with different logical block sizes. -+ - Version 2.02.186 - 27th August 2019 - =================================== - Improve internal removal of cached devices. diff --git a/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch b/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch deleted file mode 100644 index 50267c5..0000000 --- a/SOURCES/lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 165dfc7cf803e5e00d7239e2521582a9c9838178 Mon Sep 17 00:00:00 2001 -From: Marian Csontos -Date: Wed, 4 Mar 2020 13:22:10 +0100 -Subject: [PATCH 3/4] bcache: Fix memory leak in error path - -(cherry picked from commit deaf304ee6d88cd47632a345b92b3949cd06d752) ---- - lib/device/bcache.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/lib/device/bcache.c b/lib/device/bcache.c -index 77d1543..a74b6b3 100644 ---- a/lib/device/bcache.c -+++ b/lib/device/bcache.c -@@ -515,6 +515,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - (unsigned long long)limit_nbytes, - (unsigned long long)extra_nbytes, - (unsigned long long)_last_byte_sector_size); -+ free(io); - return false; - } - } --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_02_187-bcache-Fix-overwriting-headers-on-incorrect-device.patch b/SOURCES/lvm2-2_02_187-bcache-Fix-overwriting-headers-on-incorrect-device.patch deleted file mode 100644 index 5a67c22..0000000 --- a/SOURCES/lvm2-2_02_187-bcache-Fix-overwriting-headers-on-incorrect-device.patch +++ /dev/null @@ -1,5865 +0,0 @@ - Makefile.in | 18 +- - base/Makefile | 38 + - base/data-struct/radix-tree-adaptive.c | 1297 +++++++++++++++++++++++++ - base/data-struct/radix-tree-simple.c | 256 +++++ - base/data-struct/radix-tree.c | 851 +--------------- - base/data-struct/radix-tree.h | 6 + - lib/device/bcache.c | 384 ++++---- - lib/device/bcache.h | 8 +- - lib/label/label.c | 42 +- - make.tmpl.in | 12 +- - test/unit/bcache_t.c | 98 +- - test/unit/bcache_utils_t.c | 3 +- - test/unit/radix_tree_t.c | 399 +++++++- - test/unit/rt_case1.c | 1669 ++++++++++++++++++++++++++++++++ - test/unit/unit-test.sh | 2 - - 15 files changed, 3993 insertions(+), 1090 deletions(-) - create mode 100644 base/Makefile - create mode 100644 base/data-struct/radix-tree-adaptive.c - create mode 100644 base/data-struct/radix-tree-simple.c - create mode 100644 test/unit/rt_case1.c - -diff --git a/Makefile.in b/Makefile.in -index 29d5bed..3c8f8c8 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -51,18 +51,20 @@ DISTCLEAN_TARGETS += config.cache 
config.log config.status make.tmpl - - include make.tmpl - --libdm: include --libdaemon: include --lib: libdm libdaemon --liblvm: lib --daemons: lib libdaemon tools --tools: lib libdaemon device-mapper -+include $(top_srcdir)/base/Makefile -+ -+libdm: include $(top_builddir)/base/libbase.a -+libdaemon: include $(top_builddir)/base/libbase.a -+lib: libdm libdaemon $(top_builddir)/base/libbase.a -+liblvm: lib $(top_builddir)/base/libbase.a -+daemons: lib libdaemon tools $(top_builddir)/base/libbase.a -+tools: lib libdaemon device-mapper $(top_builddir)/base/libbase.a - po: tools daemons - man: tools - all_man: tools - scripts: liblvm libdm --test: tools daemons --unit-test: lib -+test: tools daemons $(top_builddir)/base/libbase.a -+unit-test: lib $(top_builddir)/base/libbase.a - run-unit-test: unit-test - - lib.device-mapper: include.device-mapper -diff --git a/base/Makefile b/base/Makefile -new file mode 100644 -index 0000000..056ea59 ---- /dev/null -+++ b/base/Makefile -@@ -0,0 +1,38 @@ -+# Copyright (C) 2018 Red Hat, Inc. All rights reserved. -+# -+# This file is part of the device-mapper userspace tools. -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions -+# of the GNU Lesser General Public License v.2.1. -+# -+# You should have received a copy of the GNU Lesser General Public License -+# along with this program; if not, write to the Free Software Foundation, -+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ -+# Uncomment this to build the simple radix tree. You'll need to make clean too. -+# Comment to build the advanced radix tree. -+#base/data-struct/radix-tree.o: CFLAGS += -DSIMPLE_RADIX_TREE -+ -+# NOTE: this Makefile only works as 'include' for toplevel Makefile -+# which defined all top_* variables -+ -+BASE_SOURCE=\ -+ base/data-struct/radix-tree.c -+ -+BASE_TARGET = base/libbase.a -+BASE_DEPENDS = $(BASE_SOURCE:%.c=%.d) -+BASE_OBJECTS = $(BASE_SOURCE:%.c=%.o) -+CLEAN_TARGETS += $(BASE_DEPENDS) $(BASE_OBJECTS) \ -+ $(BASE_SOURCE:%.c=%.gcda) \ -+ $(BASE_SOURCE:%.c=%.gcno) \ -+ $(BASE_TARGET) -+ -+$(BASE_TARGET): $(BASE_OBJECTS) -+ @echo " [AR] $@" -+ $(Q) $(RM) $@ -+ $(Q) $(AR) rsv $@ $(BASE_OBJECTS) > /dev/null -+ -+ifeq ("$(DEPENDS)","yes") -+-include $(BASE_DEPENDS) -+endif -diff --git a/base/data-struct/radix-tree-adaptive.c b/base/data-struct/radix-tree-adaptive.c -new file mode 100644 -index 0000000..b9ba417 ---- /dev/null -+++ b/base/data-struct/radix-tree-adaptive.c -@@ -0,0 +1,1297 @@ -+// Copyright (C) 2018 Red Hat, Inc. All rights reserved. -+// -+// This file is part of LVM2. -+// -+// This copyrighted material is made available to anyone wishing to use, -+// modify, copy, or redistribute it subject to the terms and conditions -+// of the GNU Lesser General Public License v.2.1. 
-+// -+// You should have received a copy of the GNU Lesser General Public License -+// along with this program; if not, write to the Free Software Foundation, -+// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ -+#include "radix-tree.h" -+ -+#include "base/memory/container_of.h" -+#include "base/memory/zalloc.h" -+ -+#include -+#include -+#include -+#include -+ -+//---------------------------------------------------------------- -+ -+enum node_type { -+ UNSET = 0, -+ VALUE, -+ VALUE_CHAIN, -+ PREFIX_CHAIN, -+ NODE4, -+ NODE16, -+ NODE48, -+ NODE256 -+}; -+ -+struct value { -+ enum node_type type; -+ union radix_value value; -+}; -+ -+// This is used for entries that have a key which is a prefix of another key. -+struct value_chain { -+ union radix_value value; -+ struct value child; -+}; -+ -+struct prefix_chain { -+ struct value child; -+ unsigned len; -+ uint8_t prefix[0]; -+}; -+ -+struct node4 { -+ uint32_t nr_entries; -+ uint8_t keys[4]; -+ struct value values[4]; -+}; -+ -+struct node16 { -+ uint32_t nr_entries; -+ uint8_t keys[16]; -+ struct value values[16]; -+}; -+ -+struct node48 { -+ uint32_t nr_entries; -+ uint8_t keys[256]; -+ struct value values[48]; -+}; -+ -+struct node256 { -+ uint32_t nr_entries; -+ struct value values[256]; -+}; -+ -+struct radix_tree { -+ unsigned nr_entries; -+ struct value root; -+ radix_value_dtr dtr; -+ void *dtr_context; -+}; -+ -+//---------------------------------------------------------------- -+ -+struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context) -+{ -+ struct radix_tree *rt = malloc(sizeof(*rt)); -+ -+ if (rt) { -+ rt->nr_entries = 0; -+ rt->root.type = UNSET; -+ rt->dtr = dtr; -+ rt->dtr_context = dtr_context; -+ } -+ -+ return rt; -+} -+ -+static inline void _dtr(struct radix_tree *rt, union radix_value v) -+{ -+ if (rt->dtr) -+ rt->dtr(rt->dtr_context, v); -+} -+ -+// Returns the number of values removed -+static unsigned _free_node(struct radix_tree *rt, struct value v) -+{ -+ unsigned i, nr = 0; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ switch (v.type) { -+ case UNSET: -+ break; -+ -+ case VALUE: -+ _dtr(rt, v.value); -+ nr = 1; -+ break; -+ -+ case VALUE_CHAIN: -+ vc = v.value.ptr; -+ _dtr(rt, vc->value); -+ nr = 1 + _free_node(rt, vc->child); -+ free(vc); -+ break; -+ -+ case PREFIX_CHAIN: -+ pc = v.value.ptr; -+ nr = _free_node(rt, pc->child); -+ free(pc); -+ break; -+ -+ case NODE4: -+ n4 = (struct node4 *) v.value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) -+ nr += _free_node(rt, n4->values[i]); -+ free(n4); -+ break; -+ -+ case NODE16: -+ n16 = (struct node16 *) v.value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) -+ nr += _free_node(rt, n16->values[i]); -+ free(n16); -+ break; -+ -+ case NODE48: -+ n48 = (struct node48 *) v.value.ptr; -+ for (i = 0; i < n48->nr_entries; i++) -+ nr += _free_node(rt, n48->values[i]); -+ free(n48); -+ break; -+ -+ case NODE256: -+ n256 = (struct node256 *) v.value.ptr; -+ for (i = 0; i < 256; i++) -+ nr += _free_node(rt, n256->values[i]); -+ free(n256); -+ break; -+ } -+ -+ return nr; -+} -+ -+void radix_tree_destroy(struct radix_tree *rt) -+{ -+ _free_node(rt, rt->root); -+ free(rt); -+} -+ -+unsigned radix_tree_size(struct radix_tree *rt) -+{ -+ return rt->nr_entries; -+} -+ -+static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv); -+ -+static bool _insert_unset(struct radix_tree *rt, 
struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ unsigned len = ke - kb; -+ -+ if (!len) { -+ // value -+ v->type = VALUE; -+ v->value = rv; -+ rt->nr_entries++; -+ } else { -+ // prefix -> value -+ struct prefix_chain *pc = zalloc(sizeof(*pc) + len); -+ if (!pc) -+ return false; -+ -+ pc->child.type = VALUE; -+ pc->child.value = rv; -+ pc->len = len; -+ memcpy(pc->prefix, kb, len); -+ v->type = PREFIX_CHAIN; -+ v->value.ptr = pc; -+ rt->nr_entries++; -+ } -+ -+ return true; -+} -+ -+static bool _insert_value(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ unsigned len = ke - kb; -+ -+ if (!len) -+ // overwrite -+ v->value = rv; -+ -+ else { -+ // value_chain -> value -+ struct value_chain *vc = zalloc(sizeof(*vc)); -+ if (!vc) -+ return false; -+ -+ vc->value = v->value; -+ if (!_insert(rt, &vc->child, kb, ke, rv)) { -+ free(vc); -+ return false; -+ } -+ -+ v->type = VALUE_CHAIN; -+ v->value.ptr = vc; -+ } -+ -+ return true; -+} -+ -+static bool _insert_value_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct value_chain *vc = v->value.ptr; -+ return _insert(rt, &vc->child, kb, ke, rv); -+} -+ -+static unsigned min(unsigned lhs, unsigned rhs) -+{ -+ if (lhs <= rhs) -+ return lhs; -+ else -+ return rhs; -+} -+ -+static bool _insert_prefix_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct prefix_chain *pc = v->value.ptr; -+ -+ if (!pc->len) { -+ v->type = VALUE; -+ v->value = rv; -+ -+ } else if (*kb == pc->prefix[0]) { -+ // There's a common prefix let's split the chain into two and -+ // recurse. -+ struct prefix_chain *pc2; -+ unsigned i, len = min(pc->len, ke - kb); -+ -+ for (i = 0; i < len; i++) -+ if (kb[i] != pc->prefix[i]) -+ break; -+ -+ if (!(pc2 = zalloc(sizeof(*pc2) + pc->len - i))) -+ return false; -+ pc2->len = pc->len - i; -+ memmove(pc2->prefix, pc->prefix + i, pc2->len); -+ pc2->child = pc->child; -+ -+ // FIXME: this trashes pc so we can't back out -+ pc->child.type = PREFIX_CHAIN; -+ pc->child.value.ptr = pc2; -+ pc->len = i; -+ -+ if (!_insert(rt, &pc->child, kb + i, ke, rv)) { -+ free(pc2); -+ return false; -+ } -+ -+ } else { -+ // Stick an n4 in front. 
-+ struct node4 *n4 = zalloc(sizeof(*n4)); -+ if (!n4) -+ return false; -+ -+ n4->keys[0] = pc->prefix[0]; -+ if (pc->len == 1) { -+ n4->values[0] = pc->child; -+ free(pc); -+ } else { -+ memmove(pc->prefix, pc->prefix + 1, pc->len - 1); -+ pc->len--; -+ n4->values[0] = *v; -+ } -+ -+ n4->keys[1] = *kb; -+ if (!_insert(rt, n4->values + 1, kb + 1, ke, rv)) { -+ free(n4); -+ return false; -+ } -+ -+ n4->nr_entries = 2; -+ -+ v->type = NODE4; -+ v->value.ptr = n4; -+ } -+ -+ return true; -+} -+ -+static bool _insert_node4(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct node4 *n4 = v->value.ptr; -+ if (n4->nr_entries == 4) { -+ struct node16 *n16 = zalloc(sizeof(*n16)); -+ if (!n16) -+ return false; -+ -+ n16->nr_entries = 5; -+ memcpy(n16->keys, n4->keys, sizeof(n4->keys)); -+ memcpy(n16->values, n4->values, sizeof(n4->values)); -+ -+ n16->keys[4] = *kb; -+ if (!_insert(rt, n16->values + 4, kb + 1, ke, rv)) { -+ free(n16); -+ return false; -+ } -+ free(n4); -+ v->type = NODE16; -+ v->value.ptr = n16; -+ } else { -+ if (!_insert(rt, n4->values + n4->nr_entries, kb + 1, ke, rv)) -+ return false; -+ -+ n4->keys[n4->nr_entries] = *kb; -+ n4->nr_entries++; -+ } -+ return true; -+} -+ -+static bool _insert_node16(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct node16 *n16 = v->value.ptr; -+ -+ if (n16->nr_entries == 16) { -+ unsigned i; -+ struct node48 *n48 = zalloc(sizeof(*n48)); -+ -+ if (!n48) -+ return false; -+ -+ n48->nr_entries = 17; -+ /* coverity[bad_memset] intentional use of '0' */ -+ memset(n48->keys, 48, sizeof(n48->keys)); -+ -+ for (i = 0; i < 16; i++) { -+ n48->keys[n16->keys[i]] = i; -+ n48->values[i] = n16->values[i]; -+ } -+ -+ n48->keys[*kb] = 16; -+ if (!_insert(rt, n48->values + 16, kb + 1, ke, rv)) { -+ free(n48); -+ return false; -+ } -+ -+ free(n16); -+ v->type = NODE48; -+ v->value.ptr = n48; -+ } else { -+ if (!_insert(rt, n16->values + n16->nr_entries, kb + 1, ke, rv)) -+ return false; -+ n16->keys[n16->nr_entries] = *kb; -+ n16->nr_entries++; -+ } -+ -+ return true; -+} -+ -+static bool _insert_node48(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct node48 *n48 = v->value.ptr; -+ if (n48->nr_entries == 48) { -+ unsigned i; -+ struct node256 *n256 = zalloc(sizeof(*n256)); -+ if (!n256) -+ return false; -+ -+ n256->nr_entries = 49; -+ for (i = 0; i < 256; i++) { -+ if (n48->keys[i] < 48) -+ n256->values[i] = n48->values[n48->keys[i]]; -+ } -+ -+ if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv)) { -+ free(n256); -+ return false; -+ } -+ -+ free(n48); -+ v->type = NODE256; -+ v->value.ptr = n256; -+ -+ } else { -+ if (!_insert(rt, n48->values + n48->nr_entries, kb + 1, ke, rv)) -+ return false; -+ -+ n48->keys[*kb] = n48->nr_entries; -+ n48->nr_entries++; -+ } -+ -+ return true; -+} -+ -+static bool _insert_node256(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct node256 *n256 = v->value.ptr; -+ bool r, was_unset = n256->values[*kb].type == UNSET; -+ -+ r = _insert(rt, n256->values + *kb, kb + 1, ke, rv); -+ if (r && was_unset) -+ n256->nr_entries++; -+ -+ return r; -+} -+ -+// FIXME: the tree should not be touched if insert fails (eg, OOM) -+static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ if (kb == ke) { -+ if (v->type == UNSET) { -+ v->type = VALUE; -+ v->value = rv; -+ rt->nr_entries++; -+ 
-+ } else if (v->type == VALUE) { -+ v->value = rv; -+ -+ } else { -+ struct value_chain *vc = zalloc(sizeof(*vc)); -+ if (!vc) -+ return false; -+ -+ vc->value = rv; -+ vc->child = *v; -+ v->type = VALUE_CHAIN; -+ v->value.ptr = vc; -+ rt->nr_entries++; -+ } -+ return true; -+ } -+ -+ switch (v->type) { -+ case UNSET: -+ return _insert_unset(rt, v, kb, ke, rv); -+ -+ case VALUE: -+ return _insert_value(rt, v, kb, ke, rv); -+ -+ case VALUE_CHAIN: -+ return _insert_value_chain(rt, v, kb, ke, rv); -+ -+ case PREFIX_CHAIN: -+ return _insert_prefix_chain(rt, v, kb, ke, rv); -+ -+ case NODE4: -+ return _insert_node4(rt, v, kb, ke, rv); -+ -+ case NODE16: -+ return _insert_node16(rt, v, kb, ke, rv); -+ -+ case NODE48: -+ return _insert_node48(rt, v, kb, ke, rv); -+ -+ case NODE256: -+ return _insert_node256(rt, v, kb, ke, rv); -+ } -+ -+ // can't get here -+ return false; -+} -+ -+struct lookup_result { -+ struct value *v; -+ uint8_t *kb; -+}; -+ -+static struct lookup_result _lookup_prefix(struct value *v, uint8_t *kb, uint8_t *ke) -+{ -+ unsigned i; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ if (kb == ke) -+ return (struct lookup_result) {.v = v, .kb = kb}; -+ -+ switch (v->type) { -+ case UNSET: -+ case VALUE: -+ break; -+ -+ case VALUE_CHAIN: -+ vc = v->value.ptr; -+ return _lookup_prefix(&vc->child, kb, ke); -+ -+ case PREFIX_CHAIN: -+ pc = v->value.ptr; -+ if (ke - kb < pc->len) -+ return (struct lookup_result) {.v = v, .kb = kb}; -+ -+ for (i = 0; i < pc->len; i++) -+ if (kb[i] != pc->prefix[i]) -+ return (struct lookup_result) {.v = v, .kb = kb}; -+ -+ return _lookup_prefix(&pc->child, kb + pc->len, ke); -+ -+ case NODE4: -+ n4 = v->value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) -+ if (n4->keys[i] == *kb) -+ return _lookup_prefix(n4->values + i, kb + 1, ke); -+ break; -+ -+ case NODE16: -+ // FIXME: use binary search or simd? -+ n16 = v->value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) -+ if (n16->keys[i] == *kb) -+ return _lookup_prefix(n16->values + i, kb + 1, ke); -+ break; -+ -+ case NODE48: -+ n48 = v->value.ptr; -+ i = n48->keys[*kb]; -+ if (i < 48) -+ return _lookup_prefix(n48->values + i, kb + 1, ke); -+ break; -+ -+ case NODE256: -+ n256 = v->value.ptr; -+ if (n256->values[*kb].type != UNSET) -+ return _lookup_prefix(n256->values + *kb, kb + 1, ke); -+ break; -+ } -+ -+ return (struct lookup_result) {.v = v, .kb = kb}; -+} -+ -+bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value rv) -+{ -+ struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -+ return _insert(rt, lr.v, lr.kb, ke, rv); -+} -+ -+// Note the degrade functions also free the original node. 
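As an aside before the node-degrade helpers that the comment above introduces: a minimal, hypothetical usage sketch of the public radix_tree API as it is called in this file (radix_tree_create/insert/lookup/remove/destroy and union radix_value). The include path and the key string are illustrative assumptions, not taken from the patch; the in-tree users of this API in the patch appear to be lib/device/bcache.c and lib/label/label.c.

```c
#include "base/data-struct/radix-tree.h"	/* assumed include path */

#include <stdio.h>
#include <string.h>

int main(void)
{
	struct radix_tree *rt = radix_tree_create(NULL, NULL);	/* no value destructor */
	uint8_t key[] = "vg0/lv0";				/* hypothetical key */
	uint8_t *ke = key + strlen((char *) key);
	union radix_value v = { .n = 42 }, out;

	if (!rt)
		return 1;

	if (!radix_tree_insert(rt, key, ke, v))
		fprintf(stderr, "insert failed\n");

	if (radix_tree_lookup(rt, key, ke, &out))
		printf("found %llu, %u entries\n",
		       (unsigned long long) out.n, radix_tree_size(rt));

	(void) radix_tree_remove(rt, key, ke);
	radix_tree_destroy(rt);

	return 0;
}
```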
-+static void _degrade_to_n4(struct node16 *n16, struct value *result) -+{ -+ struct node4 *n4 = zalloc(sizeof(*n4)); -+ -+ assert(n4 != NULL); -+ -+ n4->nr_entries = n16->nr_entries; -+ memcpy(n4->keys, n16->keys, n16->nr_entries * sizeof(*n4->keys)); -+ memcpy(n4->values, n16->values, n16->nr_entries * sizeof(*n4->values)); -+ free(n16); -+ -+ result->type = NODE4; -+ result->value.ptr = n4; -+} -+ -+static void _degrade_to_n16(struct node48 *n48, struct value *result) -+{ -+ unsigned i, count = 0; -+ struct node16 *n16 = zalloc(sizeof(*n16)); -+ -+ assert(n16 != NULL); -+ -+ n16->nr_entries = n48->nr_entries; -+ for (i = 0; i < 256; i++) { -+ if (n48->keys[i] < 48) { -+ n16->keys[count] = i; -+ n16->values[count] = n48->values[n48->keys[i]]; -+ count++; -+ } -+ } -+ -+ free(n48); -+ -+ result->type = NODE16; -+ result->value.ptr = n16; -+} -+ -+static void _degrade_to_n48(struct node256 *n256, struct value *result) -+{ -+ unsigned i, count = 0; -+ struct node48 *n48 = zalloc(sizeof(*n48)); -+ -+ assert(n48 != NULL); -+ -+ n48->nr_entries = n256->nr_entries; -+ for (i = 0; i < 256; i++) { -+ if (n256->values[i].type == UNSET) -+ n48->keys[i] = 48; -+ -+ else { -+ n48->keys[i] = count; -+ n48->values[count] = n256->values[i]; -+ count++; -+ } -+ } -+ -+ free(n256); -+ -+ result->type = NODE48; -+ result->value.ptr = n48; -+} -+ -+// Removes an entry in an array by sliding the values above it down. -+static void _erase_elt(void *array, size_t obj_size, unsigned count, unsigned idx) -+{ -+ if (idx == (count - 1)) -+ // The simple case -+ return; -+ -+ memmove(((uint8_t *) array) + (obj_size * idx), -+ ((uint8_t *) array) + (obj_size * (idx + 1)), -+ obj_size * (count - idx - 1)); -+ -+ // Zero the now unused last elt (set's v.type to UNSET) -+ memset(((uint8_t *) array) + (count - 1) * obj_size, 0, obj_size); -+} -+ -+static bool _remove(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke) -+{ -+ bool r; -+ unsigned i, j; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ if (kb == ke) { -+ if (root->type == VALUE) { -+ root->type = UNSET; -+ _dtr(rt, root->value); -+ return true; -+ -+ } else if (root->type == VALUE_CHAIN) { -+ vc = root->value.ptr; -+ _dtr(rt, vc->value); -+ memcpy(root, &vc->child, sizeof(*root)); -+ free(vc); -+ return true; -+ -+ } else -+ return false; -+ } -+ -+ switch (root->type) { -+ case UNSET: -+ case VALUE: -+ // this is a value for a prefix of the key -+ return false; -+ -+ case VALUE_CHAIN: -+ vc = root->value.ptr; -+ r = _remove(rt, &vc->child, kb, ke); -+ if (r && (vc->child.type == UNSET)) { -+ root->type = VALUE; -+ root->value = vc->value; -+ free(vc); -+ } -+ return r; -+ -+ case PREFIX_CHAIN: -+ pc = root->value.ptr; -+ if (ke - kb < pc->len) -+ return false; -+ -+ for (i = 0; i < pc->len; i++) -+ if (kb[i] != pc->prefix[i]) -+ return false; -+ -+ r = _remove(rt, &pc->child, kb + pc->len, ke); -+ if (r && pc->child.type == UNSET) { -+ root->type = UNSET; -+ free(pc); -+ } -+ return r; -+ -+ case NODE4: -+ n4 = root->value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) { -+ if (n4->keys[i] == *kb) { -+ r = _remove(rt, n4->values + i, kb + 1, ke); -+ if (r && n4->values[i].type == UNSET) { -+ if (i < n4->nr_entries) { -+ _erase_elt(n4->keys, sizeof(*n4->keys), n4->nr_entries, i); -+ _erase_elt(n4->values, sizeof(*n4->values), n4->nr_entries, i); -+ } -+ -+ n4->nr_entries--; -+ if (!n4->nr_entries) { -+ free(n4); -+ root->type = UNSET; -+ } 
-+ } -+ return r; -+ } -+ } -+ return false; -+ -+ case NODE16: -+ n16 = root->value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) { -+ if (n16->keys[i] == *kb) { -+ r = _remove(rt, n16->values + i, kb + 1, ke); -+ if (r && n16->values[i].type == UNSET) { -+ if (i < n16->nr_entries) { -+ _erase_elt(n16->keys, sizeof(*n16->keys), n16->nr_entries, i); -+ _erase_elt(n16->values, sizeof(*n16->values), n16->nr_entries, i); -+ } -+ -+ n16->nr_entries--; -+ if (n16->nr_entries <= 4) { -+ _degrade_to_n4(n16, root); -+ } -+ } -+ return r; -+ } -+ } -+ return false; -+ -+ case NODE48: -+ n48 = root->value.ptr; -+ i = n48->keys[*kb]; -+ if (i < 48) { -+ r = _remove(rt, n48->values + i, kb + 1, ke); -+ if (r && n48->values[i].type == UNSET) { -+ n48->keys[*kb] = 48; -+ for (j = 0; j < 256; j++) -+ if (n48->keys[j] < 48 && n48->keys[j] > i) -+ n48->keys[j]--; -+ _erase_elt(n48->values, sizeof(*n48->values), n48->nr_entries, i); -+ n48->nr_entries--; -+ if (n48->nr_entries <= 16) -+ _degrade_to_n16(n48, root); -+ } -+ return r; -+ } -+ return false; -+ -+ case NODE256: -+ n256 = root->value.ptr; -+ r = _remove(rt, n256->values + (*kb), kb + 1, ke); -+ if (r && n256->values[*kb].type == UNSET) { -+ n256->nr_entries--; -+ if (n256->nr_entries <= 48) -+ _degrade_to_n48(n256, root); -+ } -+ return r; -+ } -+ -+ return false; -+} -+ -+bool radix_tree_remove(struct radix_tree *rt, uint8_t *key_begin, uint8_t *key_end) -+{ -+ if (_remove(rt, &rt->root, key_begin, key_end)) { -+ rt->nr_entries--; -+ return true; -+ } -+ -+ return false; -+} -+ -+//---------------------------------------------------------------- -+ -+static bool _prefix_chain_matches(struct lookup_result *lr, uint8_t *ke) -+{ -+ // It's possible the top node is a prefix chain, and -+ // the remaining key matches part of it. -+ if (lr->v->type == PREFIX_CHAIN) { -+ unsigned i, rlen = ke - lr->kb; -+ struct prefix_chain *pc = lr->v->value.ptr; -+ if (rlen < pc->len) { -+ for (i = 0; i < rlen; i++) -+ if (pc->prefix[i] != lr->kb[i]) -+ return false; -+ return true; -+ } -+ } -+ -+ return false; -+} -+ -+static bool _remove_subtree(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke, unsigned *count) -+{ -+ bool r; -+ unsigned i, j, len; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ if (kb == ke) { -+ *count += _free_node(rt, *root); -+ root->type = UNSET; -+ return true; -+ } -+ -+ switch (root->type) { -+ case UNSET: -+ case VALUE: -+ // No entries with the given prefix -+ return true; -+ -+ case VALUE_CHAIN: -+ vc = root->value.ptr; -+ r = _remove_subtree(rt, &vc->child, kb, ke, count); -+ if (r && (vc->child.type == UNSET)) { -+ root->type = VALUE; -+ root->value = vc->value; -+ free(vc); -+ } -+ return r; -+ -+ case PREFIX_CHAIN: -+ pc = root->value.ptr; -+ len = min(pc->len, ke - kb); -+ for (i = 0; i < len; i++) -+ if (kb[i] != pc->prefix[i]) -+ return true; -+ -+ r = _remove_subtree(rt, &pc->child, len < pc->len ? 
ke : (kb + pc->len), ke, count); -+ if (r && pc->child.type == UNSET) { -+ root->type = UNSET; -+ free(pc); -+ } -+ return r; -+ -+ case NODE4: -+ n4 = root->value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) { -+ if (n4->keys[i] == *kb) { -+ r = _remove_subtree(rt, n4->values + i, kb + 1, ke, count); -+ if (r && n4->values[i].type == UNSET) { -+ if (i < n4->nr_entries) { -+ _erase_elt(n4->keys, sizeof(*n4->keys), n4->nr_entries, i); -+ _erase_elt(n4->values, sizeof(*n4->values), n4->nr_entries, i); -+ } -+ -+ n4->nr_entries--; -+ if (!n4->nr_entries) { -+ free(n4); -+ root->type = UNSET; -+ } -+ } -+ return r; -+ } -+ } -+ return true; -+ -+ case NODE16: -+ n16 = root->value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) { -+ if (n16->keys[i] == *kb) { -+ r = _remove_subtree(rt, n16->values + i, kb + 1, ke, count); -+ if (r && n16->values[i].type == UNSET) { -+ if (i < n16->nr_entries) { -+ _erase_elt(n16->keys, sizeof(*n16->keys), n16->nr_entries, i); -+ _erase_elt(n16->values, sizeof(*n16->values), n16->nr_entries, i); -+ } -+ -+ n16->nr_entries--; -+ if (n16->nr_entries <= 4) -+ _degrade_to_n4(n16, root); -+ } -+ return r; -+ } -+ } -+ return true; -+ -+ case NODE48: -+ n48 = root->value.ptr; -+ i = n48->keys[*kb]; -+ if (i < 48) { -+ r = _remove_subtree(rt, n48->values + i, kb + 1, ke, count); -+ if (r && n48->values[i].type == UNSET) { -+ n48->keys[*kb] = 48; -+ for (j = 0; j < 256; j++) -+ if (n48->keys[j] < 48 && n48->keys[j] > i) -+ n48->keys[j]--; -+ _erase_elt(n48->values, sizeof(*n48->values), n48->nr_entries, i); -+ n48->nr_entries--; -+ if (n48->nr_entries <= 16) -+ _degrade_to_n16(n48, root); -+ } -+ return r; -+ } -+ return true; -+ -+ case NODE256: -+ n256 = root->value.ptr; -+ if (n256->values[*kb].type == UNSET) -+ return true; // No entries -+ -+ r = _remove_subtree(rt, n256->values + (*kb), kb + 1, ke, count); -+ if (r && n256->values[*kb].type == UNSET) { -+ n256->nr_entries--; -+ if (n256->nr_entries <= 48) -+ _degrade_to_n48(n256, root); -+ } -+ return r; -+ } -+ -+ // Shouldn't get here -+ return false; -+} -+ -+unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke) -+{ -+ unsigned count = 0; -+ -+ if (_remove_subtree(rt, &rt->root, kb, ke, &count)) -+ rt->nr_entries -= count; -+ -+ return count; -+} -+ -+//---------------------------------------------------------------- -+ -+bool radix_tree_lookup(struct radix_tree *rt, -+ uint8_t *kb, uint8_t *ke, union radix_value *result) -+{ -+ struct value_chain *vc; -+ struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -+ if (lr.kb == ke) { -+ switch (lr.v->type) { -+ case VALUE: -+ *result = lr.v->value; -+ return true; -+ -+ case VALUE_CHAIN: -+ vc = lr.v->value.ptr; -+ *result = vc->value; -+ return true; -+ -+ default: -+ return false; -+ } -+ } -+ -+ return false; -+} -+ -+// FIXME: build up the keys too -+static bool _iterate(struct value *v, struct radix_tree_iterator *it) -+{ -+ unsigned i; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ switch (v->type) { -+ case UNSET: -+ // can't happen -+ break; -+ -+ case VALUE: -+ return it->visit(it, NULL, NULL, v->value); -+ -+ case VALUE_CHAIN: -+ vc = v->value.ptr; -+ return it->visit(it, NULL, NULL, vc->value) && _iterate(&vc->child, it); -+ -+ case PREFIX_CHAIN: -+ pc = v->value.ptr; -+ return _iterate(&pc->child, it); -+ -+ case NODE4: -+ n4 = (struct node4 *) v->value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) -+ if 
(!_iterate(n4->values + i, it)) -+ return false; -+ return true; -+ -+ case NODE16: -+ n16 = (struct node16 *) v->value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) -+ if (!_iterate(n16->values + i, it)) -+ return false; -+ return true; -+ -+ case NODE48: -+ n48 = (struct node48 *) v->value.ptr; -+ for (i = 0; i < n48->nr_entries; i++) -+ if (!_iterate(n48->values + i, it)) -+ return false; -+ return true; -+ -+ case NODE256: -+ n256 = (struct node256 *) v->value.ptr; -+ for (i = 0; i < 256; i++) -+ if (n256->values[i].type != UNSET && !_iterate(n256->values + i, it)) -+ return false; -+ return true; -+ } -+ -+ // can't get here -+ return false; -+} -+ -+void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, -+ struct radix_tree_iterator *it) -+{ -+ struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -+ if (lr.kb == ke || _prefix_chain_matches(&lr, ke)) -+ _iterate(lr.v, it); -+} -+ -+//---------------------------------------------------------------- -+// Checks: -+// 1) The number of entries matches rt->nr_entries -+// 2) The number of entries is correct in each node -+// 3) prefix chain len > 0 -+// 4) all unused values are UNSET -+ -+static bool _check_nodes(struct value *v, unsigned *count) -+{ -+ uint64_t bits; -+ unsigned i, ncount; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ switch (v->type) { -+ case UNSET: -+ return true; -+ -+ case VALUE: -+ (*count)++; -+ return true; -+ -+ case VALUE_CHAIN: -+ (*count)++; -+ vc = v->value.ptr; -+ return _check_nodes(&vc->child, count); -+ -+ case PREFIX_CHAIN: -+ pc = v->value.ptr; -+ return _check_nodes(&pc->child, count); -+ -+ case NODE4: -+ n4 = v->value.ptr; -+ for (i = 0; i < n4->nr_entries; i++) -+ if (!_check_nodes(n4->values + i, count)) -+ return false; -+ -+ for (i = n4->nr_entries; i < 4; i++) -+ if (n4->values[i].type != UNSET) { -+ fprintf(stderr, "unused value is not UNSET (n4)\n"); -+ return false; -+ } -+ -+ return true; -+ -+ case NODE16: -+ n16 = v->value.ptr; -+ for (i = 0; i < n16->nr_entries; i++) -+ if (!_check_nodes(n16->values + i, count)) -+ return false; -+ -+ for (i = n16->nr_entries; i < 16; i++) -+ if (n16->values[i].type != UNSET) { -+ fprintf(stderr, "unused value is not UNSET (n16)\n"); -+ return false; -+ } -+ -+ return true; -+ -+ case NODE48: -+ bits = 0; -+ n48 = v->value.ptr; -+ ncount = 0; -+ for (i = 0; i < 256; i++) { -+ if (n48->keys[i] < 48) { -+ if (n48->keys[i] >= n48->nr_entries) { -+ fprintf(stderr, "referencing value past nr_entries (n48)\n"); -+ return false; -+ } -+ -+ if (bits & (1ull << n48->keys[i])) { -+ fprintf(stderr, "duplicate entry (n48) %u\n", (unsigned) n48->keys[i]); -+ return false; -+ } -+ bits = bits | (1ull << n48->keys[i]); -+ ncount++; -+ -+ if (!_check_nodes(n48->values + n48->keys[i], count)) -+ return false; -+ } -+ } -+ -+ for (i = 0; i < n48->nr_entries; i++) { -+ if (!(bits & (1ull << i))) { -+ fprintf(stderr, "not all values are referenced (n48)\n"); -+ return false; -+ } -+ } -+ -+ if (ncount != n48->nr_entries) { -+ fprintf(stderr, "incorrect number of entries in n48, n48->nr_entries = %u, actual = %u\n", -+ n48->nr_entries, ncount); -+ return false; -+ } -+ -+ for (i = 0; i < n48->nr_entries; i++) -+ if (n48->values[i].type == UNSET) { -+ fprintf(stderr, "value in UNSET (n48)\n"); -+ return false; -+ } -+ -+ for (i = n48->nr_entries; i < 48; i++) -+ if (n48->values[i].type != UNSET) { -+ fprintf(stderr, "unused value is not UNSET 
(n48)\n"); -+ return false; -+ } -+ -+ return true; -+ -+ case NODE256: -+ n256 = v->value.ptr; -+ -+ ncount = 0; -+ for (i = 0; i < 256; i++) { -+ struct value *v2 = n256->values + i; -+ -+ if (v2->type == UNSET) -+ continue; -+ -+ if (!_check_nodes(v2, count)) -+ return false; -+ -+ ncount++; -+ } -+ -+ if (ncount != n256->nr_entries) { -+ fprintf(stderr, "incorrect number of entries in n256, n256->nr_entries = %u, actual = %u\n", -+ n256->nr_entries, ncount); -+ return false; -+ } -+ -+ return true; -+ -+ default: -+ fprintf(stderr, "unknown value type: %u\n", v->type); -+ } -+ -+ fprintf(stderr, "shouldn't get here\n"); -+ return false; -+} -+ -+bool radix_tree_is_well_formed(struct radix_tree *rt) -+{ -+ unsigned count = 0; -+ -+ if (!_check_nodes(&rt->root, &count)) -+ return false; -+ -+ if (rt->nr_entries != count) { -+ fprintf(stderr, "incorrect entry count: rt->nr_entries = %u, actual = %u\n", -+ rt->nr_entries, count); -+ return false; -+ } -+ -+ return true; -+} -+ -+//---------------------------------------------------------------- -+ -+static void _dump(FILE *out, struct value v, unsigned indent) -+{ -+ unsigned i; -+ struct value_chain *vc; -+ struct prefix_chain *pc; -+ struct node4 *n4; -+ struct node16 *n16; -+ struct node48 *n48; -+ struct node256 *n256; -+ -+ if (v.type == UNSET) -+ return; -+ -+ for (i = 0; i < 2 * indent; i++) -+ fprintf(out, " "); -+ -+ switch (v.type) { -+ case UNSET: -+ // can't happen -+ break; -+ -+ case VALUE: -+ fprintf(out, "\n", (unsigned long long) v.value.n); -+ break; -+ -+ case VALUE_CHAIN: -+ vc = v.value.ptr; -+ fprintf(out, "\n", (unsigned long long) vc->value.n); -+ _dump(out, vc->child, indent + 1); -+ break; -+ -+ case PREFIX_CHAIN: -+ pc = v.value.ptr; -+ fprintf(out, "len; i++) -+ fprintf(out, "%x.", (unsigned) *(pc->prefix + i)); -+ fprintf(out, ">\n"); -+ _dump(out, pc->child, indent + 1); -+ break; -+ -+ case NODE4: -+ n4 = v.value.ptr; -+ fprintf(out, "nr_entries; i++) -+ fprintf(out, "%x ", (unsigned) n4->keys[i]); -+ fprintf(out, ">\n"); -+ -+ for (i = 0; i < n4->nr_entries; i++) -+ _dump(out, n4->values[i], indent + 1); -+ break; -+ -+ case NODE16: -+ n16 = v.value.ptr; -+ fprintf(out, "nr_entries; i++) -+ fprintf(out, "%x ", (unsigned) n16->keys[i]); -+ fprintf(out, ">\n"); -+ -+ for (i = 0; i < n16->nr_entries; i++) -+ _dump(out, n16->values[i], indent + 1); -+ break; -+ -+ case NODE48: -+ n48 = v.value.ptr; -+ fprintf(out, "keys[i] < 48) -+ fprintf(out, "%x ", i); -+ fprintf(out, ">\n"); -+ -+ for (i = 0; i < n48->nr_entries; i++) { -+ assert(n48->values[i].type != UNSET); -+ _dump(out, n48->values[i], indent + 1); -+ } -+ break; -+ -+ case NODE256: -+ n256 = v.value.ptr; -+ fprintf(out, "values[i].type != UNSET) -+ fprintf(out, "%x ", i); -+ fprintf(out, ">\n"); -+ -+ for (i = 0; i < 256; i++) -+ if (n256->values[i].type != UNSET) -+ _dump(out, n256->values[i], indent + 1); -+ break; -+ } -+} -+ -+void radix_tree_dump(struct radix_tree *rt, FILE *out) -+{ -+ _dump(out, rt->root, 0); -+} -+ -+//---------------------------------------------------------------- -diff --git a/base/data-struct/radix-tree-simple.c b/base/data-struct/radix-tree-simple.c -new file mode 100644 -index 0000000..e8a2fdd ---- /dev/null -+++ b/base/data-struct/radix-tree-simple.c -@@ -0,0 +1,256 @@ -+// Copyright (C) 2018 Red Hat, Inc. All rights reserved. -+// -+// This file is part of LVM2. 
-+// -+// This copyrighted material is made available to anyone wishing to use, -+// modify, copy, or redistribute it subject to the terms and conditions -+// of the GNU Lesser General Public License v.2.1. -+// -+// You should have received a copy of the GNU Lesser General Public License -+// along with this program; if not, write to the Free Software Foundation, -+// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ -+#include "radix-tree.h" -+ -+#include "base/memory/container_of.h" -+#include "base/memory/zalloc.h" -+ -+#include -+#include -+#include -+ -+//---------------------------------------------------------------- -+// This implementation is based around nested binary trees. Very -+// simple (and hopefully correct). -+ -+struct node { -+ struct node *left; -+ struct node *right; -+ -+ uint8_t key; -+ struct node *center; -+ -+ bool has_value; -+ union radix_value value; -+}; -+ -+struct radix_tree { -+ radix_value_dtr dtr; -+ void *dtr_context; -+ -+ struct node *root; -+}; -+ -+struct radix_tree * -+radix_tree_create(radix_value_dtr dtr, void *dtr_context) -+{ -+ struct radix_tree *rt = zalloc(sizeof(*rt)); -+ -+ if (rt) { -+ rt->dtr = dtr; -+ rt->dtr_context = dtr_context; -+ } -+ -+ return rt; -+} -+ -+// Returns the number of entries in the tree -+static unsigned _destroy_tree(struct node *n, radix_value_dtr dtr, void *context) -+{ -+ unsigned r; -+ -+ if (!n) -+ return 0; -+ -+ r = _destroy_tree(n->left, dtr, context); -+ r += _destroy_tree(n->right, dtr, context); -+ r += _destroy_tree(n->center, dtr, context); -+ -+ if (n->has_value) { -+ if (dtr) -+ dtr(context, n->value); -+ r++; -+ } -+ -+ free(n); -+ -+ return r; -+} -+ -+void radix_tree_destroy(struct radix_tree *rt) -+{ -+ _destroy_tree(rt->root, rt->dtr, rt->dtr_context); -+ free(rt); -+} -+ -+static unsigned _count(struct node *n) -+{ -+ unsigned r; -+ -+ if (!n) -+ return 0; -+ -+ r = _count(n->left); -+ r += _count(n->right); -+ r += _count(n->center); -+ -+ if (n->has_value) -+ r++; -+ -+ return r; -+} -+ -+unsigned radix_tree_size(struct radix_tree *rt) -+{ -+ return _count(rt->root); -+} -+ -+static struct node **_lookup(struct node **pn, uint8_t *kb, uint8_t *ke) -+{ -+ struct node *n = *pn; -+ -+ if (!n || (kb == ke)) -+ return pn; -+ -+ if (*kb < n->key) -+ return _lookup(&n->left, kb, ke); -+ -+ else if (*kb > n->key) -+ return _lookup(&n->right, kb, ke); -+ -+ else -+ return _lookup(&n->center, kb + 1, ke); -+} -+ -+static bool _insert(struct node **pn, uint8_t *kb, uint8_t *ke, union radix_value v) -+{ -+ struct node *n = *pn; -+ -+ if (!n) { -+ n = zalloc(sizeof(*n)); -+ if (!n) -+ return false; -+ -+ n->key = *kb; -+ *pn = n; -+ } -+ -+ if (kb == ke) { -+ n->has_value = true; -+ n->value = v; -+ return true; -+ } -+ -+ if (*kb < n->key) -+ return _insert(&n->left, kb, ke, v); -+ -+ else if (*kb > n->key) -+ return _insert(&n->right, kb, ke, v); -+ -+ else -+ return _insert(&n->center, kb + 1, ke, v); -+} -+ -+bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v) -+{ -+ return _insert(&rt->root, kb, ke, v); -+} -+ -+bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke) -+{ -+ struct node **pn = _lookup(&rt->root, kb, ke); -+ struct node *n = *pn; -+ -+ if (!n || !n->has_value) -+ return false; -+ -+ else { -+ if (rt->dtr) -+ rt->dtr(rt->dtr_context, n->value); -+ -+ if (n->left || n->center || n->right) { -+ n->has_value = false; -+ return true; -+ -+ } else { -+ // FIXME: delete parent if this was the last entry -+ free(n); -+ 
*pn = NULL; -+ } -+ -+ return true; -+ } -+} -+ -+unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke) -+{ -+ struct node **pn; -+ unsigned count; -+ -+ pn = _lookup(&rt->root, kb, ke); -+ -+ if (*pn) { -+ count = _destroy_tree(*pn, rt->dtr, rt->dtr_context); -+ *pn = NULL; -+ } -+ -+ return count; -+} -+ -+bool -+radix_tree_lookup(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value *result) -+{ -+ struct node **pn = _lookup(&rt->root, kb, ke); -+ struct node *n = *pn; -+ -+ if (n && n->has_value) { -+ *result = n->value; -+ return true; -+ } else -+ return false; -+} -+ -+static void _iterate(struct node *n, struct radix_tree_iterator *it) -+{ -+ if (!n) -+ return; -+ -+ _iterate(n->left, it); -+ -+ if (n->has_value) -+ // FIXME: fill out the key -+ it->visit(it, NULL, NULL, n->value); -+ -+ _iterate(n->center, it); -+ _iterate(n->right, it); -+} -+ -+void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, -+ struct radix_tree_iterator *it) -+{ -+ if (kb == ke) -+ _iterate(rt->root, it); -+ -+ else { -+ struct node **pn = _lookup(&rt->root, kb, ke); -+ struct node *n = *pn; -+ -+ if (n) { -+ if (n->has_value) -+ it->visit(it, NULL, NULL, n->value); -+ _iterate(n->center, it); -+ } -+ } -+} -+ -+bool radix_tree_is_well_formed(struct radix_tree *rt) -+{ -+ return true; -+} -+ -+void radix_tree_dump(struct radix_tree *rt, FILE *out) -+{ -+} -+ -+//---------------------------------------------------------------- -+ -diff --git a/base/data-struct/radix-tree.c b/base/data-struct/radix-tree.c -index 222b350..52a1a05 100644 ---- a/base/data-struct/radix-tree.c -+++ b/base/data-struct/radix-tree.c -@@ -10,853 +10,12 @@ - // along with this program; if not, write to the Free Software Foundation, - // Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - --#include "radix-tree.h" -- --#include "base/memory/container_of.h" --#include "base/memory/zalloc.h" -- --#include --#include --#include -- - //---------------------------------------------------------------- - --enum node_type { -- UNSET = 0, -- VALUE, -- VALUE_CHAIN, -- PREFIX_CHAIN, -- NODE4, -- NODE16, -- NODE48, -- NODE256 --}; -- --struct value { -- enum node_type type; -- union radix_value value; --}; -- --// This is used for entries that have a key which is a prefix of another key. 
--struct value_chain { -- union radix_value value; -- struct value child; --}; -- --struct prefix_chain { -- struct value child; -- unsigned len; -- uint8_t prefix[0]; --}; -- --struct node4 { -- uint32_t nr_entries; -- uint8_t keys[4]; -- struct value values[4]; --}; -- --struct node16 { -- uint32_t nr_entries; -- uint8_t keys[16]; -- struct value values[16]; --}; -- --struct node48 { -- uint32_t nr_entries; -- uint8_t keys[256]; -- struct value values[48]; --}; -- --struct node256 { -- uint32_t nr_entries; -- struct value values[256]; --}; -- --struct radix_tree { -- unsigned nr_entries; -- struct value root; -- radix_value_dtr dtr; -- void *dtr_context; --}; -- --//---------------------------------------------------------------- -- --struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context) --{ -- struct radix_tree *rt = malloc(sizeof(*rt)); -- -- if (rt) { -- rt->nr_entries = 0; -- rt->root.type = UNSET; -- rt->dtr = dtr; -- rt->dtr_context = dtr_context; -- } -- -- return rt; --} -- --static inline void _dtr(struct radix_tree *rt, union radix_value v) --{ -- if (rt->dtr) -- rt->dtr(rt->dtr_context, v); --} -- --// Returns the number of values removed --static unsigned _free_node(struct radix_tree *rt, struct value v) --{ -- unsigned i, nr = 0; -- struct value_chain *vc; -- struct prefix_chain *pc; -- struct node4 *n4; -- struct node16 *n16; -- struct node48 *n48; -- struct node256 *n256; -- -- switch (v.type) { -- case UNSET: -- break; -- -- case VALUE: -- _dtr(rt, v.value); -- nr = 1; -- break; -- -- case VALUE_CHAIN: -- vc = v.value.ptr; -- _dtr(rt, vc->value); -- nr = 1 + _free_node(rt, vc->child); -- free(vc); -- break; -- -- case PREFIX_CHAIN: -- pc = v.value.ptr; -- nr = _free_node(rt, pc->child); -- free(pc); -- break; -- -- case NODE4: -- n4 = (struct node4 *) v.value.ptr; -- for (i = 0; i < n4->nr_entries; i++) -- nr += _free_node(rt, n4->values[i]); -- free(n4); -- break; -- -- case NODE16: -- n16 = (struct node16 *) v.value.ptr; -- for (i = 0; i < n16->nr_entries; i++) -- nr += _free_node(rt, n16->values[i]); -- free(n16); -- break; -- -- case NODE48: -- n48 = (struct node48 *) v.value.ptr; -- for (i = 0; i < n48->nr_entries; i++) -- nr += _free_node(rt, n48->values[i]); -- free(n48); -- break; -- -- case NODE256: -- n256 = (struct node256 *) v.value.ptr; -- for (i = 0; i < 256; i++) -- nr += _free_node(rt, n256->values[i]); -- free(n256); -- break; -- } -- -- return nr; --} -- --void radix_tree_destroy(struct radix_tree *rt) --{ -- _free_node(rt, rt->root); -- free(rt); --} -- --unsigned radix_tree_size(struct radix_tree *rt) --{ -- return rt->nr_entries; --} -- --static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv); -- --static bool _insert_unset(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- unsigned len = ke - kb; -- -- if (!len) { -- // value -- v->type = VALUE; -- v->value = rv; -- rt->nr_entries++; -- } else { -- // prefix -> value -- struct prefix_chain *pc = zalloc(sizeof(*pc) + len); -- if (!pc) -- return false; -- -- pc->child.type = VALUE; -- pc->child.value = rv; -- pc->len = len; -- memcpy(pc->prefix, kb, len); -- v->type = PREFIX_CHAIN; -- v->value.ptr = pc; -- rt->nr_entries++; -- } -- -- return true; --} -- --static bool _insert_value(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- unsigned len = ke - kb; -- -- if (!len) -- // overwrite -- v->value = rv; -- -- else { -- // value_chain -> 
value -- struct value_chain *vc = zalloc(sizeof(*vc)); -- if (!vc) -- return false; -- -- vc->value = v->value; -- if (!_insert(rt, &vc->child, kb, ke, rv)) { -- free(vc); -- return false; -- } -- -- v->type = VALUE_CHAIN; -- v->value.ptr = vc; -- } -- -- return true; --} -- --static bool _insert_value_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct value_chain *vc = v->value.ptr; -- return _insert(rt, &vc->child, kb, ke, rv); --} -- --static unsigned min(unsigned lhs, unsigned rhs) --{ -- if (lhs <= rhs) -- return lhs; -- else -- return rhs; --} -- --static bool _insert_prefix_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct prefix_chain *pc = v->value.ptr; -- -- if (*kb == pc->prefix[0]) { -- // There's a common prefix let's split the chain into two and -- // recurse. -- struct prefix_chain *pc2; -- unsigned i, len = min(pc->len, ke - kb); -- -- for (i = 0; i < len; i++) -- if (kb[i] != pc->prefix[i]) -- break; -- -- pc2 = zalloc(sizeof(*pc2) + pc->len - i); -- pc2->len = pc->len - i; -- memmove(pc2->prefix, pc->prefix + i, pc2->len); -- pc2->child = pc->child; -- -- // FIXME: this trashes pc so we can't back out -- pc->child.type = PREFIX_CHAIN; -- pc->child.value.ptr = pc2; -- pc->len = i; -- -- if (!_insert(rt, &pc->child, kb + i, ke, rv)) { -- free(pc2); -- return false; -- } -- -- } else { -- // Stick an n4 in front. -- struct node4 *n4 = zalloc(sizeof(*n4)); -- if (!n4) -- return false; -- -- n4->keys[0] = *kb; -- if (!_insert(rt, n4->values, kb + 1, ke, rv)) { -- free(n4); -- return false; -- } -- -- if (pc->len) { -- n4->keys[1] = pc->prefix[0]; -- if (pc->len == 1) { -- n4->values[1] = pc->child; -- free(pc); -- } else { -- memmove(pc->prefix, pc->prefix + 1, pc->len - 1); -- pc->len--; -- n4->values[1] = *v; -- } -- n4->nr_entries = 2; -- } else -- n4->nr_entries = 1; -- -- v->type = NODE4; -- v->value.ptr = n4; -- } -- -- return true; --} -- --static bool _insert_node4(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct node4 *n4 = v->value.ptr; -- if (n4->nr_entries == 4) { -- struct node16 *n16 = zalloc(sizeof(*n16)); -- if (!n16) -- return false; -- -- n16->nr_entries = 5; -- memcpy(n16->keys, n4->keys, sizeof(n4->keys)); -- memcpy(n16->values, n4->values, sizeof(n4->values)); -- -- n16->keys[4] = *kb; -- if (!_insert(rt, n16->values + 4, kb + 1, ke, rv)) { -- free(n16); -- return false; -- } -- free(n4); -- v->type = NODE16; -- v->value.ptr = n16; -- } else { -- n4 = v->value.ptr; -- if (!_insert(rt, n4->values + n4->nr_entries, kb + 1, ke, rv)) -- return false; -- -- n4->keys[n4->nr_entries] = *kb; -- n4->nr_entries++; -- } -- return true; --} -- --static bool _insert_node16(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct node16 *n16 = v->value.ptr; -- -- if (n16->nr_entries == 16) { -- unsigned i; -- struct node48 *n48 = zalloc(sizeof(*n48)); -- -- if (!n48) -- return false; -- -- n48->nr_entries = 17; -- memset(n48->keys, 48, sizeof(n48->keys)); -- -- for (i = 0; i < 16; i++) { -- n48->keys[n16->keys[i]] = i; -- n48->values[i] = n16->values[i]; -- } -- -- n48->keys[*kb] = 16; -- if (!_insert(rt, n48->values + 16, kb + 1, ke, rv)) { -- free(n48); -- return false; -- } -- -- free(n16); -- v->type = NODE48; -- v->value.ptr = n48; -- } else { -- if (!_insert(rt, n16->values + n16->nr_entries, kb + 1, ke, rv)) -- return false; -- n16->keys[n16->nr_entries] 
= *kb; -- n16->nr_entries++; -- } -- -- return true; --} -- --static bool _insert_node48(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct node48 *n48 = v->value.ptr; -- if (n48->nr_entries == 48) { -- unsigned i; -- struct node256 *n256 = zalloc(sizeof(*n256)); -- if (!n256) -- return false; -- -- for (i = 0; i < 256; i++) { -- if (n48->keys[i] >= 48) -- continue; -- -- n256->values[i] = n48->values[n48->keys[i]]; -- } -- -- if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv)) { -- free(n256); -- return false; -- } -- -- free(n48); -- v->type = NODE256; -- v->value.ptr = n256; -- -- } else { -- if (!_insert(rt, n48->values + n48->nr_entries, kb + 1, ke, rv)) -- return false; -- -- n48->keys[*kb] = n48->nr_entries; -- n48->nr_entries++; -- } -- -- return true; --} -- --static bool _insert_node256(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct node256 *n256 = v->value.ptr; -- bool was_unset = n256->values[*kb].type == UNSET; -- -- if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv)) -- return false; -- -- if (was_unset) -- n256->nr_entries++; -- -- return true; --} -- --// FIXME: the tree should not be touched if insert fails (eg, OOM) --static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- if (kb == ke) { -- if (v->type == UNSET) { -- v->type = VALUE; -- v->value = rv; -- rt->nr_entries++; -- -- } else if (v->type == VALUE) { -- v->value = rv; -- -- } else { -- struct value_chain *vc = zalloc(sizeof(*vc)); -- if (!vc) -- return false; -- -- vc->value = rv; -- vc->child = *v; -- v->type = VALUE_CHAIN; -- v->value.ptr = vc; -- rt->nr_entries++; -- } -- return true; -- } -- -- switch (v->type) { -- case UNSET: -- return _insert_unset(rt, v, kb, ke, rv); -- -- case VALUE: -- return _insert_value(rt, v, kb, ke, rv); -- -- case VALUE_CHAIN: -- return _insert_value_chain(rt, v, kb, ke, rv); -- -- case PREFIX_CHAIN: -- return _insert_prefix_chain(rt, v, kb, ke, rv); -- -- case NODE4: -- return _insert_node4(rt, v, kb, ke, rv); -- -- case NODE16: -- return _insert_node16(rt, v, kb, ke, rv); -- -- case NODE48: -- return _insert_node48(rt, v, kb, ke, rv); -- -- case NODE256: -- return _insert_node256(rt, v, kb, ke, rv); -- } -- -- // can't get here -- return false; --} -- --struct lookup_result { -- struct value *v; -- uint8_t *kb; --}; -- --static struct lookup_result _lookup_prefix(struct value *v, uint8_t *kb, uint8_t *ke) --{ -- unsigned i; -- struct value_chain *vc; -- struct prefix_chain *pc; -- struct node4 *n4; -- struct node16 *n16; -- struct node48 *n48; -- struct node256 *n256; -- -- if (kb == ke) -- return (struct lookup_result) {.v = v, .kb = kb}; -- -- switch (v->type) { -- case UNSET: -- case VALUE: -- break; -- -- case VALUE_CHAIN: -- vc = v->value.ptr; -- return _lookup_prefix(&vc->child, kb, ke); -- -- case PREFIX_CHAIN: -- pc = v->value.ptr; -- if (ke - kb < pc->len) -- return (struct lookup_result) {.v = v, .kb = kb}; -- -- for (i = 0; i < pc->len; i++) -- if (kb[i] != pc->prefix[i]) -- return (struct lookup_result) {.v = v, .kb = kb}; -- -- return _lookup_prefix(&pc->child, kb + pc->len, ke); -- -- case NODE4: -- n4 = v->value.ptr; -- for (i = 0; i < n4->nr_entries; i++) -- if (n4->keys[i] == *kb) -- return _lookup_prefix(n4->values + i, kb + 1, ke); -- break; -- -- case NODE16: -- // FIXME: use binary search or simd? 
-- n16 = v->value.ptr; -- for (i = 0; i < n16->nr_entries; i++) -- if (n16->keys[i] == *kb) -- return _lookup_prefix(n16->values + i, kb + 1, ke); -- break; -- -- case NODE48: -- n48 = v->value.ptr; -- i = n48->keys[*kb]; -- if (i < 48) -- return _lookup_prefix(n48->values + i, kb + 1, ke); -- break; -- -- case NODE256: -- n256 = v->value.ptr; -- return _lookup_prefix(n256->values + *kb, kb + 1, ke); -- } -- -- return (struct lookup_result) {.v = v, .kb = kb}; --} -- --bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value rv) --{ -- struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -- return _insert(rt, lr.v, lr.kb, ke, rv); --} -- --// Note the degrade functions also free the original node. --static void _degrade_to_n4(struct node16 *n16, struct value *result) --{ -- struct node4 *n4 = zalloc(sizeof(*n4)); -- -- n4->nr_entries = n16->nr_entries; -- memcpy(n4->keys, n16->keys, n16->nr_entries * sizeof(*n4->keys)); -- memcpy(n4->values, n16->values, n16->nr_entries * sizeof(*n4->values)); -- free(n16); -- -- result->type = NODE4; -- result->value.ptr = n4; --} -- --static void _degrade_to_n16(struct node48 *n48, struct value *result) --{ -- struct node4 *n16 = zalloc(sizeof(*n16)); -- -- n16->nr_entries = n48->nr_entries; -- memcpy(n16->keys, n48->keys, n48->nr_entries * sizeof(*n16->keys)); -- memcpy(n16->values, n48->values, n48->nr_entries * sizeof(*n16->values)); -- free(n48); -- -- result->type = NODE16; -- result->value.ptr = n16; --} -- --static void _degrade_to_n48(struct node256 *n256, struct value *result) --{ -- unsigned i, count = 0; -- struct node4 *n48 = zalloc(sizeof(*n48)); -- -- n48->nr_entries = n256->nr_entries; -- for (i = 0; i < 256; i++) { -- if (n256->values[i].type == UNSET) -- continue; -- -- n48->keys[count] = i; -- n48->values[count] = n256->values[i]; -- count++; -- } -- free(n256); -- -- result->type = NODE48; -- result->value.ptr = n48; --} -- --static bool _remove(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke) --{ -- bool r; -- unsigned i; -- struct value_chain *vc; -- struct prefix_chain *pc; -- struct node4 *n4; -- struct node16 *n16; -- struct node48 *n48; -- struct node256 *n256; -- -- if (kb == ke) { -- if (root->type == VALUE) { -- root->type = UNSET; -- _dtr(rt, root->value); -- return true; -- -- } else if (root->type == VALUE_CHAIN) { -- vc = root->value.ptr; -- _dtr(rt, vc->value); -- memcpy(root, &vc->child, sizeof(*root)); -- free(vc); -- return true; -- -- } else -- return false; -- } -- -- switch (root->type) { -- case UNSET: -- case VALUE: -- // this is a value for a prefix of the key -- return false; -- -- case VALUE_CHAIN: -- vc = root->value.ptr; -- r = _remove(rt, &vc->child, kb, ke); -- if (r && (vc->child.type == UNSET)) { -- memcpy(root, &vc->child, sizeof(*root)); -- free(vc); -- } -- return r; -- -- case PREFIX_CHAIN: -- pc = root->value.ptr; -- if (ke - kb < pc->len) -- return false; -- -- for (i = 0; i < pc->len; i++) -- if (kb[i] != pc->prefix[i]) -- return false; -- -- return _remove(rt, &pc->child, kb + pc->len, ke); -- -- case NODE4: -- n4 = root->value.ptr; -- for (i = 0; i < n4->nr_entries; i++) { -- if (n4->keys[i] == *kb) { -- r = _remove(rt, n4->values + i, kb + 1, ke); -- if (r && n4->values[i].type == UNSET) { -- n4->nr_entries--; -- if (i < n4->nr_entries) -- // slide the entries down -- memmove(n4->keys + i, n4->keys + i + 1, -- sizeof(*n4->keys) * (n4->nr_entries - i)); -- if (!n4->nr_entries) -- root->type = UNSET; -- } -- return r; -- } -- } -- return 
false; -- -- case NODE16: -- n16 = root->value.ptr; -- for (i = 0; i < n16->nr_entries; i++) { -- if (n16->keys[i] == *kb) { -- r = _remove(rt, n16->values + i, kb + 1, ke); -- if (r && n16->values[i].type == UNSET) { -- n16->nr_entries--; -- if (i < n16->nr_entries) -- // slide the entries down -- memmove(n16->keys + i, n16->keys + i + 1, -- sizeof(*n16->keys) * (n16->nr_entries - i)); -- if (n16->nr_entries <= 4) -- _degrade_to_n4(n16, root); -- } -- return r; -- } -- } -- return false; -- -- case NODE48: -- n48 = root->value.ptr; -- i = n48->keys[*kb]; -- if (i < 48) { -- r = _remove(rt, n48->values + i, kb + 1, ke); -- if (r && n48->values[i].type == UNSET) { -- n48->keys[*kb] = 48; -- n48->nr_entries--; -- if (n48->nr_entries <= 16) -- _degrade_to_n16(n48, root); -- } -- return r; -- } -- return false; -- -- case NODE256: -- n256 = root->value.ptr; -- r = _remove(rt, n256->values + (*kb), kb + 1, ke); -- if (r && n256->values[*kb].type == UNSET) { -- n256->nr_entries--; -- if (n256->nr_entries <= 48) -- _degrade_to_n48(n256, root); -- } -- return r; -- } -- -- return false; --} -- --bool radix_tree_remove(struct radix_tree *rt, uint8_t *key_begin, uint8_t *key_end) --{ -- if (_remove(rt, &rt->root, key_begin, key_end)) { -- rt->nr_entries--; -- return true; -- } -- -- return false; --} -- --static bool _prefix_chain_matches(struct lookup_result *lr, uint8_t *ke) --{ -- // It's possible the top node is a prefix chain, and -- // the remaining key matches part of it. -- if (lr->v->type == PREFIX_CHAIN) { -- unsigned i, rlen = ke - lr->kb; -- struct prefix_chain *pc = lr->v->value.ptr; -- if (rlen < pc->len) { -- for (i = 0; i < rlen; i++) -- if (pc->prefix[i] != lr->kb[i]) -- return false; -- return true; -- } -- } -- -- return false; --} -- --unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke) --{ -- unsigned count = 0; -- struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -- if (lr.kb == ke || _prefix_chain_matches(&lr, ke)) { -- count = _free_node(rt, *lr.v); -- lr.v->type = UNSET; -- } -- -- rt->nr_entries -= count; -- return count; --} -- --bool radix_tree_lookup(struct radix_tree *rt, -- uint8_t *kb, uint8_t *ke, union radix_value *result) --{ -- struct value_chain *vc; -- struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -- if (lr.kb == ke) { -- switch (lr.v->type) { -- case VALUE: -- *result = lr.v->value; -- return true; -- -- case VALUE_CHAIN: -- vc = lr.v->value.ptr; -- *result = vc->value; -- return true; -- -- default: -- return false; -- } -- } -- -- return false; --} -- --// FIXME: build up the keys too --static bool _iterate(struct value *v, struct radix_tree_iterator *it) --{ -- unsigned i; -- struct value_chain *vc; -- struct prefix_chain *pc; -- struct node4 *n4; -- struct node16 *n16; -- struct node48 *n48; -- struct node256 *n256; -- -- switch (v->type) { -- case UNSET: -- // can't happen -- break; -- -- case VALUE: -- return it->visit(it, NULL, NULL, v->value); -- -- case VALUE_CHAIN: -- vc = v->value.ptr; -- return it->visit(it, NULL, NULL, vc->value) && _iterate(&vc->child, it); -- -- case PREFIX_CHAIN: -- pc = v->value.ptr; -- return _iterate(&pc->child, it); -- -- case NODE4: -- n4 = (struct node4 *) v->value.ptr; -- for (i = 0; i < n4->nr_entries; i++) -- if (!_iterate(n4->values + i, it)) -- return false; -- return true; -- -- case NODE16: -- n16 = (struct node16 *) v->value.ptr; -- for (i = 0; i < n16->nr_entries; i++) -- if (!_iterate(n16->values + i, it)) -- return false; -- return true; -- -- case NODE48: 
-- n48 = (struct node48 *) v->value.ptr; -- for (i = 0; i < n48->nr_entries; i++) -- if (!_iterate(n48->values + i, it)) -- return false; -- return true; -- -- case NODE256: -- n256 = (struct node256 *) v->value.ptr; -- for (i = 0; i < 256; i++) -- if (n256->values[i].type != UNSET && !_iterate(n256->values + i, it)) -- return false; -- return true; -- } -- -- // can't get here -- return false; --} -- --void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, -- struct radix_tree_iterator *it) --{ -- struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke); -- if (lr.kb == ke || _prefix_chain_matches(&lr, ke)) -- _iterate(lr.v, it); --} -+#ifdef SIMPLE_RADIX_TREE -+#include "base/data-struct/radix-tree-simple.c" -+#else -+#include "base/data-struct/radix-tree-adaptive.c" -+#endif - - //---------------------------------------------------------------- -diff --git a/base/data-struct/radix-tree.h b/base/data-struct/radix-tree.h -index 1b6aee8..5d4d04c 100644 ---- a/base/data-struct/radix-tree.h -+++ b/base/data-struct/radix-tree.h -@@ -15,6 +15,7 @@ - - #include - #include -+#include - - //---------------------------------------------------------------- - -@@ -53,6 +54,11 @@ struct radix_tree_iterator { - void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, - struct radix_tree_iterator *it); - -+// Checks that some constraints on the shape of the tree are -+// being held. For debug only. -+bool radix_tree_is_well_formed(struct radix_tree *rt); -+void radix_tree_dump(struct radix_tree *rt, FILE *out); -+ - //---------------------------------------------------------------- - - #endif -diff --git a/lib/device/bcache.c b/lib/device/bcache.c -index d487ca2..b64707e 100644 ---- a/lib/device/bcache.c -+++ b/lib/device/bcache.c -@@ -12,9 +12,9 @@ - * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - --#define _GNU_SOURCE -- - #include "bcache.h" -+ -+#include "base/data-struct/radix-tree.h" - #include "lvm-logging.h" - #include "log.h" - -@@ -67,14 +67,14 @@ struct cb_set { - static struct cb_set *_cb_set_create(unsigned nr) - { - int i; -- struct cb_set *cbs = dm_malloc(sizeof(*cbs)); -+ struct cb_set *cbs = malloc(sizeof(*cbs)); - - if (!cbs) - return NULL; - -- cbs->vec = dm_malloc(nr * sizeof(*cbs->vec)); -+ cbs->vec = malloc(nr * sizeof(*cbs->vec)); - if (!cbs->vec) { -- dm_free(cbs); -+ free(cbs); - return NULL; - } - -@@ -97,8 +97,8 @@ static void _cb_set_destroy(struct cb_set *cbs) - return; - } - -- dm_free(cbs->vec); -- dm_free(cbs); -+ free(cbs->vec); -+ free(cbs); - } - - static struct control_block *_cb_alloc(struct cb_set *cbs, void *context) -@@ -152,7 +152,7 @@ static void _async_destroy(struct io_engine *ioe) - if (r) - log_sys_warn("io_destroy"); - -- dm_free(e); -+ free(e); - } - - static int _last_byte_fd; -@@ -169,7 +169,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - sector_t offset; - sector_t nbytes; - sector_t limit_nbytes; -- sector_t orig_nbytes; - sector_t extra_nbytes = 0; - - if (((uintptr_t) data) & e->page_mask) { -@@ -192,41 +191,11 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - return false; - } - -- /* -- * If the bcache block offset+len goes beyond where lvm is -- * intending to write, then reduce the len being written -- * (which is the bcache block size) so we don't write past -- * the limit set by lvm. 
If after applying the limit, the -- * resulting size is not a multiple of the sector size (512 -- * or 4096) then extend the reduced size to be a multiple of -- * the sector size (we don't want to write partial sectors.) -- */ - if (offset + nbytes > _last_byte_offset) { - limit_nbytes = _last_byte_offset - offset; -- -- if (limit_nbytes % _last_byte_sector_size) { -+ if (limit_nbytes % _last_byte_sector_size) - extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size); - -- /* -- * adding extra_nbytes to the reduced nbytes (limit_nbytes) -- * should make the final write size a multiple of the -- * sector size. This should never result in a final size -- * larger than the bcache block size (as long as the bcache -- * block size is a multiple of the sector size). -- */ -- if (limit_nbytes + extra_nbytes > nbytes) { -- log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu", -- (unsigned long long)offset, -- (unsigned long long)nbytes, -- (unsigned long long)limit_nbytes, -- (unsigned long long)extra_nbytes, -- (unsigned long long)_last_byte_sector_size); -- extra_nbytes = 0; -- } -- } -- -- orig_nbytes = nbytes; -- - if (extra_nbytes) { - log_debug("Limit write at %llu len %llu to len %llu rounded to %llu", - (unsigned long long)offset, -@@ -241,22 +210,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd, - (unsigned long long)limit_nbytes); - nbytes = limit_nbytes; - } -- -- /* -- * This shouldn't happen, the reduced+extended -- * nbytes value should never be larger than the -- * bcache block size. -- */ -- if (nbytes > orig_nbytes) { -- log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu", -- (unsigned long long)offset, -- (unsigned long long)orig_nbytes, -- (unsigned long long)nbytes, -- (unsigned long long)limit_nbytes, -- (unsigned long long)extra_nbytes, -- (unsigned long long)_last_byte_sector_size); -- return false; -- } - } - } - -@@ -361,7 +314,7 @@ static unsigned _async_max_io(struct io_engine *e) - struct io_engine *create_async_io_engine(void) - { - int r; -- struct async_engine *e = dm_malloc(sizeof(*e)); -+ struct async_engine *e = malloc(sizeof(*e)); - - if (!e) - return NULL; -@@ -375,14 +328,14 @@ struct io_engine *create_async_io_engine(void) - r = io_setup(MAX_IO, &e->aio_context); - if (r < 0) { - log_debug("io_setup failed %d", r); -- dm_free(e); -+ free(e); - return NULL; - } - - e->cbs = _cb_set_create(MAX_IO); - if (!e->cbs) { - log_warn("couldn't create control block set"); -- dm_free(e); -+ free(e); - return NULL; - } - -@@ -411,7 +364,7 @@ static struct sync_engine *_to_sync(struct io_engine *e) - static void _sync_destroy(struct io_engine *ioe) - { - struct sync_engine *e = _to_sync(ioe); -- dm_free(e); -+ free(e); - } - - static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, -@@ -430,7 +383,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - } - - where = sb * 512; -- - off = lseek(fd, where, SEEK_SET); - if (off == (off_t) -1) { - log_warn("Device seek error %d for offset %llu", errno, (unsigned long long)where); -@@ -451,7 +403,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - uint64_t nbytes = len; - sector_t limit_nbytes = 0; - sector_t extra_nbytes = 0; -- sector_t orig_nbytes = 0; - - if (offset > _last_byte_offset) { - log_error("Limit write at %llu len %llu beyond last byte %llu", -@@ -464,30 +415,9 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - 
- if (offset + nbytes > _last_byte_offset) { - limit_nbytes = _last_byte_offset - offset; -- -- if (limit_nbytes % _last_byte_sector_size) { -+ if (limit_nbytes % _last_byte_sector_size) - extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size); - -- /* -- * adding extra_nbytes to the reduced nbytes (limit_nbytes) -- * should make the final write size a multiple of the -- * sector size. This should never result in a final size -- * larger than the bcache block size (as long as the bcache -- * block size is a multiple of the sector size). -- */ -- if (limit_nbytes + extra_nbytes > nbytes) { -- log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu", -- (unsigned long long)offset, -- (unsigned long long)nbytes, -- (unsigned long long)limit_nbytes, -- (unsigned long long)extra_nbytes, -- (unsigned long long)_last_byte_sector_size); -- extra_nbytes = 0; -- } -- } -- -- orig_nbytes = nbytes; -- - if (extra_nbytes) { - log_debug("Limit write at %llu len %llu to len %llu rounded to %llu", - (unsigned long long)offset, -@@ -502,23 +432,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd, - (unsigned long long)limit_nbytes); - nbytes = limit_nbytes; - } -- -- /* -- * This shouldn't happen, the reduced+extended -- * nbytes value should never be larger than the -- * bcache block size. -- */ -- if (nbytes > orig_nbytes) { -- log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu", -- (unsigned long long)offset, -- (unsigned long long)orig_nbytes, -- (unsigned long long)nbytes, -- (unsigned long long)limit_nbytes, -- (unsigned long long)extra_nbytes, -- (unsigned long long)_last_byte_sector_size); -- free(io); -- return false; -- } - } - - where = offset; -@@ -580,7 +493,7 @@ static bool _sync_wait(struct io_engine *ioe, io_complete_fn fn) - dm_list_iterate_items_safe(io, tmp, &e->complete) { - fn(io->context, 0); - dm_list_del(&io->list); -- dm_free(io); -+ free(io); - } - - return true; -@@ -593,7 +506,7 @@ static unsigned _sync_max_io(struct io_engine *e) - - struct io_engine *create_sync_io_engine(void) - { -- struct sync_engine *e = dm_malloc(sizeof(*e)); -+ struct sync_engine *e = malloc(sizeof(*e)); - - if (!e) - return NULL; -@@ -673,12 +586,7 @@ struct bcache { - struct dm_list clean; - struct dm_list io_pending; - -- /* -- * Hash table. 
-- */ -- unsigned nr_buckets; -- unsigned hash_mask; -- struct dm_list *buckets; -+ struct radix_tree *rtree; - - /* - * Statistics -@@ -693,75 +601,50 @@ struct bcache { - - //---------------------------------------------------------------- - --/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ --#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL -+struct key_parts { -+ uint32_t fd; -+ uint64_t b; -+} __attribute__ ((packed)); - --static unsigned _hash(struct bcache *cache, int fd, uint64_t i) --{ -- uint64_t h = (i << 10) & fd; -- h *= GOLDEN_RATIO_PRIME_64; -- return h & cache->hash_mask; --} -+union key { -+ struct key_parts parts; -+ uint8_t bytes[12]; -+}; - --static struct block *_hash_lookup(struct bcache *cache, int fd, uint64_t i) -+static struct block *_block_lookup(struct bcache *cache, int fd, uint64_t i) - { -- struct block *b; -- unsigned h = _hash(cache, fd, i); -+ union key k; -+ union radix_value v; - -- dm_list_iterate_items_gen (b, cache->buckets + h, hash) -- if (b->fd == fd && b->index == i) -- return b; -+ k.parts.fd = fd; -+ k.parts.b = i; - -- return NULL; --} -- --static void _hash_insert(struct block *b) --{ -- unsigned h = _hash(b->cache, b->fd, b->index); -- dm_list_add_h(b->cache->buckets + h, &b->hash); --} -+ if (radix_tree_lookup(cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), &v)) -+ return v.ptr; - --static inline void _hash_remove(struct block *b) --{ -- dm_list_del(&b->hash); -+ return NULL; - } - --/* -- * Must return a power of 2. -- */ --static unsigned _calc_nr_buckets(unsigned nr_blocks) -+static bool _block_insert(struct block *b) - { -- unsigned r = 8; -- unsigned n = nr_blocks / 4; -+ union key k; -+ union radix_value v; - -- if (n < 8) -- n = 8; -+ k.parts.fd = b->fd; -+ k.parts.b = b->index; -+ v.ptr = b; - -- while (r < n) -- r <<= 1; -- -- return r; -+ return radix_tree_insert(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), v); - } - --static bool _hash_table_init(struct bcache *cache, unsigned nr_entries) -+static void _block_remove(struct block *b) - { -- unsigned i; -- -- cache->nr_buckets = _calc_nr_buckets(nr_entries); -- cache->hash_mask = cache->nr_buckets - 1; -- cache->buckets = dm_malloc(cache->nr_buckets * sizeof(*cache->buckets)); -- if (!cache->buckets) -- return false; -+ union key k; - -- for (i = 0; i < cache->nr_buckets; i++) -- dm_list_init(cache->buckets + i); -+ k.parts.fd = b->fd; -+ k.parts.b = b->index; - -- return true; --} -- --static void _hash_table_exit(struct bcache *cache) --{ -- dm_free(cache->buckets); -+ radix_tree_remove(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes)); - } - - //---------------------------------------------------------------- -@@ -777,7 +660,7 @@ static bool _init_free_list(struct bcache *cache, unsigned count, unsigned pgsiz - if (!data) - return false; - -- cache->raw_blocks = dm_malloc(count * sizeof(*cache->raw_blocks)); -+ cache->raw_blocks = malloc(count * sizeof(*cache->raw_blocks)); - if (!cache->raw_blocks) { - free(data); - return false; -@@ -797,8 +680,8 @@ static bool _init_free_list(struct bcache *cache, unsigned count, unsigned pgsiz - - static void _exit_free_list(struct bcache *cache) - { -- dm_free(cache->raw_data); -- dm_free(cache->raw_blocks); -+ free(cache->raw_data); -+ free(cache->raw_blocks); - } - - static struct block *_alloc_block(struct bcache *cache) -@@ -809,6 +692,11 @@ static struct block *_alloc_block(struct bcache *cache) - return dm_list_struct_base(_list_pop(&cache->free), struct block, list); - } - -+static void _free_block(struct block *b) 
-+{ -+ dm_list_add(&b->cache->free, &b->list); -+} -+ - /*---------------------------------------------------------------- - * Clean/dirty list management. - * Always use these methods to ensure nr_dirty_ is correct. -@@ -963,7 +851,7 @@ static struct block *_find_unused_clean_block(struct bcache *cache) - dm_list_iterate_items (b, &cache->clean) { - if (!b->ref_count) { - _unlink_block(b); -- _hash_remove(b); -+ _block_remove(b); - return b; - } - } -@@ -993,29 +881,18 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b - - if (b) { - dm_list_init(&b->list); -- dm_list_init(&b->hash); - b->flags = 0; - b->fd = fd; - b->index = i; - b->ref_count = 0; - b->error = 0; - -- _hash_insert(b); -- } -- --#if 0 -- if (!b) { -- log_error("bcache no new blocks for fd %d index %u " -- "clean %u free %u dirty %u pending %u nr_data_blocks %u nr_cache_blocks %u", -- fd, (uint32_t) i, -- dm_list_size(&cache->clean), -- dm_list_size(&cache->free), -- dm_list_size(&cache->dirty), -- dm_list_size(&cache->io_pending), -- (uint32_t)cache->nr_data_blocks, -- (uint32_t)cache->nr_cache_blocks); -+ if (!_block_insert(b)) { -+ log_error("bcache unable to insert block in radix tree (OOM?)"); -+ _free_block(b); -+ return NULL; -+ } - } --#endif - - return b; - } -@@ -1054,7 +931,7 @@ static struct block *_lookup_or_read_block(struct bcache *cache, - int fd, block_address i, - unsigned flags) - { -- struct block *b = _hash_lookup(cache, fd, i); -+ struct block *b = _block_lookup(cache, fd, i); - - if (b) { - // FIXME: this is insufficient. We need to also catch a read -@@ -1125,8 +1002,8 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks, - unsigned max_io = engine->max_io(engine); - long pgsize = sysconf(_SC_PAGESIZE); - -- if ((pgsize = sysconf(_SC_PAGESIZE)) < 0) { -- log_warn("bcache cannot read pagesize."); -+ if (pgsize < 0) { -+ log_warn("WARNING: _SC_PAGESIZE returns negative value."); - return NULL; - } - -@@ -1145,7 +1022,7 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks, - return NULL; - } - -- cache = dm_malloc(sizeof(*cache)); -+ cache = malloc(sizeof(*cache)); - if (!cache) - return NULL; - -@@ -1163,9 +1040,10 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks, - dm_list_init(&cache->clean); - dm_list_init(&cache->io_pending); - -- if (!_hash_table_init(cache, nr_cache_blocks)) { -+ cache->rtree = radix_tree_create(NULL, NULL); -+ if (!cache->rtree) { - cache->engine->destroy(cache->engine); -- dm_free(cache); -+ free(cache); - return NULL; - } - -@@ -1178,8 +1056,8 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks, - - if (!_init_free_list(cache, nr_cache_blocks, pgsize)) { - cache->engine->destroy(cache->engine); -- _hash_table_exit(cache); -- dm_free(cache); -+ radix_tree_destroy(cache->rtree); -+ free(cache); - return NULL; - } - -@@ -1192,12 +1070,12 @@ void bcache_destroy(struct bcache *cache) - log_warn("some blocks are still locked"); - - if (!bcache_flush(cache)) -- log_warn("cache flushing failed."); -+ stack; - _wait_all(cache); - _exit_free_list(cache); -- _hash_table_exit(cache); -+ radix_tree_destroy(cache->rtree); - cache->engine->destroy(cache->engine); -- dm_free(cache); -+ free(cache); - } - - sector_t bcache_block_sectors(struct bcache *cache) -@@ -1217,7 +1095,7 @@ unsigned bcache_max_prefetches(struct bcache *cache) - - void bcache_prefetch(struct bcache *cache, int fd, block_address i) - { -- struct block *b = 
_hash_lookup(cache, fd, i); -+ struct block *b = _block_lookup(cache, fd, i); - - if (!b) { - if (cache->nr_io_pending < cache->max_io) { -@@ -1230,11 +1108,13 @@ void bcache_prefetch(struct bcache *cache, int fd, block_address i) - } - } - -+//---------------------------------------------------------------- -+ - static void _recycle_block(struct bcache *cache, struct block *b) - { - _unlink_block(b); -- _hash_remove(b); -- dm_list_add(&cache->free, &b->list); -+ _block_remove(b); -+ _free_block(b); - } - - bool bcache_get(struct bcache *cache, int fd, block_address i, -@@ -1268,6 +1148,8 @@ bool bcache_get(struct bcache *cache, int fd, block_address i, - return false; - } - -+//---------------------------------------------------------------- -+ - static void _put_ref(struct block *b) - { - if (!b->ref_count) { -@@ -1288,6 +1170,8 @@ void bcache_put(struct block *b) - _preemptive_writeback(b->cache); - } - -+//---------------------------------------------------------------- -+ - bool bcache_flush(struct bcache *cache) - { - // Only dirty data is on the errored list, since bad read blocks get -@@ -1310,6 +1194,7 @@ bool bcache_flush(struct bcache *cache) - return dm_list_empty(&cache->errored); - } - -+//---------------------------------------------------------------- - /* - * You can safely call this with a NULL block. - */ -@@ -1342,29 +1227,108 @@ static bool _invalidate_block(struct bcache *cache, struct block *b) - - bool bcache_invalidate(struct bcache *cache, int fd, block_address i) - { -- return _invalidate_block(cache, _hash_lookup(cache, fd, i)); -+ return _invalidate_block(cache, _block_lookup(cache, fd, i)); -+} -+ -+//---------------------------------------------------------------- -+ -+struct invalidate_iterator { -+ bool success; -+ struct radix_tree_iterator it; -+}; -+ -+static bool _writeback_v(struct radix_tree_iterator *it, -+ uint8_t *kb, uint8_t *ke, union radix_value v) -+{ -+ struct block *b = v.ptr; -+ -+ if (_test_flags(b, BF_DIRTY)) -+ _issue_write(b); -+ -+ return true; -+} -+ -+static bool _invalidate_v(struct radix_tree_iterator *it, -+ uint8_t *kb, uint8_t *ke, union radix_value v) -+{ -+ struct block *b = v.ptr; -+ struct invalidate_iterator *iit = container_of(it, struct invalidate_iterator, it); -+ -+ if (b->error || _test_flags(b, BF_DIRTY)) { -+ log_warn("bcache_invalidate: block (%d, %llu) still dirty", -+ b->fd, (unsigned long long) b->index); -+ iit->success = false; -+ return true; -+ } -+ -+ if (b->ref_count) { -+ log_warn("bcache_invalidate: block (%d, %llu) still held", -+ b->fd, (unsigned long long) b->index); -+ iit->success = false; -+ return true; -+ } -+ -+ _unlink_block(b); -+ _free_block(b); -+ -+ // We can't remove the block from the radix tree yet because -+ // we're in the middle of an iteration. -+ return true; - } - --// FIXME: switch to a trie, or maybe 1 hash table per fd? To save iterating --// through the whole cache. - bool bcache_invalidate_fd(struct bcache *cache, int fd) - { -- struct block *b, *tmp; -- bool r = true; -+ union key k; -+ struct invalidate_iterator it; - -- // Start writing back any dirty blocks on this fd. -- dm_list_iterate_items_safe (b, tmp, &cache->dirty) -- if (b->fd == fd) -- _issue_write(b); -+ k.parts.fd = fd; -+ -+ it.it.visit = _writeback_v; -+ radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it); - - _wait_all(cache); - -- // Everything should be in the clean list now. 
-- dm_list_iterate_items_safe (b, tmp, &cache->clean) -- if (b->fd == fd) -- r = _invalidate_block(cache, b) && r; -+ it.success = true; -+ it.it.visit = _invalidate_v; -+ radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it); -+ -+ if (it.success) -+ radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd)); -+ -+ return it.success; -+} -+ -+//---------------------------------------------------------------- -+ -+static bool _abort_v(struct radix_tree_iterator *it, -+ uint8_t *kb, uint8_t *ke, union radix_value v) -+{ -+ struct block *b = v.ptr; -+ -+ if (b->ref_count) { -+ log_fatal("bcache_abort: block (%d, %llu) still held", -+ b->fd, (unsigned long long) b->index); -+ return true; -+ } -+ -+ _unlink_block(b); -+ _free_block(b); -+ -+ // We can't remove the block from the radix tree yet because -+ // we're in the middle of an iteration. -+ return true; -+} -+ -+void bcache_abort_fd(struct bcache *cache, int fd) -+{ -+ union key k; -+ struct radix_tree_iterator it; -+ -+ k.parts.fd = fd; - -- return r; -+ it.visit = _abort_v; -+ radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it); -+ radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd)); - } - - //---------------------------------------------------------------- -diff --git a/lib/device/bcache.h b/lib/device/bcache.h -index cb902ef..f9067f7 100644 ---- a/lib/device/bcache.h -+++ b/lib/device/bcache.h -@@ -61,7 +61,6 @@ struct block { - - struct bcache *cache; - struct dm_list list; -- struct dm_list hash; - - unsigned flags; - unsigned ref_count; -@@ -145,6 +144,13 @@ bool bcache_invalidate(struct bcache *cache, int fd, block_address index); - */ - bool bcache_invalidate_fd(struct bcache *cache, int fd); - -+/* -+ * Call this function if flush, or invalidate fail and you do not -+ * wish to retry the writes. This will throw away any dirty data -+ * not written. If any blocks for fd are held, then it will call -+ * abort(). -+ */ -+void bcache_abort_fd(struct bcache *cache, int fd); - - //---------------------------------------------------------------- - // The next four functions are utilities written in terms of the above api. -diff --git a/lib/label/label.c b/lib/label/label.c -index 8107e33..2444ee0 100644 ---- a/lib/label/label.c -+++ b/lib/label/label.c -@@ -594,6 +594,14 @@ static void _drop_bad_aliases(struct device *dev) - } - } - -+// Like bcache_invalidate, only it throws any dirty data away if the -+// write fails. -+static void _invalidate_fd(struct bcache *cache, int fd) -+{ -+ if (!bcache_invalidate_fd(cache, fd)) -+ bcache_abort_fd(cache, fd); -+} -+ - /* - * Read or reread label/metadata from selected devs. - * -@@ -706,7 +714,7 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f, - * drop it from bcache. - */ - if (scan_failed || !is_lvm_device) { -- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd); -+ _invalidate_fd(scan_bcache, devl->dev->bcache_fd); - _scan_dev_close(devl->dev); - } - -@@ -878,7 +886,7 @@ int label_scan(struct cmd_context *cmd) - * so this will usually not be true. 
- */ - if (_in_bcache(dev)) { -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - } - -@@ -1063,7 +1071,7 @@ int label_scan_devs(struct cmd_context *cmd, struct dev_filter *f, struct dm_lis - - dm_list_iterate_items(devl, devs) { - if (_in_bcache(devl->dev)) { -- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd); -+ _invalidate_fd(scan_bcache, devl->dev->bcache_fd); - _scan_dev_close(devl->dev); - } - } -@@ -1082,7 +1090,7 @@ int label_scan_devs_rw(struct cmd_context *cmd, struct dev_filter *f, struct dm_ - - dm_list_iterate_items(devl, devs) { - if (_in_bcache(devl->dev)) { -- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd); -+ _invalidate_fd(scan_bcache, devl->dev->bcache_fd); - _scan_dev_close(devl->dev); - } - -@@ -1104,7 +1112,7 @@ int label_scan_devs_excl(struct dm_list *devs) - - dm_list_iterate_items(devl, devs) { - if (_in_bcache(devl->dev)) { -- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd); -+ _invalidate_fd(scan_bcache, devl->dev->bcache_fd); - _scan_dev_close(devl->dev); - } - /* -@@ -1124,7 +1132,7 @@ int label_scan_devs_excl(struct dm_list *devs) - void label_scan_invalidate(struct device *dev) - { - if (_in_bcache(dev)) { -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - } - } -@@ -1205,7 +1213,7 @@ int label_read(struct device *dev) - dm_list_add(&one_dev, &devl->list); - - if (_in_bcache(dev)) { -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - } - -@@ -1311,7 +1319,7 @@ int label_scan_open_excl(struct device *dev) - if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_EXCL)) { - /* FIXME: avoid tossing out bcache blocks just to replace fd. */ - log_debug("Close and reopen excl %s", dev_name(dev)); -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - } - dev->flags |= DEV_BCACHE_EXCL; -@@ -1319,6 +1327,18 @@ int label_scan_open_excl(struct device *dev) - return label_scan_open(dev); - } - -+int label_scan_open_rw(struct device *dev) -+{ -+ if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) { -+ /* FIXME: avoid tossing out bcache blocks just to replace fd. */ -+ log_debug("Close and reopen rw %s", dev_name(dev)); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); -+ _scan_dev_close(dev); -+ } -+ dev->flags |= DEV_BCACHE_WRITE; -+ return label_scan_open(dev); -+} -+ - bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data) - { - if (!scan_bcache) { -@@ -1360,7 +1380,7 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data) - if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) { - /* FIXME: avoid tossing out bcache blocks just to replace fd. */ - log_debug("Close and reopen to write %s", dev_name(dev)); -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - - dev->flags |= DEV_BCACHE_WRITE; -@@ -1406,7 +1426,7 @@ bool dev_write_zeros(struct device *dev, uint64_t start, size_t len) - if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) { - /* FIXME: avoid tossing out bcache blocks just to replace fd. 
*/ - log_debug("Close and reopen to write %s", dev_name(dev)); -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - - dev->flags |= DEV_BCACHE_WRITE; -@@ -1457,7 +1477,7 @@ bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val) - if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) { - /* FIXME: avoid tossing out bcache blocks just to replace fd. */ - log_debug("Close and reopen to write %s", dev_name(dev)); -- bcache_invalidate_fd(scan_bcache, dev->bcache_fd); -+ _invalidate_fd(scan_bcache, dev->bcache_fd); - _scan_dev_close(dev); - - dev->flags |= DEV_BCACHE_WRITE; -diff --git a/make.tmpl.in b/make.tmpl.in -index c8e4f14..e7780e8 100644 ---- a/make.tmpl.in -+++ b/make.tmpl.in -@@ -68,7 +68,15 @@ CLDFLAGS += @CLDFLAGS@ - ELDFLAGS += @ELDFLAGS@ - LDDEPS += @LDDEPS@ - LIB_SUFFIX = @LIB_SUFFIX@ --LVMINTERNAL_LIBS = -llvm-internal $(DMEVENT_LIBS) $(DAEMON_LIBS) $(SYSTEMD_LIBS) $(UDEV_LIBS) $(DL_LIBS) $(BLKID_LIBS) -+LVMINTERNAL_LIBS =\ -+ -llvm-internal \ -+ $(top_builddir)/base/libbase.a \ -+ $(DMEVENT_LIBS) \ -+ $(DAEMON_LIBS) \ -+ $(SYSTEMD_LIBS) \ -+ $(UDEV_LIBS) \ -+ $(DL_LIBS) \ -+ $(BLKID_LIBS) - DL_LIBS = @DL_LIBS@ - RT_LIBS = @RT_LIBS@ - M_LIBS = @M_LIBS@ -@@ -306,7 +314,7 @@ LIB_VERSION_DM := $(shell $(AWK) -F '.' '{printf "%s.%s",$$1,$$2}' $(top_srcdir) - - LIB_VERSION_APP := $(shell $(AWK) -F '[(). ]' '{printf "%s.%s",$$1,$$4}' $(top_srcdir)/VERSION) - --INCLUDES += -I$(srcdir) -I$(top_builddir)/include -+INCLUDES += -I$(top_srcdir) -I$(srcdir) -I$(top_builddir)/include - - INC_LNS = $(top_builddir)/include/.symlinks_created - -diff --git a/test/unit/bcache_t.c b/test/unit/bcache_t.c -index 925b95d..2a8f931 100644 ---- a/test/unit/bcache_t.c -+++ b/test/unit/bcache_t.c -@@ -12,15 +12,14 @@ - * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -+#include "units.h" -+#include "lib/device/bcache.h" -+ - #include - #include - #include - #include - --#include "bcache.h" --#include "framework.h" --#include "units.h" -- - #define SHOW_MOCK_CALLS 0 - - /*---------------------------------------------------------------- -@@ -794,7 +793,6 @@ static void test_invalidate_after_write_error(void *context) - - static void test_invalidate_held_block(void *context) - { -- - struct fixture *f = context; - struct mock_engine *me = f->me; - struct bcache *cache = f->cache; -@@ -811,6 +809,90 @@ static void test_invalidate_held_block(void *context) - } - - //---------------------------------------------------------------- -+// abort tests -+ -+static void test_abort_no_blocks(void *context) -+{ -+ struct fixture *f = context; -+ struct bcache *cache = f->cache; -+ int fd = 17; -+ -+ // We have no expectations -+ bcache_abort_fd(cache, fd); -+} -+ -+static void test_abort_single_block(void *context) -+{ -+ struct fixture *f = context; -+ struct bcache *cache = f->cache; -+ struct block *b; -+ int fd = 17; -+ -+ T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b)); -+ bcache_put(b); -+ -+ bcache_abort_fd(cache, fd); -+ -+ // no write should be issued -+ T_ASSERT(bcache_flush(cache)); -+} -+ -+static void test_abort_forces_reread(void *context) -+{ -+ struct fixture *f = context; -+ struct mock_engine *me = f->me; -+ struct bcache *cache = f->cache; -+ struct block *b; -+ int fd = 17; -+ -+ _expect_read(me, fd, 0); -+ _expect(me, E_WAIT); -+ T_ASSERT(bcache_get(cache, fd, 0, GF_DIRTY, &b)); -+ bcache_put(b); -+ -+ bcache_abort_fd(cache, fd); -+ T_ASSERT(bcache_flush(cache)); -+ -+ // 
Check the block is re-read -+ _expect_read(me, fd, 0); -+ _expect(me, E_WAIT); -+ T_ASSERT(bcache_get(cache, fd, 0, 0, &b)); -+ bcache_put(b); -+} -+ -+static void test_abort_only_specific_fd(void *context) -+{ -+ struct fixture *f = context; -+ struct mock_engine *me = f->me; -+ struct bcache *cache = f->cache; -+ struct block *b; -+ int fd1 = 17, fd2 = 18; -+ -+ T_ASSERT(bcache_get(cache, fd1, 0, GF_ZERO, &b)); -+ bcache_put(b); -+ -+ T_ASSERT(bcache_get(cache, fd1, 1, GF_ZERO, &b)); -+ bcache_put(b); -+ -+ T_ASSERT(bcache_get(cache, fd2, 0, GF_ZERO, &b)); -+ bcache_put(b); -+ -+ T_ASSERT(bcache_get(cache, fd2, 1, GF_ZERO, &b)); -+ bcache_put(b); -+ -+ bcache_abort_fd(cache, fd2); -+ -+ // writes for fd1 should still be issued -+ _expect_write(me, fd1, 0); -+ _expect_write(me, fd1, 1); -+ -+ _expect(me, E_WAIT); -+ _expect(me, E_WAIT); -+ -+ T_ASSERT(bcache_flush(cache)); -+} -+ -+//---------------------------------------------------------------- - // Chasing a bug reported by dct - - static void _cycle(struct fixture *f, unsigned nr_cache_blocks) -@@ -898,6 +980,12 @@ static struct test_suite *_small_tests(void) - T("invalidate-read-error", "invalidate a block that errored", test_invalidate_after_read_error); - T("invalidate-write-error", "invalidate a block that errored", test_invalidate_after_write_error); - T("invalidate-fails-in-held", "invalidating a held block fails", test_invalidate_held_block); -+ -+ T("abort-with-no-blocks", "you can call abort, even if there are no blocks in the cache", test_abort_no_blocks); -+ T("abort-single-block", "single block get silently discarded", test_abort_single_block); -+ T("abort-forces-read", "if a block has been discarded then another read is necc.", test_abort_forces_reread); -+ T("abort-specific-fd", "abort doesn't effect other fds", test_abort_only_specific_fd); -+ - T("concurrent-reads-after-invalidate", "prefetch should still issue concurrent reads after invalidate", - test_concurrent_reads_after_invalidate); - -diff --git a/test/unit/bcache_utils_t.c b/test/unit/bcache_utils_t.c -index 9ddc194..2e08320 100644 ---- a/test/unit/bcache_utils_t.c -+++ b/test/unit/bcache_utils_t.c -@@ -14,9 +14,8 @@ - - #define _GNU_SOURCE - --#include "bcache.h" --#include "framework.h" - #include "units.h" -+#include "lib/device/bcache.h" - - #include - #include -diff --git a/test/unit/radix_tree_t.c b/test/unit/radix_tree_t.c -index 7266a8a..54bc406 100644 ---- a/test/unit/radix_tree_t.c -+++ b/test/unit/radix_tree_t.c -@@ -10,11 +10,10 @@ - // along with this program; if not, write to the Free Software Foundation, - // Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -+#include "units.h" - #include "base/data-struct/radix-tree.h" - #include "base/memory/container_of.h" - --#include "units.h" -- - #include - #include - -@@ -44,6 +43,7 @@ static void test_insert_one(void *fixture) - unsigned char k = 'a'; - v.n = 65; - T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - v.n = 0; - T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v)); - T_ASSERT_EQUAL(v.n, 65); -@@ -62,6 +62,8 @@ static void test_single_byte_keys(void *fixture) - T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ - for (i = 0; i < count; i++) { - k = i; - T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v)); -@@ -82,12 +84,16 @@ static void test_overwrite_single_byte_keys(void *fixture) - T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ - for 
(i = 0; i < count; i++) { - k = i; - v.n = 1000 + i; - T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ - for (i = 0; i < count; i++) { - k = i; - T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v)); -@@ -109,6 +115,8 @@ static void test_16_bit_keys(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ - for (i = 0; i < count; i++) { - k[0] = i / 256; - k[1] = i % 256; -@@ -127,8 +135,10 @@ static void test_prefix_keys(void *fixture) - k[1] = 200; - v.n = 1024; - T_ASSERT(radix_tree_insert(rt, k, k + 1, v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - v.n = 2345; - T_ASSERT(radix_tree_insert(rt, k, k + 2, v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v)); - T_ASSERT_EQUAL(v.n, 1024); - T_ASSERT(radix_tree_lookup(rt, k, k + 2, &v)); -@@ -145,8 +155,10 @@ static void test_prefix_keys_reversed(void *fixture) - k[1] = 200; - v.n = 1024; - T_ASSERT(radix_tree_insert(rt, k, k + 2, v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - v.n = 2345; - T_ASSERT(radix_tree_insert(rt, k, k + 1, v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - T_ASSERT(radix_tree_lookup(rt, k, k + 2, &v)); - T_ASSERT_EQUAL(v.n, 1024); - T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v)); -@@ -170,7 +182,10 @@ static void test_sparse_keys(void *fixture) - _gen_key(k, k + sizeof(k)); - v.n = 1234; - T_ASSERT(radix_tree_insert(rt, k, k + 32, v)); -+ // FIXME: remove -+ //T_ASSERT(radix_tree_is_well_formed(rt)); - } -+ T_ASSERT(radix_tree_is_well_formed(rt)); - } - - static void test_remove_one(void *fixture) -@@ -182,7 +197,9 @@ static void test_remove_one(void *fixture) - _gen_key(k, k + sizeof(k)); - v.n = 1234; - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - T_ASSERT(!radix_tree_lookup(rt, k, k + sizeof(k), &v)); - } - -@@ -199,14 +216,19 @@ static void test_remove_one_byte_keys(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + 1, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - for (i = 0; i < 256; i++) { - k[0] = i; - T_ASSERT(radix_tree_remove(rt, k, k + 1)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - - for (j = i + 1; j < 256; j++) { - k[0] = j; - T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v)); -- T_ASSERT_EQUAL(v.n, j + 1000); -+ if (v.n != j + 1000) -+ test_fail("v.n (%u) != j + 1000 (%u)\n", -+ (unsigned) v.n, -+ (unsigned) j + 1000); - } - } - -@@ -216,6 +238,40 @@ static void test_remove_one_byte_keys(void *fixture) - } - } - -+static void test_remove_one_byte_keys_reversed(void *fixture) -+{ -+ struct radix_tree *rt = fixture; -+ unsigned i, j; -+ uint8_t k[1]; -+ union radix_value v; -+ -+ for (i = 0; i < 256; i++) { -+ k[0] = i; -+ v.n = i + 1000; -+ T_ASSERT(radix_tree_insert(rt, k, k + 1, v)); -+ } -+ -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ for (i = 256; i; i--) { -+ k[0] = i - 1; -+ T_ASSERT(radix_tree_remove(rt, k, k + 1)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ for (j = 0; j < i - 1; j++) { -+ k[0] = j; -+ T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v)); -+ if (v.n != j + 1000) -+ test_fail("v.n (%u) != j + 1000 (%u)\n", -+ (unsigned) v.n, -+ (unsigned) j + 1000); -+ } -+ } -+ -+ for (i = 0; i < 256; i++) { -+ k[0] = i; -+ T_ASSERT(!radix_tree_lookup(rt, k, k + 1, &v)); -+ } -+} - static void test_remove_prefix_keys(void *fixture) - { - struct radix_tree *rt = 
fixture; -@@ -230,8 +286,10 @@ static void test_remove_prefix_keys(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + i, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - for (i = 0; i < 32; i++) { - T_ASSERT(radix_tree_remove(rt, k, k + i)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - for (j = i + 1; j < 32; j++) { - T_ASSERT(radix_tree_lookup(rt, k, k + j, &v)); - T_ASSERT_EQUAL(v.n, j); -@@ -256,8 +314,10 @@ static void test_remove_prefix_keys_reversed(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + i, v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - for (i = 0; i < 32; i++) { - T_ASSERT(radix_tree_remove(rt, k, k + (31 - i))); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - for (j = 0; j < 31 - i; j++) { - T_ASSERT(radix_tree_lookup(rt, k, k + j, &v)); - T_ASSERT_EQUAL(v.n, j); -@@ -284,9 +344,12 @@ static void test_remove_prefix(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ - // remove keys in a sub range - k[0] = 21; - T_ASSERT_EQUAL(radix_tree_remove_prefix(rt, k, k + 1), count); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - } - - static void test_remove_prefix_single(void *fixture) -@@ -298,7 +361,9 @@ static void test_remove_prefix_single(void *fixture) - _gen_key(k, k + sizeof(k)); - v.n = 1234; - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - T_ASSERT_EQUAL(radix_tree_remove_prefix(rt, k, k + 2), 1); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - } - - static void test_size(void *fixture) -@@ -318,6 +383,7 @@ static void test_size(void *fixture) - } - - T_ASSERT_EQUAL(radix_tree_size(rt), 10000 - dup_count); -+ T_ASSERT(radix_tree_is_well_formed(rt)); - } - - struct visitor { -@@ -348,6 +414,7 @@ static void test_iterate_all(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - vt.count = 0; - vt.it.visit = _visit; - radix_tree_iterate(rt, NULL, NULL, &vt.it); -@@ -371,6 +438,7 @@ static void test_iterate_subset(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - vt.count = 0; - vt.it.visit = _visit; - k[0] = 21; -@@ -390,6 +458,7 @@ static void test_iterate_single(void *fixture) - v.n = 1234; - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - vt.count = 0; - vt.it.visit = _visit; - radix_tree_iterate(rt, k, k + 3, &vt.it); -@@ -411,6 +480,7 @@ static void test_iterate_vary_middle(void *fixture) - T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); - } - -+ T_ASSERT(radix_tree_is_well_formed(rt)); - vt.it.visit = _visit; - for (i = 0; i < 16; i++) { - vt.count = 0; -@@ -422,6 +492,323 @@ static void test_iterate_vary_middle(void *fixture) - - //---------------------------------------------------------------- - -+#define DTR_COUNT 100 -+ -+struct counter { -+ unsigned c; -+ uint8_t present[DTR_COUNT]; -+}; -+ -+static void _counting_dtr(void *context, union radix_value v) -+{ -+ struct counter *c = context; -+ c->c++; -+ T_ASSERT(v.n < DTR_COUNT); -+ c->present[v.n] = 0; -+} -+ -+static void test_remove_calls_dtr(void *fixture) -+{ -+ struct counter c; -+ struct radix_tree *rt = radix_tree_create(_counting_dtr, &c); -+ T_ASSERT(rt); -+ -+ // Bug hunting, so I need the keys to be deterministic -+ srand(0); -+ -+ c.c = 0; -+ memset(c.present, 1, sizeof(c.present)); -+ -+ { -+ unsigned i; -+ uint8_t keys[DTR_COUNT * 3]; -+ union 
radix_value v; -+ -+ // generate and insert a lot of keys -+ for (i = 0; i < DTR_COUNT; i++) { -+ bool found = false; -+ do { -+ v.n = i; -+ uint8_t *k = keys + (i * 3); -+ _gen_key(k, k + 3); -+ if (!radix_tree_lookup(rt, k, k + 3, &v)) { -+ T_ASSERT(radix_tree_insert(rt, k, k + 3, v)); -+ found = true; -+ } -+ -+ } while (!found); -+ } -+ -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ // double check -+ for (i = 0; i < DTR_COUNT; i++) { -+ uint8_t *k = keys + (i * 3); -+ T_ASSERT(radix_tree_lookup(rt, k, k + 3, &v)); -+ } -+ -+ for (i = 0; i < DTR_COUNT; i++) { -+ uint8_t *k = keys + (i * 3); -+ // FIXME: check the values get passed to the dtr -+ T_ASSERT(radix_tree_remove(rt, k, k + 3)); -+ } -+ -+ T_ASSERT(c.c == DTR_COUNT); -+ for (i = 0; i < DTR_COUNT; i++) -+ T_ASSERT(!c.present[i]); -+ } -+ -+ radix_tree_destroy(rt); -+} -+ -+static void test_destroy_calls_dtr(void *fixture) -+{ -+ unsigned i; -+ struct counter c; -+ struct radix_tree *rt = radix_tree_create(_counting_dtr, &c); -+ T_ASSERT(rt); -+ -+ // Bug hunting, so I need the keys to be deterministic -+ srand(0); -+ -+ c.c = 0; -+ memset(c.present, 1, sizeof(c.present)); -+ -+ { -+ uint8_t keys[DTR_COUNT * 3]; -+ union radix_value v; -+ -+ // generate and insert a lot of keys -+ for (i = 0; i < DTR_COUNT; i++) { -+ bool found = false; -+ do { -+ v.n = i; -+ uint8_t *k = keys + (i * 3); -+ _gen_key(k, k + 3); -+ if (!radix_tree_lookup(rt, k, k + 3, &v)) { -+ T_ASSERT(radix_tree_insert(rt, k, k + 3, v)); -+ found = true; -+ } -+ -+ } while (!found); -+ } -+ -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ } -+ -+ radix_tree_destroy(rt); -+ T_ASSERT(c.c == DTR_COUNT); -+ for (i = 0; i < DTR_COUNT; i++) -+ T_ASSERT(!c.present[i]); -+} -+ -+//---------------------------------------------------------------- -+ -+static void test_bcache_scenario(void *fixture) -+{ -+ struct radix_tree *rt = fixture; -+ -+ unsigned i; -+ uint8_t k[6]; -+ union radix_value v; -+ -+ memset(k, 0, sizeof(k)); -+ -+ for (i = 0; i < 3; i++) { -+ // it has to be the 4th byte that varies to -+ // trigger the bug. 
-+ k[4] = i; -+ v.n = i; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[4] = 0; -+ T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[4] = i; -+ v.n = i; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+} -+ -+//---------------------------------------------------------------- -+ -+static void _bcs2_step1(struct radix_tree *rt) -+{ -+ unsigned i; -+ uint8_t k[12]; -+ union radix_value v; -+ -+ memset(k, 0, sizeof(k)); -+ for (i = 0x6; i < 0x69; i++) { -+ k[0] = i; -+ v.n = i; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+} -+ -+static void _bcs2_step2(struct radix_tree *rt) -+{ -+ unsigned i; -+ uint8_t k[12]; -+ -+ memset(k, 0, sizeof(k)); -+ for (i = 0x6; i < 0x69; i++) { -+ k[0] = i; -+ radix_tree_remove_prefix(rt, k, k + 4); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+} -+ -+static void test_bcache_scenario2(void *fixture) -+{ -+ unsigned i; -+ struct radix_tree *rt = fixture; -+ uint8_t k[12]; -+ union radix_value v; -+ -+ _bcs2_step1(rt); -+ _bcs2_step2(rt); -+ -+ memset(k, 0, sizeof(k)); -+ for (i = 0; i < 50; i++) { -+ k[0] = 0x6; -+ v.n = 0x6; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ radix_tree_remove_prefix(rt, k, k + 4); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ _bcs2_step1(rt); -+ _bcs2_step2(rt); -+ _bcs2_step1(rt); -+ _bcs2_step2(rt); -+ -+ memset(k, 0, sizeof(k)); -+ for(i = 0x6; i < 0x37; i++) { -+ k[0] = i; -+ k[4] = 0xf; -+ k[5] = 0x1; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ k[4] = 0; -+ k[5] = 0; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ memset(k, 0, sizeof(k)); -+ for (i = 0x38; i < 0x69; i++) { -+ k[0] = i - 0x32; -+ k[4] = 0xf; -+ k[5] = 1; -+ T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ -+ k[0] = i; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ -+ k[0] = i - 0x32; -+ k[4] = 0; -+ k[5] = 0; -+ T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ -+ k[0] = i; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ } -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ memset(k, 0, sizeof(k)); -+ k[0] = 0x6; -+ radix_tree_remove_prefix(rt, k, k + 4); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[0] = 0x38; -+ k[4] = 0xf; -+ k[5] = 0x1; -+ T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ memset(k, 0, sizeof(k)); -+ k[0] = 0x6; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[0] = 0x7; -+ radix_tree_remove_prefix(rt, k, k + 4); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[0] = 0x38; -+ T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k))); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+ -+ k[0] = 7; -+ T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v)); -+ T_ASSERT(radix_tree_is_well_formed(rt)); -+} -+ -+//---------------------------------------------------------------- -+ -+struct key_parts { -+ uint32_t fd; -+ uint64_t b; -+} __attribute__ ((packed)); -+ -+union key { -+ struct key_parts parts; -+ uint8_t bytes[12]; -+}; -+ -+static void __lookup_matches(struct radix_tree *rt, int fd, uint64_t b, uint64_t expected) -+{ -+ union key k; -+ union radix_value v; -+ -+ k.parts.fd = fd; -+ k.parts.b = b; -+ T_ASSERT(radix_tree_lookup(rt, k.bytes, k.bytes + sizeof(k.bytes), 
&v)); -+ T_ASSERT(v.n == expected); -+} -+ -+static void __lookup_fails(struct radix_tree *rt, int fd, uint64_t b) -+{ -+ union key k; -+ union radix_value v; -+ -+ k.parts.fd = fd; -+ k.parts.b = b; -+ T_ASSERT(!radix_tree_lookup(rt, k.bytes, k.bytes + sizeof(k.bytes), &v)); -+} -+ -+static void __insert(struct radix_tree *rt, int fd, uint64_t b, uint64_t n) -+{ -+ union key k; -+ union radix_value v; -+ -+ k.parts.fd = fd; -+ k.parts.b = b; -+ v.n = n; -+ T_ASSERT(radix_tree_insert(rt, k.bytes, k.bytes + sizeof(k.bytes), v)); -+} -+ -+static void __invalidate(struct radix_tree *rt, int fd) -+{ -+ union key k; -+ -+ k.parts.fd = fd; -+ radix_tree_remove_prefix(rt, k.bytes, k.bytes + sizeof(k.parts.fd)); -+ radix_tree_is_well_formed(rt); -+} -+ -+static void test_bcache_scenario3(void *fixture) -+{ -+ struct radix_tree *rt = fixture; -+ -+ #include "test/unit/rt_case1.c" -+} -+ -+//---------------------------------------------------------------- - #define T(path, desc, fn) register_test(ts, "/base/data-struct/radix-tree/" path, desc, fn) - - void radix_tree_tests(struct dm_list *all_tests) -@@ -442,6 +829,7 @@ void radix_tree_tests(struct dm_list *all_tests) - T("sparse-keys", "see what the memory usage is for sparsely distributed keys", test_sparse_keys); - T("remove-one", "remove one entry", test_remove_one); - T("remove-one-byte-keys", "remove many one byte keys", test_remove_one_byte_keys); -+ T("remove-one-byte-keys-reversed", "remove many one byte keys reversed", test_remove_one_byte_keys_reversed); - T("remove-prefix-keys", "remove a set of keys that have common prefixes", test_remove_prefix_keys); - T("remove-prefix-keys-reversed", "remove a set of keys that have common prefixes (reversed)", test_remove_prefix_keys_reversed); - T("remove-prefix", "remove a subrange", test_remove_prefix); -@@ -451,6 +839,11 @@ void radix_tree_tests(struct dm_list *all_tests) - T("iterate-subset", "iterate a subset of entries in tree", test_iterate_subset); - T("iterate-single", "iterate a subset that contains a single entry", test_iterate_single); - T("iterate-vary-middle", "iterate keys that vary in the middle", test_iterate_vary_middle); -+ T("remove-calls-dtr", "remove should call the dtr for the value", test_remove_calls_dtr); -+ T("destroy-calls-dtr", "destroy should call the dtr for all values", test_destroy_calls_dtr); -+ T("bcache-scenario", "A specific series of keys from a bcache scenario", test_bcache_scenario); -+ T("bcache-scenario-2", "A second series of keys from a bcache scenario", test_bcache_scenario2); -+ T("bcache-scenario-3", "A third series of keys from a bcache scenario", test_bcache_scenario3); - - dm_list_add(all_tests, &ts->list); - } -diff --git a/test/unit/rt_case1.c b/test/unit/rt_case1.c -new file mode 100644 -index 0000000..c1677d1 ---- /dev/null -+++ b/test/unit/rt_case1.c -@@ -0,0 +1,1669 @@ -+ __lookup_fails(rt, 6, 0); -+ __insert(rt, 6, 0, 0); -+ __lookup_fails(rt, 7, 0); -+ __insert(rt, 7, 0, 1); -+ __lookup_fails(rt, 8, 0); -+ __insert(rt, 8, 0, 2); -+ __lookup_fails(rt, 9, 0); -+ __insert(rt, 9, 0, 3); -+ __lookup_fails(rt, 10, 0); -+ __insert(rt, 10, 0, 4); -+ __lookup_fails(rt, 11, 0); -+ __insert(rt, 11, 0, 5); -+ __lookup_fails(rt, 12, 0); -+ __insert(rt, 12, 0, 6); -+ __lookup_fails(rt, 13, 0); -+ __insert(rt, 13, 0, 7); -+ __lookup_fails(rt, 14, 0); -+ __insert(rt, 14, 0, 8); -+ __lookup_fails(rt, 15, 0); -+ __insert(rt, 15, 0, 9); -+ __lookup_fails(rt, 16, 0); -+ __insert(rt, 16, 0, 10); -+ __lookup_fails(rt, 17, 0); -+ __insert(rt, 17, 0, 11); -+ 
__lookup_fails(rt, 18, 0); -+ __insert(rt, 18, 0, 12); -+ __lookup_fails(rt, 19, 0); -+ __insert(rt, 19, 0, 13); -+ __lookup_fails(rt, 20, 0); -+ __insert(rt, 20, 0, 14); -+ __lookup_fails(rt, 21, 0); -+ __insert(rt, 21, 0, 15); -+ __lookup_fails(rt, 22, 0); -+ __insert(rt, 22, 0, 16); -+ __lookup_fails(rt, 23, 0); -+ __insert(rt, 23, 0, 17); -+ __lookup_fails(rt, 24, 0); -+ __insert(rt, 24, 0, 18); -+ __lookup_fails(rt, 25, 0); -+ __insert(rt, 25, 0, 19); -+ __lookup_fails(rt, 26, 0); -+ __insert(rt, 26, 0, 20); -+ __lookup_fails(rt, 27, 0); -+ __insert(rt, 27, 0, 21); -+ __lookup_fails(rt, 28, 0); -+ __insert(rt, 28, 0, 22); -+ __lookup_fails(rt, 29, 0); -+ __insert(rt, 29, 0, 23); -+ __lookup_fails(rt, 30, 0); -+ __insert(rt, 30, 0, 24); -+ __lookup_fails(rt, 31, 0); -+ __insert(rt, 31, 0, 25); -+ __lookup_fails(rt, 32, 0); -+ __insert(rt, 32, 0, 26); -+ __lookup_fails(rt, 33, 0); -+ __insert(rt, 33, 0, 27); -+ __lookup_fails(rt, 34, 0); -+ __insert(rt, 34, 0, 28); -+ __lookup_fails(rt, 35, 0); -+ __insert(rt, 35, 0, 29); -+ __lookup_fails(rt, 36, 0); -+ __insert(rt, 36, 0, 30); -+ __lookup_fails(rt, 37, 0); -+ __insert(rt, 37, 0, 31); -+ __lookup_fails(rt, 38, 0); -+ __insert(rt, 38, 0, 32); -+ __lookup_fails(rt, 39, 0); -+ __insert(rt, 39, 0, 33); -+ __lookup_fails(rt, 40, 0); -+ __insert(rt, 40, 0, 34); -+ __lookup_fails(rt, 41, 0); -+ __insert(rt, 41, 0, 35); -+ __lookup_fails(rt, 42, 0); -+ __insert(rt, 42, 0, 36); -+ __lookup_fails(rt, 43, 0); -+ __insert(rt, 43, 0, 37); -+ __lookup_fails(rt, 44, 0); -+ __insert(rt, 44, 0, 38); -+ __lookup_fails(rt, 45, 0); -+ __insert(rt, 45, 0, 39); -+ __lookup_fails(rt, 46, 0); -+ __insert(rt, 46, 0, 40); -+ __lookup_fails(rt, 47, 0); -+ __insert(rt, 47, 0, 41); -+ __lookup_fails(rt, 48, 0); -+ __insert(rt, 48, 0, 42); -+ __lookup_fails(rt, 49, 0); -+ __insert(rt, 49, 0, 43); -+ __lookup_fails(rt, 50, 0); -+ __insert(rt, 50, 0, 44); -+ __lookup_fails(rt, 51, 0); -+ __insert(rt, 51, 0, 45); -+ __lookup_fails(rt, 52, 0); -+ __insert(rt, 52, 0, 46); -+ __lookup_fails(rt, 53, 0); -+ __insert(rt, 53, 0, 47); -+ __lookup_fails(rt, 54, 0); -+ __insert(rt, 54, 0, 48); -+ __lookup_fails(rt, 55, 0); -+ __insert(rt, 55, 0, 49); -+ __lookup_fails(rt, 56, 0); -+ __insert(rt, 56, 0, 50); -+ __lookup_fails(rt, 57, 0); -+ __insert(rt, 57, 0, 51); -+ __lookup_fails(rt, 58, 0); -+ __insert(rt, 58, 0, 52); -+ __lookup_fails(rt, 59, 0); -+ __insert(rt, 59, 0, 53); -+ __lookup_fails(rt, 60, 0); -+ __insert(rt, 60, 0, 54); -+ __lookup_fails(rt, 61, 0); -+ __insert(rt, 61, 0, 55); -+ __lookup_fails(rt, 62, 0); -+ __insert(rt, 62, 0, 56); -+ __lookup_fails(rt, 63, 0); -+ __insert(rt, 63, 0, 57); -+ __lookup_fails(rt, 64, 0); -+ __insert(rt, 64, 0, 58); -+ __lookup_fails(rt, 65, 0); -+ __insert(rt, 65, 0, 59); -+ __lookup_fails(rt, 66, 0); -+ __insert(rt, 66, 0, 60); -+ __lookup_fails(rt, 67, 0); -+ __insert(rt, 67, 0, 61); -+ __lookup_fails(rt, 68, 0); -+ __insert(rt, 68, 0, 62); -+ __lookup_fails(rt, 69, 0); -+ __insert(rt, 69, 0, 63); -+ __lookup_fails(rt, 70, 0); -+ __insert(rt, 70, 0, 64); -+ __lookup_fails(rt, 71, 0); -+ __insert(rt, 71, 0, 65); -+ __lookup_fails(rt, 72, 0); -+ __insert(rt, 72, 0, 66); -+ __lookup_fails(rt, 73, 0); -+ __insert(rt, 73, 0, 67); -+ __lookup_fails(rt, 74, 0); -+ __insert(rt, 74, 0, 68); -+ __lookup_fails(rt, 75, 0); -+ __insert(rt, 75, 0, 69); -+ __lookup_fails(rt, 76, 0); -+ __insert(rt, 76, 0, 70); -+ __lookup_fails(rt, 77, 0); -+ __insert(rt, 77, 0, 71); -+ __lookup_fails(rt, 78, 0); -+ __insert(rt, 78, 0, 72); -+ 
__lookup_fails(rt, 79, 0); -+ __insert(rt, 79, 0, 73); -+ __lookup_fails(rt, 80, 0); -+ __insert(rt, 80, 0, 74); -+ __lookup_fails(rt, 81, 0); -+ __insert(rt, 81, 0, 75); -+ __lookup_fails(rt, 82, 0); -+ __insert(rt, 82, 0, 76); -+ __lookup_fails(rt, 83, 0); -+ __insert(rt, 83, 0, 77); -+ __lookup_fails(rt, 84, 0); -+ __insert(rt, 84, 0, 78); -+ __lookup_fails(rt, 85, 0); -+ __insert(rt, 85, 0, 79); -+ __lookup_fails(rt, 86, 0); -+ __insert(rt, 86, 0, 80); -+ __lookup_fails(rt, 87, 0); -+ __insert(rt, 87, 0, 81); -+ __lookup_fails(rt, 88, 0); -+ __insert(rt, 88, 0, 82); -+ __lookup_fails(rt, 89, 0); -+ __insert(rt, 89, 0, 83); -+ __lookup_fails(rt, 90, 0); -+ __insert(rt, 90, 0, 84); -+ __lookup_fails(rt, 91, 0); -+ __insert(rt, 91, 0, 85); -+ __lookup_fails(rt, 92, 0); -+ __insert(rt, 92, 0, 86); -+ __lookup_fails(rt, 93, 0); -+ __insert(rt, 93, 0, 87); -+ __lookup_fails(rt, 94, 0); -+ __insert(rt, 94, 0, 88); -+ __lookup_fails(rt, 95, 0); -+ __insert(rt, 95, 0, 89); -+ __lookup_fails(rt, 96, 0); -+ __insert(rt, 96, 0, 90); -+ __lookup_fails(rt, 97, 0); -+ __insert(rt, 97, 0, 91); -+ __lookup_fails(rt, 98, 0); -+ __insert(rt, 98, 0, 92); -+ __lookup_fails(rt, 99, 0); -+ __insert(rt, 99, 0, 93); -+ __lookup_fails(rt, 100, 0); -+ __insert(rt, 100, 0, 94); -+ __lookup_fails(rt, 101, 0); -+ __insert(rt, 101, 0, 95); -+ __lookup_fails(rt, 102, 0); -+ __insert(rt, 102, 0, 96); -+ __lookup_fails(rt, 103, 0); -+ __insert(rt, 103, 0, 97); -+ __lookup_fails(rt, 104, 0); -+ __insert(rt, 104, 0, 98); -+ __lookup_fails(rt, 105, 0); -+ __insert(rt, 105, 0, 99); -+ __lookup_fails(rt, 106, 0); -+ __insert(rt, 106, 0, 100); -+ __lookup_fails(rt, 107, 0); -+ __insert(rt, 107, 0, 101); -+ __lookup_fails(rt, 108, 0); -+ __insert(rt, 108, 0, 102); -+ __lookup_fails(rt, 109, 0); -+ __insert(rt, 109, 0, 103); -+ __lookup_fails(rt, 110, 0); -+ __insert(rt, 110, 0, 104); -+ __lookup_fails(rt, 111, 0); -+ __insert(rt, 111, 0, 105); -+ __lookup_fails(rt, 112, 0); -+ __insert(rt, 112, 0, 106); -+ __lookup_fails(rt, 113, 0); -+ __insert(rt, 113, 0, 107); -+ __lookup_fails(rt, 114, 0); -+ __insert(rt, 114, 0, 108); -+ __lookup_fails(rt, 115, 0); -+ __insert(rt, 115, 0, 109); -+ __lookup_fails(rt, 116, 0); -+ __insert(rt, 116, 0, 110); -+ __lookup_fails(rt, 117, 0); -+ __insert(rt, 117, 0, 111); -+ __lookup_fails(rt, 118, 0); -+ __insert(rt, 118, 0, 112); -+ __lookup_fails(rt, 119, 0); -+ __insert(rt, 119, 0, 113); -+ __lookup_fails(rt, 120, 0); -+ __insert(rt, 120, 0, 114); -+ __lookup_fails(rt, 121, 0); -+ __insert(rt, 121, 0, 115); -+ __lookup_fails(rt, 122, 0); -+ __insert(rt, 122, 0, 116); -+ __lookup_fails(rt, 123, 0); -+ __insert(rt, 123, 0, 117); -+ __lookup_fails(rt, 124, 0); -+ __insert(rt, 124, 0, 118); -+ __lookup_fails(rt, 125, 0); -+ __insert(rt, 125, 0, 119); -+ __lookup_fails(rt, 126, 0); -+ __insert(rt, 126, 0, 120); -+ __lookup_fails(rt, 127, 0); -+ __insert(rt, 127, 0, 121); -+ __lookup_fails(rt, 128, 0); -+ __insert(rt, 128, 0, 122); -+ __lookup_fails(rt, 129, 0); -+ __insert(rt, 129, 0, 123); -+ __lookup_fails(rt, 130, 0); -+ __insert(rt, 130, 0, 124); -+ __lookup_fails(rt, 131, 0); -+ __insert(rt, 131, 0, 125); -+ __lookup_fails(rt, 132, 0); -+ __insert(rt, 132, 0, 126); -+ __lookup_fails(rt, 133, 0); -+ __insert(rt, 133, 0, 127); -+ __lookup_fails(rt, 134, 0); -+ __insert(rt, 134, 0, 128); -+ __lookup_fails(rt, 135, 0); -+ __insert(rt, 135, 0, 129); -+ __lookup_fails(rt, 136, 0); -+ __insert(rt, 136, 0, 130); -+ __lookup_fails(rt, 137, 0); -+ __insert(rt, 137, 0, 131); -+ __lookup_fails(rt, 138, 
0); -+ __insert(rt, 138, 0, 132); -+ __lookup_fails(rt, 139, 0); -+ __insert(rt, 139, 0, 133); -+ __lookup_fails(rt, 140, 0); -+ __insert(rt, 140, 0, 134); -+ __lookup_fails(rt, 141, 0); -+ __insert(rt, 141, 0, 135); -+ __lookup_fails(rt, 142, 0); -+ __insert(rt, 142, 0, 136); -+ __lookup_fails(rt, 143, 0); -+ __insert(rt, 143, 0, 137); -+ __lookup_fails(rt, 144, 0); -+ __insert(rt, 144, 0, 138); -+ __lookup_fails(rt, 145, 0); -+ __insert(rt, 145, 0, 139); -+ __lookup_fails(rt, 146, 0); -+ __insert(rt, 146, 0, 140); -+ __lookup_fails(rt, 147, 0); -+ __insert(rt, 147, 0, 141); -+ __lookup_fails(rt, 148, 0); -+ __insert(rt, 148, 0, 142); -+ __lookup_fails(rt, 149, 0); -+ __insert(rt, 149, 0, 143); -+ __lookup_fails(rt, 150, 0); -+ __insert(rt, 150, 0, 144); -+ __lookup_fails(rt, 151, 0); -+ __insert(rt, 151, 0, 145); -+ __lookup_fails(rt, 152, 0); -+ __insert(rt, 152, 0, 146); -+ __lookup_fails(rt, 153, 0); -+ __insert(rt, 153, 0, 147); -+ __lookup_fails(rt, 154, 0); -+ __insert(rt, 154, 0, 148); -+ __lookup_fails(rt, 155, 0); -+ __insert(rt, 155, 0, 149); -+ __lookup_fails(rt, 156, 0); -+ __insert(rt, 156, 0, 150); -+ __lookup_fails(rt, 157, 0); -+ __insert(rt, 157, 0, 151); -+ __lookup_fails(rt, 158, 0); -+ __insert(rt, 158, 0, 152); -+ __lookup_fails(rt, 159, 0); -+ __insert(rt, 159, 0, 153); -+ __lookup_fails(rt, 160, 0); -+ __insert(rt, 160, 0, 154); -+ __lookup_fails(rt, 161, 0); -+ __insert(rt, 161, 0, 155); -+ __lookup_fails(rt, 162, 0); -+ __insert(rt, 162, 0, 156); -+ __lookup_fails(rt, 163, 0); -+ __insert(rt, 163, 0, 157); -+ __lookup_fails(rt, 164, 0); -+ __insert(rt, 164, 0, 158); -+ __lookup_fails(rt, 165, 0); -+ __insert(rt, 165, 0, 159); -+ __lookup_fails(rt, 166, 0); -+ __insert(rt, 166, 0, 160); -+ __lookup_fails(rt, 167, 0); -+ __insert(rt, 167, 0, 161); -+ __lookup_fails(rt, 168, 0); -+ __insert(rt, 168, 0, 162); -+ __lookup_fails(rt, 169, 0); -+ __insert(rt, 169, 0, 163); -+ __lookup_fails(rt, 170, 0); -+ __insert(rt, 170, 0, 164); -+ __lookup_fails(rt, 171, 0); -+ __insert(rt, 171, 0, 165); -+ __lookup_fails(rt, 172, 0); -+ __insert(rt, 172, 0, 166); -+ __lookup_fails(rt, 173, 0); -+ __insert(rt, 173, 0, 167); -+ __lookup_fails(rt, 174, 0); -+ __insert(rt, 174, 0, 168); -+ __lookup_fails(rt, 175, 0); -+ __insert(rt, 175, 0, 169); -+ __lookup_fails(rt, 176, 0); -+ __insert(rt, 176, 0, 170); -+ __lookup_fails(rt, 177, 0); -+ __insert(rt, 177, 0, 171); -+ __lookup_fails(rt, 178, 0); -+ __insert(rt, 178, 0, 172); -+ __lookup_fails(rt, 179, 0); -+ __insert(rt, 179, 0, 173); -+ __lookup_fails(rt, 180, 0); -+ __insert(rt, 180, 0, 174); -+ __lookup_fails(rt, 181, 0); -+ __insert(rt, 181, 0, 175); -+ __lookup_fails(rt, 182, 0); -+ __insert(rt, 182, 0, 176); -+ __lookup_fails(rt, 183, 0); -+ __insert(rt, 183, 0, 177); -+ __lookup_fails(rt, 184, 0); -+ __insert(rt, 184, 0, 178); -+ __lookup_fails(rt, 185, 0); -+ __insert(rt, 185, 0, 179); -+ __lookup_fails(rt, 186, 0); -+ __insert(rt, 186, 0, 180); -+ __lookup_fails(rt, 187, 0); -+ __insert(rt, 187, 0, 181); -+ __lookup_fails(rt, 188, 0); -+ __insert(rt, 188, 0, 182); -+ __lookup_fails(rt, 189, 0); -+ __insert(rt, 189, 0, 183); -+ __lookup_fails(rt, 190, 0); -+ __insert(rt, 190, 0, 184); -+ __lookup_fails(rt, 191, 0); -+ __insert(rt, 191, 0, 185); -+ __lookup_fails(rt, 192, 0); -+ __insert(rt, 192, 0, 186); -+ __lookup_fails(rt, 193, 0); -+ __insert(rt, 193, 0, 187); -+ __lookup_fails(rt, 194, 0); -+ __insert(rt, 194, 0, 188); -+ __lookup_fails(rt, 195, 0); -+ __insert(rt, 195, 0, 189); -+ __lookup_fails(rt, 196, 0); -+ 
__insert(rt, 196, 0, 190); -+ __lookup_fails(rt, 197, 0); -+ __insert(rt, 197, 0, 191); -+ __lookup_fails(rt, 198, 0); -+ __insert(rt, 198, 0, 192); -+ __lookup_fails(rt, 199, 0); -+ __insert(rt, 199, 0, 193); -+ __lookup_fails(rt, 200, 0); -+ __insert(rt, 200, 0, 194); -+ __lookup_fails(rt, 201, 0); -+ __insert(rt, 201, 0, 195); -+ __lookup_fails(rt, 202, 0); -+ __insert(rt, 202, 0, 196); -+ __lookup_fails(rt, 203, 0); -+ __insert(rt, 203, 0, 197); -+ __lookup_fails(rt, 204, 0); -+ __insert(rt, 204, 0, 198); -+ __lookup_fails(rt, 205, 0); -+ __insert(rt, 205, 0, 199); -+ __lookup_matches(rt, 6, 0, 0); -+ __invalidate(rt, 6); -+ __lookup_matches(rt, 7, 0, 1); -+ __invalidate(rt, 7); -+ __lookup_matches(rt, 8, 0, 2); -+ __invalidate(rt, 8); -+ __lookup_matches(rt, 9, 0, 3); -+ __invalidate(rt, 9); -+ __lookup_matches(rt, 10, 0, 4); -+ __invalidate(rt, 10); -+ __lookup_matches(rt, 11, 0, 5); -+ __invalidate(rt, 11); -+ __lookup_matches(rt, 12, 0, 6); -+ __lookup_matches(rt, 13, 0, 7); -+ __invalidate(rt, 13); -+ __lookup_matches(rt, 14, 0, 8); -+ __invalidate(rt, 14); -+ __lookup_matches(rt, 15, 0, 9); -+ __invalidate(rt, 15); -+ __lookup_matches(rt, 16, 0, 10); -+ __invalidate(rt, 16); -+ __lookup_matches(rt, 17, 0, 11); -+ __invalidate(rt, 17); -+ __lookup_matches(rt, 18, 0, 12); -+ __invalidate(rt, 18); -+ __lookup_matches(rt, 19, 0, 13); -+ __invalidate(rt, 19); -+ __lookup_matches(rt, 20, 0, 14); -+ __invalidate(rt, 20); -+ __lookup_matches(rt, 21, 0, 15); -+ __invalidate(rt, 21); -+ __lookup_matches(rt, 22, 0, 16); -+ __invalidate(rt, 22); -+ __lookup_matches(rt, 23, 0, 17); -+ __invalidate(rt, 23); -+ __lookup_matches(rt, 24, 0, 18); -+ __invalidate(rt, 24); -+ __lookup_matches(rt, 25, 0, 19); -+ __invalidate(rt, 25); -+ __lookup_matches(rt, 26, 0, 20); -+ __invalidate(rt, 26); -+ __lookup_matches(rt, 27, 0, 21); -+ __invalidate(rt, 27); -+ __lookup_matches(rt, 28, 0, 22); -+ __invalidate(rt, 28); -+ __lookup_matches(rt, 29, 0, 23); -+ __invalidate(rt, 29); -+ __lookup_matches(rt, 30, 0, 24); -+ __invalidate(rt, 30); -+ __lookup_matches(rt, 31, 0, 25); -+ __invalidate(rt, 31); -+ __lookup_matches(rt, 32, 0, 26); -+ __invalidate(rt, 32); -+ __lookup_matches(rt, 33, 0, 27); -+ __invalidate(rt, 33); -+ __lookup_matches(rt, 34, 0, 28); -+ __invalidate(rt, 34); -+ __lookup_matches(rt, 35, 0, 29); -+ __invalidate(rt, 35); -+ __lookup_matches(rt, 36, 0, 30); -+ __invalidate(rt, 36); -+ __lookup_matches(rt, 37, 0, 31); -+ __invalidate(rt, 37); -+ __lookup_matches(rt, 38, 0, 32); -+ __invalidate(rt, 38); -+ __lookup_matches(rt, 39, 0, 33); -+ __invalidate(rt, 39); -+ __lookup_matches(rt, 40, 0, 34); -+ __invalidate(rt, 40); -+ __lookup_matches(rt, 41, 0, 35); -+ __invalidate(rt, 41); -+ __lookup_matches(rt, 42, 0, 36); -+ __invalidate(rt, 42); -+ __lookup_matches(rt, 43, 0, 37); -+ __invalidate(rt, 43); -+ __lookup_matches(rt, 44, 0, 38); -+ __invalidate(rt, 44); -+ __lookup_matches(rt, 45, 0, 39); -+ __invalidate(rt, 45); -+ __lookup_matches(rt, 46, 0, 40); -+ __lookup_fails(rt, 46, 5); -+ __insert(rt, 46, 5, 200); -+ __lookup_matches(rt, 46, 5, 200); -+ __lookup_fails(rt, 46, 6); -+ __insert(rt, 46, 6, 201); -+ __lookup_fails(rt, 46, 7); -+ __insert(rt, 46, 7, 202); -+ __lookup_fails(rt, 46, 8); -+ __insert(rt, 46, 8, 203); -+ __lookup_matches(rt, 46, 5, 200); -+ __lookup_matches(rt, 46, 6, 201); -+ __lookup_matches(rt, 46, 7, 202); -+ __lookup_matches(rt, 46, 8, 203); -+ __lookup_matches(rt, 47, 0, 41); -+ __invalidate(rt, 47); -+ __lookup_matches(rt, 48, 0, 42); -+ __invalidate(rt, 48); -+ 
__lookup_matches(rt, 49, 0, 43); -+ __invalidate(rt, 49); -+ __lookup_matches(rt, 50, 0, 44); -+ __invalidate(rt, 50); -+ __lookup_matches(rt, 51, 0, 45); -+ __invalidate(rt, 51); -+ __lookup_matches(rt, 52, 0, 46); -+ __invalidate(rt, 52); -+ __lookup_matches(rt, 53, 0, 47); -+ __invalidate(rt, 53); -+ __lookup_matches(rt, 54, 0, 48); -+ __invalidate(rt, 54); -+ __lookup_matches(rt, 55, 0, 49); -+ __invalidate(rt, 55); -+ __lookup_matches(rt, 56, 0, 50); -+ __invalidate(rt, 56); -+ __lookup_matches(rt, 57, 0, 51); -+ __invalidate(rt, 57); -+ __lookup_matches(rt, 58, 0, 52); -+ __invalidate(rt, 58); -+ __lookup_matches(rt, 59, 0, 53); -+ __invalidate(rt, 59); -+ __lookup_matches(rt, 60, 0, 54); -+ __invalidate(rt, 60); -+ __lookup_matches(rt, 61, 0, 55); -+ __invalidate(rt, 61); -+ __lookup_matches(rt, 62, 0, 56); -+ __invalidate(rt, 62); -+ __lookup_matches(rt, 63, 0, 57); -+ __invalidate(rt, 63); -+ __lookup_matches(rt, 64, 0, 58); -+ __invalidate(rt, 64); -+ __lookup_matches(rt, 65, 0, 59); -+ __lookup_fails(rt, 65, 1); -+ __insert(rt, 65, 1, 204); -+ __lookup_fails(rt, 65, 2); -+ __insert(rt, 65, 2, 205); -+ __lookup_fails(rt, 65, 3); -+ __insert(rt, 65, 3, 206); -+ __lookup_fails(rt, 65, 4); -+ __insert(rt, 65, 4, 207); -+ __lookup_matches(rt, 65, 0, 59); -+ __lookup_matches(rt, 65, 1, 204); -+ __lookup_matches(rt, 65, 2, 205); -+ __lookup_matches(rt, 65, 3, 206); -+ __lookup_matches(rt, 65, 4, 207); -+ __lookup_matches(rt, 66, 0, 60); -+ __invalidate(rt, 66); -+ __lookup_matches(rt, 67, 0, 61); -+ __invalidate(rt, 67); -+ __lookup_matches(rt, 68, 0, 62); -+ __invalidate(rt, 68); -+ __lookup_matches(rt, 69, 0, 63); -+ __invalidate(rt, 69); -+ __lookup_matches(rt, 70, 0, 64); -+ __invalidate(rt, 70); -+ __lookup_matches(rt, 71, 0, 65); -+ __invalidate(rt, 71); -+ __lookup_matches(rt, 72, 0, 66); -+ __invalidate(rt, 72); -+ __lookup_matches(rt, 73, 0, 67); -+ __invalidate(rt, 73); -+ __lookup_matches(rt, 74, 0, 68); -+ __invalidate(rt, 74); -+ __lookup_matches(rt, 75, 0, 69); -+ __invalidate(rt, 75); -+ __lookup_matches(rt, 76, 0, 70); -+ __invalidate(rt, 76); -+ __lookup_matches(rt, 77, 0, 71); -+ __invalidate(rt, 77); -+ __lookup_matches(rt, 78, 0, 72); -+ __invalidate(rt, 78); -+ __lookup_matches(rt, 79, 0, 73); -+ __invalidate(rt, 79); -+ __lookup_matches(rt, 80, 0, 74); -+ __invalidate(rt, 80); -+ __lookup_matches(rt, 81, 0, 75); -+ __invalidate(rt, 81); -+ __lookup_matches(rt, 82, 0, 76); -+ __invalidate(rt, 82); -+ __lookup_matches(rt, 83, 0, 77); -+ __invalidate(rt, 83); -+ __lookup_matches(rt, 84, 0, 78); -+ __invalidate(rt, 84); -+ __lookup_matches(rt, 85, 0, 79); -+ __invalidate(rt, 85); -+ __lookup_matches(rt, 86, 0, 80); -+ __invalidate(rt, 86); -+ __lookup_matches(rt, 87, 0, 81); -+ __invalidate(rt, 87); -+ __lookup_matches(rt, 88, 0, 82); -+ __invalidate(rt, 88); -+ __lookup_matches(rt, 89, 0, 83); -+ __invalidate(rt, 89); -+ __lookup_matches(rt, 90, 0, 84); -+ __invalidate(rt, 90); -+ __lookup_matches(rt, 91, 0, 85); -+ __invalidate(rt, 91); -+ __lookup_matches(rt, 92, 0, 86); -+ __invalidate(rt, 92); -+ __lookup_matches(rt, 93, 0, 87); -+ __invalidate(rt, 93); -+ __lookup_matches(rt, 94, 0, 88); -+ __invalidate(rt, 94); -+ __lookup_matches(rt, 95, 0, 89); -+ __invalidate(rt, 95); -+ __lookup_matches(rt, 96, 0, 90); -+ __lookup_matches(rt, 97, 0, 91); -+ __invalidate(rt, 97); -+ __lookup_matches(rt, 98, 0, 92); -+ __invalidate(rt, 98); -+ __lookup_matches(rt, 99, 0, 93); -+ __invalidate(rt, 99); -+ __lookup_matches(rt, 100, 0, 94); -+ __invalidate(rt, 100); -+ 
__lookup_matches(rt, 101, 0, 95); -+ __invalidate(rt, 101); -+ __lookup_matches(rt, 102, 0, 96); -+ __invalidate(rt, 102); -+ __lookup_matches(rt, 103, 0, 97); -+ __invalidate(rt, 103); -+ __lookup_matches(rt, 104, 0, 98); -+ __invalidate(rt, 104); -+ __lookup_matches(rt, 105, 0, 99); -+ __invalidate(rt, 105); -+ __lookup_matches(rt, 106, 0, 100); -+ __invalidate(rt, 106); -+ __lookup_matches(rt, 107, 0, 101); -+ __invalidate(rt, 107); -+ __lookup_matches(rt, 108, 0, 102); -+ __invalidate(rt, 108); -+ __lookup_matches(rt, 109, 0, 103); -+ __invalidate(rt, 109); -+ __lookup_matches(rt, 110, 0, 104); -+ __invalidate(rt, 110); -+ __lookup_matches(rt, 111, 0, 105); -+ __invalidate(rt, 111); -+ __lookup_matches(rt, 112, 0, 106); -+ __invalidate(rt, 112); -+ __lookup_matches(rt, 113, 0, 107); -+ __invalidate(rt, 113); -+ __lookup_matches(rt, 114, 0, 108); -+ __invalidate(rt, 114); -+ __lookup_matches(rt, 115, 0, 109); -+ __invalidate(rt, 115); -+ __lookup_matches(rt, 116, 0, 110); -+ __invalidate(rt, 116); -+ __lookup_matches(rt, 117, 0, 111); -+ __invalidate(rt, 117); -+ __lookup_matches(rt, 118, 0, 112); -+ __invalidate(rt, 118); -+ __lookup_matches(rt, 119, 0, 113); -+ __invalidate(rt, 119); -+ __lookup_matches(rt, 120, 0, 114); -+ __invalidate(rt, 120); -+ __lookup_matches(rt, 121, 0, 115); -+ __invalidate(rt, 121); -+ __lookup_matches(rt, 122, 0, 116); -+ __invalidate(rt, 122); -+ __lookup_matches(rt, 123, 0, 117); -+ __invalidate(rt, 123); -+ __lookup_matches(rt, 124, 0, 118); -+ __invalidate(rt, 124); -+ __lookup_matches(rt, 125, 0, 119); -+ __invalidate(rt, 125); -+ __lookup_matches(rt, 126, 0, 120); -+ __invalidate(rt, 126); -+ __lookup_matches(rt, 127, 0, 121); -+ __invalidate(rt, 127); -+ __lookup_matches(rt, 128, 0, 122); -+ __invalidate(rt, 128); -+ __lookup_matches(rt, 129, 0, 123); -+ __invalidate(rt, 129); -+ __lookup_matches(rt, 130, 0, 124); -+ __invalidate(rt, 130); -+ __lookup_matches(rt, 131, 0, 125); -+ __invalidate(rt, 131); -+ __lookup_matches(rt, 132, 0, 126); -+ __invalidate(rt, 132); -+ __lookup_matches(rt, 133, 0, 127); -+ __invalidate(rt, 133); -+ __lookup_matches(rt, 134, 0, 128); -+ __invalidate(rt, 134); -+ __lookup_matches(rt, 135, 0, 129); -+ __invalidate(rt, 135); -+ __lookup_matches(rt, 136, 0, 130); -+ __invalidate(rt, 136); -+ __lookup_matches(rt, 137, 0, 131); -+ __invalidate(rt, 137); -+ __lookup_matches(rt, 138, 0, 132); -+ __invalidate(rt, 138); -+ __lookup_matches(rt, 139, 0, 133); -+ __invalidate(rt, 139); -+ __lookup_matches(rt, 140, 0, 134); -+ __invalidate(rt, 140); -+ __lookup_matches(rt, 141, 0, 135); -+ __invalidate(rt, 141); -+ __lookup_matches(rt, 142, 0, 136); -+ __invalidate(rt, 142); -+ __lookup_matches(rt, 143, 0, 137); -+ __invalidate(rt, 143); -+ __lookup_matches(rt, 144, 0, 138); -+ __invalidate(rt, 144); -+ __lookup_matches(rt, 145, 0, 139); -+ __invalidate(rt, 145); -+ __lookup_matches(rt, 146, 0, 140); -+ __invalidate(rt, 146); -+ __lookup_matches(rt, 147, 0, 141); -+ __invalidate(rt, 147); -+ __lookup_matches(rt, 148, 0, 142); -+ __invalidate(rt, 148); -+ __lookup_matches(rt, 149, 0, 143); -+ __invalidate(rt, 149); -+ __lookup_matches(rt, 150, 0, 144); -+ __invalidate(rt, 150); -+ __lookup_matches(rt, 151, 0, 145); -+ __invalidate(rt, 151); -+ __lookup_matches(rt, 152, 0, 146); -+ __invalidate(rt, 152); -+ __lookup_matches(rt, 153, 0, 147); -+ __invalidate(rt, 153); -+ __lookup_matches(rt, 154, 0, 148); -+ __invalidate(rt, 154); -+ __lookup_matches(rt, 155, 0, 149); -+ __invalidate(rt, 155); -+ __lookup_matches(rt, 156, 0, 150); -+ 
__invalidate(rt, 156); -+ __lookup_matches(rt, 157, 0, 151); -+ __invalidate(rt, 157); -+ __lookup_matches(rt, 158, 0, 152); -+ __invalidate(rt, 158); -+ __lookup_matches(rt, 159, 0, 153); -+ __invalidate(rt, 159); -+ __lookup_matches(rt, 160, 0, 154); -+ __invalidate(rt, 160); -+ __lookup_matches(rt, 161, 0, 155); -+ __invalidate(rt, 161); -+ __lookup_matches(rt, 162, 0, 156); -+ __invalidate(rt, 162); -+ __lookup_matches(rt, 163, 0, 157); -+ __lookup_matches(rt, 164, 0, 158); -+ __invalidate(rt, 164); -+ __lookup_matches(rt, 165, 0, 159); -+ __invalidate(rt, 165); -+ __lookup_matches(rt, 166, 0, 160); -+ __invalidate(rt, 166); -+ __lookup_matches(rt, 167, 0, 161); -+ __invalidate(rt, 167); -+ __lookup_matches(rt, 168, 0, 162); -+ __invalidate(rt, 168); -+ __lookup_matches(rt, 169, 0, 163); -+ __invalidate(rt, 169); -+ __lookup_matches(rt, 170, 0, 164); -+ __invalidate(rt, 170); -+ __lookup_matches(rt, 171, 0, 165); -+ __invalidate(rt, 171); -+ __lookup_matches(rt, 172, 0, 166); -+ __invalidate(rt, 172); -+ __lookup_matches(rt, 173, 0, 167); -+ __invalidate(rt, 173); -+ __lookup_matches(rt, 174, 0, 168); -+ __invalidate(rt, 174); -+ __lookup_matches(rt, 175, 0, 169); -+ __invalidate(rt, 175); -+ __lookup_matches(rt, 176, 0, 170); -+ __invalidate(rt, 176); -+ __lookup_matches(rt, 177, 0, 171); -+ __invalidate(rt, 177); -+ __lookup_matches(rt, 178, 0, 172); -+ __invalidate(rt, 178); -+ __lookup_matches(rt, 179, 0, 173); -+ __invalidate(rt, 179); -+ __lookup_matches(rt, 180, 0, 174); -+ __invalidate(rt, 180); -+ __lookup_matches(rt, 181, 0, 175); -+ __invalidate(rt, 181); -+ __lookup_matches(rt, 182, 0, 176); -+ __invalidate(rt, 182); -+ __lookup_matches(rt, 183, 0, 177); -+ __invalidate(rt, 183); -+ __lookup_matches(rt, 184, 0, 178); -+ __invalidate(rt, 184); -+ __lookup_matches(rt, 185, 0, 179); -+ __invalidate(rt, 185); -+ __lookup_matches(rt, 186, 0, 180); -+ __invalidate(rt, 186); -+ __lookup_matches(rt, 187, 0, 181); -+ __invalidate(rt, 187); -+ __lookup_matches(rt, 188, 0, 182); -+ __invalidate(rt, 188); -+ __lookup_matches(rt, 189, 0, 183); -+ __invalidate(rt, 189); -+ __lookup_matches(rt, 190, 0, 184); -+ __invalidate(rt, 190); -+ __lookup_matches(rt, 191, 0, 185); -+ __invalidate(rt, 191); -+ __lookup_matches(rt, 192, 0, 186); -+ __invalidate(rt, 192); -+ __lookup_matches(rt, 193, 0, 187); -+ __invalidate(rt, 193); -+ __lookup_matches(rt, 194, 0, 188); -+ __invalidate(rt, 194); -+ __lookup_matches(rt, 195, 0, 189); -+ __invalidate(rt, 195); -+ __lookup_matches(rt, 196, 0, 190); -+ __invalidate(rt, 196); -+ __lookup_matches(rt, 197, 0, 191); -+ __invalidate(rt, 197); -+ __lookup_matches(rt, 198, 0, 192); -+ __invalidate(rt, 198); -+ __lookup_matches(rt, 199, 0, 193); -+ __invalidate(rt, 199); -+ __lookup_matches(rt, 200, 0, 194); -+ __invalidate(rt, 200); -+ __lookup_matches(rt, 201, 0, 195); -+ __invalidate(rt, 201); -+ __lookup_matches(rt, 202, 0, 196); -+ __invalidate(rt, 202); -+ __lookup_matches(rt, 203, 0, 197); -+ __invalidate(rt, 203); -+ __lookup_matches(rt, 204, 0, 198); -+ __invalidate(rt, 204); -+ __lookup_matches(rt, 205, 0, 199); -+ __invalidate(rt, 205); -+ __lookup_fails(rt, 6, 0); -+ __insert(rt, 6, 0, 208); -+ __lookup_fails(rt, 7, 0); -+ __insert(rt, 7, 0, 209); -+ __lookup_fails(rt, 8, 0); -+ __insert(rt, 8, 0, 210); -+ __lookup_fails(rt, 9, 0); -+ __insert(rt, 9, 0, 211); -+ __lookup_fails(rt, 10, 0); -+ __insert(rt, 10, 0, 212); -+ __lookup_fails(rt, 11, 0); -+ __insert(rt, 11, 0, 213); -+ __lookup_fails(rt, 13, 0); -+ __insert(rt, 13, 0, 214); -+ 
__lookup_fails(rt, 14, 0); -+ __insert(rt, 14, 0, 215); -+ __lookup_fails(rt, 15, 0); -+ __insert(rt, 15, 0, 216); -+ __lookup_fails(rt, 16, 0); -+ __insert(rt, 16, 0, 217); -+ __lookup_fails(rt, 17, 0); -+ __insert(rt, 17, 0, 218); -+ __lookup_fails(rt, 18, 0); -+ __insert(rt, 18, 0, 219); -+ __lookup_fails(rt, 19, 0); -+ __insert(rt, 19, 0, 220); -+ __lookup_fails(rt, 20, 0); -+ __insert(rt, 20, 0, 221); -+ __lookup_fails(rt, 21, 0); -+ __insert(rt, 21, 0, 222); -+ __lookup_fails(rt, 22, 0); -+ __insert(rt, 22, 0, 223); -+ __lookup_fails(rt, 23, 0); -+ __insert(rt, 23, 0, 224); -+ __lookup_fails(rt, 24, 0); -+ __insert(rt, 24, 0, 225); -+ __lookup_fails(rt, 25, 0); -+ __insert(rt, 25, 0, 226); -+ __lookup_fails(rt, 26, 0); -+ __insert(rt, 26, 0, 227); -+ __lookup_fails(rt, 27, 0); -+ __insert(rt, 27, 0, 228); -+ __lookup_fails(rt, 28, 0); -+ __insert(rt, 28, 0, 229); -+ __lookup_fails(rt, 29, 0); -+ __insert(rt, 29, 0, 230); -+ __lookup_fails(rt, 30, 0); -+ __insert(rt, 30, 0, 231); -+ __lookup_fails(rt, 31, 0); -+ __insert(rt, 31, 0, 232); -+ __lookup_fails(rt, 32, 0); -+ __insert(rt, 32, 0, 233); -+ __lookup_fails(rt, 33, 0); -+ __insert(rt, 33, 0, 234); -+ __lookup_fails(rt, 34, 0); -+ __insert(rt, 34, 0, 235); -+ __lookup_fails(rt, 35, 0); -+ __insert(rt, 35, 0, 236); -+ __lookup_fails(rt, 36, 0); -+ __insert(rt, 36, 0, 237); -+ __lookup_fails(rt, 37, 0); -+ __insert(rt, 37, 0, 238); -+ __lookup_fails(rt, 38, 0); -+ __insert(rt, 38, 0, 239); -+ __lookup_fails(rt, 39, 0); -+ __insert(rt, 39, 0, 240); -+ __lookup_fails(rt, 40, 0); -+ __insert(rt, 40, 0, 241); -+ __lookup_fails(rt, 41, 0); -+ __insert(rt, 41, 0, 242); -+ __lookup_fails(rt, 42, 0); -+ __insert(rt, 42, 0, 243); -+ __lookup_fails(rt, 43, 0); -+ __insert(rt, 43, 0, 244); -+ __lookup_fails(rt, 44, 0); -+ __insert(rt, 44, 0, 245); -+ __lookup_fails(rt, 45, 0); -+ __insert(rt, 45, 0, 246); -+ __lookup_fails(rt, 47, 0); -+ __insert(rt, 47, 0, 247); -+ __lookup_fails(rt, 48, 0); -+ __insert(rt, 48, 0, 248); -+ __lookup_fails(rt, 49, 0); -+ __insert(rt, 49, 0, 249); -+ __lookup_fails(rt, 50, 0); -+ __insert(rt, 50, 0, 250); -+ __lookup_fails(rt, 51, 0); -+ __insert(rt, 51, 0, 251); -+ __lookup_fails(rt, 52, 0); -+ __insert(rt, 52, 0, 252); -+ __lookup_fails(rt, 53, 0); -+ __insert(rt, 53, 0, 253); -+ __lookup_fails(rt, 54, 0); -+ __insert(rt, 54, 0, 254); -+ __lookup_fails(rt, 55, 0); -+ __insert(rt, 55, 0, 255); -+ __lookup_fails(rt, 56, 0); -+ __insert(rt, 56, 0, 256); -+ __lookup_fails(rt, 57, 0); -+ __insert(rt, 57, 0, 257); -+ __lookup_fails(rt, 58, 0); -+ __insert(rt, 58, 0, 258); -+ __lookup_fails(rt, 59, 0); -+ __insert(rt, 59, 0, 259); -+ __lookup_fails(rt, 60, 0); -+ __insert(rt, 60, 0, 260); -+ __lookup_fails(rt, 61, 0); -+ __insert(rt, 61, 0, 261); -+ __lookup_fails(rt, 62, 0); -+ __insert(rt, 62, 0, 262); -+ __lookup_fails(rt, 63, 0); -+ __insert(rt, 63, 0, 263); -+ __lookup_fails(rt, 64, 0); -+ __insert(rt, 64, 0, 264); -+ __lookup_fails(rt, 66, 0); -+ __insert(rt, 66, 0, 265); -+ __lookup_fails(rt, 67, 0); -+ __insert(rt, 67, 0, 266); -+ __lookup_fails(rt, 68, 0); -+ __insert(rt, 68, 0, 267); -+ __lookup_fails(rt, 69, 0); -+ __insert(rt, 69, 0, 268); -+ __lookup_fails(rt, 70, 0); -+ __insert(rt, 70, 0, 269); -+ __lookup_fails(rt, 71, 0); -+ __insert(rt, 71, 0, 270); -+ __lookup_fails(rt, 72, 0); -+ __insert(rt, 72, 0, 271); -+ __lookup_fails(rt, 73, 0); -+ __insert(rt, 73, 0, 272); -+ __lookup_fails(rt, 74, 0); -+ __insert(rt, 74, 0, 273); -+ __lookup_fails(rt, 75, 0); -+ __insert(rt, 75, 0, 274); -+ 
__lookup_fails(rt, 76, 0); -+ __insert(rt, 76, 0, 275); -+ __lookup_fails(rt, 77, 0); -+ __insert(rt, 77, 0, 276); -+ __lookup_fails(rt, 78, 0); -+ __insert(rt, 78, 0, 277); -+ __lookup_fails(rt, 79, 0); -+ __insert(rt, 79, 0, 278); -+ __lookup_fails(rt, 80, 0); -+ __insert(rt, 80, 0, 279); -+ __lookup_fails(rt, 81, 0); -+ __insert(rt, 81, 0, 280); -+ __lookup_fails(rt, 82, 0); -+ __insert(rt, 82, 0, 281); -+ __lookup_fails(rt, 83, 0); -+ __insert(rt, 83, 0, 282); -+ __lookup_fails(rt, 84, 0); -+ __insert(rt, 84, 0, 283); -+ __lookup_fails(rt, 85, 0); -+ __insert(rt, 85, 0, 284); -+ __lookup_fails(rt, 86, 0); -+ __insert(rt, 86, 0, 285); -+ __lookup_fails(rt, 87, 0); -+ __insert(rt, 87, 0, 286); -+ __lookup_fails(rt, 88, 0); -+ __insert(rt, 88, 0, 287); -+ __lookup_fails(rt, 89, 0); -+ __insert(rt, 89, 0, 288); -+ __lookup_fails(rt, 90, 0); -+ __insert(rt, 90, 0, 289); -+ __lookup_fails(rt, 91, 0); -+ __insert(rt, 91, 0, 290); -+ __lookup_fails(rt, 92, 0); -+ __insert(rt, 92, 0, 291); -+ __lookup_fails(rt, 93, 0); -+ __insert(rt, 93, 0, 292); -+ __lookup_fails(rt, 94, 0); -+ __insert(rt, 94, 0, 293); -+ __lookup_fails(rt, 95, 0); -+ __insert(rt, 95, 0, 294); -+ __lookup_fails(rt, 97, 0); -+ __insert(rt, 97, 0, 295); -+ __lookup_fails(rt, 98, 0); -+ __insert(rt, 98, 0, 296); -+ __lookup_fails(rt, 99, 0); -+ __insert(rt, 99, 0, 297); -+ __lookup_fails(rt, 100, 0); -+ __insert(rt, 100, 0, 298); -+ __lookup_fails(rt, 101, 0); -+ __insert(rt, 101, 0, 299); -+ __lookup_fails(rt, 102, 0); -+ __insert(rt, 102, 0, 300); -+ __lookup_fails(rt, 103, 0); -+ __insert(rt, 103, 0, 301); -+ __lookup_fails(rt, 104, 0); -+ __insert(rt, 104, 0, 302); -+ __lookup_fails(rt, 105, 0); -+ __insert(rt, 105, 0, 303); -+ __lookup_fails(rt, 106, 0); -+ __insert(rt, 106, 0, 304); -+ __lookup_fails(rt, 107, 0); -+ __insert(rt, 107, 0, 305); -+ __lookup_fails(rt, 108, 0); -+ __insert(rt, 108, 0, 306); -+ __lookup_fails(rt, 109, 0); -+ __insert(rt, 109, 0, 307); -+ __lookup_fails(rt, 110, 0); -+ __insert(rt, 110, 0, 308); -+ __lookup_fails(rt, 111, 0); -+ __insert(rt, 111, 0, 309); -+ __lookup_fails(rt, 112, 0); -+ __insert(rt, 112, 0, 310); -+ __lookup_fails(rt, 113, 0); -+ __insert(rt, 113, 0, 311); -+ __lookup_fails(rt, 114, 0); -+ __insert(rt, 114, 0, 312); -+ __lookup_fails(rt, 115, 0); -+ __insert(rt, 115, 0, 313); -+ __lookup_fails(rt, 116, 0); -+ __insert(rt, 116, 0, 314); -+ __lookup_fails(rt, 117, 0); -+ __insert(rt, 117, 0, 315); -+ __lookup_fails(rt, 118, 0); -+ __insert(rt, 118, 0, 316); -+ __lookup_fails(rt, 119, 0); -+ __insert(rt, 119, 0, 317); -+ __lookup_fails(rt, 120, 0); -+ __insert(rt, 120, 0, 318); -+ __lookup_fails(rt, 121, 0); -+ __insert(rt, 121, 0, 319); -+ __lookup_fails(rt, 122, 0); -+ __insert(rt, 122, 0, 320); -+ __lookup_fails(rt, 123, 0); -+ __insert(rt, 123, 0, 321); -+ __lookup_fails(rt, 124, 0); -+ __insert(rt, 124, 0, 322); -+ __lookup_fails(rt, 125, 0); -+ __insert(rt, 125, 0, 323); -+ __lookup_fails(rt, 126, 0); -+ __insert(rt, 126, 0, 324); -+ __lookup_fails(rt, 127, 0); -+ __insert(rt, 127, 0, 325); -+ __lookup_fails(rt, 128, 0); -+ __insert(rt, 128, 0, 326); -+ __lookup_fails(rt, 129, 0); -+ __insert(rt, 129, 0, 327); -+ __lookup_fails(rt, 130, 0); -+ __insert(rt, 130, 0, 328); -+ __lookup_fails(rt, 131, 0); -+ __insert(rt, 131, 0, 329); -+ __lookup_fails(rt, 132, 0); -+ __insert(rt, 132, 0, 330); -+ __lookup_fails(rt, 133, 0); -+ __insert(rt, 133, 0, 331); -+ __lookup_fails(rt, 134, 0); -+ __insert(rt, 134, 0, 332); -+ __lookup_fails(rt, 135, 0); -+ __insert(rt, 135, 0, 333); -+ 
__lookup_fails(rt, 136, 0); -+ __insert(rt, 136, 0, 334); -+ __lookup_fails(rt, 137, 0); -+ __insert(rt, 137, 0, 335); -+ __lookup_fails(rt, 138, 0); -+ __insert(rt, 138, 0, 336); -+ __lookup_fails(rt, 139, 0); -+ __insert(rt, 139, 0, 337); -+ __lookup_fails(rt, 140, 0); -+ __insert(rt, 140, 0, 338); -+ __lookup_fails(rt, 141, 0); -+ __insert(rt, 141, 0, 339); -+ __lookup_fails(rt, 142, 0); -+ __insert(rt, 142, 0, 340); -+ __lookup_fails(rt, 143, 0); -+ __insert(rt, 143, 0, 341); -+ __lookup_fails(rt, 144, 0); -+ __insert(rt, 144, 0, 342); -+ __lookup_fails(rt, 145, 0); -+ __insert(rt, 145, 0, 343); -+ __lookup_fails(rt, 146, 0); -+ __insert(rt, 146, 0, 344); -+ __lookup_fails(rt, 147, 0); -+ __insert(rt, 147, 0, 345); -+ __lookup_fails(rt, 148, 0); -+ __insert(rt, 148, 0, 346); -+ __lookup_fails(rt, 149, 0); -+ __insert(rt, 149, 0, 347); -+ __lookup_fails(rt, 150, 0); -+ __insert(rt, 150, 0, 348); -+ __lookup_fails(rt, 151, 0); -+ __insert(rt, 151, 0, 349); -+ __lookup_fails(rt, 152, 0); -+ __insert(rt, 152, 0, 350); -+ __lookup_fails(rt, 153, 0); -+ __insert(rt, 153, 0, 351); -+ __lookup_fails(rt, 154, 0); -+ __insert(rt, 154, 0, 352); -+ __lookup_fails(rt, 155, 0); -+ __insert(rt, 155, 0, 353); -+ __lookup_fails(rt, 156, 0); -+ __insert(rt, 156, 0, 354); -+ __lookup_fails(rt, 157, 0); -+ __insert(rt, 157, 0, 355); -+ __lookup_fails(rt, 158, 0); -+ __insert(rt, 158, 0, 356); -+ __lookup_fails(rt, 159, 0); -+ __insert(rt, 159, 0, 357); -+ __lookup_fails(rt, 160, 0); -+ __insert(rt, 160, 0, 358); -+ __lookup_fails(rt, 161, 0); -+ __insert(rt, 161, 0, 359); -+ __lookup_fails(rt, 162, 0); -+ __insert(rt, 162, 0, 360); -+ __lookup_fails(rt, 164, 0); -+ __insert(rt, 164, 0, 361); -+ __lookup_fails(rt, 165, 0); -+ __insert(rt, 165, 0, 362); -+ __lookup_fails(rt, 166, 0); -+ __insert(rt, 166, 0, 363); -+ __lookup_fails(rt, 167, 0); -+ __insert(rt, 167, 0, 364); -+ __lookup_fails(rt, 168, 0); -+ __insert(rt, 168, 0, 365); -+ __lookup_fails(rt, 169, 0); -+ __insert(rt, 169, 0, 366); -+ __lookup_fails(rt, 170, 0); -+ __insert(rt, 170, 0, 367); -+ __lookup_fails(rt, 171, 0); -+ __insert(rt, 171, 0, 368); -+ __lookup_fails(rt, 172, 0); -+ __insert(rt, 172, 0, 369); -+ __lookup_fails(rt, 173, 0); -+ __insert(rt, 173, 0, 370); -+ __lookup_fails(rt, 174, 0); -+ __insert(rt, 174, 0, 371); -+ __lookup_fails(rt, 175, 0); -+ __insert(rt, 175, 0, 372); -+ __lookup_fails(rt, 176, 0); -+ __insert(rt, 176, 0, 373); -+ __lookup_fails(rt, 177, 0); -+ __insert(rt, 177, 0, 374); -+ __lookup_fails(rt, 178, 0); -+ __insert(rt, 178, 0, 375); -+ __lookup_fails(rt, 179, 0); -+ __insert(rt, 179, 0, 376); -+ __lookup_fails(rt, 180, 0); -+ __insert(rt, 180, 0, 377); -+ __lookup_fails(rt, 181, 0); -+ __insert(rt, 181, 0, 378); -+ __lookup_fails(rt, 182, 0); -+ __insert(rt, 182, 0, 379); -+ __lookup_fails(rt, 183, 0); -+ __insert(rt, 183, 0, 380); -+ __lookup_fails(rt, 184, 0); -+ __insert(rt, 184, 0, 381); -+ __lookup_fails(rt, 185, 0); -+ __insert(rt, 185, 0, 382); -+ __lookup_fails(rt, 186, 0); -+ __insert(rt, 186, 0, 383); -+ __lookup_fails(rt, 187, 0); -+ __insert(rt, 187, 0, 384); -+ __lookup_fails(rt, 188, 0); -+ __insert(rt, 188, 0, 385); -+ __lookup_fails(rt, 189, 0); -+ __insert(rt, 189, 0, 386); -+ __lookup_fails(rt, 190, 0); -+ __insert(rt, 190, 0, 387); -+ __lookup_fails(rt, 191, 0); -+ __insert(rt, 191, 0, 388); -+ __lookup_fails(rt, 192, 0); -+ __insert(rt, 192, 0, 389); -+ __lookup_fails(rt, 193, 0); -+ __insert(rt, 193, 0, 390); -+ __lookup_fails(rt, 194, 0); -+ __insert(rt, 194, 0, 391); -+ 
__lookup_fails(rt, 195, 0); -+ __insert(rt, 195, 0, 392); -+ __lookup_fails(rt, 196, 0); -+ __insert(rt, 196, 0, 393); -+ __lookup_fails(rt, 197, 0); -+ __insert(rt, 197, 0, 394); -+ __lookup_fails(rt, 198, 0); -+ __insert(rt, 198, 0, 395); -+ __lookup_fails(rt, 199, 0); -+ __insert(rt, 199, 0, 396); -+ __lookup_fails(rt, 200, 0); -+ __insert(rt, 200, 0, 397); -+ __lookup_fails(rt, 201, 0); -+ __insert(rt, 201, 0, 398); -+ __lookup_fails(rt, 202, 0); -+ __insert(rt, 202, 0, 399); -+ __lookup_fails(rt, 203, 0); -+ __insert(rt, 203, 0, 400); -+ __lookup_fails(rt, 204, 0); -+ __insert(rt, 204, 0, 401); -+ __lookup_fails(rt, 205, 0); -+ __insert(rt, 205, 0, 402); -+ __lookup_fails(rt, 206, 0); -+ __insert(rt, 206, 0, 403); -+ __lookup_fails(rt, 207, 0); -+ __insert(rt, 207, 0, 404); -+ __lookup_fails(rt, 208, 0); -+ __insert(rt, 208, 0, 405); -+ __lookup_fails(rt, 209, 0); -+ __insert(rt, 209, 0, 406); -+ __lookup_fails(rt, 210, 0); -+ __insert(rt, 210, 0, 407); -+ __lookup_matches(rt, 6, 0, 208); -+ __invalidate(rt, 6); -+ __lookup_matches(rt, 7, 0, 209); -+ __invalidate(rt, 7); -+ __lookup_matches(rt, 8, 0, 210); -+ __invalidate(rt, 8); -+ __lookup_matches(rt, 9, 0, 211); -+ __invalidate(rt, 9); -+ __lookup_matches(rt, 10, 0, 212); -+ __invalidate(rt, 10); -+ __lookup_matches(rt, 11, 0, 213); -+ __invalidate(rt, 11); -+ __lookup_matches(rt, 13, 0, 214); -+ __invalidate(rt, 13); -+ __lookup_matches(rt, 14, 0, 215); -+ __invalidate(rt, 14); -+ __lookup_matches(rt, 15, 0, 216); -+ __invalidate(rt, 15); -+ __lookup_matches(rt, 16, 0, 217); -+ __invalidate(rt, 16); -+ __lookup_matches(rt, 17, 0, 218); -+ __invalidate(rt, 17); -+ __lookup_matches(rt, 18, 0, 219); -+ __invalidate(rt, 18); -+ __lookup_matches(rt, 19, 0, 220); -+ __invalidate(rt, 19); -+ __lookup_matches(rt, 20, 0, 221); -+ __invalidate(rt, 20); -+ __lookup_matches(rt, 21, 0, 222); -+ __invalidate(rt, 21); -+ __lookup_matches(rt, 22, 0, 223); -+ __invalidate(rt, 22); -+ __lookup_matches(rt, 23, 0, 224); -+ __invalidate(rt, 23); -+ __lookup_matches(rt, 24, 0, 225); -+ __invalidate(rt, 24); -+ __lookup_matches(rt, 25, 0, 226); -+ __invalidate(rt, 25); -+ __lookup_matches(rt, 26, 0, 227); -+ __invalidate(rt, 26); -+ __lookup_matches(rt, 27, 0, 228); -+ __invalidate(rt, 27); -+ __lookup_matches(rt, 28, 0, 229); -+ __invalidate(rt, 28); -+ __lookup_matches(rt, 29, 0, 230); -+ __invalidate(rt, 29); -+ __lookup_matches(rt, 30, 0, 231); -+ __invalidate(rt, 30); -+ __lookup_matches(rt, 31, 0, 232); -+ __invalidate(rt, 31); -+ __lookup_matches(rt, 32, 0, 233); -+ __invalidate(rt, 32); -+ __lookup_matches(rt, 33, 0, 234); -+ __invalidate(rt, 33); -+ __lookup_matches(rt, 34, 0, 235); -+ __invalidate(rt, 34); -+ __lookup_matches(rt, 35, 0, 236); -+ __invalidate(rt, 35); -+ __lookup_matches(rt, 36, 0, 237); -+ __invalidate(rt, 36); -+ __lookup_matches(rt, 37, 0, 238); -+ __invalidate(rt, 37); -+ __lookup_matches(rt, 38, 0, 239); -+ __invalidate(rt, 38); -+ __lookup_matches(rt, 39, 0, 240); -+ __invalidate(rt, 39); -+ __lookup_matches(rt, 40, 0, 241); -+ __invalidate(rt, 40); -+ __lookup_matches(rt, 41, 0, 242); -+ __invalidate(rt, 41); -+ __lookup_matches(rt, 42, 0, 243); -+ __invalidate(rt, 42); -+ __lookup_matches(rt, 43, 0, 244); -+ __invalidate(rt, 43); -+ __lookup_matches(rt, 44, 0, 245); -+ __invalidate(rt, 44); -+ __lookup_matches(rt, 45, 0, 246); -+ __invalidate(rt, 45); -+ __lookup_matches(rt, 47, 0, 247); -+ __invalidate(rt, 47); -+ __lookup_matches(rt, 48, 0, 248); -+ __invalidate(rt, 48); -+ __lookup_matches(rt, 49, 0, 249); -+ 
__invalidate(rt, 49); -+ __lookup_matches(rt, 50, 0, 250); -+ __invalidate(rt, 50); -+ __lookup_matches(rt, 51, 0, 251); -+ __invalidate(rt, 51); -+ __lookup_matches(rt, 52, 0, 252); -+ __invalidate(rt, 52); -+ __lookup_matches(rt, 53, 0, 253); -+ __invalidate(rt, 53); -+ __lookup_matches(rt, 54, 0, 254); -+ __invalidate(rt, 54); -+ __lookup_matches(rt, 55, 0, 255); -+ __invalidate(rt, 55); -+ __lookup_matches(rt, 56, 0, 256); -+ __invalidate(rt, 56); -+ __lookup_matches(rt, 57, 0, 257); -+ __invalidate(rt, 57); -+ __lookup_matches(rt, 58, 0, 258); -+ __invalidate(rt, 58); -+ __lookup_matches(rt, 59, 0, 259); -+ __invalidate(rt, 59); -+ __lookup_matches(rt, 60, 0, 260); -+ __invalidate(rt, 60); -+ __lookup_matches(rt, 61, 0, 261); -+ __invalidate(rt, 61); -+ __lookup_matches(rt, 62, 0, 262); -+ __invalidate(rt, 62); -+ __lookup_matches(rt, 63, 0, 263); -+ __invalidate(rt, 63); -+ __lookup_matches(rt, 64, 0, 264); -+ __invalidate(rt, 64); -+ __lookup_matches(rt, 66, 0, 265); -+ __invalidate(rt, 66); -+ __lookup_matches(rt, 67, 0, 266); -+ __invalidate(rt, 67); -+ __lookup_matches(rt, 68, 0, 267); -+ __invalidate(rt, 68); -+ __lookup_matches(rt, 69, 0, 268); -+ __invalidate(rt, 69); -+ __lookup_matches(rt, 70, 0, 269); -+ __invalidate(rt, 70); -+ __lookup_matches(rt, 71, 0, 270); -+ __invalidate(rt, 71); -+ __lookup_matches(rt, 72, 0, 271); -+ __invalidate(rt, 72); -+ __lookup_matches(rt, 73, 0, 272); -+ __lookup_matches(rt, 74, 0, 273); -+ __invalidate(rt, 74); -+ __lookup_matches(rt, 75, 0, 274); -+ __invalidate(rt, 75); -+ __lookup_matches(rt, 76, 0, 275); -+ __invalidate(rt, 76); -+ __lookup_matches(rt, 77, 0, 276); -+ __invalidate(rt, 77); -+ __lookup_matches(rt, 78, 0, 277); -+ __invalidate(rt, 78); -+ __lookup_matches(rt, 79, 0, 278); -+ __invalidate(rt, 79); -+ __lookup_matches(rt, 80, 0, 279); -+ __invalidate(rt, 80); -+ __lookup_matches(rt, 81, 0, 280); -+ __invalidate(rt, 81); -+ __lookup_matches(rt, 82, 0, 281); -+ __invalidate(rt, 82); -+ __lookup_matches(rt, 83, 0, 282); -+ __invalidate(rt, 83); -+ __lookup_matches(rt, 84, 0, 283); -+ __invalidate(rt, 84); -+ __lookup_matches(rt, 85, 0, 284); -+ __invalidate(rt, 85); -+ __lookup_matches(rt, 86, 0, 285); -+ __invalidate(rt, 86); -+ __lookup_matches(rt, 87, 0, 286); -+ __invalidate(rt, 87); -+ __lookup_matches(rt, 88, 0, 287); -+ __invalidate(rt, 88); -+ __lookup_matches(rt, 89, 0, 288); -+ __invalidate(rt, 89); -+ __lookup_matches(rt, 90, 0, 289); -+ __invalidate(rt, 90); -+ __lookup_matches(rt, 91, 0, 290); -+ __invalidate(rt, 91); -+ __lookup_matches(rt, 92, 0, 291); -+ __invalidate(rt, 92); -+ __lookup_matches(rt, 93, 0, 292); -+ __invalidate(rt, 93); -+ __lookup_matches(rt, 94, 0, 293); -+ __invalidate(rt, 94); -+ __lookup_matches(rt, 95, 0, 294); -+ __invalidate(rt, 95); -+ __lookup_matches(rt, 97, 0, 295); -+ __invalidate(rt, 97); -+ __lookup_matches(rt, 98, 0, 296); -+ __invalidate(rt, 98); -+ __lookup_matches(rt, 99, 0, 297); -+ __invalidate(rt, 99); -+ __lookup_matches(rt, 100, 0, 298); -+ __invalidate(rt, 100); -+ __lookup_matches(rt, 101, 0, 299); -+ __invalidate(rt, 101); -+ __lookup_matches(rt, 102, 0, 300); -+ __invalidate(rt, 102); -+ __lookup_matches(rt, 103, 0, 301); -+ __invalidate(rt, 103); -+ __lookup_matches(rt, 104, 0, 302); -+ __invalidate(rt, 104); -+ __lookup_matches(rt, 105, 0, 303); -+ __invalidate(rt, 105); -+ __lookup_matches(rt, 106, 0, 304); -+ __invalidate(rt, 106); -+ __lookup_matches(rt, 107, 0, 305); -+ __invalidate(rt, 107); -+ __lookup_matches(rt, 108, 0, 306); -+ __invalidate(rt, 108); -+ 
__lookup_matches(rt, 109, 0, 307); -+ __invalidate(rt, 109); -+ __lookup_matches(rt, 110, 0, 308); -+ __invalidate(rt, 110); -+ __lookup_matches(rt, 111, 0, 309); -+ __invalidate(rt, 111); -+ __lookup_matches(rt, 112, 0, 310); -+ __invalidate(rt, 112); -+ __lookup_matches(rt, 113, 0, 311); -+ __invalidate(rt, 113); -+ __lookup_matches(rt, 114, 0, 312); -+ __invalidate(rt, 114); -+ __lookup_matches(rt, 115, 0, 313); -+ __invalidate(rt, 115); -+ __lookup_matches(rt, 116, 0, 314); -+ __invalidate(rt, 116); -+ __lookup_matches(rt, 117, 0, 315); -+ __invalidate(rt, 117); -+ __lookup_matches(rt, 118, 0, 316); -+ __invalidate(rt, 118); -+ __lookup_matches(rt, 119, 0, 317); -+ __invalidate(rt, 119); -+ __lookup_matches(rt, 120, 0, 318); -+ __invalidate(rt, 120); -+ __lookup_matches(rt, 121, 0, 319); -+ __invalidate(rt, 121); -+ __lookup_matches(rt, 122, 0, 320); -+ __invalidate(rt, 122); -+ __lookup_matches(rt, 123, 0, 321); -+ __invalidate(rt, 123); -+ __lookup_matches(rt, 124, 0, 322); -+ __invalidate(rt, 124); -+ __lookup_matches(rt, 125, 0, 323); -+ __invalidate(rt, 125); -+ __lookup_matches(rt, 126, 0, 324); -+ __invalidate(rt, 126); -+ __lookup_matches(rt, 127, 0, 325); -+ __invalidate(rt, 127); -+ __lookup_matches(rt, 128, 0, 326); -+ __invalidate(rt, 128); -+ __lookup_matches(rt, 129, 0, 327); -+ __invalidate(rt, 129); -+ __lookup_matches(rt, 130, 0, 328); -+ __invalidate(rt, 130); -+ __lookup_matches(rt, 131, 0, 329); -+ __invalidate(rt, 131); -+ __lookup_matches(rt, 132, 0, 330); -+ __invalidate(rt, 132); -+ __lookup_matches(rt, 133, 0, 331); -+ __invalidate(rt, 133); -+ __lookup_matches(rt, 134, 0, 332); -+ __invalidate(rt, 134); -+ __lookup_matches(rt, 135, 0, 333); -+ __invalidate(rt, 135); -+ __lookup_matches(rt, 136, 0, 334); -+ __invalidate(rt, 136); -+ __lookup_matches(rt, 137, 0, 335); -+ __invalidate(rt, 137); -+ __lookup_matches(rt, 138, 0, 336); -+ __invalidate(rt, 138); -+ __lookup_matches(rt, 139, 0, 337); -+ __invalidate(rt, 139); -+ __lookup_matches(rt, 140, 0, 338); -+ __invalidate(rt, 140); -+ __lookup_matches(rt, 141, 0, 339); -+ __invalidate(rt, 141); -+ __lookup_matches(rt, 142, 0, 340); -+ __invalidate(rt, 142); -+ __lookup_matches(rt, 143, 0, 341); -+ __invalidate(rt, 143); -+ __lookup_matches(rt, 144, 0, 342); -+ __invalidate(rt, 144); -+ __lookup_matches(rt, 145, 0, 343); -+ __invalidate(rt, 145); -+ __lookup_matches(rt, 146, 0, 344); -+ __invalidate(rt, 146); -+ __lookup_matches(rt, 147, 0, 345); -+ __invalidate(rt, 147); -+ __lookup_matches(rt, 148, 0, 346); -+ __invalidate(rt, 148); -+ __lookup_matches(rt, 149, 0, 347); -+ __invalidate(rt, 149); -+ __lookup_matches(rt, 150, 0, 348); -+ __invalidate(rt, 150); -+ __lookup_matches(rt, 151, 0, 349); -+ __invalidate(rt, 151); -+ __lookup_matches(rt, 152, 0, 350); -+ __invalidate(rt, 152); -+ __lookup_matches(rt, 153, 0, 351); -+ __invalidate(rt, 153); -+ __lookup_matches(rt, 154, 0, 352); -+ __invalidate(rt, 154); -+ __lookup_matches(rt, 155, 0, 353); -+ __invalidate(rt, 155); -+ __lookup_matches(rt, 156, 0, 354); -+ __invalidate(rt, 156); -+ __lookup_matches(rt, 157, 0, 355); -+ __invalidate(rt, 157); -+ __lookup_matches(rt, 158, 0, 356); -+ __invalidate(rt, 158); -+ __lookup_matches(rt, 159, 0, 357); -+ __invalidate(rt, 159); -+ __lookup_matches(rt, 160, 0, 358); -+ __invalidate(rt, 160); -+ __lookup_matches(rt, 161, 0, 359); -+ __invalidate(rt, 161); -+ __lookup_matches(rt, 162, 0, 360); -+ __invalidate(rt, 162); -+ __lookup_matches(rt, 164, 0, 361); -+ __invalidate(rt, 164); -+ __lookup_matches(rt, 165, 0, 362); 
-+ __invalidate(rt, 165); -+ __lookup_matches(rt, 166, 0, 363); -+ __invalidate(rt, 166); -+ __lookup_matches(rt, 167, 0, 364); -+ __invalidate(rt, 167); -+ __lookup_matches(rt, 168, 0, 365); -+ __invalidate(rt, 168); -+ __lookup_matches(rt, 169, 0, 366); -+ __invalidate(rt, 169); -+ __lookup_matches(rt, 170, 0, 367); -+ __invalidate(rt, 170); -+ __lookup_matches(rt, 171, 0, 368); -+ __invalidate(rt, 171); -+ __lookup_matches(rt, 172, 0, 369); -+ __invalidate(rt, 172); -+ __lookup_matches(rt, 173, 0, 370); -+ __invalidate(rt, 173); -+ __lookup_matches(rt, 174, 0, 371); -+ __invalidate(rt, 174); -+ __lookup_matches(rt, 175, 0, 372); -+ __invalidate(rt, 175); -+ __lookup_matches(rt, 176, 0, 373); -+ __invalidate(rt, 176); -+ __lookup_matches(rt, 177, 0, 374); -+ __invalidate(rt, 177); -+ __lookup_matches(rt, 178, 0, 375); -+ __invalidate(rt, 178); -+ __lookup_matches(rt, 179, 0, 376); -+ __invalidate(rt, 179); -+ __lookup_matches(rt, 180, 0, 377); -+ __invalidate(rt, 180); -+ __lookup_matches(rt, 181, 0, 378); -+ __invalidate(rt, 181); -+ __lookup_matches(rt, 182, 0, 379); -+ __invalidate(rt, 182); -+ __lookup_matches(rt, 183, 0, 380); -+ __invalidate(rt, 183); -+ __lookup_matches(rt, 184, 0, 381); -+ __invalidate(rt, 184); -+ __lookup_matches(rt, 185, 0, 382); -+ __invalidate(rt, 185); -+ __lookup_matches(rt, 186, 0, 383); -+ __invalidate(rt, 186); -+ __lookup_matches(rt, 187, 0, 384); -+ __invalidate(rt, 187); -+ __lookup_matches(rt, 188, 0, 385); -+ __invalidate(rt, 188); -+ __lookup_matches(rt, 189, 0, 386); -+ __invalidate(rt, 189); -+ __lookup_matches(rt, 190, 0, 387); -+ __invalidate(rt, 190); -+ __lookup_matches(rt, 191, 0, 388); -+ __invalidate(rt, 191); -+ __lookup_matches(rt, 192, 0, 389); -+ __invalidate(rt, 192); -+ __lookup_matches(rt, 193, 0, 390); -+ __invalidate(rt, 193); -+ __lookup_matches(rt, 194, 0, 391); -+ __invalidate(rt, 194); -+ __lookup_matches(rt, 195, 0, 392); -+ __invalidate(rt, 195); -+ __lookup_matches(rt, 196, 0, 393); -+ __invalidate(rt, 196); -+ __lookup_matches(rt, 197, 0, 394); -+ __invalidate(rt, 197); -+ __lookup_matches(rt, 198, 0, 395); -+ __invalidate(rt, 198); -+ __lookup_matches(rt, 199, 0, 396); -+ __invalidate(rt, 199); -+ __lookup_matches(rt, 200, 0, 397); -+ __invalidate(rt, 200); -+ __lookup_matches(rt, 201, 0, 398); -+ __invalidate(rt, 201); -+ __lookup_matches(rt, 202, 0, 399); -+ __invalidate(rt, 202); -+ __lookup_matches(rt, 203, 0, 400); -+ __invalidate(rt, 203); -+ __lookup_matches(rt, 204, 0, 401); -+ __invalidate(rt, 204); -+ __lookup_matches(rt, 205, 0, 402); -+ __invalidate(rt, 205); -+ __lookup_matches(rt, 206, 0, 403); -+ __invalidate(rt, 206); -+ __lookup_matches(rt, 207, 0, 404); -+ __invalidate(rt, 207); -+ __lookup_matches(rt, 208, 0, 405); -+ __invalidate(rt, 208); -+ __lookup_matches(rt, 209, 0, 406); -+ __invalidate(rt, 209); -+ __lookup_matches(rt, 210, 0, 407); -+ __invalidate(rt, 210); -+ __lookup_fails(rt, 6, 0); -+ __insert(rt, 6, 0, 408); -+ __lookup_fails(rt, 7, 0); -+ __insert(rt, 7, 0, 409); -+ __lookup_fails(rt, 8, 0); -+ __insert(rt, 8, 0, 410); -+ __lookup_fails(rt, 9, 0); -+ __insert(rt, 9, 0, 411); -+ __lookup_fails(rt, 10, 0); -+ __insert(rt, 10, 0, 412); -+ __lookup_fails(rt, 11, 0); -+ __insert(rt, 11, 0, 413); -+ __lookup_fails(rt, 13, 0); -+ __insert(rt, 13, 0, 414); -+ __lookup_fails(rt, 14, 0); -+ __insert(rt, 14, 0, 415); -+ __lookup_fails(rt, 15, 0); -+ __insert(rt, 15, 0, 416); -+ __lookup_fails(rt, 16, 0); -+ __insert(rt, 16, 0, 417); -+ __lookup_fails(rt, 17, 0); -+ __insert(rt, 17, 0, 418); -+ 
__lookup_fails(rt, 18, 0); -+ __insert(rt, 18, 0, 419); -+ __lookup_fails(rt, 19, 0); -+ __insert(rt, 19, 0, 420); -+ __lookup_fails(rt, 20, 0); -+ __insert(rt, 20, 0, 421); -+ __lookup_fails(rt, 21, 0); -+ __insert(rt, 21, 0, 422); -+ __lookup_fails(rt, 22, 0); -+ __insert(rt, 22, 0, 423); -+ __lookup_fails(rt, 23, 0); -+ __insert(rt, 23, 0, 424); -+ __lookup_matches(rt, 6, 0, 408); -+ __invalidate(rt, 6); -+ __lookup_matches(rt, 7, 0, 409); -+ __invalidate(rt, 7); -+ __lookup_matches(rt, 8, 0, 410); -+ __invalidate(rt, 8); -+ __lookup_matches(rt, 9, 0, 411); -+ __invalidate(rt, 9); -+ __lookup_matches(rt, 10, 0, 412); -+ __invalidate(rt, 10); -+ __lookup_matches(rt, 11, 0, 413); -+ __invalidate(rt, 11); -+ __lookup_matches(rt, 13, 0, 414); -+ __invalidate(rt, 13); -+ __lookup_matches(rt, 14, 0, 415); -diff --git a/test/unit/unit-test.sh b/test/unit/unit-test.sh -index e8332d6..f545f14 100644 ---- a/test/unit/unit-test.sh -+++ b/test/unit/unit-test.sh -@@ -13,8 +13,6 @@ - - SKIP_WITH_LVMLOCKD=1 - SKIP_WITH_LVMPOLLD=1 --SKIP_WITH_LVMETAD=1 --SKIP_WITH_CLVMD=1 - - SKIP_ROOT_DM_CHECK=1 - diff --git a/SOURCES/lvm2-2_02_187-cov-Fix-memory-leak.patch b/SOURCES/lvm2-2_02_187-cov-Fix-memory-leak.patch deleted file mode 100644 index ac15ad5..0000000 --- a/SOURCES/lvm2-2_02_187-cov-Fix-memory-leak.patch +++ /dev/null @@ -1,17 +0,0 @@ - libdm/libdm-common.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/libdm/libdm-common.c b/libdm/libdm-common.c -index c300223..b06e678 100644 ---- a/libdm/libdm-common.c -+++ b/libdm/libdm-common.c -@@ -2012,7 +2012,8 @@ static int _sysfs_get_kernel_name(uint32_t major, uint32_t minor, char *buf, siz - log_sys_error("readlink", sysfs_path); - else { - log_sys_debug("readlink", sysfs_path); -- return _sysfs_find_kernel_name(major, minor, buf, buf_size); -+ r = _sysfs_find_kernel_name(major, minor, buf, buf_size); -+ goto bad; - } - goto bad; - } diff --git a/SOURCES/lvm2-2_02_187-dmsetup-do-not-treat-no-groups-as-an-error-in-dmstat.patch b/SOURCES/lvm2-2_02_187-dmsetup-do-not-treat-no-groups-as-an-error-in-dmstat.patch deleted file mode 100644 index 4cb1f1b..0000000 --- a/SOURCES/lvm2-2_02_187-dmsetup-do-not-treat-no-groups-as-an-error-in-dmstat.patch +++ /dev/null @@ -1,22 +0,0 @@ - tools/dmsetup.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/tools/dmsetup.c b/tools/dmsetup.c -index 60e0638..15a09c6 100644 ---- a/tools/dmsetup.c -+++ b/tools/dmsetup.c -@@ -941,10 +941,12 @@ static int _display_info_cols(struct dm_task *dmt, struct dm_info *info) - } - } - -- /* group report with no groups? 
*/ -+ /* Group report with no groups is not an error */ - if ((walk_flags == DM_STATS_WALK_GROUP) -- && !dm_stats_get_nr_groups(obj.stats)) -+ && !dm_stats_get_nr_groups(obj.stats)) { -+ r = 1; - goto out; -+ } - - dm_stats_walk_init(obj.stats, walk_flags); - dm_stats_walk_do(obj.stats) { diff --git a/SOURCES/lvm2-2_02_187-lvconvert-improve-validation-thin-and-cache-pool-con.patch b/SOURCES/lvm2-2_02_187-lvconvert-improve-validation-thin-and-cache-pool-con.patch deleted file mode 100644 index a356326..0000000 --- a/SOURCES/lvm2-2_02_187-lvconvert-improve-validation-thin-and-cache-pool-con.patch +++ /dev/null @@ -1,92 +0,0 @@ - WHATS_NEW | 1 + - tools/lvconvert.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++----- - 2 files changed, 48 insertions(+), 5 deletions(-) - -diff --git a/WHATS_NEW b/WHATS_NEW -index 399864d..d1f4530 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -2,6 +2,7 @@ Version 2.02.187 - - =================================== - Prevent creating VGs with PVs with different logical block sizes. - Pvmove runs in exlusively activating mode for exclusively active LVs. -+ Enhance validation for thin and cache pool conversion and swapping. - - Version 2.02.186 - 27th August 2019 - =================================== -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index e66f063..799e746 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -4309,24 +4309,66 @@ static int _lvconvert_to_pool_or_swap_metadata_single(struct cmd_context *cmd, - struct dm_list *use_pvh = NULL; - int to_thinpool = 0; - int to_cachepool = 0; -+ int lvt_enum = get_lvt_enum(lv); -+ struct lv_type *lvtype; - - switch (cmd->command->command_enum) { - case lvconvert_to_thinpool_or_swap_metadata_CMD: -+ if (lv_is_cache(lv)) -+ /* For cached LV check the cache origin LV type */ -+ lvt_enum = get_lvt_enum(seg_lv(first_seg(lv), 0)); - to_thinpool = 1; - break; - case lvconvert_to_cachepool_or_swap_metadata_CMD: -+ if (lv_is_cache(lv)) -+ goto_bad; /* Cache over cache is not supported */ - to_cachepool = 1; - break; - default: -- log_error(INTERNAL_ERROR "Invalid lvconvert pool command"); -- return 0; -- }; -+ log_error(INTERNAL_ERROR "Invalid lvconvert pool command."); -+ return ECMD_FAILED; -+ } -+ -+ switch (lvt_enum) { -+ case thinpool_LVT: -+ if (!to_thinpool) -+ goto_bad; /* can't accept cache-pool */ -+ break; /* swap thin-pool */ -+ case cachepool_LVT: -+ if (!to_cachepool) -+ goto_bad; /* can't accept thin-pool */ -+ break; /* swap cache-pool */ -+ case linear_LVT: -+ case raid_LVT: -+ case striped_LVT: -+ case zero_LVT: -+ break; -+ default: -+bad: -+ lvtype = get_lv_type(lvt_enum); -+ log_error("LV %s with type %s cannot be used as a %s pool LV.", -+ display_lvname(lv), lvtype ? lvtype->name : "unknown", -+ to_thinpool ? "thin" : "cache"); -+ return ECMD_FAILED; -+ } - - if (lv_is_origin(lv)) { - log_error("Cannot convert logical volume %s under snapshot.", - display_lvname(lv)); -- return 0; -- }; -+ return ECMD_FAILED; -+ } -+ -+ if (!lv_is_visible(lv)) { -+ log_error("Can't convert internal LV %s.", -+ display_lvname(lv)); -+ return ECMD_FAILED; -+ } -+ -+ if (lv_is_locked(lv)) { -+ log_error("Can't convert locked LV %s.", -+ display_lvname(lv)); -+ return ECMD_FAILED; -+ } - - if (cmd->position_argc > 1) { - /* First pos arg is required LV, remaining are optional PVs. 
*/ diff --git a/SOURCES/lvm2-2_02_187-lvextend-fix-resizing-volumes-of-different-segtype.patch b/SOURCES/lvm2-2_02_187-lvextend-fix-resizing-volumes-of-different-segtype.patch deleted file mode 100644 index de4b283..0000000 --- a/SOURCES/lvm2-2_02_187-lvextend-fix-resizing-volumes-of-different-segtype.patch +++ /dev/null @@ -1,16 +0,0 @@ - lib/metadata/lv_manip.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c -index 9af90f9..6db8575 100644 ---- a/lib/metadata/lv_manip.c -+++ b/lib/metadata/lv_manip.c -@@ -5446,6 +5446,8 @@ static struct logical_volume *_lvresize_setup_aux(struct logical_volume *lv, - struct lv_segment *mseg = last_seg(lv); - - lp->alloc = lv->alloc; -+ lp->percent = PERCENT_NONE; -+ lp->segtype = mseg->segtype; - lp->mirrors = seg_is_mirrored(mseg) ? lv_mirror_count(lv) : 0; - lp->resizefs = 0; - lp->stripes = lp->mirrors ? mseg->area_count / lp->mirrors : 0; diff --git a/SOURCES/lvm2-2_02_187-lvmetad-fix-timeout-on-shutdown.patch b/SOURCES/lvm2-2_02_187-lvmetad-fix-timeout-on-shutdown.patch deleted file mode 100644 index 21b64c8..0000000 --- a/SOURCES/lvm2-2_02_187-lvmetad-fix-timeout-on-shutdown.patch +++ /dev/null @@ -1,51 +0,0 @@ - WHATS_NEW | 1 + - libdaemon/server/daemon-server.c | 11 +++++++++-- - 2 files changed, 10 insertions(+), 2 deletions(-) - -diff --git a/WHATS_NEW b/WHATS_NEW -index 00b84f9..ac70074 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -1,5 +1,6 @@ - Version 2.02.187 - - =================================== -+ Fix lvmetad shutdown and avoid lenghty timeouts when rebooting system. - Prevent creating VGs with PVs with different logical block sizes. - Pvmove runs in exlusively activating mode for exclusively active LVs. - Activate thin-pool layered volume as 'read-only' device. -diff --git a/libdaemon/server/daemon-server.c b/libdaemon/server/daemon-server.c -index bc58f7b..62f403a 100644 ---- a/libdaemon/server/daemon-server.c -+++ b/libdaemon/server/daemon-server.c -@@ -89,6 +89,13 @@ static int _is_idle(daemon_state s) - - static struct timespec *_get_timeout(daemon_state s) - { -+ static struct timespec _tm = { 0 }; -+ -+ if (_shutdown_requested) { -+ _tm.tv_sec = 1; -+ return &_tm; -+ } -+ - return s.idle ? 
s.idle->ptimeout : NULL; - } - -@@ -506,7 +513,7 @@ static int _handle_connect(daemon_state s) - socklen_t sl = sizeof(sockaddr); - - client.socket_fd = accept(s.socket_fd, (struct sockaddr *) &sockaddr, &sl); -- if (client.socket_fd < 0) { -+ if (client.socket_fd < 0 || _shutdown_requested) { - if (errno != EAGAIN || !_shutdown_requested) - ERROR(&s, "Failed to accept connection: %s.", strerror(errno)); - return 0; -@@ -672,7 +679,7 @@ void daemon_start(daemon_state s) - continue; - } - -- if (FD_ISSET(s.socket_fd, &in)) { -+ if (!_shutdown_requested && FD_ISSET(s.socket_fd, &in)) { - timeout_count = 0; - _handle_connect(s); - } diff --git a/SOURCES/lvm2-2_02_187-mirror-directly-activate-updated-mirror.patch b/SOURCES/lvm2-2_02_187-mirror-directly-activate-updated-mirror.patch deleted file mode 100644 index 0d3c8d9..0000000 --- a/SOURCES/lvm2-2_02_187-mirror-directly-activate-updated-mirror.patch +++ /dev/null @@ -1,16 +0,0 @@ - lib/metadata/mirror.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/metadata/mirror.c b/lib/metadata/mirror.c -index cd8ce1e..6cd88cb 100644 ---- a/lib/metadata/mirror.c -+++ b/lib/metadata/mirror.c -@@ -790,7 +790,7 @@ static int _split_mirror_images(struct logical_volume *lv, - - act = lv_is_active(lv_lock_holder(lv)); - -- if (act && (!deactivate_lv(cmd, new_lv) || !_activate_lv_like_model(lv, new_lv))) { -+ if (act && !_activate_lv_like_model(lv, new_lv)) { - log_error("Failed to rename newly split LV in the kernel"); - return 0; - } diff --git a/SOURCES/lvm2-2_02_187-mirror-fix-leg-splitting.patch b/SOURCES/lvm2-2_02_187-mirror-fix-leg-splitting.patch deleted file mode 100644 index 7f44bbd..0000000 --- a/SOURCES/lvm2-2_02_187-mirror-fix-leg-splitting.patch +++ /dev/null @@ -1,283 +0,0 @@ - lib/activate/activate.c | 40 +++++++++++++++++++++++++--------------- - lib/activate/activate.h | 2 ++ - lib/activate/dev_manager.c | 28 +++++++++++++++++++--------- - lib/activate/dev_manager.h | 2 +- - 4 files changed, 47 insertions(+), 25 deletions(-) - -diff --git a/lib/activate/activate.c b/lib/activate/activate.c -index aba5d14..c395d58 100644 ---- a/lib/activate/activate.c -+++ b/lib/activate/activate.c -@@ -671,7 +671,7 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv, - int use_layer, struct lvinfo *info, - const struct lv_segment *seg, - struct lv_seg_status *seg_status, -- int with_open_count, int with_read_ahead) -+ int with_open_count, int with_read_ahead, int with_name_check) - { - struct dm_info dminfo; - -@@ -691,7 +691,7 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv, - /* New thin-pool has no layer, but -tpool suffix needs to be queried */ - if (!use_layer && lv_is_new_thin_pool(lv)) { - /* Check if there isn't existing old thin pool mapping in the table */ -- if (!dev_manager_info(cmd, lv, NULL, 0, 0, &dminfo, NULL, NULL)) -+ if (!dev_manager_info(cmd, lv, NULL, 0, 0, 0, &dminfo, NULL, NULL)) - return_0; - if (!dminfo.exists) - use_layer = 1; -@@ -704,8 +704,9 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv, - - if (!dev_manager_info(cmd, lv, - (use_layer) ? lv_layer(lv) : NULL, -- with_open_count, with_read_ahead, -- &dminfo, (info) ? &info->read_ahead : NULL, -+ with_open_count, with_read_ahead, with_name_check, -+ &dminfo, -+ (info) ? 
&info->read_ahead : NULL, - seg_status)) - return_0; - -@@ -734,7 +735,7 @@ int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_la - if (!activation()) - return 0; - -- return _lv_info(cmd, lv, use_layer, info, NULL, NULL, with_open_count, with_read_ahead); -+ return _lv_info(cmd, lv, use_layer, info, NULL, NULL, with_open_count, with_read_ahead, 0); - } - - int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer, -@@ -752,6 +753,15 @@ int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer, - return r; - } - -+int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv, -+ int use_layer, struct lvinfo *info) -+{ -+ if (!activation()) -+ return 0; -+ -+ return _lv_info(cmd, lv, use_layer, info, NULL, NULL, 0, 0, 1); -+} -+ - /* - * Returns 1 if lv_with_info_and_seg_status info structure populated, - * else 0 on failure or if device not active locally. -@@ -779,16 +789,16 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - * STATUS is collected from cache LV */ - if (!(lv_seg = get_only_segment_using_this_lv(lv))) - return_0; -- (void) _lv_info(cmd, lv_seg->lv, 1, NULL, lv_seg, &status->seg_status, 0, 0); -+ (void) _lv_info(cmd, lv_seg->lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0); - return 1; - } - - if (lv_is_thin_pool(lv)) { - /* Always collect status for '-tpool' */ -- if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0) && -+ if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0, 0) && - (status->seg_status.type == SEG_STATUS_THIN_POOL)) { - /* There is -tpool device, but query 'active' state of 'fake' thin-pool */ -- if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0) && -+ if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0, 0) && - !status->seg_status.thin_pool->needs_check) - status->info.exists = 0; /* So pool LV is not active */ - } -@@ -797,10 +807,10 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - - if (lv_is_external_origin(lv)) { - if (!_lv_info(cmd, lv, 0, &status->info, NULL, NULL, -- with_open_count, with_read_ahead)) -+ with_open_count, with_read_ahead, 0)) - return_0; - -- (void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0); -+ (void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0); - return 1; - } - -@@ -813,13 +823,13 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - /* Show INFO for actual origin and grab status for merging origin */ - if (!_lv_info(cmd, lv, 0, &status->info, lv_seg, - lv_is_merging_origin(lv) ? 
&status->seg_status : NULL, -- with_open_count, with_read_ahead)) -+ with_open_count, with_read_ahead, 0)) - return_0; - - if (status->info.exists && - (status->seg_status.type != SEG_STATUS_SNAPSHOT)) /* Not merging */ - /* Grab STATUS from layered -real */ -- (void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0); -+ (void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0); - return 1; - } - -@@ -828,7 +838,7 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - olv = origin_from_cow(lv); - - if (!_lv_info(cmd, olv, 0, &status->info, first_seg(olv), &status->seg_status, -- with_open_count, with_read_ahead)) -+ with_open_count, with_read_ahead, 0)) - return_0; - - if (status->seg_status.type == SEG_STATUS_SNAPSHOT || -@@ -849,7 +859,7 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - } - - return _lv_info(cmd, lv, 0, &status->info, lv_seg, &status->seg_status, -- with_open_count, with_read_ahead); -+ with_open_count, with_read_ahead, 0); - } - - #define OPEN_COUNT_CHECK_RETRIES 25 -@@ -2834,7 +2844,7 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s, - laopts->noscan ? " noscan" : "", - laopts->temporary ? " temporary" : ""); - -- if (!lv_info(cmd, lv, 0, &info, 0, 0)) -+ if (!lv_info_with_name_check(cmd, lv, 0, &info)) - goto_out; - - /* -diff --git a/lib/activate/activate.h b/lib/activate/activate.h -index 43d26d1..a938cb4 100644 ---- a/lib/activate/activate.h -+++ b/lib/activate/activate.h -@@ -135,6 +135,8 @@ int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_la - struct lvinfo *info, int with_open_count, int with_read_ahead); - int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer, - struct lvinfo *info, int with_open_count, int with_read_ahead); -+int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv, -+ int use_layer, struct lvinfo *info); - - /* - * Returns 1 if lv_info_and_seg_status structure has been populated, -diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c -index a5e026c..1ca97c1 100644 ---- a/lib/activate/dev_manager.c -+++ b/lib/activate/dev_manager.c -@@ -239,6 +239,7 @@ static uint32_t _seg_len(const struct lv_segment *seg) - static int _info_run(const char *dlid, struct dm_info *dminfo, - uint32_t *read_ahead, - struct lv_seg_status *seg_status, -+ const char *name_check, - int with_open_count, int with_read_ahead, - uint32_t major, uint32_t minor) - { -@@ -249,6 +250,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo, - void *target = NULL; - uint64_t target_start, target_length, start, length; - char *target_name, *target_params; -+ const char *dev_name; - - if (seg_status) { - dmtask = DM_DEVICE_STATUS; -@@ -262,6 +264,11 @@ static int _info_run(const char *dlid, struct dm_info *dminfo, - with_open_count, with_flush, 0))) - return_0; - -+ if (name_check && dminfo->exists && -+ (dev_name = dm_task_get_name(dmt)) && -+ (strcmp(name_check, dev_name) != 0)) -+ dminfo->exists = 0; /* mismatching name -> device does not exist */ -+ - if (with_read_ahead && dminfo->exists) { - if (!dm_task_get_read_ahead(dmt, read_ahead)) - goto_out; -@@ -777,18 +784,19 @@ static int _original_uuid_format_check_required(struct cmd_context *cmd) - - static int _info(struct cmd_context *cmd, - const char *name, const char *dlid, -- int with_open_count, int with_read_ahead, -+ int with_open_count, int with_read_ahead, int with_name_check, - struct dm_info *dminfo, uint32_t *read_ahead, - struct lv_seg_status 
*seg_status) - { - char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN]; - const char *suffix, *suffix_position; -+ const char *name_check = (with_name_check) ? name : NULL; - unsigned i = 0; - - log_debug_activation("Getting device info for %s [%s].", name, dlid); - - /* Check for dlid */ -- if (!_info_run(dlid, dminfo, read_ahead, seg_status, -+ if (!_info_run(dlid, dminfo, read_ahead, seg_status, name_check, - with_open_count, with_read_ahead, 0, 0)) - return_0; - -@@ -804,7 +812,8 @@ static int _info(struct cmd_context *cmd, - (void) strncpy(old_style_dlid, dlid, sizeof(old_style_dlid)); - old_style_dlid[sizeof(old_style_dlid) - 1] = '\0'; - if (!_info_run(old_style_dlid, dminfo, read_ahead, seg_status, -- with_open_count, with_read_ahead, 0, 0)) -+ name_check, with_open_count, with_read_ahead, -+ 0, 0)) - return_0; - if (dminfo->exists) - return 1; -@@ -817,7 +826,7 @@ static int _info(struct cmd_context *cmd, - - /* Check for dlid before UUID_PREFIX was added */ - if (!_info_run(dlid + sizeof(UUID_PREFIX) - 1, dminfo, read_ahead, seg_status, -- with_open_count, with_read_ahead, 0, 0)) -+ name_check, with_open_count, with_read_ahead, 0, 0)) - return_0; - - return 1; -@@ -825,12 +834,12 @@ static int _info(struct cmd_context *cmd, - - static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info) - { -- return _info_run(NULL, info, NULL, 0, 0, 0, major, minor); -+ return _info_run(NULL, info, NULL, NULL, NULL, 0, 0, major, minor); - } - - int dev_manager_info(struct cmd_context *cmd, - const struct logical_volume *lv, const char *layer, -- int with_open_count, int with_read_ahead, -+ int with_open_count, int with_read_ahead, int with_name_check, - struct dm_info *dminfo, uint32_t *read_ahead, - struct lv_seg_status *seg_status) - { -@@ -843,7 +852,8 @@ int dev_manager_info(struct cmd_context *cmd, - if (!(dlid = build_dm_uuid(cmd->mem, lv, layer))) - goto_out; - -- if (!(r = _info(cmd, name, dlid, with_open_count, with_read_ahead, -+ if (!(r = _info(cmd, name, dlid, -+ with_open_count, with_read_ahead, with_name_check, - dminfo, read_ahead, seg_status))) - stack; - out: -@@ -1953,7 +1963,7 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, - if (!(dlid = build_dm_uuid(dm->track_pending_delete ? 
dm->cmd->pending_delete_mem : dm->mem, lv, layer))) - return_0; - -- if (!_info(dm->cmd, name, dlid, 1, 0, &info, NULL, NULL)) -+ if (!_info(dm->cmd, name, dlid, 1, 0, 0, &info, NULL, NULL)) - return_0; - - /* -@@ -2479,7 +2489,7 @@ static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *d - seg->lv->name, errid))) - return_NULL; - -- if (!_info(dm->cmd, name, dlid, 1, 0, &info, NULL, NULL)) -+ if (!_info(dm->cmd, name, dlid, 1, 0, 0, &info, NULL, NULL)) - return_NULL; - - if (!info.exists) { -diff --git a/lib/activate/dev_manager.h b/lib/activate/dev_manager.h -index 5be417b..20b6a26 100644 ---- a/lib/activate/dev_manager.h -+++ b/lib/activate/dev_manager.h -@@ -47,7 +47,7 @@ void dev_manager_exit(void); - */ - int dev_manager_info(struct cmd_context *cmd, const struct logical_volume *lv, - const char *layer, -- int with_open_count, int with_read_ahead, -+ int with_open_count, int with_read_ahead, int with_name_check, - struct dm_info *dminfo, uint32_t *read_ahead, - struct lv_seg_status *seg_status); - diff --git a/SOURCES/lvm2-2_02_187-pvmove-check-if-participating-LV-is-already-exlcusiv.patch b/SOURCES/lvm2-2_02_187-pvmove-check-if-participating-LV-is-already-exlcusiv.patch deleted file mode 100644 index e72869f..0000000 --- a/SOURCES/lvm2-2_02_187-pvmove-check-if-participating-LV-is-already-exlcusiv.patch +++ /dev/null @@ -1,30 +0,0 @@ - WHATS_NEW | 1 + - tools/pvmove.c | 3 ++- - 2 files changed, 3 insertions(+), 1 deletion(-) - -diff --git a/WHATS_NEW b/WHATS_NEW -index d99f183..399864d 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -1,6 +1,7 @@ - Version 2.02.187 - - =================================== - Prevent creating VGs with PVs with different logical block sizes. -+ Pvmove runs in exlusively activating mode for exclusively active LVs. 
- - Version 2.02.186 - 27th August 2019 - =================================== -diff --git a/tools/pvmove.c b/tools/pvmove.c -index 754bd58..3a447c4 100644 ---- a/tools/pvmove.c -+++ b/tools/pvmove.c -@@ -674,7 +674,8 @@ static int _pvmove_setup_single(struct cmd_context *cmd, - dm_list_iterate_items(lvl, lvs_changed) { - lvh = lv_lock_holder(lvl->lv); - /* Exclusive LV decides whether pvmove must be also exclusive */ -- if (lv_is_origin(lvh) || seg_only_exclusive(first_seg(lvh))) -+ if (lv_is_origin(lvh) || seg_only_exclusive(first_seg(lvh)) || -+ lv_is_active_exclusive(lvh)) - exclusive = 1; - } - diff --git a/SOURCES/lvm2-2_02_187-pvmove-detect-exclusively-active-LVs.patch b/SOURCES/lvm2-2_02_187-pvmove-detect-exclusively-active-LVs.patch deleted file mode 100644 index 188ad22..0000000 --- a/SOURCES/lvm2-2_02_187-pvmove-detect-exclusively-active-LVs.patch +++ /dev/null @@ -1,17 +0,0 @@ - tools/pvmove.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/tools/pvmove.c b/tools/pvmove.c -index 3a447c4..cecff00 100644 ---- a/tools/pvmove.c -+++ b/tools/pvmove.c -@@ -397,7 +397,8 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd, - /* Presence of exclusive LV decides whether pvmove must be also exclusive */ - if (!seg_only_exclusive(seg)) { - holder = lv_lock_holder(lv); -- if (seg_only_exclusive(first_seg(holder)) || lv_is_origin(holder) || lv_is_cow(holder)) -+ if (seg_only_exclusive(first_seg(holder)) || lv_is_origin(holder) || -+ lv_is_cow(holder) || lv_is_active_exclusive(holder)) - needs_exclusive = 1; - } else - needs_exclusive = 1; diff --git a/SOURCES/lvm2-2_02_187-pvs-fix-locking_type-4.patch b/SOURCES/lvm2-2_02_187-pvs-fix-locking_type-4.patch deleted file mode 100644 index fc6a5c0..0000000 --- a/SOURCES/lvm2-2_02_187-pvs-fix-locking_type-4.patch +++ /dev/null @@ -1,16 +0,0 @@ - lib/metadata/metadata.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c -index 123f7f5..d448fd9 100644 ---- a/lib/metadata/metadata.c -+++ b/lib/metadata/metadata.c -@@ -3435,6 +3435,8 @@ static struct volume_group *_vg_read_orphans(struct cmd_context *cmd, - - dm_list_init(&head.list); - -+ *consistent = 1; -+ - if (!(vginfo = lvmcache_vginfo_from_vgname(orphan_vgname, NULL))) - return_NULL; - diff --git a/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch b/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch deleted file mode 100644 index 5569cb1..0000000 --- a/SOURCES/lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch +++ /dev/null @@ -1,328 +0,0 @@ -From 8aac4049c270ae8beb741a2cd80084810945a718 Mon Sep 17 00:00:00 2001 -From: David Teigland -Date: Tue, 3 Sep 2019 15:14:08 -0500 -Subject: [PATCH 1/4] pvscan: fix activation of incomplete VGs - -For a long time there has been a bug in the activation -done by the initial pvscan (which scans all devs to -initialize the lvmetad cache.) It was attempting to -activate all VGs, even those that were not complete. - -lvmetad tells pvscan when a VG is complete, and pvscan -needs to use this information to decide which VGs to -activate. - -When there are problems that prevent lvmetad from being -used (e.g. lvmetad is disabled or not running), pvscan -activation cannot use lvmetad to determine when a VG -is complete, so it now checks if devices are present -for all PVs in the VG before activating. 
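
An aside on the check described above: with lvmetad unavailable, "complete" simply means that every PV listed in the VG metadata has a device present; if any are missing, autoactivation is skipped. The standalone sketch below illustrates that idea with made-up types (struct pv_entry is not an lvm2 structure); the authoritative version is the loop added to _pvscan_autoactivate_single() later in this patch.

/* Simplified illustration only -- not lvm2 code. */
#include <stdio.h>

struct pv_entry { const char *uuid; int dev_present; };

/* Return 1 when every PV has a device, else report and return 0. */
static int vg_complete_for_activation(const struct pv_entry *pvs, int n_pvs,
                                      const char *vg_name)
{
        int missing = 0, i;

        for (i = 0; i < n_pvs; i++)
                if (!pvs[i].dev_present)
                        missing++;

        if (missing) {
                printf("VG %s incomplete (need %d), skipping activation.\n",
                       vg_name, missing);
                return 0;
        }

        return 1;
}

int main(void)
{
        struct pv_entry pvs[] = { { "pv1", 1 }, { "pv2", 0 } };

        return vg_complete_for_activation(pvs, 2, "vg0") ? 0 : 1;
}
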
- -(The recent commit "pvscan: avoid redundant activation" -could make this bug more apparent because redundant -activations can cover up the effect of activating an -incomplete VG and missing some LV activations.) - -(cherry picked from commit 6b12930860a993624d6325aec2e9c561f4412aa9) ---- - lib/cache/lvmetad.c | 15 ++++++++---- - lib/cache/lvmetad.h | 2 +- - tools/lvmcmdline.c | 2 +- - tools/lvscan.c | 2 +- - tools/pvscan.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++----- - tools/vgcfgrestore.c | 2 +- - tools/vgimport.c | 2 +- - tools/vgimportclone.c | 2 +- - tools/vgscan.c | 2 +- - 9 files changed, 77 insertions(+), 17 deletions(-) - -diff --git a/lib/cache/lvmetad.c b/lib/cache/lvmetad.c -index 1eda567..e659711 100644 ---- a/lib/cache/lvmetad.c -+++ b/lib/cache/lvmetad.c -@@ -1704,6 +1704,13 @@ int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct devi - changed = daemon_reply_int(reply, "changed", 0); - } - -+ if (vg && vg->system_id && vg->system_id[0] && -+ cmd->system_id && cmd->system_id[0] && -+ strcmp(vg->system_id, cmd->system_id)) { -+ log_debug_lvmetad("Ignore foreign VG %s on %s", vg->name , dev_name(dev)); -+ goto out; -+ } -+ - /* - * If lvmetad now sees all PVs in the VG, it returned the - * "complete" status string. Add this VG name to the list -@@ -1734,7 +1741,7 @@ int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct devi - log_error("str_list_add failed"); - } - } -- -+out: - daemon_reply_destroy(reply); - - return result; -@@ -2347,7 +2354,7 @@ bad: - * generally revert disk scanning and not use lvmetad. - */ - --int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait) -+int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait, struct dm_list *found_vgnames) - { - struct device_list *devl, *devl2; - struct dm_list scan_devs; -@@ -2429,7 +2436,7 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait) - - dm_list_del(&devl->list); - -- ret = lvmetad_pvscan_single(cmd, devl->dev, NULL, NULL); -+ ret = lvmetad_pvscan_single(cmd, devl->dev, found_vgnames, NULL); - - label_scan_invalidate(devl->dev); - -@@ -2774,7 +2781,7 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force) - * we rescanned for the token, and the time we acquired the global - * lock.) 
- */ -- if (!lvmetad_pvscan_all_devs(cmd, 1)) { -+ if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - return; -diff --git a/lib/cache/lvmetad.h b/lib/cache/lvmetad.h -index 73c2645..55ce16a 100644 ---- a/lib/cache/lvmetad.h -+++ b/lib/cache/lvmetad.h -@@ -151,7 +151,7 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev, - struct dm_list *found_vgnames, - struct dm_list *changed_vgnames); - --int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait); -+int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait, struct dm_list *found_vgnames); - - int lvmetad_vg_clear_outdated_pvs(struct volume_group *vg); - void lvmetad_validate_global_cache(struct cmd_context *cmd, int force); -diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c -index f82827d..75a0401 100644 ---- a/tools/lvmcmdline.c -+++ b/tools/lvmcmdline.c -@@ -2991,7 +2991,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv) - */ - if (lvmetad_used() && !_cmd_no_lvmetad_autoscan(cmd)) { - if (cmd->include_foreign_vgs || !lvmetad_token_matches(cmd)) { -- if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, cmd->include_foreign_vgs ? 1 : 0)) { -+ if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, cmd->include_foreign_vgs ? 1 : 0, NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - } -diff --git a/tools/lvscan.c b/tools/lvscan.c -index c38208a..34e9f31 100644 ---- a/tools/lvscan.c -+++ b/tools/lvscan.c -@@ -103,7 +103,7 @@ int lvscan(struct cmd_context *cmd, int argc, char **argv) - - /* Needed because this command has NO_LVMETAD_AUTOSCAN. */ - if (lvmetad_used() && (!lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) { -- if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) { -+ if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - } -diff --git a/tools/pvscan.c b/tools/pvscan.c -index e5afe0c..9e76f52 100644 ---- a/tools/pvscan.c -+++ b/tools/pvscan.c -@@ -38,6 +38,7 @@ struct pvscan_params { - - struct pvscan_aa_params { - int refresh_all; -+ int all_vgs; - unsigned int activate_errors; - struct dm_list changed_vgnames; - }; -@@ -223,6 +224,28 @@ void online_vg_file_remove(const char *vgname) - unlink(path); - } - -+static void _online_files_remove(const char *dirpath) -+{ -+ char path[PATH_MAX]; -+ DIR *dir; -+ struct dirent *de; -+ -+ if (!(dir = opendir(dirpath))) -+ return; -+ -+ while ((de = readdir(dir))) { -+ if (de->d_name[0] == '.') -+ continue; -+ -+ memset(path, 0, sizeof(path)); -+ snprintf(path, sizeof(path), "%s/%s", dirpath, de->d_name); -+ if (unlink(path)) -+ log_sys_debug("unlink", path); -+ } -+ if (closedir(dir)) -+ log_sys_debug("closedir", dirpath); -+} -+ - /* - * pvscan --cache does not perform any lvmlockd locking, and - * pvscan --cache -aay skips autoactivation in lockd VGs. 
-@@ -271,6 +294,8 @@ static int _pvscan_autoactivate_single(struct cmd_context *cmd, const char *vg_n - struct volume_group *vg, struct processing_handle *handle) - { - struct pvscan_aa_params *pp = (struct pvscan_aa_params *)handle->custom_handle; -+ struct pv_list *pvl; -+ int incomplete = 0; - - if (vg_is_clustered(vg)) - return ECMD_PROCESSED; -@@ -281,6 +306,24 @@ static int _pvscan_autoactivate_single(struct cmd_context *cmd, const char *vg_n - if (is_lockd_type(vg->lock_type)) - return ECMD_PROCESSED; - -+ /* -+ * This all_vgs case only happens in fallback cases when there's some -+ * problem preventing the use of lvmetad. When lvmetad can be properly -+ * used, the found_vgnames list should have the names of complete VGs -+ * that should be activated. -+ */ -+ if (pp->all_vgs) { -+ dm_list_iterate_items(pvl, &vg->pvs) { -+ if (!pvl->pv->dev) -+ incomplete++; -+ } -+ -+ if (incomplete) { -+ log_print("pvscan[%d] VG %s incomplete (need %d).", getpid(), vg->name, incomplete); -+ return ECMD_PROCESSED; -+ } -+ } -+ - log_debug("pvscan autoactivating VG %s.", vg_name); - - #if 0 -@@ -377,6 +420,7 @@ static int _pvscan_autoactivate(struct cmd_context *cmd, struct pvscan_aa_params - if (all_vgs) { - cmd->cname->flags |= ALL_VGS_IS_DEFAULT; - pp->refresh_all = 1; -+ pp->all_vgs = 1; - } - - ret = process_each_vg(cmd, 0, NULL, NULL, vgnames, 0, 0, handle, _pvscan_autoactivate_single); -@@ -463,17 +507,23 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv) - * Scan all devices when no args are given. - */ - if (!argc && !devno_args) { -+ _online_files_remove(_vgs_online_dir); -+ - log_verbose("Scanning all devices."); - -- if (!lvmetad_pvscan_all_devs(cmd, 1)) { -+ if (!lvmetad_pvscan_all_devs(cmd, 1, &found_vgnames)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); -+ all_vgs = 1; - } - if (lvmetad_used() && lvmetad_is_disabled(cmd, &reason)) { - log_warn("WARNING: Not using lvmetad because %s.", reason); - lvmetad_make_unused(cmd); -+ all_vgs = 1; - } -- all_vgs = 1; -+ -+ if (!all_vgs && do_activate) -+ log_print("pvscan[%d] activating all complete VGs (no args)", getpid()); - goto activate; - } - -@@ -485,7 +535,7 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv) - * never scan any devices other than those specified. - */ - if (!lvmetad_token_matches(cmd)) { -- if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) { -+ if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, &found_vgnames)) { - log_warn("WARNING: Not updating lvmetad because cache update failed."); - ret = ECMD_FAILED; - goto out; -@@ -493,9 +543,12 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv) - if (lvmetad_used() && lvmetad_is_disabled(cmd, &reason)) { - log_warn("WARNING: Not using lvmetad because %s.", reason); - lvmetad_make_unused(cmd); -+ all_vgs = 1; -+ log_print("pvscan[%d] activating all directly (lvmetad disabled from scan) %s", getpid(), dev_arg ?: ""); - } -- log_print("pvscan[%d] activating all directly (lvmetad token) %s", getpid(), dev_arg ?: ""); -- all_vgs = 1; -+ -+ if (!all_vgs) -+ log_print("pvscan[%d] activating all complete VGs for init", getpid()); - goto activate; - } - -@@ -808,7 +861,7 @@ int pvscan(struct cmd_context *cmd, int argc, char **argv) - - /* Needed because this command has NO_LVMETAD_AUTOSCAN. 
*/ - if (lvmetad_used() && (!lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) { -- if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0)) { -+ if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, 0, NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - } -diff --git a/tools/vgcfgrestore.c b/tools/vgcfgrestore.c -index 48a2fa4..e7f9848 100644 ---- a/tools/vgcfgrestore.c -+++ b/tools/vgcfgrestore.c -@@ -177,7 +177,7 @@ rescan: - } - if (!refresh_filters(cmd)) - stack; -- if (!lvmetad_pvscan_all_devs(cmd, 1)) { -+ if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) { - log_warn("WARNING: Failed to scan devices."); - log_warn("WARNING: Update lvmetad with pvscan --cache."); - goto out; -diff --git a/tools/vgimport.c b/tools/vgimport.c -index ea50198..d4455ec 100644 ---- a/tools/vgimport.c -+++ b/tools/vgimport.c -@@ -96,7 +96,7 @@ int vgimport(struct cmd_context *cmd, int argc, char **argv) - * import it. - */ - if (lvmetad_used()) { -- if (!lvmetad_pvscan_all_devs(cmd, 1)) { -+ if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - } -diff --git a/tools/vgimportclone.c b/tools/vgimportclone.c -index c4c5d4c..ac3766b 100644 ---- a/tools/vgimportclone.c -+++ b/tools/vgimportclone.c -@@ -377,7 +377,7 @@ out: - if (!refresh_filters(cmd)) - stack; - -- if (!lvmetad_pvscan_all_devs(cmd, 1)) { -+ if (!lvmetad_pvscan_all_devs(cmd, 1, NULL)) { - log_warn("WARNING: Failed to scan devices."); - log_warn("WARNING: Update lvmetad with pvscan --cache."); - } -diff --git a/tools/vgscan.c b/tools/vgscan.c -index 1ec9083..7a63996 100644 ---- a/tools/vgscan.c -+++ b/tools/vgscan.c -@@ -101,7 +101,7 @@ int vgscan(struct cmd_context *cmd, int argc, char **argv) - log_verbose("Ignoring vgscan --cache command because lvmetad is not in use."); - - if (lvmetad_used() && (arg_is_set(cmd, cache_long_ARG) || !lvmetad_token_matches(cmd) || lvmetad_is_disabled(cmd, &reason))) { -- if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, arg_is_set(cmd, cache_long_ARG))) { -+ if (lvmetad_used() && !lvmetad_pvscan_all_devs(cmd, arg_is_set(cmd, cache_long_ARG), NULL)) { - log_warn("WARNING: Not using lvmetad because cache update failed."); - lvmetad_make_unused(cmd); - } --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_02_187-raid-better-place-for-blocking-reshapes.patch b/SOURCES/lvm2-2_02_187-raid-better-place-for-blocking-reshapes.patch deleted file mode 100644 index b0eea6b..0000000 --- a/SOURCES/lvm2-2_02_187-raid-better-place-for-blocking-reshapes.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 56474336821cf703073bd0d82f9428697b85ec29 Mon Sep 17 00:00:00 2001 -From: Zdenek Kabelac -Date: Fri, 7 Feb 2020 15:14:05 +0100 -Subject: [PATCH] raid: better place for blocking reshapes - -Still the place can be better to block only particular reshape -operations which ATM cause kernel problems. - -We check if the new number of images is higher - and prevent to take -conversion if the volume is in use (i.e. thin-pool's data LV). 
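
Read as a condition, the rule above is: refuse the reshape only when it would raise the number of images and the RAID LV is itself in use by another LV (for example as thin-pool data), i.e. it is not a top-level device. The sketch below uses a hypothetical type rather than lvm2's struct logical_volume; the authoritative version is the hunk added to _raid_reshape() below.

/* Simplified illustration only -- not lvm2 code. */
struct toy_lv { int used_by_other_lv; };        /* e.g. serving as thin-pool data */

/* Return 0 (refuse) when adding images to a stacked volume, else 1 (allow). */
int reshape_allowed(const struct toy_lv *lv,
                    unsigned old_image_count, unsigned new_image_count)
{
        if (old_image_count < new_image_count && lv->used_by_other_lv)
                return 0;

        return 1;
}
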
- -(cherry picked from commit 96985b1373d58b411a80c2985f348466e78cbe6e) -(cherry picked from commit 253d10f840682f85dad0e4c29f55ff50f94792fa) ---- - lib/metadata/raid_manip.c | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) - -diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c -index 7481ebf..eae0a8d 100644 ---- a/lib/metadata/raid_manip.c -+++ b/lib/metadata/raid_manip.c -@@ -2299,6 +2299,13 @@ static int _raid_reshape(struct logical_volume *lv, - if ((new_image_count = new_stripes + seg->segtype->parity_devs) < 2) - return_0; - -+ /* FIXME Can't reshape volume in use - aka not toplevel devices */ -+ if (old_image_count < new_image_count && -+ !dm_list_empty(&seg->lv->segs_using_this_lv)) { -+ log_error("Unable to convert stacked volume %s.", display_lvname(seg->lv)); -+ return 0; -+ } -+ - if (!_check_max_raid_devices(new_image_count)) - return_0; - -@@ -6218,12 +6225,6 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr - if (!(*segtype = get_segtype_from_flag(cmd, seg_flag))) - return_0; - -- /* FIXME Can't reshape volume in use - aka not toplevel devices */ -- if (!dm_list_empty(&seg_from->lv->segs_using_this_lv)) { -- log_error("Can't reshape stacked volume %s.", display_lvname(seg_from->lv)); -- return 0; -- } -- - if (segtype_sav != *segtype) { - log_warn("Replaced LV type %s%s with possible type %s.", - segtype_sav->name, _get_segtype_alias_str(seg_from->lv, segtype_sav), --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_02_187-raid-disallow-reshape-of-stacked-LVs.patch b/SOURCES/lvm2-2_02_187-raid-disallow-reshape-of-stacked-LVs.patch deleted file mode 100644 index 0e8b2cd..0000000 --- a/SOURCES/lvm2-2_02_187-raid-disallow-reshape-of-stacked-LVs.patch +++ /dev/null @@ -1,32 +0,0 @@ - WHATS_NEW | 1 + - lib/metadata/raid_manip.c | 6 ++++++ - 2 files changed, 7 insertions(+) - -diff --git a/WHATS_NEW b/WHATS_NEW -index ac70074..01d0bc6 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -1,5 +1,6 @@ - Version 2.02.187 - - =================================== -+ Prevent raid reshaping of stacked volumes. - Fix lvmetad shutdown and avoid lenghty timeouts when rebooting system. - Prevent creating VGs with PVs with different logical block sizes. - Pvmove runs in exlusively activating mode for exclusively active LVs. 
-diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c -index bffae60..768f261 100644 ---- a/lib/metadata/raid_manip.c -+++ b/lib/metadata/raid_manip.c -@@ -6445,6 +6445,12 @@ int lv_raid_convert(struct logical_volume *lv, - uint32_t available_slvs, removed_slvs; - takeover_fn_t takeover_fn; - -+ /* FIXME Can't reshape volume in use - aka not toplevel devices */ -+ if (!dm_list_empty(&lv->segs_using_this_lv)) { -+ log_error("Can't reshape stacked volume %s.", display_lvname(lv)); -+ return 0; -+ } -+ - /* FIXME If not active, prompt and activate */ - /* FIXME Some operations do not require the LV to be active */ - /* LV must be active to perform raid conversion operations */ diff --git a/SOURCES/lvm2-2_02_187-raid-more-limitted-prohibition-of-stacked-raid-usage.patch b/SOURCES/lvm2-2_02_187-raid-more-limitted-prohibition-of-stacked-raid-usage.patch deleted file mode 100644 index 6c47c54..0000000 --- a/SOURCES/lvm2-2_02_187-raid-more-limitted-prohibition-of-stacked-raid-usage.patch +++ /dev/null @@ -1,34 +0,0 @@ - lib/metadata/raid_manip.c | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) - -diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c -index 768f261..7481ebf 100644 ---- a/lib/metadata/raid_manip.c -+++ b/lib/metadata/raid_manip.c -@@ -6217,6 +6217,13 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr - if (seg_flag) { - if (!(*segtype = get_segtype_from_flag(cmd, seg_flag))) - return_0; -+ -+ /* FIXME Can't reshape volume in use - aka not toplevel devices */ -+ if (!dm_list_empty(&seg_from->lv->segs_using_this_lv)) { -+ log_error("Can't reshape stacked volume %s.", display_lvname(seg_from->lv)); -+ return 0; -+ } -+ - if (segtype_sav != *segtype) { - log_warn("Replaced LV type %s%s with possible type %s.", - segtype_sav->name, _get_segtype_alias_str(seg_from->lv, segtype_sav), -@@ -6445,12 +6452,6 @@ int lv_raid_convert(struct logical_volume *lv, - uint32_t available_slvs, removed_slvs; - takeover_fn_t takeover_fn; - -- /* FIXME Can't reshape volume in use - aka not toplevel devices */ -- if (!dm_list_empty(&lv->segs_using_this_lv)) { -- log_error("Can't reshape stacked volume %s.", display_lvname(lv)); -- return 0; -- } -- - /* FIXME If not active, prompt and activate */ - /* FIXME Some operations do not require the LV to be active */ - /* LV must be active to perform raid conversion operations */ diff --git a/SOURCES/lvm2-2_02_187-snapshot-correctly-check-device-id-of-merged-thin.patch b/SOURCES/lvm2-2_02_187-snapshot-correctly-check-device-id-of-merged-thin.patch deleted file mode 100644 index e835531..0000000 --- a/SOURCES/lvm2-2_02_187-snapshot-correctly-check-device-id-of-merged-thin.patch +++ /dev/null @@ -1,35 +0,0 @@ - lib/activate/dev_manager.c | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - -diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c -index 56608e3..a5e026c 100644 ---- a/lib/activate/dev_manager.c -+++ b/lib/activate/dev_manager.c -@@ -1592,6 +1592,9 @@ int dev_manager_thin_percent(struct dev_manager *dm, - return 1; - } - -+/* -+ * Explore state of running DM table to obtain currently used deviceId -+ */ - int dev_manager_thin_device_id(struct dev_manager *dm, - const struct logical_volume *lv, - uint32_t *device_id) -@@ -1601,10 +1604,16 @@ int dev_manager_thin_device_id(struct dev_manager *dm, - struct dm_info info; - uint64_t start, length; - char *params, *target_type = NULL; -+ const char *layer = lv_layer(lv); - int r = 0; - -+ if 
(lv_is_merging_origin(lv) && !lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0)) -+ /* If the merge has already happened, that table -+ * can already be using correct LV without -real layer */ -+ layer = NULL; -+ - /* Build dlid for the thin layer */ -- if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv)))) -+ if (!(dlid = build_dm_uuid(dm->mem, lv, layer))) - return_0; - - if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0))) diff --git a/SOURCES/lvm2-2_02_187-snapshot-fix-checking-of-merged-thin-volume.patch b/SOURCES/lvm2-2_02_187-snapshot-fix-checking-of-merged-thin-volume.patch deleted file mode 100644 index 7085762..0000000 --- a/SOURCES/lvm2-2_02_187-snapshot-fix-checking-of-merged-thin-volume.patch +++ /dev/null @@ -1,17 +0,0 @@ - lib/activate/activate.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/lib/activate/activate.c b/lib/activate/activate.c -index 0790817..aba5d14 100644 ---- a/lib/activate/activate.c -+++ b/lib/activate/activate.c -@@ -831,7 +831,8 @@ int lv_info_with_seg_status(struct cmd_context *cmd, - with_open_count, with_read_ahead)) - return_0; - -- if (status->seg_status.type == SEG_STATUS_SNAPSHOT) { -+ if (status->seg_status.type == SEG_STATUS_SNAPSHOT || -+ (lv_is_thin_volume(olv) && (status->seg_status.type == SEG_STATUS_THIN))) { - log_debug_activation("Snapshot merge is in progress, querying status of %s instead.", - display_lvname(lv)); - /* diff --git a/SOURCES/lvm2-2_02_187-snapshot-use-single-merging-sequence.patch b/SOURCES/lvm2-2_02_187-snapshot-use-single-merging-sequence.patch deleted file mode 100644 index e34f02e..0000000 --- a/SOURCES/lvm2-2_02_187-snapshot-use-single-merging-sequence.patch +++ /dev/null @@ -1,54 +0,0 @@ - lib/metadata/snapshot_manip.c | 19 +------------------ - 1 file changed, 1 insertion(+), 18 deletions(-) - -diff --git a/lib/metadata/snapshot_manip.c b/lib/metadata/snapshot_manip.c -index 156b4c8..63e3361 100644 ---- a/lib/metadata/snapshot_manip.c -+++ b/lib/metadata/snapshot_manip.c -@@ -286,7 +286,6 @@ int vg_add_snapshot(struct logical_volume *origin, - - int vg_remove_snapshot(struct logical_volume *cow) - { -- int merging_snapshot = 0; - struct logical_volume *origin = origin_from_cow(cow); - int is_origin_active = lv_is_active(origin); - -@@ -315,17 +314,6 @@ int vg_remove_snapshot(struct logical_volume *cow) - * preload origin IFF "snapshot-merge" target is active - * - IMPORTANT: avoids preload if inactivate merge is pending - */ -- if (lv_has_target_type(origin->vg->vgmem, origin, NULL, -- TARGET_NAME_SNAPSHOT_MERGE)) { -- /* -- * preload origin to: -- * - allow proper release of -cow -- * - avoid allocations with other devices suspended -- * when transitioning from "snapshot-merge" to -- * "snapshot-origin after a merge completes. -- */ -- merging_snapshot = 1; -- } - } - - if (!lv_remove(cow->snapshot->lv)) { -@@ -367,7 +355,7 @@ int vg_remove_snapshot(struct logical_volume *cow) - * the LV lock on cluster has to be grabbed, so use - * activate_lv() which resumes suspend cow device. 
- */ -- if (!merging_snapshot && !activate_lv(cow->vg->cmd, cow)) { -+ if (!activate_lv(cow->vg->cmd, cow)) { - log_error("Failed to activate %s.", cow->name); - return 0; - } -@@ -376,11 +364,6 @@ int vg_remove_snapshot(struct logical_volume *cow) - log_error("Failed to resume %s.", origin->name); - return 0; - } -- -- if (merging_snapshot && !activate_lv(cow->vg->cmd, cow)) { -- log_error("Failed to activate %s.", cow->name); -- return 0; -- } - } - - return 1; diff --git a/SOURCES/lvm2-2_02_187-thin-activate-layer-pool-aas-read-only-LV.patch b/SOURCES/lvm2-2_02_187-thin-activate-layer-pool-aas-read-only-LV.patch deleted file mode 100644 index eff68ac..0000000 --- a/SOURCES/lvm2-2_02_187-thin-activate-layer-pool-aas-read-only-LV.patch +++ /dev/null @@ -1,32 +0,0 @@ - WHATS_NEW | 1 + - lib/activate/dev_manager.c | 5 +++++ - 2 files changed, 6 insertions(+) - -diff --git a/WHATS_NEW b/WHATS_NEW -index d1f4530..00b84f9 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -2,6 +2,7 @@ Version 2.02.187 - - =================================== - Prevent creating VGs with PVs with different logical block sizes. - Pvmove runs in exlusively activating mode for exclusively active LVs. -+ Activate thin-pool layered volume as 'read-only' device. - Enhance validation for thin and cache pool conversion and swapping. - - Version 2.02.186 - 27th August 2019 -diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c -index dc64159..56608e3 100644 ---- a/lib/activate/dev_manager.c -+++ b/lib/activate/dev_manager.c -@@ -84,6 +84,11 @@ int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts - if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv)) - return 0; /* Keep RAID SubLvs writable */ - -+ if (!layer) { -+ if (lv_is_thin_pool(lv)) -+ return 1; -+ } -+ - return (laopts->read_only || !(lv->status & LVM_WRITE)); - } - diff --git a/SOURCES/lvm2-2_02_187-vgcreate-vgextend-restrict-PVs-with-mixed-block-size.patch b/SOURCES/lvm2-2_02_187-vgcreate-vgextend-restrict-PVs-with-mixed-block-size.patch deleted file mode 100644 index 1583cc9..0000000 --- a/SOURCES/lvm2-2_02_187-vgcreate-vgextend-restrict-PVs-with-mixed-block-size.patch +++ /dev/null @@ -1,208 +0,0 @@ - lib/commands/toolcontext.h | 1 + - lib/config/config_settings.h | 5 +++++ - lib/metadata/metadata-exported.h | 1 + - lib/metadata/metadata.c | 44 +++++++++++++++++++++++++++++++++++++ - tools/lvmcmdline.c | 2 ++ - tools/toollib.c | 47 ++++++++++++++++++++++++++++++++++++++++ - tools/vgcreate.c | 2 ++ - 7 files changed, 102 insertions(+) - -diff --git a/lib/commands/toolcontext.h b/lib/commands/toolcontext.h -index 4b2a079..497f4bd 100644 ---- a/lib/commands/toolcontext.h -+++ b/lib/commands/toolcontext.h -@@ -155,6 +155,7 @@ struct cmd_context { - unsigned include_shared_vgs:1; /* report/display cmds can reveal lockd VGs */ - unsigned include_active_foreign_vgs:1; /* cmd should process foreign VGs with active LVs */ - unsigned vg_read_print_access_error:1; /* print access errors from vg_read */ -+ unsigned allow_mixed_block_sizes:1; - unsigned force_access_clustered:1; - unsigned lockd_gl_disable:1; - unsigned lockd_vg_disable:1; -diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h -index 9904a62..622e982 100644 ---- a/lib/config/config_settings.h -+++ b/lib/config/config_settings.h -@@ -470,6 +470,11 @@ cfg(devices_allow_changes_with_duplicate_pvs_CFG, "allow_changes_with_duplicate_ - "Enabling this setting allows the VG to be used as usual even with\n" - "uncertain devices.\n") - 
-+cfg(devices_allow_mixed_block_sizes_CFG, "allow_mixed_block_sizes", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 3, 6), NULL, 0, NULL, -+ "Allow PVs in the same VG with different logical block sizes.\n" -+ "When allowed, the user is responsible to ensure that an LV is\n" -+ "using PVs with matching block sizes when necessary.\n") -+ - cfg_array(allocation_cling_tag_list_CFG, "cling_tag_list", allocation_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 2, 77), NULL, 0, NULL, - "Advise LVM which PVs to use when searching for new space.\n" - "When searching for free space to extend an LV, the 'cling' allocation\n" -diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h -index 2245c29..5674545 100644 ---- a/lib/metadata/metadata-exported.h -+++ b/lib/metadata/metadata-exported.h -@@ -593,6 +593,7 @@ struct pvcreate_params { - unsigned is_remove : 1; /* is removing PVs, not creating */ - unsigned preserve_existing : 1; - unsigned check_failed : 1; -+ unsigned check_consistent_block_size : 1; - }; - - struct lvresize_params { -diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c -index 3620240..123f7f5 100644 ---- a/lib/metadata/metadata.c -+++ b/lib/metadata/metadata.c -@@ -699,12 +699,40 @@ int vg_extend_each_pv(struct volume_group *vg, struct pvcreate_params *pp) - { - struct pv_list *pvl; - unsigned int max_phys_block_size = 0; -+ unsigned int physical_block_size, logical_block_size; -+ unsigned int prev_lbs = 0; -+ int inconsistent_existing_lbs = 0; - - log_debug_metadata("Adding PVs to VG %s.", vg->name); - - if (vg_bad_status_bits(vg, RESIZEABLE_VG)) - return_0; - -+ /* -+ * Check if existing PVs have inconsistent block sizes. -+ * If so, do not enforce new devices to be consistent. -+ */ -+ dm_list_iterate_items(pvl, &vg->pvs) { -+ logical_block_size = 0; -+ physical_block_size = 0; -+ -+ if (!dev_get_direct_block_sizes(pvl->pv->dev, &physical_block_size, &logical_block_size)) -+ continue; -+ -+ if (!logical_block_size) -+ continue; -+ -+ if (!prev_lbs) { -+ prev_lbs = logical_block_size; -+ continue; -+ } -+ -+ if (prev_lbs != logical_block_size) { -+ inconsistent_existing_lbs = 1; -+ break; -+ } -+ } -+ - dm_list_iterate_items(pvl, &pp->pvs) { - log_debug_metadata("Adding PV %s to VG %s.", pv_dev_name(pvl->pv), vg->name); - -@@ -715,6 +743,22 @@ int vg_extend_each_pv(struct volume_group *vg, struct pvcreate_params *pp) - return 0; - } - -+ logical_block_size = 0; -+ physical_block_size = 0; -+ -+ if (!dev_get_direct_block_sizes(pvl->pv->dev, &physical_block_size, &logical_block_size)) -+ log_warn("WARNING: PV %s has unknown block size.", pv_dev_name(pvl->pv)); -+ -+ else if (prev_lbs && logical_block_size && (logical_block_size != prev_lbs)) { -+ if (vg->cmd->allow_mixed_block_sizes || inconsistent_existing_lbs) -+ log_debug("Devices have inconsistent block sizes (%u and %u)", prev_lbs, logical_block_size); -+ else { -+ log_error("Devices have inconsistent logical block sizes (%u and %u).", -+ prev_lbs, logical_block_size); -+ return 0; -+ } -+ } -+ - if (!add_pv_to_vg(vg, pv_dev_name(pvl->pv), pvl->pv, 0)) { - log_error("PV %s cannot be added to VG %s.", - pv_dev_name(pvl->pv), vg->name); -diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c -index f238b64..f82827d 100644 ---- a/tools/lvmcmdline.c -+++ b/tools/lvmcmdline.c -@@ -2308,6 +2308,8 @@ static int _get_current_settings(struct cmd_context *cmd) - if (cmd->cname->flags & CAN_USE_ONE_SCAN) - cmd->can_use_one_scan = 1; - -+ cmd->allow_mixed_block_sizes = 
find_config_tree_bool(cmd, devices_allow_mixed_block_sizes_CFG, NULL); -+ - cmd->partial_activation = 0; - cmd->degraded_activation = 0; - activation_mode = find_config_tree_str(cmd, activation_mode_CFG, NULL); -diff --git a/tools/toollib.c b/tools/toollib.c -index 81953ee..0b957cc 100644 ---- a/tools/toollib.c -+++ b/tools/toollib.c -@@ -5506,6 +5506,8 @@ int pvcreate_each_device(struct cmd_context *cmd, - struct device_list *devl; - const char *pv_name; - int consistent = 0; -+ unsigned int physical_block_size, logical_block_size; -+ unsigned int prev_pbs = 0, prev_lbs = 0; - int must_use_all = (cmd->cname->flags & MUST_USE_ALL_ARGS); - int found; - unsigned i; -@@ -5584,6 +5586,51 @@ int pvcreate_each_device(struct cmd_context *cmd, - pd->dev = dev_cache_get(pd->name, cmd->full_filter); - - /* -+ * Check for consistent block sizes. -+ */ -+ if (pp->check_consistent_block_size) { -+ dm_list_iterate_items(pd, &pp->arg_devices) { -+ if (!pd->dev) -+ continue; -+ -+ logical_block_size = 0; -+ physical_block_size = 0; -+ -+ if (!dev_get_direct_block_sizes(pd->dev, &physical_block_size, &logical_block_size)) { -+ log_warn("WARNING: Unknown block size for device %s.", dev_name(pd->dev)); -+ continue; -+ } -+ -+ if (!logical_block_size) { -+ log_warn("WARNING: Unknown logical_block_size for device %s.", dev_name(pd->dev)); -+ continue; -+ } -+ -+ if (!prev_lbs) { -+ prev_lbs = logical_block_size; -+ prev_pbs = physical_block_size; -+ continue; -+ } -+ -+ if (prev_lbs == logical_block_size) { -+ /* Require lbs to match, just warn about unmatching pbs. */ -+ if (!cmd->allow_mixed_block_sizes && prev_pbs && physical_block_size && -+ (prev_pbs != physical_block_size)) -+ log_warn("WARNING: Devices have inconsistent physical block sizes (%u and %u).", -+ prev_pbs, physical_block_size); -+ continue; -+ } -+ -+ if (!cmd->allow_mixed_block_sizes) { -+ log_error("Devices have inconsistent logical block sizes (%u and %u).", -+ prev_lbs, logical_block_size); -+ log_print("See lvm.conf allow_mixed_block_sizes."); -+ return 0; -+ } -+ } -+ } -+ -+ /* - * Use process_each_pv to search all existing PVs and devices. - * - * This is a slightly different way to use process_each_pv, because the -diff --git a/tools/vgcreate.c b/tools/vgcreate.c -index 4356d99..7add53b 100644 ---- a/tools/vgcreate.c -+++ b/tools/vgcreate.c -@@ -48,6 +48,8 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv) - /* Don't create a new PV on top of an existing PV like pvcreate does. */ - pp.preserve_existing = 1; - -+ pp.check_consistent_block_size = 1; -+ - if (!vgcreate_params_set_defaults(cmd, &vp_def, NULL)) - return EINVALID_CMD_LINE; - vp_def.vg_name = vg_name; diff --git a/SOURCES/lvm2-2_02_188-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch b/SOURCES/lvm2-2_02_188-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch new file mode 100644 index 0000000..fed16e2 --- /dev/null +++ b/SOURCES/lvm2-2_02_188-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch @@ -0,0 +1,179 @@ + WHATS_NEW_DM | 4 ++++ + man/blkdeactivate.8_main | 11 +++++++++++ + scripts/blkdeactivate.sh.in | 48 ++++++++++++++++++++++++++++++++++++++++++++- + 3 files changed, 62 insertions(+), 1 deletion(-) + +diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM +index 2b213bb..31e655c 100644 +--- a/WHATS_NEW_DM ++++ b/WHATS_NEW_DM +@@ -1,3 +1,7 @@ ++Version 1.02.172 - ++================================== ++ Add support for VDO in blkdeactivate script. 
++ + Version 1.02.170 - 24th March 2020 + ================================== + Add support for DM_DEVICE_GET_TARGET_VERSION. +diff --git a/man/blkdeactivate.8_main b/man/blkdeactivate.8_main +index f3c19a8..06af52e 100644 +--- a/man/blkdeactivate.8_main ++++ b/man/blkdeactivate.8_main +@@ -9,6 +9,7 @@ blkdeactivate \(em utility to deactivate block devices + .RB [ -l \ \fIlvm_options\fP ] + .RB [ -m \ \fImpath_options\fP ] + .RB [ -r \ \fImdraid_options\fP ] ++.RB [ -o \ \fIvdo_options\fP ] + .RB [ -u ] + .RB [ -v ] + .RI [ device ] +@@ -70,6 +71,15 @@ Comma-separated list of MD RAID specific options: + Wait MD device's resync, recovery or reshape action to complete + before deactivation. + .RE ++ ++.TP ++.BR -o ", " --vdooptions \ \fIvdo_options\fP ++Comma-separated list of VDO specific options: ++.RS ++.IP \fIconfigfile=file\fP ++Use specified VDO configuration file. ++.RE ++ + .TP + .BR -u ", " --umount + Unmount a mounted device before trying to deactivate it. +@@ -120,4 +130,5 @@ of a device-mapper device fails, retry it and force removal. + .BR lvm (8), + .BR mdadm (8), + .BR multipathd (8), ++.BR vdo (8), + .BR umount (8) +diff --git a/scripts/blkdeactivate.sh.in b/scripts/blkdeactivate.sh.in +index a4b8a8f..57b3e58 100644 +--- a/scripts/blkdeactivate.sh.in ++++ b/scripts/blkdeactivate.sh.in +@@ -1,6 +1,6 @@ + #!/bin/bash + # +-# Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved. ++# Copyright (C) 2012-2020 Red Hat, Inc. All rights reserved. + # + # This file is part of LVM2. + # +@@ -38,6 +38,7 @@ MDADM="/sbin/mdadm" + MOUNTPOINT="/bin/mountpoint" + MPATHD="/sbin/multipathd" + UMOUNT="/bin/umount" ++VDO="/bin/vdo" + + sbindir="@SBINDIR@" + DMSETUP="$sbindir/dmsetup" +@@ -54,6 +55,7 @@ DMSETUP_OPTS="" + LVM_OPTS="" + MDADM_OPTS="" + MPATHD_OPTS="" ++VDO_OPTS="" + + LSBLK="/bin/lsblk -r --noheadings -o TYPE,KNAME,NAME,MOUNTPOINT" + LSBLK_VARS="local devtype local kname local name local mnt" +@@ -124,6 +126,7 @@ usage() { + echo " -l | --lvmoptions LVM_OPTIONS Comma separated LVM specific options" + echo " -m | --mpathoptions MPATH_OPTIONS Comma separated DM-multipath specific options" + echo " -r | --mdraidoptions MDRAID_OPTIONS Comma separated MD RAID specific options" ++ echo " -o | --vdooptions VDO_OPTIONS Comma separated VDO specific options" + echo " -u | --umount Unmount the device if mounted" + echo " -v | --verbose Verbose mode (also implies -e)" + echo +@@ -138,6 +141,8 @@ usage() { + echo " wait wait for resync, recovery or reshape to complete first" + echo " MPATH_OPTIONS:" + echo " disablequeueing disable queueing on all DM-multipath devices first" ++ echo " VDO_OPTIONS:" ++ echo " configfile=file use specified VDO configuration file" + + exit + } +@@ -319,6 +324,23 @@ deactivate_md () { + fi + } + ++deactivate_vdo() { ++ local xname ++ xname=$(printf "%s" "$name") ++ test -b "$DEV_DIR/mapper/$xname" || return 0 ++ test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 ++ ++ deactivate_holders "$DEV_DIR/mapper/$xname" || return 1 ++ ++ echo -n " [VDO]: deactivating VDO volume $xname... " ++ if eval "$VDO" stop $VDO_OPTS --name="$xname" "$OUT" "$ERR"; then ++ echo "done" ++ else ++ echo "skipping" ++ add_device_to_skip_list ++ fi ++} ++ + deactivate () { + ###################################################################### + # DEACTIVATION HOOKS FOR NEW DEVICE TYPES GO HERE! 
# +@@ -335,6 +357,8 @@ deactivate () { + ###################################################################### + if test "$devtype" = "lvm"; then + deactivate_lvm ++ elif test "$devtype" = "vdo"; then ++ deactivate_vdo + elif test "${kname:0:3}" = "dm-"; then + deactivate_dm + elif test "${kname:0:2}" = "md"; then +@@ -479,6 +503,20 @@ get_mpathopts() { + IFS=$ORIG_IFS + } + ++get_vdoopts() { ++ ORIG_IFS=$IFS; IFS=',' ++ ++ for opt in $1; do ++ case "$opt" in ++ "") ;; ++ configfile=*) tmp=${opt#*=}; VDO_OPTS+="--confFile=${tmp%%,*} " ;; ++ *) echo "$opt: unknown VDO option" ++ esac ++ done ++ ++ IFS=$ORIG_IFS ++} ++ + set_env() { + if test "$ERRORS" -eq "1"; then + unset ERR +@@ -493,6 +531,7 @@ set_env() { + LVM_OPTS+="-vvvv" + MDADM_OPTS+="-vv" + MPATHD_OPTS+="-v 3" ++ VDO_OPTS+="--verbose " + else + OUT="1>$DEV_DIR/null" + fi +@@ -509,6 +548,12 @@ set_env() { + MDADM_AVAILABLE=0 + fi + ++ if test -f $VDO; then ++ VDO_AVAILABLE=1 ++ else ++ VDO_AVAILABLE=0 ++ fi ++ + MPATHD_RUNNING=0 + test "$MPATHD_DO_DISABLEQUEUEING" -eq 1 && { + if test -f "$MPATHD"; then +@@ -528,6 +573,7 @@ while test $# -ne 0; do + "-l"|"--lvmoptions") get_lvmopts "$2" ; shift ;; + "-m"|"--mpathoptions") get_mpathopts "$2" ; shift ;; + "-r"|"--mdraidoptions") get_mdraidopts "$2"; shift ;; ++ "-o"|"--vdooptions") get_vdoopts "$2"; shift ;; + "-u"|"--umount") DO_UMOUNT=1 ;; + "-v"|"--verbose") VERBOSE=1 ; ERRORS=1 ;; + "-vv") VERBOSE=1 ; ERRORS=1 ; set -x ;; diff --git a/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch b/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch index d6fcba1..53fc32f 100644 --- a/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch +++ b/SOURCES/lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch @@ -1,22 +1,8 @@ -From 16a03878cd39cb1fb0c052a41901b6660f9f674c Mon Sep 17 00:00:00 2001 -From: Zdenek Kabelac -Date: Wed, 8 Apr 2020 11:46:42 +0200 -Subject: [PATCH 4/4] lvconvert: no validation for thin-pools not used by lvm2 - -lvm2 supports thin-pool to be later used by other tools doing -virtual volumes themself (i.e. docker) - in this case we -shall not validate transaction Id - is this is used by -other tools and lvm2 keeps value 0 - so the transationId -validation need to be skipped in this case. 
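
The reasoning above can be summed up in one condition: a thin-pool that no lvm2 LV uses keeps transaction_id at 0, so the repair path should only insist on a matching transaction_id for pools lvm2 itself manages. The sketch below expresses that with a hypothetical helper; it only illustrates the commit message and is not the actual change to tools/lvconvert.c further down.

/* Simplified illustration only -- not lvm2 code. */
int must_check_transaction_id(int pool_used_by_lvm2, unsigned transaction_id)
{
        /* Externally managed pools (e.g. docker) keep transaction_id == 0,
         * so their metadata must not be validated against lvm2's counter. */
        if (!pool_used_by_lvm2 && transaction_id == 0)
                return 0;

        return 1;
}
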
- -(cherry picked from commit 1316cafbe988307264e4f87dbcffaf56bc2ab388) -(cherry picked from commit ca84deb23f0cfb51dbeba0ffe44f757345e6f8a0) ---- tools/lvconvert.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index 799e746..bf14eec 100644 +index d0a6b02..38c8146 100644 --- a/tools/lvconvert.c +++ b/tools/lvconvert.c @@ -2330,7 +2330,8 @@ static int _lvconvert_thin_pool_repair(struct cmd_context *cmd, @@ -29,6 +15,3 @@ index 799e746..bf14eec 100644 argv[0] = thin_dump; argv[1] = pms_path; argv[2] = NULL; --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_02_188-scanning-optimize-by-checking-text-offset-and-checks.patch b/SOURCES/lvm2-2_02_188-scanning-optimize-by-checking-text-offset-and-checks.patch new file mode 100644 index 0000000..5ef3001 --- /dev/null +++ b/SOURCES/lvm2-2_02_188-scanning-optimize-by-checking-text-offset-and-checks.patch @@ -0,0 +1,355 @@ + lib/cache/lvmcache.c | 24 +++++++++ + lib/cache/lvmcache.h | 4 ++ + lib/device/bcache-utils.c | 15 ++++++ + lib/device/bcache.h | 1 + + lib/format_text/format-text.c | 16 +++++- + lib/format_text/layout.h | 2 +- + lib/format_text/text_label.c | 2 +- + lib/label/label.c | 5 ++ + lib/label/label.h | 1 + + lib/metadata/metadata.c | 117 +++++++++++++++++++++++++++++++++++++++++- + lib/metadata/metadata.h | 4 +- + 11 files changed, 186 insertions(+), 5 deletions(-) + +diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c +index b6a02b0..5b8dce8 100644 +--- a/lib/cache/lvmcache.c ++++ b/lib/cache/lvmcache.c +@@ -3068,3 +3068,27 @@ uint64_t lvmcache_max_metadata_size(void) + return _max_metadata_size; + } + ++void lvmcache_get_mdas(struct cmd_context *cmd, ++ const char *vgname, const char *vgid, ++ struct dm_list *mda_list) ++{ ++ struct lvmcache_vginfo *vginfo; ++ struct lvmcache_info *info; ++ struct mda_list *mdal; ++ struct metadata_area *mda, *mda2; ++ ++ if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) { ++ log_error(INTERNAL_ERROR "lvmcache_get_mdas no vginfo %s", vgname); ++ return; ++ } ++ ++ dm_list_iterate_items(info, &vginfo->infos) { ++ dm_list_iterate_items_safe(mda, mda2, &info->mdas) { ++ if (!(mdal = dm_zalloc(sizeof(*mdal)))) ++ continue; ++ mdal->mda = mda; ++ dm_list_add(mda_list, &mdal->list); ++ } ++ } ++} ++ +diff --git a/lib/cache/lvmcache.h b/lib/cache/lvmcache.h +index f436785..541e8be 100644 +--- a/lib/cache/lvmcache.h ++++ b/lib/cache/lvmcache.h +@@ -228,4 +228,8 @@ void lvmcache_drop_saved_vgid(const char *vgid); + uint64_t lvmcache_max_metadata_size(void); + void lvmcache_save_metadata_size(uint64_t val); + ++void lvmcache_get_mdas(struct cmd_context *cmd, ++ const char *vgname, const char *vgid, ++ struct dm_list *mda_list); ++ + #endif +diff --git a/lib/device/bcache-utils.c b/lib/device/bcache-utils.c +index a533a66..2f0b01d 100644 +--- a/lib/device/bcache-utils.c ++++ b/lib/device/bcache-utils.c +@@ -79,6 +79,21 @@ bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, + return true; + } + ++bool bcache_invalidate_bytes(struct bcache *cache, int fd, uint64_t start, size_t len) ++{ ++ block_address bb, be; ++ bool result = true; ++ ++ byte_range_to_block_range(cache, start, len, &bb, &be); ++ ++ for (; bb != be; bb++) { ++ if (!bcache_invalidate(cache, fd, bb)) ++ result = false; ++ } ++ ++ return result; ++} ++ + //---------------------------------------------------------------- + + // Writing bytes and zeroing bytes are very similar, so we factor out +diff --git a/lib/device/bcache.h b/lib/device/bcache.h +index 
f9067f7..3e7a168 100644 +--- a/lib/device/bcache.h ++++ b/lib/device/bcache.h +@@ -163,6 +163,7 @@ bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, + bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data); + bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len); + bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val); ++bool bcache_invalidate_bytes(struct bcache *cache, int fd, uint64_t start, size_t len); + + void bcache_set_last_byte(struct bcache *cache, int fd, uint64_t offset, int sector_size); + void bcache_unset_last_byte(struct bcache *cache, int fd); +diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c +index 6f5d739..75d7fcd 100644 +--- a/lib/format_text/format-text.c ++++ b/lib/format_text/format-text.c +@@ -1199,6 +1199,7 @@ static int _scan_file(const struct format_type *fmt, const char *vgname) + } + + int read_metadata_location_summary(const struct format_type *fmt, ++ struct metadata_area *mda, + struct mda_header *mdah, int primary_mda, struct device_area *dev_area, + struct lvmcache_vgsummary *vgsummary, uint64_t *mda_free_sectors) + { +@@ -1251,6 +1252,19 @@ int read_metadata_location_summary(const struct format_type *fmt, + return 0; + } + ++ /* ++ * This function is used to read the vg summary during label scan. ++ * Save the text start location and checksum during scan. After the VG ++ * lock is acquired in vg_read, we can reread the mda_header, and ++ * compare rlocn->offset,checksum to what was saved during scan. If ++ * unchanged, it means that the metadata was not changed between scan ++ * and the read. ++ */ ++ if (mda) { ++ mda->scan_text_offset = rlocn->offset; ++ mda->scan_text_checksum = rlocn->checksum; ++ } ++ + /* We found a VG - now check the metadata */ + if (rlocn->offset + rlocn->size > mdah->size) + wrap = (uint32_t) ((rlocn->offset + rlocn->size) - mdah->size); +@@ -1374,7 +1388,7 @@ static int _scan_raw(const struct format_type *fmt, const char *vgname __attribu + continue; + } + +- if (read_metadata_location_summary(fmt, mdah, 0, &rl->dev_area, &vgsummary, NULL)) { ++ if (read_metadata_location_summary(fmt, NULL, mdah, 0, &rl->dev_area, &vgsummary, NULL)) { + vg = _vg_read_raw_area(&fid, vgsummary.vgname, &rl->dev_area, NULL, NULL, 0, 0); + if (vg) { + lvmcache_update_vg(vg, 0); +diff --git a/lib/format_text/layout.h b/lib/format_text/layout.h +index 2671bbf..4601952 100644 +--- a/lib/format_text/layout.h ++++ b/lib/format_text/layout.h +@@ -104,7 +104,7 @@ struct mda_context { + #define MDA_SIZE_MIN (8 * (unsigned) lvm_getpagesize()) + #define MDA_ORIGINAL_ALIGNMENT 512 /* Original alignment used for start of VG metadata content */ + +-int read_metadata_location_summary(const struct format_type *fmt, struct mda_header *mdah, int primary_mda, ++int read_metadata_location_summary(const struct format_type *fmt, struct metadata_area *mda, struct mda_header *mdah, int primary_mda, + struct device_area *dev_area, struct lvmcache_vgsummary *vgsummary, + uint64_t *mda_free_sectors); + +diff --git a/lib/format_text/text_label.c b/lib/format_text/text_label.c +index 7d10e06..fc8294e 100644 +--- a/lib/format_text/text_label.c ++++ b/lib/format_text/text_label.c +@@ -345,7 +345,7 @@ static int _read_mda_header_and_metadata(struct metadata_area *mda, void *baton) + return 1; + } + +- if (!read_metadata_location_summary(fmt, mdah, mda_is_primary(mda), &mdac->area, ++ if 
(!read_metadata_location_summary(fmt, mda, mdah, mda_is_primary(mda), &mdac->area, + &vgsummary, &mdac->free_sectors)) { + if (vgsummary.zero_offset) + return 1; +diff --git a/lib/label/label.c b/lib/label/label.c +index 70b7934..8a4b662 100644 +--- a/lib/label/label.c ++++ b/lib/label/label.c +@@ -1418,6 +1418,11 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data) + return true; + } + ++bool dev_invalidate_bytes(struct device *dev, uint64_t start, size_t len) ++{ ++ return bcache_invalidate_bytes(scan_bcache, dev->bcache_fd, start, len); ++} ++ + bool dev_write_zeros(struct device *dev, uint64_t start, size_t len) + { + if (test_mode()) +diff --git a/lib/label/label.h b/lib/label/label.h +index 42c9946..ea29c84 100644 +--- a/lib/label/label.h ++++ b/lib/label/label.h +@@ -128,6 +128,7 @@ bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data); + bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data); + bool dev_write_zeros(struct device *dev, uint64_t start, size_t len); + bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val); ++bool dev_invalidate_bytes(struct device *dev, uint64_t start, size_t len); + void dev_set_last_byte(struct device *dev, uint64_t offset); + void dev_unset_last_byte(struct device *dev); + +diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c +index d448fd9..39f10fe 100644 +--- a/lib/metadata/metadata.c ++++ b/lib/metadata/metadata.c +@@ -33,6 +33,8 @@ + #include "lvmlockd.h" + #include "time.h" + #include "lvmnotify.h" ++#include "format_text/format-text.h" ++#include "format_text/layout.h" + + #include + #include +@@ -3760,6 +3762,118 @@ out: + return r; + } + ++/* ++ * Reread an mda_header. If the text offset is the same as was seen and saved ++ * by label scan, it means the metadata is unchanged and we do not need to ++ * reread metadata. ++ * ++ * This is used to ensure that the metadata seen during scan still matches ++ * what's on disk. If the scan data still matches what's on disk we don't ++ * need to reread the metadata from disk. When we read the metadata from ++ * bcache it may come from the cache or from disk again if the cache has ++ * dropped it. ++ */ ++ ++static bool _scan_text_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid) ++{ ++ struct dm_list mda_list; ++ struct mda_list *mdal, *safe; ++ struct metadata_area *mda; ++ struct mda_context *mdac; ++ struct device_area *area; ++ struct mda_header *mdah; ++ struct raw_locn *rlocn; ++ struct device *dev; ++ bool ret = true; ++ ++ /* ++ * if cmd->can_use_one_scan, check one mda_header is unchanged, ++ * else check that all mda_headers are unchanged. ++ */ ++ ++ dm_list_init(&mda_list); ++ ++ lvmcache_get_mdas(cmd, vgname, vgid, &mda_list); ++ ++ dm_list_iterate_items(mdal, &mda_list) { ++ mda = mdal->mda; ++ ++ if (!mda->scan_text_offset) ++ continue; ++ ++ if (!mda_is_primary(mda)) ++ continue; ++ ++ if (!(dev = mda_get_device(mda))) { ++ log_debug("rescan for text mismatch - no mda dev"); ++ goto out; ++ } ++ ++ mdac = mda->metadata_locn; ++ area = &mdac->area; ++ ++ /* ++ * Invalidate mda_header in bcache so it will be reread from disk. 
++ */ ++ if (!dev_invalidate_bytes(dev, 4096, 512)) { ++ log_debug("rescan for text mismatch - cannot invalidate"); ++ goto out; ++ } ++ ++ if (!(mdah = raw_read_mda_header(cmd->fmt, area, 1))) { ++ log_debug("rescan for text mismatch - no mda header"); ++ goto out; ++ } ++ ++ rlocn = mdah->raw_locns; ++ ++ if (rlocn->checksum != mda->scan_text_checksum) { ++ log_debug("rescan for text checksum mismatch on %s - now %x prev %x offset now %llu prev %llu", ++ dev_name(dev), ++ rlocn->checksum, mda->scan_text_checksum, ++ (unsigned long long)rlocn->offset, ++ (unsigned long long)mda->scan_text_offset); ++ } else if (rlocn->offset != mda->scan_text_offset) { ++ log_debug("rescan for text offset mismatch on %s - now %llu prev %llu checksum %x", ++ dev_name(dev), ++ (unsigned long long)rlocn->offset, ++ (unsigned long long)mda->scan_text_offset, ++ rlocn->checksum); ++ } else { ++ /* the common case where fields match and no rescan needed */ ++ ret = false; ++ } ++ ++ dm_pool_free(cmd->mem, mdah); ++ ++ /* For can_use_one_scan commands, return result from checking one mda. */ ++ if (cmd->can_use_one_scan) ++ goto out; ++ ++ /* For other commands, return mismatch immediately. */ ++ if (ret) ++ goto_out; ++ } ++ ++ if (ret) { ++ /* shouldn't happen */ ++ log_debug("rescan for text mismatch - no mdas"); ++ goto out; ++ } ++out: ++ if (!ret) ++ log_debug("rescan skipped - unchanged offset %llu checksum %x", ++ (unsigned long long)mda->scan_text_offset, ++ mda->scan_text_checksum); ++ ++ dm_list_iterate_items_safe(mdal, safe, &mda_list) { ++ dm_list_del(&mdal->list); ++ free(mdal); ++ } ++ ++ return ret; ++} ++ + /* Caller sets consistent to 1 if it's safe for vg_read_internal to correct + * inconsistent metadata on disk (i.e. the VG write lock is held). + * This guarantees only consistent metadata is returned. +@@ -3904,7 +4018,8 @@ static struct volume_group *_vg_read(struct cmd_context *cmd, + * lock is taken prior to the label scan, and still held here, + * we can also skip the rescan in that case. 
+ */ +- if (!cmd->can_use_one_scan || lvmcache_scan_mismatch(cmd, vgname, vgid)) { ++ if (!cmd->can_use_one_scan || ++ lvmcache_scan_mismatch(cmd, vgname, vgid) || _scan_text_mismatch(cmd, vgname, vgid)) { + /* the skip rescan special case is for clvmd vg_read_by_vgid */ + /* FIXME: this is not a warn flag, pass this differently */ + if (warn_flags & SKIP_RESCAN) +diff --git a/lib/metadata/metadata.h b/lib/metadata/metadata.h +index f8083e5..96bbb56 100644 +--- a/lib/metadata/metadata.h ++++ b/lib/metadata/metadata.h +@@ -173,6 +173,8 @@ struct metadata_area { + struct metadata_area_ops *ops; + void *metadata_locn; + uint32_t status; ++ uint64_t scan_text_offset; /* rlocn->offset seen during scan */ ++ uint32_t scan_text_checksum; /* rlocn->checksum seen during scan */ + }; + struct metadata_area *mda_copy(struct dm_pool *mem, + struct metadata_area *mda); +@@ -234,7 +236,7 @@ struct name_list { + + struct mda_list { + struct dm_list list; +- struct device_area mda; ++ struct metadata_area *mda; + }; + + struct peg_list { diff --git a/SOURCES/lvm2-drop-unavailable-libblkid-2_24-BLKID_SUBLKS_BADCSUM-for-signature-detection.patch b/SOURCES/lvm2-drop-unavailable-libblkid-2_24-BLKID_SUBLKS_BADCSUM-for-signature-detection.patch index 048911f..dcd4c8a 100644 --- a/SOURCES/lvm2-drop-unavailable-libblkid-2_24-BLKID_SUBLKS_BADCSUM-for-signature-detection.patch +++ b/SOURCES/lvm2-drop-unavailable-libblkid-2_24-BLKID_SUBLKS_BADCSUM-for-signature-detection.patch @@ -4,7 +4,7 @@ 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/configure b/configure -index 4d2fb2f..f357462 100755 +index fe0e8e2..ce37d03 100755 --- a/configure +++ b/configure @@ -12111,12 +12111,12 @@ if test -n "$BLKID_CFLAGS"; then diff --git a/SOURCES/lvm2-make-generate-2.patch b/SOURCES/lvm2-make-generate-2.patch new file mode 100644 index 0000000..9220334 --- /dev/null +++ b/SOURCES/lvm2-make-generate-2.patch @@ -0,0 +1,20 @@ + conf/example.conf.in | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/conf/example.conf.in b/conf/example.conf.in +index 6f7b161..9fa1235 100644 +--- a/conf/example.conf.in ++++ b/conf/example.conf.in +@@ -326,6 +326,12 @@ devices { + # Enabling this setting allows the VG to be used as usual even with + # uncertain devices. + allow_changes_with_duplicate_pvs = 1 ++ ++ # Configuration option devices/allow_mixed_block_sizes. ++ # Allow PVs in the same VG with different logical block sizes. ++ # When allowed, the user is responsible to ensure that an LV is ++ # using PVs with matching block sizes when necessary. ++ allow_mixed_block_sizes = 1 + } + + # Configuration section allocation. diff --git a/SOURCES/lvm2-rhel7-Change-allow_mixed_block_sizes-default-and-vsn.patch b/SOURCES/lvm2-rhel7-Change-allow_mixed_block_sizes-default-and-vsn.patch deleted file mode 100644 index 38a3f29..0000000 --- a/SOURCES/lvm2-rhel7-Change-allow_mixed_block_sizes-default-and-vsn.patch +++ /dev/null @@ -1,47 +0,0 @@ - conf/example.conf.in | 6 ++++++ - lib/config/config_settings.h | 2 +- - lib/config/defaults.h | 1 + - 3 files changed, 8 insertions(+), 1 deletion(-) - -diff --git a/conf/example.conf.in b/conf/example.conf.in -index 6f7b161..9fa1235 100644 ---- a/conf/example.conf.in -+++ b/conf/example.conf.in -@@ -326,6 +326,12 @@ devices { - # Enabling this setting allows the VG to be used as usual even with - # uncertain devices. - allow_changes_with_duplicate_pvs = 1 -+ -+ # Configuration option devices/allow_mixed_block_sizes. -+ # Allow PVs in the same VG with different logical block sizes. 
-+ # When allowed, the user is responsible to ensure that an LV is -+ # using PVs with matching block sizes when necessary. -+ allow_mixed_block_sizes = 1 - } - - # Configuration section allocation. -diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h -index 622e982..055b883 100644 ---- a/lib/config/config_settings.h -+++ b/lib/config/config_settings.h -@@ -470,7 +470,7 @@ cfg(devices_allow_changes_with_duplicate_pvs_CFG, "allow_changes_with_duplicate_ - "Enabling this setting allows the VG to be used as usual even with\n" - "uncertain devices.\n") - --cfg(devices_allow_mixed_block_sizes_CFG, "allow_mixed_block_sizes", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 3, 6), NULL, 0, NULL, -+cfg(devices_allow_mixed_block_sizes_CFG, "allow_mixed_block_sizes", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_ALLOW_MIXED_BLOCK_SIZES, vsn(2, 2, 186), NULL, 0, NULL, - "Allow PVs in the same VG with different logical block sizes.\n" - "When allowed, the user is responsible to ensure that an LV is\n" - "using PVs with matching block sizes when necessary.\n") -diff --git a/lib/config/defaults.h b/lib/config/defaults.h -index 0d55928..e689208 100644 ---- a/lib/config/defaults.h -+++ b/lib/config/defaults.h -@@ -46,6 +46,7 @@ - #define DEFAULT_ISSUE_DISCARDS 0 - #define DEFAULT_PV_MIN_SIZE_KB 2048 - #define DEFAULT_ALLOW_CHANGES_WITH_DUPLICATE_PVS 1 -+#define DEFAULT_ALLOW_MIXED_BLOCK_SIZES 1 - - #define DEFAULT_LOCKING_LIB "liblvm2clusterlock.so" - #define DEFAULT_ERROR_WHEN_FULL 0 diff --git a/SOURCES/lvm2-rhel7-add-lvm1-and-pool-back.patch b/SOURCES/lvm2-rhel7-add-lvm1-and-pool-back.patch index a16d52e..af41e37 100644 --- a/SOURCES/lvm2-rhel7-add-lvm1-and-pool-back.patch +++ b/SOURCES/lvm2-rhel7-add-lvm1-and-pool-back.patch @@ -91,7 +91,7 @@ create mode 100644 test/shell/snapshot-lvm1.sh diff --git a/configure b/configure -index f357462..2585e58 100755 +index ce37d03..f0ad5d1 100755 --- a/configure +++ b/configure @@ -675,6 +675,7 @@ PYTHON_BINDINGS @@ -406,7 +406,7 @@ index be2623a..a2adfe6 100644 #define LVMETAD_DISABLE_REASON_VGRESTORE "VGRESTORE" #define LVMETAD_DISABLE_REASON_REPAIR "REPAIR" diff --git a/daemons/lvmetad/lvmetad-core.c b/daemons/lvmetad/lvmetad-core.c -index 9ae359b..3e379fe 100644 +index 76aa62c..ce85631 100644 --- a/daemons/lvmetad/lvmetad-core.c +++ b/daemons/lvmetad/lvmetad-core.c @@ -200,12 +200,12 @@ struct vg_info { @@ -539,7 +539,7 @@ index 1d42235..241cf09 100644 locking endif diff --git a/lib/activate/activate.c b/lib/activate/activate.c -index 561a965..0790817 100644 +index fb6f545..c395d58 100644 --- a/lib/activate/activate.c +++ b/lib/activate/activate.c @@ -37,6 +37,19 @@ @@ -563,7 +563,7 @@ index 561a965..0790817 100644 struct dm_list *modules) { diff --git a/lib/activate/activate.h b/lib/activate/activate.h -index 524d2bf..43d26d1 100644 +index 7139276..a938cb4 100644 --- a/lib/activate/activate.h +++ b/lib/activate/activate.h @@ -91,6 +91,7 @@ int activation(void); @@ -575,7 +575,7 @@ index 524d2bf..43d26d1 100644 int module_present(struct cmd_context *cmd, const char *target_name); int target_present_version(struct cmd_context *cmd, const char *target_name, diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c -index 9890325..0bec548 100644 +index c12ec2b..b6a02b0 100644 --- a/lib/cache/lvmcache.c +++ b/lib/cache/lvmcache.c @@ -22,6 +22,8 @@ @@ -587,7 +587,7 @@ index 9890325..0bec548 100644 #include "config.h" #include "lvmetad.h" -@@ -545,6 +547,8 @@ void lvmcache_drop_metadata(const char *vgname, int drop_precommitted) +@@ -547,6 
+549,8 @@ void lvmcache_drop_metadata(const char *vgname, int drop_precommitted) /* For VG_ORPHANS, we need to invalidate all labels on orphan PVs. */ if (!strcmp(vgname, VG_ORPHANS)) { _drop_metadata(FMT_TEXT_ORPHAN_VG_NAME, 0); @@ -597,7 +597,7 @@ index 9890325..0bec548 100644 _drop_metadata(vgname, drop_precommitted); } diff --git a/lib/cache/lvmetad.c b/lib/cache/lvmetad.c -index d7e798d..1eda567 100644 +index 61ba53e..7a85c0f 100644 --- a/lib/cache/lvmetad.c +++ b/lib/cache/lvmetad.c @@ -38,6 +38,8 @@ static const char *_lvmetad_socket = NULL; @@ -609,7 +609,7 @@ index d7e798d..1eda567 100644 static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg, const char *vgid, struct format_type *fmt); static uint64_t _monotonic_seconds(void) -@@ -2290,6 +2292,18 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev, +@@ -2324,6 +2326,18 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev, if (!baton.fid) goto_bad; @@ -628,7 +628,7 @@ index d7e798d..1eda567 100644 lvmcache_foreach_mda(info, _lvmetad_pvscan_single, &baton); if (!baton.vg) -@@ -2454,9 +2468,11 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait) +@@ -2488,9 +2502,11 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait, struct dm_list } /* @@ -642,7 +642,7 @@ index d7e798d..1eda567 100644 log_debug_lvmetad("Enabling lvmetad which was previously disabled."); lvmetad_clear_disabled(cmd); } -@@ -3104,6 +3120,9 @@ int lvmetad_is_disabled(struct cmd_context *cmd, const char **reason) +@@ -3138,6 +3154,9 @@ int lvmetad_is_disabled(struct cmd_context *cmd, const char **reason) } else if (strstr(reply_reason, LVMETAD_DISABLE_REASON_REPAIR)) { *reason = "a repair command was run"; @@ -708,10 +708,10 @@ index 95fb343..3db9bb3 100644 /* Load any formats in shared libs if not static */ if (!is_static() && diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h -index 02e2b29..9904a62 100644 +index 114680c..87bd8ca 100644 --- a/lib/config/config_settings.h +++ b/lib/config/config_settings.h -@@ -780,14 +780,26 @@ cfg(global_activation_CFG, "activation", global_CFG_SECTION, 0, CFG_TYPE_BOOL, D +@@ -785,14 +785,26 @@ cfg(global_activation_CFG, "activation", global_CFG_SECTION, 0, CFG_TYPE_BOOL, D "is not present in the kernel, disabling this should suppress\n" "the error messages.\n") @@ -5457,7 +5457,7 @@ index 0000000..5812adb + +#endif diff --git a/lib/format_text/export.c b/lib/format_text/export.c -index 7866d56..e535237 100644 +index 3e294a1..ee699a7 100644 --- a/lib/format_text/export.c +++ b/lib/format_text/export.c @@ -467,6 +467,8 @@ static int _print_vg(struct formatter *f, struct volume_group *vg) @@ -5470,10 +5470,10 @@ index 7866d56..e535237 100644 if (vg->lock_type) { outf(f, "lock_type = \"%s\"", vg->lock_type); diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c -index f39051c..4d0f6d5 100644 +index 026f93a..6f5d739 100644 --- a/lib/format_text/format-text.c +++ b/lib/format_text/format-text.c -@@ -2568,9 +2568,9 @@ struct format_type *create_text_format(struct cmd_context *cmd) +@@ -2565,9 +2565,9 @@ struct format_type *create_text_format(struct cmd_context *cmd) fmt->name = FMT_TEXT_NAME; fmt->alias = FMT_TEXT_ALIAS; fmt->orphan_vg_name = ORPHAN_VG_NAME(FMT_TEXT_NAME); @@ -5565,10 +5565,10 @@ index 2584227..2b53553 100644 case LCK_LV: /* All LV locks are non-blocking. 
*/ diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c -index 76451ea..9af90f9 100644 +index c8acb6c..6db8575 100644 --- a/lib/metadata/lv_manip.c +++ b/lib/metadata/lv_manip.c -@@ -6011,6 +6011,8 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, +@@ -6013,6 +6013,8 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, force_t force, int suppress_remove_message) { struct volume_group *vg; @@ -5577,7 +5577,7 @@ index 76451ea..9af90f9 100644 int visible, historical; struct logical_volume *pool_lv = NULL; struct logical_volume *lock_lv = lv; -@@ -6163,6 +6165,10 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, +@@ -6165,6 +6167,10 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, } if (lv_is_cow(lv)) { @@ -5588,7 +5588,7 @@ index 76451ea..9af90f9 100644 log_verbose("Removing snapshot volume %s.", display_lvname(lv)); /* vg_remove_snapshot() will preload origin/former snapshots */ if (!vg_remove_snapshot(lv)) -@@ -6218,10 +6224,30 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, +@@ -6220,10 +6226,30 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, } } @@ -5620,7 +5620,7 @@ index 76451ea..9af90f9 100644 /* TODO: defer when multiple LVs relased at once */ if (pool_lv && !update_pool_lv(pool_lv, 1)) { diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h -index 75caba1..2245c29 100644 +index 9c24132..5674545 100644 --- a/lib/metadata/metadata-exported.h +++ b/lib/metadata/metadata-exported.h @@ -149,7 +149,7 @@ @@ -5652,10 +5652,10 @@ index 75caba1..2245c29 100644 #define MIRROR_BY_SEG 0x00000001U /* segment-by-segment mirror */ #define MIRROR_BY_LV 0x00000002U /* mirror using whole mimage LVs */ diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c -index cb38f66..3620240 100644 +index 666ad78..d448fd9 100644 --- a/lib/metadata/metadata.c +++ b/lib/metadata/metadata.c -@@ -1011,6 +1011,8 @@ struct volume_group *vg_create(struct cmd_context *cmd, const char *vg_name) +@@ -1055,6 +1055,8 @@ struct volume_group *vg_create(struct cmd_context *cmd, const char *vg_name) vg->status = (RESIZEABLE_VG | LVM_READ | LVM_WRITE); vg->system_id = NULL; @@ -5664,7 +5664,7 @@ index cb38f66..3620240 100644 vg->extent_size = DEFAULT_EXTENT_SIZE * 2; vg->max_lv = DEFAULT_MAX_LV; -@@ -2969,7 +2971,7 @@ int vg_write(struct volume_group *vg) +@@ -3013,7 +3015,7 @@ int vg_write(struct volume_group *vg) return 0; } @@ -5673,7 +5673,7 @@ index cb38f66..3620240 100644 return_0; if (!vg_mda_used_count(vg)) { -@@ -5373,6 +5375,15 @@ int is_system_id_allowed(struct cmd_context *cmd, const char *system_id) +@@ -5419,6 +5421,15 @@ int is_system_id_allowed(struct cmd_context *cmd, const char *system_id) static int _access_vg_systemid(struct cmd_context *cmd, struct volume_group *vg) { /* @@ -5689,7 +5689,7 @@ index cb38f66..3620240 100644 * A few commands allow read-only access to foreign VGs. 
*/ if (cmd->include_foreign_vgs) -@@ -5424,6 +5435,11 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg +@@ -5470,6 +5481,11 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg uint32_t lockd_state, uint32_t *failure) { if (!is_real_vg(vg->name)) { @@ -5715,10 +5715,10 @@ index 9c05836..309a246 100644 #define SEG_CANNOT_BE_ZEROED (1ULL << 6) #define SEG_MONITORED (1ULL << 7) diff --git a/lib/metadata/snapshot_manip.c b/lib/metadata/snapshot_manip.c -index aeb739c..156b4c8 100644 +index 95a3b2b..63e3361 100644 --- a/lib/metadata/snapshot_manip.c +++ b/lib/metadata/snapshot_manip.c -@@ -337,6 +337,17 @@ int vg_remove_snapshot(struct logical_volume *cow) +@@ -325,6 +325,17 @@ int vg_remove_snapshot(struct logical_volume *cow) cow->snapshot = NULL; lv_set_visible(cow); @@ -5993,7 +5993,7 @@ index c2fbac6..abe193c 100644 arg(name_ARG, 'n', "name", string_VAL, 0, 0, "#lvcreate\n" diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index 8a07a84..e66f063 100644 +index 677bed1..d0a6b02 100644 --- a/tools/lvconvert.c +++ b/tools/lvconvert.c @@ -1816,6 +1816,11 @@ static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volu @@ -6009,7 +6009,7 @@ index 8a07a84..e66f063 100644 /* FIXME: we need to create a lock for the new LV. */ log_error("Unable to split snapshots in VG with lock_type %s.", vg->lock_type); diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c -index 0840c65..f238b64 100644 +index 9c39f48..75a0401 100644 --- a/tools/lvmcmdline.c +++ b/tools/lvmcmdline.c @@ -19,9 +19,11 @@ @@ -6024,7 +6024,7 @@ index 0840c65..f238b64 100644 #include #include -@@ -2912,6 +2914,13 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv) +@@ -2914,6 +2916,13 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv) goto out; } @@ -6038,7 +6038,7 @@ index 0840c65..f238b64 100644 if (cmd->command->command_enum == lvconvert_repair_CMD) { log_warn("WARNING: Not using lvmetad because of repair."); lvmetad_make_unused(cmd); -@@ -2974,7 +2983,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv) +@@ -2976,7 +2985,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv) * by different token values.) * * lvmetad may have been previously disabled (or disabled during the @@ -6047,7 +6047,7 @@ index 0840c65..f238b64 100644 * In this case, disable the *use* of lvmetad by this command, reverting to * disk scanning. */ -@@ -3399,6 +3408,41 @@ static int _run_script(struct cmd_context *cmd, int argc, char **argv) +@@ -3401,6 +3410,41 @@ static int _run_script(struct cmd_context *cmd, int argc, char **argv) return ret; } @@ -6089,7 +6089,7 @@ index 0840c65..f238b64 100644 static void _nonroot_warning(void) { if (getuid() || geteuid()) -@@ -3488,6 +3532,19 @@ int lvm2_main(int argc, char **argv) +@@ -3490,6 +3534,19 @@ int lvm2_main(int argc, char **argv) } else run_name = dm_basename(argv[0]); @@ -6110,10 +6110,10 @@ index 0840c65..f238b64 100644 * Decide if we are running a shell or a command or a script. When * there is no run_name, it's a shell, when run_name is a recognized diff --git a/tools/pvscan.c b/tools/pvscan.c -index c21845c..e5afe0c 100644 +index 2e7a864..9e76f52 100644 --- a/tools/pvscan.c +++ b/tools/pvscan.c -@@ -748,7 +748,7 @@ out: +@@ -801,7 +801,7 @@ out: * display the PV info. * * iii. 
If lvmetad is being used, but has been disabled (because of @@ -6122,7 +6122,7 @@ index c21845c..e5afe0c 100644 * (because the device filter is different from the device filter last * used to populate lvmetad), then 'pvscan' will begin by rescanning * devices to repopulate lvmetad. If lvmetad is enabled after the -@@ -761,7 +761,8 @@ out: +@@ -814,7 +814,8 @@ out: * attempt to repopulate the lvmetad cache by rescanning all devs * (regardless of whether lvmetad was previously disabled or had an * unmatching token.) lvmetad may be enabled or disabled after the @@ -6145,7 +6145,7 @@ index 1d58387..f03e5d3 100644 } diff --git a/tools/toollib.c b/tools/toollib.c -index 1b01ccc..81953ee 100644 +index 0c1c095..a48eea0 100644 --- a/tools/toollib.c +++ b/tools/toollib.c @@ -14,6 +14,7 @@ @@ -6156,7 +6156,7 @@ index 1b01ccc..81953ee 100644 #include "format-text.h" #include -@@ -4168,6 +4169,7 @@ static int _process_duplicate_pvs(struct cmd_context *cmd, +@@ -4169,6 +4170,7 @@ static int _process_duplicate_pvs(struct cmd_context *cmd, .fid = &dummy_fid, .name = "", .system_id = (char *) "", @@ -6164,7 +6164,7 @@ index 1b01ccc..81953ee 100644 .pvs = DM_LIST_HEAD_INIT(dummy_vg.pvs), .lvs = DM_LIST_HEAD_INIT(dummy_vg.lvs), .historical_lvs = DM_LIST_HEAD_INIT(dummy_vg.historical_lvs), -@@ -4805,6 +4807,23 @@ int pvcreate_params_from_args(struct cmd_context *cmd, struct pvcreate_params *p +@@ -4806,6 +4808,23 @@ int pvcreate_params_from_args(struct cmd_context *cmd, struct pvcreate_params *p pp->pva.label_sector = arg_int64_value(cmd, labelsector_ARG, DEFAULT_LABELSECTOR); @@ -6188,7 +6188,7 @@ index 1b01ccc..81953ee 100644 if (arg_is_set(cmd, metadataignore_ARG)) pp->pva.metadataignore = arg_int_value(cmd, metadataignore_ARG, DEFAULT_PVMETADATAIGNORE); -@@ -5164,7 +5183,10 @@ static int _pvcreate_check_single(struct cmd_context *cmd, +@@ -5165,7 +5184,10 @@ static int _pvcreate_check_single(struct cmd_context *cmd, pd->is_orphan_pv = 1; } @@ -6200,7 +6200,7 @@ index 1b01ccc..81953ee 100644 } else { log_debug("Found pvcreate arg %s: device is not a PV.", pd->name); /* Device is not a PV. 
*/ -@@ -5393,7 +5415,10 @@ static int _pvremove_check_single(struct cmd_context *cmd, +@@ -5394,7 +5416,10 @@ static int _pvremove_check_single(struct cmd_context *cmd, pd->is_orphan_pv = 1; } @@ -6345,7 +6345,7 @@ index ca9615c..8bdf8be 100644 &_vgconvert_single); } diff --git a/tools/vgscan.c b/tools/vgscan.c -index f9fa382..1ec9083 100644 +index a1ef264..7a63996 100644 --- a/tools/vgscan.c +++ b/tools/vgscan.c @@ -44,7 +44,7 @@ static int _vgscan_single(struct cmd_context *cmd, const char *vg_name, diff --git a/SOURCES/lvm2-rhel7.patch b/SOURCES/lvm2-rhel7.patch index b7df03d..0fd29af 100644 --- a/SOURCES/lvm2-rhel7.patch +++ b/SOURCES/lvm2-rhel7.patch @@ -3,16 +3,16 @@ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION -index 7f0e456..0c3bfac 100644 +index 79026e7..24945b2 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ --2.02.186(2) (2019-08-27) -+2.02.186(2)-RHEL7 (2019-08-27) +-2.02.187(2) (2020-03-24) ++2.02.187(2)-RHEL7 (2020-03-24) diff --git a/VERSION_DM b/VERSION_DM -index 864ceb8..5093e13 100644 +index a8e0ff9..4ffb776 100644 --- a/VERSION_DM +++ b/VERSION_DM @@ -1 +1 @@ --1.02.164 (2019-08-27) -+1.02.164-RHEL7 (2019-08-27) +-1.02.170 (2020-03-24) ++1.02.170-RHEL7 (2020-03-24) diff --git a/SOURCES/lvm2-set-default-preferred_names.patch b/SOURCES/lvm2-set-default-preferred_names.patch index 205dd10..3f65953 100644 --- a/SOURCES/lvm2-set-default-preferred_names.patch +++ b/SOURCES/lvm2-set-default-preferred_names.patch @@ -16,7 +16,7 @@ index 38855e9..7ccf928 100644 # Configuration option devices/filter. # Limit the block devices that are used by LVM commands. diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h -index 0e81252..02e2b29 100644 +index 429bff1..114680c 100644 --- a/lib/config/config_settings.h +++ b/lib/config/config_settings.h @@ -255,7 +255,7 @@ cfg(devices_external_device_info_source_CFG, "external_device_info_source", devi diff --git a/SPECS/lvm2.spec b/SPECS/lvm2.spec index b114b2a..51b71cb 100644 --- a/SPECS/lvm2.spec +++ b/SPECS/lvm2.spec @@ -1,8 +1,9 @@ -%global device_mapper_version 1.02.164 +%global device_mapper_version 1.02.170 %global enable_cache 1 %global enable_cluster 1 %global enable_cmirror 1 +%global enable_standalone 0 %global enable_lvmetad 1 %global enable_lvmlockd 1 %global enable_lvmpolld 1 @@ -20,14 +21,14 @@ %global libselinux_version 1.30.19-4 %global persistent_data_version 0.7.0-0.1.rc6 %global sanlock_version 3.3.0-1 -%global boom_version 0.9 +%global boom_version 1.1 %global enable_lockd_sanlock %{enable_lvmlockd} %global enable_lockd_dlm %{enable_lvmlockd} %global boom_pkgname lvm2-python-boom -%global boom_version 0.9 -%global boom_release 25 +%global boom_version 1.2 +%global boom_release 2 %global boom_summary A set of libraries and tools for managing boot loader entries %global boom_dir boom-%{boom_version} @@ -66,13 +67,13 @@ Summary: Userland logical volume management tools Name: lvm2 Epoch: 7 -Version: 2.02.186 -Release: 7%{?dist}%{?scratch}.2 +Version: 2.02.187 +Release: 6%{?dist}%{?scratch} License: GPLv2 Group: System Environment/Base URL: http://sources.redhat.com/lvm2 Source0: ftp://sources.redhat.com/pub/lvm2/releases/LVM2.%{version}.tgz -Source1: https://github.com/bmr-cymru/boom/archive/%{boom_version}/boom-%{boom_version}.tar.gz +Source1: https://github.com/snapshotmanager/boom/archive/%{version}/boom-%{boom_version}.tar.gz Patch0: lvm2-rhel7.patch Patch1: lvm2-set-default-preferred_names.patch Patch2: lvm2-fix-libdm-versioning-for-dm_tree_node_size_changed-symbol.patch 
@@ -81,47 +82,17 @@ Patch4: lvm2-default-allow-changes-with-duplicate-pvs.patch #Patch5: lvm2-rhel7-fix-StartLimitInterval.patch Patch6: lvm2-rhel7-add-lvm1-and-pool-back.patch Patch7: lvm2-make-generate.patch -Patch8: lvm2-2_02_187-cov-Fix-memory-leak.patch -# BZ 1669751: -Patch9: lvm2-2_02_187-vgcreate-vgextend-restrict-PVs-with-mixed-block-size.patch -Patch10: lvm2-2_02_187-WHATS_NEW-vgcreate-vgextend-logical-block-size.patch -# BZ 1622813: -Patch11: lvm2-2_02_187-pvmove-check-if-participating-LV-is-already-exlcusiv.patch -# BZ 1697823: -Patch12: lvm2-2_02_187-lvconvert-improve-validation-thin-and-cache-pool-con.patch -Patch13: lvm2-2_02_187-thin-activate-layer-pool-aas-read-only-LV.patch -# BZ 1669751: -Patch14: lvm2-rhel7-Change-allow_mixed_block_sizes-default-and-vsn.patch -# BZ 1763795: -Patch15: lvm2-2_02_187-lvmetad-fix-timeout-on-shutdown.patch -# BZ 1647567: -Patch16: lvm2-2_02_187-dmsetup-do-not-treat-no-groups-as-an-error-in-dmstat.patch -# BZ 1622813: -Patch17: lvm2-2_02_187-pvmove-detect-exclusively-active-LVs.patch -# BZ 1753713: -Patch18: lvm2-2_02_187-snapshot-correctly-check-device-id-of-merged-thin.patch -Patch19: lvm2-2_02_187-snapshot-fix-checking-of-merged-thin-volume.patch -Patch20: lvm2-2_02_187-snapshot-use-single-merging-sequence.patch -# BZ 1642162: -Patch21: lvm2-2_02_187-mirror-fix-leg-splitting.patch -Patch22: lvm2-2_02_187-mirror-directly-activate-updated-mirror.patch -# BZ 1722666: -Patch23: lvm2-2_02_187-lvextend-fix-resizing-volumes-of-different-segtype.patch -# BZ 1784695: -Patch24: lvm2-2_02_187-raid-disallow-reshape-of-stacked-LVs.patch -# BZ 1787071: -Patch25: lvm2-2_02_187-bcache-Fix-overwriting-headers-on-incorrect-device.patch -# BZ 1784695: -Patch26: lvm2-2_02_187-raid-more-limitted-prohibition-of-stacked-raid-usage.patch -Patch27: lvm2-2_02_187-raid-better-place-for-blocking-reshapes.patch -# BZ 1812441: -Patch28: lvm2-2_02_187-pvs-fix-locking_type-4.patch -# BZ 1814004: -Patch29: lvm2-2_02_187-pvscan-fix-activation-of-incomplete-VGs.patch -Patch30: lvm2-2_02_187-Fix-rounding-writes-up-to-sector-size.patch -Patch31: lvm2-2_02_187-bcache-Fix-memory-leak-in-error-path.patch -# BZ 1822539: -Patch32: lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch +# BZ 1806798: +Patch8: lvm2-2_02_188-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch +# BZ 1821970: +Patch9: lvm2-2_02_188-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch +# BZ 1830904: +Patch10: lvm2-make-generate-2.patch +# BZ 1842600: +Patch11: lvm2-2_02_188-scanning-optimize-by-checking-text-offset-and-checks.patch +# BZ 1777364: +Patch50: boom-etc-Remove-executable-permission-from-etc-default-bo.patch +Patch51: boom-man-Fix-line-starting-with.patch BuildRequires: libselinux-devel >= %{libselinux_version}, libsepol-devel BuildRequires: libblkid-devel >= %{util_linux_version} @@ -171,6 +142,8 @@ or more physical volumes and creating one or more logical volumes %prep %if %{enable_boom} %setup -q -b 1 -n %{boom_dir} +%patch50 -p1 +%patch51 -p1 %endif %setup -q -n LVM2.%{version} @@ -182,31 +155,10 @@ or more physical volumes and creating one or more logical volumes #%%patch5 -p1 -b .startlimitinterval %patch6 -p1 -b .add_lvm1_and_pool %patch7 -p1 -b .generate -%patch8 -p1 -b .cov_Fix_memory_leak -%patch9 -p1 -b .vgcreate_vgextend_restrict_PVs_with_mixed_block_size -%patch10 -p1 -b .WHATS_NEW_vgcreate_vgextend_logical_block_size -%patch11 -p1 -b .pvmove_check_if_participating_LV_is_already_exlcusiv -%patch12 -p1 -b 
.lvconvert_improve_validation_thin_and_cache_pool_con -%patch13 -p1 -b .thin_activate_layer_pool_aas_read_only_LV -%patch14 -p1 -b .change_allow_mixed_block_sizes_default_and_vsn -%patch15 -p1 -b .lvmetad_fix_timeout_on_shutdown -%patch16 -p1 -b .dmsetup_do_not_treat_no_groups_as_an_error_in_dmstat -%patch17 -p1 -b .pvmove_detect_exclusively_active_LVs -%patch18 -p1 -b .snapshot_correctly_check_device_id_of_merged_thin -%patch19 -p1 -b .snapshot_fix_checking_of_merged_thin_volume -%patch20 -p1 -b .snapshot_use_single_merging_sequence -%patch21 -p1 -b .mirror_fix_leg_splitting -%patch22 -p1 -b .mirror_directly_activate_updated_mirror -%patch23 -p1 -b .fix_resizing_volumes_of_different_segtype -%patch24 -p1 -b .raid_disallow_reshape_of_stacked_LVs -%patch25 -p1 -b .bcache_Fix_overwriting_headers_on_incorrect_device -%patch26 -p1 -b .raid_more_limitted_prohibition_of_stacked_raid_usage -%patch27 -p1 -b .raid_better_place_for_blocking_reshapes -%patch28 -p1 -b .pvs_fix_locking_type_4 -%patch29 -p1 -b .pvscan_fix_activation_of_incomplete_VGs -%patch30 -p1 -b .Fix_rounding_writes_up_to_sector_size -%patch31 -p1 -b .bcache_Fix_memory_leak_in_error_path -%patch32 -p1 -b .lvconvert_no_validation_for_thin_pools_not_used_by_lvm +%patch8 -p1 -b .lvconvert_no_validation_for_thin_pools_not_used_by_lvm +%patch9 -p1 -b .add_support_for_VDO_in_blkdeactivate_script +%patch10 -p1 -b .generate2 +%patch11 -p1 -b .scanning_optimize_by_checking_text_offset_and_checks %build %global _default_pid_dir /run @@ -270,6 +222,17 @@ make install_systemd_units DESTDIR=$RPM_BUILD_ROOT V=1 make install_systemd_generators DESTDIR=$RPM_BUILD_ROOT V=1 make install_tmpfiles_configuration DESTDIR=$RPM_BUILD_ROOT V=1 +%if ! %{enable_standalone} +%if %{enable_cluster} +rm ${RPM_BUILD_ROOT}/%{_prefix}/lib/systemd/lvm2-cluster-activation +rm ${RPM_BUILD_ROOT}/%{_unitdir}/lvm2-cluster-activation.service +rm ${RPM_BUILD_ROOT}/%{_unitdir}/lvm2-clvmd.service +%if %{enable_cmirror} +rm ${RPM_BUILD_ROOT}/%{_unitdir}/lvm2-cmirrord.service +%endif +%endif +%endif + %if %{enable_boom} ( cd ../%{boom_dir} @@ -283,11 +246,16 @@ install -m 644 etc/default/boom ${RPM_BUILD_ROOT}/etc/default # Make configuration directories install -d -m 700 ${RPM_BUILD_ROOT}/boot/boom/profiles +install -d -m 700 ${RPM_BUILD_ROOT}/boot/boom/hosts install -d -m 700 ${RPM_BUILD_ROOT}/boot/loader/entries +install -d -m 700 ${RPM_BUILD_ROOT}/boot/boom/cache +install -m 644 examples/boom.conf ${RPM_BUILD_ROOT}/boot/boom install -m 644 examples/profiles/*.profile ${RPM_BUILD_ROOT}/boot/boom/profiles -install -d -m 755 ${RPM_BUILD_ROOT}/%{_mandir}/man8 +mkdir -p ${RPM_BUILD_ROOT}/%{_mandir}/man8 +mkdir -p ${RPM_BUILD_ROOT}/%{_mandir}/man5 install -m 644 man/man8/boom.8 ${RPM_BUILD_ROOT}/%{_mandir}/man8 +install -m 644 man/man5/boom.5 ${RPM_BUILD_ROOT}/%{_mandir}/man5 ) %endif @@ -666,6 +634,7 @@ fi %{_sbindir}/clvmd %attr(444, -, -) %{_mandir}/man8/clvmd.8.gz +%if %{enable_standalone} ############################################################################## # Cluster-standalone subpackage ############################################################################## @@ -696,14 +665,12 @@ involvement (e.g. pacemaker). %defattr(444,root,root,-) %{_unitdir}/lvm2-clvmd.service %{_unitdir}/lvm2-cluster-activation.service - %endif ############################################################################### # Cluster mirror subpackage # The 'clvm' OCF script to manage cmirrord instance is part of resource-agents. 
############################################################################### -%if %{enable_cluster} %if %{enable_cmirror} %package -n cmirror @@ -721,6 +688,7 @@ Daemon providing device-mapper-based mirrors in a shared-storage cluster. %{_sbindir}/cmirrord %attr(444, -, -) %{_mandir}/man8/cmirrord.8.gz +%if %{enable_standalone} ############################################################################## # Cmirror-standalone subpackage ############################################################################## @@ -748,9 +716,9 @@ involvement (e.g. pacemaker). %files -n cmirror-standalone %defattr(444,root,root,-) %{_unitdir}/lvm2-cmirrord.service - -%endif -%endif +%endif # enable_standalone +%endif # enable_cmirror +%endif # enable_cluster ############################################################################## # Legacy SysV init subpackage @@ -954,7 +922,7 @@ the device-mapper event library. %package -n %{boom_pkgname} Summary: %{boom_summary} Version: %{boom_version} -Release: %{boom_release}%{?dist}%{?scratch}.2 +Release: %{boom_release}%{?dist}%{?scratch} License: GPLv2 Group: System Environment/Base BuildArch: noarch @@ -974,20 +942,45 @@ This package provides the python2 version of boom. %files -n %{boom_pkgname} %license COPYING %{_mandir}/man8/boom.* +%{_mandir}/man5/boom.* %{python2_sitelib}/boom* %{_bindir}/boom /etc/grub.d/42_boom %config(noreplace) /etc/default/boom -/boot/* +%dir /boot/boom +%config(noreplace) /boot/boom/boom.conf +%dir /boot/boom/profiles +%config(noreplace) /boot/boom/profiles/* +%dir /boot/boom/hosts +%dir /boot/boom/cache +%dir /boot/loader/entries %doc ../%{boom_dir}/README.md %doc ../%{boom_dir}/examples/ %doc ../%{boom_dir}/tests/ %endif %changelog -* Thu Apr 16 2020 Marian Csontos - 7:2.02.186-7.el7_8.2 +* Mon Jun 07 2020 Marian Csontos - 7:2.02.187-6 +- Fix pvs/lvs/vgs failing due empty VG spotted in metadata when under load. + +* Sun Jun 07 2020 Marian Csontos - 7:2.02.187-5 +- Update boom to bug fix release 1.2. + +* Tue May 26 2020 Marian Csontos - 7:2.02.187-4 +- boom: Fix traceback in error path. +- boom: Improve error path handling with --debug. + +* Tue May 19 2020 Marian Csontos - 7:2.02.187-3 +- Add allow_mixed_block_sizes into lvm.conf. +- Update boom to version 1.1. + +* Thu Apr 16 2020 Marian Csontos - 7:2.02.187-2 - No validation for thin pools not used by lvm, -- Fix activation of incomplete VGs. +- Add support for VDO in blkdeactivate script. + +* Tue Mar 24 2020 Marian Csontos - 7:2.02.187-1 +- Bug fix release. +- See WHATS_NEW file for details. * Thu Mar 12 2020 Marian Csontos - 7:2.02.186-7.el7_8.1 - Fix failing pvs with locking_type 4.