 Makefile.in                            |   18 +-
 base/Makefile                          |   38 +
 base/data-struct/radix-tree-adaptive.c | 1297 +++++++++++++++++++++++++
 base/data-struct/radix-tree-simple.c   |  256 +++++
 base/data-struct/radix-tree.c          |  851 +---------------
 base/data-struct/radix-tree.h          |    6 +
 lib/device/bcache.c                    |  384 ++++----
 lib/device/bcache.h                    |    8 +-
 lib/label/label.c                      |   42 +-
 make.tmpl.in                           |   12 +-
 test/unit/bcache_t.c                   |   98 +-
 test/unit/bcache_utils_t.c             |    3 +-
 test/unit/radix_tree_t.c               |  399 +++++++-
 test/unit/rt_case1.c                   | 1669 ++++++++++++++++++++++++++++++++
 test/unit/unit-test.sh                 |    2 -
 15 files changed, 3993 insertions(+), 1090 deletions(-)
 create mode 100644 base/Makefile
 create mode 100644 base/data-struct/radix-tree-adaptive.c
 create mode 100644 base/data-struct/radix-tree-simple.c
 create mode 100644 test/unit/rt_case1.c
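
For orientation before the per-file hunks: both radix tree implementations added by this patch export the same small API from base/data-struct/radix-tree.h, with keys passed as [kb, ke) byte ranges and values carried in union radix_value. The following is a minimal caller sketch based only on those declarations; the key string and value are made up for illustration.

// Sketch: exercising the public radix_tree API provided by this patch.
#include "base/data-struct/radix-tree.h"

#include <assert.h>
#include <string.h>

int main(void)
{
	struct radix_tree *rt = radix_tree_create(NULL, NULL);	// no value destructor
	uint8_t k[] = "dev/sda";				// hypothetical key
	uint8_t *ke = k + strlen((char *) k);
	union radix_value v = { .n = 42 }, out;

	assert(rt);
	assert(radix_tree_insert(rt, k, ke, v));	// keys are [begin, end) byte ranges
	assert(radix_tree_lookup(rt, k, ke, &out) && out.n == 42);
	assert(radix_tree_remove(rt, k, ke));		// runs the destructor, if one was given
	assert(radix_tree_size(rt) == 0);

	radix_tree_destroy(rt);
	return 0;
}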

diff --git a/Makefile.in b/Makefile.in
index 29d5bed..3c8f8c8 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -51,18 +51,20 @@ DISTCLEAN_TARGETS += config.cache config.log config.status make.tmpl
 
 include make.tmpl
 
-libdm: include
-libdaemon: include
-lib: libdm libdaemon
-liblvm: lib
-daemons: lib libdaemon tools
-tools: lib libdaemon device-mapper
+include $(top_srcdir)/base/Makefile
+
+libdm: include $(top_builddir)/base/libbase.a
+libdaemon: include $(top_builddir)/base/libbase.a
+lib: libdm libdaemon $(top_builddir)/base/libbase.a
+liblvm: lib $(top_builddir)/base/libbase.a
+daemons: lib libdaemon tools $(top_builddir)/base/libbase.a
+tools: lib libdaemon device-mapper $(top_builddir)/base/libbase.a
 po: tools daemons
 man: tools
 all_man: tools
 scripts: liblvm libdm
-test: tools daemons
-unit-test: lib
+test: tools daemons $(top_builddir)/base/libbase.a
+unit-test: lib $(top_builddir)/base/libbase.a
 run-unit-test: unit-test
 
 lib.device-mapper: include.device-mapper
diff --git a/base/Makefile b/base/Makefile
new file mode 100644
index 0000000..056ea59
--- /dev/null
+++ b/base/Makefile
@@ -0,0 +1,38 @@
+# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+#
+# This file is part of the device-mapper userspace tools.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU Lesser General Public License v.2.1.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Uncomment this to build the simple radix tree.  You'll need to make clean too.
+# Comment it out to build the adaptive radix tree.
+#base/data-struct/radix-tree.o: CFLAGS += -DSIMPLE_RADIX_TREE
+
+# NOTE: this Makefile only works when included from the top-level Makefile,
+#       which defines all the top_* variables
+
+BASE_SOURCE=\
+	base/data-struct/radix-tree.c
+
+BASE_TARGET = base/libbase.a
+BASE_DEPENDS = $(BASE_SOURCE:%.c=%.d)
+BASE_OBJECTS = $(BASE_SOURCE:%.c=%.o)
+CLEAN_TARGETS += $(BASE_DEPENDS) $(BASE_OBJECTS) \
+	$(BASE_SOURCE:%.c=%.gcda) \
+	$(BASE_SOURCE:%.c=%.gcno) \
+	$(BASE_TARGET)
+
+$(BASE_TARGET): $(BASE_OBJECTS)
+	@echo "    [AR] $@"
+	$(Q) $(RM) $@
+	$(Q) $(AR) rsv $@ $(BASE_OBJECTS) > /dev/null
+
+ifeq ("$(DEPENDS)","yes")
+-include $(BASE_DEPENDS)
+endif
diff --git a/base/data-struct/radix-tree-adaptive.c b/base/data-struct/radix-tree-adaptive.c
new file mode 100644
index 0000000..b9ba417
--- /dev/null
+++ b/base/data-struct/radix-tree-adaptive.c
@@ -0,0 +1,1297 @@
+// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+// 
+// This file is part of LVM2.
+//
+// This copyrighted material is made available to anyone wishing to use,
+// modify, copy, or redistribute it subject to the terms and conditions
+// of the GNU Lesser General Public License v.2.1.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+#include "radix-tree.h"
+
+#include "base/memory/container_of.h"
+#include "base/memory/zalloc.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+//----------------------------------------------------------------
+
+enum node_type {
+	UNSET = 0,
+	VALUE,
+	VALUE_CHAIN,
+	PREFIX_CHAIN,
+	NODE4,
+	NODE16,
+	NODE48,
+	NODE256
+};
+
+struct value {
+	enum node_type type;
+	union radix_value value;
+};
+
+// This is used for entries that have a key which is a prefix of another key.
+struct value_chain {
+	union radix_value value;
+	struct value child;
+};
+
+struct prefix_chain {
+	struct value child;
+	unsigned len;
+	uint8_t prefix[0];
+};
+
+struct node4 {
+	uint32_t nr_entries;
+	uint8_t keys[4];
+	struct value values[4];
+};
+
+struct node16 {
+	uint32_t nr_entries;
+	uint8_t keys[16];
+	struct value values[16];
+};
+
+struct node48 {
+	uint32_t nr_entries;
+	uint8_t keys[256];
+	struct value values[48];
+};
+
+struct node256 {
+        uint32_t nr_entries;
+	struct value values[256];
+};
+
+struct radix_tree {
+	unsigned nr_entries;
+	struct value root;
+	radix_value_dtr dtr;
+	void *dtr_context;
+};
+
+//----------------------------------------------------------------
+
+struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context)
+{
+	struct radix_tree *rt = malloc(sizeof(*rt));
+
+	if (rt) {
+		rt->nr_entries = 0;
+		rt->root.type = UNSET;
+		rt->dtr = dtr;
+		rt->dtr_context = dtr_context;
+	}
+
+	return rt;
+}
+
+static inline void _dtr(struct radix_tree *rt, union radix_value v)
+{
+	if (rt->dtr)
+        	rt->dtr(rt->dtr_context, v);
+}
+
+// Returns the number of values removed
+static unsigned _free_node(struct radix_tree *rt, struct value v)
+{
+	unsigned i, nr = 0;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	switch (v.type) {
+	case UNSET:
+		break;
+
+	case VALUE:
+        	_dtr(rt, v.value);
+        	nr = 1;
+		break;
+
+	case VALUE_CHAIN:
+		vc = v.value.ptr;
+		_dtr(rt, vc->value);
+		nr = 1 + _free_node(rt, vc->child);
+		free(vc);
+		break;
+
+	case PREFIX_CHAIN:
+		pc = v.value.ptr;
+		nr = _free_node(rt, pc->child);
+		free(pc);
+		break;
+
+	case NODE4:
+		n4 = (struct node4 *) v.value.ptr;
+		for (i = 0; i < n4->nr_entries; i++)
+			nr += _free_node(rt, n4->values[i]);
+		free(n4);
+		break;
+
+	case NODE16:
+		n16 = (struct node16 *) v.value.ptr;
+		for (i = 0; i < n16->nr_entries; i++)
+			nr += _free_node(rt, n16->values[i]);
+		free(n16);
+		break;
+
+	case NODE48:
+		n48 = (struct node48 *) v.value.ptr;
+		for (i = 0; i < n48->nr_entries; i++)
+			nr += _free_node(rt, n48->values[i]);
+		free(n48);
+		break;
+
+	case NODE256:
+		n256 = (struct node256 *) v.value.ptr;
+		for (i = 0; i < 256; i++)
+			nr += _free_node(rt, n256->values[i]);
+		free(n256);
+		break;
+	}
+
+	return nr;
+}
+
+void radix_tree_destroy(struct radix_tree *rt)
+{
+	_free_node(rt, rt->root);
+	free(rt);
+}
+
+unsigned radix_tree_size(struct radix_tree *rt)
+{
+	return rt->nr_entries;
+}
+
+static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv);
+
+static bool _insert_unset(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	unsigned len = ke - kb;
+
+	if (!len) {
+		// value
+		v->type = VALUE;
+		v->value = rv;
+		rt->nr_entries++;
+	} else {
+		// prefix -> value
+		struct prefix_chain *pc = zalloc(sizeof(*pc) + len);
+		if (!pc)
+			return false;
+
+		pc->child.type = VALUE;
+		pc->child.value = rv;
+		pc->len = len;
+		memcpy(pc->prefix, kb, len);
+		v->type = PREFIX_CHAIN;
+		v->value.ptr = pc;
+		rt->nr_entries++;
+	}
+
+	return true;
+}
+
+static bool _insert_value(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	unsigned len = ke - kb;
+
+	if (!len)
+		// overwrite
+		v->value = rv;
+
+	else {
+		// value_chain -> value
+		struct value_chain *vc = zalloc(sizeof(*vc));
+		if (!vc)
+			return false;
+
+		vc->value = v->value;
+		if (!_insert(rt, &vc->child, kb, ke, rv)) {
+			free(vc);
+			return false;
+		}
+
+		v->type = VALUE_CHAIN;
+		v->value.ptr = vc;
+	}
+
+	return true;
+}
+
+static bool _insert_value_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct value_chain *vc = v->value.ptr;
+	return _insert(rt, &vc->child, kb, ke, rv);
+}
+
+static unsigned min(unsigned lhs, unsigned rhs)
+{
+	if (lhs <= rhs)
+		return lhs;
+	else
+		return rhs;
+}
+
+static bool _insert_prefix_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct prefix_chain *pc = v->value.ptr;
+
+	if (!pc->len) {
+		v->type = VALUE;
+		v->value = rv;
+
+	} else if (*kb == pc->prefix[0]) {
+		// There's a common prefix, so split the chain into two and
+		// recurse.
+		struct prefix_chain *pc2;
+		unsigned i, len = min(pc->len, ke - kb);
+
+		for (i = 0; i < len; i++)
+			if (kb[i] != pc->prefix[i])
+				break;
+
+		if (!(pc2 = zalloc(sizeof(*pc2) + pc->len - i)))
+			return false;
+		pc2->len = pc->len - i;
+		memmove(pc2->prefix, pc->prefix + i, pc2->len);
+		pc2->child = pc->child;
+
+		// FIXME: this trashes pc so we can't back out
+		pc->child.type = PREFIX_CHAIN;
+		pc->child.value.ptr = pc2;
+		pc->len = i;
+
+		if (!_insert(rt, &pc->child, kb + i, ke, rv)) {
+			free(pc2);
+			return false;
+		}
+
+	} else {
+		// Stick an n4 in front.
+		struct node4 *n4 = zalloc(sizeof(*n4));
+		if (!n4)
+			return false;
+
+		n4->keys[0] = pc->prefix[0];
+		if (pc->len == 1) {
+			n4->values[0] = pc->child;
+			free(pc);
+		} else {
+			memmove(pc->prefix, pc->prefix + 1, pc->len - 1);
+			pc->len--;
+			n4->values[0] = *v;
+		}
+
+		n4->keys[1] = *kb;
+		if (!_insert(rt, n4->values + 1, kb + 1, ke, rv)) {
+			free(n4);
+			return false;
+		}
+
+		n4->nr_entries = 2;
+
+		v->type = NODE4;
+		v->value.ptr = n4;
+	}
+
+	return true;
+}
+
+static bool _insert_node4(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct node4 *n4 = v->value.ptr;
+	if (n4->nr_entries == 4) {
+		struct node16 *n16 = zalloc(sizeof(*n16));
+		if (!n16)
+			return false;
+
+		n16->nr_entries = 5;
+		memcpy(n16->keys, n4->keys, sizeof(n4->keys));
+		memcpy(n16->values, n4->values, sizeof(n4->values));
+
+		n16->keys[4] = *kb;
+		if (!_insert(rt, n16->values + 4, kb + 1, ke, rv)) {
+			free(n16);
+			return false;
+		}
+		free(n4);
+		v->type = NODE16;
+		v->value.ptr = n16;
+	} else {
+		if (!_insert(rt, n4->values + n4->nr_entries, kb + 1, ke, rv))
+			return false;
+
+		n4->keys[n4->nr_entries] = *kb;
+		n4->nr_entries++;
+	}
+	return true;
+}
+
+static bool _insert_node16(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct node16 *n16 = v->value.ptr;
+
+	if (n16->nr_entries == 16) {
+		unsigned i;
+		struct node48 *n48 = zalloc(sizeof(*n48));
+
+		if (!n48)
+			return false;
+
+		n48->nr_entries = 17;
+		/* coverity[bad_memset] intentional use of '0' */
+		memset(n48->keys, 48, sizeof(n48->keys));
+
+		for (i = 0; i < 16; i++) {
+			n48->keys[n16->keys[i]] = i;
+			n48->values[i] = n16->values[i];
+		}
+
+		n48->keys[*kb] = 16;
+		if (!_insert(rt, n48->values + 16, kb + 1, ke, rv)) {
+			free(n48);
+			return false;
+		}
+
+		free(n16);
+		v->type = NODE48;
+		v->value.ptr = n48;
+	} else {
+		if (!_insert(rt, n16->values + n16->nr_entries, kb + 1, ke, rv))
+			return false;
+		n16->keys[n16->nr_entries] = *kb;
+		n16->nr_entries++;
+	}
+
+	return true;
+}
+
+static bool _insert_node48(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct node48 *n48 = v->value.ptr;
+	if (n48->nr_entries == 48) {
+		unsigned i;
+		struct node256 *n256 = zalloc(sizeof(*n256));
+		if (!n256)
+			return false;
+
+		n256->nr_entries = 49;
+		for (i = 0; i < 256; i++) {
+			if (n48->keys[i] < 48)
+				n256->values[i] = n48->values[n48->keys[i]];
+		}
+
+		if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv)) {
+			free(n256);
+			return false;
+		}
+
+		free(n48);
+		v->type = NODE256;
+		v->value.ptr = n256;
+
+	} else {
+		if (!_insert(rt, n48->values + n48->nr_entries, kb + 1, ke, rv))
+			return false;
+
+		n48->keys[*kb] = n48->nr_entries;
+		n48->nr_entries++;
+	}
+
+	return true;
+}
+
+static bool _insert_node256(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct node256 *n256 = v->value.ptr;
+	bool r, was_unset = n256->values[*kb].type == UNSET;
+
+	r = _insert(rt, n256->values + *kb, kb + 1, ke, rv);
+	if (r && was_unset)
+        	n256->nr_entries++;
+
+	return r;
+}
+
+// FIXME: the tree should not be touched if insert fails (eg, OOM)
+static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	if (kb == ke) {
+		if (v->type == UNSET) {
+			v->type = VALUE;
+			v->value = rv;
+			rt->nr_entries++;
+
+		} else if (v->type == VALUE) {
+			v->value = rv;
+
+		} else {
+			struct value_chain *vc = zalloc(sizeof(*vc));
+			if (!vc)
+				return false;
+
+			vc->value = rv;
+			vc->child = *v;
+			v->type = VALUE_CHAIN;
+			v->value.ptr = vc;
+			rt->nr_entries++;
+		}
+		return true;
+	}
+
+	switch (v->type) {
+	case UNSET:
+		return _insert_unset(rt, v, kb, ke, rv);
+
+	case VALUE:
+		return _insert_value(rt, v, kb, ke, rv);
+
+	case VALUE_CHAIN:
+		return _insert_value_chain(rt, v, kb, ke, rv);
+
+	case PREFIX_CHAIN:
+		return _insert_prefix_chain(rt, v, kb, ke, rv);
+
+	case NODE4:
+		return _insert_node4(rt, v, kb, ke, rv);
+
+	case NODE16:
+		return _insert_node16(rt, v, kb, ke, rv);
+
+	case NODE48:
+		return _insert_node48(rt, v, kb, ke, rv);
+
+	case NODE256:
+		return _insert_node256(rt, v, kb, ke, rv);
+	}
+
+	// can't get here
+	return false;
+}
+
+struct lookup_result {
+	struct value *v;
+	uint8_t *kb;
+};
+
+static struct lookup_result _lookup_prefix(struct value *v, uint8_t *kb, uint8_t *ke)
+{
+	unsigned i;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	if (kb == ke)
+		return (struct lookup_result) {.v = v, .kb = kb};
+
+	switch (v->type) {
+	case UNSET:
+	case VALUE:
+		break;
+
+	case VALUE_CHAIN:
+		vc = v->value.ptr;
+		return _lookup_prefix(&vc->child, kb, ke);
+
+	case PREFIX_CHAIN:
+		pc = v->value.ptr;
+		if (ke - kb < pc->len)
+			return (struct lookup_result) {.v = v, .kb = kb};
+
+		for (i = 0; i < pc->len; i++)
+			if (kb[i] != pc->prefix[i])
+				return (struct lookup_result) {.v = v, .kb = kb};
+
+		return _lookup_prefix(&pc->child, kb + pc->len, ke);
+
+	case NODE4:
+		n4 = v->value.ptr;
+		for (i = 0; i < n4->nr_entries; i++)
+			if (n4->keys[i] == *kb)
+				return _lookup_prefix(n4->values + i, kb + 1, ke);
+		break;
+
+	case NODE16:
+		// FIXME: use binary search or simd?
+		n16 = v->value.ptr;
+		for (i = 0; i < n16->nr_entries; i++)
+			if (n16->keys[i] == *kb)
+				return _lookup_prefix(n16->values + i, kb + 1, ke);
+		break;
+
+	case NODE48:
+		n48 = v->value.ptr;
+		i = n48->keys[*kb];
+		if (i < 48)
+			return _lookup_prefix(n48->values + i, kb + 1, ke);
+		break;
+
+	case NODE256:
+		n256 = v->value.ptr;
+		if (n256->values[*kb].type != UNSET)
+			return _lookup_prefix(n256->values + *kb, kb + 1, ke);
+		break;
+	}
+
+	return (struct lookup_result) {.v = v, .kb = kb};
+}
+
+bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value rv)
+{
+	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
+	return _insert(rt, lr.v, lr.kb, ke, rv);
+}
+
+// Note the degrade functions also free the original node.
+static void _degrade_to_n4(struct node16 *n16, struct value *result)
+{
+        struct node4 *n4 = zalloc(sizeof(*n4));
+
+	assert(n4 != NULL);
+
+        n4->nr_entries = n16->nr_entries;
+        memcpy(n4->keys, n16->keys, n16->nr_entries * sizeof(*n4->keys));
+        memcpy(n4->values, n16->values, n16->nr_entries * sizeof(*n4->values));
+        free(n16);
+
+	result->type = NODE4;
+	result->value.ptr = n4;
+}
+
+static void _degrade_to_n16(struct node48 *n48, struct value *result)
+{
+	unsigned i, count = 0;
+        struct node16 *n16 = zalloc(sizeof(*n16));
+
+	assert(n16 != NULL);
+
+        n16->nr_entries = n48->nr_entries;
+        for (i = 0; i < 256; i++) {
+	        if (n48->keys[i] < 48) {
+		        n16->keys[count] = i;
+		        n16->values[count] = n48->values[n48->keys[i]];
+		        count++;
+	        }
+        }
+
+        free(n48);
+
+	result->type = NODE16;
+	result->value.ptr = n16;
+}
+
+static void _degrade_to_n48(struct node256 *n256, struct value *result)
+{
+        unsigned i, count = 0;
+        struct node48 *n48 = zalloc(sizeof(*n48));
+
+	assert(n48 != NULL);
+
+        n48->nr_entries = n256->nr_entries;
+        for (i = 0; i < 256; i++) {
+		if (n256->values[i].type == UNSET)
+			n48->keys[i] = 48;
+
+		else {
+			n48->keys[i] = count;
+			n48->values[count] = n256->values[i];
+			count++;
+		}
+        }
+
+        free(n256);
+
+	result->type = NODE48;
+	result->value.ptr = n48;
+}
+
+// Removes an entry in an array by sliding the values above it down.
+static void _erase_elt(void *array, size_t obj_size, unsigned count, unsigned idx)
+{
+	if (idx == (count - 1))
+		// The simple case
+		return;
+
+	memmove(((uint8_t *) array) + (obj_size * idx),
+                ((uint8_t *) array) + (obj_size * (idx + 1)),
+                obj_size * (count - idx - 1));
+
+	// Zero the now-unused last elt (sets v.type to UNSET)
+	memset(((uint8_t *) array) + (count - 1) * obj_size, 0, obj_size);
+}
+
+static bool _remove(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke)
+{
+	bool r;
+	unsigned i, j;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	if (kb == ke) {
+        	if (root->type == VALUE) {
+                	root->type = UNSET;
+                	_dtr(rt, root->value);
+                	return true;
+
+                } else if (root->type == VALUE_CHAIN) {
+			vc = root->value.ptr;
+			_dtr(rt, vc->value);
+			memcpy(root, &vc->child, sizeof(*root));
+			free(vc);
+			return true;
+
+                } else
+			return false;
+	}
+
+	switch (root->type) {
+	case UNSET:
+	case VALUE:
+        	// this is a value for a prefix of the key
+        	return false;
+
+	case VALUE_CHAIN:
+		vc = root->value.ptr;
+		r = _remove(rt, &vc->child, kb, ke);
+		if (r && (vc->child.type == UNSET)) {
+			root->type = VALUE;
+			root->value = vc->value;
+			free(vc);
+		}
+		return r;
+
+	case PREFIX_CHAIN:
+		pc = root->value.ptr;
+		if (ke - kb < pc->len)
+        		return false;
+
+		for (i = 0; i < pc->len; i++)
+			if (kb[i] != pc->prefix[i])
+        			return false;
+
+		r = _remove(rt, &pc->child, kb + pc->len, ke);
+		if (r && pc->child.type == UNSET) {
+			root->type = UNSET;
+			free(pc);
+		}
+		return r;
+
+	case NODE4:
+		n4 = root->value.ptr;
+		for (i = 0; i < n4->nr_entries; i++) {
+			if (n4->keys[i] == *kb) {
+				r = _remove(rt, n4->values + i, kb + 1, ke);
+				if (r && n4->values[i].type == UNSET) {
+        				if (i < n4->nr_entries) {
+	        				_erase_elt(n4->keys, sizeof(*n4->keys), n4->nr_entries, i);
+	        				_erase_elt(n4->values, sizeof(*n4->values), n4->nr_entries, i);
+        				}
+
+        				n4->nr_entries--;
+					if (!n4->nr_entries) {
+						free(n4);
+						root->type = UNSET;
+					}
+				}
+				return r;
+			}
+		}
+		return false;
+
+	case NODE16:
+        	n16 = root->value.ptr;
+		for (i = 0; i < n16->nr_entries; i++) {
+			if (n16->keys[i] == *kb) {
+				r = _remove(rt, n16->values + i, kb + 1, ke);
+				if (r && n16->values[i].type == UNSET) {
+        				if (i < n16->nr_entries) {
+	        				_erase_elt(n16->keys, sizeof(*n16->keys), n16->nr_entries, i);
+	        				_erase_elt(n16->values, sizeof(*n16->values), n16->nr_entries, i);
+        				}
+
+        				n16->nr_entries--;
+					if (n16->nr_entries <= 4) {
+        					_degrade_to_n4(n16, root);
+					}
+				}
+				return r;
+			}
+		}
+		return false;
+
+	case NODE48:
+		n48 = root->value.ptr;
+		i = n48->keys[*kb];
+		if (i < 48) {
+        		r = _remove(rt, n48->values + i, kb + 1, ke);
+        		if (r && n48->values[i].type == UNSET) {
+                		n48->keys[*kb] = 48;
+                		for (j = 0; j < 256; j++)
+	                		if (n48->keys[j] < 48 && n48->keys[j] > i)
+		                		n48->keys[j]--;
+				_erase_elt(n48->values, sizeof(*n48->values), n48->nr_entries, i);
+				n48->nr_entries--;
+				if (n48->nr_entries <= 16)
+        				_degrade_to_n16(n48, root);
+        		}
+        		return r;
+		}
+		return false;
+
+	case NODE256:
+		n256 = root->value.ptr;
+		r = _remove(rt, n256->values + (*kb), kb + 1, ke);
+		if (r && n256->values[*kb].type == UNSET) {
+			n256->nr_entries--;
+			if (n256->nr_entries <= 48)
+        			_degrade_to_n48(n256, root);
+		}
+		return r;
+	}
+
+	return false;
+}
+
+bool radix_tree_remove(struct radix_tree *rt, uint8_t *key_begin, uint8_t *key_end)
+{
+	if (_remove(rt, &rt->root, key_begin, key_end)) {
+        	rt->nr_entries--;
+        	return true;
+	}
+
+	return false;
+}
+
+//----------------------------------------------------------------
+
+static bool _prefix_chain_matches(struct lookup_result *lr, uint8_t *ke)
+{
+        // It's possible the top node is a prefix chain, and
+        // the remaining key matches part of it.
+        if (lr->v->type == PREFIX_CHAIN) {
+                unsigned i, rlen = ke - lr->kb;
+                struct prefix_chain *pc = lr->v->value.ptr;
+                if (rlen < pc->len) {
+                        for (i = 0; i < rlen; i++)
+                                if (pc->prefix[i] != lr->kb[i])
+                                        return false;
+                        return true;
+		}
+        }
+
+        return false;
+}
+
+static bool _remove_subtree(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke, unsigned *count)
+{
+	bool r;
+	unsigned i, j, len;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	if (kb == ke) {
+		*count += _free_node(rt, *root);
+		root->type = UNSET;
+		return true;
+	}
+
+	switch (root->type) {
+	case UNSET:
+	case VALUE:
+		// No entries with the given prefix
+        	return true;
+
+	case VALUE_CHAIN:
+		vc = root->value.ptr;
+		r = _remove_subtree(rt, &vc->child, kb, ke, count);
+		if (r && (vc->child.type == UNSET)) {
+			root->type = VALUE;
+			root->value = vc->value;
+			free(vc);
+		}
+		return r;
+
+	case PREFIX_CHAIN:
+		pc = root->value.ptr;
+		len = min(pc->len, ke - kb);
+		for (i = 0; i < len; i++)
+			if (kb[i] != pc->prefix[i])
+        			return true;
+
+		r = _remove_subtree(rt, &pc->child, len < pc->len ? ke : (kb + pc->len), ke, count);
+		if (r && pc->child.type == UNSET) {
+			root->type = UNSET;
+			free(pc);
+		}
+		return r;
+
+	case NODE4:
+		n4 = root->value.ptr;
+		for (i = 0; i < n4->nr_entries; i++) {
+			if (n4->keys[i] == *kb) {
+				r = _remove_subtree(rt, n4->values + i, kb + 1, ke, count);
+				if (r && n4->values[i].type == UNSET) {
+        				if (i < n4->nr_entries) {
+	        				_erase_elt(n4->keys, sizeof(*n4->keys), n4->nr_entries, i);
+	        				_erase_elt(n4->values, sizeof(*n4->values), n4->nr_entries, i);
+        				}
+
+        				n4->nr_entries--;
+					if (!n4->nr_entries) {
+						free(n4);
+						root->type = UNSET;
+					}
+				}
+				return r;
+			}
+		}
+		return true;
+
+	case NODE16:
+        	n16 = root->value.ptr;
+		for (i = 0; i < n16->nr_entries; i++) {
+			if (n16->keys[i] == *kb) {
+				r = _remove_subtree(rt, n16->values + i, kb + 1, ke, count);
+				if (r && n16->values[i].type == UNSET) {
+        				if (i < n16->nr_entries) {
+	        				_erase_elt(n16->keys, sizeof(*n16->keys), n16->nr_entries, i);
+	        				_erase_elt(n16->values, sizeof(*n16->values), n16->nr_entries, i);
+        				}
+
+        				n16->nr_entries--;
+					if (n16->nr_entries <= 4)
+        					_degrade_to_n4(n16, root);
+				}
+				return r;
+			}
+		}
+		return true;
+
+	case NODE48:
+		n48 = root->value.ptr;
+		i = n48->keys[*kb];
+		if (i < 48) {
+        		r = _remove_subtree(rt, n48->values + i, kb + 1, ke, count);
+        		if (r && n48->values[i].type == UNSET) {
+                		n48->keys[*kb] = 48;
+                		for (j = 0; j < 256; j++)
+	                		if (n48->keys[j] < 48 && n48->keys[j] > i)
+		                		n48->keys[j]--;
+				_erase_elt(n48->values, sizeof(*n48->values), n48->nr_entries, i);
+				n48->nr_entries--;
+				if (n48->nr_entries <= 16)
+        				_degrade_to_n16(n48, root);
+        		}
+        		return r;
+		}
+		return true;
+
+	case NODE256:
+		n256 = root->value.ptr;
+		if (n256->values[*kb].type == UNSET)
+			return true;  // No entries
+
+		r = _remove_subtree(rt, n256->values + (*kb), kb + 1, ke, count);
+		if (r && n256->values[*kb].type == UNSET) {
+			n256->nr_entries--;
+			if (n256->nr_entries <= 48)
+        			_degrade_to_n48(n256, root);
+		}
+		return r;
+	}
+
+	// Shouldn't get here
+	return false;
+}
+
+unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
+{
+        unsigned count = 0;
+
+        if (_remove_subtree(rt, &rt->root, kb, ke, &count))
+		rt->nr_entries -= count;
+
+	return count;
+}
+
+//----------------------------------------------------------------
+
+bool radix_tree_lookup(struct radix_tree *rt,
+		       uint8_t *kb, uint8_t *ke, union radix_value *result)
+{
+	struct value_chain *vc;
+	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
+	if (lr.kb == ke) {
+		switch (lr.v->type) {
+		case VALUE:
+			*result = lr.v->value;
+			return true;
+
+		case VALUE_CHAIN:
+			vc = lr.v->value.ptr;
+			*result = vc->value;
+			return true;
+
+		default:
+			return false;
+		}
+	}
+
+	return false;
+}
+
+// FIXME: build up the keys too
+static bool _iterate(struct value *v, struct radix_tree_iterator *it)
+{
+	unsigned i;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	switch (v->type) {
+	case UNSET:
+        	// can't happen
+		break;
+
+	case VALUE:
+        	return it->visit(it, NULL, NULL, v->value);
+
+	case VALUE_CHAIN:
+		vc = v->value.ptr;
+		return it->visit(it, NULL, NULL, vc->value) && _iterate(&vc->child, it);
+
+	case PREFIX_CHAIN:
+		pc = v->value.ptr;
+		return _iterate(&pc->child, it);
+
+	case NODE4:
+		n4 = (struct node4 *) v->value.ptr;
+		for (i = 0; i < n4->nr_entries; i++)
+			if (!_iterate(n4->values + i, it))
+        			return false;
+        	return true;
+
+	case NODE16:
+		n16 = (struct node16 *) v->value.ptr;
+		for (i = 0; i < n16->nr_entries; i++)
+        		if (!_iterate(n16->values + i, it))
+        			return false;
+		return true;
+
+	case NODE48:
+		n48 = (struct node48 *) v->value.ptr;
+		for (i = 0; i < n48->nr_entries; i++)
+        		if (!_iterate(n48->values + i, it))
+        			return false;
+		return true;
+
+	case NODE256:
+		n256 = (struct node256 *) v->value.ptr;
+		for (i = 0; i < 256; i++)
+        		if (n256->values[i].type != UNSET && !_iterate(n256->values + i, it))
+        			return false;
+		return true;
+	}
+
+	// can't get here
+	return false;
+}
+
+void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
+                        struct radix_tree_iterator *it)
+{
+	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
+	if (lr.kb == ke || _prefix_chain_matches(&lr, ke))
+        	_iterate(lr.v, it);
+}
+
+//----------------------------------------------------------------
+// Checks:
+// 1) The number of entries matches rt->nr_entries
+// 2) The number of entries is correct in each node
+// 3) prefix chain len > 0
+// 4) all unused values are UNSET
+
+static bool _check_nodes(struct value *v, unsigned *count)
+{
+	uint64_t bits;
+	unsigned i, ncount;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	switch (v->type) {
+	case UNSET:
+		return true;
+
+	case VALUE:
+		(*count)++;
+		return true;
+
+	case VALUE_CHAIN:
+		(*count)++;
+		vc = v->value.ptr;
+		return _check_nodes(&vc->child, count);
+
+	case PREFIX_CHAIN:
+		pc = v->value.ptr;
+		return _check_nodes(&pc->child, count);
+
+	case NODE4:
+		n4 = v->value.ptr;
+		for (i = 0; i < n4->nr_entries; i++)
+			if (!_check_nodes(n4->values + i, count))
+				return false;
+
+		for (i = n4->nr_entries; i < 4; i++)
+			if (n4->values[i].type != UNSET) {
+				fprintf(stderr, "unused value is not UNSET (n4)\n");
+				return false;
+			}
+
+		return true;
+
+	case NODE16:
+		n16 = v->value.ptr;
+		for (i = 0; i < n16->nr_entries; i++)
+			if (!_check_nodes(n16->values + i, count))
+				return false;
+
+		for (i = n16->nr_entries; i < 16; i++)
+			if (n16->values[i].type != UNSET) {
+				fprintf(stderr, "unused value is not UNSET (n16)\n");
+				return false;
+			}
+
+		return true;
+
+	case NODE48:
+		bits = 0;
+		n48 = v->value.ptr;
+		ncount = 0;
+		for (i = 0; i < 256; i++) {
+			if (n48->keys[i] < 48) {
+				if (n48->keys[i] >= n48->nr_entries) {
+					fprintf(stderr, "referencing value past nr_entries (n48)\n");
+					return false;
+				}
+
+				if (bits & (1ull << n48->keys[i])) {
+					fprintf(stderr, "duplicate entry (n48) %u\n", (unsigned) n48->keys[i]);
+					return false;
+				}
+				bits = bits | (1ull << n48->keys[i]);
+				ncount++;
+
+				if (!_check_nodes(n48->values + n48->keys[i], count))
+					return false;
+			}
+		}
+
+		for (i = 0; i < n48->nr_entries; i++) {
+			if (!(bits & (1ull << i))) {
+				fprintf(stderr, "not all values are referenced (n48)\n");
+				return false;
+			}
+		}
+
+		if (ncount != n48->nr_entries) {
+			fprintf(stderr, "incorrect number of entries in n48, n48->nr_entries = %u, actual = %u\n",
+                                n48->nr_entries, ncount);
+			return false;
+		}
+
+		for (i = 0; i < n48->nr_entries; i++)
+			if (n48->values[i].type == UNSET) {
+				fprintf(stderr, "value is UNSET (n48)\n");
+				return false;
+			}
+
+		for (i = n48->nr_entries; i < 48; i++)
+			if (n48->values[i].type != UNSET) {
+				fprintf(stderr, "unused value is not UNSET (n48)\n");
+				return false;
+			}
+
+		return true;
+
+	case NODE256:
+		n256 = v->value.ptr;
+
+		ncount = 0;
+		for (i = 0; i < 256; i++) {
+			struct value *v2 = n256->values + i;
+
+			if (v2->type == UNSET)
+				continue;
+
+			if (!_check_nodes(v2, count))
+				return false;
+
+			ncount++;
+		}
+
+		if (ncount != n256->nr_entries) {
+			fprintf(stderr, "incorrect number of entries in n256, n256->nr_entries = %u, actual = %u\n",
+                                n256->nr_entries, ncount);
+			return false;
+		}
+
+		return true;
+
+	default:
+		fprintf(stderr, "unknown value type: %u\n", v->type);
+	}
+
+	fprintf(stderr, "shouldn't get here\n");
+	return false;
+}
+
+bool radix_tree_is_well_formed(struct radix_tree *rt)
+{
+	unsigned count = 0;
+
+	if (!_check_nodes(&rt->root, &count))
+		return false;
+
+	if (rt->nr_entries != count) {
+		fprintf(stderr, "incorrect entry count: rt->nr_entries = %u, actual = %u\n",
+                        rt->nr_entries, count);
+		return false;
+	}
+
+	return true;
+}
+
+//----------------------------------------------------------------
+
+static void _dump(FILE *out, struct value v, unsigned indent)
+{
+	unsigned i;
+	struct value_chain *vc;
+	struct prefix_chain *pc;
+	struct node4 *n4;
+	struct node16 *n16;
+	struct node48 *n48;
+	struct node256 *n256;
+
+	if (v.type == UNSET)
+		return;
+
+	for (i = 0; i < 2 * indent; i++)
+		fprintf(out, " ");
+
+	switch (v.type) {
+	case UNSET:
+		// can't happen
+		break;
+
+	case VALUE:
+		fprintf(out, "<val: %llu>\n", (unsigned long long) v.value.n);
+		break;
+
+	case VALUE_CHAIN:
+		vc = v.value.ptr;
+		fprintf(out, "<val_chain: %llu>\n", (unsigned long long) vc->value.n);
+		_dump(out, vc->child, indent + 1);
+		break;
+
+	case PREFIX_CHAIN:
+		pc = v.value.ptr;
+		fprintf(out, "<prefix: ");
+		for (i = 0; i < pc->len; i++)
+			fprintf(out, "%x.", (unsigned) *(pc->prefix + i));
+		fprintf(out, ">\n");
+		_dump(out, pc->child, indent + 1);
+		break;
+
+	case NODE4:
+		n4 = v.value.ptr;
+		fprintf(out, "<n4: ");
+		for (i = 0; i < n4->nr_entries; i++)
+			fprintf(out, "%x ", (unsigned) n4->keys[i]);
+		fprintf(out, ">\n");
+
+		for (i = 0; i < n4->nr_entries; i++)
+			_dump(out, n4->values[i], indent + 1);
+		break;
+
+	case NODE16:
+		n16 = v.value.ptr;
+		fprintf(out, "<n16: ");
+		for (i = 0; i < n16->nr_entries; i++)
+			fprintf(out, "%x ", (unsigned) n16->keys[i]);
+		fprintf(out, ">\n");
+
+		for (i = 0; i < n16->nr_entries; i++)
+			_dump(out, n16->values[i], indent + 1);
+		break;
+
+	case NODE48:
+		n48 = v.value.ptr;
+		fprintf(out, "<n48: ");
+		for (i = 0; i < 256; i++)
+			if (n48->keys[i] < 48)
+				fprintf(out, "%x ", i);
+		fprintf(out, ">\n");
+
+		for (i = 0; i < n48->nr_entries; i++) {
+			assert(n48->values[i].type != UNSET);
+			_dump(out, n48->values[i], indent + 1);
+		}
+		break;
+
+	case NODE256:
+		n256 = v.value.ptr;
+		fprintf(out, "<n256: ");
+		for (i = 0; i < 256; i++)
+			if (n256->values[i].type != UNSET)
+				fprintf(out, "%x ", i);
+		fprintf(out, ">\n");
+
+		for (i = 0; i < 256; i++)
+			if (n256->values[i].type != UNSET)
+				_dump(out, n256->values[i], indent + 1);
+		break;
+	}
+}
+
+void radix_tree_dump(struct radix_tree *rt, FILE *out)
+{
+	_dump(out, rt->root, 0);
+}
+
+//----------------------------------------------------------------
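
The adaptive implementation above consumes one key byte per level and picks an inner-node representation by fan-out, upgrading NODE4 -> NODE16 -> NODE48 -> NODE256 on insert and degrading again on remove at the <=4 / <=16 / <=48 thresholds; VALUE_CHAIN covers a key that is a strict prefix of another key, and PREFIX_CHAIN collapses runs of single-child levels. Below is a sketch of how those shapes are reached through the public API alone; the keys are made up, and only the entry count and the well-formedness check added by this patch are asserted, not internal node types.

// Sketch: insert patterns that drive the adaptive node transitions.
#include "base/data-struct/radix-tree.h"

#include <assert.h>

int main(void)
{
	struct radix_tree *rt = radix_tree_create(NULL, NULL);
	union radix_value v = { .n = 1 };
	uint8_t k[] = { 'x', 'y', 'z' };
	unsigned i;

	assert(rt);

	// A lone key is stored as a PREFIX_CHAIN ending in a VALUE.
	assert(radix_tree_insert(rt, k, k + 3, v));

	// "xy" is a strict prefix of "xyz": the prefix chain is split and a
	// VALUE_CHAIN carries the value for the shorter key.
	assert(radix_tree_insert(rt, k, k + 2, v));

	// Raising the fan-out on the first byte steps the inner node through
	// NODE4 -> NODE16 -> NODE48 -> NODE256; removals would later degrade
	// it again at the <=4 / <=16 / <=48 thresholds.
	for (i = 0; i < 256; i++) {
		uint8_t b = i;
		assert(radix_tree_insert(rt, &b, &b + 1, v));
	}

	assert(radix_tree_size(rt) == 256 + 2);
	assert(radix_tree_is_well_formed(rt));

	radix_tree_destroy(rt);
	return 0;
}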
diff --git a/base/data-struct/radix-tree-simple.c b/base/data-struct/radix-tree-simple.c
new file mode 100644
index 0000000..e8a2fdd
--- /dev/null
+++ b/base/data-struct/radix-tree-simple.c
@@ -0,0 +1,256 @@
+// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+// 
+// This file is part of LVM2.
+//
+// This copyrighted material is made available to anyone wishing to use,
+// modify, copy, or redistribute it subject to the terms and conditions
+// of the GNU Lesser General Public License v.2.1.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+#include "radix-tree.h"
+
+#include "base/memory/container_of.h"
+#include "base/memory/zalloc.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+//----------------------------------------------------------------
+// This implementation is based around nested binary trees.  Very
+// simple (and hopefully correct).
+
+struct node {
+	struct node *left;
+	struct node *right;
+
+	uint8_t key;
+	struct node *center;
+
+	bool has_value;
+	union radix_value value;
+};
+
+struct radix_tree {
+	radix_value_dtr dtr;
+	void *dtr_context;
+
+	struct node *root;
+};
+
+struct radix_tree *
+radix_tree_create(radix_value_dtr dtr, void *dtr_context)
+{
+	struct radix_tree *rt = zalloc(sizeof(*rt));
+
+	if (rt) {
+		rt->dtr = dtr;
+		rt->dtr_context = dtr_context;
+	}
+
+	return rt;
+}
+
+// Returns the number of entries in the tree
+static unsigned _destroy_tree(struct node *n, radix_value_dtr dtr, void *context)
+{
+	unsigned r;
+
+	if (!n)
+		return 0;
+
+	r = _destroy_tree(n->left, dtr, context);
+	r += _destroy_tree(n->right, dtr, context);
+	r += _destroy_tree(n->center, dtr, context);
+
+	if (n->has_value) {
+		if (dtr)
+			dtr(context, n->value);
+		r++;
+	}
+
+	free(n);
+
+	return r;
+}
+
+void radix_tree_destroy(struct radix_tree *rt)
+{
+	_destroy_tree(rt->root, rt->dtr, rt->dtr_context);
+	free(rt);
+}
+
+static unsigned _count(struct node *n)
+{
+	unsigned r;
+
+	if (!n)
+		return 0;
+
+	r = _count(n->left);
+	r += _count(n->right);
+	r += _count(n->center);
+
+	if (n->has_value)
+		r++;
+
+	return r;
+}
+
+unsigned radix_tree_size(struct radix_tree *rt)
+{
+	return _count(rt->root);
+}
+
+static struct node **_lookup(struct node **pn, uint8_t *kb, uint8_t *ke)
+{
+	struct node *n = *pn;
+
+	if (!n || (kb == ke))
+		return pn;
+
+	if (*kb < n->key)
+		return _lookup(&n->left, kb, ke);
+
+	else if (*kb > n->key)
+		return _lookup(&n->right, kb, ke);
+
+	else
+		return _lookup(&n->center, kb + 1, ke);
+}
+
+static bool _insert(struct node **pn, uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+	struct node *n = *pn;
+
+	if (!n) {
+		n = zalloc(sizeof(*n));
+		if (!n)
+			return false;
+
+		n->key = *kb;
+		*pn = n;
+	}
+
+	if (kb == ke) {
+		n->has_value = true;
+		n->value = v;
+		return true;
+	}
+
+	if (*kb < n->key)
+		return _insert(&n->left, kb, ke, v);
+
+	else if (*kb > n->key)
+		return _insert(&n->right, kb, ke, v);
+
+	else
+		return _insert(&n->center, kb + 1, ke, v);
+}
+
+bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+	return _insert(&rt->root, kb, ke, v);
+}
+
+bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
+{
+	struct node **pn = _lookup(&rt->root, kb, ke);
+	struct node *n = *pn;
+
+	if (!n || !n->has_value)
+		return false;
+
+	else {
+		if (rt->dtr)
+			rt->dtr(rt->dtr_context, n->value);
+
+		if (n->left || n->center || n->right) {
+			n->has_value = false;
+			return true;
+
+		} else {
+			// FIXME: delete parent if this was the last entry
+			free(n);
+			*pn = NULL;
+		}
+
+		return true;
+	}
+}
+
+unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
+{
+	struct node **pn;
+	unsigned count;
+
+	pn = _lookup(&rt->root, kb, ke);
+
+	if (*pn) {
+		count = _destroy_tree(*pn, rt->dtr, rt->dtr_context);
+		*pn = NULL;
+	}
+
+	return count;
+}
+
+bool
+radix_tree_lookup(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value *result)
+{
+	struct node **pn = _lookup(&rt->root, kb, ke);
+	struct node *n = *pn;
+
+	if (n && n->has_value) {
+		*result = n->value;
+		return true;
+	} else
+		return false;
+}
+
+static void _iterate(struct node *n, struct radix_tree_iterator *it)
+{
+	if (!n)
+		return;
+
+	_iterate(n->left, it);
+
+	if (n->has_value)
+		// FIXME: fill out the key
+		it->visit(it, NULL, NULL, n->value);
+
+	_iterate(n->center, it);
+	_iterate(n->right, it);
+}
+
+void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
+                        struct radix_tree_iterator *it)
+{
+	if (kb == ke)
+		_iterate(rt->root, it);
+
+	else {
+		struct node **pn = _lookup(&rt->root, kb, ke);
+		struct node *n = *pn;
+
+		if (n) {
+			if (n->has_value)
+				it->visit(it, NULL, NULL, n->value);
+			_iterate(n->center, it);
+		}
+	}
+}
+
+bool radix_tree_is_well_formed(struct radix_tree *rt)
+{
+	return true;
+}
+
+void radix_tree_dump(struct radix_tree *rt, FILE *out)
+{
+}
+
+//----------------------------------------------------------------
+
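
The simple implementation above is essentially a ternary search tree: each node owns one key byte, smaller bytes branch left, larger bytes branch right, and a matching byte is consumed by descending through center; the value for a key ends up on the node reached by one final center step. The following restates that descent rule iteratively with an illustrative struct (tst_node and tst_lookup are made-up names mirroring the static struct node and _lookup above, not part of the patch).

// Sketch: the left/right/center descent used by _lookup()/_insert() above.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct tst_node {			// mirrors 'struct node' in radix-tree-simple.c
	struct tst_node *left, *right, *center;
	uint8_t key;
	bool has_value;
	uint64_t value;
};

// Returns the node holding the value for key [kb, ke), or NULL.
struct tst_node *tst_lookup(struct tst_node *n, const uint8_t *kb, const uint8_t *ke)
{
	while (n && kb != ke) {
		if (*kb < n->key)
			n = n->left;		// smaller byte: try the left sibling subtree
		else if (*kb > n->key)
			n = n->right;		// larger byte: try the right sibling subtree
		else {
			n = n->center;		// byte matched: consume it, go one level down
			kb++;
		}
	}
	return (n && n->has_value) ? n : NULL;
}

int main(void)
{
	// Hand-built chain for the 2-byte key { 'a', 'b' }: one node per byte
	// plus a terminal node (reached via ->center) carrying the value.
	struct tst_node t = { .has_value = true, .value = 7 };
	struct tst_node b = { .key = 'b', .center = &t };
	struct tst_node a = { .key = 'a', .center = &b };
	uint8_t k[] = { 'a', 'b' };
	struct tst_node *hit = tst_lookup(&a, k, k + 2);

	return (hit && hit->value == 7) ? 0 : 1;
}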
diff --git a/base/data-struct/radix-tree.c b/base/data-struct/radix-tree.c
index 222b350..52a1a05 100644
--- a/base/data-struct/radix-tree.c
+++ b/base/data-struct/radix-tree.c
@@ -10,853 +10,12 @@
 // along with this program; if not, write to the Free Software Foundation,
 // Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
-#include "radix-tree.h"
-
-#include "base/memory/container_of.h"
-#include "base/memory/zalloc.h"
-
-#include <assert.h>
-#include <stdlib.h>
-#include <stdio.h>
-
 //----------------------------------------------------------------
 
-enum node_type {
-	UNSET = 0,
-	VALUE,
-	VALUE_CHAIN,
-	PREFIX_CHAIN,
-	NODE4,
-	NODE16,
-	NODE48,
-	NODE256
-};
-
-struct value {
-	enum node_type type;
-	union radix_value value;
-};
-
-// This is used for entries that have a key which is a prefix of another key.
-struct value_chain {
-	union radix_value value;
-	struct value child;
-};
-
-struct prefix_chain {
-	struct value child;
-	unsigned len;
-	uint8_t prefix[0];
-};
-
-struct node4 {
-	uint32_t nr_entries;
-	uint8_t keys[4];
-	struct value values[4];
-};
-
-struct node16 {
-	uint32_t nr_entries;
-	uint8_t keys[16];
-	struct value values[16];
-};
-
-struct node48 {
-	uint32_t nr_entries;
-	uint8_t keys[256];
-	struct value values[48];
-};
-
-struct node256 {
-        uint32_t nr_entries;
-	struct value values[256];
-};
-
-struct radix_tree {
-	unsigned nr_entries;
-	struct value root;
-	radix_value_dtr dtr;
-	void *dtr_context;
-};
-
-//----------------------------------------------------------------
-
-struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context)
-{
-	struct radix_tree *rt = malloc(sizeof(*rt));
-
-	if (rt) {
-		rt->nr_entries = 0;
-		rt->root.type = UNSET;
-		rt->dtr = dtr;
-		rt->dtr_context = dtr_context;
-	}
-
-	return rt;
-}
-
-static inline void _dtr(struct radix_tree *rt, union radix_value v)
-{
-	if (rt->dtr)
-        	rt->dtr(rt->dtr_context, v);
-}
-
-// Returns the number of values removed
-static unsigned _free_node(struct radix_tree *rt, struct value v)
-{
-	unsigned i, nr = 0;
-	struct value_chain *vc;
-	struct prefix_chain *pc;
-	struct node4 *n4;
-	struct node16 *n16;
-	struct node48 *n48;
-	struct node256 *n256;
-
-	switch (v.type) {
-	case UNSET:
-		break;
-
-	case VALUE:
-        	_dtr(rt, v.value);
-        	nr = 1;
-		break;
-
-	case VALUE_CHAIN:
-		vc = v.value.ptr;
-		_dtr(rt, vc->value);
-		nr = 1 + _free_node(rt, vc->child);
-		free(vc);
-		break;
-
-	case PREFIX_CHAIN:
-		pc = v.value.ptr;
-		nr = _free_node(rt, pc->child);
-		free(pc);
-		break;
-
-	case NODE4:
-		n4 = (struct node4 *) v.value.ptr;
-		for (i = 0; i < n4->nr_entries; i++)
-			nr += _free_node(rt, n4->values[i]);
-		free(n4);
-		break;
-
-	case NODE16:
-		n16 = (struct node16 *) v.value.ptr;
-		for (i = 0; i < n16->nr_entries; i++)
-			nr += _free_node(rt, n16->values[i]);
-		free(n16);
-		break;
-
-	case NODE48:
-		n48 = (struct node48 *) v.value.ptr;
-		for (i = 0; i < n48->nr_entries; i++)
-			nr += _free_node(rt, n48->values[i]);
-		free(n48);
-		break;
-
-	case NODE256:
-		n256 = (struct node256 *) v.value.ptr;
-		for (i = 0; i < 256; i++)
-			nr += _free_node(rt, n256->values[i]);
-		free(n256);
-		break;
-	}
-
-	return nr;
-}
-
-void radix_tree_destroy(struct radix_tree *rt)
-{
-	_free_node(rt, rt->root);
-	free(rt);
-}
-
-unsigned radix_tree_size(struct radix_tree *rt)
-{
-	return rt->nr_entries;
-}
-
-static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv);
-
-static bool _insert_unset(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	unsigned len = ke - kb;
-
-	if (!len) {
-		// value
-		v->type = VALUE;
-		v->value = rv;
-		rt->nr_entries++;
-	} else {
-		// prefix -> value
-		struct prefix_chain *pc = zalloc(sizeof(*pc) + len);
-		if (!pc)
-			return false;
-
-		pc->child.type = VALUE;
-		pc->child.value = rv;
-		pc->len = len;
-		memcpy(pc->prefix, kb, len);
-		v->type = PREFIX_CHAIN;
-		v->value.ptr = pc;
-		rt->nr_entries++;
-	}
-
-	return true;
-}
-
-static bool _insert_value(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	unsigned len = ke - kb;
-
-	if (!len)
-		// overwrite
-		v->value = rv;
-
-	else {
-		// value_chain -> value
-		struct value_chain *vc = zalloc(sizeof(*vc));
-		if (!vc)
-			return false;
-
-		vc->value = v->value;
-		if (!_insert(rt, &vc->child, kb, ke, rv)) {
-			free(vc);
-			return false;
-		}
-
-		v->type = VALUE_CHAIN;
-		v->value.ptr = vc;
-	}
-
-	return true;
-}
-
-static bool _insert_value_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct value_chain *vc = v->value.ptr;
-	return _insert(rt, &vc->child, kb, ke, rv);
-}
-
-static unsigned min(unsigned lhs, unsigned rhs)
-{
-	if (lhs <= rhs)
-		return lhs;
-	else
-		return rhs;
-}
-
-static bool _insert_prefix_chain(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct prefix_chain *pc = v->value.ptr;
-
-	if (*kb == pc->prefix[0]) {
-		// There's a common prefix let's split the chain into two and
-		// recurse.
-		struct prefix_chain *pc2;
-		unsigned i, len = min(pc->len, ke - kb);
-
-		for (i = 0; i < len; i++)
-			if (kb[i] != pc->prefix[i])
-				break;
-
-		pc2 = zalloc(sizeof(*pc2) + pc->len - i);
-		pc2->len = pc->len - i;
-		memmove(pc2->prefix, pc->prefix + i, pc2->len);
-		pc2->child = pc->child;
-
-		// FIXME: this trashes pc so we can't back out
-		pc->child.type = PREFIX_CHAIN;
-		pc->child.value.ptr = pc2;
-		pc->len = i;
-
-		if (!_insert(rt, &pc->child, kb + i, ke, rv)) {
-			free(pc2);
-			return false;
-		}
-
-	} else {
-		// Stick an n4 in front.
-		struct node4 *n4 = zalloc(sizeof(*n4));
-		if (!n4)
-			return false;
-
-		n4->keys[0] = *kb;
-		if (!_insert(rt, n4->values, kb + 1, ke, rv)) {
-			free(n4);
-			return false;
-		}
-
-		if (pc->len) {
-			n4->keys[1] = pc->prefix[0];
-			if (pc->len == 1) {
-				n4->values[1] = pc->child;
-				free(pc);
-			} else {
-				memmove(pc->prefix, pc->prefix + 1, pc->len - 1);
-				pc->len--;
-				n4->values[1] = *v;
-			}
-			n4->nr_entries = 2;
-		} else
-			n4->nr_entries = 1;
-
-		v->type = NODE4;
-		v->value.ptr = n4;
-	}
-
-	return true;
-}
-
-static bool _insert_node4(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct node4 *n4 = v->value.ptr;
-	if (n4->nr_entries == 4) {
-		struct node16 *n16 = zalloc(sizeof(*n16));
-		if (!n16)
-			return false;
-
-		n16->nr_entries = 5;
-		memcpy(n16->keys, n4->keys, sizeof(n4->keys));
-		memcpy(n16->values, n4->values, sizeof(n4->values));
-
-		n16->keys[4] = *kb;
-		if (!_insert(rt, n16->values + 4, kb + 1, ke, rv)) {
-			free(n16);
-			return false;
-		}
-		free(n4);
-		v->type = NODE16;
-		v->value.ptr = n16;
-	} else {
-		n4 = v->value.ptr;
-		if (!_insert(rt, n4->values + n4->nr_entries, kb + 1, ke, rv))
-			return false;
-
-		n4->keys[n4->nr_entries] = *kb;
-		n4->nr_entries++;
-	}
-	return true;
-}
-
-static bool _insert_node16(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct node16 *n16 = v->value.ptr;
-
-	if (n16->nr_entries == 16) {
-		unsigned i;
-		struct node48 *n48 = zalloc(sizeof(*n48));
-
-		if (!n48)
-			return false;
-
-		n48->nr_entries = 17;
-		memset(n48->keys, 48, sizeof(n48->keys));
-
-		for (i = 0; i < 16; i++) {
-			n48->keys[n16->keys[i]] = i;
-			n48->values[i] = n16->values[i];
-		}
-
-		n48->keys[*kb] = 16;
-		if (!_insert(rt, n48->values + 16, kb + 1, ke, rv)) {
-			free(n48);
-			return false;
-		}
-
-		free(n16);
-		v->type = NODE48;
-		v->value.ptr = n48;
-	} else {
-		if (!_insert(rt, n16->values + n16->nr_entries, kb + 1, ke, rv))
-			return false;
-		n16->keys[n16->nr_entries] = *kb;
-		n16->nr_entries++;
-	}
-
-	return true;
-}
-
-static bool _insert_node48(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct node48 *n48 = v->value.ptr;
-	if (n48->nr_entries == 48) {
-		unsigned i;
-		struct node256 *n256 = zalloc(sizeof(*n256));
-		if (!n256)
-			return false;
-
-		for (i = 0; i < 256; i++) {
-			if (n48->keys[i] >= 48)
-				continue;
-
-			n256->values[i] = n48->values[n48->keys[i]];
-		}
-
-		if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv)) {
-			free(n256);
-			return false;
-		}
-
-		free(n48);
-		v->type = NODE256;
-		v->value.ptr = n256;
-
-	} else {
-		if (!_insert(rt, n48->values + n48->nr_entries, kb + 1, ke, rv))
-			return false;
-
-		n48->keys[*kb] = n48->nr_entries;
-		n48->nr_entries++;
-	}
-
-	return true;
-}
-
-static bool _insert_node256(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct node256 *n256 = v->value.ptr;
-	bool was_unset = n256->values[*kb].type == UNSET;
-
-	if (!_insert(rt, n256->values + *kb, kb + 1, ke, rv))
-		return false;
-
-	if (was_unset)
-        	n256->nr_entries++;
-
-	return true;
-}
-
-// FIXME: the tree should not be touched if insert fails (eg, OOM)
-static bool _insert(struct radix_tree *rt, struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	if (kb == ke) {
-		if (v->type == UNSET) {
-			v->type = VALUE;
-			v->value = rv;
-			rt->nr_entries++;
-
-		} else if (v->type == VALUE) {
-			v->value = rv;
-
-		} else {
-			struct value_chain *vc = zalloc(sizeof(*vc));
-			if (!vc)
-				return false;
-
-			vc->value = rv;
-			vc->child = *v;
-			v->type = VALUE_CHAIN;
-			v->value.ptr = vc;
-			rt->nr_entries++;
-		}
-		return true;
-	}
-
-	switch (v->type) {
-	case UNSET:
-		return _insert_unset(rt, v, kb, ke, rv);
-
-	case VALUE:
-		return _insert_value(rt, v, kb, ke, rv);
-
-	case VALUE_CHAIN:
-		return _insert_value_chain(rt, v, kb, ke, rv);
-
-	case PREFIX_CHAIN:
-		return _insert_prefix_chain(rt, v, kb, ke, rv);
-
-	case NODE4:
-		return _insert_node4(rt, v, kb, ke, rv);
-
-	case NODE16:
-		return _insert_node16(rt, v, kb, ke, rv);
-
-	case NODE48:
-		return _insert_node48(rt, v, kb, ke, rv);
-
-	case NODE256:
-		return _insert_node256(rt, v, kb, ke, rv);
-	}
-
-	// can't get here
-	return false;
-}
-
-struct lookup_result {
-	struct value *v;
-	uint8_t *kb;
-};
-
-static struct lookup_result _lookup_prefix(struct value *v, uint8_t *kb, uint8_t *ke)
-{
-	unsigned i;
-	struct value_chain *vc;
-	struct prefix_chain *pc;
-	struct node4 *n4;
-	struct node16 *n16;
-	struct node48 *n48;
-	struct node256 *n256;
-
-	if (kb == ke)
-		return (struct lookup_result) {.v = v, .kb = kb};
-
-	switch (v->type) {
-	case UNSET:
-	case VALUE:
-		break;
-
-	case VALUE_CHAIN:
-		vc = v->value.ptr;
-		return _lookup_prefix(&vc->child, kb, ke);
-
-	case PREFIX_CHAIN:
-		pc = v->value.ptr;
-		if (ke - kb < pc->len)
-			return (struct lookup_result) {.v = v, .kb = kb};
-
-		for (i = 0; i < pc->len; i++)
-			if (kb[i] != pc->prefix[i])
-				return (struct lookup_result) {.v = v, .kb = kb};
-
-		return _lookup_prefix(&pc->child, kb + pc->len, ke);
-
-	case NODE4:
-		n4 = v->value.ptr;
-		for (i = 0; i < n4->nr_entries; i++)
-			if (n4->keys[i] == *kb)
-				return _lookup_prefix(n4->values + i, kb + 1, ke);
-		break;
-
-	case NODE16:
-		// FIXME: use binary search or simd?
-		n16 = v->value.ptr;
-		for (i = 0; i < n16->nr_entries; i++)
-			if (n16->keys[i] == *kb)
-				return _lookup_prefix(n16->values + i, kb + 1, ke);
-		break;
-
-	case NODE48:
-		n48 = v->value.ptr;
-		i = n48->keys[*kb];
-		if (i < 48)
-			return _lookup_prefix(n48->values + i, kb + 1, ke);
-		break;
-
-	case NODE256:
-		n256 = v->value.ptr;
-		return _lookup_prefix(n256->values + *kb, kb + 1, ke);
-	}
-
-	return (struct lookup_result) {.v = v, .kb = kb};
-}
-
-bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value rv)
-{
-	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
-	return _insert(rt, lr.v, lr.kb, ke, rv);
-}
-
-// Note the degrade functions also free the original node.
-static void _degrade_to_n4(struct node16 *n16, struct value *result)
-{
-        struct node4 *n4 = zalloc(sizeof(*n4));
-
-        n4->nr_entries = n16->nr_entries;
-        memcpy(n4->keys, n16->keys, n16->nr_entries * sizeof(*n4->keys));
-        memcpy(n4->values, n16->values, n16->nr_entries * sizeof(*n4->values));
-        free(n16);
-
-	result->type = NODE4;
-	result->value.ptr = n4;
-}
-
-static void _degrade_to_n16(struct node48 *n48, struct value *result)
-{
-        struct node4 *n16 = zalloc(sizeof(*n16));
-
-        n16->nr_entries = n48->nr_entries;
-        memcpy(n16->keys, n48->keys, n48->nr_entries * sizeof(*n16->keys));
-        memcpy(n16->values, n48->values, n48->nr_entries * sizeof(*n16->values));
-        free(n48);
-
-	result->type = NODE16;
-	result->value.ptr = n16;
-}
-
-static void _degrade_to_n48(struct node256 *n256, struct value *result)
-{
-        unsigned i, count = 0;
-        struct node4 *n48 = zalloc(sizeof(*n48));
-
-        n48->nr_entries = n256->nr_entries;
-        for (i = 0; i < 256; i++) {
-		if (n256->values[i].type == UNSET)
-        		continue;
-
-		n48->keys[count] = i;
-		n48->values[count] = n256->values[i];
-		count++;
-        }
-        free(n256);
-
-	result->type = NODE48;
-	result->value.ptr = n48;
-}
-
-static bool _remove(struct radix_tree *rt, struct value *root, uint8_t *kb, uint8_t *ke)
-{
-	bool r;
-	unsigned i;
-	struct value_chain *vc;
-	struct prefix_chain *pc;
-	struct node4 *n4;
-	struct node16 *n16;
-	struct node48 *n48;
-	struct node256 *n256;
-
-	if (kb == ke) {
-        	if (root->type == VALUE) {
-                	root->type = UNSET;
-                	_dtr(rt, root->value);
-                	return true;
-
-                } else if (root->type == VALUE_CHAIN) {
-			vc = root->value.ptr;
-			_dtr(rt, vc->value);
-			memcpy(root, &vc->child, sizeof(*root));
-			free(vc);
-			return true;
-
-                } else
-			return false;
-	}
-
-	switch (root->type) {
-	case UNSET:
-	case VALUE:
-        	// this is a value for a prefix of the key
-        	return false;
-
-	case VALUE_CHAIN:
-		vc = root->value.ptr;
-		r = _remove(rt, &vc->child, kb, ke);
-		if (r && (vc->child.type == UNSET)) {
-			memcpy(root, &vc->child, sizeof(*root));
-			free(vc);
-		}
-		return r;
-
-	case PREFIX_CHAIN:
-		pc = root->value.ptr;
-		if (ke - kb < pc->len)
-        		return false;
-
-		for (i = 0; i < pc->len; i++)
-			if (kb[i] != pc->prefix[i])
-        			return false;
-
-		return _remove(rt, &pc->child, kb + pc->len, ke);
-
-	case NODE4:
-		n4 = root->value.ptr;
-		for (i = 0; i < n4->nr_entries; i++) {
-			if (n4->keys[i] == *kb) {
-				r = _remove(rt, n4->values + i, kb + 1, ke);
-				if (r && n4->values[i].type == UNSET) {
-        				n4->nr_entries--;
-        				if (i < n4->nr_entries)
-                				// slide the entries down
-        					memmove(n4->keys + i, n4->keys + i + 1,
-                                                       sizeof(*n4->keys) * (n4->nr_entries - i));
-					if (!n4->nr_entries)
-						root->type = UNSET;
-				}
-				return r;
-			}
-		}
-		return false;
-
-	case NODE16:
-        	n16 = root->value.ptr;
-		for (i = 0; i < n16->nr_entries; i++) {
-			if (n16->keys[i] == *kb) {
-				r = _remove(rt, n16->values + i, kb + 1, ke);
-				if (r && n16->values[i].type == UNSET) {
-        				n16->nr_entries--;
-        				if (i < n16->nr_entries)
-                				// slide the entries down
-        					memmove(n16->keys + i, n16->keys + i + 1,
-                                                        sizeof(*n16->keys) * (n16->nr_entries - i));
-					if (n16->nr_entries <= 4)
-        					_degrade_to_n4(n16, root);
-				}
-				return r;
-			}
-		}
-		return false;
-
-	case NODE48:
-		n48 = root->value.ptr;
-		i = n48->keys[*kb];
-		if (i < 48) {
-        		r = _remove(rt, n48->values + i, kb + 1, ke);
-        		if (r && n48->values[i].type == UNSET) {
-                		n48->keys[*kb] = 48;
-				n48->nr_entries--;
-				if (n48->nr_entries <= 16)
-        				_degrade_to_n16(n48, root);
-        		}
-        		return r;
-		}
-		return false;
-
-	case NODE256:
-		n256 = root->value.ptr;
-		r = _remove(rt, n256->values + (*kb), kb + 1, ke);
-		if (r && n256->values[*kb].type == UNSET) {
-			n256->nr_entries--;
-			if (n256->nr_entries <= 48)
-        			_degrade_to_n48(n256, root);
-		}
-		return r;
-	}
-
-	return false;
-}
-
-bool radix_tree_remove(struct radix_tree *rt, uint8_t *key_begin, uint8_t *key_end)
-{
-	if (_remove(rt, &rt->root, key_begin, key_end)) {
-        	rt->nr_entries--;
-        	return true;
-	}
-
-	return false;
-}
-
-static bool _prefix_chain_matches(struct lookup_result *lr, uint8_t *ke)
-{
-        // It's possible the top node is a prefix chain, and
-        // the remaining key matches part of it.
-        if (lr->v->type == PREFIX_CHAIN) {
-                unsigned i, rlen = ke - lr->kb;
-                struct prefix_chain *pc = lr->v->value.ptr;
-                if (rlen < pc->len) {
-                        for (i = 0; i < rlen; i++)
-                                if (pc->prefix[i] != lr->kb[i])
-                                        return false;
-                        return true;
-		}
-        }
-
-        return false;
-}
-
-unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
-{
-        unsigned count = 0;
-	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
-	if (lr.kb == ke || _prefix_chain_matches(&lr, ke)) {
-        	count = _free_node(rt, *lr.v);
-        	lr.v->type = UNSET;
-	}
-
-	rt->nr_entries -= count;
-	return count;
-}
-
-bool radix_tree_lookup(struct radix_tree *rt,
-		       uint8_t *kb, uint8_t *ke, union radix_value *result)
-{
-	struct value_chain *vc;
-	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
-	if (lr.kb == ke) {
-		switch (lr.v->type) {
-		case VALUE:
-			*result = lr.v->value;
-			return true;
-
-		case VALUE_CHAIN:
-			vc = lr.v->value.ptr;
-			*result = vc->value;
-			return true;
-
-		default:
-			return false;
-		}
-	}
-
-	return false;
-}
-
-// FIXME: build up the keys too
-static bool _iterate(struct value *v, struct radix_tree_iterator *it)
-{
-	unsigned i;
-	struct value_chain *vc;
-	struct prefix_chain *pc;
-	struct node4 *n4;
-	struct node16 *n16;
-	struct node48 *n48;
-	struct node256 *n256;
-
-	switch (v->type) {
-	case UNSET:
-        	// can't happen
-		break;
-
-	case VALUE:
-        	return it->visit(it, NULL, NULL, v->value);
-
-	case VALUE_CHAIN:
-		vc = v->value.ptr;
-		return it->visit(it, NULL, NULL, vc->value) && _iterate(&vc->child, it);
-
-	case PREFIX_CHAIN:
-		pc = v->value.ptr;
-		return _iterate(&pc->child, it);
-
-	case NODE4:
-		n4 = (struct node4 *) v->value.ptr;
-		for (i = 0; i < n4->nr_entries; i++)
-			if (!_iterate(n4->values + i, it))
-        			return false;
-        	return true;
-
-	case NODE16:
-		n16 = (struct node16 *) v->value.ptr;
-		for (i = 0; i < n16->nr_entries; i++)
-        		if (!_iterate(n16->values + i, it))
-        			return false;
-		return true;
-
-	case NODE48:
-		n48 = (struct node48 *) v->value.ptr;
-		for (i = 0; i < n48->nr_entries; i++)
-        		if (!_iterate(n48->values + i, it))
-        			return false;
-		return true;
-
-	case NODE256:
-		n256 = (struct node256 *) v->value.ptr;
-		for (i = 0; i < 256; i++)
-        		if (n256->values[i].type != UNSET && !_iterate(n256->values + i, it))
-        			return false;
-		return true;
-	}
-
-	// can't get here
-	return false;
-}
-
-void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
-                        struct radix_tree_iterator *it)
-{
-	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
-	if (lr.kb == ke || _prefix_chain_matches(&lr, ke))
-        	_iterate(lr.v, it);
-}
+#ifdef SIMPLE_RADIX_TREE
+#include "base/data-struct/radix-tree-simple.c"
+#else
+#include "base/data-struct/radix-tree-adaptive.c"
+#endif
 
 //----------------------------------------------------------------
diff --git a/base/data-struct/radix-tree.h b/base/data-struct/radix-tree.h
index 1b6aee8..5d4d04c 100644
--- a/base/data-struct/radix-tree.h
+++ b/base/data-struct/radix-tree.h
@@ -15,6 +15,7 @@
 
 #include <stdbool.h>
 #include <stdint.h>
+#include <stdio.h>
 
 //----------------------------------------------------------------
 
@@ -53,6 +54,11 @@ struct radix_tree_iterator {
 void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
                         struct radix_tree_iterator *it);
 
+// Checks that some structural constraints on the shape of the tree
+// hold.  For debug only.
+bool radix_tree_is_well_formed(struct radix_tree *rt);
+void radix_tree_dump(struct radix_tree *rt, FILE *out);
+
 //----------------------------------------------------------------
 
 #endif
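The well-formedness check and dump declared above are exercised heavily by the unit tests later in this patch.  A minimal sketch of the intended usage, assuming only the API declared in this header (the failure handling is illustrative):

/* Sketch: verify tree invariants after an update and dump the shape on failure. */
#include <stdio.h>
#include <stdlib.h>

#include "base/data-struct/radix-tree.h"

int main(void)
{
	uint8_t k = 'a';
	union radix_value v = { .n = 1 };
	struct radix_tree *rt = radix_tree_create(NULL, NULL);

	if (!rt)
		return 1;

	if (!radix_tree_insert(rt, &k, &k + 1, v) ||
	    !radix_tree_is_well_formed(rt)) {
		radix_tree_dump(rt, stderr);	/* show the offending structure */
		abort();			/* illustrative: fail hard while debugging */
	}

	radix_tree_destroy(rt);
	return 0;
}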
diff --git a/lib/device/bcache.c b/lib/device/bcache.c
index d487ca2..b64707e 100644
--- a/lib/device/bcache.c
+++ b/lib/device/bcache.c
@@ -12,9 +12,9 @@
  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#define _GNU_SOURCE
-
 #include "bcache.h"
+
+#include "base/data-struct/radix-tree.h"
 #include "lvm-logging.h"
 #include "log.h"
 
@@ -67,14 +67,14 @@ struct cb_set {
 static struct cb_set *_cb_set_create(unsigned nr)
 {
 	int i;
-	struct cb_set *cbs = dm_malloc(sizeof(*cbs));
+	struct cb_set *cbs = malloc(sizeof(*cbs));
 
 	if (!cbs)
 		return NULL;
 
-	cbs->vec = dm_malloc(nr * sizeof(*cbs->vec));
+	cbs->vec = malloc(nr * sizeof(*cbs->vec));
 	if (!cbs->vec) {
-		dm_free(cbs);
+		free(cbs);
 		return NULL;
 	}
 
@@ -97,8 +97,8 @@ static void _cb_set_destroy(struct cb_set *cbs)
 		return;
 	}
 
-	dm_free(cbs->vec);
-	dm_free(cbs);
+	free(cbs->vec);
+	free(cbs);
 }
 
 static struct control_block *_cb_alloc(struct cb_set *cbs, void *context)
@@ -152,7 +152,7 @@ static void _async_destroy(struct io_engine *ioe)
 	if (r)
 		log_sys_warn("io_destroy");
 
-	dm_free(e);
+	free(e);
 }
 
 static int _last_byte_fd;
@@ -169,7 +169,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
 	sector_t offset;
 	sector_t nbytes;
 	sector_t limit_nbytes;
-	sector_t orig_nbytes;
 	sector_t extra_nbytes = 0;
 
 	if (((uintptr_t) data) & e->page_mask) {
@@ -192,41 +191,11 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
 			return false;
 		}
 
-		/*
-		 * If the bcache block offset+len goes beyond where lvm is
-		 * intending to write, then reduce the len being written
-		 * (which is the bcache block size) so we don't write past
-		 * the limit set by lvm.  If after applying the limit, the
-		 * resulting size is not a multiple of the sector size (512
-		 * or 4096) then extend the reduced size to be a multiple of
-		 * the sector size (we don't want to write partial sectors.)
-		 */
 		if (offset + nbytes > _last_byte_offset) {
 			limit_nbytes = _last_byte_offset - offset;
-
-			if (limit_nbytes % _last_byte_sector_size) {
+			if (limit_nbytes % _last_byte_sector_size)
 				extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
 
-				/*
-				 * adding extra_nbytes to the reduced nbytes (limit_nbytes)
-				 * should make the final write size a multiple of the
-				 * sector size.  This should never result in a final size
-				 * larger than the bcache block size (as long as the bcache
-				 * block size is a multiple of the sector size).
-				 */
-				if (limit_nbytes + extra_nbytes > nbytes) {
-					log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
-						 (unsigned long long)offset,
-						 (unsigned long long)nbytes,
-						 (unsigned long long)limit_nbytes,
-						 (unsigned long long)extra_nbytes,
-						 (unsigned long long)_last_byte_sector_size);
-					extra_nbytes = 0;
-				}
-			}
-
-			orig_nbytes = nbytes;
-
 			if (extra_nbytes) {
 				log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
 					  (unsigned long long)offset,
@@ -241,22 +210,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
 					  (unsigned long long)limit_nbytes);
 				nbytes = limit_nbytes;
 			}
-
-			/*
-			 * This shouldn't happen, the reduced+extended
-			 * nbytes value should never be larger than the
-			 * bcache block size.
-			 */
-			if (nbytes > orig_nbytes) {
-				log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
-					  (unsigned long long)offset,
-					  (unsigned long long)orig_nbytes,
-					  (unsigned long long)nbytes,
-					  (unsigned long long)limit_nbytes,
-					  (unsigned long long)extra_nbytes,
-					  (unsigned long long)_last_byte_sector_size);
-				return false;
-			}
 		}
 	}
 
@@ -361,7 +314,7 @@ static unsigned _async_max_io(struct io_engine *e)
 struct io_engine *create_async_io_engine(void)
 {
 	int r;
-	struct async_engine *e = dm_malloc(sizeof(*e));
+	struct async_engine *e = malloc(sizeof(*e));
 
 	if (!e)
 		return NULL;
@@ -375,14 +328,14 @@ struct io_engine *create_async_io_engine(void)
 	r = io_setup(MAX_IO, &e->aio_context);
 	if (r < 0) {
 		log_debug("io_setup failed %d", r);
-		dm_free(e);
+		free(e);
 		return NULL;
 	}
 
 	e->cbs = _cb_set_create(MAX_IO);
 	if (!e->cbs) {
 		log_warn("couldn't create control block set");
-		dm_free(e);
+		free(e);
 		return NULL;
 	}
 
@@ -411,7 +364,7 @@ static struct sync_engine *_to_sync(struct io_engine *e)
 static void _sync_destroy(struct io_engine *ioe)
 {
         struct sync_engine *e = _to_sync(ioe);
-        dm_free(e);
+        free(e);
 }
 
 static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
@@ -430,7 +383,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
 	}
 
 	where = sb * 512;
-
 	off = lseek(fd, where, SEEK_SET);
 	if (off == (off_t) -1) {
 		log_warn("Device seek error %d for offset %llu", errno, (unsigned long long)where);
@@ -451,7 +403,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
 		uint64_t nbytes = len;
 		sector_t limit_nbytes = 0;
 		sector_t extra_nbytes = 0;
-		sector_t orig_nbytes = 0;
 
 		if (offset > _last_byte_offset) {
 			log_error("Limit write at %llu len %llu beyond last byte %llu",
@@ -464,30 +415,9 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
 
 		if (offset + nbytes > _last_byte_offset) {
 			limit_nbytes = _last_byte_offset - offset;
-
-			if (limit_nbytes % _last_byte_sector_size) {
+			if (limit_nbytes % _last_byte_sector_size)
 				extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
 
-				/*
-				 * adding extra_nbytes to the reduced nbytes (limit_nbytes)
-				 * should make the final write size a multiple of the
-				 * sector size.  This should never result in a final size
-				 * larger than the bcache block size (as long as the bcache
-				 * block size is a multiple of the sector size).
-				 */
-				if (limit_nbytes + extra_nbytes > nbytes) {
-					log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
-						 (unsigned long long)offset,
-						 (unsigned long long)nbytes,
-						 (unsigned long long)limit_nbytes,
-						 (unsigned long long)extra_nbytes,
-						 (unsigned long long)_last_byte_sector_size);
-					extra_nbytes = 0;
-				}
-			}
-
-			orig_nbytes = nbytes;
-
 			if (extra_nbytes) {
 				log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
 					  (unsigned long long)offset,
@@ -502,23 +432,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
 					  (unsigned long long)limit_nbytes);
 				nbytes = limit_nbytes;
 			}
-
-			/*
-			 * This shouldn't happen, the reduced+extended
-			 * nbytes value should never be larger than the
-			 * bcache block size.
-			 */
-			if (nbytes > orig_nbytes) {
-				log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
-					  (unsigned long long)offset,
-					  (unsigned long long)orig_nbytes,
-					  (unsigned long long)nbytes,
-					  (unsigned long long)limit_nbytes,
-					  (unsigned long long)extra_nbytes,
-					  (unsigned long long)_last_byte_sector_size);
-                                free(io);
-				return false;
-			}
 		}
 
 		where = offset;
@@ -580,7 +493,7 @@ static bool _sync_wait(struct io_engine *ioe, io_complete_fn fn)
 	dm_list_iterate_items_safe(io, tmp, &e->complete) {
 		fn(io->context, 0);
 		dm_list_del(&io->list);
-		dm_free(io);
+		free(io);
 	}
 
 	return true;
@@ -593,7 +506,7 @@ static unsigned _sync_max_io(struct io_engine *e)
 
 struct io_engine *create_sync_io_engine(void)
 {
-	struct sync_engine *e = dm_malloc(sizeof(*e));
+	struct sync_engine *e = malloc(sizeof(*e));
 
 	if (!e)
         	return NULL;
@@ -673,12 +586,7 @@ struct bcache {
 	struct dm_list clean;
 	struct dm_list io_pending;
 
-	/*
-	 * Hash table.
-	 */
-	unsigned nr_buckets;
-	unsigned hash_mask;
-	struct dm_list *buckets;
+	struct radix_tree *rtree;
 
 	/*
 	 * Statistics
@@ -693,75 +601,50 @@ struct bcache {
 
 //----------------------------------------------------------------
 
-/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+struct key_parts {
+	uint32_t fd;
+	uint64_t b;
+} __attribute__ ((packed));
 
-static unsigned _hash(struct bcache *cache, int fd, uint64_t i)
-{
-	uint64_t h = (i << 10) & fd;
-	h *= GOLDEN_RATIO_PRIME_64;
-	return h & cache->hash_mask;
-}
+union key {
+	struct key_parts parts;
+        uint8_t bytes[12];
+};
 
-static struct block *_hash_lookup(struct bcache *cache, int fd, uint64_t i)
+static struct block *_block_lookup(struct bcache *cache, int fd, uint64_t i)
 {
-	struct block *b;
-	unsigned h = _hash(cache, fd, i);
+	union key k;
+	union radix_value v;
 
-	dm_list_iterate_items_gen (b, cache->buckets + h, hash)
-		if (b->fd == fd && b->index == i)
-			return b;
+	k.parts.fd = fd;
+	k.parts.b = i;
 
-	return NULL;
-}
-
-static void _hash_insert(struct block *b)
-{
-	unsigned h = _hash(b->cache, b->fd, b->index);
-	dm_list_add_h(b->cache->buckets + h, &b->hash);
-}
+	if (radix_tree_lookup(cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), &v))
+		return v.ptr;
 
-static inline void _hash_remove(struct block *b)
-{
-	dm_list_del(&b->hash);
+	return NULL;
 }
 
-/*
- * Must return a power of 2.
- */
-static unsigned _calc_nr_buckets(unsigned nr_blocks)
+static bool _block_insert(struct block *b)
 {
-	unsigned r = 8;
-	unsigned n = nr_blocks / 4;
+        union key k;
+        union radix_value v;
 
-	if (n < 8)
-		n = 8;
+        k.parts.fd = b->fd;
+        k.parts.b = b->index;
+        v.ptr = b;
 
-	while (r < n)
-		r <<= 1;
-
-	return r;
+	return radix_tree_insert(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), v);
 }
 
-static bool _hash_table_init(struct bcache *cache, unsigned nr_entries)
+static void _block_remove(struct block *b)
 {
-	unsigned i;
-
-	cache->nr_buckets = _calc_nr_buckets(nr_entries);
-	cache->hash_mask = cache->nr_buckets - 1;
-	cache->buckets = dm_malloc(cache->nr_buckets * sizeof(*cache->buckets));
-	if (!cache->buckets)
-		return false;
+        union key k;
 
-	for (i = 0; i < cache->nr_buckets; i++)
-		dm_list_init(cache->buckets + i);
+        k.parts.fd = b->fd;
+        k.parts.b = b->index;
 
-	return true;
-}
-
-static void _hash_table_exit(struct bcache *cache)
-{
-	dm_free(cache->buckets);
+	radix_tree_remove(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes));
 }
 
 //----------------------------------------------------------------
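The packed key above is the heart of the hash-table replacement: the 4-byte fd is placed first, so every block belonging to one file descriptor shares a key prefix and can be dropped with a single radix_tree_remove_prefix() call (as bcache_invalidate_fd and bcache_abort_fd do below).  A standalone sketch of that property, with illustrative fd and block numbers:

/* Sketch: pack (fd, block) the way bcache does and drop a whole fd by prefix. */
#include <assert.h>
#include <stdint.h>

#include "base/data-struct/radix-tree.h"

struct key_parts {
	uint32_t fd;
	uint64_t b;
} __attribute__ ((packed));

union key {
	struct key_parts parts;
	uint8_t bytes[12];
};

int main(void)
{
	union key k;
	union radix_value v;
	struct radix_tree *rt = radix_tree_create(NULL, NULL);

	assert(rt);

	k.parts.fd = 5; k.parts.b = 0; v.n = 100;
	assert(radix_tree_insert(rt, k.bytes, k.bytes + sizeof(k.bytes), v));
	k.parts.b = 1; v.n = 101;
	assert(radix_tree_insert(rt, k.bytes, k.bytes + sizeof(k.bytes), v));
	k.parts.fd = 6; k.parts.b = 0; v.n = 200;
	assert(radix_tree_insert(rt, k.bytes, k.bytes + sizeof(k.bytes), v));

	/* the first sizeof(k.parts.fd) bytes select everything cached for fd 5 */
	k.parts.fd = 5;
	assert(radix_tree_remove_prefix(rt, k.bytes, k.bytes + sizeof(k.parts.fd)) == 2);

	/* fd 6 is untouched */
	k.parts.fd = 6; k.parts.b = 0;
	assert(radix_tree_lookup(rt, k.bytes, k.bytes + sizeof(k.bytes), &v) && v.n == 200);

	radix_tree_destroy(rt);
	return 0;
}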
@@ -777,7 +660,7 @@ static bool _init_free_list(struct bcache *cache, unsigned count, unsigned pgsiz
 	if (!data)
 		return false;
 
-	cache->raw_blocks = dm_malloc(count * sizeof(*cache->raw_blocks));
+	cache->raw_blocks = malloc(count * sizeof(*cache->raw_blocks));
 	if (!cache->raw_blocks) {
 		free(data);
 		return false;
@@ -797,8 +680,8 @@ static bool _init_free_list(struct bcache *cache, unsigned count, unsigned pgsiz
 
 static void _exit_free_list(struct bcache *cache)
 {
-	dm_free(cache->raw_data);
-	dm_free(cache->raw_blocks);
+	free(cache->raw_data);
+	free(cache->raw_blocks);
 }
 
 static struct block *_alloc_block(struct bcache *cache)
@@ -809,6 +692,11 @@ static struct block *_alloc_block(struct bcache *cache)
 	return dm_list_struct_base(_list_pop(&cache->free), struct block, list);
 }
 
+static void _free_block(struct block *b)
+{
+	dm_list_add(&b->cache->free, &b->list);
+}
+
 /*----------------------------------------------------------------
  * Clean/dirty list management.
  * Always use these methods to ensure nr_dirty_ is correct.
@@ -963,7 +851,7 @@ static struct block *_find_unused_clean_block(struct bcache *cache)
 	dm_list_iterate_items (b, &cache->clean) {
 		if (!b->ref_count) {
 			_unlink_block(b);
-			_hash_remove(b);
+			_block_remove(b);
 			return b;
 		}
 	}
@@ -993,29 +881,18 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b
 
 	if (b) {
 		dm_list_init(&b->list);
-		dm_list_init(&b->hash);
 		b->flags = 0;
 		b->fd = fd;
 		b->index = i;
 		b->ref_count = 0;
 		b->error = 0;
 
-		_hash_insert(b);
-	}
-
-#if 0
-	if (!b) {
-		log_error("bcache no new blocks for fd %d index %u "
-			  "clean %u free %u dirty %u pending %u nr_data_blocks %u nr_cache_blocks %u",
-			  fd, (uint32_t) i,
-			  dm_list_size(&cache->clean),
-			  dm_list_size(&cache->free),
-			  dm_list_size(&cache->dirty),
-			  dm_list_size(&cache->io_pending),
-			  (uint32_t)cache->nr_data_blocks,
-			  (uint32_t)cache->nr_cache_blocks);
+		if (!_block_insert(b)) {
+        		log_error("bcache unable to insert block in radix tree (OOM?)");
+			_free_block(b);
+			return NULL;
+		}
 	}
-#endif
 
 	return b;
 }
@@ -1054,7 +931,7 @@ static struct block *_lookup_or_read_block(struct bcache *cache,
 				  	   int fd, block_address i,
 					   unsigned flags)
 {
-	struct block *b = _hash_lookup(cache, fd, i);
+	struct block *b = _block_lookup(cache, fd, i);
 
 	if (b) {
 		// FIXME: this is insufficient.  We need to also catch a read
@@ -1125,8 +1002,8 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 	unsigned max_io = engine->max_io(engine);
 	long pgsize = sysconf(_SC_PAGESIZE);
 
-	if ((pgsize = sysconf(_SC_PAGESIZE)) < 0) {
-		log_warn("bcache cannot read pagesize.");
+	if (pgsize < 0) {
+		log_warn("WARNING: _SC_PAGESIZE returned a negative value.");
 		return NULL;
 	}
 
@@ -1145,7 +1022,7 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 		return NULL;
 	}
 
-	cache = dm_malloc(sizeof(*cache));
+	cache = malloc(sizeof(*cache));
 	if (!cache)
 		return NULL;
 
@@ -1163,9 +1040,10 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 	dm_list_init(&cache->clean);
 	dm_list_init(&cache->io_pending);
 
-	if (!_hash_table_init(cache, nr_cache_blocks)) {
+        cache->rtree = radix_tree_create(NULL, NULL);
+	if (!cache->rtree) {
 		cache->engine->destroy(cache->engine);
-		dm_free(cache);
+		free(cache);
 		return NULL;
 	}
 
@@ -1178,8 +1056,8 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 
 	if (!_init_free_list(cache, nr_cache_blocks, pgsize)) {
 		cache->engine->destroy(cache->engine);
-		_hash_table_exit(cache);
-		dm_free(cache);
+		radix_tree_destroy(cache->rtree);
+		free(cache);
 		return NULL;
 	}
 
@@ -1192,12 +1070,12 @@ void bcache_destroy(struct bcache *cache)
 		log_warn("some blocks are still locked");
 
 	if (!bcache_flush(cache))
-		log_warn("cache flushing failed.");
+		stack;
 	_wait_all(cache);
 	_exit_free_list(cache);
-	_hash_table_exit(cache);
+	radix_tree_destroy(cache->rtree);
 	cache->engine->destroy(cache->engine);
-	dm_free(cache);
+	free(cache);
 }
 
 sector_t bcache_block_sectors(struct bcache *cache)
@@ -1217,7 +1095,7 @@ unsigned bcache_max_prefetches(struct bcache *cache)
 
 void bcache_prefetch(struct bcache *cache, int fd, block_address i)
 {
-	struct block *b = _hash_lookup(cache, fd, i);
+	struct block *b = _block_lookup(cache, fd, i);
 
 	if (!b) {
 		if (cache->nr_io_pending < cache->max_io) {
@@ -1230,11 +1108,13 @@ void bcache_prefetch(struct bcache *cache, int fd, block_address i)
 	}
 }
 
+//----------------------------------------------------------------
+
 static void _recycle_block(struct bcache *cache, struct block *b)
 {
 	_unlink_block(b);
-	_hash_remove(b);
-	dm_list_add(&cache->free, &b->list);
+	_block_remove(b);
+	_free_block(b);
 }
 
 bool bcache_get(struct bcache *cache, int fd, block_address i,
@@ -1268,6 +1148,8 @@ bool bcache_get(struct bcache *cache, int fd, block_address i,
 	return false;
 }
 
+//----------------------------------------------------------------
+
 static void _put_ref(struct block *b)
 {
 	if (!b->ref_count) {
@@ -1288,6 +1170,8 @@ void bcache_put(struct block *b)
 		_preemptive_writeback(b->cache);
 }
 
+//----------------------------------------------------------------
+
 bool bcache_flush(struct bcache *cache)
 {
 	// Only dirty data is on the errored list, since bad read blocks get
@@ -1310,6 +1194,7 @@ bool bcache_flush(struct bcache *cache)
 	return dm_list_empty(&cache->errored);
 }
 
+//----------------------------------------------------------------
 /*
  * You can safely call this with a NULL block.
  */
@@ -1342,29 +1227,108 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
 
 bool bcache_invalidate(struct bcache *cache, int fd, block_address i)
 {
-	return _invalidate_block(cache, _hash_lookup(cache, fd, i));
+	return _invalidate_block(cache, _block_lookup(cache, fd, i));
+}
+
+//----------------------------------------------------------------
+
+struct invalidate_iterator {
+	bool success;
+	struct radix_tree_iterator it;
+};
+
+static bool _writeback_v(struct radix_tree_iterator *it,
+                         uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+	struct block *b = v.ptr;
+
+	if (_test_flags(b, BF_DIRTY))
+        	_issue_write(b);
+
+        return true;
+}
+
+static bool _invalidate_v(struct radix_tree_iterator *it,
+                          uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+	struct block *b = v.ptr;
+        struct invalidate_iterator *iit = container_of(it, struct invalidate_iterator, it);
+
+	if (b->error || _test_flags(b, BF_DIRTY)) {
+        	log_warn("bcache_invalidate: block (%d, %llu) still dirty",
+                         b->fd, (unsigned long long) b->index);
+        	iit->success = false;
+        	return true;
+	}
+
+	if (b->ref_count) {
+		log_warn("bcache_invalidate: block (%d, %llu) still held",
+			 b->fd, (unsigned long long) b->index);
+		iit->success = false;
+		return true;
+	}
+
+	_unlink_block(b);
+	_free_block(b);
+
+	// We can't remove the block from the radix tree yet because
+	// we're in the middle of an iteration.
+	return true;
 }
 
-// FIXME: switch to a trie, or maybe 1 hash table per fd?  To save iterating
-// through the whole cache.
 bool bcache_invalidate_fd(struct bcache *cache, int fd)
 {
-	struct block *b, *tmp;
-	bool r = true;
+        union key k;
+	struct invalidate_iterator it;
 
-	// Start writing back any dirty blocks on this fd.
-	dm_list_iterate_items_safe (b, tmp, &cache->dirty)
-		if (b->fd == fd)
-			_issue_write(b);
+	k.parts.fd = fd;
+
+	it.it.visit = _writeback_v;
+	radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it);
 
 	_wait_all(cache);
 
-	// Everything should be in the clean list now.
-	dm_list_iterate_items_safe (b, tmp, &cache->clean)
-		if (b->fd == fd)
-			r = _invalidate_block(cache, b) && r;
+	it.success = true;
+	it.it.visit = _invalidate_v;
+	radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it);
+
+	if (it.success)
+		radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd));
+
+	return it.success;
+}
+
+//----------------------------------------------------------------
+
+static bool _abort_v(struct radix_tree_iterator *it,
+                     uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+	struct block *b = v.ptr;
+
+	if (b->ref_count) {
+		log_fatal("bcache_abort: block (%d, %llu) still held",
+			 b->fd, (unsigned long long) b->index);
+		return true;
+	}
+
+	_unlink_block(b);
+	_free_block(b);
+
+	// We can't remove the block from the radix tree yet because
+	// we're in the middle of an iteration.
+	return true;
+}
+
+void bcache_abort_fd(struct bcache *cache, int fd)
+{
+        union key k;
+	struct radix_tree_iterator it;
+
+	k.parts.fd = fd;
 
-       return r;
+	it.visit = _abort_v;
+	radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it);
+	radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd));
 }
 
 //----------------------------------------------------------------
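Both _invalidate_v() and _abort_v() above deliberately leave the radix-tree entries in place and rely on radix_tree_remove_prefix() once the walk has finished, because a visit callback must not modify the tree it is iterating.  A reduced sketch of that two-pass pattern, using an illustrative counting visitor:

/* Sketch: pass 1 only inspects values; pass 2 removes the whole prefix. */
#include <stdint.h>

#include "base/data-struct/radix-tree.h"
#include "base/memory/container_of.h"

struct count_it {
	unsigned seen;
	struct radix_tree_iterator it;
};

static bool _count_v(struct radix_tree_iterator *it,
		     uint8_t *kb, uint8_t *ke, union radix_value v)
{
	struct count_it *c = container_of(it, struct count_it, it);

	c->seen++;	/* inspect only; never touch the tree from a visitor */
	return true;	/* keep iterating */
}

static unsigned _drop_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
{
	struct count_it c = { 0 };

	c.it.visit = _count_v;
	radix_tree_iterate(rt, kb, ke, &c.it);		/* pass 1: visit */

	return radix_tree_remove_prefix(rt, kb, ke);	/* pass 2: remove */
}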
diff --git a/lib/device/bcache.h b/lib/device/bcache.h
index cb902ef..f9067f7 100644
--- a/lib/device/bcache.h
+++ b/lib/device/bcache.h
@@ -61,7 +61,6 @@ struct block {
 
 	struct bcache *cache;
 	struct dm_list list;
-	struct dm_list hash;
 
 	unsigned flags;
 	unsigned ref_count;
@@ -145,6 +144,13 @@ bool bcache_invalidate(struct bcache *cache, int fd, block_address index);
  */
 bool bcache_invalidate_fd(struct bcache *cache, int fd);
 
+/*
+ * Call this function if flush or invalidate fails and you do not
+ * wish to retry the writes.  This will throw away any dirty data
+ * not written.  If any blocks for fd are held, then it will call
+ * abort().
+ */
+void bcache_abort_fd(struct bcache *cache, int fd);
 
 //----------------------------------------------------------------
 // The next four functions are utilities written in terms of the above api.
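A caller-side sketch of the comment above, assuming a caller that tracks which descriptor it was writing through (the helper name is illustrative; the invalidate-based wrapper actually used by the scanning code is _invalidate_fd() in label.c below):

#include "lib/device/bcache.h"

/* Sketch: after a failed flush, give up on this fd's dirty blocks rather
 * than retrying the writes. */
static void _give_up_on_fd(struct bcache *cache, int fd)
{
	if (!bcache_flush(cache))
		bcache_abort_fd(cache, fd);
}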
diff --git a/lib/label/label.c b/lib/label/label.c
index 8107e33..2444ee0 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -594,6 +594,14 @@ static void _drop_bad_aliases(struct device *dev)
 	}
 }
 
+// Like bcache_invalidate, only it throws any dirty data away if the
+// write fails.
+static void _invalidate_fd(struct bcache *cache, int fd)
+{
+	if (!bcache_invalidate_fd(cache, fd))
+		bcache_abort_fd(cache, fd);
+}
+
 /*
  * Read or reread label/metadata from selected devs.
  *
@@ -706,7 +714,7 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f,
 		 * drop it from bcache.
 		 */
 		if (scan_failed || !is_lvm_device) {
-			bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+			_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
 			_scan_dev_close(devl->dev);
 		}
 
@@ -878,7 +886,7 @@ int label_scan(struct cmd_context *cmd)
 		 * so this will usually not be true.
 		 */
 		if (_in_bcache(dev)) {
-			bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+			_invalidate_fd(scan_bcache, dev->bcache_fd);
 			_scan_dev_close(dev);
 		}
 
@@ -1063,7 +1071,7 @@ int label_scan_devs(struct cmd_context *cmd, struct dev_filter *f, struct dm_lis
 
 	dm_list_iterate_items(devl, devs) {
 		if (_in_bcache(devl->dev)) {
-			bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+			_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
 			_scan_dev_close(devl->dev);
 		}
 	}
@@ -1082,7 +1090,7 @@ int label_scan_devs_rw(struct cmd_context *cmd, struct dev_filter *f, struct dm_
 
 	dm_list_iterate_items(devl, devs) {
 		if (_in_bcache(devl->dev)) {
-			bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+			_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
 			_scan_dev_close(devl->dev);
 		}
 
@@ -1104,7 +1112,7 @@ int label_scan_devs_excl(struct dm_list *devs)
 
 	dm_list_iterate_items(devl, devs) {
 		if (_in_bcache(devl->dev)) {
-			bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+			_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
 			_scan_dev_close(devl->dev);
 		}
 		/*
@@ -1124,7 +1132,7 @@ int label_scan_devs_excl(struct dm_list *devs)
 void label_scan_invalidate(struct device *dev)
 {
 	if (_in_bcache(dev)) {
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 	}
 }
@@ -1205,7 +1213,7 @@ int label_read(struct device *dev)
 	dm_list_add(&one_dev, &devl->list);
 
 	if (_in_bcache(dev)) {
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 	}
 
@@ -1311,7 +1319,7 @@ int label_scan_open_excl(struct device *dev)
 	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_EXCL)) {
 		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 		log_debug("Close and reopen excl %s", dev_name(dev));
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 	}
 	dev->flags |= DEV_BCACHE_EXCL;
@@ -1319,6 +1327,18 @@ int label_scan_open_excl(struct device *dev)
 	return label_scan_open(dev);
 }
 
+int label_scan_open_rw(struct device *dev)
+{
+	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) {
+		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
+		log_debug("Close and reopen rw %s", dev_name(dev));
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_scan_dev_close(dev);
+	}
+	dev->flags |= DEV_BCACHE_WRITE;
+	return label_scan_open(dev);
+}
+
 bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data)
 {
 	if (!scan_bcache) {
@@ -1360,7 +1380,7 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data)
 	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) {
 		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 		log_debug("Close and reopen to write %s", dev_name(dev));
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 
 		dev->flags |= DEV_BCACHE_WRITE;
@@ -1406,7 +1426,7 @@ bool dev_write_zeros(struct device *dev, uint64_t start, size_t len)
 	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) {
 		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 		log_debug("Close and reopen to write %s", dev_name(dev));
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 
 		dev->flags |= DEV_BCACHE_WRITE;
@@ -1457,7 +1477,7 @@ bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val)
 	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_WRITE)) {
 		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 		log_debug("Close and reopen to write %s", dev_name(dev));
-		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+		_invalidate_fd(scan_bcache, dev->bcache_fd);
 		_scan_dev_close(dev);
 
 		dev->flags |= DEV_BCACHE_WRITE;
diff --git a/make.tmpl.in b/make.tmpl.in
index c8e4f14..e7780e8 100644
--- a/make.tmpl.in
+++ b/make.tmpl.in
@@ -68,7 +68,15 @@ CLDFLAGS += @CLDFLAGS@
 ELDFLAGS += @ELDFLAGS@
 LDDEPS += @LDDEPS@
 LIB_SUFFIX = @LIB_SUFFIX@
-LVMINTERNAL_LIBS = -llvm-internal $(DMEVENT_LIBS) $(DAEMON_LIBS) $(SYSTEMD_LIBS) $(UDEV_LIBS) $(DL_LIBS) $(BLKID_LIBS)
+LVMINTERNAL_LIBS =\
+	-llvm-internal \
+	$(top_builddir)/base/libbase.a \
+	$(DMEVENT_LIBS) \
+	$(DAEMON_LIBS) \
+	$(SYSTEMD_LIBS) \
+	$(UDEV_LIBS) \
+	$(DL_LIBS) \
+	$(BLKID_LIBS)
 DL_LIBS = @DL_LIBS@
 RT_LIBS = @RT_LIBS@
 M_LIBS = @M_LIBS@
@@ -306,7 +314,7 @@ LIB_VERSION_DM := $(shell $(AWK) -F '.' '{printf "%s.%s",$$1,$$2}' $(top_srcdir)
 
 LIB_VERSION_APP := $(shell $(AWK) -F '[(). ]' '{printf "%s.%s",$$1,$$4}' $(top_srcdir)/VERSION)
 
-INCLUDES += -I$(srcdir) -I$(top_builddir)/include
+INCLUDES += -I$(top_srcdir) -I$(srcdir) -I$(top_builddir)/include
 
 INC_LNS = $(top_builddir)/include/.symlinks_created
 
diff --git a/test/unit/bcache_t.c b/test/unit/bcache_t.c
index 925b95d..2a8f931 100644
--- a/test/unit/bcache_t.c
+++ b/test/unit/bcache_t.c
@@ -12,15 +12,14 @@
  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include "units.h"
+#include "lib/device/bcache.h"
+
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 
-#include "bcache.h"
-#include "framework.h"
-#include "units.h"
-
 #define SHOW_MOCK_CALLS 0
 
 /*----------------------------------------------------------------
@@ -794,7 +793,6 @@ static void test_invalidate_after_write_error(void *context)
 
 static void test_invalidate_held_block(void *context)
 {
-
 	struct fixture *f = context;
 	struct mock_engine *me = f->me;
 	struct bcache *cache = f->cache;
@@ -811,6 +809,90 @@ static void test_invalidate_held_block(void *context)
 }
 
 //----------------------------------------------------------------
+// abort tests
+
+static void test_abort_no_blocks(void *context)
+{
+	struct fixture *f = context;
+	struct bcache *cache = f->cache;
+	int fd = 17;
+
+	// We have no expectations
+	bcache_abort_fd(cache, fd);
+}
+
+static void test_abort_single_block(void *context)
+{
+	struct fixture *f = context;
+	struct bcache *cache = f->cache;
+	struct block *b;
+	int fd = 17;
+
+	T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
+	bcache_put(b);
+
+	bcache_abort_fd(cache, fd);
+
+	// no write should be issued
+	T_ASSERT(bcache_flush(cache));
+}
+
+static void test_abort_forces_reread(void *context)
+{
+	struct fixture *f = context;
+	struct mock_engine *me = f->me;
+	struct bcache *cache = f->cache;
+	struct block *b;
+	int fd = 17;
+
+	_expect_read(me, fd, 0);
+	_expect(me, E_WAIT);
+	T_ASSERT(bcache_get(cache, fd, 0, GF_DIRTY, &b));
+	bcache_put(b);
+
+	bcache_abort_fd(cache, fd);
+	T_ASSERT(bcache_flush(cache));
+
+	// Check the block is re-read
+	_expect_read(me, fd, 0);
+	_expect(me, E_WAIT);
+	T_ASSERT(bcache_get(cache, fd, 0, 0, &b));
+	bcache_put(b);
+}
+
+static void test_abort_only_specific_fd(void *context)
+{
+	struct fixture *f = context;
+	struct mock_engine *me = f->me;
+	struct bcache *cache = f->cache;
+	struct block *b;
+	int fd1 = 17, fd2 = 18;
+
+	T_ASSERT(bcache_get(cache, fd1, 0, GF_ZERO, &b));
+	bcache_put(b);
+
+	T_ASSERT(bcache_get(cache, fd1, 1, GF_ZERO, &b));
+	bcache_put(b);
+
+	T_ASSERT(bcache_get(cache, fd2, 0, GF_ZERO, &b));
+	bcache_put(b);
+
+	T_ASSERT(bcache_get(cache, fd2, 1, GF_ZERO, &b));
+	bcache_put(b);
+
+	bcache_abort_fd(cache, fd2);
+
+	// writes for fd1 should still be issued
+	_expect_write(me, fd1, 0);
+	_expect_write(me, fd1, 1);
+
+	_expect(me, E_WAIT);
+	_expect(me, E_WAIT);
+
+	T_ASSERT(bcache_flush(cache));
+}
+
+//----------------------------------------------------------------
 // Chasing a bug reported by dct
 
 static void _cycle(struct fixture *f, unsigned nr_cache_blocks)
@@ -898,6 +980,12 @@ static struct test_suite *_small_tests(void)
 	T("invalidate-read-error", "invalidate a block that errored", test_invalidate_after_read_error);
 	T("invalidate-write-error", "invalidate a block that errored", test_invalidate_after_write_error);
 	T("invalidate-fails-in-held", "invalidating a held block fails", test_invalidate_held_block);
+
+	T("abort-with-no-blocks", "you can call abort, even if there are no blocks in the cache", test_abort_no_blocks);
+	T("abort-single-block", "a single block gets silently discarded", test_abort_single_block);
+	T("abort-forces-read", "if a block has been discarded then another read is necessary", test_abort_forces_reread);
+	T("abort-specific-fd", "abort doesn't affect other fds", test_abort_only_specific_fd);
+
 	T("concurrent-reads-after-invalidate", "prefetch should still issue concurrent reads after invalidate",
           test_concurrent_reads_after_invalidate);
 
diff --git a/test/unit/bcache_utils_t.c b/test/unit/bcache_utils_t.c
index 9ddc194..2e08320 100644
--- a/test/unit/bcache_utils_t.c
+++ b/test/unit/bcache_utils_t.c
@@ -14,9 +14,8 @@
 
 #define _GNU_SOURCE
 
-#include "bcache.h"
-#include "framework.h"
 #include "units.h"
+#include "lib/device/bcache.h"
 
 #include <errno.h>
 #include <stdio.h>
diff --git a/test/unit/radix_tree_t.c b/test/unit/radix_tree_t.c
index 7266a8a..54bc406 100644
--- a/test/unit/radix_tree_t.c
+++ b/test/unit/radix_tree_t.c
@@ -10,11 +10,10 @@
 // along with this program; if not, write to the Free Software Foundation,
 // Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  
+#include "units.h"
 #include "base/data-struct/radix-tree.h"
 #include "base/memory/container_of.h"
 
-#include "units.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 
@@ -44,6 +43,7 @@ static void test_insert_one(void *fixture)
 	unsigned char k = 'a';
 	v.n = 65;
 	T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	v.n = 0;
 	T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v));
 	T_ASSERT_EQUAL(v.n, 65);
@@ -62,6 +62,8 @@ static void test_single_byte_keys(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
 	for (i = 0; i < count; i++) {
 		k = i;
 		T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v));
@@ -82,12 +84,16 @@ static void test_overwrite_single_byte_keys(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
 	for (i = 0; i < count; i++) {
 		k = i;
 		v.n = 1000 + i;
 		T_ASSERT(radix_tree_insert(rt, &k, &k + 1, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
 	for (i = 0; i < count; i++) {
 		k = i;
 		T_ASSERT(radix_tree_lookup(rt, &k, &k + 1, &v));
@@ -109,6 +115,8 @@ static void test_16_bit_keys(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
 	for (i = 0; i < count; i++) {
 		k[0] = i / 256;
 		k[1] = i % 256;
@@ -127,8 +135,10 @@ static void test_prefix_keys(void *fixture)
 	k[1] = 200;
 	v.n = 1024;
 	T_ASSERT(radix_tree_insert(rt, k, k + 1, v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	v.n = 2345;
 	T_ASSERT(radix_tree_insert(rt, k, k + 2, v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v));
 	T_ASSERT_EQUAL(v.n, 1024);
 	T_ASSERT(radix_tree_lookup(rt, k, k + 2, &v));
@@ -145,8 +155,10 @@ static void test_prefix_keys_reversed(void *fixture)
 	k[1] = 200;
 	v.n = 1024;
 	T_ASSERT(radix_tree_insert(rt, k, k + 2, v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	v.n = 2345;
 	T_ASSERT(radix_tree_insert(rt, k, k + 1, v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	T_ASSERT(radix_tree_lookup(rt, k, k + 2, &v));
 	T_ASSERT_EQUAL(v.n, 1024);
 	T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v));
@@ -170,7 +182,10 @@ static void test_sparse_keys(void *fixture)
 		_gen_key(k, k + sizeof(k));
 		v.n = 1234;
 		T_ASSERT(radix_tree_insert(rt, k, k + 32, v));
+		// FIXME: remove
+		//T_ASSERT(radix_tree_is_well_formed(rt));
 	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
 }
 
 static void test_remove_one(void *fixture)
@@ -182,7 +197,9 @@ static void test_remove_one(void *fixture)
 	_gen_key(k, k + sizeof(k));
 	v.n = 1234;
 	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	T_ASSERT(!radix_tree_lookup(rt, k, k + sizeof(k), &v));
 }
 
@@ -199,14 +216,19 @@ static void test_remove_one_byte_keys(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + 1, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	for (i = 0; i < 256; i++) {
         	k[0] = i;
 		T_ASSERT(radix_tree_remove(rt, k, k + 1));
+		T_ASSERT(radix_tree_is_well_formed(rt));
 
 		for (j = i + 1; j < 256; j++) {
         		k[0] = j;
 			T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v));
-			T_ASSERT_EQUAL(v.n, j + 1000);
+			if (v.n != j + 1000)
+				test_fail("v.n (%u) != j + 1000 (%u)\n",
+                                          (unsigned) v.n,
+                                          (unsigned) j + 1000);
 		}
 	}
 
@@ -216,6 +238,40 @@ static void test_remove_one_byte_keys(void *fixture)
 	}
 }
 
+static void test_remove_one_byte_keys_reversed(void *fixture)
+{
+        struct radix_tree *rt = fixture;
+        unsigned i, j;
+	uint8_t k[1];
+	union radix_value v;
+
+	for (i = 0; i < 256; i++) {
+        	k[0] = i;
+        	v.n = i + 1000;
+		T_ASSERT(radix_tree_insert(rt, k, k + 1, v));
+	}
+
+	T_ASSERT(radix_tree_is_well_formed(rt));
+	for (i = 256; i; i--) {
+        	k[0] = i - 1;
+		T_ASSERT(radix_tree_remove(rt, k, k + 1));
+		T_ASSERT(radix_tree_is_well_formed(rt));
+
+		for (j = 0; j < i - 1; j++) {
+        		k[0] = j;
+			T_ASSERT(radix_tree_lookup(rt, k, k + 1, &v));
+			if (v.n != j + 1000)
+				test_fail("v.n (%u) != j + 1000 (%u)\n",
+                                          (unsigned) v.n,
+                                          (unsigned) j + 1000);
+		}
+	}
+
+	for (i = 0; i < 256; i++) {
+        	k[0] = i;
+		T_ASSERT(!radix_tree_lookup(rt, k, k + 1, &v));
+	}
+}
 static void test_remove_prefix_keys(void *fixture)
 {
 	struct radix_tree *rt = fixture;
@@ -230,8 +286,10 @@ static void test_remove_prefix_keys(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + i, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	for (i = 0; i < 32; i++) {
         	T_ASSERT(radix_tree_remove(rt, k, k + i));
+		T_ASSERT(radix_tree_is_well_formed(rt));
         	for (j = i + 1; j < 32; j++) {
                 	T_ASSERT(radix_tree_lookup(rt, k, k + j, &v));
                 	T_ASSERT_EQUAL(v.n, j);
@@ -256,8 +314,10 @@ static void test_remove_prefix_keys_reversed(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + i, v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	for (i = 0; i < 32; i++) {
         	T_ASSERT(radix_tree_remove(rt, k, k + (31 - i)));
+		T_ASSERT(radix_tree_is_well_formed(rt));
         	for (j = 0; j < 31 - i; j++) {
                 	T_ASSERT(radix_tree_lookup(rt, k, k + j, &v));
                 	T_ASSERT_EQUAL(v.n, j);
@@ -284,9 +344,12 @@ static void test_remove_prefix(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
 	// remove keys in a sub range 
 	k[0] = 21;
 	T_ASSERT_EQUAL(radix_tree_remove_prefix(rt, k, k + 1), count);
+	T_ASSERT(radix_tree_is_well_formed(rt));
 }
 
 static void test_remove_prefix_single(void *fixture)
@@ -298,7 +361,9 @@ static void test_remove_prefix_single(void *fixture)
 	_gen_key(k, k + sizeof(k));
 	v.n = 1234;
 	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	T_ASSERT_EQUAL(radix_tree_remove_prefix(rt, k, k + 2), 1);
+	T_ASSERT(radix_tree_is_well_formed(rt));
 }
 
 static void test_size(void *fixture)
@@ -318,6 +383,7 @@ static void test_size(void *fixture)
 	}
 
 	T_ASSERT_EQUAL(radix_tree_size(rt), 10000 - dup_count);
+	T_ASSERT(radix_tree_is_well_formed(rt));
 }
 
 struct visitor {
@@ -348,6 +414,7 @@ static void test_iterate_all(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	vt.count = 0;
 	vt.it.visit = _visit;
 	radix_tree_iterate(rt, NULL, NULL, &vt.it);
@@ -371,6 +438,7 @@ static void test_iterate_subset(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	vt.count = 0;
 	vt.it.visit = _visit;
 	k[0] = 21;
@@ -390,6 +458,7 @@ static void test_iterate_single(void *fixture)
 	v.n = 1234;
 	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	vt.count = 0;
 	vt.it.visit = _visit;
 	radix_tree_iterate(rt, k, k + 3, &vt.it);
@@ -411,6 +480,7 @@ static void test_iterate_vary_middle(void *fixture)
 		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
 	}
 
+	T_ASSERT(radix_tree_is_well_formed(rt));
 	vt.it.visit = _visit;
 	for (i = 0; i < 16; i++) {
         	vt.count = 0;
@@ -422,6 +492,323 @@ static void test_iterate_vary_middle(void *fixture)
 
 //----------------------------------------------------------------
 
+#define DTR_COUNT 100
+
+struct counter {
+	unsigned c;
+	uint8_t present[DTR_COUNT];
+};
+
+static void _counting_dtr(void *context, union radix_value v)
+{
+	struct counter *c = context;
+	c->c++;
+	T_ASSERT(v.n < DTR_COUNT);
+	c->present[v.n] = 0;
+}
+
+static void test_remove_calls_dtr(void *fixture)
+{
+	struct counter c;
+	struct radix_tree *rt = radix_tree_create(_counting_dtr, &c);
+	T_ASSERT(rt);
+
+	// Bug hunting, so I need the keys to be deterministic
+	srand(0);
+
+	c.c = 0;
+	memset(c.present, 1, sizeof(c.present));
+
+	{
+		unsigned i;
+		uint8_t keys[DTR_COUNT * 3];
+		union radix_value v;
+
+		// generate and insert a lot of keys
+		for (i = 0; i < DTR_COUNT; i++) {
+			bool found = false;
+			do {
+				v.n = i;
+				uint8_t *k = keys + (i * 3);
+				_gen_key(k, k + 3);
+				if (!radix_tree_lookup(rt, k, k + 3, &v)) {
+					T_ASSERT(radix_tree_insert(rt, k, k + 3, v));
+					found = true;
+				}
+
+			} while (!found);
+		}
+
+		T_ASSERT(radix_tree_is_well_formed(rt));
+		
+		// double check
+		for (i = 0; i < DTR_COUNT; i++) {
+			uint8_t *k = keys + (i * 3);
+			T_ASSERT(radix_tree_lookup(rt, k, k + 3, &v));
+		}
+
+		for (i = 0; i < DTR_COUNT; i++) {
+			uint8_t *k = keys + (i * 3);
+			// FIXME: check the values get passed to the dtr
+			T_ASSERT(radix_tree_remove(rt, k, k + 3));
+		}
+
+		T_ASSERT(c.c == DTR_COUNT);
+		for (i = 0; i < DTR_COUNT; i++)
+			T_ASSERT(!c.present[i]);
+	}
+
+	radix_tree_destroy(rt);
+}
+
+static void test_destroy_calls_dtr(void *fixture)
+{
+	unsigned i;
+	struct counter c;
+	struct radix_tree *rt = radix_tree_create(_counting_dtr, &c);
+	T_ASSERT(rt);
+
+	// Bug hunting, so I need the keys to be deterministic
+	srand(0);
+
+	c.c = 0;
+	memset(c.present, 1, sizeof(c.present));
+
+	{
+		uint8_t keys[DTR_COUNT * 3];
+		union radix_value v;
+
+		// generate and insert a lot of keys
+		for (i = 0; i < DTR_COUNT; i++) {
+			bool found = false;
+			do {
+				v.n = i;
+				uint8_t *k = keys + (i * 3);
+				_gen_key(k, k + 3);
+				if (!radix_tree_lookup(rt, k, k + 3, &v)) {
+					T_ASSERT(radix_tree_insert(rt, k, k + 3, v));
+					found = true;
+				}
+
+			} while (!found);
+		}
+
+		T_ASSERT(radix_tree_is_well_formed(rt));
+	}
+		
+	radix_tree_destroy(rt);
+	T_ASSERT(c.c == DTR_COUNT);
+	for (i = 0; i < DTR_COUNT; i++)
+		T_ASSERT(!c.present[i]);
+}
+
+//----------------------------------------------------------------
+
+static void test_bcache_scenario(void *fixture)
+{
+	struct radix_tree *rt = fixture;
+
+    	unsigned i;
+    	uint8_t k[6];
+	union radix_value v;
+
+    	memset(k, 0, sizeof(k));
+
+    	for (i = 0; i < 3; i++) {
+	    	// it has to be the 4th byte that varies to
+	    	// trigger the bug.
+	    	k[4] = i;
+	    	v.n = i;
+	    	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+    	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	k[4] = 0;
+    	T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+    	k[4] = i;
+    	v.n = i;
+    	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+}
+
+//----------------------------------------------------------------
+
+static void _bcs2_step1(struct radix_tree *rt)
+{
+	unsigned i;
+	uint8_t k[12];
+	union radix_value v;
+
+	memset(k, 0, sizeof(k));
+	for (i = 0x6; i < 0x69; i++) {
+		k[0] = i;
+		v.n = i;
+		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
+}
+
+static void _bcs2_step2(struct radix_tree *rt)
+{
+	unsigned i;
+	uint8_t k[12];
+
+	memset(k, 0, sizeof(k));
+	for (i = 0x6; i < 0x69; i++) {
+		k[0] = i;
+		radix_tree_remove_prefix(rt, k, k + 4);
+	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
+}
+
+static void test_bcache_scenario2(void *fixture)
+{
+	unsigned i;
+	struct radix_tree *rt = fixture;
+	uint8_t k[12];
+	union radix_value v;
+
+	_bcs2_step1(rt);
+	_bcs2_step2(rt);
+
+	memset(k, 0, sizeof(k));
+        for (i = 0; i < 50; i++) {
+	        k[0] = 0x6;
+	        v.n = 0x6;
+	        T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	        radix_tree_remove_prefix(rt, k, k + 4);
+        }
+        T_ASSERT(radix_tree_is_well_formed(rt));
+
+	_bcs2_step1(rt);
+	_bcs2_step2(rt);
+	_bcs2_step1(rt);
+	_bcs2_step2(rt);
+
+	memset(k, 0, sizeof(k));
+	for (i = 0x6; i < 0x37; i++) {
+		k[0] = i;
+		k[4] = 0xf;
+		k[5] = 0x1;
+		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+		k[4] = 0;
+		k[5] = 0;
+		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	memset(k, 0, sizeof(k));
+	for (i = 0x38; i < 0x69; i++) {
+		k[0] = i - 0x32;
+		k[4] = 0xf;
+		k[5] = 1;
+		T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+
+		k[0] = i;
+		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+
+		k[0] = i - 0x32;
+		k[4] = 0;
+		k[5] = 0;
+		T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+
+		k[0] = i;
+		T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	}
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	memset(k, 0, sizeof(k));
+	k[0] = 0x6;
+	radix_tree_remove_prefix(rt, k, k + 4);
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	k[0] = 0x38;
+	k[4] = 0xf;
+	k[5] = 0x1;
+	T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	memset(k, 0, sizeof(k));
+	k[0] = 0x6;
+	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	k[0] = 0x7;
+	radix_tree_remove_prefix(rt, k, k + 4);
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	k[0] = 0x38;
+	T_ASSERT(radix_tree_remove(rt, k, k + sizeof(k)));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+
+	k[0] = 7;
+	T_ASSERT(radix_tree_insert(rt, k, k + sizeof(k), v));
+	T_ASSERT(radix_tree_is_well_formed(rt));
+}
+
+//----------------------------------------------------------------
+
+struct key_parts {
+	uint32_t fd;
+	uint64_t b;
+} __attribute__ ((packed));
+
+union key {
+	struct key_parts parts;
+        uint8_t bytes[12];
+};
+
+static void __lookup_matches(struct radix_tree *rt, int fd, uint64_t b, uint64_t expected)
+{
+	union key k;
+	union radix_value v;
+
+	k.parts.fd = fd;
+	k.parts.b = b;
+	T_ASSERT(radix_tree_lookup(rt, k.bytes, k.bytes + sizeof(k.bytes), &v));
+	T_ASSERT(v.n == expected);
+}
+
+static void __lookup_fails(struct radix_tree *rt, int fd, uint64_t b)
+{
+	union key k;
+	union radix_value v;
+
+	k.parts.fd = fd;
+	k.parts.b = b;
+	T_ASSERT(!radix_tree_lookup(rt, k.bytes, k.bytes + sizeof(k.bytes), &v));
+}
+
+static void __insert(struct radix_tree *rt, int fd, uint64_t b, uint64_t n)
+{
+	union key k;
+	union radix_value v;
+
+	k.parts.fd = fd;
+	k.parts.b = b;
+	v.n = n;
+	T_ASSERT(radix_tree_insert(rt, k.bytes, k.bytes + sizeof(k.bytes), v));
+}
+
+static void __invalidate(struct radix_tree *rt, int fd)
+{
+	union key k;
+
+	k.parts.fd = fd;
+	radix_tree_remove_prefix(rt, k.bytes, k.bytes + sizeof(k.parts.fd));
+	radix_tree_is_well_formed(rt);
+}
+
+static void test_bcache_scenario3(void *fixture)
+{
+	struct radix_tree *rt = fixture;
+
+	#include "test/unit/rt_case1.c"
+}
+
+//----------------------------------------------------------------
 #define T(path, desc, fn) register_test(ts, "/base/data-struct/radix-tree/" path, desc, fn)
 
 void radix_tree_tests(struct dm_list *all_tests)
@@ -442,6 +829,7 @@ void radix_tree_tests(struct dm_list *all_tests)
 	T("sparse-keys", "see what the memory usage is for sparsely distributed keys", test_sparse_keys);
 	T("remove-one", "remove one entry", test_remove_one);
 	T("remove-one-byte-keys", "remove many one byte keys", test_remove_one_byte_keys);
+	T("remove-one-byte-keys-reversed", "remove many one byte keys reversed", test_remove_one_byte_keys_reversed);
 	T("remove-prefix-keys", "remove a set of keys that have common prefixes", test_remove_prefix_keys);
 	T("remove-prefix-keys-reversed", "remove a set of keys that have common prefixes (reversed)", test_remove_prefix_keys_reversed);
 	T("remove-prefix", "remove a subrange", test_remove_prefix);
@@ -451,6 +839,11 @@ void radix_tree_tests(struct dm_list *all_tests)
 	T("iterate-subset", "iterate a subset of entries in tree", test_iterate_subset);
 	T("iterate-single", "iterate a subset that contains a single entry", test_iterate_single);
 	T("iterate-vary-middle", "iterate keys that vary in the middle", test_iterate_vary_middle);
+	T("remove-calls-dtr", "remove should call the dtr for the value", test_remove_calls_dtr);
+	T("destroy-calls-dtr", "destroy should call the dtr for all values", test_destroy_calls_dtr);
+	T("bcache-scenario", "A specific series of keys from a bcache scenario", test_bcache_scenario);
+	T("bcache-scenario-2", "A second series of keys from a bcache scenario", test_bcache_scenario2);
+	T("bcache-scenario-3", "A third series of keys from a bcache scenario", test_bcache_scenario3);
 
 	dm_list_add(all_tests, &ts->list);
 }
diff --git a/test/unit/rt_case1.c b/test/unit/rt_case1.c
new file mode 100644
index 0000000..c1677d1
--- /dev/null
+++ b/test/unit/rt_case1.c
@@ -0,0 +1,1669 @@
+	__lookup_fails(rt, 6, 0);
+	__insert(rt, 6, 0, 0);
+	__lookup_fails(rt, 7, 0);
+	__insert(rt, 7, 0, 1);
+	__lookup_fails(rt, 8, 0);
+	__insert(rt, 8, 0, 2);
+	__lookup_fails(rt, 9, 0);
+	__insert(rt, 9, 0, 3);
+	__lookup_fails(rt, 10, 0);
+	__insert(rt, 10, 0, 4);
+	__lookup_fails(rt, 11, 0);
+	__insert(rt, 11, 0, 5);
+	__lookup_fails(rt, 12, 0);
+	__insert(rt, 12, 0, 6);
+	__lookup_fails(rt, 13, 0);
+	__insert(rt, 13, 0, 7);
+	__lookup_fails(rt, 14, 0);
+	__insert(rt, 14, 0, 8);
+	__lookup_fails(rt, 15, 0);
+	__insert(rt, 15, 0, 9);
+	__lookup_fails(rt, 16, 0);
+	__insert(rt, 16, 0, 10);
+	__lookup_fails(rt, 17, 0);
+	__insert(rt, 17, 0, 11);
+	__lookup_fails(rt, 18, 0);
+	__insert(rt, 18, 0, 12);
+	__lookup_fails(rt, 19, 0);
+	__insert(rt, 19, 0, 13);
+	__lookup_fails(rt, 20, 0);
+	__insert(rt, 20, 0, 14);
+	__lookup_fails(rt, 21, 0);
+	__insert(rt, 21, 0, 15);
+	__lookup_fails(rt, 22, 0);
+	__insert(rt, 22, 0, 16);
+	__lookup_fails(rt, 23, 0);
+	__insert(rt, 23, 0, 17);
+	__lookup_fails(rt, 24, 0);
+	__insert(rt, 24, 0, 18);
+	__lookup_fails(rt, 25, 0);
+	__insert(rt, 25, 0, 19);
+	__lookup_fails(rt, 26, 0);
+	__insert(rt, 26, 0, 20);
+	__lookup_fails(rt, 27, 0);
+	__insert(rt, 27, 0, 21);
+	__lookup_fails(rt, 28, 0);
+	__insert(rt, 28, 0, 22);
+	__lookup_fails(rt, 29, 0);
+	__insert(rt, 29, 0, 23);
+	__lookup_fails(rt, 30, 0);
+	__insert(rt, 30, 0, 24);
+	__lookup_fails(rt, 31, 0);
+	__insert(rt, 31, 0, 25);
+	__lookup_fails(rt, 32, 0);
+	__insert(rt, 32, 0, 26);
+	__lookup_fails(rt, 33, 0);
+	__insert(rt, 33, 0, 27);
+	__lookup_fails(rt, 34, 0);
+	__insert(rt, 34, 0, 28);
+	__lookup_fails(rt, 35, 0);
+	__insert(rt, 35, 0, 29);
+	__lookup_fails(rt, 36, 0);
+	__insert(rt, 36, 0, 30);
+	__lookup_fails(rt, 37, 0);
+	__insert(rt, 37, 0, 31);
+	__lookup_fails(rt, 38, 0);
+	__insert(rt, 38, 0, 32);
+	__lookup_fails(rt, 39, 0);
+	__insert(rt, 39, 0, 33);
+	__lookup_fails(rt, 40, 0);
+	__insert(rt, 40, 0, 34);
+	__lookup_fails(rt, 41, 0);
+	__insert(rt, 41, 0, 35);
+	__lookup_fails(rt, 42, 0);
+	__insert(rt, 42, 0, 36);
+	__lookup_fails(rt, 43, 0);
+	__insert(rt, 43, 0, 37);
+	__lookup_fails(rt, 44, 0);
+	__insert(rt, 44, 0, 38);
+	__lookup_fails(rt, 45, 0);
+	__insert(rt, 45, 0, 39);
+	__lookup_fails(rt, 46, 0);
+	__insert(rt, 46, 0, 40);
+	__lookup_fails(rt, 47, 0);
+	__insert(rt, 47, 0, 41);
+	__lookup_fails(rt, 48, 0);
+	__insert(rt, 48, 0, 42);
+	__lookup_fails(rt, 49, 0);
+	__insert(rt, 49, 0, 43);
+	__lookup_fails(rt, 50, 0);
+	__insert(rt, 50, 0, 44);
+	__lookup_fails(rt, 51, 0);
+	__insert(rt, 51, 0, 45);
+	__lookup_fails(rt, 52, 0);
+	__insert(rt, 52, 0, 46);
+	__lookup_fails(rt, 53, 0);
+	__insert(rt, 53, 0, 47);
+	__lookup_fails(rt, 54, 0);
+	__insert(rt, 54, 0, 48);
+	__lookup_fails(rt, 55, 0);
+	__insert(rt, 55, 0, 49);
+	__lookup_fails(rt, 56, 0);
+	__insert(rt, 56, 0, 50);
+	__lookup_fails(rt, 57, 0);
+	__insert(rt, 57, 0, 51);
+	__lookup_fails(rt, 58, 0);
+	__insert(rt, 58, 0, 52);
+	__lookup_fails(rt, 59, 0);
+	__insert(rt, 59, 0, 53);
+	__lookup_fails(rt, 60, 0);
+	__insert(rt, 60, 0, 54);
+	__lookup_fails(rt, 61, 0);
+	__insert(rt, 61, 0, 55);
+	__lookup_fails(rt, 62, 0);
+	__insert(rt, 62, 0, 56);
+	__lookup_fails(rt, 63, 0);
+	__insert(rt, 63, 0, 57);
+	__lookup_fails(rt, 64, 0);
+	__insert(rt, 64, 0, 58);
+	__lookup_fails(rt, 65, 0);
+	__insert(rt, 65, 0, 59);
+	__lookup_fails(rt, 66, 0);
+	__insert(rt, 66, 0, 60);
+	__lookup_fails(rt, 67, 0);
+	__insert(rt, 67, 0, 61);
+	__lookup_fails(rt, 68, 0);
+	__insert(rt, 68, 0, 62);
+	__lookup_fails(rt, 69, 0);
+	__insert(rt, 69, 0, 63);
+	__lookup_fails(rt, 70, 0);
+	__insert(rt, 70, 0, 64);
+	__lookup_fails(rt, 71, 0);
+	__insert(rt, 71, 0, 65);
+	__lookup_fails(rt, 72, 0);
+	__insert(rt, 72, 0, 66);
+	__lookup_fails(rt, 73, 0);
+	__insert(rt, 73, 0, 67);
+	__lookup_fails(rt, 74, 0);
+	__insert(rt, 74, 0, 68);
+	__lookup_fails(rt, 75, 0);
+	__insert(rt, 75, 0, 69);
+	__lookup_fails(rt, 76, 0);
+	__insert(rt, 76, 0, 70);
+	__lookup_fails(rt, 77, 0);
+	__insert(rt, 77, 0, 71);
+	__lookup_fails(rt, 78, 0);
+	__insert(rt, 78, 0, 72);
+	__lookup_fails(rt, 79, 0);
+	__insert(rt, 79, 0, 73);
+	__lookup_fails(rt, 80, 0);
+	__insert(rt, 80, 0, 74);
+	__lookup_fails(rt, 81, 0);
+	__insert(rt, 81, 0, 75);
+	__lookup_fails(rt, 82, 0);
+	__insert(rt, 82, 0, 76);
+	__lookup_fails(rt, 83, 0);
+	__insert(rt, 83, 0, 77);
+	__lookup_fails(rt, 84, 0);
+	__insert(rt, 84, 0, 78);
+	__lookup_fails(rt, 85, 0);
+	__insert(rt, 85, 0, 79);
+	__lookup_fails(rt, 86, 0);
+	__insert(rt, 86, 0, 80);
+	__lookup_fails(rt, 87, 0);
+	__insert(rt, 87, 0, 81);
+	__lookup_fails(rt, 88, 0);
+	__insert(rt, 88, 0, 82);
+	__lookup_fails(rt, 89, 0);
+	__insert(rt, 89, 0, 83);
+	__lookup_fails(rt, 90, 0);
+	__insert(rt, 90, 0, 84);
+	__lookup_fails(rt, 91, 0);
+	__insert(rt, 91, 0, 85);
+	__lookup_fails(rt, 92, 0);
+	__insert(rt, 92, 0, 86);
+	__lookup_fails(rt, 93, 0);
+	__insert(rt, 93, 0, 87);
+	__lookup_fails(rt, 94, 0);
+	__insert(rt, 94, 0, 88);
+	__lookup_fails(rt, 95, 0);
+	__insert(rt, 95, 0, 89);
+	__lookup_fails(rt, 96, 0);
+	__insert(rt, 96, 0, 90);
+	__lookup_fails(rt, 97, 0);
+	__insert(rt, 97, 0, 91);
+	__lookup_fails(rt, 98, 0);
+	__insert(rt, 98, 0, 92);
+	__lookup_fails(rt, 99, 0);
+	__insert(rt, 99, 0, 93);
+	__lookup_fails(rt, 100, 0);
+	__insert(rt, 100, 0, 94);
+	__lookup_fails(rt, 101, 0);
+	__insert(rt, 101, 0, 95);
+	__lookup_fails(rt, 102, 0);
+	__insert(rt, 102, 0, 96);
+	__lookup_fails(rt, 103, 0);
+	__insert(rt, 103, 0, 97);
+	__lookup_fails(rt, 104, 0);
+	__insert(rt, 104, 0, 98);
+	__lookup_fails(rt, 105, 0);
+	__insert(rt, 105, 0, 99);
+	__lookup_fails(rt, 106, 0);
+	__insert(rt, 106, 0, 100);
+	__lookup_fails(rt, 107, 0);
+	__insert(rt, 107, 0, 101);
+	__lookup_fails(rt, 108, 0);
+	__insert(rt, 108, 0, 102);
+	__lookup_fails(rt, 109, 0);
+	__insert(rt, 109, 0, 103);
+	__lookup_fails(rt, 110, 0);
+	__insert(rt, 110, 0, 104);
+	__lookup_fails(rt, 111, 0);
+	__insert(rt, 111, 0, 105);
+	__lookup_fails(rt, 112, 0);
+	__insert(rt, 112, 0, 106);
+	__lookup_fails(rt, 113, 0);
+	__insert(rt, 113, 0, 107);
+	__lookup_fails(rt, 114, 0);
+	__insert(rt, 114, 0, 108);
+	__lookup_fails(rt, 115, 0);
+	__insert(rt, 115, 0, 109);
+	__lookup_fails(rt, 116, 0);
+	__insert(rt, 116, 0, 110);
+	__lookup_fails(rt, 117, 0);
+	__insert(rt, 117, 0, 111);
+	__lookup_fails(rt, 118, 0);
+	__insert(rt, 118, 0, 112);
+	__lookup_fails(rt, 119, 0);
+	__insert(rt, 119, 0, 113);
+	__lookup_fails(rt, 120, 0);
+	__insert(rt, 120, 0, 114);
+	__lookup_fails(rt, 121, 0);
+	__insert(rt, 121, 0, 115);
+	__lookup_fails(rt, 122, 0);
+	__insert(rt, 122, 0, 116);
+	__lookup_fails(rt, 123, 0);
+	__insert(rt, 123, 0, 117);
+	__lookup_fails(rt, 124, 0);
+	__insert(rt, 124, 0, 118);
+	__lookup_fails(rt, 125, 0);
+	__insert(rt, 125, 0, 119);
+	__lookup_fails(rt, 126, 0);
+	__insert(rt, 126, 0, 120);
+	__lookup_fails(rt, 127, 0);
+	__insert(rt, 127, 0, 121);
+	__lookup_fails(rt, 128, 0);
+	__insert(rt, 128, 0, 122);
+	__lookup_fails(rt, 129, 0);
+	__insert(rt, 129, 0, 123);
+	__lookup_fails(rt, 130, 0);
+	__insert(rt, 130, 0, 124);
+	__lookup_fails(rt, 131, 0);
+	__insert(rt, 131, 0, 125);
+	__lookup_fails(rt, 132, 0);
+	__insert(rt, 132, 0, 126);
+	__lookup_fails(rt, 133, 0);
+	__insert(rt, 133, 0, 127);
+	__lookup_fails(rt, 134, 0);
+	__insert(rt, 134, 0, 128);
+	__lookup_fails(rt, 135, 0);
+	__insert(rt, 135, 0, 129);
+	__lookup_fails(rt, 136, 0);
+	__insert(rt, 136, 0, 130);
+	__lookup_fails(rt, 137, 0);
+	__insert(rt, 137, 0, 131);
+	__lookup_fails(rt, 138, 0);
+	__insert(rt, 138, 0, 132);
+	__lookup_fails(rt, 139, 0);
+	__insert(rt, 139, 0, 133);
+	__lookup_fails(rt, 140, 0);
+	__insert(rt, 140, 0, 134);
+	__lookup_fails(rt, 141, 0);
+	__insert(rt, 141, 0, 135);
+	__lookup_fails(rt, 142, 0);
+	__insert(rt, 142, 0, 136);
+	__lookup_fails(rt, 143, 0);
+	__insert(rt, 143, 0, 137);
+	__lookup_fails(rt, 144, 0);
+	__insert(rt, 144, 0, 138);
+	__lookup_fails(rt, 145, 0);
+	__insert(rt, 145, 0, 139);
+	__lookup_fails(rt, 146, 0);
+	__insert(rt, 146, 0, 140);
+	__lookup_fails(rt, 147, 0);
+	__insert(rt, 147, 0, 141);
+	__lookup_fails(rt, 148, 0);
+	__insert(rt, 148, 0, 142);
+	__lookup_fails(rt, 149, 0);
+	__insert(rt, 149, 0, 143);
+	__lookup_fails(rt, 150, 0);
+	__insert(rt, 150, 0, 144);
+	__lookup_fails(rt, 151, 0);
+	__insert(rt, 151, 0, 145);
+	__lookup_fails(rt, 152, 0);
+	__insert(rt, 152, 0, 146);
+	__lookup_fails(rt, 153, 0);
+	__insert(rt, 153, 0, 147);
+	__lookup_fails(rt, 154, 0);
+	__insert(rt, 154, 0, 148);
+	__lookup_fails(rt, 155, 0);
+	__insert(rt, 155, 0, 149);
+	__lookup_fails(rt, 156, 0);
+	__insert(rt, 156, 0, 150);
+	__lookup_fails(rt, 157, 0);
+	__insert(rt, 157, 0, 151);
+	__lookup_fails(rt, 158, 0);
+	__insert(rt, 158, 0, 152);
+	__lookup_fails(rt, 159, 0);
+	__insert(rt, 159, 0, 153);
+	__lookup_fails(rt, 160, 0);
+	__insert(rt, 160, 0, 154);
+	__lookup_fails(rt, 161, 0);
+	__insert(rt, 161, 0, 155);
+	__lookup_fails(rt, 162, 0);
+	__insert(rt, 162, 0, 156);
+	__lookup_fails(rt, 163, 0);
+	__insert(rt, 163, 0, 157);
+	__lookup_fails(rt, 164, 0);
+	__insert(rt, 164, 0, 158);
+	__lookup_fails(rt, 165, 0);
+	__insert(rt, 165, 0, 159);
+	__lookup_fails(rt, 166, 0);
+	__insert(rt, 166, 0, 160);
+	__lookup_fails(rt, 167, 0);
+	__insert(rt, 167, 0, 161);
+	__lookup_fails(rt, 168, 0);
+	__insert(rt, 168, 0, 162);
+	__lookup_fails(rt, 169, 0);
+	__insert(rt, 169, 0, 163);
+	__lookup_fails(rt, 170, 0);
+	__insert(rt, 170, 0, 164);
+	__lookup_fails(rt, 171, 0);
+	__insert(rt, 171, 0, 165);
+	__lookup_fails(rt, 172, 0);
+	__insert(rt, 172, 0, 166);
+	__lookup_fails(rt, 173, 0);
+	__insert(rt, 173, 0, 167);
+	__lookup_fails(rt, 174, 0);
+	__insert(rt, 174, 0, 168);
+	__lookup_fails(rt, 175, 0);
+	__insert(rt, 175, 0, 169);
+	__lookup_fails(rt, 176, 0);
+	__insert(rt, 176, 0, 170);
+	__lookup_fails(rt, 177, 0);
+	__insert(rt, 177, 0, 171);
+	__lookup_fails(rt, 178, 0);
+	__insert(rt, 178, 0, 172);
+	__lookup_fails(rt, 179, 0);
+	__insert(rt, 179, 0, 173);
+	__lookup_fails(rt, 180, 0);
+	__insert(rt, 180, 0, 174);
+	__lookup_fails(rt, 181, 0);
+	__insert(rt, 181, 0, 175);
+	__lookup_fails(rt, 182, 0);
+	__insert(rt, 182, 0, 176);
+	__lookup_fails(rt, 183, 0);
+	__insert(rt, 183, 0, 177);
+	__lookup_fails(rt, 184, 0);
+	__insert(rt, 184, 0, 178);
+	__lookup_fails(rt, 185, 0);
+	__insert(rt, 185, 0, 179);
+	__lookup_fails(rt, 186, 0);
+	__insert(rt, 186, 0, 180);
+	__lookup_fails(rt, 187, 0);
+	__insert(rt, 187, 0, 181);
+	__lookup_fails(rt, 188, 0);
+	__insert(rt, 188, 0, 182);
+	__lookup_fails(rt, 189, 0);
+	__insert(rt, 189, 0, 183);
+	__lookup_fails(rt, 190, 0);
+	__insert(rt, 190, 0, 184);
+	__lookup_fails(rt, 191, 0);
+	__insert(rt, 191, 0, 185);
+	__lookup_fails(rt, 192, 0);
+	__insert(rt, 192, 0, 186);
+	__lookup_fails(rt, 193, 0);
+	__insert(rt, 193, 0, 187);
+	__lookup_fails(rt, 194, 0);
+	__insert(rt, 194, 0, 188);
+	__lookup_fails(rt, 195, 0);
+	__insert(rt, 195, 0, 189);
+	__lookup_fails(rt, 196, 0);
+	__insert(rt, 196, 0, 190);
+	__lookup_fails(rt, 197, 0);
+	__insert(rt, 197, 0, 191);
+	__lookup_fails(rt, 198, 0);
+	__insert(rt, 198, 0, 192);
+	__lookup_fails(rt, 199, 0);
+	__insert(rt, 199, 0, 193);
+	__lookup_fails(rt, 200, 0);
+	__insert(rt, 200, 0, 194);
+	__lookup_fails(rt, 201, 0);
+	__insert(rt, 201, 0, 195);
+	__lookup_fails(rt, 202, 0);
+	__insert(rt, 202, 0, 196);
+	__lookup_fails(rt, 203, 0);
+	__insert(rt, 203, 0, 197);
+	__lookup_fails(rt, 204, 0);
+	__insert(rt, 204, 0, 198);
+	__lookup_fails(rt, 205, 0);
+	__insert(rt, 205, 0, 199);
+	__lookup_matches(rt, 6, 0, 0);
+	__invalidate(rt, 6);
+	__lookup_matches(rt, 7, 0, 1);
+	__invalidate(rt, 7);
+	__lookup_matches(rt, 8, 0, 2);
+	__invalidate(rt, 8);
+	__lookup_matches(rt, 9, 0, 3);
+	__invalidate(rt, 9);
+	__lookup_matches(rt, 10, 0, 4);
+	__invalidate(rt, 10);
+	__lookup_matches(rt, 11, 0, 5);
+	__invalidate(rt, 11);
+	__lookup_matches(rt, 12, 0, 6);
+	__lookup_matches(rt, 13, 0, 7);
+	__invalidate(rt, 13);
+	__lookup_matches(rt, 14, 0, 8);
+	__invalidate(rt, 14);
+	__lookup_matches(rt, 15, 0, 9);
+	__invalidate(rt, 15);
+	__lookup_matches(rt, 16, 0, 10);
+	__invalidate(rt, 16);
+	__lookup_matches(rt, 17, 0, 11);
+	__invalidate(rt, 17);
+	__lookup_matches(rt, 18, 0, 12);
+	__invalidate(rt, 18);
+	__lookup_matches(rt, 19, 0, 13);
+	__invalidate(rt, 19);
+	__lookup_matches(rt, 20, 0, 14);
+	__invalidate(rt, 20);
+	__lookup_matches(rt, 21, 0, 15);
+	__invalidate(rt, 21);
+	__lookup_matches(rt, 22, 0, 16);
+	__invalidate(rt, 22);
+	__lookup_matches(rt, 23, 0, 17);
+	__invalidate(rt, 23);
+	__lookup_matches(rt, 24, 0, 18);
+	__invalidate(rt, 24);
+	__lookup_matches(rt, 25, 0, 19);
+	__invalidate(rt, 25);
+	__lookup_matches(rt, 26, 0, 20);
+	__invalidate(rt, 26);
+	__lookup_matches(rt, 27, 0, 21);
+	__invalidate(rt, 27);
+	__lookup_matches(rt, 28, 0, 22);
+	__invalidate(rt, 28);
+	__lookup_matches(rt, 29, 0, 23);
+	__invalidate(rt, 29);
+	__lookup_matches(rt, 30, 0, 24);
+	__invalidate(rt, 30);
+	__lookup_matches(rt, 31, 0, 25);
+	__invalidate(rt, 31);
+	__lookup_matches(rt, 32, 0, 26);
+	__invalidate(rt, 32);
+	__lookup_matches(rt, 33, 0, 27);
+	__invalidate(rt, 33);
+	__lookup_matches(rt, 34, 0, 28);
+	__invalidate(rt, 34);
+	__lookup_matches(rt, 35, 0, 29);
+	__invalidate(rt, 35);
+	__lookup_matches(rt, 36, 0, 30);
+	__invalidate(rt, 36);
+	__lookup_matches(rt, 37, 0, 31);
+	__invalidate(rt, 37);
+	__lookup_matches(rt, 38, 0, 32);
+	__invalidate(rt, 38);
+	__lookup_matches(rt, 39, 0, 33);
+	__invalidate(rt, 39);
+	__lookup_matches(rt, 40, 0, 34);
+	__invalidate(rt, 40);
+	__lookup_matches(rt, 41, 0, 35);
+	__invalidate(rt, 41);
+	__lookup_matches(rt, 42, 0, 36);
+	__invalidate(rt, 42);
+	__lookup_matches(rt, 43, 0, 37);
+	__invalidate(rt, 43);
+	__lookup_matches(rt, 44, 0, 38);
+	__invalidate(rt, 44);
+	__lookup_matches(rt, 45, 0, 39);
+	__invalidate(rt, 45);
+	__lookup_matches(rt, 46, 0, 40);
+	__lookup_fails(rt, 46, 5);
+	__insert(rt, 46, 5, 200);
+	__lookup_matches(rt, 46, 5, 200);
+	__lookup_fails(rt, 46, 6);
+	__insert(rt, 46, 6, 201);
+	__lookup_fails(rt, 46, 7);
+	__insert(rt, 46, 7, 202);
+	__lookup_fails(rt, 46, 8);
+	__insert(rt, 46, 8, 203);
+	__lookup_matches(rt, 46, 5, 200);
+	__lookup_matches(rt, 46, 6, 201);
+	__lookup_matches(rt, 46, 7, 202);
+	__lookup_matches(rt, 46, 8, 203);
+	__lookup_matches(rt, 47, 0, 41);
+	__invalidate(rt, 47);
+	__lookup_matches(rt, 48, 0, 42);
+	__invalidate(rt, 48);
+	__lookup_matches(rt, 49, 0, 43);
+	__invalidate(rt, 49);
+	__lookup_matches(rt, 50, 0, 44);
+	__invalidate(rt, 50);
+	__lookup_matches(rt, 51, 0, 45);
+	__invalidate(rt, 51);
+	__lookup_matches(rt, 52, 0, 46);
+	__invalidate(rt, 52);
+	__lookup_matches(rt, 53, 0, 47);
+	__invalidate(rt, 53);
+	__lookup_matches(rt, 54, 0, 48);
+	__invalidate(rt, 54);
+	__lookup_matches(rt, 55, 0, 49);
+	__invalidate(rt, 55);
+	__lookup_matches(rt, 56, 0, 50);
+	__invalidate(rt, 56);
+	__lookup_matches(rt, 57, 0, 51);
+	__invalidate(rt, 57);
+	__lookup_matches(rt, 58, 0, 52);
+	__invalidate(rt, 58);
+	__lookup_matches(rt, 59, 0, 53);
+	__invalidate(rt, 59);
+	__lookup_matches(rt, 60, 0, 54);
+	__invalidate(rt, 60);
+	__lookup_matches(rt, 61, 0, 55);
+	__invalidate(rt, 61);
+	__lookup_matches(rt, 62, 0, 56);
+	__invalidate(rt, 62);
+	__lookup_matches(rt, 63, 0, 57);
+	__invalidate(rt, 63);
+	__lookup_matches(rt, 64, 0, 58);
+	__invalidate(rt, 64);
+	__lookup_matches(rt, 65, 0, 59);
+	__lookup_fails(rt, 65, 1);
+	__insert(rt, 65, 1, 204);
+	__lookup_fails(rt, 65, 2);
+	__insert(rt, 65, 2, 205);
+	__lookup_fails(rt, 65, 3);
+	__insert(rt, 65, 3, 206);
+	__lookup_fails(rt, 65, 4);
+	__insert(rt, 65, 4, 207);
+	__lookup_matches(rt, 65, 0, 59);
+	__lookup_matches(rt, 65, 1, 204);
+	__lookup_matches(rt, 65, 2, 205);
+	__lookup_matches(rt, 65, 3, 206);
+	__lookup_matches(rt, 65, 4, 207);
+	__lookup_matches(rt, 66, 0, 60);
+	__invalidate(rt, 66);
+	__lookup_matches(rt, 67, 0, 61);
+	__invalidate(rt, 67);
+	__lookup_matches(rt, 68, 0, 62);
+	__invalidate(rt, 68);
+	__lookup_matches(rt, 69, 0, 63);
+	__invalidate(rt, 69);
+	__lookup_matches(rt, 70, 0, 64);
+	__invalidate(rt, 70);
+	__lookup_matches(rt, 71, 0, 65);
+	__invalidate(rt, 71);
+	__lookup_matches(rt, 72, 0, 66);
+	__invalidate(rt, 72);
+	__lookup_matches(rt, 73, 0, 67);
+	__invalidate(rt, 73);
+	__lookup_matches(rt, 74, 0, 68);
+	__invalidate(rt, 74);
+	__lookup_matches(rt, 75, 0, 69);
+	__invalidate(rt, 75);
+	__lookup_matches(rt, 76, 0, 70);
+	__invalidate(rt, 76);
+	__lookup_matches(rt, 77, 0, 71);
+	__invalidate(rt, 77);
+	__lookup_matches(rt, 78, 0, 72);
+	__invalidate(rt, 78);
+	__lookup_matches(rt, 79, 0, 73);
+	__invalidate(rt, 79);
+	__lookup_matches(rt, 80, 0, 74);
+	__invalidate(rt, 80);
+	__lookup_matches(rt, 81, 0, 75);
+	__invalidate(rt, 81);
+	__lookup_matches(rt, 82, 0, 76);
+	__invalidate(rt, 82);
+	__lookup_matches(rt, 83, 0, 77);
+	__invalidate(rt, 83);
+	__lookup_matches(rt, 84, 0, 78);
+	__invalidate(rt, 84);
+	__lookup_matches(rt, 85, 0, 79);
+	__invalidate(rt, 85);
+	__lookup_matches(rt, 86, 0, 80);
+	__invalidate(rt, 86);
+	__lookup_matches(rt, 87, 0, 81);
+	__invalidate(rt, 87);
+	__lookup_matches(rt, 88, 0, 82);
+	__invalidate(rt, 88);
+	__lookup_matches(rt, 89, 0, 83);
+	__invalidate(rt, 89);
+	__lookup_matches(rt, 90, 0, 84);
+	__invalidate(rt, 90);
+	__lookup_matches(rt, 91, 0, 85);
+	__invalidate(rt, 91);
+	__lookup_matches(rt, 92, 0, 86);
+	__invalidate(rt, 92);
+	__lookup_matches(rt, 93, 0, 87);
+	__invalidate(rt, 93);
+	__lookup_matches(rt, 94, 0, 88);
+	__invalidate(rt, 94);
+	__lookup_matches(rt, 95, 0, 89);
+	__invalidate(rt, 95);
+	__lookup_matches(rt, 96, 0, 90);
+	__lookup_matches(rt, 97, 0, 91);
+	__invalidate(rt, 97);
+	__lookup_matches(rt, 98, 0, 92);
+	__invalidate(rt, 98);
+	__lookup_matches(rt, 99, 0, 93);
+	__invalidate(rt, 99);
+	__lookup_matches(rt, 100, 0, 94);
+	__invalidate(rt, 100);
+	__lookup_matches(rt, 101, 0, 95);
+	__invalidate(rt, 101);
+	__lookup_matches(rt, 102, 0, 96);
+	__invalidate(rt, 102);
+	__lookup_matches(rt, 103, 0, 97);
+	__invalidate(rt, 103);
+	__lookup_matches(rt, 104, 0, 98);
+	__invalidate(rt, 104);
+	__lookup_matches(rt, 105, 0, 99);
+	__invalidate(rt, 105);
+	__lookup_matches(rt, 106, 0, 100);
+	__invalidate(rt, 106);
+	__lookup_matches(rt, 107, 0, 101);
+	__invalidate(rt, 107);
+	__lookup_matches(rt, 108, 0, 102);
+	__invalidate(rt, 108);
+	__lookup_matches(rt, 109, 0, 103);
+	__invalidate(rt, 109);
+	__lookup_matches(rt, 110, 0, 104);
+	__invalidate(rt, 110);
+	__lookup_matches(rt, 111, 0, 105);
+	__invalidate(rt, 111);
+	__lookup_matches(rt, 112, 0, 106);
+	__invalidate(rt, 112);
+	__lookup_matches(rt, 113, 0, 107);
+	__invalidate(rt, 113);
+	__lookup_matches(rt, 114, 0, 108);
+	__invalidate(rt, 114);
+	__lookup_matches(rt, 115, 0, 109);
+	__invalidate(rt, 115);
+	__lookup_matches(rt, 116, 0, 110);
+	__invalidate(rt, 116);
+	__lookup_matches(rt, 117, 0, 111);
+	__invalidate(rt, 117);
+	__lookup_matches(rt, 118, 0, 112);
+	__invalidate(rt, 118);
+	__lookup_matches(rt, 119, 0, 113);
+	__invalidate(rt, 119);
+	__lookup_matches(rt, 120, 0, 114);
+	__invalidate(rt, 120);
+	__lookup_matches(rt, 121, 0, 115);
+	__invalidate(rt, 121);
+	__lookup_matches(rt, 122, 0, 116);
+	__invalidate(rt, 122);
+	__lookup_matches(rt, 123, 0, 117);
+	__invalidate(rt, 123);
+	__lookup_matches(rt, 124, 0, 118);
+	__invalidate(rt, 124);
+	__lookup_matches(rt, 125, 0, 119);
+	__invalidate(rt, 125);
+	__lookup_matches(rt, 126, 0, 120);
+	__invalidate(rt, 126);
+	__lookup_matches(rt, 127, 0, 121);
+	__invalidate(rt, 127);
+	__lookup_matches(rt, 128, 0, 122);
+	__invalidate(rt, 128);
+	__lookup_matches(rt, 129, 0, 123);
+	__invalidate(rt, 129);
+	__lookup_matches(rt, 130, 0, 124);
+	__invalidate(rt, 130);
+	__lookup_matches(rt, 131, 0, 125);
+	__invalidate(rt, 131);
+	__lookup_matches(rt, 132, 0, 126);
+	__invalidate(rt, 132);
+	__lookup_matches(rt, 133, 0, 127);
+	__invalidate(rt, 133);
+	__lookup_matches(rt, 134, 0, 128);
+	__invalidate(rt, 134);
+	__lookup_matches(rt, 135, 0, 129);
+	__invalidate(rt, 135);
+	__lookup_matches(rt, 136, 0, 130);
+	__invalidate(rt, 136);
+	__lookup_matches(rt, 137, 0, 131);
+	__invalidate(rt, 137);
+	__lookup_matches(rt, 138, 0, 132);
+	__invalidate(rt, 138);
+	__lookup_matches(rt, 139, 0, 133);
+	__invalidate(rt, 139);
+	__lookup_matches(rt, 140, 0, 134);
+	__invalidate(rt, 140);
+	__lookup_matches(rt, 141, 0, 135);
+	__invalidate(rt, 141);
+	__lookup_matches(rt, 142, 0, 136);
+	__invalidate(rt, 142);
+	__lookup_matches(rt, 143, 0, 137);
+	__invalidate(rt, 143);
+	__lookup_matches(rt, 144, 0, 138);
+	__invalidate(rt, 144);
+	__lookup_matches(rt, 145, 0, 139);
+	__invalidate(rt, 145);
+	__lookup_matches(rt, 146, 0, 140);
+	__invalidate(rt, 146);
+	__lookup_matches(rt, 147, 0, 141);
+	__invalidate(rt, 147);
+	__lookup_matches(rt, 148, 0, 142);
+	__invalidate(rt, 148);
+	__lookup_matches(rt, 149, 0, 143);
+	__invalidate(rt, 149);
+	__lookup_matches(rt, 150, 0, 144);
+	__invalidate(rt, 150);
+	__lookup_matches(rt, 151, 0, 145);
+	__invalidate(rt, 151);
+	__lookup_matches(rt, 152, 0, 146);
+	__invalidate(rt, 152);
+	__lookup_matches(rt, 153, 0, 147);
+	__invalidate(rt, 153);
+	__lookup_matches(rt, 154, 0, 148);
+	__invalidate(rt, 154);
+	__lookup_matches(rt, 155, 0, 149);
+	__invalidate(rt, 155);
+	__lookup_matches(rt, 156, 0, 150);
+	__invalidate(rt, 156);
+	__lookup_matches(rt, 157, 0, 151);
+	__invalidate(rt, 157);
+	__lookup_matches(rt, 158, 0, 152);
+	__invalidate(rt, 158);
+	__lookup_matches(rt, 159, 0, 153);
+	__invalidate(rt, 159);
+	__lookup_matches(rt, 160, 0, 154);
+	__invalidate(rt, 160);
+	__lookup_matches(rt, 161, 0, 155);
+	__invalidate(rt, 161);
+	__lookup_matches(rt, 162, 0, 156);
+	__invalidate(rt, 162);
+	__lookup_matches(rt, 163, 0, 157);
+	__lookup_matches(rt, 164, 0, 158);
+	__invalidate(rt, 164);
+	__lookup_matches(rt, 165, 0, 159);
+	__invalidate(rt, 165);
+	__lookup_matches(rt, 166, 0, 160);
+	__invalidate(rt, 166);
+	__lookup_matches(rt, 167, 0, 161);
+	__invalidate(rt, 167);
+	__lookup_matches(rt, 168, 0, 162);
+	__invalidate(rt, 168);
+	__lookup_matches(rt, 169, 0, 163);
+	__invalidate(rt, 169);
+	__lookup_matches(rt, 170, 0, 164);
+	__invalidate(rt, 170);
+	__lookup_matches(rt, 171, 0, 165);
+	__invalidate(rt, 171);
+	__lookup_matches(rt, 172, 0, 166);
+	__invalidate(rt, 172);
+	__lookup_matches(rt, 173, 0, 167);
+	__invalidate(rt, 173);
+	__lookup_matches(rt, 174, 0, 168);
+	__invalidate(rt, 174);
+	__lookup_matches(rt, 175, 0, 169);
+	__invalidate(rt, 175);
+	__lookup_matches(rt, 176, 0, 170);
+	__invalidate(rt, 176);
+	__lookup_matches(rt, 177, 0, 171);
+	__invalidate(rt, 177);
+	__lookup_matches(rt, 178, 0, 172);
+	__invalidate(rt, 178);
+	__lookup_matches(rt, 179, 0, 173);
+	__invalidate(rt, 179);
+	__lookup_matches(rt, 180, 0, 174);
+	__invalidate(rt, 180);
+	__lookup_matches(rt, 181, 0, 175);
+	__invalidate(rt, 181);
+	__lookup_matches(rt, 182, 0, 176);
+	__invalidate(rt, 182);
+	__lookup_matches(rt, 183, 0, 177);
+	__invalidate(rt, 183);
+	__lookup_matches(rt, 184, 0, 178);
+	__invalidate(rt, 184);
+	__lookup_matches(rt, 185, 0, 179);
+	__invalidate(rt, 185);
+	__lookup_matches(rt, 186, 0, 180);
+	__invalidate(rt, 186);
+	__lookup_matches(rt, 187, 0, 181);
+	__invalidate(rt, 187);
+	__lookup_matches(rt, 188, 0, 182);
+	__invalidate(rt, 188);
+	__lookup_matches(rt, 189, 0, 183);
+	__invalidate(rt, 189);
+	__lookup_matches(rt, 190, 0, 184);
+	__invalidate(rt, 190);
+	__lookup_matches(rt, 191, 0, 185);
+	__invalidate(rt, 191);
+	__lookup_matches(rt, 192, 0, 186);
+	__invalidate(rt, 192);
+	__lookup_matches(rt, 193, 0, 187);
+	__invalidate(rt, 193);
+	__lookup_matches(rt, 194, 0, 188);
+	__invalidate(rt, 194);
+	__lookup_matches(rt, 195, 0, 189);
+	__invalidate(rt, 195);
+	__lookup_matches(rt, 196, 0, 190);
+	__invalidate(rt, 196);
+	__lookup_matches(rt, 197, 0, 191);
+	__invalidate(rt, 197);
+	__lookup_matches(rt, 198, 0, 192);
+	__invalidate(rt, 198);
+	__lookup_matches(rt, 199, 0, 193);
+	__invalidate(rt, 199);
+	__lookup_matches(rt, 200, 0, 194);
+	__invalidate(rt, 200);
+	__lookup_matches(rt, 201, 0, 195);
+	__invalidate(rt, 201);
+	__lookup_matches(rt, 202, 0, 196);
+	__invalidate(rt, 202);
+	__lookup_matches(rt, 203, 0, 197);
+	__invalidate(rt, 203);
+	__lookup_matches(rt, 204, 0, 198);
+	__invalidate(rt, 204);
+	__lookup_matches(rt, 205, 0, 199);
+	__invalidate(rt, 205);
+	__lookup_fails(rt, 6, 0);
+	__insert(rt, 6, 0, 208);
+	__lookup_fails(rt, 7, 0);
+	__insert(rt, 7, 0, 209);
+	__lookup_fails(rt, 8, 0);
+	__insert(rt, 8, 0, 210);
+	__lookup_fails(rt, 9, 0);
+	__insert(rt, 9, 0, 211);
+	__lookup_fails(rt, 10, 0);
+	__insert(rt, 10, 0, 212);
+	__lookup_fails(rt, 11, 0);
+	__insert(rt, 11, 0, 213);
+	__lookup_fails(rt, 13, 0);
+	__insert(rt, 13, 0, 214);
+	__lookup_fails(rt, 14, 0);
+	__insert(rt, 14, 0, 215);
+	__lookup_fails(rt, 15, 0);
+	__insert(rt, 15, 0, 216);
+	__lookup_fails(rt, 16, 0);
+	__insert(rt, 16, 0, 217);
+	__lookup_fails(rt, 17, 0);
+	__insert(rt, 17, 0, 218);
+	__lookup_fails(rt, 18, 0);
+	__insert(rt, 18, 0, 219);
+	__lookup_fails(rt, 19, 0);
+	__insert(rt, 19, 0, 220);
+	__lookup_fails(rt, 20, 0);
+	__insert(rt, 20, 0, 221);
+	__lookup_fails(rt, 21, 0);
+	__insert(rt, 21, 0, 222);
+	__lookup_fails(rt, 22, 0);
+	__insert(rt, 22, 0, 223);
+	__lookup_fails(rt, 23, 0);
+	__insert(rt, 23, 0, 224);
+	__lookup_fails(rt, 24, 0);
+	__insert(rt, 24, 0, 225);
+	__lookup_fails(rt, 25, 0);
+	__insert(rt, 25, 0, 226);
+	__lookup_fails(rt, 26, 0);
+	__insert(rt, 26, 0, 227);
+	__lookup_fails(rt, 27, 0);
+	__insert(rt, 27, 0, 228);
+	__lookup_fails(rt, 28, 0);
+	__insert(rt, 28, 0, 229);
+	__lookup_fails(rt, 29, 0);
+	__insert(rt, 29, 0, 230);
+	__lookup_fails(rt, 30, 0);
+	__insert(rt, 30, 0, 231);
+	__lookup_fails(rt, 31, 0);
+	__insert(rt, 31, 0, 232);
+	__lookup_fails(rt, 32, 0);
+	__insert(rt, 32, 0, 233);
+	__lookup_fails(rt, 33, 0);
+	__insert(rt, 33, 0, 234);
+	__lookup_fails(rt, 34, 0);
+	__insert(rt, 34, 0, 235);
+	__lookup_fails(rt, 35, 0);
+	__insert(rt, 35, 0, 236);
+	__lookup_fails(rt, 36, 0);
+	__insert(rt, 36, 0, 237);
+	__lookup_fails(rt, 37, 0);
+	__insert(rt, 37, 0, 238);
+	__lookup_fails(rt, 38, 0);
+	__insert(rt, 38, 0, 239);
+	__lookup_fails(rt, 39, 0);
+	__insert(rt, 39, 0, 240);
+	__lookup_fails(rt, 40, 0);
+	__insert(rt, 40, 0, 241);
+	__lookup_fails(rt, 41, 0);
+	__insert(rt, 41, 0, 242);
+	__lookup_fails(rt, 42, 0);
+	__insert(rt, 42, 0, 243);
+	__lookup_fails(rt, 43, 0);
+	__insert(rt, 43, 0, 244);
+	__lookup_fails(rt, 44, 0);
+	__insert(rt, 44, 0, 245);
+	__lookup_fails(rt, 45, 0);
+	__insert(rt, 45, 0, 246);
+	__lookup_fails(rt, 47, 0);
+	__insert(rt, 47, 0, 247);
+	__lookup_fails(rt, 48, 0);
+	__insert(rt, 48, 0, 248);
+	__lookup_fails(rt, 49, 0);
+	__insert(rt, 49, 0, 249);
+	__lookup_fails(rt, 50, 0);
+	__insert(rt, 50, 0, 250);
+	__lookup_fails(rt, 51, 0);
+	__insert(rt, 51, 0, 251);
+	__lookup_fails(rt, 52, 0);
+	__insert(rt, 52, 0, 252);
+	__lookup_fails(rt, 53, 0);
+	__insert(rt, 53, 0, 253);
+	__lookup_fails(rt, 54, 0);
+	__insert(rt, 54, 0, 254);
+	__lookup_fails(rt, 55, 0);
+	__insert(rt, 55, 0, 255);
+	__lookup_fails(rt, 56, 0);
+	__insert(rt, 56, 0, 256);
+	__lookup_fails(rt, 57, 0);
+	__insert(rt, 57, 0, 257);
+	__lookup_fails(rt, 58, 0);
+	__insert(rt, 58, 0, 258);
+	__lookup_fails(rt, 59, 0);
+	__insert(rt, 59, 0, 259);
+	__lookup_fails(rt, 60, 0);
+	__insert(rt, 60, 0, 260);
+	__lookup_fails(rt, 61, 0);
+	__insert(rt, 61, 0, 261);
+	__lookup_fails(rt, 62, 0);
+	__insert(rt, 62, 0, 262);
+	__lookup_fails(rt, 63, 0);
+	__insert(rt, 63, 0, 263);
+	__lookup_fails(rt, 64, 0);
+	__insert(rt, 64, 0, 264);
+	__lookup_fails(rt, 66, 0);
+	__insert(rt, 66, 0, 265);
+	__lookup_fails(rt, 67, 0);
+	__insert(rt, 67, 0, 266);
+	__lookup_fails(rt, 68, 0);
+	__insert(rt, 68, 0, 267);
+	__lookup_fails(rt, 69, 0);
+	__insert(rt, 69, 0, 268);
+	__lookup_fails(rt, 70, 0);
+	__insert(rt, 70, 0, 269);
+	__lookup_fails(rt, 71, 0);
+	__insert(rt, 71, 0, 270);
+	__lookup_fails(rt, 72, 0);
+	__insert(rt, 72, 0, 271);
+	__lookup_fails(rt, 73, 0);
+	__insert(rt, 73, 0, 272);
+	__lookup_fails(rt, 74, 0);
+	__insert(rt, 74, 0, 273);
+	__lookup_fails(rt, 75, 0);
+	__insert(rt, 75, 0, 274);
+	__lookup_fails(rt, 76, 0);
+	__insert(rt, 76, 0, 275);
+	__lookup_fails(rt, 77, 0);
+	__insert(rt, 77, 0, 276);
+	__lookup_fails(rt, 78, 0);
+	__insert(rt, 78, 0, 277);
+	__lookup_fails(rt, 79, 0);
+	__insert(rt, 79, 0, 278);
+	__lookup_fails(rt, 80, 0);
+	__insert(rt, 80, 0, 279);
+	__lookup_fails(rt, 81, 0);
+	__insert(rt, 81, 0, 280);
+	__lookup_fails(rt, 82, 0);
+	__insert(rt, 82, 0, 281);
+	__lookup_fails(rt, 83, 0);
+	__insert(rt, 83, 0, 282);
+	__lookup_fails(rt, 84, 0);
+	__insert(rt, 84, 0, 283);
+	__lookup_fails(rt, 85, 0);
+	__insert(rt, 85, 0, 284);
+	__lookup_fails(rt, 86, 0);
+	__insert(rt, 86, 0, 285);
+	__lookup_fails(rt, 87, 0);
+	__insert(rt, 87, 0, 286);
+	__lookup_fails(rt, 88, 0);
+	__insert(rt, 88, 0, 287);
+	__lookup_fails(rt, 89, 0);
+	__insert(rt, 89, 0, 288);
+	__lookup_fails(rt, 90, 0);
+	__insert(rt, 90, 0, 289);
+	__lookup_fails(rt, 91, 0);
+	__insert(rt, 91, 0, 290);
+	__lookup_fails(rt, 92, 0);
+	__insert(rt, 92, 0, 291);
+	__lookup_fails(rt, 93, 0);
+	__insert(rt, 93, 0, 292);
+	__lookup_fails(rt, 94, 0);
+	__insert(rt, 94, 0, 293);
+	__lookup_fails(rt, 95, 0);
+	__insert(rt, 95, 0, 294);
+	__lookup_fails(rt, 97, 0);
+	__insert(rt, 97, 0, 295);
+	__lookup_fails(rt, 98, 0);
+	__insert(rt, 98, 0, 296);
+	__lookup_fails(rt, 99, 0);
+	__insert(rt, 99, 0, 297);
+	__lookup_fails(rt, 100, 0);
+	__insert(rt, 100, 0, 298);
+	__lookup_fails(rt, 101, 0);
+	__insert(rt, 101, 0, 299);
+	__lookup_fails(rt, 102, 0);
+	__insert(rt, 102, 0, 300);
+	__lookup_fails(rt, 103, 0);
+	__insert(rt, 103, 0, 301);
+	__lookup_fails(rt, 104, 0);
+	__insert(rt, 104, 0, 302);
+	__lookup_fails(rt, 105, 0);
+	__insert(rt, 105, 0, 303);
+	__lookup_fails(rt, 106, 0);
+	__insert(rt, 106, 0, 304);
+	__lookup_fails(rt, 107, 0);
+	__insert(rt, 107, 0, 305);
+	__lookup_fails(rt, 108, 0);
+	__insert(rt, 108, 0, 306);
+	__lookup_fails(rt, 109, 0);
+	__insert(rt, 109, 0, 307);
+	__lookup_fails(rt, 110, 0);
+	__insert(rt, 110, 0, 308);
+	__lookup_fails(rt, 111, 0);
+	__insert(rt, 111, 0, 309);
+	__lookup_fails(rt, 112, 0);
+	__insert(rt, 112, 0, 310);
+	__lookup_fails(rt, 113, 0);
+	__insert(rt, 113, 0, 311);
+	__lookup_fails(rt, 114, 0);
+	__insert(rt, 114, 0, 312);
+	__lookup_fails(rt, 115, 0);
+	__insert(rt, 115, 0, 313);
+	__lookup_fails(rt, 116, 0);
+	__insert(rt, 116, 0, 314);
+	__lookup_fails(rt, 117, 0);
+	__insert(rt, 117, 0, 315);
+	__lookup_fails(rt, 118, 0);
+	__insert(rt, 118, 0, 316);
+	__lookup_fails(rt, 119, 0);
+	__insert(rt, 119, 0, 317);
+	__lookup_fails(rt, 120, 0);
+	__insert(rt, 120, 0, 318);
+	__lookup_fails(rt, 121, 0);
+	__insert(rt, 121, 0, 319);
+	__lookup_fails(rt, 122, 0);
+	__insert(rt, 122, 0, 320);
+	__lookup_fails(rt, 123, 0);
+	__insert(rt, 123, 0, 321);
+	__lookup_fails(rt, 124, 0);
+	__insert(rt, 124, 0, 322);
+	__lookup_fails(rt, 125, 0);
+	__insert(rt, 125, 0, 323);
+	__lookup_fails(rt, 126, 0);
+	__insert(rt, 126, 0, 324);
+	__lookup_fails(rt, 127, 0);
+	__insert(rt, 127, 0, 325);
+	__lookup_fails(rt, 128, 0);
+	__insert(rt, 128, 0, 326);
+	__lookup_fails(rt, 129, 0);
+	__insert(rt, 129, 0, 327);
+	__lookup_fails(rt, 130, 0);
+	__insert(rt, 130, 0, 328);
+	__lookup_fails(rt, 131, 0);
+	__insert(rt, 131, 0, 329);
+	__lookup_fails(rt, 132, 0);
+	__insert(rt, 132, 0, 330);
+	__lookup_fails(rt, 133, 0);
+	__insert(rt, 133, 0, 331);
+	__lookup_fails(rt, 134, 0);
+	__insert(rt, 134, 0, 332);
+	__lookup_fails(rt, 135, 0);
+	__insert(rt, 135, 0, 333);
+	__lookup_fails(rt, 136, 0);
+	__insert(rt, 136, 0, 334);
+	__lookup_fails(rt, 137, 0);
+	__insert(rt, 137, 0, 335);
+	__lookup_fails(rt, 138, 0);
+	__insert(rt, 138, 0, 336);
+	__lookup_fails(rt, 139, 0);
+	__insert(rt, 139, 0, 337);
+	__lookup_fails(rt, 140, 0);
+	__insert(rt, 140, 0, 338);
+	__lookup_fails(rt, 141, 0);
+	__insert(rt, 141, 0, 339);
+	__lookup_fails(rt, 142, 0);
+	__insert(rt, 142, 0, 340);
+	__lookup_fails(rt, 143, 0);
+	__insert(rt, 143, 0, 341);
+	__lookup_fails(rt, 144, 0);
+	__insert(rt, 144, 0, 342);
+	__lookup_fails(rt, 145, 0);
+	__insert(rt, 145, 0, 343);
+	__lookup_fails(rt, 146, 0);
+	__insert(rt, 146, 0, 344);
+	__lookup_fails(rt, 147, 0);
+	__insert(rt, 147, 0, 345);
+	__lookup_fails(rt, 148, 0);
+	__insert(rt, 148, 0, 346);
+	__lookup_fails(rt, 149, 0);
+	__insert(rt, 149, 0, 347);
+	__lookup_fails(rt, 150, 0);
+	__insert(rt, 150, 0, 348);
+	__lookup_fails(rt, 151, 0);
+	__insert(rt, 151, 0, 349);
+	__lookup_fails(rt, 152, 0);
+	__insert(rt, 152, 0, 350);
+	__lookup_fails(rt, 153, 0);
+	__insert(rt, 153, 0, 351);
+	__lookup_fails(rt, 154, 0);
+	__insert(rt, 154, 0, 352);
+	__lookup_fails(rt, 155, 0);
+	__insert(rt, 155, 0, 353);
+	__lookup_fails(rt, 156, 0);
+	__insert(rt, 156, 0, 354);
+	__lookup_fails(rt, 157, 0);
+	__insert(rt, 157, 0, 355);
+	__lookup_fails(rt, 158, 0);
+	__insert(rt, 158, 0, 356);
+	__lookup_fails(rt, 159, 0);
+	__insert(rt, 159, 0, 357);
+	__lookup_fails(rt, 160, 0);
+	__insert(rt, 160, 0, 358);
+	__lookup_fails(rt, 161, 0);
+	__insert(rt, 161, 0, 359);
+	__lookup_fails(rt, 162, 0);
+	__insert(rt, 162, 0, 360);
+	__lookup_fails(rt, 164, 0);
+	__insert(rt, 164, 0, 361);
+	__lookup_fails(rt, 165, 0);
+	__insert(rt, 165, 0, 362);
+	__lookup_fails(rt, 166, 0);
+	__insert(rt, 166, 0, 363);
+	__lookup_fails(rt, 167, 0);
+	__insert(rt, 167, 0, 364);
+	__lookup_fails(rt, 168, 0);
+	__insert(rt, 168, 0, 365);
+	__lookup_fails(rt, 169, 0);
+	__insert(rt, 169, 0, 366);
+	__lookup_fails(rt, 170, 0);
+	__insert(rt, 170, 0, 367);
+	__lookup_fails(rt, 171, 0);
+	__insert(rt, 171, 0, 368);
+	__lookup_fails(rt, 172, 0);
+	__insert(rt, 172, 0, 369);
+	__lookup_fails(rt, 173, 0);
+	__insert(rt, 173, 0, 370);
+	__lookup_fails(rt, 174, 0);
+	__insert(rt, 174, 0, 371);
+	__lookup_fails(rt, 175, 0);
+	__insert(rt, 175, 0, 372);
+	__lookup_fails(rt, 176, 0);
+	__insert(rt, 176, 0, 373);
+	__lookup_fails(rt, 177, 0);
+	__insert(rt, 177, 0, 374);
+	__lookup_fails(rt, 178, 0);
+	__insert(rt, 178, 0, 375);
+	__lookup_fails(rt, 179, 0);
+	__insert(rt, 179, 0, 376);
+	__lookup_fails(rt, 180, 0);
+	__insert(rt, 180, 0, 377);
+	__lookup_fails(rt, 181, 0);
+	__insert(rt, 181, 0, 378);
+	__lookup_fails(rt, 182, 0);
+	__insert(rt, 182, 0, 379);
+	__lookup_fails(rt, 183, 0);
+	__insert(rt, 183, 0, 380);
+	__lookup_fails(rt, 184, 0);
+	__insert(rt, 184, 0, 381);
+	__lookup_fails(rt, 185, 0);
+	__insert(rt, 185, 0, 382);
+	__lookup_fails(rt, 186, 0);
+	__insert(rt, 186, 0, 383);
+	__lookup_fails(rt, 187, 0);
+	__insert(rt, 187, 0, 384);
+	__lookup_fails(rt, 188, 0);
+	__insert(rt, 188, 0, 385);
+	__lookup_fails(rt, 189, 0);
+	__insert(rt, 189, 0, 386);
+	__lookup_fails(rt, 190, 0);
+	__insert(rt, 190, 0, 387);
+	__lookup_fails(rt, 191, 0);
+	__insert(rt, 191, 0, 388);
+	__lookup_fails(rt, 192, 0);
+	__insert(rt, 192, 0, 389);
+	__lookup_fails(rt, 193, 0);
+	__insert(rt, 193, 0, 390);
+	__lookup_fails(rt, 194, 0);
+	__insert(rt, 194, 0, 391);
+	__lookup_fails(rt, 195, 0);
+	__insert(rt, 195, 0, 392);
+	__lookup_fails(rt, 196, 0);
+	__insert(rt, 196, 0, 393);
+	__lookup_fails(rt, 197, 0);
+	__insert(rt, 197, 0, 394);
+	__lookup_fails(rt, 198, 0);
+	__insert(rt, 198, 0, 395);
+	__lookup_fails(rt, 199, 0);
+	__insert(rt, 199, 0, 396);
+	__lookup_fails(rt, 200, 0);
+	__insert(rt, 200, 0, 397);
+	__lookup_fails(rt, 201, 0);
+	__insert(rt, 201, 0, 398);
+	__lookup_fails(rt, 202, 0);
+	__insert(rt, 202, 0, 399);
+	__lookup_fails(rt, 203, 0);
+	__insert(rt, 203, 0, 400);
+	__lookup_fails(rt, 204, 0);
+	__insert(rt, 204, 0, 401);
+	__lookup_fails(rt, 205, 0);
+	__insert(rt, 205, 0, 402);
+	__lookup_fails(rt, 206, 0);
+	__insert(rt, 206, 0, 403);
+	__lookup_fails(rt, 207, 0);
+	__insert(rt, 207, 0, 404);
+	__lookup_fails(rt, 208, 0);
+	__insert(rt, 208, 0, 405);
+	__lookup_fails(rt, 209, 0);
+	__insert(rt, 209, 0, 406);
+	__lookup_fails(rt, 210, 0);
+	__insert(rt, 210, 0, 407);
+	__lookup_matches(rt, 6, 0, 208);
+	__invalidate(rt, 6);
+	__lookup_matches(rt, 7, 0, 209);
+	__invalidate(rt, 7);
+	__lookup_matches(rt, 8, 0, 210);
+	__invalidate(rt, 8);
+	__lookup_matches(rt, 9, 0, 211);
+	__invalidate(rt, 9);
+	__lookup_matches(rt, 10, 0, 212);
+	__invalidate(rt, 10);
+	__lookup_matches(rt, 11, 0, 213);
+	__invalidate(rt, 11);
+	__lookup_matches(rt, 13, 0, 214);
+	__invalidate(rt, 13);
+	__lookup_matches(rt, 14, 0, 215);
+	__invalidate(rt, 14);
+	__lookup_matches(rt, 15, 0, 216);
+	__invalidate(rt, 15);
+	__lookup_matches(rt, 16, 0, 217);
+	__invalidate(rt, 16);
+	__lookup_matches(rt, 17, 0, 218);
+	__invalidate(rt, 17);
+	__lookup_matches(rt, 18, 0, 219);
+	__invalidate(rt, 18);
+	__lookup_matches(rt, 19, 0, 220);
+	__invalidate(rt, 19);
+	__lookup_matches(rt, 20, 0, 221);
+	__invalidate(rt, 20);
+	__lookup_matches(rt, 21, 0, 222);
+	__invalidate(rt, 21);
+	__lookup_matches(rt, 22, 0, 223);
+	__invalidate(rt, 22);
+	__lookup_matches(rt, 23, 0, 224);
+	__invalidate(rt, 23);
+	__lookup_matches(rt, 24, 0, 225);
+	__invalidate(rt, 24);
+	__lookup_matches(rt, 25, 0, 226);
+	__invalidate(rt, 25);
+	__lookup_matches(rt, 26, 0, 227);
+	__invalidate(rt, 26);
+	__lookup_matches(rt, 27, 0, 228);
+	__invalidate(rt, 27);
+	__lookup_matches(rt, 28, 0, 229);
+	__invalidate(rt, 28);
+	__lookup_matches(rt, 29, 0, 230);
+	__invalidate(rt, 29);
+	__lookup_matches(rt, 30, 0, 231);
+	__invalidate(rt, 30);
+	__lookup_matches(rt, 31, 0, 232);
+	__invalidate(rt, 31);
+	__lookup_matches(rt, 32, 0, 233);
+	__invalidate(rt, 32);
+	__lookup_matches(rt, 33, 0, 234);
+	__invalidate(rt, 33);
+	__lookup_matches(rt, 34, 0, 235);
+	__invalidate(rt, 34);
+	__lookup_matches(rt, 35, 0, 236);
+	__invalidate(rt, 35);
+	__lookup_matches(rt, 36, 0, 237);
+	__invalidate(rt, 36);
+	__lookup_matches(rt, 37, 0, 238);
+	__invalidate(rt, 37);
+	__lookup_matches(rt, 38, 0, 239);
+	__invalidate(rt, 38);
+	__lookup_matches(rt, 39, 0, 240);
+	__invalidate(rt, 39);
+	__lookup_matches(rt, 40, 0, 241);
+	__invalidate(rt, 40);
+	__lookup_matches(rt, 41, 0, 242);
+	__invalidate(rt, 41);
+	__lookup_matches(rt, 42, 0, 243);
+	__invalidate(rt, 42);
+	__lookup_matches(rt, 43, 0, 244);
+	__invalidate(rt, 43);
+	__lookup_matches(rt, 44, 0, 245);
+	__invalidate(rt, 44);
+	__lookup_matches(rt, 45, 0, 246);
+	__invalidate(rt, 45);
+	__lookup_matches(rt, 47, 0, 247);
+	__invalidate(rt, 47);
+	__lookup_matches(rt, 48, 0, 248);
+	__invalidate(rt, 48);
+	__lookup_matches(rt, 49, 0, 249);
+	__invalidate(rt, 49);
+	__lookup_matches(rt, 50, 0, 250);
+	__invalidate(rt, 50);
+	__lookup_matches(rt, 51, 0, 251);
+	__invalidate(rt, 51);
+	__lookup_matches(rt, 52, 0, 252);
+	__invalidate(rt, 52);
+	__lookup_matches(rt, 53, 0, 253);
+	__invalidate(rt, 53);
+	__lookup_matches(rt, 54, 0, 254);
+	__invalidate(rt, 54);
+	__lookup_matches(rt, 55, 0, 255);
+	__invalidate(rt, 55);
+	__lookup_matches(rt, 56, 0, 256);
+	__invalidate(rt, 56);
+	__lookup_matches(rt, 57, 0, 257);
+	__invalidate(rt, 57);
+	__lookup_matches(rt, 58, 0, 258);
+	__invalidate(rt, 58);
+	__lookup_matches(rt, 59, 0, 259);
+	__invalidate(rt, 59);
+	__lookup_matches(rt, 60, 0, 260);
+	__invalidate(rt, 60);
+	__lookup_matches(rt, 61, 0, 261);
+	__invalidate(rt, 61);
+	__lookup_matches(rt, 62, 0, 262);
+	__invalidate(rt, 62);
+	__lookup_matches(rt, 63, 0, 263);
+	__invalidate(rt, 63);
+	__lookup_matches(rt, 64, 0, 264);
+	__invalidate(rt, 64);
+	__lookup_matches(rt, 66, 0, 265);
+	__invalidate(rt, 66);
+	__lookup_matches(rt, 67, 0, 266);
+	__invalidate(rt, 67);
+	__lookup_matches(rt, 68, 0, 267);
+	__invalidate(rt, 68);
+	__lookup_matches(rt, 69, 0, 268);
+	__invalidate(rt, 69);
+	__lookup_matches(rt, 70, 0, 269);
+	__invalidate(rt, 70);
+	__lookup_matches(rt, 71, 0, 270);
+	__invalidate(rt, 71);
+	__lookup_matches(rt, 72, 0, 271);
+	__invalidate(rt, 72);
+	__lookup_matches(rt, 73, 0, 272);
+	__lookup_matches(rt, 74, 0, 273);
+	__invalidate(rt, 74);
+	__lookup_matches(rt, 75, 0, 274);
+	__invalidate(rt, 75);
+	__lookup_matches(rt, 76, 0, 275);
+	__invalidate(rt, 76);
+	__lookup_matches(rt, 77, 0, 276);
+	__invalidate(rt, 77);
+	__lookup_matches(rt, 78, 0, 277);
+	__invalidate(rt, 78);
+	__lookup_matches(rt, 79, 0, 278);
+	__invalidate(rt, 79);
+	__lookup_matches(rt, 80, 0, 279);
+	__invalidate(rt, 80);
+	__lookup_matches(rt, 81, 0, 280);
+	__invalidate(rt, 81);
+	__lookup_matches(rt, 82, 0, 281);
+	__invalidate(rt, 82);
+	__lookup_matches(rt, 83, 0, 282);
+	__invalidate(rt, 83);
+	__lookup_matches(rt, 84, 0, 283);
+	__invalidate(rt, 84);
+	__lookup_matches(rt, 85, 0, 284);
+	__invalidate(rt, 85);
+	__lookup_matches(rt, 86, 0, 285);
+	__invalidate(rt, 86);
+	__lookup_matches(rt, 87, 0, 286);
+	__invalidate(rt, 87);
+	__lookup_matches(rt, 88, 0, 287);
+	__invalidate(rt, 88);
+	__lookup_matches(rt, 89, 0, 288);
+	__invalidate(rt, 89);
+	__lookup_matches(rt, 90, 0, 289);
+	__invalidate(rt, 90);
+	__lookup_matches(rt, 91, 0, 290);
+	__invalidate(rt, 91);
+	__lookup_matches(rt, 92, 0, 291);
+	__invalidate(rt, 92);
+	__lookup_matches(rt, 93, 0, 292);
+	__invalidate(rt, 93);
+	__lookup_matches(rt, 94, 0, 293);
+	__invalidate(rt, 94);
+	__lookup_matches(rt, 95, 0, 294);
+	__invalidate(rt, 95);
+	__lookup_matches(rt, 97, 0, 295);
+	__invalidate(rt, 97);
+	__lookup_matches(rt, 98, 0, 296);
+	__invalidate(rt, 98);
+	__lookup_matches(rt, 99, 0, 297);
+	__invalidate(rt, 99);
+	__lookup_matches(rt, 100, 0, 298);
+	__invalidate(rt, 100);
+	__lookup_matches(rt, 101, 0, 299);
+	__invalidate(rt, 101);
+	__lookup_matches(rt, 102, 0, 300);
+	__invalidate(rt, 102);
+	__lookup_matches(rt, 103, 0, 301);
+	__invalidate(rt, 103);
+	__lookup_matches(rt, 104, 0, 302);
+	__invalidate(rt, 104);
+	__lookup_matches(rt, 105, 0, 303);
+	__invalidate(rt, 105);
+	__lookup_matches(rt, 106, 0, 304);
+	__invalidate(rt, 106);
+	__lookup_matches(rt, 107, 0, 305);
+	__invalidate(rt, 107);
+	__lookup_matches(rt, 108, 0, 306);
+	__invalidate(rt, 108);
+	__lookup_matches(rt, 109, 0, 307);
+	__invalidate(rt, 109);
+	__lookup_matches(rt, 110, 0, 308);
+	__invalidate(rt, 110);
+	__lookup_matches(rt, 111, 0, 309);
+	__invalidate(rt, 111);
+	__lookup_matches(rt, 112, 0, 310);
+	__invalidate(rt, 112);
+	__lookup_matches(rt, 113, 0, 311);
+	__invalidate(rt, 113);
+	__lookup_matches(rt, 114, 0, 312);
+	__invalidate(rt, 114);
+	__lookup_matches(rt, 115, 0, 313);
+	__invalidate(rt, 115);
+	__lookup_matches(rt, 116, 0, 314);
+	__invalidate(rt, 116);
+	__lookup_matches(rt, 117, 0, 315);
+	__invalidate(rt, 117);
+	__lookup_matches(rt, 118, 0, 316);
+	__invalidate(rt, 118);
+	__lookup_matches(rt, 119, 0, 317);
+	__invalidate(rt, 119);
+	__lookup_matches(rt, 120, 0, 318);
+	__invalidate(rt, 120);
+	__lookup_matches(rt, 121, 0, 319);
+	__invalidate(rt, 121);
+	__lookup_matches(rt, 122, 0, 320);
+	__invalidate(rt, 122);
+	__lookup_matches(rt, 123, 0, 321);
+	__invalidate(rt, 123);
+	__lookup_matches(rt, 124, 0, 322);
+	__invalidate(rt, 124);
+	__lookup_matches(rt, 125, 0, 323);
+	__invalidate(rt, 125);
+	__lookup_matches(rt, 126, 0, 324);
+	__invalidate(rt, 126);
+	__lookup_matches(rt, 127, 0, 325);
+	__invalidate(rt, 127);
+	__lookup_matches(rt, 128, 0, 326);
+	__invalidate(rt, 128);
+	__lookup_matches(rt, 129, 0, 327);
+	__invalidate(rt, 129);
+	__lookup_matches(rt, 130, 0, 328);
+	__invalidate(rt, 130);
+	__lookup_matches(rt, 131, 0, 329);
+	__invalidate(rt, 131);
+	__lookup_matches(rt, 132, 0, 330);
+	__invalidate(rt, 132);
+	__lookup_matches(rt, 133, 0, 331);
+	__invalidate(rt, 133);
+	__lookup_matches(rt, 134, 0, 332);
+	__invalidate(rt, 134);
+	__lookup_matches(rt, 135, 0, 333);
+	__invalidate(rt, 135);
+	__lookup_matches(rt, 136, 0, 334);
+	__invalidate(rt, 136);
+	__lookup_matches(rt, 137, 0, 335);
+	__invalidate(rt, 137);
+	__lookup_matches(rt, 138, 0, 336);
+	__invalidate(rt, 138);
+	__lookup_matches(rt, 139, 0, 337);
+	__invalidate(rt, 139);
+	__lookup_matches(rt, 140, 0, 338);
+	__invalidate(rt, 140);
+	__lookup_matches(rt, 141, 0, 339);
+	__invalidate(rt, 141);
+	__lookup_matches(rt, 142, 0, 340);
+	__invalidate(rt, 142);
+	__lookup_matches(rt, 143, 0, 341);
+	__invalidate(rt, 143);
+	__lookup_matches(rt, 144, 0, 342);
+	__invalidate(rt, 144);
+	__lookup_matches(rt, 145, 0, 343);
+	__invalidate(rt, 145);
+	__lookup_matches(rt, 146, 0, 344);
+	__invalidate(rt, 146);
+	__lookup_matches(rt, 147, 0, 345);
+	__invalidate(rt, 147);
+	__lookup_matches(rt, 148, 0, 346);
+	__invalidate(rt, 148);
+	__lookup_matches(rt, 149, 0, 347);
+	__invalidate(rt, 149);
+	__lookup_matches(rt, 150, 0, 348);
+	__invalidate(rt, 150);
+	__lookup_matches(rt, 151, 0, 349);
+	__invalidate(rt, 151);
+	__lookup_matches(rt, 152, 0, 350);
+	__invalidate(rt, 152);
+	__lookup_matches(rt, 153, 0, 351);
+	__invalidate(rt, 153);
+	__lookup_matches(rt, 154, 0, 352);
+	__invalidate(rt, 154);
+	__lookup_matches(rt, 155, 0, 353);
+	__invalidate(rt, 155);
+	__lookup_matches(rt, 156, 0, 354);
+	__invalidate(rt, 156);
+	__lookup_matches(rt, 157, 0, 355);
+	__invalidate(rt, 157);
+	__lookup_matches(rt, 158, 0, 356);
+	__invalidate(rt, 158);
+	__lookup_matches(rt, 159, 0, 357);
+	__invalidate(rt, 159);
+	__lookup_matches(rt, 160, 0, 358);
+	__invalidate(rt, 160);
+	__lookup_matches(rt, 161, 0, 359);
+	__invalidate(rt, 161);
+	__lookup_matches(rt, 162, 0, 360);
+	__invalidate(rt, 162);
+	__lookup_matches(rt, 164, 0, 361);
+	__invalidate(rt, 164);
+	__lookup_matches(rt, 165, 0, 362);
+	__invalidate(rt, 165);
+	__lookup_matches(rt, 166, 0, 363);
+	__invalidate(rt, 166);
+	__lookup_matches(rt, 167, 0, 364);
+	__invalidate(rt, 167);
+	__lookup_matches(rt, 168, 0, 365);
+	__invalidate(rt, 168);
+	__lookup_matches(rt, 169, 0, 366);
+	__invalidate(rt, 169);
+	__lookup_matches(rt, 170, 0, 367);
+	__invalidate(rt, 170);
+	__lookup_matches(rt, 171, 0, 368);
+	__invalidate(rt, 171);
+	__lookup_matches(rt, 172, 0, 369);
+	__invalidate(rt, 172);
+	__lookup_matches(rt, 173, 0, 370);
+	__invalidate(rt, 173);
+	__lookup_matches(rt, 174, 0, 371);
+	__invalidate(rt, 174);
+	__lookup_matches(rt, 175, 0, 372);
+	__invalidate(rt, 175);
+	__lookup_matches(rt, 176, 0, 373);
+	__invalidate(rt, 176);
+	__lookup_matches(rt, 177, 0, 374);
+	__invalidate(rt, 177);
+	__lookup_matches(rt, 178, 0, 375);
+	__invalidate(rt, 178);
+	__lookup_matches(rt, 179, 0, 376);
+	__invalidate(rt, 179);
+	__lookup_matches(rt, 180, 0, 377);
+	__invalidate(rt, 180);
+	__lookup_matches(rt, 181, 0, 378);
+	__invalidate(rt, 181);
+	__lookup_matches(rt, 182, 0, 379);
+	__invalidate(rt, 182);
+	__lookup_matches(rt, 183, 0, 380);
+	__invalidate(rt, 183);
+	__lookup_matches(rt, 184, 0, 381);
+	__invalidate(rt, 184);
+	__lookup_matches(rt, 185, 0, 382);
+	__invalidate(rt, 185);
+	__lookup_matches(rt, 186, 0, 383);
+	__invalidate(rt, 186);
+	__lookup_matches(rt, 187, 0, 384);
+	__invalidate(rt, 187);
+	__lookup_matches(rt, 188, 0, 385);
+	__invalidate(rt, 188);
+	__lookup_matches(rt, 189, 0, 386);
+	__invalidate(rt, 189);
+	__lookup_matches(rt, 190, 0, 387);
+	__invalidate(rt, 190);
+	__lookup_matches(rt, 191, 0, 388);
+	__invalidate(rt, 191);
+	__lookup_matches(rt, 192, 0, 389);
+	__invalidate(rt, 192);
+	__lookup_matches(rt, 193, 0, 390);
+	__invalidate(rt, 193);
+	__lookup_matches(rt, 194, 0, 391);
+	__invalidate(rt, 194);
+	__lookup_matches(rt, 195, 0, 392);
+	__invalidate(rt, 195);
+	__lookup_matches(rt, 196, 0, 393);
+	__invalidate(rt, 196);
+	__lookup_matches(rt, 197, 0, 394);
+	__invalidate(rt, 197);
+	__lookup_matches(rt, 198, 0, 395);
+	__invalidate(rt, 198);
+	__lookup_matches(rt, 199, 0, 396);
+	__invalidate(rt, 199);
+	__lookup_matches(rt, 200, 0, 397);
+	__invalidate(rt, 200);
+	__lookup_matches(rt, 201, 0, 398);
+	__invalidate(rt, 201);
+	__lookup_matches(rt, 202, 0, 399);
+	__invalidate(rt, 202);
+	__lookup_matches(rt, 203, 0, 400);
+	__invalidate(rt, 203);
+	__lookup_matches(rt, 204, 0, 401);
+	__invalidate(rt, 204);
+	__lookup_matches(rt, 205, 0, 402);
+	__invalidate(rt, 205);
+	__lookup_matches(rt, 206, 0, 403);
+	__invalidate(rt, 206);
+	__lookup_matches(rt, 207, 0, 404);
+	__invalidate(rt, 207);
+	__lookup_matches(rt, 208, 0, 405);
+	__invalidate(rt, 208);
+	__lookup_matches(rt, 209, 0, 406);
+	__invalidate(rt, 209);
+	__lookup_matches(rt, 210, 0, 407);
+	__invalidate(rt, 210);
+	__lookup_fails(rt, 6, 0);
+	__insert(rt, 6, 0, 408);
+	__lookup_fails(rt, 7, 0);
+	__insert(rt, 7, 0, 409);
+	__lookup_fails(rt, 8, 0);
+	__insert(rt, 8, 0, 410);
+	__lookup_fails(rt, 9, 0);
+	__insert(rt, 9, 0, 411);
+	__lookup_fails(rt, 10, 0);
+	__insert(rt, 10, 0, 412);
+	__lookup_fails(rt, 11, 0);
+	__insert(rt, 11, 0, 413);
+	__lookup_fails(rt, 13, 0);
+	__insert(rt, 13, 0, 414);
+	__lookup_fails(rt, 14, 0);
+	__insert(rt, 14, 0, 415);
+	__lookup_fails(rt, 15, 0);
+	__insert(rt, 15, 0, 416);
+	__lookup_fails(rt, 16, 0);
+	__insert(rt, 16, 0, 417);
+	__lookup_fails(rt, 17, 0);
+	__insert(rt, 17, 0, 418);
+	__lookup_fails(rt, 18, 0);
+	__insert(rt, 18, 0, 419);
+	__lookup_fails(rt, 19, 0);
+	__insert(rt, 19, 0, 420);
+	__lookup_fails(rt, 20, 0);
+	__insert(rt, 20, 0, 421);
+	__lookup_fails(rt, 21, 0);
+	__insert(rt, 21, 0, 422);
+	__lookup_fails(rt, 22, 0);
+	__insert(rt, 22, 0, 423);
+	__lookup_fails(rt, 23, 0);
+	__insert(rt, 23, 0, 424);
+	__lookup_matches(rt, 6, 0, 408);
+	__invalidate(rt, 6);
+	__lookup_matches(rt, 7, 0, 409);
+	__invalidate(rt, 7);
+	__lookup_matches(rt, 8, 0, 410);
+	__invalidate(rt, 8);
+	__lookup_matches(rt, 9, 0, 411);
+	__invalidate(rt, 9);
+	__lookup_matches(rt, 10, 0, 412);
+	__invalidate(rt, 10);
+	__lookup_matches(rt, 11, 0, 413);
+	__invalidate(rt, 11);
+	__lookup_matches(rt, 13, 0, 414);
+	__invalidate(rt, 13);
+	__lookup_matches(rt, 14, 0, 415);
diff --git a/test/unit/unit-test.sh b/test/unit/unit-test.sh
index e8332d6..f545f14 100644
--- a/test/unit/unit-test.sh
+++ b/test/unit/unit-test.sh
@@ -13,8 +13,6 @@
 
 SKIP_WITH_LVMLOCKD=1
 SKIP_WITH_LVMPOLLD=1
-SKIP_WITH_LVMETAD=1
-SKIP_WITH_CLVMD=1
 
 SKIP_ROOT_DM_CHECK=1
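
Note (not part of the patch above): the rt_case1.c body is clearly a machine-generated call trace; in the first run shown in this hunk, every key k from 46 to 205 is looked up once (expected to miss) and then inserted with value k - 6. A throwaway generator along the following lines could reproduce that shape. This is purely an illustrative sketch: the emit_insert_run helper, its name, and the hard-coded ranges are assumptions taken from the visible pattern, not code from the lvm2 tree.

/*
 * Illustrative only -- NOT part of the patch.  Prints the repetitive
 * __lookup_fails/__insert pairs seen at the top of this hunk
 * (keys 46..205 with second key component 0, values 40..199).
 */
#include <stdio.h>

static void emit_insert_run(unsigned first_key, unsigned last_key,
			    unsigned first_value)
{
	unsigned key, value = first_value;

	for (key = first_key; key <= last_key; key++, value++) {
		/* Each key is expected to miss before it is inserted. */
		printf("\t__lookup_fails(rt, %u, 0);\n", key);
		printf("\t__insert(rt, %u, 0, %u);\n", key, value);
	}
}

int main(void)
{
	/* Matches the run shown above: keys 46..205, values 40..199. */
	emit_insert_run(46, 205, 40);
	return 0;
}

Compiling and running this prints exactly the lookup/insert pairs that open this hunk; the later lookup/invalidate and re-insert runs in rt_case1.c follow the same mechanical pattern with different ranges.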