Blame SOURCES/autofs-5.1.6-add-hashtable-implementation.patch

49b67f
autofs-5.1.6 - add hashtable implementation
49b67f
49b67f
From: Ian Kent <raven@themaw.net>
49b67f
49b67f
Include the (slightly modified) Linux kernel hashtable implementation.
49b67f
49b67f
Signed-off-by: Ian Kent <raven@themaw.net>
49b67f
---
49b67f
 CHANGELOG           |    1 
49b67f
 include/automount.h |   19 -----
49b67f
 include/hash.h      |  101 +++++++++++++++++++++++++++++++
49b67f
 include/hashtable.h |  166 ++++++++++++++++++++++++++++++++++++++++++++++++++++
49b67f
 4 files changed, 269 insertions(+), 18 deletions(-)
49b67f
 create mode 100644 include/hash.h
49b67f
 create mode 100644 include/hashtable.h
49b67f
49b67f
--- autofs-5.1.4.orig/CHANGELOG
49b67f
+++ autofs-5.1.4/CHANGELOG
49b67f
@@ -117,6 +117,7 @@ xx/xx/2018 autofs-5.1.5
49b67f
 - make bind mounts propagation slave by default.
49b67f
 - fix browse dir not re-created on symlink expire.
49b67f
 - update list.h.
49b67f
+- add hashtable implementation.
49b67f
 
49b67f
 19/12/2017 autofs-5.1.4
49b67f
 - fix spec file url.
49b67f
--- autofs-5.1.4.orig/include/automount.h
49b67f
+++ autofs-5.1.4/include/automount.h
49b67f
@@ -22,6 +22,7 @@
49b67f
 #include <mntent.h>
49b67f
 #include "config.h"
49b67f
 #include "list.h"
49b67f
+#include "hash.h"
49b67f
 
49b67f
 #include <linux/auto_fs4.h>
49b67f
 
49b67f
@@ -143,24 +144,6 @@ struct autofs_point;
49b67f
 #define UMOUNT_RETRIES		8
49b67f
 #define EXPIRE_RETRIES		3
49b67f
 
49b67f
-static u_int32_t inline hash(const char *key, unsigned int size)
49b67f
-{
49b67f
-	u_int32_t hashval;
49b67f
-	char *s = (char *) key;
49b67f
-
49b67f
-	for (hashval = 0; *s != '\0';) {
49b67f
-		hashval += (unsigned char) *s++;
49b67f
-		hashval += (hashval << 10);
49b67f
-		hashval ^= (hashval >> 6);
49b67f
-	}
49b67f
-
49b67f
-	hashval += (hashval << 3);
49b67f
-	hashval ^= (hashval >> 11);
49b67f
-	hashval += (hashval << 15);
49b67f
-
49b67f
-	return hashval % size;
49b67f
-}
49b67f
-
49b67f
 struct mapent_cache {
49b67f
 	pthread_rwlock_t rwlock;
49b67f
 	unsigned int size;
49b67f
--- /dev/null
49b67f
+++ autofs-5.1.4/include/hash.h
49b67f
@@ -0,0 +1,101 @@
49b67f
+#ifndef _LINUX_HASH_H
49b67f
+#define _LINUX_HASH_H
49b67f
+/* Fast hashing routine for ints,  longs and pointers.
49b67f
+   (C) 2002 Nadia Yvette Chambers, IBM */
49b67f
+
49b67f
+#include <sys/types.h>
49b67f
+#include <stdint.h>
49b67f
+
49b67f
+/*
49b67f
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
49b67f
+ * fs/inode.c.  It's not actually prime any more (the previous primes
49b67f
+ * were actively bad for hashing), but the name remains.
49b67f
+ */
49b67f
+#if __WORDSIZE == 32
49b67f
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
49b67f
+#define hash_long(val, bits) hash_32(val, bits)
49b67f
+#elif __WORDSIZE == 64
49b67f
+#define hash_long(val, bits) hash_64(val, bits)
49b67f
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
49b67f
+#else
49b67f
+#error Wordsize not 32 or 64
49b67f
+#endif
49b67f
+
49b67f
+/* String based hash function */
49b67f
+static uint32_t inline hash(const char *key, unsigned int size)
49b67f
+{
49b67f
+	u_int32_t hashval;
49b67f
+	char *s = (char *) key;
49b67f
+
49b67f
+	for (hashval = 0; *s != '\0';) {
49b67f
+		hashval += (unsigned char) *s++;
49b67f
+		hashval += (hashval << 10);
49b67f
+		hashval ^= (hashval >> 6);
49b67f
+	}
49b67f
+
49b67f
+	hashval += (hashval << 3);
49b67f
+	hashval ^= (hashval >> 11);
49b67f
+	hashval += (hashval << 15);
49b67f
+
49b67f
+	return hashval % size;
49b67f
+}
49b67f
+
49b67f
+/*
49b67f
+ * This hash multiplies the input by a large odd number and takes the
49b67f
+ * high bits.  Since multiplication propagates changes to the most
49b67f
+ * significant end only, it is essential that the high bits of the
49b67f
+ * product be used for the hash value.
49b67f
+ *
49b67f
+ * Chuck Lever verified the effectiveness of this technique:
49b67f
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
49b67f
+ *
49b67f
+ * Although a random odd number will do, it turns out that the golden
49b67f
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
49b67f
+ * properties.  (See Knuth vol 3, section 6.4, exercise 9.)
49b67f
+ *
49b67f
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
49b67f
+ * which is very slightly easier to multiply by and makes no
49b67f
+ * difference to the hash distribution.
49b67f
+ */
49b67f
+#define GOLDEN_RATIO_32 0x61C88647
49b67f
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
49b67f
+
49b67f
+static inline uint32_t __hash_32(uint32_t val)
49b67f
+{
49b67f
+	return val * GOLDEN_RATIO_32;
49b67f
+}
49b67f
+
49b67f
+static inline uint32_t hash_32(uint32_t val, unsigned int bits)
49b67f
+{
49b67f
+	/* High bits are more random, so use them. */
49b67f
+	return __hash_32(val) >> (32 - bits);
49b67f
+}
49b67f
+
49b67f
+static __always_inline uint32_t hash_64(uint64_t val, unsigned int bits)
49b67f
+{
49b67f
+#if __WORDSIZE == 64
49b67f
+	/* 64x64-bit multiply is efficient on all 64-bit processors */
49b67f
+	return val * GOLDEN_RATIO_64 >> (64 - bits);
49b67f
+#else
49b67f
+	/* Hash 64 bits using only 32x32-bit multiply. */
49b67f
+	return hash_32((uint32_t) val ^ __hash_32(val >> 32), bits);
49b67f
+#endif
49b67f
+}
49b67f
+
49b67f
+static inline uint32_t hash_ptr(const void *ptr, unsigned int bits)
49b67f
+{
49b67f
+	return hash_long((unsigned long) ptr, bits);
49b67f
+}
49b67f
+
49b67f
+/* This really should be called fold32_ptr; it does no hashing to speak of. */
49b67f
+static inline uint32_t hash32_ptr(const void *ptr)
49b67f
+{
49b67f
+	unsigned long val = (unsigned long) ptr;
49b67f
+
49b67f
+#if __WORDSIZE == 64
49b67f
+	val ^= (val >> 32);
49b67f
+#endif
49b67f
+	return (uint32_t) val;
49b67f
+}
49b67f
+
49b67f
+#endif /* _LINUX_HASH_H */
49b67f
--- /dev/null
49b67f
+++ autofs-5.1.4/include/hashtable.h
49b67f
@@ -0,0 +1,166 @@
49b67f
+/*
49b67f
+ * Statically sized hash table implementation
49b67f
+ * (C) 2012  Sasha Levin <levinsasha928@gmail.com>
49b67f
+ */
49b67f
+
49b67f
+#ifndef _LINUX_HASHTABLE_H
49b67f
+#define _LINUX_HASHTABLE_H
49b67f
+
49b67f
+#include "list.h"
49b67f
+#include "hash.h"
49b67f
+
49b67f
+#ifndef ARRAY_SIZE
49b67f
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
49b67f
+#endif
49b67f
+
49b67f
+static inline unsigned int ilog2(unsigned long val) {
49b67f
+	unsigned int ret = -1;
49b67f
+
49b67f
+	while (val != 0) {
49b67f
+		val >>= 1;
49b67f
+		ret++;
49b67f
+	}
49b67f
+	return ret;
49b67f
+}
49b67f
+
49b67f
+#define DEFINE_HASHTABLE(name, bits)						\
49b67f
+	struct hlist_head name[1 << (bits)] =					\
49b67f
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
49b67f
+
49b67f
+#define DECLARE_HASHTABLE(name, bits)                                   	\
49b67f
+	struct hlist_head name[1 << (bits)]
49b67f
+
49b67f
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
49b67f
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
49b67f
+
49b67f
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
49b67f
+#define hash_min(val, bits)							\
49b67f
+	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
49b67f
+
49b67f
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
49b67f
+{
49b67f
+	unsigned int i;
49b67f
+
49b67f
+	for (i = 0; i < sz; i++)
49b67f
+		INIT_HLIST_HEAD(&ht[i]);
49b67f
+}
49b67f
+
49b67f
+/**
49b67f
+ * hash_init - initialize a hash table
49b67f
+ * @hashtable: hashtable to be initialized
49b67f
+ *
49b67f
+ * Calculates the size of the hashtable from the given parameter, otherwise
49b67f
+ * same as hash_init_size.
49b67f
+ *
49b67f
+ * This has to be a macro since HASH_BITS() will not work on pointers since
49b67f
+ * it calculates the size during preprocessing.
49b67f
+ */
49b67f
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
49b67f
+
49b67f
+/**
49b67f
+ * hash_add - add an object to a hashtable
49b67f
+ * @hashtable: hashtable to add to
49b67f
+ * @node: the &struct hlist_node of the object to be added
49b67f
+ * @key: the key of the object to be added
49b67f
+ */
49b67f
+#define hash_add(hashtable, node, key)						\
49b67f
+	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
49b67f
+
49b67f
+/**
49b67f
+ * hash_add_str - add a string object to a hashtable
49b67f
+ * @hashtable: hashtable to add to
49b67f
+ * @node: the &struct hlist_node of the object to be added
49b67f
+ * @key: the string key of the object to be added
49b67f
+ */
49b67f
+#define hash_add_str(hashtable, node, key)						\
49b67f
+	hlist_add_head(node, &hashtable[hash(key, HASH_SIZE(hashtable))])
49b67f
+
49b67f
+/**
49b67f
+ * hash_hashed - check whether an object is in any hashtable
49b67f
+ * @node: the &struct hlist_node of the object to be checked
49b67f
+ */
49b67f
+static inline int hash_hashed(struct hlist_node *node)
49b67f
+{
49b67f
+	return !hlist_unhashed(node);
49b67f
+}
49b67f
+
49b67f
+static inline int __hash_empty(struct hlist_head *ht, unsigned int sz)
49b67f
+{
49b67f
+	unsigned int i;
49b67f
+
49b67f
+	for (i = 0; i < sz; i++)
49b67f
+		if (!hlist_empty(&ht[i]))
49b67f
+			return 0;
49b67f
+
49b67f
+	return 1;
49b67f
+}
49b67f
+
49b67f
+/**
49b67f
+ * hash_empty - check whether a hashtable is empty
49b67f
+ * @hashtable: hashtable to check
49b67f
+ *
49b67f
+ * This has to be a macro since HASH_BITS() will not work on pointers since
49b67f
+ * it calculates the size during preprocessing.
49b67f
+ */
49b67f
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
49b67f
+
49b67f
+/**
49b67f
+ * hash_del - remove an object from a hashtable
49b67f
+ * @node: &struct hlist_node of the object to remove
49b67f
+ */
49b67f
+static inline void hash_del(struct hlist_node *node)
49b67f
+{
49b67f
+	hlist_del_init(node);
49b67f
+}
49b67f
+
49b67f
+/**
49b67f
+ * hash_for_each - iterate over a hashtable
49b67f
+ * @name: hashtable to iterate
49b67f
+ * @bkt: integer to use as bucket loop cursor
49b67f
+ * @obj: the type * to use as a loop cursor for each entry
49b67f
+ * @member: the name of the hlist_node within the struct
49b67f
+ */
49b67f
+#define hash_for_each(name, bkt, obj, member)				\
49b67f
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
49b67f
+			(bkt)++)\
49b67f
+		hlist_for_each_entry(obj, &name[bkt], member)
49b67f
+
49b67f
+/**
49b67f
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
49b67f
+ * hash entry
49b67f
+ * @name: hashtable to iterate
49b67f
+ * @bkt: integer to use as bucket loop cursor
49b67f
+ * @tmp: a &struct used for temporary storage
49b67f
+ * @obj: the type * to use as a loop cursor for each entry
49b67f
+ * @member: the name of the hlist_node within the struct
49b67f
+ */
49b67f
+#define hash_for_each_safe(name, bkt, tmp, obj, member)			\
49b67f
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
49b67f
+			(bkt)++)\
49b67f
+		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
49b67f
+
49b67f
+/**
49b67f
+ * hash_for_each_possible - iterate over all possible objects hashing to the
49b67f
+ * same bucket
49b67f
+ * @name: hashtable to iterate
49b67f
+ * @obj: the type * to use as a loop cursor for each entry
49b67f
+ * @member: the name of the hlist_node within the struct
49b67f
+ * @key: the key of the objects to iterate over
49b67f
+ */
49b67f
+#define hash_for_each_possible(name, obj, member, key)			\
49b67f
+	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
49b67f
+
49b67f
+/**
49b67f
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
49b67f
+ * same bucket safe against removals
49b67f
+ * @name: hashtable to iterate
49b67f
+ * @obj: the type * to use as a loop cursor for each entry
49b67f
+ * @tmp: a &struct used for temporary storage
49b67f
+ * @member: the name of the hlist_node within the struct
49b67f
+ * @key: the key of the objects to iterate over
49b67f
+ */
49b67f
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)	\
49b67f
+	hlist_for_each_entry_safe(obj, tmp,\
49b67f
+		&name[hash_min(key, HASH_BITS(name))], member)
49b67f
+
49b67f
+#endif