SOURCES/autofs-5.1.7-refactor-lookup_prune_one_cache-a-bit.patch

autofs-5.1.7 - refactor lookup_prune_one_cache() a bit

From: Ian Kent <raven@themaw.net>

Coverity: use: Using an unreliable value of "me" inside the second locked
	  section.

Change lookup_prune_one_cache() a little: move the point at which the next
key is set (to before the lock is released) and add a comment explaining
why we don't care about the side effects of the read lock release/
write lock acquire/write lock release/read lock reacquire.
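
As a rough guide to the shape of the change, here is a simplified sketch of
the loop after this patch. It uses only the cache_*() calls already visible
in the hunks below, elides the wildcard, submount and valid-entry checks,
and assumes mc is the map entry cache being pruned; it is illustrative, not
a copy of lookup_prune_one_cache():

	struct mapent *me;
	char *key, *next_key;

	cache_readlock(mc);
	me = cache_enumerate(mc, NULL);
	while (me) {
		next_key = NULL;
		key = strdup(me->key);

		/* Advance the enumeration and copy the next key while the
		 * read lock is still held, so "me" is never used after the
		 * lock has been dropped. */
		me = cache_enumerate(mc, me);
		if (me)
			next_key = strdup(me->key);

		cache_unlock(mc);
		/* ... prune the stale entry for "key" under the write lock ... */
		cache_readlock(mc);

		/* Re-resolve the next entry by key rather than trusting the
		 * old pointer across the lock release/reacquire; if it has
		 * gone away the loop just ends early, as the new comment
		 * explains. */
		me = NULL;
		if (next_key) {
			me = cache_lookup_distinct(mc, next_key);
			free(next_key);
		}
		free(key);
	}
	cache_unlock(mc);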

Signed-off-by: Ian Kent <raven@themaw.net>
---
 CHANGELOG       |    1 +
 daemon/lookup.c |   20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 1 deletion(-)

--- autofs-5.1.4.orig/CHANGELOG
+++ autofs-5.1.4/CHANGELOG
@@ -63,6 +63,7 @@
 - fix arg not used in error print.
 - fix missing lock release in mount_subtree().
 - fix double free in parse_mapent().
+- refactor lookup_prune_one_cache() a bit.
 
 xx/xx/2018 autofs-5.1.5
 - fix flag file permission.
--- autofs-5.1.4.orig/daemon/lookup.c
+++ autofs-5.1.4/daemon/lookup.c
@@ -1383,7 +1383,6 @@ void lookup_prune_one_cache(struct autof
 		}
 
 		key = strdup(me->key);
-		me = cache_enumerate(mc, me);
 		/* Don't consider any entries with a wildcard */
 		if (!key || strchr(key, '*')) {
 			if (key)
@@ -1430,6 +1429,7 @@ void lookup_prune_one_cache(struct autof
 		if (valid)
 			cache_unlock(valid->mc);
 
+		me = cache_enumerate(mc, me);
 		if (me)
 			next_key = strdup(me->key);
 
@@ -1464,6 +1464,24 @@ void lookup_prune_one_cache(struct autof
 next:
 		cache_readlock(mc);
 		if (next_key) {
+			/* The lock release and reaquire above can mean
+			 * a number of things could happen.
+			 *
+			 * First, mapents could be added between the
+			 * current mapent and the mapent of next_key.
+			 * Don't care about that because there's no
+			 * need to prune newly added entries.
+			 *
+			 * Second, the next mapent data could have
+			 * changed. Don't care about that either since
+			 * we are looking to prune stale map entries
+			 * and don't care when they become stale.
+			 *
+			 * Finally, the mapent of next_key could have
+			 * gone away. Again don't care about this either,
+			 * the loop will exit prematurely so just wait
+			 * until the next prune and try again.
+			 */
 			me = cache_lookup_distinct(mc, next_key);
 			free(next_key);
 		}