From 75a9d946d252ce70460144615ca17dbdf2e80fab Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Fri, 7 Feb 2020 10:19:57 +0100
Subject: [PATCH 354/355] core: fix memory pool management races

Objects allocated from a per-thread memory pool keep a reference to it
so that they can be returned to the pool when no longer used. The
object holding this reference can have a long life cycle that could
survive a glfs_fini() call.
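
As a rough sketch of this pattern (simplified, hypothetical names; the
real structures are pooled_obj_hdr_t and per_thread_pool_t in
mem-pool.h):

    struct thread_pool;

    typedef struct obj_hdr {
        struct thread_pool *pool; /* back-reference kept for the whole
                                   * life of the object, possibly past
                                   * glfs_fini() */
        struct obj_hdr *next;
    } obj_hdr_t;

    typedef struct thread_pool {
        obj_hdr_t *hot_list; /* recently released objects */
    } thread_pool_t;

    static void
    obj_put(obj_hdr_t *obj)
    {
        /* Releasing dereferences the owning pool. If glfs_fini() had
         * already destroyed it, this would be a use-after-free. */
        obj->next = obj->pool->hot_list;
        obj->pool->hot_list = obj;
    }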

This means that it's unsafe to destroy memory pools from glfs_fini().

Another side effect of destroying memory pools from glfs_fini() is that
the TLS variable that points to one of those pools cannot be reset for
the threads that are still alive. This means that any attempt to
allocate memory from those threads will access already freed memory,
which is very dangerous.
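
A minimal sketch of that hazard, using hypothetical helpers (the real
TLS variable is thread_pool_list in mem-pool.c):

    struct thread_pool;                        /* hypothetical pool type */
    struct thread_pool *create_pool(void);     /* hypothetical helper */
    void *take_from(struct thread_pool *pool); /* hypothetical helper */

    static __thread struct thread_pool *my_pool = NULL;

    void *
    obj_get(void)
    {
        if (my_pool == NULL) {
            my_pool = create_pool();
        }
        /* glfs_fini() cannot reach into other threads' TLS to reset
         * my_pool, so if it freed the pool, this call dereferences
         * freed memory. */
        return take_from(my_pool);
    }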

To fix these issues, mem_pools_fini() doesn't destroy pool lists
anymore. The pools are only destroyed when the process terminates.
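
The patch implements this with a GCC/Clang destructor function
(mem_pools_postfini() in the diff below); a standalone sketch of the
mechanism:

    #include <stdio.h>

    /* A function marked __attribute__((destructor)) runs when the
     * process terminates (or the library is dlclose()d), the only point
     * where no thread can legitimately be using the pools anymore. */
    static __attribute__((destructor)) void
    pools_atexit(void)
    {
        /* The real mem_pools_postfini() walks pool_threads and
         * pool_free_threads here, releasing every
         * per_thread_pool_list_t. */
        fprintf(stderr, "destroying memory pools at process termination\n");
    }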

Upstream patch:
> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24099
> Change-Id: Ib189a5510ab6bdac78983c6c65a022e9634b0965
> Fixes: bz#1801684
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>

Change-Id: Ib189a5510ab6bdac78983c6c65a022e9634b0965
BUG: 1800703
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/192262
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 libglusterfs/src/globals.c            |  13 ++-
 libglusterfs/src/glusterfs/globals.h  |   3 +
 libglusterfs/src/glusterfs/mem-pool.h |  28 ++---
 libglusterfs/src/mem-pool.c           | 201 ++++++++++++++++++----------------
 libglusterfs/src/syncop.c             |   7 ++
 5 files changed, 146 insertions(+), 106 deletions(-)

diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
index 02098e6..e433ee8 100644
--- a/libglusterfs/src/globals.c
+++ b/libglusterfs/src/globals.c
@@ -319,7 +319,18 @@ glusterfs_cleanup(void *ptr)
         GF_FREE(thread_syncopctx.groups);
     }
 
-    mem_pool_thread_destructor();
+    mem_pool_thread_destructor(NULL);
+}
+
+void
+gf_thread_needs_cleanup(void)
+{
+    /* The value stored in free_key TLS is not really used for anything, but
+     * pthread implementation doesn't call the TLS destruction function unless
+     * it's != NULL. This function must be called whenever something is
+     * allocated for this thread so that glusterfs_cleanup() will be called
+     * and resources can be released. */
+    (void)pthread_setspecific(free_key, (void *)1);
 }
 
 static void
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index e218285..31717ed 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -181,6 +181,9 @@ glusterfs_leaseid_exist(void);
 int
 glusterfs_globals_init(glusterfs_ctx_t *ctx);
 
+void
+gf_thread_needs_cleanup(void);
+
 struct tvec_base *
 glusterfs_ctx_tw_get(glusterfs_ctx_t *ctx);
 void
diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h
index be0a26d..97bf76c 100644
--- a/libglusterfs/src/glusterfs/mem-pool.h
+++ b/libglusterfs/src/glusterfs/mem-pool.h
@@ -245,24 +245,26 @@ typedef struct per_thread_pool {
 } per_thread_pool_t;
 
 typedef struct per_thread_pool_list {
-    /*
-     * These first two members are protected by the global pool lock.  When
-     * a thread first tries to use any pool, we create one of these.  We
-     * link it into the global list using thr_list so the pool-sweeper
-     * thread can find it, and use pthread_setspecific so this thread can
-     * find it.  When the per-thread destructor runs, we "poison" the pool
-     * list to prevent further allocations.  This also signals to the
-     * pool-sweeper thread that the list should be detached and freed after
-     * the next time it's swept.
-     */
+    /* thr_list is used to place the TLS pool_list into the active global list
+     * (pool_threads) or the inactive global list (pool_free_threads). It's
+     * protected by the global pool_lock. */
     struct list_head thr_list;
-    unsigned int poison;
+
+    /* This lock is used to update poison and the hot/cold lists of members
+     * of 'pools' array. */
+    pthread_spinlock_t lock;
+
+    /* This field is used to mark a pool_list as not being owned by any thread.
+     * This means that the sweeper thread won't be cleaning objects stored in
+     * its pools. mem_put() uses it to decide if the object being released is
+     * placed into its original pool_list or directly destroyed. */
+    bool poison;
+
     /*
      * There's really more than one pool, but the actual number is hidden
      * in the implementation code so we just make it a single-element array
      * here.
      */
-    pthread_spinlock_t lock;
     per_thread_pool_t pools[1];
 } per_thread_pool_list_t;
 
@@ -307,7 +309,7 @@ void
 mem_pool_destroy(struct mem_pool *pool);
 
 void
-mem_pool_thread_destructor(void);
+mem_pool_thread_destructor(per_thread_pool_list_t *pool_list);
 
 void
 gf_mem_acct_enable_set(void *ctx);
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index d88041d..2b41c01 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -367,7 +367,6 @@ static __thread per_thread_pool_list_t *thread_pool_list = NULL;
 #define POOL_SWEEP_SECS 30
 
 typedef struct {
-    struct list_head death_row;
     pooled_obj_hdr_t *cold_lists[N_COLD_LISTS];
     unsigned int n_cold_lists;
 } sweep_state_t;
@@ -384,36 +383,33 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
 static unsigned int init_count = 0;
 static pthread_t sweeper_tid;
 
-gf_boolean_t
+static bool
 collect_garbage(sweep_state_t *state, per_thread_pool_list_t *pool_list)
 {
     unsigned int i;
     per_thread_pool_t *pt_pool;
-    gf_boolean_t poisoned;
 
     (void)pthread_spin_lock(&pool_list->lock);
 
-    poisoned = pool_list->poison != 0;
-    if (!poisoned) {
-        for (i = 0; i < NPOOLS; ++i) {
-            pt_pool = &pool_list->pools[i];
-            if (pt_pool->cold_list) {
-                if (state->n_cold_lists >= N_COLD_LISTS) {
-                    break;
-                }
-                state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
+    for (i = 0; i < NPOOLS; ++i) {
+        pt_pool = &pool_list->pools[i];
+        if (pt_pool->cold_list) {
+            if (state->n_cold_lists >= N_COLD_LISTS) {
+                (void)pthread_spin_unlock(&pool_list->lock);
+                return true;
             }
-            pt_pool->cold_list = pt_pool->hot_list;
-            pt_pool->hot_list = NULL;
+            state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
         }
+        pt_pool->cold_list = pt_pool->hot_list;
+        pt_pool->hot_list = NULL;
     }
 
     (void)pthread_spin_unlock(&pool_list->lock);
 
-    return poisoned;
+    return false;
 }
 
-void
+static void
 free_obj_list(pooled_obj_hdr_t *victim)
 {
     pooled_obj_hdr_t *next;
@@ -425,82 +421,96 @@ free_obj_list(pooled_obj_hdr_t *victim)
     }
 }
 
-void *
+static void *
 pool_sweeper(void *arg)
 {
     sweep_state_t state;
     per_thread_pool_list_t *pool_list;
-    per_thread_pool_list_t *next_pl;
-    per_thread_pool_t *pt_pool;
-    unsigned int i;
-    gf_boolean_t poisoned;
+    uint32_t i;
+    bool pending;
 
     /*
      * This is all a bit inelegant, but the point is to avoid doing
      * expensive things (like freeing thousands of objects) while holding a
-     * global lock.  Thus, we split each iteration into three passes, with
+     * global lock.  Thus, we split each iteration into two passes, with
      * only the first and fastest holding the lock.
      */
 
+    pending = true;
+
     for (;;) {
-        sleep(POOL_SWEEP_SECS);
+        /* If we know there's pending work to do (or it's the first run), we
+         * do collect garbage more often. */
+        sleep(pending ? POOL_SWEEP_SECS / 5 : POOL_SWEEP_SECS);
+
         (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
-        INIT_LIST_HEAD(&state.death_row);
         state.n_cold_lists = 0;
+        pending = false;
 
         /* First pass: collect stuff that needs our attention. */
         (void)pthread_mutex_lock(&pool_lock);
-        list_for_each_entry_safe(pool_list, next_pl, &pool_threads, thr_list)
+        list_for_each_entry(pool_list, &pool_threads, thr_list)
         {
-            (void)pthread_mutex_unlock(&pool_lock);
-            poisoned = collect_garbage(&state, pool_list);
-            (void)pthread_mutex_lock(&pool_lock);
-
-            if (poisoned) {
-                list_move(&pool_list->thr_list, &state.death_row);
+            if (collect_garbage(&state, pool_list)) {
+                pending = true;
             }
         }
         (void)pthread_mutex_unlock(&pool_lock);
 
-        /* Second pass: free dead pools. */
-        (void)pthread_mutex_lock(&pool_free_lock);
-        list_for_each_entry_safe(pool_list, next_pl, &state.death_row, thr_list)
-        {
-            for (i = 0; i < NPOOLS; ++i) {
-                pt_pool = &pool_list->pools[i];
-                free_obj_list(pt_pool->cold_list);
-                free_obj_list(pt_pool->hot_list);
-                pt_pool->hot_list = pt_pool->cold_list = NULL;
-            }
-            list_del(&pool_list->thr_list);
-            list_add(&pool_list->thr_list, &pool_free_threads);
-        }
-        (void)pthread_mutex_unlock(&pool_free_lock);
-
-        /* Third pass: free cold objects from live pools. */
+        /* Second pass: free cold objects from live pools. */
         for (i = 0; i < state.n_cold_lists; ++i) {
             free_obj_list(state.cold_lists[i]);
         }
         (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
     }
+
+    return NULL;
 }
 
 void
-mem_pool_thread_destructor(void)
+mem_pool_thread_destructor(per_thread_pool_list_t *pool_list)
 {
-    per_thread_pool_list_t *pool_list = thread_pool_list;
-
-    /* The pool-sweeper thread will take it from here.
-     *
-     * We can change 'poison' here without taking locks because the change
-     * itself doesn't interact with other parts of the code and a simple write
-     * is already atomic from the point of view of the processor.
-     *
-     * This change can modify what mem_put() does, but both possibilities are
-     * fine until the sweeper thread kicks in. The real synchronization must be
-     * between mem_put() and the sweeper thread. */
+    per_thread_pool_t *pt_pool;
+    uint32_t i;
+
+    if (pool_list == NULL) {
+        pool_list = thread_pool_list;
+    }
+
+    /* The current thread is terminating. None of the allocated objects will
+     * be used again. We can directly destroy them here instead of delaying
+     * it until the next sweeper loop. */
     if (pool_list != NULL) {
-        pool_list->poison = 1;
+        /* Remove pool_list from the global list to avoid that sweeper
+         * could touch it. */
+        pthread_mutex_lock(&pool_lock);
+        list_del(&pool_list->thr_list);
+        pthread_mutex_unlock(&pool_lock);
+
+        /* We need to protect hot/cold changes from potential mem_put() calls
+         * that reference this pool_list. Once poison is set to true, we are
+         * sure that no one else will touch hot/cold lists. The only possible
+         * race is when at the same moment a mem_put() is adding a new item
+         * to the hot list. We protect from that by taking pool_list->lock.
+         * After that we don't need the lock to destroy the hot/cold lists. */
+        pthread_spin_lock(&pool_list->lock);
+        pool_list->poison = true;
+        pthread_spin_unlock(&pool_list->lock);
+
+        for (i = 0; i < NPOOLS; i++) {
+            pt_pool = &pool_list->pools[i];
+
+            free_obj_list(pt_pool->hot_list);
+            pt_pool->hot_list = NULL;
+
+            free_obj_list(pt_pool->cold_list);
+            pt_pool->cold_list = NULL;
+        }
+
+        pthread_mutex_lock(&pool_free_lock);
+        list_add(&pool_list->thr_list, &pool_free_threads);
+        pthread_mutex_unlock(&pool_free_lock);
+
         thread_pool_list = NULL;
     }
 }
@@ -528,6 +538,30 @@ mem_pools_preinit(void)
     init_done = GF_MEMPOOL_INIT_EARLY;
 }
 
+static __attribute__((destructor)) void
+mem_pools_postfini(void)
+{
+    per_thread_pool_list_t *pool_list, *next;
+
+    /* This is part of a process shutdown (or dlclose()) which means that
+     * most probably all threads should be stopped. However this is not the
+     * case for gluster and there are even legitimate situations in which we
+     * could have some threads alive. What is sure is that none of those
+     * threads should be using anything from this library, so destroying
+     * everything here should be fine and safe. */
+
+    list_for_each_entry_safe(pool_list, next, &pool_threads, thr_list)
+    {
+        mem_pool_thread_destructor(pool_list);
+    }
+
+    list_for_each_entry_safe(pool_list, next, &pool_free_threads, thr_list)
+    {
+        list_del(&pool_list->thr_list);
+        FREE(pool_list);
+    }
+}
+
 /* Call mem_pools_init() once threading has been configured completely. This
  * prevent the pool_sweeper thread from getting killed once the main() thread
  * exits during deamonizing. */
@@ -560,10 +594,6 @@ mem_pools_fini(void)
              */
             break;
         case 1: {
-            per_thread_pool_list_t *pool_list;
-            per_thread_pool_list_t *next_pl;
-            unsigned int i;
-
             /* if mem_pools_init() was not called, sweeper_tid will be invalid
              * and the functions will error out. That is not critical. In all
              * other cases, the sweeper_tid will be valid and the thread gets
@@ -571,32 +601,11 @@ mem_pools_fini(void)
             (void)pthread_cancel(sweeper_tid);
             (void)pthread_join(sweeper_tid, NULL);
 
-            /* At this point all threads should have already terminated, so
-             * it should be safe to destroy all pending per_thread_pool_list_t
-             * structures that are stored for each thread. */
-            mem_pool_thread_destructor();
-
-            /* free all objects from all pools */
-            list_for_each_entry_safe(pool_list, next_pl, &pool_threads,
-                                     thr_list)
-            {
-                for (i = 0; i < NPOOLS; ++i) {
-                    free_obj_list(pool_list->pools[i].hot_list);
-                    free_obj_list(pool_list->pools[i].cold_list);
-                    pool_list->pools[i].hot_list = NULL;
-                    pool_list->pools[i].cold_list = NULL;
-                }
-
-                list_del(&pool_list->thr_list);
-                FREE(pool_list);
-            }
-
-            list_for_each_entry_safe(pool_list, next_pl, &pool_free_threads,
-                                     thr_list)
-            {
-                list_del(&pool_list->thr_list);
-                FREE(pool_list);
-            }
+            /* There could be threads still running in some cases, so we can't
+             * destroy pool_lists in use. We can also not destroy unused
+             * pool_lists because some allocated objects may still be pointing
+             * to them. */
+            mem_pool_thread_destructor(NULL);
 
             init_done = GF_MEMPOOL_INIT_DESTROY;
             /* Fall through. */
@@ -617,7 +626,7 @@ mem_pools_fini(void)
 {
 }
 void
-mem_pool_thread_destructor(void)
+mem_pool_thread_destructor(per_thread_pool_list_t *pool_list)
 {
 }
 
@@ -738,13 +747,21 @@ mem_get_pool_list(void)
         }
     }
 
+    /* There's no need to take pool_list->lock, because this is already an
+     * atomic operation and we don't need to synchronize it with any change
+     * in hot/cold lists. */
+    pool_list->poison = false;
+
     (void)pthread_mutex_lock(&pool_lock);
-    pool_list->poison = 0;
     list_add(&pool_list->thr_list, &pool_threads);
     (void)pthread_mutex_unlock(&pool_lock);
 
     thread_pool_list = pool_list;
 
+    /* Ensure that all memory objects associated to the new pool_list are
+     * destroyed when the thread terminates. */
+    gf_thread_needs_cleanup();
+
     return pool_list;
 }
 
diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
index 2eb7b49..0de53c6 100644
--- a/libglusterfs/src/syncop.c
+++ b/libglusterfs/src/syncop.c
@@ -97,6 +97,13 @@ syncopctx_setfsgroups(int count, const void *groups)
 
     /* set/reset the ngrps, this is where reset of groups is handled */
     opctx->ngrps = count;
+
+    if ((opctx->valid & SYNCOPCTX_GROUPS) == 0) {
+        /* This is the first time we are storing groups into the TLS structure
+         * so we mark the current thread so that it will be properly cleaned
+         * up when the thread terminates. */
+        gf_thread_needs_cleanup();
+    }
     opctx->valid |= SYNCOPCTX_GROUPS;
 
 out:
-- 
1.8.3.1