diff --git a/README.debrand b/README.debrand
deleted file mode 100644
index 01c46d2..0000000
--- a/README.debrand
+++ /dev/null
@@ -1,2 +0,0 @@
-Warning: This package was configured for automatic debranding, but the changes
-failed to apply.
diff --git a/SOURCES/0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch b/SOURCES/0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
new file mode 100644
index 0000000..1d9a389
--- /dev/null
+++ b/SOURCES/0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
@@ -0,0 +1,38 @@
+From 8074906ace5fbd71b5d24cc3da5571ebdebed859 Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Thu, 2 Jan 2020 11:27:47 +0000
+Subject: [PATCH 352/353] spec: fixed missing dependencies for
+ glusterfs-cloudsync-plugins
+
+RPMDiff raises a warning: subpackage glusterfs-cloudsync-plugins
+on x86_64 consumes library libglusterfs.so.0()(64bit) from
+subpackage glusterfs-libs but does not have an explicit package
+version requirement. This patch adds that explicit requirement.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1775564
+
+Change-Id: I05ea46ac2c92090f01c07dfbd6e0d66498f1c586
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/188619
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 671ee27..e95e539 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -374,6 +374,7 @@ This package provides the GlusterFS CLI application and its man page
+ %package cloudsync-plugins
+ Summary:          Cloudsync Plugins
+ BuildRequires:    libcurl-devel
++Requires:         glusterfs-libs = %{version}-%{release}
+ 
+ %description cloudsync-plugins
+ GlusterFS is a distributed file-system capable of scaling to several
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch b/SOURCES/0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
new file mode 100644
index 0000000..e436373
--- /dev/null
+++ b/SOURCES/0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
@@ -0,0 +1,47 @@
+From 37e2d76579abf38031d1cd9769da798fa04b183a Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Wed, 22 Jan 2020 14:14:33 -0500
+Subject: [PATCH 353/353] build: glusterfs-ganesha pkg requires
+ python3-policycoreutils on rhel8
+
+glusterfs-ganesha pkg requires policycoreutils-python-utils on rhel8,
+not policycoreutils-python
+
+also requires nfs-ganesha-selinux on rhel-8 (optional on rhel-7)
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: Ia97b4dabdc098fb76e3f60e8b48ea4191e677136
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+BUG: 1794153
+Reviewed-on: https://code.engineering.redhat.com/gerrit/190130
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index e95e539..7c8a751 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -462,6 +462,7 @@ Summary:          NFS-Ganesha configuration
+ Group:            Applications/File
+ 
+ Requires:         %{name}-server%{?_isa} = %{version}-%{release}
++Requires:         nfs-ganesha-selinux >= 2.7.3
+ Requires:         nfs-ganesha-gluster >= 2.7.3
+ Requires:         pcs, dbus
+ %if ( 0%{?rhel} && 0%{?rhel} == 6 )
+@@ -475,7 +476,7 @@ Requires:         net-tools
+ %endif
+ 
+ %if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+-%if ( 0%{?rhel} )
++%if ( 0%{?rhel} && 0%{?rhel} < 8 )
+ Requires: selinux-policy >= 3.13.1-160
+ Requires(post):   policycoreutils-python
+ Requires(postun): policycoreutils-python
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0354-core-fix-memory-pool-management-races.patch b/SOURCES/0354-core-fix-memory-pool-management-races.patch
new file mode 100644
index 0000000..a7cdfc0
--- /dev/null
+++ b/SOURCES/0354-core-fix-memory-pool-management-races.patch
@@ -0,0 +1,466 @@
+From 75a9d946d252ce70460144615ca17dbdf2e80fab Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Fri, 7 Feb 2020 10:19:57 +0100
+Subject: [PATCH 354/355] core: fix memory pool management races
+
+Objects allocated from a per-thread memory pool keep a reference to it
+to be able to return the object to the pool when not used anymore. The
+object holding this reference can have a long life cycle that could
+survive a glfs_fini() call.
+
+This means that it's unsafe to destroy memory pools from glfs_fini().
+
+Another side effect of destroying memory pools from glfs_fini() is that
+the TLS variable that points to one of those pools cannot be reset for
+all alive threads.  This means that any attempt to allocate memory from
+those threads will access already free'd memory, which is very
+dangerous.
+
+To fix these issues, mem_pools_fini() doesn't destroy pool lists
+anymore. The pools are only destroyed at process termination.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24099
+> Change-Id: Ib189a5510ab6bdac78983c6c65a022e9634b0965
+> Fixes: bz#1801684
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: Ib189a5510ab6bdac78983c6c65a022e9634b0965
+BUG: 1800703
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/192262
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/globals.c            |  13 ++-
+ libglusterfs/src/glusterfs/globals.h  |   3 +
+ libglusterfs/src/glusterfs/mem-pool.h |  28 ++---
+ libglusterfs/src/mem-pool.c           | 201 ++++++++++++++++++----------------
+ libglusterfs/src/syncop.c             |   7 ++
+ 5 files changed, 146 insertions(+), 106 deletions(-)
+
+diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
+index 02098e6..e433ee8 100644
+--- a/libglusterfs/src/globals.c
++++ b/libglusterfs/src/globals.c
+@@ -319,7 +319,18 @@ glusterfs_cleanup(void *ptr)
+         GF_FREE(thread_syncopctx.groups);
+     }
+ 
+-    mem_pool_thread_destructor();
++    mem_pool_thread_destructor(NULL);
++}
++
++void
++gf_thread_needs_cleanup(void)
++{
++    /* The value stored in free_key TLS is not really used for anything, but
++     * pthread implementation doesn't call the TLS destruction function unless
++     * it's != NULL. This function must be called whenever something is
++     * allocated for this thread so that glusterfs_cleanup() will be called
++     * and resources can be released. */
++    (void)pthread_setspecific(free_key, (void *)1);
+ }
+ 
+ static void
+diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
+index e218285..31717ed 100644
+--- a/libglusterfs/src/glusterfs/globals.h
++++ b/libglusterfs/src/glusterfs/globals.h
+@@ -181,6 +181,9 @@ glusterfs_leaseid_exist(void);
+ int
+ glusterfs_globals_init(glusterfs_ctx_t *ctx);
+ 
++void
++gf_thread_needs_cleanup(void);
++
+ struct tvec_base *
+ glusterfs_ctx_tw_get(glusterfs_ctx_t *ctx);
+ void
+diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h
+index be0a26d..97bf76c 100644
+--- a/libglusterfs/src/glusterfs/mem-pool.h
++++ b/libglusterfs/src/glusterfs/mem-pool.h
+@@ -245,24 +245,26 @@ typedef struct per_thread_pool {
+ } per_thread_pool_t;
+ 
+ typedef struct per_thread_pool_list {
+-    /*
+-     * These first two members are protected by the global pool lock.  When
+-     * a thread first tries to use any pool, we create one of these.  We
+-     * link it into the global list using thr_list so the pool-sweeper
+-     * thread can find it, and use pthread_setspecific so this thread can
+-     * find it.  When the per-thread destructor runs, we "poison" the pool
+-     * list to prevent further allocations.  This also signals to the
+-     * pool-sweeper thread that the list should be detached and freed after
+-     * the next time it's swept.
+-     */
++    /* thr_list is used to place the TLS pool_list into the active global list
++     * (pool_threads) or the inactive global list (pool_free_threads). It's
++     * protected by the global pool_lock. */
+     struct list_head thr_list;
+-    unsigned int poison;
++
++    /* This lock is used to update poison and the hot/cold lists of members
++     * of 'pools' array. */
++    pthread_spinlock_t lock;
++
++    /* This field is used to mark a pool_list as not being owned by any thread.
++     * This means that the sweeper thread won't be cleaning objects stored in
++     * its pools. mem_put() uses it to decide if the object being released is
++     * placed into its original pool_list or directly destroyed. */
++    bool poison;
++
+     /*
+      * There's really more than one pool, but the actual number is hidden
+      * in the implementation code so we just make it a single-element array
+      * here.
+      */
+-    pthread_spinlock_t lock;
+     per_thread_pool_t pools[1];
+ } per_thread_pool_list_t;
+ 
+@@ -307,7 +309,7 @@ void
+ mem_pool_destroy(struct mem_pool *pool);
+ 
+ void
+-mem_pool_thread_destructor(void);
++mem_pool_thread_destructor(per_thread_pool_list_t *pool_list);
+ 
+ void
+ gf_mem_acct_enable_set(void *ctx);
+diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
+index d88041d..2b41c01 100644
+--- a/libglusterfs/src/mem-pool.c
++++ b/libglusterfs/src/mem-pool.c
+@@ -367,7 +367,6 @@ static __thread per_thread_pool_list_t *thread_pool_list = NULL;
+ #define POOL_SWEEP_SECS 30
+ 
+ typedef struct {
+-    struct list_head death_row;
+     pooled_obj_hdr_t *cold_lists[N_COLD_LISTS];
+     unsigned int n_cold_lists;
+ } sweep_state_t;
+@@ -384,36 +383,33 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static unsigned int init_count = 0;
+ static pthread_t sweeper_tid;
+ 
+-gf_boolean_t
++static bool
+ collect_garbage(sweep_state_t *state, per_thread_pool_list_t *pool_list)
+ {
+     unsigned int i;
+     per_thread_pool_t *pt_pool;
+-    gf_boolean_t poisoned;
+ 
+     (void)pthread_spin_lock(&pool_list->lock);
+ 
+-    poisoned = pool_list->poison != 0;
+-    if (!poisoned) {
+-        for (i = 0; i < NPOOLS; ++i) {
+-            pt_pool = &pool_list->pools[i];
+-            if (pt_pool->cold_list) {
+-                if (state->n_cold_lists >= N_COLD_LISTS) {
+-                    break;
+-                }
+-                state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
++    for (i = 0; i < NPOOLS; ++i) {
++        pt_pool = &pool_list->pools[i];
++        if (pt_pool->cold_list) {
++            if (state->n_cold_lists >= N_COLD_LISTS) {
++                (void)pthread_spin_unlock(&pool_list->lock);
++                return true;
+             }
+-            pt_pool->cold_list = pt_pool->hot_list;
+-            pt_pool->hot_list = NULL;
++            state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
+         }
++        pt_pool->cold_list = pt_pool->hot_list;
++        pt_pool->hot_list = NULL;
+     }
+ 
+     (void)pthread_spin_unlock(&pool_list->lock);
+ 
+-    return poisoned;
++    return false;
+ }
+ 
+-void
++static void
+ free_obj_list(pooled_obj_hdr_t *victim)
+ {
+     pooled_obj_hdr_t *next;
+@@ -425,82 +421,96 @@ free_obj_list(pooled_obj_hdr_t *victim)
+     }
+ }
+ 
+-void *
++static void *
+ pool_sweeper(void *arg)
+ {
+     sweep_state_t state;
+     per_thread_pool_list_t *pool_list;
+-    per_thread_pool_list_t *next_pl;
+-    per_thread_pool_t *pt_pool;
+-    unsigned int i;
+-    gf_boolean_t poisoned;
++    uint32_t i;
++    bool pending;
+ 
+     /*
+      * This is all a bit inelegant, but the point is to avoid doing
+      * expensive things (like freeing thousands of objects) while holding a
+-     * global lock.  Thus, we split each iteration into three passes, with
++     * global lock.  Thus, we split each iteration into two passes, with
+      * only the first and fastest holding the lock.
+      */
+ 
++    pending = true;
++
+     for (;;) {
+-        sleep(POOL_SWEEP_SECS);
++        /* If we know there's pending work to do (or it's the first run), we
++         * do collect garbage more often. */
++        sleep(pending ? POOL_SWEEP_SECS / 5 : POOL_SWEEP_SECS);
++
+         (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+-        INIT_LIST_HEAD(&state.death_row);
+         state.n_cold_lists = 0;
++        pending = false;
+ 
+         /* First pass: collect stuff that needs our attention. */
+         (void)pthread_mutex_lock(&pool_lock);
+-        list_for_each_entry_safe(pool_list, next_pl, &pool_threads, thr_list)
++        list_for_each_entry(pool_list, &pool_threads, thr_list)
+         {
+-            (void)pthread_mutex_unlock(&pool_lock);
+-            poisoned = collect_garbage(&state, pool_list);
+-            (void)pthread_mutex_lock(&pool_lock);
+-
+-            if (poisoned) {
+-                list_move(&pool_list->thr_list, &state.death_row);
++            if (collect_garbage(&state, pool_list)) {
++                pending = true;
+             }
+         }
+         (void)pthread_mutex_unlock(&pool_lock);
+ 
+-        /* Second pass: free dead pools. */
+-        (void)pthread_mutex_lock(&pool_free_lock);
+-        list_for_each_entry_safe(pool_list, next_pl, &state.death_row, thr_list)
+-        {
+-            for (i = 0; i < NPOOLS; ++i) {
+-                pt_pool = &pool_list->pools[i];
+-                free_obj_list(pt_pool->cold_list);
+-                free_obj_list(pt_pool->hot_list);
+-                pt_pool->hot_list = pt_pool->cold_list = NULL;
+-            }
+-            list_del(&pool_list->thr_list);
+-            list_add(&pool_list->thr_list, &pool_free_threads);
+-        }
+-        (void)pthread_mutex_unlock(&pool_free_lock);
+-
+-        /* Third pass: free cold objects from live pools. */
++        /* Second pass: free cold objects from live pools. */
+         for (i = 0; i < state.n_cold_lists; ++i) {
+             free_obj_list(state.cold_lists[i]);
+         }
+         (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+     }
++
++    return NULL;
+ }
+ 
+ void
+-mem_pool_thread_destructor(void)
++mem_pool_thread_destructor(per_thread_pool_list_t *pool_list)
+ {
+-    per_thread_pool_list_t *pool_list = thread_pool_list;
+-
+-    /* The pool-sweeper thread will take it from here.
+-     *
+-     * We can change 'poison' here without taking locks because the change
+-     * itself doesn't interact with other parts of the code and a simple write
+-     * is already atomic from the point of view of the processor.
+-     *
+-     * This change can modify what mem_put() does, but both possibilities are
+-     * fine until the sweeper thread kicks in. The real synchronization must be
+-     * between mem_put() and the sweeper thread. */
++    per_thread_pool_t *pt_pool;
++    uint32_t i;
++
++    if (pool_list == NULL) {
++        pool_list = thread_pool_list;
++    }
++
++    /* The current thread is terminating. None of the allocated objects will
++     * be used again. We can directly destroy them here instead of delaying
++     * it until the next sweeper loop. */
+     if (pool_list != NULL) {
+-        pool_list->poison = 1;
++        /* Remove pool_list from the global list to avoid that sweeper
++         * could touch it. */
++        pthread_mutex_lock(&pool_lock);
++        list_del(&pool_list->thr_list);
++        pthread_mutex_unlock(&pool_lock);
++
++        /* We need to protect hot/cold changes from potential mem_put() calls
++         * that reference this pool_list. Once poison is set to true, we are
++         * sure that no one else will touch hot/cold lists. The only possible
++         * race is when at the same moment a mem_put() is adding a new item
++         * to the hot list. We protect from that by taking pool_list->lock.
++         * After that we don't need the lock to destroy the hot/cold lists. */
++        pthread_spin_lock(&pool_list->lock);
++        pool_list->poison = true;
++        pthread_spin_unlock(&pool_list->lock);
++
++        for (i = 0; i < NPOOLS; i++) {
++            pt_pool = &pool_list->pools[i];
++
++            free_obj_list(pt_pool->hot_list);
++            pt_pool->hot_list = NULL;
++
++            free_obj_list(pt_pool->cold_list);
++            pt_pool->cold_list = NULL;
++        }
++
++        pthread_mutex_lock(&pool_free_lock);
++        list_add(&pool_list->thr_list, &pool_free_threads);
++        pthread_mutex_unlock(&pool_free_lock);
++
+         thread_pool_list = NULL;
+     }
+ }
+@@ -528,6 +538,30 @@ mem_pools_preinit(void)
+     init_done = GF_MEMPOOL_INIT_EARLY;
+ }
+ 
++static __attribute__((destructor)) void
++mem_pools_postfini(void)
++{
++    per_thread_pool_list_t *pool_list, *next;
++
++    /* This is part of a process shutdown (or dlclose()) which means that
++     * most probably all threads should be stopped. However this is not the
++     * case for gluster and there are even legitimate situations in which we
++     * could have some threads alive. What is sure is that none of those
++     * threads should be using anything from this library, so destroying
++     * everything here should be fine and safe. */
++
++    list_for_each_entry_safe(pool_list, next, &pool_threads, thr_list)
++    {
++        mem_pool_thread_destructor(pool_list);
++    }
++
++    list_for_each_entry_safe(pool_list, next, &pool_free_threads, thr_list)
++    {
++        list_del(&pool_list->thr_list);
++        FREE(pool_list);
++    }
++}
++
+ /* Call mem_pools_init() once threading has been configured completely. This
+  * prevent the pool_sweeper thread from getting killed once the main() thread
+  * exits during deamonizing. */
+@@ -560,10 +594,6 @@ mem_pools_fini(void)
+              */
+             break;
+         case 1: {
+-            per_thread_pool_list_t *pool_list;
+-            per_thread_pool_list_t *next_pl;
+-            unsigned int i;
+-
+             /* if mem_pools_init() was not called, sweeper_tid will be invalid
+              * and the functions will error out. That is not critical. In all
+              * other cases, the sweeper_tid will be valid and the thread gets
+@@ -571,32 +601,11 @@ mem_pools_fini(void)
+             (void)pthread_cancel(sweeper_tid);
+             (void)pthread_join(sweeper_tid, NULL);
+ 
+-            /* At this point all threads should have already terminated, so
+-             * it should be safe to destroy all pending per_thread_pool_list_t
+-             * structures that are stored for each thread. */
+-            mem_pool_thread_destructor();
+-
+-            /* free all objects from all pools */
+-            list_for_each_entry_safe(pool_list, next_pl, &pool_threads,
+-                                     thr_list)
+-            {
+-                for (i = 0; i < NPOOLS; ++i) {
+-                    free_obj_list(pool_list->pools[i].hot_list);
+-                    free_obj_list(pool_list->pools[i].cold_list);
+-                    pool_list->pools[i].hot_list = NULL;
+-                    pool_list->pools[i].cold_list = NULL;
+-                }
+-
+-                list_del(&pool_list->thr_list);
+-                FREE(pool_list);
+-            }
+-
+-            list_for_each_entry_safe(pool_list, next_pl, &pool_free_threads,
+-                                     thr_list)
+-            {
+-                list_del(&pool_list->thr_list);
+-                FREE(pool_list);
+-            }
++            /* There could be threads still running in some cases, so we can't
++             * destroy pool_lists in use. We can also not destroy unused
++             * pool_lists because some allocated objects may still be pointing
++             * to them. */
++            mem_pool_thread_destructor(NULL);
+ 
+             init_done = GF_MEMPOOL_INIT_DESTROY;
+             /* Fall through. */
+@@ -617,7 +626,7 @@ mem_pools_fini(void)
+ {
+ }
+ void
+-mem_pool_thread_destructor(void)
++mem_pool_thread_destructor(per_thread_pool_list_t *pool_list)
+ {
+ }
+ 
+@@ -738,13 +747,21 @@ mem_get_pool_list(void)
+         }
+     }
+ 
++    /* There's no need to take pool_list->lock, because this is already an
++     * atomic operation and we don't need to synchronize it with any change
++     * in hot/cold lists. */
++    pool_list->poison = false;
++
+     (void)pthread_mutex_lock(&pool_lock);
+-    pool_list->poison = 0;
+     list_add(&pool_list->thr_list, &pool_threads);
+     (void)pthread_mutex_unlock(&pool_lock);
+ 
+     thread_pool_list = pool_list;
+ 
++    /* Ensure that all memory objects associated to the new pool_list are
++     * destroyed when the thread terminates. */
++    gf_thread_needs_cleanup();
++
+     return pool_list;
+ }
+ 
+diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
+index 2eb7b49..0de53c6 100644
+--- a/libglusterfs/src/syncop.c
++++ b/libglusterfs/src/syncop.c
+@@ -97,6 +97,13 @@ syncopctx_setfsgroups(int count, const void *groups)
+ 
+     /* set/reset the ngrps, this is where reset of groups is handled */
+     opctx->ngrps = count;
++
++    if ((opctx->valid & SYNCOPCTX_GROUPS) == 0) {
++        /* This is the first time we are storing groups into the TLS structure
++         * so we mark the current thread so that it will be properly cleaned
++         * up when the thread terminates. */
++        gf_thread_needs_cleanup();
++    }
+     opctx->valid |= SYNCOPCTX_GROUPS;
+ 
+ out:
+-- 
+1.8.3.1
+
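The gf_thread_needs_cleanup() comment in the patch above relies on a pthreads detail: a key's destructor only runs at thread exit when the key's value is non-NULL. A minimal standalone sketch of that technique, with illustrative names rather than the actual gluster APIs:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical names; the gluster code uses free_key and
     * glusterfs_cleanup() for the same purpose. */
    static pthread_key_t cleanup_key;

    static void thread_cleanup(void *value)
    {
        (void)value;
        /* Release per-thread resources here; runs once per thread, at exit,
         * but only if the key held a non-NULL value. */
        fprintf(stderr, "cleanup for terminating thread\n");
    }

    static void mark_thread_needs_cleanup(void)
    {
        /* The stored value is never read; it just has to be non-NULL so
         * the pthread implementation invokes the destructor at thread exit. */
        (void)pthread_setspecific(cleanup_key, (void *)1);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        mark_thread_needs_cleanup(); /* e.g. after allocating from a pool */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        (void)pthread_key_create(&cleanup_key, thread_cleanup);
        (void)pthread_create(&t, NULL, worker, NULL);
        (void)pthread_join(t, NULL);
        return 0;
    }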
diff --git a/SOURCES/0355-core-Prevent-crash-on-process-termination.patch b/SOURCES/0355-core-Prevent-crash-on-process-termination.patch
new file mode 100644
index 0000000..fca3f2c
--- /dev/null
+++ b/SOURCES/0355-core-Prevent-crash-on-process-termination.patch
@@ -0,0 +1,74 @@
+From 10f1730073b9fb02d2ed7f7de855afd6df0e5202 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Wed, 19 Feb 2020 12:24:15 +0100
+Subject: [PATCH 355/355] core: Prevent crash on process termination
+
+A previous patch (ce61da816a) has fixed a use-after-free issue,
+but it doesn't work well when the final cleanup is done at process
+termination because gluster doesn't stop other threads before
+calling exit().
+
+For this reason, the final cleanup is removed to avoid the crash,
+at least until the termination sequence properly stops all gluster
+threads before exiting the program.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24138
+> Change-Id: Id7cfb4407fcf208e28f03a7c3cdc3ef9c1f3bf9b
+> Fixes: bz#1801684
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: Id7cfb4407fcf208e28f03a7c3cdc3ef9c1f3bf9b
+BUG: 1800703
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/192344
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/mem-pool.c | 30 +++++++++++-------------------
+ 1 file changed, 11 insertions(+), 19 deletions(-)
+
+diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
+index 2b41c01..73503e0 100644
+--- a/libglusterfs/src/mem-pool.c
++++ b/libglusterfs/src/mem-pool.c
+@@ -541,25 +541,17 @@ mem_pools_preinit(void)
+ static __attribute__((destructor)) void
+ mem_pools_postfini(void)
+ {
+-    per_thread_pool_list_t *pool_list, *next;
+-
+-    /* This is part of a process shutdown (or dlclose()) which means that
+-     * most probably all threads should be stopped. However this is not the
+-     * case for gluster and there are even legitimate situations in which we
+-     * could have some threads alive. What is sure is that none of those
+-     * threads should be using anything from this library, so destroying
+-     * everything here should be fine and safe. */
+-
+-    list_for_each_entry_safe(pool_list, next, &pool_threads, thr_list)
+-    {
+-        mem_pool_thread_destructor(pool_list);
+-    }
+-
+-    list_for_each_entry_safe(pool_list, next, &pool_free_threads, thr_list)
+-    {
+-        list_del(&pool_list->thr_list);
+-        FREE(pool_list);
+-    }
++    /* TODO: This function should destroy all per thread memory pools that
++     *       are still alive, but this is not possible right now because glibc
++     *       starts calling destructors as soon as exit() is called, and
++     *       gluster doesn't ensure that all threads have been stopped before
++     *       calling exit(). Existing threads would crash when they try to use
++     *       memory or they terminate if we destroy things here.
++     *
++     *       When we properly terminate all threads, we can add the needed
++     *       code here. Till then we need to leave the memory allocated. Most
++     *       probably this function will be executed on process termination,
++     *       so the memory will be released anyway by the system. */
+ }
+ 
+ /* Call mem_pools_init() once threading has been configured completely. This
+-- 
+1.8.3.1
+
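The removal above hinges on how ELF constructor/destructor functions behave: a function marked __attribute__((destructor)) runs once exit() is called (or the shared object is unloaded), even while other threads are still running, so it must not free memory those threads may touch. A small sketch of such a destructor, assuming GCC or Clang:

    #include <stdio.h>
    #include <stdlib.h>

    /* Runs automatically after main() returns or exit() is called, before
     * the process terminates. Any thread still alive at that point would
     * race with whatever gets torn down here, which is why the real
     * mem_pools_postfini() deliberately frees nothing. */
    static __attribute__((destructor)) void on_process_exit(void)
    {
        fprintf(stderr, "destructor: process is terminating\n");
    }

    int main(void)
    {
        exit(EXIT_SUCCESS); /* triggers on_process_exit() */
    }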
diff --git a/SOURCES/0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch b/SOURCES/0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
new file mode 100644
index 0000000..f2b6835
--- /dev/null
+++ b/SOURCES/0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
@@ -0,0 +1,26 @@
+From 4099fb424482ede2fb6346c76c58523113f415df Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Thu, 12 Mar 2020 01:02:41 -0400
+Subject: [PATCH 356/357] Update rfc.sh to rhgs-3.5.1-rhel-8
+
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+---
+ rfc.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rfc.sh b/rfc.sh
+index 918fb11..a408e45 100755
+--- a/rfc.sh
++++ b/rfc.sh
+@@ -18,7 +18,7 @@ done
+ shift $((OPTIND-1))
+ 
+ 
+-branch="rhgs-3.5.1";
++branch="rhgs-3.5.1-rhel-8";
+ 
+ set_hooks_commit_msg()
+ {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch b/SOURCES/0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
new file mode 100644
index 0000000..a67b89c
--- /dev/null
+++ b/SOURCES/0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
@@ -0,0 +1,268 @@
+From 2d5e678f8331d4d99ee4dff6e166cbf01c83ab36 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Wed, 12 Feb 2020 12:47:57 -0500
+Subject: [PATCH 357/357] ganesha-ha: updates for pcs-0.10.x (i.e. in Fedora-29
+ and RHEL-8)
+
+pcs-0.10 has introduced changes to the options of pcs commands.
+
+pcs-0.10.x is in Fedora-29 and later and RHEL-8.
+
+Also some minor cleanup: use the bash built-in [[...]] in a few
+more places instead of test(1), i.e. [...], and use the correct "=="
+for comparison.
+
+master: https://review.gluster.org/24115
+
+Change-Id: I3fb2fcd71406964c77fdc4f18580ca133f365fd6
+BUG: 1802727
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/194467
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 84 ++++++++++++++++++++++++------------
+ 1 file changed, 56 insertions(+), 28 deletions(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 32af1ca..0b0050a 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -28,7 +28,12 @@ HA_VOL_MNT="/var/run/gluster/shared_storage"
+ HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
+ SERVICE_MAN="DISTRO_NOT_FOUND"
+ 
+-RHEL6_PCS_CNAME_OPTION="--name"
++# rhel, fedora id, version
++ID=""
++VERSION_ID=""
++
++PCS9OR10_PCS_CNAME_OPTION=""
++PCS9OR10_PCS_CLONE_OPTION="clone"
+ SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
+ 
+ # UNBLOCK RA uses shared_storage which may become unavailable
+@@ -101,9 +106,9 @@ determine_service_manager () {
+         then
+                 SERVICE_MAN="/sbin/service"
+         fi
+-        if [ "${SERVICE_MAN}" == "DISTRO_NOT_FOUND" ]
++        if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
+         then
+-                echo "Service manager not recognized, exiting"
++                logger "Service manager not recognized, exiting"
+                 exit 1
+         fi
+ }
+@@ -114,7 +119,7 @@ manage_service ()
+         local new_node=${2}
+         local option=
+ 
+-        if [ "${action}" == "start" ]; then
++        if [[ "${action}" == "start" ]]; then
+                 option="yes"
+         else
+                 option="no"
+@@ -122,7 +127,7 @@ manage_service ()
+         ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+ ${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
+ 
+-        if [ "${SERVICE_MAN}" == "/bin/systemctl" ]
++        if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
+         then
+                 ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+ ${SECRET_PEM} root@${new_node} "${SERVICE_MAN}  ${action} nfs-ganesha"
+@@ -140,7 +145,7 @@ check_cluster_exists()
+ 
+     if [ -e /var/run/corosync.pid ]; then
+         cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
+-        if [ ${cluster_name} -a ${cluster_name} = ${name} ]; then
++        if [[ "${cluster_name}X" == "${name}X" ]]; then
+             logger "$name already exists, exiting"
+             exit 0
+         fi
+@@ -155,7 +160,7 @@ determine_servers()
+     local tmp_ifs=${IFS}
+     local ha_servers=""
+ 
+-    if [ "X${cmd}X" != "XsetupX" -a "X${cmd}X" != "XstatusX" ]; then
++    if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
+         ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
+         IFS=$' '
+         for server in ${ha_servers} ; do
+@@ -193,15 +198,21 @@ setup_cluster()
+ 
+     logger "setting up cluster ${name} with the following ${servers}"
+ 
+-    pcs cluster auth ${servers}
+-    # pcs cluster setup --name ${name} ${servers}
+-    pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers}
++    # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
++    pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
+     if [ $? -ne 0 ]; then
+-        logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers} failed"
++        logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
+         #set up failed stop all ganesha process and clean up symlinks in cluster
+         stop_ganesha_all "${servers}"
+         exit 1;
+     fi
++
++    # pcs cluster auth ${servers}
++    pcs cluster auth
++    if [ $? -ne 0 ]; then
++        logger "pcs cluster auth failed"
++    fi
++
+     pcs cluster start --all
+     if [ $? -ne 0 ]; then
+         logger "pcs cluster start failed"
+@@ -217,7 +228,7 @@ setup_cluster()
+     done
+ 
+     unclean=$(pcs status | grep -u "UNCLEAN")
+-    while [[ "${unclean}X" = "UNCLEANX" ]]; do
++    while [[ "${unclean}X" == "UNCLEANX" ]]; do
+          sleep 1
+          unclean=$(pcs status | grep -u "UNCLEAN")
+     done
+@@ -244,7 +255,7 @@ setup_finalize_ha()
+     local stopped=""
+ 
+     stopped=$(pcs status | grep -u "Stopped")
+-    while [[ "${stopped}X" = "StoppedX" ]]; do
++    while [[ "${stopped}X" == "StoppedX" ]]; do
+          sleep 1
+          stopped=$(pcs status | grep -u "Stopped")
+     done
+@@ -265,7 +276,7 @@ refresh_config ()
+         if [ -e ${SECRET_PEM} ]; then
+         while [[ ${3} ]]; do
+             current_host=`echo ${3} | cut -d "." -f 1`
+-            if [ ${short_host} != ${current_host} ]; then
++            if [[ ${short_host} != ${current_host} ]]; then
+                 output=$(ssh -oPasswordAuthentication=no \
+ -oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
+ "dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+@@ -398,7 +409,7 @@ wrap_create_virt_ip_constraints()
+     # the result is "node2 node3 node4"; for node2, "node3 node4 node1"
+     # and so on.
+     while [[ ${1} ]]; do
+-        if [ "${1}" = "${primary}" ]; then
++        if [[ ${1} == ${primary} ]]; then
+             shift
+             while [[ ${1} ]]; do
+                 tail=${tail}" "${1}
+@@ -429,15 +440,15 @@ setup_create_resources()
+     local cibfile=$(mktemp -u)
+ 
+     # fixup /var/lib/nfs
+-    logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone"
+-    pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone
++    logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
++    pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
+     if [ $? -ne 0 ]; then
+-        logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone failed"
++        logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
+     fi
+ 
+-    pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone
++    pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
+     if [ $? -ne 0 ]; then
+-        logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone failed"
++        logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
+     fi
+ 
+     # see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace
+@@ -445,9 +456,9 @@ setup_create_resources()
+     # ganesha-active crm_attribute
+     sleep 5
+ 
+-    pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone notify=true
++    pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
+     if [ $? -ne 0 ]; then
+-        logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
++        logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
+     fi
+ 
+     pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
+@@ -616,7 +627,7 @@ addnode_recreate_resources()
+     --after ${add_node}-nfs_block
+     if [ $? -ne 0 ]; then
+         logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+-	ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
++        ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
+     fi
+ 
+     pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
+@@ -780,7 +791,7 @@ setup_state_volume()
+             touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
+         fi
+         for server in ${HA_SERVERS} ; do
+-            if [ ${server} != ${dirname} ]; then
++            if [[ ${server} != ${dirname} ]]; then
+                 ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
+                 ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
+             fi
+@@ -794,7 +805,7 @@ setup_state_volume()
+ enable_pacemaker()
+ {
+     while [[ ${1} ]]; do
+-        if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then
++        if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
+             ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+ ${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+         else
+@@ -892,7 +903,7 @@ delnode_state_volume()
+     rm -rf ${mnt}/nfs-ganesha/${dirname}
+ 
+     for server in ${HA_SERVERS} ; do
+-        if [[ "${server}" != "${dirname}" ]]; then
++        if [[ ${server} != ${dirname} ]]; then
+             rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
+             rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
+         fi
+@@ -963,7 +974,7 @@ status()
+ 
+ create_ganesha_conf_file()
+ {
+-        if [ $1 == "yes" ];
++        if [[ "$1" == "yes" ]];
+         then
+                 if [  -e $GANESHA_CONF ];
+                 then
+@@ -1012,6 +1023,13 @@ main()
+      semanage boolean -m gluster_use_execmem --on
+     fi
+ 
++    local osid=""
++
++    osid=$(grep ^ID= /etc/os-release)
++    eval $(echo ${osid} | grep -F ID=)
++    osid=$(grep ^VERSION_ID= /etc/os-release)
++    eval $(echo ${osid} | grep -F VERSION_ID=)
++
+     HA_CONFDIR=${1%/}; shift
+     local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+     local node=""
+@@ -1032,7 +1050,17 @@ main()
+ 
+         determine_servers "setup"
+ 
+-        if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then
++        # Fedora 29+ and rhel/centos 8 has PCS-0.10.x
++        # default is pcs-0.10.x options but check for
++        # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
++        if [[ ${ID} =~ {rhel,centos} ]]; then
++            if [[ ${VERSION_ID} == 7.* ]]; then
++                PCS9OR10_PCS_CNAME_OPTION="--name"
++                PCS9OR10_PCS_CLONE_OPTION="--clone"
++            fi
++        fi
++
++        if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
+ 
+             determine_service_manager
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch b/SOURCES/0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
new file mode 100644
index 0000000..d7138a6
--- /dev/null
+++ b/SOURCES/0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
@@ -0,0 +1,51 @@
+From 0d8c6d78130d22c475010bcce8055073b19de82a Mon Sep 17 00:00:00 2001
+From: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+Date: Fri, 17 May 2019 18:33:11 +0800
+Subject: [PATCH 358/362] inode: fix wrong loop count in __inode_ctx_free
+
+Avoid a serious memory leak.
+
+Backport of :
+>fixes: bz#1711240
+>Upstream patch link: https://review.gluster.org/#/c/glusterfs/+/22738/
+>Change-Id: Ic61a8fdd0e941e136c98376a87b5a77fa8c22316
+>Signed-off-by: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+
+BUG: 1781543
+Change-Id: I601ebb6cd6744a61c64edd3d21d3b9a0edf1e95b
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/195611
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/inode.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
+index 5331e93..9dbb25b 100644
+--- a/libglusterfs/src/inode.c
++++ b/libglusterfs/src/inode.c
+@@ -402,14 +402,15 @@ __inode_ctx_free(inode_t *inode)
+         goto noctx;
+     }
+ 
+-    for (index = 0; index < inode->table->xl->graph->xl_count; index++) {
++    for (index = 0; index < inode->table->ctxcount; index++) {
+         if (inode->_ctx[index].value1 || inode->_ctx[index].value2) {
+             xl = (xlator_t *)(long)inode->_ctx[index].xl_key;
+-            old_THIS = THIS;
+-            THIS = xl;
+-            if (!xl->call_cleanup && xl->cbks->forget)
++            if (xl && !xl->call_cleanup && xl->cbks->forget) {
++                old_THIS = THIS;
++                THIS = xl;
+                 xl->cbks->forget(xl, inode);
+-            THIS = old_THIS;
++                THIS = old_THIS;
++            }
+         }
+     }
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch b/SOURCES/0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
new file mode 100644
index 0000000..bd730bc
--- /dev/null
+++ b/SOURCES/0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
@@ -0,0 +1,41 @@
+From c0efaa98d777e4520028bf55482846b3ef5fca3a Mon Sep 17 00:00:00 2001
+From: Susant Palai <spalai@redhat.com>
+Date: Wed, 1 Apr 2020 12:14:31 +0530
+Subject: [PATCH 359/362] dht: gf_defrag_process_dir is called even if
+ gf_defrag_fix_layout has failed
+
+Currently, even when gf_defrag_fix_layout fails with ENOENT or ESTALE, a
+subsequent call is made to gf_defrag_process_dir, leading to rebalance failure.
+
+upstream patch: https://review.gluster.org/#/c/glusterfs/+/24225
+
+> fixes: #1102
+> Change-Id: Ib0c309fd78e89a000fed3feb4bbe2c5b48e61478
+> Signed-off-by: Susant Palai <spalai@redhat.com>
+
+BUG: 1812789
+Change-Id: Ib0c309fd78e89a000fed3feb4bbe2c5b48e61478
+Signed-off-by: Susant Palai <spalai@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/196249
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 559f046..f4c62b8 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -3939,6 +3939,7 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+                 defrag->total_failures++;
+             }
+             ret = 0;
++            goto out;
+         } else {
+             gf_msg(this->name, GF_LOG_ERROR, -ret, DHT_MSG_LAYOUT_FIX_FAILED,
+                    "Setxattr failed for %s", loc->path);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0360-rpc-Make-ssl-log-more-useful.patch b/SOURCES/0360-rpc-Make-ssl-log-more-useful.patch
new file mode 100644
index 0000000..05e903d
--- /dev/null
+++ b/SOURCES/0360-rpc-Make-ssl-log-more-useful.patch
@@ -0,0 +1,117 @@
+From 2b859d1a5499a215c8c37472d4fc7d7e4d70dac6 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Tue, 31 Mar 2020 16:45:35 +0530
+Subject: [PATCH 360/362] rpc: Make ssl log more useful
+
+Currently, ssl_setup_connection_params logs 4 messages for every
+rpc connection, which clutters the logs for the user. The same
+info can be printed in a single log message, together with the
+peerinfo, to make it more useful. ssl_setup_connection_params also
+tries to load dh_param even when the user has not configured it, and
+if a dh_param file is not available it logs a failure message. To
+avoid that message, load dh_param only when the user has configured it.
+
+> Change-Id: I9ddb57f86a3fa3e519180cb5d88828e59fe0e487
+> Fixes: #1141
+> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+> Cherry pick from commit 80dd8cceab3b860bf1bc2945c8e2d8d0b3913e48
+> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24270/
+
+BUG: 1812824
+Change-Id: I9ddb57f86a3fa3e519180cb5d88828e59fe0e487
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/196371
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ rpc/rpc-transport/socket/src/socket.c | 46 ++++++++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
+index f54ca83..65845ea 100644
+--- a/rpc/rpc-transport/socket/src/socket.c
++++ b/rpc/rpc-transport/socket/src/socket.c
+@@ -4240,6 +4240,7 @@ ssl_setup_connection_params(rpc_transport_t *this)
+     char *cipher_list = DEFAULT_CIPHER_LIST;
+     char *dh_param = DEFAULT_DH_PARAM;
+     char *ec_curve = DEFAULT_EC_CURVE;
++    gf_boolean_t dh_flag = _gf_false;
+ 
+     priv = this->private;
+ 
+@@ -4248,6 +4249,10 @@ ssl_setup_connection_params(rpc_transport_t *this)
+         return 0;
+     }
+ 
++    if (!priv->ssl_enabled && !priv->mgmt_ssl) {
++        return 0;
++    }
++
+     priv->ssl_own_cert = DEFAULT_CERT_PATH;
+     if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) {
+         if (!priv->ssl_enabled) {
+@@ -4294,27 +4299,25 @@ ssl_setup_connection_params(rpc_transport_t *this)
+             priv->crl_path = gf_strdup(optstr);
+     }
+ 
+-    gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO : GF_LOG_DEBUG,
+-           "SSL support on the I/O path is %s",
+-           priv->ssl_enabled ? "ENABLED" : "NOT enabled");
+-    gf_log(this->name, priv->mgmt_ssl ? GF_LOG_INFO : GF_LOG_DEBUG,
+-           "SSL support for glusterd is %s",
+-           priv->mgmt_ssl ? "ENABLED" : "NOT enabled");
+-
+     if (!priv->mgmt_ssl) {
+-        if (!dict_get_int32(this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) {
+-            gf_log(this->name, GF_LOG_INFO, "using certificate depth %d",
+-                   cert_depth);
++        if (!dict_get_int32_sizen(this->options, SSL_CERT_DEPTH_OPT,
++                                  &cert_depth)) {
+         }
+     } else {
+         cert_depth = this->ctx->ssl_cert_depth;
+-        gf_log(this->name, GF_LOG_INFO, "using certificate depth %d",
+-               cert_depth);
+     }
+-    if (!dict_get_str(this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) {
++    gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO : GF_LOG_DEBUG,
++           "SSL support for MGMT is %s IO path is %s certificate depth is %d "
++           "for peer %s",
++           (priv->mgmt_ssl ? "ENABLED" : "NOT enabled"),
++           (priv->ssl_enabled ? "ENABLED" : "NOT enabled"), cert_depth,
++           this->peerinfo.identifier);
++
++    if (!dict_get_str_sizen(this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) {
+         gf_log(this->name, GF_LOG_INFO, "using cipher list %s", cipher_list);
+     }
+-    if (!dict_get_str(this->options, SSL_DH_PARAM_OPT, &dh_param)) {
++    if (!dict_get_str_sizen(this->options, SSL_DH_PARAM_OPT, &dh_param)) {
++        dh_flag = _gf_true;
+         gf_log(this->name, GF_LOG_INFO, "using DH parameters %s", dh_param);
+     }
+     if (!dict_get_str(this->options, SSL_EC_CURVE_OPT, &ec_curve)) {
+@@ -4349,12 +4352,15 @@ ssl_setup_connection_params(rpc_transport_t *this)
+ #ifdef SSL_OP_NO_COMPRESSION
+         SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_COMPRESSION);
+ #endif
+-
+-        if ((bio = BIO_new_file(dh_param, "r")) == NULL) {
+-            gf_log(this->name, GF_LOG_INFO,
+-                   "failed to open %s, "
+-                   "DH ciphers are disabled",
+-                   dh_param);
++        /* Upload file to bio wrapper only if dh param is configured
++         */
++        if (dh_flag) {
++            if ((bio = BIO_new_file(dh_param, "r")) == NULL) {
++                gf_log(this->name, GF_LOG_ERROR,
++                       "failed to open %s, "
++                       "DH ciphers are disabled",
++                       dh_param);
++            }
+         }
+ 
+         if (bio != NULL) {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch b/SOURCES/0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
new file mode 100644
index 0000000..62b2fe0
--- /dev/null
+++ b/SOURCES/0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
@@ -0,0 +1,122 @@
+From 04b824ebfcf80c648d5855f10bc30fde45fd62eb Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Thu, 26 Mar 2020 10:46:16 +0000
+Subject: [PATCH 361/362] snap_scheduler: python3 compatibility and new test
+ case
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Problem:
+"snap_scheduler.py init" command failing with the below traceback:
+
+[root@dhcp43-104 ~]# snap_scheduler.py init
+Traceback (most recent call last):
+  File "/usr/sbin/snap_scheduler.py", line 941, in <module>
+    sys.exit(main(sys.argv[1:]))
+  File "/usr/sbin/snap_scheduler.py", line 851, in main
+    initLogger()
+  File "/usr/sbin/snap_scheduler.py", line 153, in initLogger
+    logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
+  File "/usr/lib64/python3.6/posixpath.py", line 94, in join
+    genericpath._check_arg_types('join', a, *p)
+  File "/usr/lib64/python3.6/genericpath.py", line 151, in _check_arg_types
+    raise TypeError("Can't mix strings and bytes in path components") from None
+TypeError: Can't mix strings and bytes in path components
+
+Solution:
+
+Added the 'universal_newlines' flag to Popen to support backward compatibility.
+
+Added a basic test for snapshot scheduler.
+
+Backport Of:
+   
+        >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24257/
+        >Change-Id: I78e8fabd866fd96638747ecd21d292f5ca074a4e
+        >Fixes: #1134
+        >Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+   
+BUG: 1817369
+Change-Id: I78e8fabd866fd96638747ecd21d292f5ca074a4e
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/196482
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/snap_scheduler/snap_scheduler.py |  2 +-
+ tests/basic/volume-snap-scheduler.t     | 49 +++++++++++++++++++++++++++++++++
+ 2 files changed, 50 insertions(+), 1 deletion(-)
+ create mode 100644 tests/basic/volume-snap-scheduler.t
+
+diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
+index a66c5e3..5a29d41 100755
+--- a/extras/snap_scheduler/snap_scheduler.py
++++ b/extras/snap_scheduler/snap_scheduler.py
+@@ -149,7 +149,7 @@ def initLogger():
+     sh.setFormatter(formatter)
+ 
+     process = subprocess.Popen(["gluster", "--print-logdir"],
+-                               stdout=subprocess.PIPE)
++                               stdout=subprocess.PIPE, universal_newlines=True)
+     logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
+ 
+     fh = logging.FileHandler(logfile)
+diff --git a/tests/basic/volume-snap-scheduler.t b/tests/basic/volume-snap-scheduler.t
+new file mode 100644
+index 0000000..a638c5c
+--- /dev/null
++++ b/tests/basic/volume-snap-scheduler.t
+@@ -0,0 +1,49 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++
++cleanup;
++
++TEST glusterd;
++TEST pidof glusterd;
++
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
++TEST $CLI volume start $V0
++
++## Create, start and mount meta_volume as
++## snap_scheduler expects shared storage to be enabled.
++## This test is very basic in nature not creating any snapshot
++## and purpose is to validate snap scheduling commands.
++
++TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
++TEST $CLI volume start $META_VOL
++TEST mkdir -p $META_MNT
++TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
++
++##function to check status
++function check_status_scheduler()
++{
++     local key=$1
++     snap_scheduler.py status | grep -F "$key" | wc -l
++}
++
++##Basic snap_scheduler command test init/enable/disable/list
++
++TEST snap_scheduler.py init
++
++TEST snap_scheduler.py enable
++
++EXPECT 1 check_status_scheduler "Enabled"
++
++TEST snap_scheduler.py disable
++
++EXPECT 1 check_status_scheduler "Disabled"
++
++TEST snap_scheduler.py list
++
++TEST $CLI volume stop $V0;
++
++TEST $CLI volume delete $V0;
++
++cleanup;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0362-write-behind-fix-data-corruption.patch b/SOURCES/0362-write-behind-fix-data-corruption.patch
new file mode 100644
index 0000000..aeb7242
--- /dev/null
+++ b/SOURCES/0362-write-behind-fix-data-corruption.patch
@@ -0,0 +1,454 @@
+From 48f6929590157d9a1697e11c02441207afdc1bed Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Fri, 27 Mar 2020 23:56:15 +0100
+Subject: [PATCH 362/362] write-behind: fix data corruption
+
+There was a bug in write-behind that allowed a previous completed write
+to overwrite the overlapping region of data from a future write.
+
+Suppose we want to send three writes (W1, W2 and W3). W1 and W2 are
+sequential, and W3 writes at the same offset of W2:
+
+    W2.offset = W3.offset = W1.offset + W1.size
+
+Both W1 and W2 are sent in parallel. W3 is only sent after W2 completes.
+So W3 should *always* overwrite the overlapping part of W2.
+
+Suppose write-behind processes the requests from 2 concurrent threads:
+
+    Thread 1                    Thread 2
+
+    <received W1>
+                                <received W2>
+    wb_enqueue_tempted(W1)
+    /* W1 is assigned gen X */
+                                wb_enqueue_tempted(W2)
+                                /* W2 is assigned gen X */
+
+                                wb_process_queue()
+                                  __wb_preprocess_winds()
+                                    /* W1 and W2 are sequential and all
+                                     * other requisites are met to merge
+                                     * both requests. */
+                                    __wb_collapse_small_writes(W1, W2)
+                                    __wb_fulfill_request(W2)
+
+                                  __wb_pick_unwinds() -> W2
+                                  /* In this case, since the request is
+                                   * already fulfilled, wb_inode->gen
+                                   * is not updated. */
+
+                                wb_do_unwinds()
+                                  STACK_UNWIND(W2)
+
+                                /* The application has received the
+                                 * result of W2, so it can send W3. */
+                                <received W3>
+
+                                wb_enqueue_tempted(W3)
+                                /* W3 is assigned gen X */
+
+                                wb_process_queue()
+                                  /* Here we have W1 (which contains
+                                   * the conflicting W2) and W3 with
+                                   * same gen, so they are interpreted
+                                   * as concurrent writes that do not
+                                   * conflict. */
+                                  __wb_pick_winds() -> W3
+
+                                wb_do_winds()
+                                  STACK_WIND(W3)
+
+    wb_process_queue()
+      /* Eventually W1 will be
+       * ready to be sent */
+      __wb_pick_winds() -> W1
+      __wb_pick_unwinds() -> W1
+        /* Here wb_inode->gen is
+         * incremented. */
+
+    wb_do_unwinds()
+      STACK_UNWIND(W1)
+
+    wb_do_winds()
+      STACK_WIND(W1)
+
+So, as we can see, W3 is sent before W1, which shouldn't happen.
+
+The problem is that wb_inode->gen is only incremented for requests that
+have not been fulfilled but, after a merge, the request is marked as
+fulfilled even though it has not been sent to the brick. This allows
+that future requests are assigned to the same generation, which could
+be internally reordered.
+
+Solution:
+
+Increment wb_inode->gen before any unwind, even if it's for a fulfilled
+request.
+
+Special thanks to Stefan Ring for writing a reproducer that has been
+crucial to identify the issue.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24263
+> Change-Id: Id4ab0f294a09aca9a863ecaeef8856474662ab45
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+> Fixes: #884
+
+Change-Id: Id4ab0f294a09aca9a863ecaeef8856474662ab45
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+BUG: 1819059
+Reviewed-on: https://code.engineering.redhat.com/gerrit/196250
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/write-behind/issue-884.c                | 267 +++++++++++++++++++++
+ tests/bugs/write-behind/issue-884.t                |  40 +++
+ .../performance/write-behind/src/write-behind.c    |   4 +-
+ 3 files changed, 309 insertions(+), 2 deletions(-)
+ create mode 100644 tests/bugs/write-behind/issue-884.c
+ create mode 100755 tests/bugs/write-behind/issue-884.t
+
+diff --git a/tests/bugs/write-behind/issue-884.c b/tests/bugs/write-behind/issue-884.c
+new file mode 100644
+index 0000000..e9c33b3
+--- /dev/null
++++ b/tests/bugs/write-behind/issue-884.c
+@@ -0,0 +1,267 @@
++
++#define _GNU_SOURCE
++
++#include <stdlib.h>
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <assert.h>
++#include <errno.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <pthread.h>
++
++#include <glusterfs/api/glfs.h>
++
++/* Based on a reproducer by Stefan Ring. It seems to be quite sensible to any
++ * timing modification, so the code has been maintained as is, only with minor
++ * changes. */
++
++struct glfs *glfs;
++
++pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
++pthread_cond_t the_cond = PTHREAD_COND_INITIALIZER;
++
++typedef struct _my_aiocb {
++    int64_t size;
++    volatile int64_t seq;
++    int which;
++} my_aiocb;
++
++typedef struct _worker_data {
++    my_aiocb cb;
++    struct iovec iov;
++    int64_t offset;
++} worker_data;
++
++typedef struct {
++    worker_data wdata[2];
++
++    volatile unsigned busy;
++} all_data_t;
++
++all_data_t all_data;
++
++static void
++completion_fnc(struct glfs_fd *fd, ssize_t ret, struct glfs_stat *pre,
++               struct glfs_stat *post, void *arg)
++{
++    void *the_thread;
++    my_aiocb *cb = (my_aiocb *)arg;
++    long seq = cb->seq;
++
++    assert(ret == cb->size);
++
++    pthread_mutex_lock(&the_mutex);
++    pthread_cond_broadcast(&the_cond);
++
++    all_data.busy &= ~(1 << cb->which);
++    cb->seq = -1;
++
++    the_thread = (void *)pthread_self();
++    printf("worker %d is done from thread %p, seq %ld!\n", cb->which,
++           the_thread, seq);
++
++    pthread_mutex_unlock(&the_mutex);
++}
++
++static void
++init_wdata(worker_data *data, int which)
++{
++    data->cb.which = which;
++    data->cb.seq = -1;
++
++    data->iov.iov_base = malloc(1024 * 1024);
++    memset(data->iov.iov_base, 6,
++           1024 * 1024); /* tail part never overwritten */
++}
++
++static void
++init()
++{
++    all_data.busy = 0;
++
++    init_wdata(&all_data.wdata[0], 0);
++    init_wdata(&all_data.wdata[1], 1);
++}
++
++static void
++do_write(struct glfs_fd *fd, int content, int size, int64_t seq,
++         worker_data *wdata, const char *name)
++{
++    int ret;
++
++    wdata->cb.size = size;
++    wdata->cb.seq = seq;
++
++    if (content >= 0)
++        memset(wdata->iov.iov_base, content, size);
++    wdata->iov.iov_len = size;
++
++    pthread_mutex_lock(&the_mutex);
++    printf("(%d) dispatching write \"%s\", offset %lx, len %x, seq %ld\n",
++           wdata->cb.which, name, (long)wdata->offset, size, (long)seq);
++    pthread_mutex_unlock(&the_mutex);
++    ret = glfs_pwritev_async(fd, &wdata->iov, 1, wdata->offset, 0,
++                             completion_fnc, &wdata->cb);
++    assert(ret >= 0);
++}
++
++#define IDLE 0  // both workers must be idle
++#define ANY 1   // use any worker, other one may be busy
++
++int
++get_worker(int waitfor, int64_t excl_seq)
++{
++    int which;
++
++    pthread_mutex_lock(&the_mutex);
++
++    while (waitfor == IDLE && (all_data.busy & 3) != 0 ||
++           waitfor == ANY &&
++               ((all_data.busy & 3) == 3 ||
++                excl_seq >= 0 && (all_data.wdata[0].cb.seq == excl_seq ||
++                                  all_data.wdata[1].cb.seq == excl_seq)))
++        pthread_cond_wait(&the_cond, &the_mutex);
++
++    if (!(all_data.busy & 1))
++        which = 0;
++    else
++        which = 1;
++
++    all_data.busy |= (1 << which);
++
++    pthread_mutex_unlock(&the_mutex);
++
++    return which;
++}
++
++static int
++doit(struct glfs_fd *fd)
++{
++    int ret;
++    int64_t seq = 0;
++    int64_t offset = 0;     // position in file, in blocks
++    int64_t base = 0x1000;  // where to place the data, in blocks
++
++    int async_mode = ANY;
++
++    init();
++
++    for (;;) {
++        int which;
++        worker_data *wdata;
++
++        // for growing to the first offset
++        for (;;) {
++            int gap = base + 0x42 - offset;
++            if (!gap)
++                break;
++            if (gap > 80)
++                gap = 80;
++
++            which = get_worker(IDLE, -1);
++            wdata = &all_data.wdata[which];
++
++            wdata->offset = offset << 9;
++            do_write(fd, 0, gap << 9, seq++, wdata, "gap-filling");
++
++            offset += gap;
++        }
++
++        // 8700
++        which = get_worker(IDLE, -1);
++        wdata = &all_data.wdata[which];
++
++        wdata->offset = (base + 0x42) << 9;
++        do_write(fd, 1, 62 << 9, seq++, wdata, "!8700");
++
++        // 8701
++        which = get_worker(IDLE, -1);
++        wdata = &all_data.wdata[which];
++
++        wdata->offset = (base + 0x42) << 9;
++        do_write(fd, 2, 55 << 9, seq++, wdata, "!8701");
++
++        // 8702
++        which = get_worker(async_mode, -1);
++        wdata = &all_data.wdata[which];
++
++        wdata->offset = (base + 0x79) << 9;
++        do_write(fd, 3, 54 << 9, seq++, wdata, "!8702");
++
++        // 8703
++        which = get_worker(async_mode, -1);
++        wdata = &all_data.wdata[which];
++
++        wdata->offset = (base + 0xaf) << 9;
++        do_write(fd, 4, 81 << 9, seq++, wdata, "!8703");
++
++        // 8704
++        // this writes both 5s and 6s
++        // the range of 5s is the one that overwrites 8703
++
++        which = get_worker(async_mode, seq - 1);
++        wdata = &all_data.wdata[which];
++
++        memset(wdata->iov.iov_base, 5, 81 << 9);
++        wdata->offset = (base + 0xaf) << 9;
++        do_write(fd, -1, 1623 << 9, seq++, wdata, "!8704");
++
++        offset = base + 0x706;
++        base += 0x1000;
++        if (base >= 0x100000)
++            break;
++    }
++
++    printf("done!\n");
++    fflush(stdout);
++
++    pthread_mutex_lock(&the_mutex);
++
++    while ((all_data.busy & 3) != 0)
++        pthread_cond_wait(&the_cond, &the_mutex);
++
++    pthread_mutex_unlock(&the_mutex);
++
++    ret = glfs_close(fd);
++    assert(ret >= 0);
++    /*
++        ret = glfs_fini(glfs);
++        assert(ret >= 0);
++    */
++    return 0;
++}
++
++int
++main(int argc, char *argv[])
++{
++    int ret;
++    int open_flags = O_RDWR | O_DIRECT | O_TRUNC;
++    struct glfs_fd *fd;
++
++    glfs = glfs_new(argv[1]);
++    if (!glfs) {
++        printf("glfs_new!\n");
++        goto out;
++    }
++    ret = glfs_set_volfile_server(glfs, "tcp", "localhost", 24007);
++    if (ret < 0) {
++        printf("set_volfile!\n");
++        goto out;
++    }
++    ret = glfs_init(glfs);
++    if (ret) {
++        printf("init!\n");
++        goto out;
++    }
++    fd = glfs_open(glfs, argv[2], open_flags);
++    if (!fd) {
++        printf("open!\n");
++        goto out;
++    }
++    srand(time(NULL));
++    return doit(fd);
++out:
++    return 1;
++}
+diff --git a/tests/bugs/write-behind/issue-884.t b/tests/bugs/write-behind/issue-884.t
+new file mode 100755
+index 0000000..2bcf7d1
+--- /dev/null
++++ b/tests/bugs/write-behind/issue-884.t
+@@ -0,0 +1,40 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++# This test tries to detect a race condition in write-behind. It's based on a
++# reproducer written by Stefan Ring that is able to hit it sometimes. On my
++# system, it happened around 10% of the runs. This means that if this bug
++# appears again, this test will fail once every 10 runs. Most probably this
++# failure will be hidden by the automatic test retry of the testing framework.
++#
++# Please, if this test fails, it needs to be analyzed in detail.
++
++function run() {
++    "${@}" >/dev/null
++}
++
++cleanup
++
++TEST glusterd
++TEST pidof glusterd
++
++TEST $CLI volume create $V0 $H0:$B0/$V0
++# This makes it easier to hit the issue
++TEST $CLI volume set $V0 client-log-level TRACE
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
++
++build_tester $(dirname $0)/issue-884.c -lgfapi
++
++TEST touch $M0/testfile
++
++# This program generates a file of 535694336 bytes with a fixed pattern
++TEST run $(dirname $0)/issue-884 $V0 testfile
++
++# This is the md5sum of the expected pattern without corruption
++EXPECT "ad105f9349345a70fc697632cbb5eec8" echo "$(md5sum $B0/$V0/testfile | awk '{ print $1; }')"
++
++cleanup
+diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
+index 70e281a..90a0bcf 100644
+--- a/xlators/performance/write-behind/src/write-behind.c
++++ b/xlators/performance/write-behind/src/write-behind.c
+@@ -1284,14 +1284,14 @@ __wb_pick_unwinds(wb_inode_t *wb_inode, list_head_t *lies)
+ 
+         wb_inode->window_current += req->orig_size;
+ 
++        wb_inode->gen++;
++
+         if (!req->ordering.fulfilled) {
+             /* burden increased */
+             list_add_tail(&req->lie, &wb_inode->liability);
+ 
+             req->ordering.lied = 1;
+ 
+-            wb_inode->gen++;
+-
+             uuid_utoa_r(req->gfid, gfid);
+             gf_msg_debug(wb_inode->this->name, 0,
+                          "(unique=%" PRIu64
+-- 
+1.8.3.1
+
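The write-behind fix above hinges on a generation counter that must advance on every unwind, not only when an unfulfilled request is added to the liability list. The standalone sketch below (hypothetical type and function names, not the actual write-behind code) illustrates why bumping the counter on every unwind keeps a later request from sharing a generation with an earlier, still-pending one.

    #include <stdio.h>

    /* Simplified stand-ins for wb_inode_t and wb_request_t; the real
     * structures in write-behind.c hold much more state. */
    struct req {
        int id;
        int fulfilled;   /* merged into another request, nothing left to wind */
        unsigned gen;    /* generation assigned at enqueue time */
    };

    struct inode_ctx {
        unsigned gen;    /* current generation of the inode */
    };

    /* Requests with the same generation may be picked in any order, so a
     * request must never share a generation with one enqueued after its
     * result was already returned to the application. */
    static void unwind(struct inode_ctx *ctx, struct req *r)
    {
        /* The fix: bump the generation for every unwind, even when the
         * request was fulfilled by a merge and will never be wound. */
        ctx->gen++;
        printf("unwind req %d (fulfilled=%d), gen is now %u\n",
               r->id, r->fulfilled, ctx->gen);
    }

    static void enqueue(struct inode_ctx *ctx, struct req *r, int id)
    {
        r->id = id;
        r->fulfilled = 0;
        r->gen = ctx->gen;   /* new requests inherit the current generation */
        printf("enqueue req %d with gen %u\n", id, r->gen);
    }

    int main(void)
    {
        struct inode_ctx ctx = { 0 };
        struct req w2 = { 0 }, w3 = { 0 };

        enqueue(&ctx, &w2, 2);
        w2.fulfilled = 1;       /* W2 was merged into W1 */
        unwind(&ctx, &w2);      /* the application sees W2's result */
        enqueue(&ctx, &w3, 3);  /* W3 now gets a newer generation than W1/W2 */

        return 0;
    }

With the increment in place, W3 in the trace above receives a newer generation than W1 and can no longer be reordered ahead of it.
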
diff --git a/SOURCES/0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch b/SOURCES/0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
new file mode 100644
index 0000000..e1ea6d0
--- /dev/null
+++ b/SOURCES/0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
@@ -0,0 +1,47 @@
+From d7c0dc7107a024d28196a4582bacf28ddcfbeb69 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Tue, 14 Apr 2020 07:59:22 -0400
+Subject: [PATCH 363/367] common-ha: cluster status shows "FAILOVER" when
+ actually HEALTHY
+
+pacemaker devs changed the format of the output of `pcs status`
+
+Expected to find a line in the format:
+
+Online: ....
+
+but now it's
+
+  * Online: ...
+
+And the `grep -E "^Online:"` no longer finds the list of nodes that
+are online.
+
+    https://review.gluster.org/#/c/glusterfs/+/24333/
+
+Change-Id: If2aa1e7b53c766c625d7b4cc222a83ea2c0bd72d
+BUG: 1823706
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/197367
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 0b0050a..df333a1 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -935,7 +935,7 @@ status()
+     done
+ 
+     # print the nodes that are expected to be online
+-    grep -E "^Online:" ${scratch}
++    grep -E "Online:" ${scratch}
+ 
+     echo
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0364-dht-fixing-rebalance-failures-for-files-with-holes.patch b/SOURCES/0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
new file mode 100644
index 0000000..2c6ba98
--- /dev/null
+++ b/SOURCES/0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
@@ -0,0 +1,97 @@
+From 5b1bfebacac649e6f5051316e4075309caf93901 Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Tue, 21 Apr 2020 19:13:41 +0300
+Subject: [PATCH 364/367] dht - fixing rebalance failures for files with holes
+
+The rebalance process's handling of files which contain holes caused
+rebalance to fail with "No space left on device" errors.
+This patch modifies the code-flow in such a way that files with holes
+will be rebalanced correctly.
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24357/
+>fixes: #1187
+>Change-Id: I89bc3d4ea7f074db7213d759c49307f379543932
+>Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+
+BUG: 1823703
+Change-Id: I89bc3d4ea7f074db7213d759c49307f379543932
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/198579
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index f4c62b8..7d9df02 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -650,7 +650,7 @@ out:
+ static int
+ __dht_rebalance_create_dst_file(xlator_t *this, xlator_t *to, xlator_t *from,
+                                 loc_t *loc, struct iatt *stbuf, fd_t **dst_fd,
+-                                int *fop_errno)
++                                int *fop_errno, int file_has_holes)
+ {
+     int ret = -1;
+     int ret2 = -1;
+@@ -819,7 +819,7 @@ __dht_rebalance_create_dst_file(xlator_t *this, xlator_t *to, xlator_t *from,
+ 
+     /* No need to bother about 0 byte size files */
+     if (stbuf->ia_size > 0) {
+-        if (conf->use_fallocate) {
++        if (conf->use_fallocate && !file_has_holes) {
+             ret = syncop_fallocate(to, fd, 0, 0, stbuf->ia_size, NULL, NULL);
+             if (ret < 0) {
+                 if (ret == -EOPNOTSUPP || ret == -EINVAL || ret == -ENOSYS) {
+@@ -846,9 +846,7 @@ __dht_rebalance_create_dst_file(xlator_t *this, xlator_t *to, xlator_t *from,
+                     goto out;
+                 }
+             }
+-        }
+-
+-        if (!conf->use_fallocate) {
++        } else {
+             ret = syncop_ftruncate(to, fd, stbuf->ia_size, NULL, NULL, NULL,
+                                    NULL);
+             if (ret < 0) {
+@@ -1728,9 +1726,13 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
+         goto out;
+     }
+ 
++    /* Try to preserve 'holes' while migrating data */
++    if (stbuf.ia_size > (stbuf.ia_blocks * GF_DISK_SECTOR_SIZE))
++        file_has_holes = 1;
++
+     /* create the destination, with required modes/xattr */
+     ret = __dht_rebalance_create_dst_file(this, to, from, loc, &stbuf, &dst_fd,
+-                                          fop_errno);
++                                          fop_errno, file_has_holes);
+     if (ret) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+                "Create dst failed"
+@@ -1774,8 +1776,8 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
+          * destination. We need to do update this only post migration
+          * as in case of failure the linkto needs to point to the source
+          * subvol */
+-        ret = __dht_rebalance_create_dst_file(this, to, from, loc, &stbuf,
+-                                              &dst_fd, fop_errno);
++        ret = __dht_rebalance_create_dst_file(
++            this, to, from, loc, &stbuf, &dst_fd, fop_errno, file_has_holes);
+         if (ret) {
+             gf_log(this->name, GF_LOG_ERROR,
+                    "Create dst failed"
+@@ -1862,9 +1864,6 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
+             ret = 0;
+         goto out;
+     }
+-    /* Try to preserve 'holes' while migrating data */
+-    if (stbuf.ia_size > (stbuf.ia_blocks * GF_DISK_SECTOR_SIZE))
+-        file_has_holes = 1;
+ 
+     ret = __dht_rebalance_migrate_data(this, defrag, from, to, src_fd, dst_fd,
+                                        stbuf.ia_size, file_has_holes,
+-- 
+1.8.3.1
+
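The rebalance fix above comes down to detecting sparseness up front and then choosing between fallocate (dense files) and ftruncate (sparse files) when sizing the destination. The following condensed, self-contained sketch uses stand-in names rather than the actual DHT structures.

    #include <stdint.h>
    #include <stdio.h>

    #define DISK_SECTOR_SIZE 512  /* stand-in for GF_DISK_SECTOR_SIZE */

    /* Minimal stand-in for the iatt fields used by the check. */
    struct file_attr {
        uint64_t ia_size;    /* logical size in bytes */
        uint64_t ia_blocks;  /* allocated 512-byte blocks */
    };

    /* A file "has holes" when its logical size exceeds the space actually
     * allocated on disk. */
    static int file_has_holes(const struct file_attr *st)
    {
        return st->ia_size > st->ia_blocks * DISK_SECTOR_SIZE;
    }

    /* Sizing the destination: pre-allocating a sparse file with fallocate
     * would materialize the holes and can fill the target brick, so sparse
     * files are only truncated to the right size instead. */
    static void size_destination(const struct file_attr *st, int use_fallocate)
    {
        if (st->ia_size == 0)
            return;                      /* nothing to do for empty files */

        if (use_fallocate && !file_has_holes(st))
            printf("fallocate(dst, 0, %llu)\n", (unsigned long long)st->ia_size);
        else
            printf("ftruncate(dst, %llu)\n", (unsigned long long)st->ia_size);
    }

    int main(void)
    {
        struct file_attr dense = { 4096, 8 };      /* fully allocated */
        struct file_attr sparse = { 1 << 20, 8 };  /* mostly holes */

        size_destination(&dense, 1);
        size_destination(&sparse, 1);
        return 0;
    }
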
diff --git a/SOURCES/0365-build-geo-rep-requires-relevant-selinux-permission-f.patch b/SOURCES/0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
new file mode 100644
index 0000000..daf8dc6
--- /dev/null
+++ b/SOURCES/0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
@@ -0,0 +1,70 @@
+From 36180d21dc4b16619b75d65d51eaf37df4e0e2d3 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Mon, 20 Apr 2020 12:15:42 +0100
+Subject: [PATCH 365/367] build: geo-rep requires relevant selinux permission
+ for rsync
+
+If selinux is set to enforcing mode, geo-rep goes into a faulty state.
+
+To prevent this from happening, some relevant selinux booleans need to be
+set to 'on' to allow the rsync operation.
+
+Backport of:
+   >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24348.
+   >Change-Id: Ia8ce530d6548c2a545f4c99c600f5aac2bbb3363
+   >Fixes: #1182
+   >Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+
+BUG: 1813917
+Change-Id: Ia8ce530d6548c2a545f4c99c600f5aac2bbb3363
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/198599
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 7c8a751..5ed07e7 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -130,6 +130,12 @@
+ ## All %%global definitions should be placed here and keep them sorted
+ ##
+ 
+# selinux booleans whose default value needs modification
++# these booleans will be consumed by "%%selinux_set_booleans" macro.
++%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
++%global selinuxbooleans rsync_full_access=1 rsync_client=1
++%endif
++
+ %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
+ %global _with_systemd true
+ %endif
+@@ -515,6 +521,12 @@ Requires:         python%{_pythonver}-gluster = %{version}-%{release}
+ Requires:         rsync
+ Requires:         util-linux
+ Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
++# required for setting selinux bools
++%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
++Requires:            selinux-policy-targeted
++Requires(post):      selinux-policy-targeted
++BuildRequires:       selinux-policy-devel
++%endif
+ 
+ %description geo-replication
+ GlusterFS is a distributed file-system capable of scaling to several
+@@ -941,6 +953,9 @@ exit 0
+ 
+ %if ( 0%{!?_without_georeplication:1} )
+ %post geo-replication
++%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
++%selinux_set_booleans %{selinuxbooleans}
++%endif
+ if [ $1 -ge 1 ]; then
+     %systemd_postun_with_restart glusterd
+ fi
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0366-snapshot-fix-python3-issue-in-gcron.patch b/SOURCES/0366-snapshot-fix-python3-issue-in-gcron.patch
new file mode 100644
index 0000000..c704a17
--- /dev/null
+++ b/SOURCES/0366-snapshot-fix-python3-issue-in-gcron.patch
@@ -0,0 +1,55 @@
+From d7b84014cbb19e65dfae6248af47cc23fabc64e5 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Wed, 22 Apr 2020 15:09:16 +0100
+Subject: [PATCH 366/367] snapshot: fix python3 issue in gcron
+
+`$gcron.py test_vol Job`
+Traceback:
+  File "/usr/sbin/gcron.py", line 189, in <module>
+    main()
+  File "/usr/sbin/gcron.py", line 121, in main
+    initLogger(script_name)
+  File "/usr/sbin/gcron.py", line 44, in initLogger
+    logfile = os.path.join(out.strip(), script_name[:-3]+".log")
+  File "/usr/lib64/python3.6/posixpath.py", line 94, in join
+    genericpath._check_arg_types('join', a, *p)
+  File "/usr/lib64/python3.6/genericpath.py", line 151, in _check_arg_types
+    raise TypeError("Can't mix strings and bytes in path components") from None
+TypeError: Can't mix strings and bytes in path components
+
+Solution: Added the 'universal_newlines' flag to Popen.
+
+Backport of:
+
+   >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24364/
+   >Change-Id: I4c7a0e5bce605e4c134f6786c9dd8162b89fc77f
+   >Fixes: #1193
+   >Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+
+BUG: 1825195
+Change-Id: I4c7a0e5bce605e4c134f6786c9dd8162b89fc77f
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/198641
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/snap_scheduler/gcron.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
+index 1127be0..cc16310 100755
+--- a/extras/snap_scheduler/gcron.py
++++ b/extras/snap_scheduler/gcron.py
+@@ -38,7 +38,8 @@ def initLogger(script_name):
+     sh.setFormatter(formatter)
+ 
+     process = subprocess.Popen(["gluster", "--print-logdir"],
+-                               stdout=subprocess.PIPE)
++                               stdout=subprocess.PIPE,
++                               universal_newlines=True)
+     out, err = process.communicate()
+     if process.returncode == 0:
+         logfile = os.path.join(out.strip(), script_name[:-3]+".log")
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch b/SOURCES/0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
new file mode 100644
index 0000000..b94f8fc
--- /dev/null
+++ b/SOURCES/0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
@@ -0,0 +1,95 @@
+From aef8e51b9974603d397cc8f5301b24451d012e46 Mon Sep 17 00:00:00 2001
+From: Susant Palai <spalai@redhat.com>
+Date: Fri, 24 Apr 2020 13:32:51 +0530
+Subject: [PATCH 367/367] dht: Handle setxattr and rm race for directory in
+ rebalance
+
+Problem: Self-heal done as part of a directory lookup does not return
+an error if the layout setxattr fails. This is because the actual lookup
+fop must have been successful for the layout heal to proceed. Hence, we
+could not tell whether fix-layout failed during rebalance.
+
+Solution: We can check this information in the layout structure that
+whether all the xlators have returned error.
+
+> fixes: #1200
+> Change-Id: I3e5f2a36c0d934c21476a73a9a5473d8e490cde7
+> Signed-off-by: Susant Palai <spalai@redhat.com>
+(backport of https://review.gluster.org/#/c/glusterfs/+/24375/)
+
+BUG: 1812789
+Change-Id: I897826c4c2e883b3085c9314deff32d649b4588e
+Signed-off-by: Susant Palai <spalai@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/198726
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-common.c    | 19 +++++++++++++++++++
+ xlators/cluster/dht/src/dht-common.h    |  3 +++
+ xlators/cluster/dht/src/dht-rebalance.c | 11 +++++++++++
+ 3 files changed, 33 insertions(+)
+
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index d0b5287..7890e7a 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -11286,3 +11286,22 @@ dht_pt_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *key,
+                FIRST_CHILD(this)->fops->fgetxattr, fd, key, xdata);
+     return 0;
+ }
++
++/* The job of this function is to check if all the xlators have updated
++ * error in the layout. */
++int
++dht_dir_layout_error_check(xlator_t *this, inode_t *inode)
++{
++    dht_layout_t *layout = NULL;
++    int i = 0;
++
++    layout = dht_layout_get(this, inode);
++    for (i = 0; i < layout->cnt; i++) {
++        if (layout->list[i].err == 0) {
++            return 0;
++        }
++    }
++
++    /* Returning the first xlator error as all xlators have errors */
++    return layout->list[0].err;
++}
+diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
+index ce11f02..4d2aae6 100644
+--- a/xlators/cluster/dht/src/dht-common.h
++++ b/xlators/cluster/dht/src/dht-common.h
+@@ -1544,4 +1544,7 @@ dht_pt_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ int32_t
+ dht_check_remote_fd_failed_error(dht_local_t *local, int op_ret, int op_errno);
+ 
++int
++dht_dir_layout_error_check(xlator_t *this, inode_t *inode);
++
+ #endif /* _DHT_H */
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 7d9df02..33cacfe 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -3928,6 +3928,17 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+     }
+ 
+     ret = syncop_setxattr(this, loc, fix_layout, 0, NULL, NULL);
++
++    /* In case of a race where the directory is deleted just before
++     * layout setxattr, the errors are updated in the layout structure.
++     * We can use this information to make a decision whether the directory
++     * is deleted entirely.
++     */
++    if (ret == 0) {
++        ret = dht_dir_layout_error_check(this, loc->inode);
++        ret = -ret;
++    }
++
+     if (ret) {
+         if (-ret == ENOENT || -ret == ESTALE) {
+             gf_msg(this->name, GF_LOG_INFO, -ret, DHT_MSG_LAYOUT_FIX_FAILED,
+-- 
+1.8.3.1
+
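The check introduced above relies on the per-subvolume error slots recorded in the layout: a setxattr can look successful while every subvolume has in fact reported that the directory is gone. A simplified, self-contained illustration of that decision (stand-in types, not the real dht_layout_t):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for the per-subvolume entries of dht_layout_t. */
    struct layout_entry {
        int err;   /* 0 on success, errno otherwise */
    };

    struct layout {
        int cnt;
        struct layout_entry list[4];
    };

    /* Returns 0 if at least one subvolume succeeded; otherwise the error
     * of the first subvolume (all of them failed, so any one will do). */
    static int layout_error_check(const struct layout *layout)
    {
        for (int i = 0; i < layout->cnt; i++) {
            if (layout->list[i].err == 0)
                return 0;
        }
        return layout->list[0].err;
    }

    int main(void)
    {
        /* Directory removed while fix-layout was running: every subvolume
         * reported ENOENT even though the setxattr fop itself returned 0. */
        struct layout l = { 3, { { ENOENT }, { ENOENT }, { ENOENT } } };

        int err = layout_error_check(&l);
        if (err)
            printf("fix-layout must be treated as failed: %d (ENOENT)\n", err);
        return 0;
    }
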
diff --git a/SOURCES/0368-Update-rfc.sh-to-rhgs-3.5.2.patch b/SOURCES/0368-Update-rfc.sh-to-rhgs-3.5.2.patch
new file mode 100644
index 0000000..c103891
--- /dev/null
+++ b/SOURCES/0368-Update-rfc.sh-to-rhgs-3.5.2.patch
@@ -0,0 +1,26 @@
+From 00b79c4e2837980f36f7d8387d90cfb7dc8d0d58 Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Tue, 5 May 2020 12:41:41 -0400
+Subject: [PATCH 368/375] Update rfc.sh to rhgs-3.5.2
+
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+---
+ rfc.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rfc.sh b/rfc.sh
+index a408e45..37d551f 100755
+--- a/rfc.sh
++++ b/rfc.sh
+@@ -18,7 +18,7 @@ done
+ shift $((OPTIND-1))
+ 
+ 
+-branch="rhgs-3.5.1-rhel-8";
++branch="rhgs-3.5.2";
+ 
+ set_hooks_commit_msg()
+ {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0369-cluster-ec-Return-correct-error-code-and-log-message.patch b/SOURCES/0369-cluster-ec-Return-correct-error-code-and-log-message.patch
new file mode 100644
index 0000000..c3c8925
--- /dev/null
+++ b/SOURCES/0369-cluster-ec-Return-correct-error-code-and-log-message.patch
@@ -0,0 +1,53 @@
+From f30fa3938f980f03d08479776037090e7fc11f42 Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Tue, 5 May 2020 18:17:49 +0530
+Subject: [PATCH 369/375] cluster/ec: Return correct error code and log message
+
+If a readdir is sent with an FD on which opendir
+had failed, this FD is useless and we return an error.
+For now, we return EINVAL without logging any
+message in the log file.
+
+Return a correct error code and also log a message to make debugging easier.
+
+>fixes: #1220
+>Change-Id: Iaf035254b9c5aa52fa43ace72d328be622b06169
+>Signed-off-by: Ashish Pandey <aspandey@redhat.com>
+(Backport of https://review.gluster.org/#/c/glusterfs/+/24407/)
+
+BUG: 1831403
+Change-Id: Ib5bf30c47b7491abd0ad5ca0ce52ec77945b2e53
+Signed-off-by: Ashish Pandey <aspandey@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200209
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/ec/src/ec-dir-read.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-dir-read.c b/xlators/cluster/ec/src/ec-dir-read.c
+index 8310d4a..9924425 100644
+--- a/xlators/cluster/ec/src/ec-dir-read.c
++++ b/xlators/cluster/ec/src/ec-dir-read.c
+@@ -388,9 +388,16 @@ ec_manager_readdir(ec_fop_data_t *fop, int32_t state)
+             /* Return error if opendir has not been successfully called on
+              * any subvolume. */
+             ctx = ec_fd_get(fop->fd, fop->xl);
+-            if ((ctx == NULL) || (ctx->open == 0)) {
+-                fop->error = EINVAL;
++            if (ctx == NULL) {
++                fop->error = ENOMEM;
++            } else if (ctx->open == 0) {
++                fop->error = EBADFD;
++            }
+ 
++            if (fop->error) {
++                gf_msg(fop->xl->name, GF_LOG_ERROR, fop->error,
++                       EC_MSG_INVALID_REQUEST, "EC is not winding readdir: %s",
++                       ec_msg_str(fop));
+                 return EC_STATE_REPORT;
+             }
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch b/SOURCES/0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
new file mode 100644
index 0000000..6648a4e
--- /dev/null
+++ b/SOURCES/0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
@@ -0,0 +1,203 @@
+From 3d230880aed85737365deafe3c9a32c67da2a79e Mon Sep 17 00:00:00 2001
+From: Susant Palai <spalai@redhat.com>
+Date: Mon, 4 May 2020 19:09:00 +0530
+Subject: [PATCH 370/375] dht: Do opendir selectively in gf_defrag_process_dir
+
+Currently opendir is done from the cluster view. Hence, even if
+one opendir is successful, the opendir operation as a whole is considered
+successful.
+
+But since in gf_defrag_get_entry we fetch entries selectively from
+local_subvols, we need to opendir individually on those local subvols
+and keep track of fds separately. Otherwise it is possible that opendir
+failed on one of the subvols and we wind the readdirp call on the fd for
+the corresponding subvol, which will ultimately result in an EINVAL error.
+
+> fixes: #1218
+> Change-Id: I50dd88b9597852a15579f4ee325918979417f570
+> Signed-off-by: Susant Palai <spalai@redhat.com>
+(Backport of https://review.gluster.org/#/c/glusterfs/+/24404/)
+
+BUG: 1831403
+Change-Id: I96e19fdd630279c3ef44f361c1d1fc5c1c429821
+Signed-off-by: Susant Palai <spalai@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200306
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-common.h    |  2 +
+ xlators/cluster/dht/src/dht-rebalance.c | 74 +++++++++++++++++++++++----------
+ 2 files changed, 54 insertions(+), 22 deletions(-)
+
+diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
+index 4d2aae6..8e65111 100644
+--- a/xlators/cluster/dht/src/dht-common.h
++++ b/xlators/cluster/dht/src/dht-common.h
+@@ -742,6 +742,8 @@ struct dir_dfmeta {
+     struct list_head **head;
+     struct list_head **iterator;
+     int *fetch_entries;
++    /* fds corresponding to local subvols only */
++    fd_t **lfd;
+ };
+ 
+ typedef struct dht_migrate_info {
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 33cacfe..c692119 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -48,6 +48,8 @@ gf_defrag_free_dir_dfmeta(struct dir_dfmeta *meta, int local_subvols_cnt)
+     if (meta) {
+         for (i = 0; i < local_subvols_cnt; i++) {
+             gf_dirent_free(&meta->equeue[i]);
++            if (meta->lfd && meta->lfd[i])
++                fd_unref(meta->lfd[i]);
+         }
+ 
+         GF_FREE(meta->equeue);
+@@ -55,6 +57,7 @@ gf_defrag_free_dir_dfmeta(struct dir_dfmeta *meta, int local_subvols_cnt)
+         GF_FREE(meta->iterator);
+         GF_FREE(meta->offset_var);
+         GF_FREE(meta->fetch_entries);
++        GF_FREE(meta->lfd);
+         GF_FREE(meta);
+     }
+ }
+@@ -3095,7 +3098,7 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+                                struct dir_dfmeta *dir_dfmeta, dict_t *xattr_req,
+                                int *should_commit_hash, int *perrno)
+ {
+-    int ret = -1;
++    int ret = 0;
+     char is_linkfile = 0;
+     gf_dirent_t *df_entry = NULL;
+     struct dht_container *tmp_container = NULL;
+@@ -3111,6 +3114,13 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+     }
+ 
+     if (dir_dfmeta->fetch_entries[i] == 1) {
++        if (!fd) {
++            dir_dfmeta->fetch_entries[i] = 0;
++            dir_dfmeta->offset_var[i].readdir_done = 1;
++            ret = 0;
++            goto out;
++        }
++
+         ret = syncop_readdirp(conf->local_subvols[i], fd, 131072,
+                               dir_dfmeta->offset_var[i].offset,
+                               &(dir_dfmeta->equeue[i]), xattr_req, NULL);
+@@ -3270,7 +3280,6 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+                       dict_t *migrate_data, int *perrno)
+ {
+     int ret = -1;
+-    fd_t *fd = NULL;
+     dht_conf_t *conf = NULL;
+     gf_dirent_t entries;
+     dict_t *xattr_req = NULL;
+@@ -3304,28 +3313,49 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+         goto out;
+     }
+ 
+-    fd = fd_create(loc->inode, defrag->pid);
+-    if (!fd) {
+-        gf_log(this->name, GF_LOG_ERROR, "Failed to create fd");
++    dir_dfmeta = GF_CALLOC(1, sizeof(*dir_dfmeta), gf_common_mt_pointer);
++    if (!dir_dfmeta) {
++        gf_log(this->name, GF_LOG_ERROR, "dir_dfmeta is NULL");
+         ret = -1;
+         goto out;
+     }
+ 
+-    ret = syncop_opendir(this, loc, fd, NULL, NULL);
+-    if (ret) {
+-        gf_msg(this->name, GF_LOG_WARNING, -ret, DHT_MSG_MIGRATE_DATA_FAILED,
+-               "Migrate data failed: Failed to open dir %s", loc->path);
+-        *perrno = -ret;
++    dir_dfmeta->lfd = GF_CALLOC(local_subvols_cnt, sizeof(fd_t *),
++                                gf_common_mt_pointer);
++    if (!dir_dfmeta->lfd) {
++        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
++               "could not allocate memory for dir_dfmeta");
+         ret = -1;
++        *perrno = ENOMEM;
+         goto out;
+     }
+ 
+-    fd_bind(fd);
+-    dir_dfmeta = GF_CALLOC(1, sizeof(*dir_dfmeta), gf_common_mt_pointer);
+-    if (!dir_dfmeta) {
+-        gf_log(this->name, GF_LOG_ERROR, "dir_dfmeta is NULL");
+-        ret = -1;
+-        goto out;
++    for (i = 0; i < local_subvols_cnt; i++) {
++        dir_dfmeta->lfd[i] = fd_create(loc->inode, defrag->pid);
++        if (!dir_dfmeta->lfd[i]) {
++            gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0, "failed to create fd");
++            *perrno = ENOMEM;
++            ret = -1;
++            goto out;
++        }
++
++        ret = syncop_opendir(conf->local_subvols[i], loc, dir_dfmeta->lfd[i],
++                             NULL, NULL);
++        if (ret) {
++            fd_unref(dir_dfmeta->lfd[i]);
++            dir_dfmeta->lfd[i] = NULL;
++            gf_smsg(this->name, GF_LOG_WARNING, 0, 0,
++                    "failed to open dir: %s subvol: %s", loc->path,
++                    conf->local_subvols[i]->name);
++
++            if (conf->decommission_in_progress) {
++                *perrno = -ret;
++                ret = -1;
++                goto out;
++            }
++        } else {
++            fd_bind(dir_dfmeta->lfd[i]);
++        }
+     }
+ 
+     dir_dfmeta->head = GF_CALLOC(local_subvols_cnt, sizeof(*(dir_dfmeta->head)),
+@@ -3360,6 +3390,7 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+         ret = -1;
+         goto out;
+     }
++
+     ret = gf_defrag_ctx_subvols_init(dir_dfmeta->offset_var, this);
+     if (ret) {
+         gf_log(this->name, GF_LOG_ERROR,
+@@ -3372,7 +3403,8 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+     dir_dfmeta->fetch_entries = GF_CALLOC(local_subvols_cnt, sizeof(int),
+                                           gf_common_mt_int);
+     if (!dir_dfmeta->fetch_entries) {
+-        gf_log(this->name, GF_LOG_ERROR, "dir_dfmeta->fetch_entries is NULL");
++        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
++               "could not allocate memory for dir_dfmeta->fetch_entries");
+         ret = -1;
+         goto out;
+     }
+@@ -3442,8 +3474,9 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+             ldfq_count <= MAX_MIGRATE_QUEUE_COUNT &&
+             !dht_dfreaddirp_done(dir_dfmeta->offset_var, local_subvols_cnt)) {
+             ret = gf_defrag_get_entry(this, dfc_index, &container, loc, conf,
+-                                      defrag, fd, migrate_data, dir_dfmeta,
+-                                      xattr_req, &should_commit_hash, perrno);
++                                      defrag, dir_dfmeta->lfd[dfc_index],
++                                      migrate_data, dir_dfmeta, xattr_req,
++                                      &should_commit_hash, perrno);
+ 
+             if (ret) {
+                 gf_log(this->name, GF_LOG_WARNING,
+@@ -3497,9 +3530,6 @@ out:
+     if (xattr_req)
+         dict_unref(xattr_req);
+ 
+-    if (fd)
+-        fd_unref(fd);
+-
+     if (ret == 0 && should_commit_hash == 0) {
+         ret = 2;
+     }
+-- 
+1.8.3.1
+
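The fix above replaces one cluster-wide fd with an array of fds, one per local subvolume, and only reads entries from subvolumes whose opendir actually succeeded. The sketch below (hypothetical names, heavily condensed; in the real code the fds live in dir_dfmeta->lfd) shows the bookkeeping pattern.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins: one slot per local subvolume. */
    struct dir_meta {
        int nsubvols;
        int *fd;            /* -1 when opendir failed on that subvolume */
        int *fetch_entries; /* 1 while that subvolume still has entries */
    };

    static int opendir_on(int subvol)
    {
        /* Pretend opendir fails on subvolume 1 only. */
        return (subvol == 1) ? -1 : subvol + 100;
    }

    int main(void)
    {
        struct dir_meta m = { 3, NULL, NULL };

        m.fd = calloc(m.nsubvols, sizeof(int));
        m.fetch_entries = calloc(m.nsubvols, sizeof(int));
        if (!m.fd || !m.fetch_entries)
            return 1;

        /* Open the directory on each local subvolume separately and remember
         * which ones failed, instead of treating any one success as success. */
        for (int i = 0; i < m.nsubvols; i++) {
            m.fd[i] = opendir_on(i);
            m.fetch_entries[i] = (m.fd[i] >= 0);
        }

        /* Later, readdirp is only wound on subvolumes with a usable fd, so a
         * failed opendir can no longer turn into an EINVAL from readdirp. */
        for (int i = 0; i < m.nsubvols; i++) {
            if (!m.fetch_entries[i]) {
                printf("skipping subvolume %d (opendir failed)\n", i);
                continue;
            }
            printf("reading entries from subvolume %d via fd %d\n", i, m.fd[i]);
        }

        free(m.fd);
        free(m.fetch_entries);
        return 0;
    }
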
diff --git a/SOURCES/0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch b/SOURCES/0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
new file mode 100644
index 0000000..a395da3
--- /dev/null
+++ b/SOURCES/0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
@@ -0,0 +1,53 @@
+From 05bd0226716516d37ead173c7d6924225bd474db Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Wed, 6 May 2020 07:24:38 -0400
+Subject: [PATCH 371/375] common-ha: cluster status shows "FAILOVER" when
+ actually HEALTHY
+
+pacemaker devs changed the format of the output of `pcs status`
+
+Expected to find a line in the format:
+
+    Online: ....
+
+but now it's
+
+    * Online: ...
+
+And the `grep -E "^Online:"` no longer finds the list of nodes that
+are online.
+
+Also, other lines now have '*' in the first few characters of the line,
+throwing off `grep -x ...`
+
+https://review.gluster.org/#/c/glusterfs/+/24403/
+
+Change-Id: Ia04a89e76914f2a455a755f0a93fa415f60aefd0
+BUG: 1823706
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/199442
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index df333a1..4ecf91b 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -919,8 +919,9 @@ status()
+     local index=1
+     local nodes
+ 
+-    # change tabs to spaces, strip leading spaces
+-    pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch}
++    # change tabs to spaces, strip leading spaces, including any 
++    # new '*' at the beginning of a line introduced in pcs-0.10.x
++    pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
+ 
+     nodes[0]=${1}; shift
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0372-posix-fix-seek-functionality.patch b/SOURCES/0372-posix-fix-seek-functionality.patch
new file mode 100644
index 0000000..7c286c2
--- /dev/null
+++ b/SOURCES/0372-posix-fix-seek-functionality.patch
@@ -0,0 +1,49 @@
+From 955fea10809861aa9b3da85d386c2cc92b319cdb Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Thu, 7 May 2020 18:57:37 +0300
+Subject: [PATCH 372/375] posix - fix seek functionality
+
+A wrong pointer check causes the offset returned by seek to always be
+wrong.
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24412/
+>fixes: #1228
+>Change-Id: Iac4c6a163175617ac4f14544fc6b7c6fb4041cd6
+>Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+
+BUG: 1833017
+Change-Id: Iac4c6a163175617ac4f14544fc6b7c6fb4041cd6
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/199761
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/syncop.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
+index 0de53c6..693970f 100644
+--- a/libglusterfs/src/syncop.c
++++ b/libglusterfs/src/syncop.c
+@@ -2881,12 +2881,13 @@ syncop_seek(xlator_t *subvol, fd_t *fd, off_t offset, gf_seek_what_t what,
+     SYNCOP(subvol, (&args), syncop_seek_cbk, subvol->fops->seek, fd, offset,
+            what, xdata_in);
+ 
+-    if (*off)
+-        *off = args.offset;
+-
+-    if (args.op_ret == -1)
++    if (args.op_ret < 0) {
+         return -args.op_errno;
+-    return args.op_ret;
++    } else {
++        if (off)
++            *off = args.offset;
++        return args.op_ret;
++    }
+ }
+ 
+ int
+-- 
+1.8.3.1
+
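The seek bug above is a classic out-parameter mistake: `if (*off)` dereferences the caller's variable and skips the assignment whenever it happens to hold zero, while the intended check is whether a pointer was passed at all. A minimal standalone sketch of the corrected pattern (stand-in names, not the actual syncop code):

    #include <stdio.h>

    typedef long off_t_like;  /* stand-in for off_t */

    /* Correct pattern: test the pointer, not the value it points to, and
     * only publish the result after the operation succeeded. */
    static int seek_like(off_t_like result, int op_ret, int op_errno,
                         off_t_like *off)
    {
        if (op_ret < 0)
            return -op_errno;

        if (off)            /* not "if (*off)": a caller-initialized 0 is valid */
            *off = result;
        return op_ret;
    }

    int main(void)
    {
        off_t_like off = 0;  /* with the buggy check this would never be set */

        if (seek_like(4096, 0, 0, &off) >= 0)
            printf("seek landed at offset %ld\n", off);
        return 0;
    }
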
diff --git a/SOURCES/0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch b/SOURCES/0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
new file mode 100644
index 0000000..7abaf0e
--- /dev/null
+++ b/SOURCES/0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
@@ -0,0 +1,51 @@
+From bbf43008e6d21d649536547f500662b940562c3e Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Mon, 11 May 2020 10:02:08 +0100
+Subject: [PATCH 373/375] build: geo-rep sub-pkg requires
+ policycoreutils-python-utils on rhel8
+
+glusterfs-geo-replication sub-package requires policycoreutils-python-utils
+on rhel8 to set relevant selinux boolean to allow rsync.
+
+Backport of:
+    >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24433/
+    >Change-Id: Ia0fdcfdd8c7d18cd194e011f6b365bf5cb70a20a
+    >Fixes: #1236
+    >Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+
+BUG: 1825177
+Change-Id: Ia0fdcfdd8c7d18cd194e011f6b365bf5cb70a20a
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200242
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 5ed07e7..9def416 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -523,6 +523,8 @@ Requires:         util-linux
+ Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
+ # required for setting selinux bools
+ %if ( 0%{?rhel} && 0%{?rhel} >= 8 )
++Requires(post):      policycoreutils-python-utils
++Requires(postun):    policycoreutils-python-utils
+ Requires:            selinux-policy-targeted
+ Requires(post):      selinux-policy-targeted
+ BuildRequires:       selinux-policy-devel
+@@ -1978,6 +1980,10 @@ fi
+ %endif
+ 
+ %changelog
++
++* Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
++- added requires policycoreutils-python-utils on rhel8 for geo-replication
++
+ * Tue Aug 27 2019 Hari Gowtham <hgowtham@redhat.com>
+ - Added scripts to collect machine stats and component stats (#1719171)
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0374-open-behind-fix-missing-fd-reference.patch b/SOURCES/0374-open-behind-fix-missing-fd-reference.patch
new file mode 100644
index 0000000..94a1fb9
--- /dev/null
+++ b/SOURCES/0374-open-behind-fix-missing-fd-reference.patch
@@ -0,0 +1,121 @@
+From 30cbdf8c06145a0c290da42ecc0a7eae928200b7 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Sun, 8 Mar 2020 18:36:45 +0100
+Subject: [PATCH 374/375] open-behind: fix missing fd reference
+
+Open behind was not keeping any reference on fds pending to be
+opened. This made it possible that a concurrent close and an entry
+fop (unlink, rename, ...) caused destruction of the fd while it
+was still being used.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24204
+> Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
+> Fixes: bz#1810934
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
+BUG: 1830713
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/199714
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 27 ++++++++++++++---------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index 268c717..14ebc12 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -206,8 +206,13 @@ ob_fd_free(ob_fd_t *ob_fd)
+     if (ob_fd->xdata)
+         dict_unref(ob_fd->xdata);
+ 
+-    if (ob_fd->open_frame)
++    if (ob_fd->open_frame) {
++        /* If we still have a frame it means that background open has never
++         * been triggered. We need to release the pending reference. */
++        fd_unref(ob_fd->fd);
++
+         STACK_DESTROY(ob_fd->open_frame->root);
++    }
+ 
+     GF_FREE(ob_fd);
+ }
+@@ -297,6 +302,7 @@ ob_wake_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+             call_resume(stub);
+     }
+ 
++    /* The background open is completed. We can release the 'fd' reference. */
+     fd_unref(fd);
+ 
+     STACK_DESTROY(frame->root);
+@@ -331,7 +337,9 @@ ob_fd_wake(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
+     }
+ 
+     if (frame) {
+-        frame->local = fd_ref(fd);
++        /* We don't need to take a reference here. We already have a reference
++         * while the open is pending. */
++        frame->local = fd;
+ 
+         STACK_WIND(frame, ob_wake_cbk, FIRST_CHILD(this),
+                    FIRST_CHILD(this)->fops->open, &ob_fd->loc, ob_fd->flags, fd,
+@@ -345,15 +353,12 @@ void
+ ob_inode_wake(xlator_t *this, struct list_head *ob_fds)
+ {
+     ob_fd_t *ob_fd = NULL, *tmp = NULL;
+-    fd_t *fd = NULL;
+ 
+     if (!list_empty(ob_fds)) {
+         list_for_each_entry_safe(ob_fd, tmp, ob_fds, ob_fds_on_inode)
+         {
+             ob_fd_wake(this, ob_fd->fd, ob_fd);
+-            fd = ob_fd->fd;
+             ob_fd_free(ob_fd);
+-            fd_unref(fd);
+         }
+     }
+ }
+@@ -365,7 +370,7 @@ ob_fd_copy(ob_fd_t *src, ob_fd_t *dst)
+     if (!src || !dst)
+         goto out;
+ 
+-    dst->fd = __fd_ref(src->fd);
++    dst->fd = src->fd;
+     dst->loc.inode = inode_ref(src->loc.inode);
+     gf_uuid_copy(dst->loc.gfid, src->loc.gfid);
+     dst->flags = src->flags;
+@@ -509,7 +514,6 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ 
+     ob_fd->ob_inode = ob_inode;
+ 
+-    /* don't do fd_ref, it'll cause leaks */
+     ob_fd->fd = fd;
+ 
+     ob_fd->open_frame = copy_frame(frame);
+@@ -539,15 +543,16 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+     }
+     UNLOCK(&fd->inode->lock);
+ 
+-    if (!open_in_progress && !unlinked) {
+-        fd_ref(fd);
++    /* We take a reference while the background open is pending or being
++     * processed. If we finally wind the request in the foreground, then
++     * ob_fd_free() will take care of this additional reference. */
++    fd_ref(fd);
+ 
++    if (!open_in_progress && !unlinked) {
+         STACK_UNWIND_STRICT(open, frame, 0, 0, fd, xdata);
+ 
+         if (!conf->lazy_open)
+             ob_fd_wake(this, fd, NULL);
+-
+-        fd_unref(fd);
+     } else {
+         ob_fd_free(ob_fd);
+         STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+-- 
+1.8.3.1
+
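The open-behind fix above establishes a simple ownership rule: take one fd reference the moment an open is deferred, and release exactly one reference either when the background open completes or when the deferred open is discarded without ever being wound. A self-contained sketch of that protocol with a toy reference count and hypothetical names:

    #include <stdio.h>

    struct fd_like {
        int refcount;
    };

    static void fd_ref_(struct fd_like *fd)   { fd->refcount++; }
    static void fd_unref_(struct fd_like *fd) { fd->refcount--; }

    struct pending_open {
        struct fd_like *fd;
        int frame_held;   /* non-zero while the background open is still queued */
    };

    /* Deferring the open: the pending-open record now owns one reference, so
     * a concurrent close cannot destroy the fd before the open is wound. */
    static void defer_open(struct pending_open *po, struct fd_like *fd)
    {
        po->fd = fd;
        po->frame_held = 1;
        fd_ref_(fd);
    }

    /* Background open completed: the pending reference is released here. */
    static void open_completed(struct pending_open *po)
    {
        po->frame_held = 0;
        fd_unref_(po->fd);
    }

    /* Pending open discarded without being wound (e.g. the fd was closed
     * first): the reference must still be dropped exactly once. */
    static void discard_pending(struct pending_open *po)
    {
        if (po->frame_held)
            fd_unref_(po->fd);
        po->frame_held = 0;
    }

    int main(void)
    {
        struct fd_like fd = { 1 };          /* caller's reference */
        struct pending_open po = { 0 };

        defer_open(&po, &fd);
        discard_pending(&po);               /* or open_completed(&po) */
        printf("refcount back to %d\n", fd.refcount);
        return 0;
    }
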
diff --git a/SOURCES/0375-features-shard-Send-correct-size-when-reads-are-sent.patch b/SOURCES/0375-features-shard-Send-correct-size-when-reads-are-sent.patch
new file mode 100644
index 0000000..32f9c19
--- /dev/null
+++ b/SOURCES/0375-features-shard-Send-correct-size-when-reads-are-sent.patch
@@ -0,0 +1,75 @@
+From ac5b1b38e705bd0e4c00cc50580a71dfaa4d3b5f Mon Sep 17 00:00:00 2001
+From: Krutika Dhananjay <kdhananj@redhat.com>
+Date: Wed, 7 Aug 2019 12:12:43 +0530
+Subject: [PATCH 375/375] features/shard: Send correct size when reads are sent
+ beyond file size
+
+Backport of:
+> https://review.gluster.org/c/glusterfs/+/23175
+> Change-Id: I0cebaaf55c09eb1fb77a274268ff564e871b743b
+> fixes bz#1738419
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+Change-Id: I0cebaaf55c09eb1fb77a274268ff564e871b743b
+BUG: 1802013
+Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/199570
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/shard/bug-1738419.t     | 29 +++++++++++++++++++++++++++++
+ xlators/features/shard/src/shard.c |  2 ++
+ 2 files changed, 31 insertions(+)
+ create mode 100644 tests/bugs/shard/bug-1738419.t
+
+diff --git a/tests/bugs/shard/bug-1738419.t b/tests/bugs/shard/bug-1738419.t
+new file mode 100644
+index 0000000..8d0a31d
+--- /dev/null
++++ b/tests/bugs/shard/bug-1738419.t
+@@ -0,0 +1,29 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++cleanup
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume set $V0 features.shard on
++TEST $CLI volume set $V0 network.remote-dio off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.strict-o-direct on
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
++
++TEST dd if=/dev/zero of=$M0/metadata bs=501 count=1
++
++EXPECT "501" echo $("dd" if=$M0/metadata bs=4096 count=1 of=/dev/null iflag=direct 2>&1 | awk '/bytes/ {print $1}')
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST $CLI volume stop $V0
++TEST $CLI volume delete $V0
++
++cleanup
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index b224abd..9ed597b 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -4433,6 +4433,8 @@ out:
+       if (xdata)
+         local->xattr_rsp = dict_ref(xdata);
+       vec.iov_base = local->iobuf->ptr;
++      if (local->offset + local->req_size > local->prebuf.ia_size)
++          local->total_size = local->prebuf.ia_size - local->offset;
+       vec.iov_len = local->total_size;
+       local->op_ret = local->total_size;
+       SHARD_STACK_UNWIND(readv, frame, local->op_ret, local->op_errno, &vec, 1,
+-- 
+1.8.3.1
+
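The shard fix above clamps the byte count reported for a read that starts inside the file but extends past its end. A one-function sketch of the clamping rule (names are stand-ins, not the actual shard code):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns how many bytes a read of req_size at offset may report, given
     * the file's current size: never more than what lies before EOF. */
    static uint64_t clamp_read_size(uint64_t offset, uint64_t req_size,
                                    uint64_t file_size)
    {
        if (offset >= file_size)
            return 0;
        if (offset + req_size > file_size)
            return file_size - offset;
        return req_size;
    }

    int main(void)
    {
        /* The case from the test: a 501-byte file read with bs=4096 must
         * report 501 bytes, not 4096. */
        printf("%llu\n",
               (unsigned long long)clamp_read_size(0, 4096, 501));
        return 0;
    }
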
diff --git a/SOURCES/0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch b/SOURCES/0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
new file mode 100644
index 0000000..b295fc2
--- /dev/null
+++ b/SOURCES/0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
@@ -0,0 +1,70 @@
+From 341d75642ecc4e27bc6fecb56eb98a0ba03d8544 Mon Sep 17 00:00:00 2001
+From: Krutika Dhananjay <kdhananj@redhat.com>
+Date: Mon, 23 Mar 2020 11:47:10 +0530
+Subject: [PATCH 376/379] features/shard: Fix crash during shards cleanup in
+ error cases
+
+Backport of:
+> https://review.gluster.org/c/glusterfs/+/24244
+> Change-Id: I0b49f2b58becd0d8874b3d4b14ff8d92a89d02d5
+> Fixes: #1127
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+A crash is seen when shard cleanup is re-attempted in the background
+upon remount. This happens even across remounts, which means a remount
+is no workaround for the crash.
+
+In such a situation, the in-memory base inode object will not be
+existent (new process, non-existent base shard).
+So local->resolver_base_inode will be NULL.
+
+In the event of an error (in this case, of space running out), the
+process would crash at the time of logging the error in the following line -
+
+        gf_msg(this->name, GF_LOG_ERROR, local->op_errno, SHARD_MSG_FOP_FAILED,
+               "failed to delete shards of %s",
+               uuid_utoa(local->resolver_base_inode->gfid));
+
+Fixed that by using local->base_gfid as the source of gfid when
+local->resolver_base_inode is NULL.
+
+Change-Id: I0b49f2b58becd0d8874b3d4b14ff8d92a89d02d5
+BUG: 1836233
+Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200689
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/features/shard/src/shard.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index 9ed597b..ee38ed2 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -2729,13 +2729,20 @@ int shard_unlink_shards_do(call_frame_t *frame, xlator_t *this, inode_t *inode);
+ int shard_post_lookup_shards_unlink_handler(call_frame_t *frame,
+                                             xlator_t *this) {
+   shard_local_t *local = NULL;
++    uuid_t gfid = {
++        0,
++    };
+ 
+   local = frame->local;
+ 
++    if (local->resolver_base_inode)
++        gf_uuid_copy(gfid, local->resolver_base_inode->gfid);
++    else
++        gf_uuid_copy(gfid, local->base_gfid);
++
+   if ((local->op_ret < 0) && (local->op_errno != ENOENT)) {
+     gf_msg(this->name, GF_LOG_ERROR, local->op_errno, SHARD_MSG_FOP_FAILED,
+-           "failed to delete shards of %s",
+-           uuid_utoa(local->resolver_base_inode->gfid));
++           "failed to delete shards of %s", uuid_utoa(gfid));
+     return 0;
+   }
+   local->op_ret = 0;
+-- 
+1.8.3.1
+
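The crash fixed above came from logging local->resolver_base_inode->gfid when the base inode object does not exist in the new process; the fix picks the gfid from whichever source is available. A minimal sketch of that NULL-safe selection with toy types:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char uuid_like[16];

    struct inode_like {
        uuid_like gfid;
    };

    struct shard_local_like {
        struct inode_like *resolver_base_inode;  /* may be NULL after remount */
        uuid_like base_gfid;                     /* always populated */
    };

    /* Choose the gfid to log: prefer the in-memory inode when it exists,
     * otherwise fall back to the gfid carried in the local struct. */
    static void pick_gfid(const struct shard_local_like *local, uuid_like out)
    {
        if (local->resolver_base_inode)
            memcpy(out, local->resolver_base_inode->gfid, sizeof(uuid_like));
        else
            memcpy(out, local->base_gfid, sizeof(uuid_like));
    }

    int main(void)
    {
        struct shard_local_like local = { NULL, { 0xab } };
        uuid_like gfid;

        pick_gfid(&local, gfid);   /* no dereference of a NULL inode */
        printf("first gfid byte: %02x\n", gfid[0]);
        return 0;
    }
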
diff --git a/SOURCES/0377-syncop-improve-scaling-and-implement-more-tools.patch b/SOURCES/0377-syncop-improve-scaling-and-implement-more-tools.patch
new file mode 100644
index 0000000..66cccc3
--- /dev/null
+++ b/SOURCES/0377-syncop-improve-scaling-and-implement-more-tools.patch
@@ -0,0 +1,862 @@
+From 66600fb55522d405a68d7340a5680a2633c4237e Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Thu, 30 Apr 2020 11:19:01 +0200
+Subject: [PATCH 377/379] syncop: improve scaling and implement more tools
+
+The current scaling of the syncop thread pool is not working properly
+and can leave some tasks in the run queue longer than necessary
+when the maximum number of threads has not been reached.
+
+This patch provides a better scaling condition to react faster to
+pending work.
+
+Condition variables and sleep in the context of a synctask have also
+been implemented. Their purpose is to replace regular condition
+variables and sleeps that block synctask threads and prevent other
+tasks from being executed.
+
+The new features have been applied to several places in glusterd.
+
+upstream patch: https://review.gluster.org/#/c/glusterfs/+/24396/
+
+> Change-Id: Ic50b7c73c104f9e41f08101a357d30b95efccfbf
+> Fixes: #1116
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: Ic50b7c73c104f9e41f08101a357d30b95efccfbf
+BUG: 1810516
+Signed-off-by: Sanju Rakonde <srakonde@redhta.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200409
+Tested-by: Sanju Rakonde <srakonde@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ libglusterfs/src/glusterfs/syncop.h                |  52 +++-
+ libglusterfs/src/libglusterfs.sym                  |   7 +
+ libglusterfs/src/syncop.c                          | 306 ++++++++++++++++-----
+ xlators/cluster/dht/src/dht-rebalance.c            |   2 +-
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c         |   9 +-
+ xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c     |   2 +-
+ .../mgmt/glusterd/src/glusterd-snapshot-utils.c    |   5 +-
+ xlators/mgmt/glusterd/src/glusterd-syncop.h        |   2 +-
+ xlators/mgmt/glusterd/src/glusterd-utils.c         |  29 +-
+ xlators/mgmt/glusterd/src/glusterd.c               |   2 +
+ xlators/mgmt/glusterd/src/glusterd.h               |   2 +
+ 11 files changed, 317 insertions(+), 101 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/syncop.h b/libglusterfs/src/glusterfs/syncop.h
+index e0f1017..3011b4c 100644
+--- a/libglusterfs/src/glusterfs/syncop.h
++++ b/libglusterfs/src/glusterfs/syncop.h
+@@ -15,6 +15,7 @@
+ #include <sys/time.h>
+ #include <pthread.h>
+ #include <ucontext.h>
++#include "glusterfs/timer.h"
+ 
+ #define SYNCENV_PROC_MAX 16
+ #define SYNCENV_PROC_MIN 2
+@@ -32,6 +33,7 @@
+ struct synctask;
+ struct syncproc;
+ struct syncenv;
++struct synccond;
+ 
+ typedef int (*synctask_cbk_t)(int ret, call_frame_t *frame, void *opaque);
+ 
+@@ -55,9 +57,12 @@ struct synctask {
+     call_frame_t *opframe;
+     synctask_cbk_t synccbk;
+     synctask_fn_t syncfn;
+-    synctask_state_t state;
++    struct timespec *delta;
++    gf_timer_t *timer;
++    struct synccond *synccond;
+     void *opaque;
+     void *stack;
++    synctask_state_t state;
+     int woken;
+     int slept;
+     int ret;
+@@ -85,19 +90,21 @@ struct syncproc {
+ /* hosts the scheduler thread and framework for executing synctasks */
+ struct syncenv {
+     struct syncproc proc[SYNCENV_PROC_MAX];
+-    int procs;
++
++    pthread_mutex_t mutex;
++    pthread_cond_t cond;
+ 
+     struct list_head runq;
+-    int runcount;
+     struct list_head waitq;
+-    int waitcount;
++
++    int procs;
++    int procs_idle;
++
++    int runcount;
+ 
+     int procmin;
+     int procmax;
+ 
+-    pthread_mutex_t mutex;
+-    pthread_cond_t cond;
+-
+     size_t stacksize;
+ 
+     int destroy; /* FLAG to mark syncenv is in destroy mode
+@@ -123,6 +130,13 @@ struct synclock {
+ };
+ typedef struct synclock synclock_t;
+ 
++struct synccond {
++    pthread_mutex_t pmutex;
++    pthread_cond_t pcond;
++    struct list_head waitq;
++};
++typedef struct synccond synccond_t;
++
+ struct syncbarrier {
+     gf_boolean_t initialized; /*Set on successful initialization*/
+     pthread_mutex_t guard;    /* guard the remaining members, pair @cond */
+@@ -219,7 +233,7 @@ struct syncopctx {
+ #define __yield(args)                                                          \
+     do {                                                                       \
+         if (args->task) {                                                      \
+-            synctask_yield(args->task);                                        \
++            synctask_yield(args->task, NULL);                                  \
+         } else {                                                               \
+             pthread_mutex_lock(&args->mutex);                                  \
+             {                                                                  \
+@@ -307,7 +321,9 @@ synctask_join(struct synctask *task);
+ void
+ synctask_wake(struct synctask *task);
+ void
+-synctask_yield(struct synctask *task);
++synctask_yield(struct synctask *task, struct timespec *delta);
++void
++synctask_sleep(int32_t secs);
+ void
+ synctask_waitfor(struct synctask *task, int count);
+ 
+@@ -405,6 +421,24 @@ synclock_trylock(synclock_t *lock);
+ int
+ synclock_unlock(synclock_t *lock);
+ 
++int32_t
++synccond_init(synccond_t *cond);
++
++void
++synccond_destroy(synccond_t *cond);
++
++int
++synccond_wait(synccond_t *cond, synclock_t *lock);
++
++int
++synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta);
++
++void
++synccond_signal(synccond_t *cond);
++
++void
++synccond_broadcast(synccond_t *cond);
++
+ int
+ syncbarrier_init(syncbarrier_t *barrier);
+ int
+diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
+index 467a1b7..5a721e0 100644
+--- a/libglusterfs/src/libglusterfs.sym
++++ b/libglusterfs/src/libglusterfs.sym
+@@ -938,6 +938,12 @@ syncbarrier_destroy
+ syncbarrier_init
+ syncbarrier_wait
+ syncbarrier_wake
++synccond_init
++synccond_destroy
++synccond_wait
++synccond_timedwait
++synccond_signal
++synccond_broadcast
+ syncenv_destroy
+ syncenv_new
+ synclock_destroy
+@@ -1015,6 +1021,7 @@ synctask_new
+ synctask_new1
+ synctask_set
+ synctask_setid
++synctask_sleep
+ synctask_wake
+ synctask_yield
+ sys_access
+diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
+index 693970f..71d37b7 100644
+--- a/libglusterfs/src/syncop.c
++++ b/libglusterfs/src/syncop.c
+@@ -154,10 +154,14 @@ out:
+     return ret;
+ }
+ 
++void *
++syncenv_processor(void *thdata);
++
+ static void
+ __run(struct synctask *task)
+ {
+     struct syncenv *env = NULL;
++    int32_t total, ret, i;
+ 
+     env = task->env;
+ 
+@@ -173,7 +177,6 @@ __run(struct synctask *task)
+             env->runcount--;
+             break;
+         case SYNCTASK_WAIT:
+-            env->waitcount--;
+             break;
+         case SYNCTASK_DONE:
+             gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
+@@ -187,8 +190,27 @@ __run(struct synctask *task)
+     }
+ 
+     list_add_tail(&task->all_tasks, &env->runq);
+-    env->runcount++;
+     task->state = SYNCTASK_RUN;
++
++    env->runcount++;
++
++    total = env->procs + env->runcount - env->procs_idle;
++    if (total > env->procmax) {
++        total = env->procmax;
++    }
++    if (total > env->procs) {
++        for (i = 0; i < env->procmax; i++) {
++            if (env->proc[i].env == NULL) {
++                env->proc[i].env = env;
++                ret = gf_thread_create(&env->proc[i].processor, NULL,
++                                       syncenv_processor, &env->proc[i],
++                                       "sproc%d", i);
++                if ((ret < 0) || (++env->procs >= total)) {
++                    break;
++                }
++            }
++        }
++    }
+ }
+ 
+ static void
+@@ -210,7 +232,6 @@ __wait(struct synctask *task)
+             gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_REWAITING_TASK,
+                    "re-waiting already waiting "
+                    "task");
+-            env->waitcount--;
+             break;
+         case SYNCTASK_DONE:
+             gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
+@@ -223,12 +244,11 @@ __wait(struct synctask *task)
+     }
+ 
+     list_add_tail(&task->all_tasks, &env->waitq);
+-    env->waitcount++;
+     task->state = SYNCTASK_WAIT;
+ }
+ 
+ void
+-synctask_yield(struct synctask *task)
++synctask_yield(struct synctask *task, struct timespec *delta)
+ {
+     xlator_t *oldTHIS = THIS;
+ 
+@@ -237,6 +257,8 @@ synctask_yield(struct synctask *task)
+     task->proc->sched.uc_flags &= ~_UC_TLSBASE;
+ #endif
+ 
++    task->delta = delta;
++
+     if (task->state != SYNCTASK_DONE) {
+         task->state = SYNCTASK_SUSPEND;
+     }
+@@ -249,6 +271,35 @@ synctask_yield(struct synctask *task)
+ }
+ 
+ void
++synctask_sleep(int32_t secs)
++{
++    struct timespec delta;
++    struct synctask *task;
++
++    task = synctask_get();
++
++    if (task == NULL) {
++        sleep(secs);
++    } else {
++        delta.tv_sec = secs;
++        delta.tv_nsec = 0;
++
++        synctask_yield(task, &delta);
++    }
++}
++
++static void
++__synctask_wake(struct synctask *task)
++{
++    task->woken = 1;
++
++    if (task->slept)
++        __run(task);
++
++    pthread_cond_broadcast(&task->env->cond);
++}
++
++void
+ synctask_wake(struct synctask *task)
+ {
+     struct syncenv *env = NULL;
+@@ -257,13 +308,18 @@ synctask_wake(struct synctask *task)
+ 
+     pthread_mutex_lock(&env->mutex);
+     {
+-        task->woken = 1;
++        if (task->timer != NULL) {
++            if (gf_timer_call_cancel(task->xl->ctx, task->timer) != 0) {
++                goto unlock;
++            }
+ 
+-        if (task->slept)
+-            __run(task);
++            task->timer = NULL;
++            task->synccond = NULL;
++        }
+ 
+-        pthread_cond_broadcast(&env->cond);
++        __synctask_wake(task);
+     }
++unlock:
+     pthread_mutex_unlock(&env->mutex);
+ }
+ 
+@@ -282,7 +338,7 @@ synctask_wrap(void)
+ 
+     task->state = SYNCTASK_DONE;
+ 
+-    synctask_yield(task);
++    synctask_yield(task, NULL);
+ }
+ 
+ void
+@@ -422,11 +478,6 @@ synctask_create(struct syncenv *env, size_t stacksize, synctask_fn_t fn,
+     }
+ 
+     synctask_wake(newtask);
+-    /*
+-     * Make sure someone's there to execute anything we just put on the
+-     * run queue.
+-     */
+-    syncenv_scale(env);
+ 
+     return newtask;
+ err:
+@@ -520,8 +571,12 @@ syncenv_task(struct syncproc *proc)
+                 goto unlock;
+             }
+ 
++            env->procs_idle++;
++
+             sleep_till.tv_sec = time(NULL) + SYNCPROC_IDLE_TIME;
+             ret = pthread_cond_timedwait(&env->cond, &env->mutex, &sleep_till);
++
++            env->procs_idle--;
+         }
+ 
+         task = list_entry(env->runq.next, struct synctask, all_tasks);
+@@ -540,6 +595,34 @@ unlock:
+     return task;
+ }
+ 
++static void
++synctask_timer(void *data)
++{
++    struct synctask *task = data;
++    struct synccond *cond;
++
++    cond = task->synccond;
++    if (cond != NULL) {
++        pthread_mutex_lock(&cond->pmutex);
++
++        list_del_init(&task->waitq);
++        task->synccond = NULL;
++
++        pthread_mutex_unlock(&cond->pmutex);
++
++        task->ret = -ETIMEDOUT;
++    }
++
++    pthread_mutex_lock(&task->env->mutex);
++
++    gf_timer_call_cancel(task->xl->ctx, task->timer);
++    task->timer = NULL;
++
++    __synctask_wake(task);
++
++    pthread_mutex_unlock(&task->env->mutex);
++}
++
+ void
+ synctask_switchto(struct synctask *task)
+ {
+@@ -572,7 +655,14 @@ synctask_switchto(struct synctask *task)
+         } else {
+             task->slept = 1;
+             __wait(task);
++
++            if (task->delta != NULL) {
++                task->timer = gf_timer_call_after(task->xl->ctx, *task->delta,
++                                                  synctask_timer, task);
++            }
+         }
++
++        task->delta = NULL;
+     }
+     pthread_mutex_unlock(&env->mutex);
+ }
+@@ -580,65 +670,18 @@ synctask_switchto(struct synctask *task)
+ void *
+ syncenv_processor(void *thdata)
+ {
+-    struct syncenv *env = NULL;
+     struct syncproc *proc = NULL;
+     struct synctask *task = NULL;
+ 
+     proc = thdata;
+-    env = proc->env;
+-
+-    for (;;) {
+-        task = syncenv_task(proc);
+-        if (!task)
+-            break;
+ 
++    while ((task = syncenv_task(proc)) != NULL) {
+         synctask_switchto(task);
+-
+-        syncenv_scale(env);
+     }
+ 
+     return NULL;
+ }
+ 
+-void
+-syncenv_scale(struct syncenv *env)
+-{
+-    int diff = 0;
+-    int scale = 0;
+-    int i = 0;
+-    int ret = 0;
+-
+-    pthread_mutex_lock(&env->mutex);
+-    {
+-        if (env->procs > env->runcount)
+-            goto unlock;
+-
+-        scale = env->runcount;
+-        if (scale > env->procmax)
+-            scale = env->procmax;
+-        if (scale > env->procs)
+-            diff = scale - env->procs;
+-        while (diff) {
+-            diff--;
+-            for (; (i < env->procmax); i++) {
+-                if (env->proc[i].processor == 0)
+-                    break;
+-            }
+-
+-            env->proc[i].env = env;
+-            ret = gf_thread_create(&env->proc[i].processor, NULL,
+-                                   syncenv_processor, &env->proc[i],
+-                                   "sproc%03hx", env->procs & 0x3ff);
+-            if (ret)
+-                break;
+-            env->procs++;
+-            i++;
+-        }
+-    }
+-unlock:
+-    pthread_mutex_unlock(&env->mutex);
+-}
+-
+ /* The syncenv threads are cleaned up in this routine.
+  */
+ void
+@@ -715,12 +758,13 @@ syncenv_new(size_t stacksize, int procmin, int procmax)
+         newenv->stacksize = stacksize;
+     newenv->procmin = procmin;
+     newenv->procmax = procmax;
++    newenv->procs_idle = 0;
+ 
+     for (i = 0; i < newenv->procmin; i++) {
+         newenv->proc[i].env = newenv;
+         ret = gf_thread_create(&newenv->proc[i].processor, NULL,
+                                syncenv_processor, &newenv->proc[i], "sproc%d",
+-                               newenv->procs);
++                               i);
+         if (ret)
+             break;
+         newenv->procs++;
+@@ -810,7 +854,7 @@ __synclock_lock(struct synclock *lock)
+             task->woken = 0;
+             list_add_tail(&task->waitq, &lock->waitq);
+             pthread_mutex_unlock(&lock->guard);
+-            synctask_yield(task);
++            synctask_yield(task, NULL);
+             /* task is removed from waitq in unlock,
+              * under lock->guard.*/
+             pthread_mutex_lock(&lock->guard);
+@@ -963,6 +1007,136 @@ synclock_unlock(synclock_t *lock)
+     return ret;
+ }
+ 
++/* Condition variables */
++
++int32_t
++synccond_init(synccond_t *cond)
++{
++    int32_t ret;
++
++    INIT_LIST_HEAD(&cond->waitq);
++
++    ret = pthread_mutex_init(&cond->pmutex, NULL);
++    if (ret != 0) {
++        return -ret;
++    }
++
++    ret = pthread_cond_init(&cond->pcond, NULL);
++    if (ret != 0) {
++        pthread_mutex_destroy(&cond->pmutex);
++    }
++
++    return -ret;
++}
++
++void
++synccond_destroy(synccond_t *cond)
++{
++    pthread_cond_destroy(&cond->pcond);
++    pthread_mutex_destroy(&cond->pmutex);
++}
++
++int
++synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta)
++{
++    struct timespec now;
++    struct synctask *task = NULL;
++    int ret;
++
++    task = synctask_get();
++
++    if (task == NULL) {
++        if (delta != NULL) {
++            timespec_now_realtime(&now);
++            timespec_adjust_delta(&now, *delta);
++        }
++
++        pthread_mutex_lock(&cond->pmutex);
++
++        if (delta == NULL) {
++            ret = -pthread_cond_wait(&cond->pcond, &cond->pmutex);
++        } else {
++            ret = -pthread_cond_timedwait(&cond->pcond, &cond->pmutex, &now);
++        }
++    } else {
++        pthread_mutex_lock(&cond->pmutex);
++
++        list_add_tail(&task->waitq, &cond->waitq);
++        task->synccond = cond;
++
++        ret = synclock_unlock(lock);
++        if (ret == 0) {
++            pthread_mutex_unlock(&cond->pmutex);
++
++            synctask_yield(task, delta);
++
++            ret = synclock_lock(lock);
++            if (ret == 0) {
++                ret = task->ret;
++            }
++            task->ret = 0;
++
++            return ret;
++        }
++
++        list_del_init(&task->waitq);
++    }
++
++    pthread_mutex_unlock(&cond->pmutex);
++
++    return ret;
++}
++
++int
++synccond_wait(synccond_t *cond, synclock_t *lock)
++{
++    return synccond_timedwait(cond, lock, NULL);
++}
++
++void
++synccond_signal(synccond_t *cond)
++{
++    struct synctask *task;
++
++    pthread_mutex_lock(&cond->pmutex);
++
++    if (!list_empty(&cond->waitq)) {
++        task = list_first_entry(&cond->waitq, struct synctask, waitq);
++        list_del_init(&task->waitq);
++
++        pthread_mutex_unlock(&cond->pmutex);
++
++        synctask_wake(task);
++    } else {
++        pthread_cond_signal(&cond->pcond);
++
++        pthread_mutex_unlock(&cond->pmutex);
++    }
++}
++
++void
++synccond_broadcast(synccond_t *cond)
++{
++    struct list_head list;
++    struct synctask *task;
++
++    INIT_LIST_HEAD(&list);
++
++    pthread_mutex_lock(&cond->pmutex);
++
++    list_splice_init(&cond->waitq, &list);
++    pthread_cond_broadcast(&cond->pcond);
++
++    pthread_mutex_unlock(&cond->pmutex);
++
++    while (!list_empty(&list)) {
++        task = list_first_entry(&list, struct synctask, waitq);
++        list_del_init(&task->waitq);
++
++        synctask_wake(task);
++    }
++}
++
+ /* Barriers */
+ 
+ int
+@@ -1032,7 +1206,7 @@ __syncbarrier_wait(struct syncbarrier *barrier, int waitfor)
+             /* called within a synctask */
+             list_add_tail(&task->waitq, &barrier->waitq);
+             pthread_mutex_unlock(&barrier->guard);
+-            synctask_yield(task);
++            synctask_yield(task, NULL);
+             pthread_mutex_lock(&barrier->guard);
+         } else {
+             /* called by a non-synctask */
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index c692119..957deaa 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -5224,7 +5224,7 @@ gf_defrag_pause_tier(xlator_t *this, gf_defrag_info_t *defrag)
+     defrag->tier_conf.pause_timer = gf_timer_call_after(
+         this->ctx, delta, gf_defrag_pause_tier_timeout, this);
+ 
+-    synctask_yield(defrag->tier_conf.pause_synctask);
++    synctask_yield(defrag->tier_conf.pause_synctask, NULL);
+ 
+     if (gf_defrag_get_pause_state(&defrag->tier_conf) == TIER_PAUSED)
+         goto out;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index 0d29de2..6475611 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -6076,13 +6076,8 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ static void
+ glusterd_wait_for_blockers(glusterd_conf_t *priv)
+ {
+-    uint64_t blockers = GF_ATOMIC_GET(priv->blockers);
+-
+-    while (blockers) {
+-        synclock_unlock(&priv->big_lock);
+-        sleep(1);
+-        blockers = GF_ATOMIC_GET(priv->blockers);
+-        synclock_lock(&priv->big_lock);
++    while (GF_ATOMIC_GET(priv->blockers)) {
++        synccond_wait(&priv->cond_blockers, &priv->big_lock);
+     }
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
+index 36018a0..f55a5fd 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
++++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
+@@ -112,7 +112,7 @@ glusterd_proc_stop(glusterd_proc_t *proc, int sig, int flags)
+         goto out;
+ 
+     synclock_unlock(&conf->big_lock);
+-    sleep(1);
++    synctask_sleep(1);
+     synclock_lock(&conf->big_lock);
+     if (gf_is_service_running(proc->pidfile, &pid)) {
+         ret = kill(pid, SIGKILL);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+index d225854..386eed2 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+@@ -1961,9 +1961,7 @@ glusterd_update_snaps_synctask(void *opaque)
+     synclock_lock(&conf->big_lock);
+ 
+     while (conf->restart_bricks) {
+-        synclock_unlock(&conf->big_lock);
+-        sleep(2);
+-        synclock_lock(&conf->big_lock);
++        synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
+     }
+     conf->restart_bricks = _gf_true;
+ 
+@@ -2070,6 +2068,7 @@ out:
+     if (dict)
+         dict_unref(dict);
+     conf->restart_bricks = _gf_false;
++    synccond_broadcast(&conf->cond_restart_bricks);
+ 
+     return ret;
+ }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
+index ce4a940..a265f21 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-syncop.h
++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
+@@ -32,7 +32,7 @@
+         ret = gd_syncop_submit_request(rpc, req, stb, cookie, prog, procnum,   \
+                                        cbk, (xdrproc_t)xdrproc);               \
+         if (!ret)                                                              \
+-            synctask_yield(stb->task);                                         \
++            synctask_yield(stb->task, NULL);                                   \
+         else                                                                   \
+             gf_asprintf(&stb->errstr,                                          \
+                         "%s failed. Check log file"                            \
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 812c698..ce9931c 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -5068,22 +5068,22 @@ glusterd_import_friend_volumes_synctask(void *opaque)
+      * restarted (refer glusterd_restart_bricks ())
+      */
+     while (conf->restart_bricks) {
+-        synclock_unlock(&conf->big_lock);
+-        sleep(2);
+-        synclock_lock(&conf->big_lock);
++        synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
+     }
+     conf->restart_bricks = _gf_true;
+ 
+     while (i <= count) {
+         ret = glusterd_import_friend_volume(peer_data, i);
+         if (ret) {
+-            conf->restart_bricks = _gf_false;
+-            goto out;
++            break;
+         }
+         i++;
+     }
+-    glusterd_svcs_manager(NULL);
++    if (i > count) {
++        glusterd_svcs_manager(NULL);
++    }
+     conf->restart_bricks = _gf_false;
++    synccond_broadcast(&conf->cond_restart_bricks);
+ out:
+     if (peer_data)
+         dict_unref(peer_data);
+@@ -5769,7 +5769,9 @@ my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
+     call_frame_t *frame = v_frame;
+     glusterd_conf_t *conf = frame->this->private;
+ 
+-    GF_ATOMIC_DEC(conf->blockers);
++    if (GF_ATOMIC_DEC(conf->blockers) == 0) {
++        synccond_broadcast(&conf->cond_blockers);
++    }
+ 
+     STACK_DESTROY(frame->root);
+     return 0;
+@@ -5865,7 +5867,9 @@ attach_brick_callback(struct rpc_req *req, struct iovec *iov, int count,
+         }
+     }
+ out:
+-    GF_ATOMIC_DEC(conf->blockers);
++    if (GF_ATOMIC_DEC(conf->blockers) == 0) {
++        synccond_broadcast(&conf->cond_blockers);
++    }
+     STACK_DESTROY(frame->root);
+     return 0;
+ }
+@@ -6053,7 +6057,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
+          * TBD: see if there's a better way
+          */
+         synclock_unlock(&conf->big_lock);
+-        sleep(1);
++        synctask_sleep(1);
+         synclock_lock(&conf->big_lock);
+     }
+ 
+@@ -6193,7 +6197,7 @@ find_compat_brick_in_vol(glusterd_conf_t *conf,
+                          "brick %s is still"
+                          " starting, waiting for 2 seconds ",
+                          other_brick->path);
+-            sleep(2);
++            synctask_sleep(2);
+             synclock_lock(&conf->big_lock);
+             retries--;
+         }
+@@ -6680,9 +6684,7 @@ glusterd_restart_bricks(void *opaque)
+      * glusterd_compare_friend_data ())
+      */
+     while (conf->restart_bricks) {
+-        synclock_unlock(&conf->big_lock);
+-        sleep(2);
+-        synclock_lock(&conf->big_lock);
++        synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
+     }
+     conf->restart_bricks = _gf_true;
+ 
+@@ -6798,6 +6800,7 @@ out:
+     GF_ATOMIC_DEC(conf->blockers);
+     conf->restart_done = _gf_true;
+     conf->restart_bricks = _gf_false;
++    synccond_broadcast(&conf->cond_restart_bricks);
+ 
+ return_block:
+     return ret;
+diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
+index d360312..a01034a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.c
++++ b/xlators/mgmt/glusterd/src/glusterd.c
+@@ -1845,6 +1845,8 @@ init(xlator_t *this)
+     (void)strncpy(conf->rundir, rundir, sizeof(conf->rundir));
+ 
+     synclock_init(&conf->big_lock, SYNC_LOCK_RECURSIVE);
++    synccond_init(&conf->cond_restart_bricks);
++    synccond_init(&conf->cond_blockers);
+     pthread_mutex_init(&conf->xprt_lock, NULL);
+     INIT_LIST_HEAD(&conf->xprt_list);
+     pthread_mutex_init(&conf->import_volumes, NULL);
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index 2be005c..1c6c3b1 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -209,6 +209,8 @@ typedef struct {
+     dict_t *opts;
+     synclock_t big_lock;
+     gf_boolean_t restart_done;
++    synccond_t cond_restart_bricks;
++    synccond_t cond_blockers;
+     rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */
+     uint32_t base_port;
+     uint32_t max_port;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0378-Revert-open-behind-fix-missing-fd-reference.patch b/SOURCES/0378-Revert-open-behind-fix-missing-fd-reference.patch
new file mode 100644
index 0000000..e228be2
--- /dev/null
+++ b/SOURCES/0378-Revert-open-behind-fix-missing-fd-reference.patch
@@ -0,0 +1,120 @@
+From d79660ccc65f163e0d9cf91cc13a199bec04d5f1 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez Juan <xhernandez@redhat.com>
+Date: Wed, 20 May 2020 12:55:43 +0000
+Subject: [PATCH 378/379] Revert "open-behind: fix missing fd reference"
+
+This reverts commit 30cbdf8c06145a0c290da42ecc0a7eae928200b7.
+
+The patch is not complete because there have been some crash reports
+upstream recently after the patch was released. A new patch that should
+cover all corner cases is under review (), but it's a big change and it
+could be risky to backport it without enough testing.
+
+Since there exists a workaround to avoid the problem (disable
+open-behind), for now we revert the patch.
+
+Change-Id: I9cfc55623c33758cf5530b18f03c0d795b0f650b
+BUG: 1830713
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/200952
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 27 +++++++++--------------
+ 1 file changed, 11 insertions(+), 16 deletions(-)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index 14ebc12..268c717 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -206,13 +206,8 @@ ob_fd_free(ob_fd_t *ob_fd)
+     if (ob_fd->xdata)
+         dict_unref(ob_fd->xdata);
+ 
+-    if (ob_fd->open_frame) {
+-        /* If we sill have a frame it means that background open has never
+-         * been triggered. We need to release the pending reference. */
+-        fd_unref(ob_fd->fd);
+-
++    if (ob_fd->open_frame)
+         STACK_DESTROY(ob_fd->open_frame->root);
+-    }
+ 
+     GF_FREE(ob_fd);
+ }
+@@ -302,7 +297,6 @@ ob_wake_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+             call_resume(stub);
+     }
+ 
+-    /* The background open is completed. We can release the 'fd' reference. */
+     fd_unref(fd);
+ 
+     STACK_DESTROY(frame->root);
+@@ -337,9 +331,7 @@ ob_fd_wake(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
+     }
+ 
+     if (frame) {
+-        /* We don't need to take a reference here. We already have a reference
+-         * while the open is pending. */
+-        frame->local = fd;
++        frame->local = fd_ref(fd);
+ 
+         STACK_WIND(frame, ob_wake_cbk, FIRST_CHILD(this),
+                    FIRST_CHILD(this)->fops->open, &ob_fd->loc, ob_fd->flags, fd,
+@@ -353,12 +345,15 @@ void
+ ob_inode_wake(xlator_t *this, struct list_head *ob_fds)
+ {
+     ob_fd_t *ob_fd = NULL, *tmp = NULL;
++    fd_t *fd = NULL;
+ 
+     if (!list_empty(ob_fds)) {
+         list_for_each_entry_safe(ob_fd, tmp, ob_fds, ob_fds_on_inode)
+         {
+             ob_fd_wake(this, ob_fd->fd, ob_fd);
++            fd = ob_fd->fd;
+             ob_fd_free(ob_fd);
++            fd_unref(fd);
+         }
+     }
+ }
+@@ -370,7 +365,7 @@ ob_fd_copy(ob_fd_t *src, ob_fd_t *dst)
+     if (!src || !dst)
+         goto out;
+ 
+-    dst->fd = src->fd;
++    dst->fd = __fd_ref(src->fd);
+     dst->loc.inode = inode_ref(src->loc.inode);
+     gf_uuid_copy(dst->loc.gfid, src->loc.gfid);
+     dst->flags = src->flags;
+@@ -514,6 +509,7 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ 
+     ob_fd->ob_inode = ob_inode;
+ 
++    /* don't do fd_ref, it'll cause leaks */
+     ob_fd->fd = fd;
+ 
+     ob_fd->open_frame = copy_frame(frame);
+@@ -543,16 +539,15 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+     }
+     UNLOCK(&fd->inode->lock);
+ 
+-    /* We take a reference while the background open is pending or being
+-     * processed. If we finally wind the request in the foreground, then
+-     * ob_fd_free() will take care of this additional reference. */
+-    fd_ref(fd);
+-
+     if (!open_in_progress && !unlinked) {
++        fd_ref(fd);
++
+         STACK_UNWIND_STRICT(open, frame, 0, 0, fd, xdata);
+ 
+         if (!conf->lazy_open)
+             ob_fd_wake(this, fd, NULL);
++
++        fd_unref(fd);
+     } else {
+         ob_fd_free(ob_fd);
+         STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0379-glusterd-add-missing-synccond_broadcast.patch b/SOURCES/0379-glusterd-add-missing-synccond_broadcast.patch
new file mode 100644
index 0000000..cd51c6d
--- /dev/null
+++ b/SOURCES/0379-glusterd-add-missing-synccond_broadcast.patch
@@ -0,0 +1,45 @@
+From e06882a7fea9720a2899f7d52d5d3866ff098866 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Thu, 21 May 2020 08:26:11 +0200
+Subject: [PATCH 379/379] glusterd: add missing synccond_broadcast()
+
+After the changes in commit 3da22f8cb08b05562a4c6bd2694f2f19199cff7f,
+there was a place where synccond_broadcast() was missing. It could
+cause a hang if another synctask was waiting on the condition variable.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24476
+> Change-Id: I92bfe4e15c5c3591e4854a64aa9e1566d50dd204
+> Fixes: #1116
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: I92bfe4e15c5c3591e4854a64aa9e1566d50dd204
+BUG: 1810516
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/201057
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index ce9931c..c92cdf3 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -6797,9 +6797,11 @@ glusterd_restart_bricks(void *opaque)
+     ret = 0;
+ 
+ out:
+-    GF_ATOMIC_DEC(conf->blockers);
+     conf->restart_done = _gf_true;
+     conf->restart_bricks = _gf_false;
++    if (GF_ATOMIC_DEC(conf->blockers) == 0) {
++        synccond_broadcast(&conf->cond_blockers);
++    }
+     synccond_broadcast(&conf->cond_restart_bricks);
+ 
+ return_block:
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch b/SOURCES/0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
new file mode 100644
index 0000000..05915d9
--- /dev/null
+++ b/SOURCES/0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
@@ -0,0 +1,306 @@
+From 2cf22e54c8424949607c4a20df84887b838b2702 Mon Sep 17 00:00:00 2001
+From: Krutika Dhananjay <kdhananj@redhat.com>
+Date: Fri, 15 May 2020 11:29:36 +0530
+Subject: [PATCH 380/382] features/shard: Aggregate size, block-count in iatt
+ before unwinding setxattr
+
+Backport of:
+> Upstream patch - https://review.gluster.org/c/glusterfs/+/24471
+> Fixes: #1243
+> Change-Id: I4da0eceb4235b91546df79270bcc0af8cd64e9ea
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+Posix translator returns pre and postbufs in the dict in {F}SETXATTR fops.
+These iatts are further cached at layers like md-cache.
+Shard translator, in its current state, simply returns these values without
+updating the aggregated file size and block-count.
+
+This patch fixes this problem.
+
+Change-Id: I4da0eceb4235b91546df79270bcc0af8cd64e9ea
+BUG: 1823423
+Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/201135
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ tests/bugs/shard/issue-1243.t      |  31 ++++++
+ xlators/features/shard/src/shard.c | 218 +++++++++++++++++++++++++++++++++----
+ 2 files changed, 225 insertions(+), 24 deletions(-)
+ create mode 100644 tests/bugs/shard/issue-1243.t
+
+diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
+new file mode 100644
+index 0000000..b0c092c
+--- /dev/null
++++ b/tests/bugs/shard/issue-1243.t
+@@ -0,0 +1,31 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
++TEST $CLI volume set $V0 features.shard on
++TEST $CLI volume set $V0 features.shard-block-size 4MB
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.strict-o-direct on
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
++
++TEST $CLI volume set $V0 md-cache-timeout 10
++
++# Write data into a file such that its size crosses shard-block-size
++TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
++
++# Execute a setxattr on the file.
++TEST setfattr -n trusted.libvirt -v some-value $M0/foo
++
++# Size of the file should be the aggregated size, not the shard-block-size
++EXPECT '8388608' stat -c %s $M0/foo
++
++cleanup
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index ee38ed2..6ae4c41 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -5929,36 +5929,206 @@ out:
+   return 0;
+ }
+ 
+-int32_t shard_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+-                        dict_t *dict, int32_t flags, dict_t *xdata) {
+-  int op_errno = EINVAL;
++int32_t shard_common_set_xattr_cbk(call_frame_t *frame, void *cookie,
++                                   xlator_t *this, int32_t op_ret,
++                                   int32_t op_errno, dict_t *xdata) {
++    int ret = -1;
++    struct iatt *prebuf = NULL;
++    struct iatt *postbuf = NULL;
++    struct iatt *stbuf = NULL;
++    data_t *data = NULL;
++    shard_local_t *local = NULL;
+ 
+-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+-    GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, out);
+-  }
++    local = frame->local;
+ 
+-  STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->fsetxattr,
+-                  fd, dict, flags, xdata);
+-  return 0;
+-out:
+-  shard_common_failure_unwind(GF_FOP_FSETXATTR, frame, -1, op_errno);
+-  return 0;
++    if (op_ret < 0) {
++        local->op_ret = op_ret;
++        local->op_errno = op_errno;
++        goto err;
++    }
++
++    if (!xdata)
++        goto unwind;
++
++    data = dict_get(xdata, GF_PRESTAT);
++    if (data) {
++        stbuf = data_to_iatt(data, GF_PRESTAT);
++        prebuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
++        if (prebuf == NULL) {
++            local->op_ret = -1;
++            local->op_errno = ENOMEM;
++            goto err;
++        }
++        *prebuf = *stbuf;
++        prebuf->ia_size = local->prebuf.ia_size;
++        prebuf->ia_blocks = local->prebuf.ia_blocks;
++        ret = dict_set_iatt(xdata, GF_PRESTAT, prebuf, false);
++        if (ret < 0) {
++            local->op_ret = -1;
++            local->op_errno = ENOMEM;
++            goto err;
++        }
++        prebuf = NULL;
++    }
++
++    data = dict_get(xdata, GF_POSTSTAT);
++    if (data) {
++        stbuf = data_to_iatt(data, GF_POSTSTAT);
++        postbuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
++        if (postbuf == NULL) {
++            local->op_ret = -1;
++            local->op_errno = ENOMEM;
++            goto err;
++        }
++        *postbuf = *stbuf;
++        postbuf->ia_size = local->prebuf.ia_size;
++        postbuf->ia_blocks = local->prebuf.ia_blocks;
++        ret = dict_set_iatt(xdata, GF_POSTSTAT, postbuf, false);
++        if (ret < 0) {
++            local->op_ret = -1;
++            local->op_errno = ENOMEM;
++            goto err;
++        }
++        postbuf = NULL;
++    }
++
++unwind:
++    if (local->fd)
++        SHARD_STACK_UNWIND(fsetxattr, frame, local->op_ret, local->op_errno,
++                           xdata);
++    else
++        SHARD_STACK_UNWIND(setxattr, frame, local->op_ret, local->op_errno,
++                           xdata);
++    return 0;
++
++err:
++    GF_FREE(prebuf);
++    GF_FREE(postbuf);
++    shard_common_failure_unwind(local->fop, frame, local->op_ret,
++                                local->op_errno);
++    return 0;
+ }
+ 
+-int32_t shard_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+-                       dict_t *dict, int32_t flags, dict_t *xdata) {
+-  int op_errno = EINVAL;
++int32_t shard_post_lookup_set_xattr_handler(call_frame_t *frame,
++                                            xlator_t *this) {
++    shard_local_t *local = NULL;
+ 
+-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+-    GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, out);
+-  }
++    local = frame->local;
+ 
+-  STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->setxattr,
+-                  loc, dict, flags, xdata);
+-  return 0;
+-out:
+-  shard_common_failure_unwind(GF_FOP_SETXATTR, frame, -1, op_errno);
+-  return 0;
++    if (local->op_ret < 0) {
++        shard_common_failure_unwind(local->fop, frame, local->op_ret,
++                                    local->op_errno);
++        return 0;
++    }
++
++    if (local->fd)
++        STACK_WIND(frame, shard_common_set_xattr_cbk, FIRST_CHILD(this),
++                   FIRST_CHILD(this)->fops->fsetxattr, local->fd,
++                   local->xattr_req, local->flags, local->xattr_rsp);
++    else
++        STACK_WIND(frame, shard_common_set_xattr_cbk, FIRST_CHILD(this),
++                   FIRST_CHILD(this)->fops->setxattr, &local->loc,
++                   local->xattr_req, local->flags, local->xattr_rsp);
++    return 0;
++}
++
++int32_t shard_common_set_xattr(call_frame_t *frame, xlator_t *this,
++                               glusterfs_fop_t fop, loc_t *loc, fd_t *fd,
++                               dict_t *dict, int32_t flags, dict_t *xdata) {
++    int ret = -1;
++    int op_errno = ENOMEM;
++    uint64_t block_size = 0;
++    shard_local_t *local = NULL;
++    inode_t *inode = loc ? loc->inode : fd->inode;
++
++    if ((IA_ISDIR(inode->ia_type)) || (IA_ISLNK(inode->ia_type))) {
++        if (loc)
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
++                            xdata);
++        else
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags,
++                            xdata);
++        return 0;
++    }
++
++    /* Sharded or not, if shard's special xattrs are attempted to be set,
++     * fail the fop with EPERM (except if the client is gsyncd).
++     */
++    if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
++        GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, err);
++    }
++
++    ret = shard_inode_ctx_get_block_size(inode, this, &block_size);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_CTX_GET_FAILED,
++               "Failed to get block size from inode ctx of %s",
++               uuid_utoa(inode->gfid));
++        goto err;
++    }
++
++    if (!block_size || frame->root->pid == GF_CLIENT_PID_GSYNCD) {
++        if (loc)
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
++                            xdata);
++        else
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags,
++                            xdata);
++        return 0;
++    }
++
++    local = mem_get0(this->local_pool);
++    if (!local)
++        goto err;
++
++    frame->local = local;
++    local->fop = fop;
++    if (loc) {
++        if (loc_copy(&local->loc, loc) != 0)
++            goto err;
++    }
++
++    if (fd) {
++        local->fd = fd_ref(fd);
++        local->loc.inode = inode_ref(fd->inode);
++        gf_uuid_copy(local->loc.gfid, fd->inode->gfid);
++    }
++    local->flags = flags;
++    /* Reusing local->xattr_req and local->xattr_rsp to store the setxattr dict
++     * and the xdata dict
++     */
++    if (dict)
++        local->xattr_req = dict_ref(dict);
++    if (xdata)
++        local->xattr_rsp = dict_ref(xdata);
++
++    /* To-Do: Switch from LOOKUP which is path-based, to FSTAT if the fop is
++     * on an fd. This comes under a generic class of bugs in shard tracked by
++     * bz #1782428.
++     */
++    shard_lookup_base_file(frame, this, &local->loc,
++                           shard_post_lookup_set_xattr_handler);
++    return 0;
++err:
++    shard_common_failure_unwind(fop, frame, -1, op_errno);
++    return 0;
++}
++
++int32_t shard_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
++                        dict_t *dict, int32_t flags, dict_t *xdata) {
++    shard_common_set_xattr(frame, this, GF_FOP_FSETXATTR, NULL, fd, dict, flags,
++                           xdata);
++    return 0;
++}
++
++int32_t shard_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
++                       dict_t *dict, int32_t flags, dict_t *xdata) {
++    shard_common_set_xattr(frame, this, GF_FOP_SETXATTR, loc, NULL, dict, flags,
++                           xdata);
++    return 0;
+ }
+ 
+ int shard_post_setattr_handler(call_frame_t *frame, xlator_t *this) {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch b/SOURCES/0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
new file mode 100644
index 0000000..aa875a2
--- /dev/null
+++ b/SOURCES/0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
@@ -0,0 +1,48 @@
+From 63ea2aad2474a0ca169342c699cb1689e6c1d83f Mon Sep 17 00:00:00 2001
+From: Susant Palai <spalai@redhat.com>
+Date: Fri, 22 May 2020 13:49:14 +0530
+Subject: [PATCH 381/382] dht: add null check in gf_defrag_free_dir_dfmeta
+
+Backport of https://review.gluster.org/#/c/glusterfs/+/24479/
+
+BUG:1812789
+Change-Id: I502ed43051bd60d9e5d2b69d4e4d7b6eea997285
+Signed-off-by: Susant Palai <spalai@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/201150
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ashish Pandey <aspandey@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 957deaa..8f31dca 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -47,7 +47,8 @@ gf_defrag_free_dir_dfmeta(struct dir_dfmeta *meta, int local_subvols_cnt)
+ 
+     if (meta) {
+         for (i = 0; i < local_subvols_cnt; i++) {
+-            gf_dirent_free(&meta->equeue[i]);
++            if (meta->equeue)
++                gf_dirent_free(&meta->equeue[i]);
+             if (meta->lfd && meta->lfd[i])
+                 fd_unref(meta->lfd[i]);
+         }
+@@ -3344,9 +3345,9 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+         if (ret) {
+             fd_unref(dir_dfmeta->lfd[i]);
+             dir_dfmeta->lfd[i] = NULL;
+-            gf_smsg(this->name, GF_LOG_WARNING, 0, 0,
+-                    "failed to open dir: %s subvol: %s", loc->path,
+-                    conf->local_subvols[i]->name);
++            gf_msg(this->name, GF_LOG_WARNING, -ret, 0,
++                   "failed to open dir: %s subvol: %s", loc->path,
++                   conf->local_subvols[i]->name);
+ 
+             if (conf->decommission_in_progress) {
+                 *perrno = -ret;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0382-features-shard-Aggregate-file-size-block-count-befor.patch b/SOURCES/0382-features-shard-Aggregate-file-size-block-count-befor.patch
new file mode 100644
index 0000000..a6528f5
--- /dev/null
+++ b/SOURCES/0382-features-shard-Aggregate-file-size-block-count-befor.patch
@@ -0,0 +1,422 @@
+From 4097a748cbb7616d78886b35e3360177d570b7a6 Mon Sep 17 00:00:00 2001
+From: Krutika Dhananjay <kdhananj@redhat.com>
+Date: Fri, 22 May 2020 13:25:26 +0530
+Subject: [PATCH 382/382] features/shard: Aggregate file size, block-count
+ before unwinding removexattr
+
+Posix translator returns pre and postbufs in the dict in {F}REMOVEXATTR fops.
+These iatts are further cached at layers like md-cache.
+Shard translator, in its current state, simply returns these values without
+updating the aggregated file size and block-count.
+
+This patch fixes this problem.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24480
+> Change-Id: I4b2dd41ede472c5829af80a67401ec5a6376d872
+> Fixes: #1243
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+Change-Id: I4b2dd41ede472c5829af80a67401ec5a6376d872
+BUG: 1823423
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/201456
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/shard/issue-1243.t      |  12 ++
+ xlators/features/shard/src/shard.c | 293 ++++++++++++++++++++++++++-----------
+ xlators/features/shard/src/shard.h |   1 +
+ 3 files changed, 224 insertions(+), 82 deletions(-)
+
+diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
+index b0c092c..ba22d2b 100644
+--- a/tests/bugs/shard/issue-1243.t
++++ b/tests/bugs/shard/issue-1243.t
+@@ -1,6 +1,7 @@
+ #!/bin/bash
+ 
+ . $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
+ 
+ cleanup;
+ 
+@@ -22,10 +23,21 @@ TEST $CLI volume set $V0 md-cache-timeout 10
+ # Write data into a file such that its size crosses shard-block-size
+ TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+ 
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
++
+ # Execute a setxattr on the file.
+ TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+ 
+ # Size of the file should be the aggregated size, not the shard-block-size
+ EXPECT '8388608' stat -c %s $M0/foo
+ 
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
++
++# Execute a removexattr on the file.
++TEST setfattr -x trusted.libvirt $M0/foo
++
++# Size of the file should be the aggregated size, not the shard-block-size
++EXPECT '8388608' stat -c %s $M0/foo
+ cleanup
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index 6ae4c41..2e2ef5d 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -442,6 +442,9 @@ void shard_local_wipe(shard_local_t *local) {
+   loc_wipe(&local->int_entrylk.loc);
+   loc_wipe(&local->newloc);
+ 
++  if (local->name)
++    GF_FREE(local->name);
++
+   if (local->int_entrylk.basename)
+     GF_FREE(local->int_entrylk.basename);
+   if (local->fd)
+@@ -5819,46 +5822,216 @@ int32_t shard_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd,
+   return 0;
+ }
+ 
+-int32_t shard_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+-                          const char *name, dict_t *xdata) {
+-  int op_errno = EINVAL;
++int32_t
++shard_modify_and_set_iatt_in_dict(dict_t *xdata, shard_local_t *local,
++                                  char *key)
++{
++    int ret = 0;
++    struct iatt *tmpbuf = NULL;
++    struct iatt *stbuf = NULL;
++    data_t *data = NULL;
+ 
+-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+-    GF_IF_NATIVE_XATTR_GOTO(SHARD_XATTR_PREFIX "*", name, op_errno, out);
+-  }
++    if (!xdata)
++        return 0;
+ 
+-  if (xdata && (frame->root->pid != GF_CLIENT_PID_GSYNCD)) {
+-    dict_del(xdata, GF_XATTR_SHARD_BLOCK_SIZE);
+-    dict_del(xdata, GF_XATTR_SHARD_FILE_SIZE);
+-  }
++    data = dict_get(xdata, key);
++    if (!data)
++        return 0;
+ 
+-  STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+-                  FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+-  return 0;
+-out:
+-  shard_common_failure_unwind(GF_FOP_REMOVEXATTR, frame, -1, op_errno);
+-  return 0;
++    tmpbuf = data_to_iatt(data, key);
++    stbuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
++    if (stbuf == NULL) {
++        local->op_ret = -1;
++        local->op_errno = ENOMEM;
++        goto err;
++    }
++    *stbuf = *tmpbuf;
++    stbuf->ia_size = local->prebuf.ia_size;
++    stbuf->ia_blocks = local->prebuf.ia_blocks;
++    ret = dict_set_iatt(xdata, key, stbuf, false);
++    if (ret < 0) {
++        local->op_ret = -1;
++        local->op_errno = ENOMEM;
++        goto err;
++    }
++    return 0;
++
++err:
++    GF_FREE(stbuf);
++    return -1;
+ }
+ 
+-int32_t shard_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+-                           const char *name, dict_t *xdata) {
+-  int op_errno = EINVAL;
++int32_t
++shard_common_remove_xattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
++                              int32_t op_ret, int32_t op_errno, dict_t *xdata)
++{
++    int ret = -1;
++    shard_local_t *local = NULL;
+ 
+-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+-    GF_IF_NATIVE_XATTR_GOTO(SHARD_XATTR_PREFIX "*", name, op_errno, out);
+-  }
++    local = frame->local;
+ 
+-  if (xdata && (frame->root->pid != GF_CLIENT_PID_GSYNCD)) {
+-    dict_del(xdata, GF_XATTR_SHARD_BLOCK_SIZE);
+-    dict_del(xdata, GF_XATTR_SHARD_FILE_SIZE);
+-  }
++    if (op_ret < 0) {
++        local->op_ret = op_ret;
++        local->op_errno = op_errno;
++        goto err;
++    }
+ 
+-  STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+-                  FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+-  return 0;
+-out:
+-  shard_common_failure_unwind(GF_FOP_FREMOVEXATTR, frame, -1, op_errno);
+-  return 0;
++    ret = shard_modify_and_set_iatt_in_dict(xdata, local, GF_PRESTAT);
++    if (ret < 0)
++        goto err;
++
++    ret = shard_modify_and_set_iatt_in_dict(xdata, local, GF_POSTSTAT);
++    if (ret < 0)
++        goto err;
++
++    if (local->fd)
++        SHARD_STACK_UNWIND(fremovexattr, frame, local->op_ret, local->op_errno,
++                           xdata);
++    else
++        SHARD_STACK_UNWIND(removexattr, frame, local->op_ret, local->op_errno,
++                           xdata);
++    return 0;
++
++err:
++    shard_common_failure_unwind(local->fop, frame, local->op_ret,
++                                local->op_errno);
++    return 0;
++}
++
++int32_t
++shard_post_lookup_remove_xattr_handler(call_frame_t *frame, xlator_t *this)
++{
++    shard_local_t *local = NULL;
++
++    local = frame->local;
++
++    if (local->op_ret < 0) {
++        shard_common_failure_unwind(local->fop, frame, local->op_ret,
++                                    local->op_errno);
++        return 0;
++    }
++
++    if (local->fd)
++        STACK_WIND(frame, shard_common_remove_xattr_cbk, FIRST_CHILD(this),
++                   FIRST_CHILD(this)->fops->fremovexattr, local->fd,
++                   local->name, local->xattr_req);
++    else
++        STACK_WIND(frame, shard_common_remove_xattr_cbk, FIRST_CHILD(this),
++                   FIRST_CHILD(this)->fops->removexattr, &local->loc,
++                   local->name, local->xattr_req);
++    return 0;
++}
++
++int32_t
++shard_common_remove_xattr(call_frame_t *frame, xlator_t *this,
++                          glusterfs_fop_t fop, loc_t *loc, fd_t *fd,
++                          const char *name, dict_t *xdata)
++{
++    int ret = -1;
++    int op_errno = ENOMEM;
++    uint64_t block_size = 0;
++    shard_local_t *local = NULL;
++    inode_t *inode = loc ? loc->inode : fd->inode;
++
++    if ((IA_ISDIR(inode->ia_type)) || (IA_ISLNK(inode->ia_type))) {
++        if (loc)
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->removexattr, loc, name,
++                            xdata);
++        else
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->fremovexattr, fd, name,
++                            xdata);
++        return 0;
++    }
++
++    /* If shard's special xattrs are attempted to be removed,
++     * fail the fop with EPERM (except if the client is gsyncd).
++     */
++    if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
++        GF_IF_NATIVE_XATTR_GOTO(SHARD_XATTR_PREFIX "*", name, op_errno, err);
++    }
++
++    /* Repeat the same check for bulk-removexattr */
++    if (xdata && (frame->root->pid != GF_CLIENT_PID_GSYNCD)) {
++        dict_del(xdata, GF_XATTR_SHARD_BLOCK_SIZE);
++        dict_del(xdata, GF_XATTR_SHARD_FILE_SIZE);
++    }
++
++    ret = shard_inode_ctx_get_block_size(inode, this, &block_size);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_CTX_GET_FAILED,
++               "Failed to get block size from inode ctx of %s",
++               uuid_utoa(inode->gfid));
++        goto err;
++    }
++
++    if (!block_size || frame->root->pid == GF_CLIENT_PID_GSYNCD) {
++        if (loc)
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->removexattr, loc, name,
++                            xdata);
++        else
++            STACK_WIND_TAIL(frame, FIRST_CHILD(this),
++                            FIRST_CHILD(this)->fops->fremovexattr, fd, name,
++                            xdata);
++        return 0;
++    }
++
++    local = mem_get0(this->local_pool);
++    if (!local)
++        goto err;
++
++    frame->local = local;
++    local->fop = fop;
++    if (loc) {
++        if (loc_copy(&local->loc, loc) != 0)
++            goto err;
++    }
++
++    if (fd) {
++        local->fd = fd_ref(fd);
++        local->loc.inode = inode_ref(fd->inode);
++        gf_uuid_copy(local->loc.gfid, fd->inode->gfid);
++    }
++
++    if (name) {
++        local->name = gf_strdup(name);
++        if (!local->name)
++            goto err;
++    }
++
++    if (xdata)
++        local->xattr_req = dict_ref(xdata);
++
++    /* To-Do: Switch from LOOKUP which is path-based, to FSTAT if the fop is
++     * on an fd. This comes under a generic class of bugs in shard tracked by
++     * bz #1782428.
++     */
++    shard_lookup_base_file(frame, this, &local->loc,
++                           shard_post_lookup_remove_xattr_handler);
++    return 0;
++err:
++    shard_common_failure_unwind(fop, frame, -1, op_errno);
++    return 0;
++}
++
++int32_t
++shard_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
++                  const char *name, dict_t *xdata)
++{
++    shard_common_remove_xattr(frame, this, GF_FOP_REMOVEXATTR, loc, NULL, name,
++                              xdata);
++    return 0;
++}
++
++int32_t
++shard_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
++                   const char *name, dict_t *xdata)
++{
++    shard_common_remove_xattr(frame, this, GF_FOP_FREMOVEXATTR, NULL, fd, name,
++                              xdata);
++    return 0;
+ }
+ 
+ int32_t shard_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+@@ -5933,10 +6106,6 @@ int32_t shard_common_set_xattr_cbk(call_frame_t *frame, void *cookie,
+                                    xlator_t *this, int32_t op_ret,
+                                    int32_t op_errno, dict_t *xdata) {
+     int ret = -1;
+-    struct iatt *prebuf = NULL;
+-    struct iatt *postbuf = NULL;
+-    struct iatt *stbuf = NULL;
+-    data_t *data = NULL;
+     shard_local_t *local = NULL;
+ 
+     local = frame->local;
+@@ -5947,52 +6116,14 @@ int32_t shard_common_set_xattr_cbk(call_frame_t *frame, void *cookie,
+         goto err;
+     }
+ 
+-    if (!xdata)
+-        goto unwind;
+-
+-    data = dict_get(xdata, GF_PRESTAT);
+-    if (data) {
+-        stbuf = data_to_iatt(data, GF_PRESTAT);
+-        prebuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
+-        if (prebuf == NULL) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-            goto err;
+-        }
+-        *prebuf = *stbuf;
+-        prebuf->ia_size = local->prebuf.ia_size;
+-        prebuf->ia_blocks = local->prebuf.ia_blocks;
+-        ret = dict_set_iatt(xdata, GF_PRESTAT, prebuf, false);
+-        if (ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-            goto err;
+-        }
+-        prebuf = NULL;
+-    }
++    ret = shard_modify_and_set_iatt_in_dict(xdata, local, GF_PRESTAT);
++    if (ret < 0)
++        goto err;
+ 
+-    data = dict_get(xdata, GF_POSTSTAT);
+-    if (data) {
+-        stbuf = data_to_iatt(data, GF_POSTSTAT);
+-        postbuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
+-        if (postbuf == NULL) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-            goto err;
+-        }
+-        *postbuf = *stbuf;
+-        postbuf->ia_size = local->prebuf.ia_size;
+-        postbuf->ia_blocks = local->prebuf.ia_blocks;
+-        ret = dict_set_iatt(xdata, GF_POSTSTAT, postbuf, false);
+-        if (ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-            goto err;
+-        }
+-        postbuf = NULL;
+-    }
++    ret = shard_modify_and_set_iatt_in_dict(xdata, local, GF_POSTSTAT);
++    if (ret < 0)
++        goto err;
+ 
+-unwind:
+     if (local->fd)
+         SHARD_STACK_UNWIND(fsetxattr, frame, local->op_ret, local->op_errno,
+                            xdata);
+@@ -6002,8 +6133,6 @@ unwind:
+     return 0;
+ 
+ err:
+-    GF_FREE(prebuf);
+-    GF_FREE(postbuf);
+     shard_common_failure_unwind(local->fop, frame, local->op_ret,
+                                 local->op_errno);
+     return 0;
+diff --git a/xlators/features/shard/src/shard.h b/xlators/features/shard/src/shard.h
+index 04abd62..1721417 100644
+--- a/xlators/features/shard/src/shard.h
++++ b/xlators/features/shard/src/shard.h
+@@ -318,6 +318,7 @@ typedef struct shard_local {
+     uint32_t deletion_rate;
+     gf_boolean_t cleanup_required;
+     uuid_t base_gfid;
++    char *name;
+ } shard_local_t;
+ 
+ typedef struct shard_inode_ctx {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch b/SOURCES/0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
new file mode 100644
index 0000000..3adaa65
--- /dev/null
+++ b/SOURCES/0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
@@ -0,0 +1,38 @@
+From f880df2ce4706dd748a09d3d6db57d49f62a234c Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Thu, 28 May 2020 08:26:47 -0400
+Subject: [PATCH 383/383] common-ha: ganesha-ha.sh bad test for {rhel,centos}
+ for pcs options
+
+bash [[ ... =~ ... ]] built-in returns _0_ when the regex matches,
+not 1, thus the sense of the test is backwards and never correctly
+detects rhel or centos.
+
+https://review.gluster.org/#/c/glusterfs/+/24502/
+
+Change-Id: Ic9e60aae4ea38aff8f13979080995e60621a68fe
+BUG: 1840794
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/201686
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 4ecf91b..a6814b1 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -1054,7 +1054,7 @@ main()
+         # Fedora 29+ and rhel/centos 8 has PCS-0.10.x
+         # default is pcs-0.10.x options but check for
+         # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
+-        if [[ ${ID} =~ {rhel,centos} ]]; then
++        if [[ ! ${ID} =~ {rhel,centos} ]]; then
+             if [[ ${VERSION_ID} == 7.* ]]; then
+                 PCS9OR10_PCS_CNAME_OPTION="--name"
+                 PCS9OR10_PCS_CLONE_OPTION="--clone"
+-- 
+1.8.3.1
+
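Illustrative sketch (not from any patch above) of the bash [[ ... =~ ... ]] behaviour the 0383 commit message relies on: the test exits with status 0 when the regex matches and non-zero when it does not, and {rhel,centos} is brace-expansion syntax rather than ERE alternation, so a pattern such as (rhel|centos) is what actually matches either distribution ID. The ID and VERSION_ID values below are assumed example values from /etc/os-release.

    #!/usr/bin/env bash
    # Assumed /etc/os-release values for a RHEL/CentOS 7 host (example only).
    ID="centos"
    VERSION_ID="7.8"

    # [[ =~ ]] exits with status 0 on a match, non-zero otherwise.
    if [[ ${ID} =~ (rhel|centos) ]]; then
        if [[ ${VERSION_ID} == 7.* ]]; then
            # rhel/centos 7 ships pcs-0.9.x, which uses the older option names
            # (--name / --clone) seen in the patched ganesha-ha.sh hunk above.
            echo "pcs-0.9.x host: use --name / --clone"
        fi
    fi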
diff --git a/SPECS/glusterfs.spec b/SPECS/glusterfs.spec
index 50ca112..e97233c 100644
--- a/SPECS/glusterfs.spec
+++ b/SPECS/glusterfs.spec
@@ -79,6 +79,11 @@
 # rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
 %{?_without_rdma:%global _without_rdma --disable-ibverbs}
 
+# No RDMA Support on 32-bit ARM
+%ifarch armv7hl
+%global _without_rdma --disable-ibverbs
+%endif
+
 # server
 # if you wish to build rpms without server components, compile like this
 # rpmbuild -ta glusterfs-6.0.tar.gz --without server
@@ -125,6 +130,12 @@
 ## All %%global definitions should be placed here and keep them sorted
 ##
 
+# selinux booleans whose default value needs modification
+# these booleans will be consumed by the "%%selinux_set_booleans" macro.
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%global selinuxbooleans rsync_full_access=1 rsync_client=1
+%endif
+
 %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
 %global _with_systemd true
 %endif
@@ -226,7 +237,7 @@ Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
 %else
 Name:             glusterfs
 Version:          6.0
-Release:          29%{?dist}
+Release:          37%{?dist}
 ExcludeArch:      i686
 %endif
 License:          GPLv2 or LGPLv3+
@@ -655,6 +666,38 @@ Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
 Patch0349: 0349-glusterfind-python3-compatibility.patch
 Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
 Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch
+Patch0352: 0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
+Patch0353: 0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
+Patch0354: 0354-core-fix-memory-pool-management-races.patch
+Patch0355: 0355-core-Prevent-crash-on-process-termination.patch
+Patch0356: 0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
+Patch0357: 0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
+Patch0358: 0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
+Patch0359: 0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
+Patch0360: 0360-rpc-Make-ssl-log-more-useful.patch
+Patch0361: 0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
+Patch0362: 0362-write-behind-fix-data-corruption.patch
+Patch0363: 0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
+Patch0364: 0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
+Patch0365: 0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
+Patch0366: 0366-snapshot-fix-python3-issue-in-gcron.patch
+Patch0367: 0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
+Patch0368: 0368-Update-rfc.sh-to-rhgs-3.5.2.patch
+Patch0369: 0369-cluster-ec-Return-correct-error-code-and-log-message.patch
+Patch0370: 0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
+Patch0371: 0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
+Patch0372: 0372-posix-fix-seek-functionality.patch
+Patch0373: 0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
+Patch0374: 0374-open-behind-fix-missing-fd-reference.patch
+Patch0375: 0375-features-shard-Send-correct-size-when-reads-are-sent.patch
+Patch0376: 0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
+Patch0377: 0377-syncop-improve-scaling-and-implement-more-tools.patch
+Patch0378: 0378-Revert-open-behind-fix-missing-fd-reference.patch
+Patch0379: 0379-glusterd-add-missing-synccond_broadcast.patch
+Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
+Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
+Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
+Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
 
 %description
 GlusterFS is a distributed file-system capable of scaling to several
@@ -722,6 +765,7 @@ This package provides the GlusterFS CLI application and its man page
 %package cloudsync-plugins
 Summary:          Cloudsync Plugins
 BuildRequires:    libcurl-devel
+Requires:         glusterfs-libs = %{version}-%{release}
 
 %description cloudsync-plugins
 GlusterFS is a distributed file-system capable of scaling to several
@@ -809,6 +853,7 @@ Summary:          NFS-Ganesha configuration
 Group:            Applications/File
 
 Requires:         %{name}-server%{?_isa} = %{version}-%{release}
+Requires:         nfs-ganesha-selinux >= 2.7.3
 Requires:         nfs-ganesha-gluster >= 2.7.3
 Requires:         pcs, dbus
 %if ( 0%{?rhel} && 0%{?rhel} == 6 )
@@ -822,7 +867,7 @@ Requires:         net-tools
 %endif
 
 %if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
-%if ( 0%{?rhel} )
+%if ( 0%{?rhel} && 0%{?rhel} < 8 )
 Requires: selinux-policy >= 3.13.1-160
 Requires(post):   policycoreutils-python
 Requires(postun): policycoreutils-python
@@ -861,6 +906,14 @@ Requires:         python%{_pythonver}-gluster = %{version}-%{release}
 Requires:         rsync
 Requires:         util-linux
 Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
+# required for setting selinux bools
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+Requires(post):      policycoreutils-python-utils
+Requires(postun):    policycoreutils-python-utils
+Requires:            selinux-policy-targeted
+Requires(post):      selinux-policy-targeted
+BuildRequires:       selinux-policy-devel
+%endif
 
 %description geo-replication
 GlusterFS is a distributed file-system capable of scaling to several
@@ -1360,6 +1413,9 @@ exit 0
 
 %if ( 0%{!?_without_georeplication:1} )
 %post geo-replication
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%selinux_set_booleans %{selinuxbooleans}
+%endif
 if [ $1 -ge 1 ]; then
     %systemd_postun_with_restart glusterd
 fi
@@ -2382,8 +2438,32 @@ fi
 %endif
 
 %changelog
-* Tue Mar 31 2020 CentOS Sources <bugs@centos.org> - 6.0-29.el7.centos
-- remove vendor and/or packager lines
+* Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
+- fixes bugs bz#1840794
+
+* Wed May 27 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-36
+- fixes bugs bz#1812789 bz#1823423
+
+* Fri May 22 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-35
+- fixes bugs bz#1810516 bz#1830713 bz#1836233
+
+* Sun May 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-34
+- fixes bugs bz#1802013 bz#1823706 bz#1825177 bz#1830713 bz#1831403 bz#1833017
+
+* Wed Apr 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-33
+- fixes bugs bz#1812789 bz#1813917 bz#1823703 bz#1823706 bz#1825195
+
+* Sat Apr 04 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-32
+- fixes bugs bz#1781543 bz#1812789 bz#1812824 bz#1817369 bz#1819059
+
+* Tue Mar 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-31
+- fixes bugs bz#1802727
+
+* Thu Feb 20 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30.1
+- fixes bugs bz#1800703
+
+* Sat Feb 01 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30
+- fixes bugs bz#1775564 bz#1794153
 
 * Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
 - fixes bugs bz#1793035