From 74096ce252316894332f548e95ce2cc8a1063b5c Mon Sep 17 00:00:00 2001
From: CentOS Sources <bugs@centos.org>
Date: Wed, 6 Oct 2021 14:56:09 +0000
Subject: import glusterfs-6.0-56.4.el8


---

diff --git a/README.debrand b/README.debrand
deleted file mode 100644
index 01c46d2..0000000
--- a/README.debrand
+++ /dev/null
@@ -1,2 +0,0 @@
-Warning: This package was configured for automatic debranding, but the changes
-failed to apply.
diff --git a/SOURCES/0384-Update-rfc.sh-to-rhgs-3.5.3.patch b/SOURCES/0384-Update-rfc.sh-to-rhgs-3.5.3.patch
new file mode 100644
index 0000000..4db2222
--- /dev/null
+++ b/SOURCES/0384-Update-rfc.sh-to-rhgs-3.5.3.patch
@@ -0,0 +1,26 @@
+From 27dc773af276e33fcca10788fae17d131c8d9bce Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Sun, 31 May 2020 15:46:24 -0400
+Subject: [PATCH 384/449] Update rfc.sh to rhgs-3.5.3
+
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+---
+ rfc.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rfc.sh b/rfc.sh
+index 37d551f..1dca29f 100755
+--- a/rfc.sh
++++ b/rfc.sh
+@@ -18,7 +18,7 @@ done
+ shift $((OPTIND-1))
+ 
+ 
+-branch="rhgs-3.5.2";
++branch="rhgs-3.5.3";
+ 
+ set_hooks_commit_msg()
+ {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0384-extras-Modify-group-virt-to-include-network-related-.patch b/SOURCES/0384-extras-Modify-group-virt-to-include-network-related-.patch
deleted file mode 100644
index 45684e1..0000000
--- a/SOURCES/0384-extras-Modify-group-virt-to-include-network-related-.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 2948ee521316d40384130138233178ba940b175f Mon Sep 17 00:00:00 2001
-From: Krutika Dhananjay <kdhananj@redhat.com>
-Date: Mon, 4 May 2020 14:30:57 +0530
-Subject: [PATCH 384/392] extras: Modify group 'virt' to include
- network-related options
-
-This is needed to work around an issue where VMs running on
-online hosts get killed when a different host is rebooted in
-ovirt-gluster hyperconverged environments. The actual RCA is quite
-lengthy and documented in the github issue; please refer to it
-for more details.
-
-Upstream patch:
-> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24400
-> Change-Id: Ic25b5f50144ad42458e5c847e1e7e191032396c1
-> Fixes: #1217
-> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
-
-Change-Id: Ic25b5f50144ad42458e5c847e1e7e191032396c1
-BUG: 1848899
-Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203685
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- extras/group-virt.example | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/extras/group-virt.example b/extras/group-virt.example
-index c2ce89d..3a441eb 100644
---- a/extras/group-virt.example
-+++ b/extras/group-virt.example
-@@ -16,3 +16,8 @@ cluster.choose-local=off
- client.event-threads=4
- server.event-threads=4
- performance.client-io-threads=on
-+network.ping-timeout=20
-+server.tcp-user-timeout=20
-+server.keepalive-time=10
-+server.keepalive-interval=2
-+server.keepalive-count=5
--- 
-1.8.3.1
-
diff --git a/SOURCES/0385-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch b/SOURCES/0385-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
deleted file mode 100644
index 5572e7f..0000000
--- a/SOURCES/0385-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
+++ /dev/null
@@ -1,237 +0,0 @@
-From cdd067dcc0cd70d4f57e173b4050d8e2eb79725a Mon Sep 17 00:00:00 2001
-From: karthik-us <ksubrahm@redhat.com>
-Date: Fri, 5 Jun 2020 17:20:04 +0530
-Subject: [PATCH 385/392] cluster/afr: Prioritize ENOSPC over other errors
-
-Backport of: https://review.gluster.org/#/c/glusterfs/+/24477/
-
-Problem:
-In a replicate/arbiter volume, if file creations or writes fail on a
-quorum of bricks, with ENOSPC on one brick and a different error on
-another, the operation may be failed with an error other than ENOSPC
-in some cases.
-
-Fix:
-Prioritize ENOSPC over other lower-priority errors, and do not set
-op_errno in posix_gfid_set if op_ret is 0, to avoid returning an
-error_no that can be misinterpreted by __afr_dir_write_finalize().
-
-Also remove the function afr_has_arbiter_fop_cbk_quorum(), which
-might consider a successful reply from a single brick as quorum
-success in some cases, whereas we always need the fop to succeed
-on a quorum of bricks in arbiter configurations.
-
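-For example, with the patched afr_higher_errno() shown below, if one
-brick fails with EIO and another with ENOSPC, the application now
-sees ENOSPC (an illustrative call, not code from the patch):
-
-    afr_higher_errno(EIO, ENOSPC); /* returns ENOSPC */
-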
-Change-Id: I4dd2bff17e6812bc7c8372130976e365e2407d88
-Signed-off-by: karthik-us <ksubrahm@redhat.com>
-BUG: 1848895
-(cherry picked from commit 8b11ac1575ef167af2a47a96f7b7ed0f32bb5897)
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203691
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- .../bugs/replicate/issue-1254-prioritize-enospc.t  | 80 ++++++++++++++++++++++
- xlators/cluster/afr/src/afr-common.c               |  4 +-
- xlators/cluster/afr/src/afr-transaction.c          | 48 +------------
- xlators/storage/posix/src/posix-helpers.c          |  2 +-
- 4 files changed, 86 insertions(+), 48 deletions(-)
- create mode 100644 tests/bugs/replicate/issue-1254-prioritize-enospc.t
-
-diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
-new file mode 100644
-index 0000000..fab94b7
---- /dev/null
-+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
-@@ -0,0 +1,80 @@
-+#!/bin/bash
-+
-+. $(dirname $0)/../../include.rc
-+. $(dirname $0)/../../volume.rc
-+
-+cleanup
-+
-+function create_bricks {
-+    TEST truncate -s 100M $B0/brick0
-+    TEST truncate -s 100M $B0/brick1
-+    TEST truncate -s 20M $B0/brick2
-+    LO1=`SETUP_LOOP $B0/brick0`
-+    TEST [ $? -eq 0 ]
-+    TEST MKFS_LOOP $LO1
-+    LO2=`SETUP_LOOP $B0/brick1`
-+    TEST [ $? -eq 0 ]
-+    TEST MKFS_LOOP $LO2
-+    LO3=`SETUP_LOOP $B0/brick2`
-+    TEST [ $? -eq 0 ]
-+    TEST MKFS_LOOP $LO3
-+    TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
-+    TEST MOUNT_LOOP $LO1 $B0/${V0}0
-+    TEST MOUNT_LOOP $LO2 $B0/${V0}1
-+    TEST MOUNT_LOOP $LO3 $B0/${V0}2
-+}
-+
-+function create_files {
-+        local i=1
-+        while (true)
-+        do
-+                touch $M0/file$i
-+                if [ -e $B0/${V0}2/file$i ];
-+                then
-+                        ((i++))
-+                else
-+                        break
-+                fi
-+        done
-+}
-+
-+TESTS_EXPECTED_IN_LOOP=13
-+
-+#Arbiter volume: Check for ENOSPC when arbiter brick becomes full#
-+TEST glusterd
-+create_bricks
-+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
-+TEST $CLI volume start $V0
-+TEST $CLI volume set $V0 performance.write-behind off
-+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-+
-+create_files
-+TEST kill_brick $V0 $H0 $B0/${V0}1
-+error1=$(touch $M0/file-1 2>&1)
-+EXPECT "No space left on device" echo $error1
-+error2=$(mkdir $M0/dir-1 2>&1)
-+EXPECT "No space left on device" echo $error2
-+error3=$((echo "Test" > $M0/file-3) 2>&1)
-+EXPECT "No space left on device" echo $error3
-+
-+cleanup
-+
-+#Replica-3 volume: Check for ENOSPC when one of the brick becomes full#
-+#Keeping the third brick of lower size to simulate disk full scenario#
-+TEST glusterd
-+create_bricks
-+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
-+TEST $CLI volume start $V0
-+TEST $CLI volume set $V0 performance.write-behind off
-+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-+
-+create_files
-+TEST kill_brick $V0 $H0 $B0/${V0}1
-+error1=$(touch $M0/file-1 2>&1)
-+EXPECT "No space left on device" echo $error1
-+error2=$(mkdir $M0/dir-1 2>&1)
-+EXPECT "No space left on device" echo $error2
-+error3=$((cat /dev/zero > $M0/file1) 2>&1)
-+EXPECT "No space left on device" echo $error3
-+
-+cleanup
-diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
-index 3690b84..d6b70e9 100644
---- a/xlators/cluster/afr/src/afr-common.c
-+++ b/xlators/cluster/afr/src/afr-common.c
-@@ -2463,7 +2463,7 @@ error:
-  * others in that they must be given higher priority while
-  * returning to the user.
-  *
-- * The hierarchy is ENODATA > ENOENT > ESTALE > others
-+ * The hierarchy is ENODATA > ENOENT > ESTALE > ENOSPC > others
-  */
- 
- int
-@@ -2475,6 +2475,8 @@ afr_higher_errno(int32_t old_errno, int32_t new_errno)
-         return ENOENT;
-     if (old_errno == ESTALE || new_errno == ESTALE)
-         return ESTALE;
-+    if (old_errno == ENOSPC || new_errno == ENOSPC)
-+        return ENOSPC;
- 
-     return new_errno;
- }
-diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
-index 15f3a7e..8e65ae2 100644
---- a/xlators/cluster/afr/src/afr-transaction.c
-+++ b/xlators/cluster/afr/src/afr-transaction.c
-@@ -514,42 +514,6 @@ afr_compute_pre_op_sources(call_frame_t *frame, xlator_t *this)
-                 local->transaction.pre_op_sources[j] = 0;
- }
- 
--gf_boolean_t
--afr_has_arbiter_fop_cbk_quorum(call_frame_t *frame)
--{
--    afr_local_t *local = NULL;
--    afr_private_t *priv = NULL;
--    xlator_t *this = NULL;
--    gf_boolean_t fop_failed = _gf_false;
--    unsigned char *pre_op_sources = NULL;
--    int i = 0;
--
--    local = frame->local;
--    this = frame->this;
--    priv = this->private;
--    pre_op_sources = local->transaction.pre_op_sources;
--
--    /* If the fop failed on the brick, it is not a source. */
--    for (i = 0; i < priv->child_count; i++)
--        if (local->transaction.failed_subvols[i])
--            pre_op_sources[i] = 0;
--
--    switch (AFR_COUNT(pre_op_sources, priv->child_count)) {
--        case 1:
--            if (pre_op_sources[ARBITER_BRICK_INDEX])
--                fop_failed = _gf_true;
--            break;
--        case 0:
--            fop_failed = _gf_true;
--            break;
--    }
--
--    if (fop_failed)
--        return _gf_false;
--
--    return _gf_true;
--}
--
- void
- afr_txn_arbitrate_fop(call_frame_t *frame, xlator_t *this)
- {
-@@ -968,12 +932,8 @@ afr_need_dirty_marking(call_frame_t *frame, xlator_t *this)
-         priv->child_count)
-         return _gf_false;
- 
--    if (priv->arbiter_count) {
--        if (!afr_has_arbiter_fop_cbk_quorum(frame))
--            need_dirty = _gf_true;
--    } else if (!afr_has_fop_cbk_quorum(frame)) {
-+    if (!afr_has_fop_cbk_quorum(frame))
-         need_dirty = _gf_true;
--    }
- 
-     return need_dirty;
- }
-@@ -1023,12 +983,8 @@ afr_handle_quorum(call_frame_t *frame, xlator_t *this)
-      * no split-brain with the fix. The problem is eliminated completely.
-      */
- 
--    if (priv->arbiter_count) {
--        if (afr_has_arbiter_fop_cbk_quorum(frame))
--            return;
--    } else if (afr_has_fop_cbk_quorum(frame)) {
-+    if (afr_has_fop_cbk_quorum(frame))
-         return;
--    }
- 
-     if (afr_need_dirty_marking(frame, this))
-         goto set_response;
-diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
-index 35dd3b6..aca0df6 100644
---- a/xlators/storage/posix/src/posix-helpers.c
-+++ b/xlators/storage/posix/src/posix-helpers.c
-@@ -1059,7 +1059,7 @@ verify_handle:
-         ret = posix_handle_soft(this, path, loc, uuid_curr, &stat);
- 
- out:
--    if (!(*op_errno))
-+    if (ret && !(*op_errno))
-         *op_errno = errno;
-     return ret;
- }
--- 
-1.8.3.1
-
diff --git a/SOURCES/0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch b/SOURCES/0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
new file mode 100644
index 0000000..2b194d3
--- /dev/null
+++ b/SOURCES/0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
@@ -0,0 +1,50 @@
+From 143f85f55ded7a9075408e97d05abd9568d56e7b Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Mon, 25 Nov 2019 16:35:42 +0530
+Subject: [PATCH 385/449] glusterd: start glusterd automatically on abnormal
+ shutdown
+
+If glusterd crashes or goes down abnormally, systemd should
+automatically bring glusterd back up.
+
+With this change, systemd brings glusterd up at most 3 times
+within a period of 1 hour. If the limit is exceeded, glusterd has
+to be started manually and the failure count reset using
+systemctl reset-failed.
+
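+For example, once the restart limit has been hit, the service can be
+recovered manually (a hypothetical session; the unit name
+glusterd.service is assumed):
+
+    # systemctl reset-failed glusterd.service
+    # systemctl start glusterd.service
+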
+credits: John Strunk <jstrunk@redhat.com>
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23751/
+> fixes: bz#1776264
+> Change-Id: I312d243652fb13ba028814a2ea615b67e3b10b6a
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1663557
+Change-Id: I312d243652fb13ba028814a2ea615b67e3b10b6a
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202251
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/systemd/glusterd.service.in | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
+index f604160..b944762 100644
+--- a/extras/systemd/glusterd.service.in
++++ b/extras/systemd/glusterd.service.in
+@@ -15,6 +15,11 @@ ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid  --log-leve
+ KillMode=process
+ TimeoutSec=300
+ SuccessExitStatus=15
++Restart=on-abnormal
++RestartSec=60
++StartLimitBurst=3
++StartLimitIntervalSec=3600
++StartLimitInterval=3600
+ 
+ [Install]
+ WantedBy=multi-user.target
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0386-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch b/SOURCES/0386-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
deleted file mode 100644
index 9ba5451..0000000
--- a/SOURCES/0386-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
+++ /dev/null
@@ -1,251 +0,0 @@
-From 7689fbb4be83f0e0657ec2729c4d66ed341b5751 Mon Sep 17 00:00:00 2001
-From: Ravishankar N <ravishankar@redhat.com>
-Date: Tue, 11 Feb 2020 14:34:48 +0530
-Subject: [PATCH 386/392] afr: prevent spurious entry heals leading to gfid
- split-brain
-
-Problem:
-In a hyperconverged setup with granular-entry-heal enabled, if a file
-was recreated while one of the bricks was down and an index heal was
-triggered (with the brick still down), entry self-heal would do a
-spurious heal with just the 2 good bricks. It would do a post-op,
-leading to removal of the filename from
-.glusterfs/indices/entry-changes as well as erroneous setting of afr
-xattrs on the parent. When the brick came up, the xattrs were cleared,
-resulting in the renamed file not getting healed and leading to gfid
-split-brain and EIO on the mount.
-
-Fix:
-Proceed with entry heal only when shd can connect to all bricks of the replica,
-just like in data and metadata heal.
-
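-Concretely, the heal paths below now require locks to be acquired on
-all priv->child_count bricks instead of AFR_SH_MIN_PARTICIPANTS (2);
-a sketch of the repeated pattern in the hunks:
-
-    ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name,
-                               NULL, locked_on);
-    if (ret < priv->child_count)
-        ret = -ENOTCONN; /* treat as locks not acquired; skip heal */
-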
-BUG: 1848893
-
-> Upstream patch:https://review.gluster.org/#/c/glusterfs/+/24109/
-> fixes: bz#1801624
-> Change-Id: I916ae26ad1fabf259bc6362da52d433b7223b17e
-> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
-
-Change-Id: I23f57e543cff1e3f35eb8dbc60a2babfae6838c7
-Signed-off-by: Ravishankar N <ravishankar@redhat.com>
-(cherry picked from commit 2b2eb846c49caba13ab92ec66af20292e7780fc1)
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203692
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Tested-by: Karthik Subrahmanya <ksubrahm@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- .../bug-1433571-undo-pending-only-on-up-bricks.t   | 18 ++-----
- tests/bugs/replicate/bug-1801624-entry-heal.t      | 58 ++++++++++++++++++++++
- xlators/cluster/afr/src/afr-common.c               |  4 +-
- xlators/cluster/afr/src/afr-self-heal-common.c     |  8 +--
- xlators/cluster/afr/src/afr-self-heal-entry.c      |  6 +--
- xlators/cluster/afr/src/afr-self-heal-name.c       |  2 +-
- xlators/cluster/afr/src/afr-self-heal.h            |  2 -
- 7 files changed, 69 insertions(+), 29 deletions(-)
- create mode 100644 tests/bugs/replicate/bug-1801624-entry-heal.t
-
-diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
-index 0767f47..10ce013 100644
---- a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
-+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
-@@ -49,25 +49,15 @@ TEST $CLI volume start $V0 force
- EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
- EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
- 
--#Kill brick 0 and turn on the client side heal and do ls to trigger the heal.
--#The pending xattrs on bricks 1 & 2 should have pending entry on brick 0.
--TEST kill_brick $V0 $H0 $B0/${V0}0
-+# We were killing one brick and checking that entry heal does not reset the
-+# pending xattrs for the down brick. Now that we need all bricks to be up for
-+# entry heal, I'm removing that test from the .t
-+
- TEST $CLI volume set $V0 cluster.data-self-heal on
- TEST $CLI volume set $V0 cluster.metadata-self-heal on
- TEST $CLI volume set $V0 cluster.entry-self-heal on
- 
- TEST ls $M0
--EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
--EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
--EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
--EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
--
--#Bring back all the bricks and trigger the heal again by doing ls. Now the
--#pending xattrs on all the bricks should be 0.
--TEST $CLI volume start $V0 force
--EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
--TEST ls $M0
--
- TEST cat $M0/f1
- TEST cat $M0/f2
- TEST cat $M0/f3
-diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
-new file mode 100644
-index 0000000..94b4651
---- /dev/null
-+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
-@@ -0,0 +1,58 @@
-+#!/bin/bash
-+
-+. $(dirname $0)/../../include.rc
-+. $(dirname $0)/../../volume.rc
-+cleanup;
-+
-+TEST glusterd
-+TEST pidof glusterd
-+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
-+TEST $CLI volume set $V0 heal-timeout 5
-+TEST $CLI volume start $V0
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-+TEST $CLI volume heal $V0 granular-entry-heal enable
-+
-+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
-+echo "Data">$M0/FILE
-+ret=$?
-+TEST [ $ret -eq 0 ]
-+
-+# Re-create the file when a brick is down.
-+TEST kill_brick $V0 $H0 $B0/brick1
-+TEST rm $M0/FILE
-+echo "New Data">$M0/FILE
-+ret=$?
-+TEST [ $ret -eq 0 ]
-+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
-+
-+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
-+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
-+TEST sleep 5 # give index heal a chance to do one run.
-+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
-+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
-+TEST [ $brick0_pending -eq "000000000000000000000002" ]
-+TEST [ $brick2_pending -eq "000000000000000000000002" ]
-+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
-+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
-+
-+TEST $CLI volume start $V0 force
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-+$CLI volume heal $V0
-+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
-+
-+# No gfid-split-brain (i.e. EIO) must be seen. Try on fresh mount to avoid cached values.
-+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
-+TEST cat $M0/FILE
-+
-+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-+cleanup;
-diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
-index d6b70e9..939246e 100644
---- a/xlators/cluster/afr/src/afr-common.c
-+++ b/xlators/cluster/afr/src/afr-common.c
-@@ -6632,7 +6632,7 @@ afr_fav_child_reset_sink_xattrs(void *opaque)
-         ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name, 0, 0,
-                                    locked_on);
-         {
--            if (ret < AFR_SH_MIN_PARTICIPANTS)
-+            if (ret < priv->child_count)
-                 goto data_unlock;
-             ret = __afr_selfheal_data_prepare(
-                 heal_frame, this, inode, locked_on, sources, sinks,
-@@ -6649,7 +6649,7 @@ afr_fav_child_reset_sink_xattrs(void *opaque)
-         ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name,
-                                    LLONG_MAX - 1, 0, locked_on);
-         {
--            if (ret < AFR_SH_MIN_PARTICIPANTS)
-+            if (ret < priv->child_count)
-                 goto mdata_unlock;
-             ret = __afr_selfheal_metadata_prepare(
-                 heal_frame, this, inode, locked_on, sources, sinks,
-diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
-index 81ef38a..ce1ea50 100644
---- a/xlators/cluster/afr/src/afr-self-heal-common.c
-+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
-@@ -1575,7 +1575,6 @@ afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
-     char *accused = NULL;      /* Accused others without any self-accusal */
-     char *pending = NULL;      /* Have pending operations on others */
-     char *self_accused = NULL; /* Accused itself */
--    int min_participants = -1;
- 
-     priv = this->private;
- 
-@@ -1599,12 +1598,7 @@ afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
-         }
-     }
- 
--    if (type == AFR_DATA_TRANSACTION || type == AFR_METADATA_TRANSACTION) {
--        min_participants = priv->child_count;
--    } else {
--        min_participants = AFR_SH_MIN_PARTICIPANTS;
--    }
--    if (afr_success_count(replies, priv->child_count) < min_participants) {
-+    if (afr_success_count(replies, priv->child_count) < priv->child_count) {
-         /* Treat this just like locks not being acquired */
-         return -ENOTCONN;
-     }
-diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
-index 3ce882e..40be898 100644
---- a/xlators/cluster/afr/src/afr-self-heal-entry.c
-+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
-@@ -597,7 +597,7 @@ afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
-     ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
-                                locked_on);
-     {
--        if (ret < AFR_SH_MIN_PARTICIPANTS) {
-+        if (ret < priv->child_count) {
-             gf_msg_debug(this->name, 0,
-                          "%s: Skipping "
-                          "entry self-heal as only %d sub-volumes "
-@@ -991,7 +991,7 @@ __afr_selfheal_entry(call_frame_t *frame, xlator_t *this, fd_t *fd,
-     ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
-                                data_lock);
-     {
--        if (ret < AFR_SH_MIN_PARTICIPANTS) {
-+        if (ret < priv->child_count) {
-             gf_msg_debug(this->name, 0,
-                          "%s: Skipping "
-                          "entry self-heal as only %d sub-volumes could "
-@@ -1115,7 +1115,7 @@ afr_selfheal_entry(call_frame_t *frame, xlator_t *this, inode_t *inode)
-     ret = afr_selfheal_tie_breaker_entrylk(frame, this, inode, priv->sh_domain,
-                                            NULL, locked_on);
-     {
--        if (ret < AFR_SH_MIN_PARTICIPANTS) {
-+        if (ret < priv->child_count) {
-             gf_msg_debug(this->name, 0,
-                          "%s: Skipping "
-                          "entry self-heal as only %d sub-volumes could "
-diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
-index 36640b5..7d4f208 100644
---- a/xlators/cluster/afr/src/afr-self-heal-name.c
-+++ b/xlators/cluster/afr/src/afr-self-heal-name.c
-@@ -514,7 +514,7 @@ afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
-     ret = afr_selfheal_entrylk(frame, this, parent, this->name, bname,
-                                locked_on);
-     {
--        if (ret < AFR_SH_MIN_PARTICIPANTS) {
-+        if (ret < priv->child_count) {
-             ret = -ENOTCONN;
-             goto unlock;
-         }
-diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
-index 6555ec5..8234cec 100644
---- a/xlators/cluster/afr/src/afr-self-heal.h
-+++ b/xlators/cluster/afr/src/afr-self-heal.h
-@@ -11,8 +11,6 @@
- #ifndef _AFR_SELFHEAL_H
- #define _AFR_SELFHEAL_H
- 
--#define AFR_SH_MIN_PARTICIPANTS 2
--
- /* Perform fop on all UP subvolumes and wait for all callbacks to return */
- 
- #define AFR_ONALL(frame, rfn, fop, args...)                                    \
--- 
-1.8.3.1
-
diff --git a/SOURCES/0386-glusterd-increase-the-StartLimitBurst.patch b/SOURCES/0386-glusterd-increase-the-StartLimitBurst.patch
new file mode 100644
index 0000000..ff6d0f9
--- /dev/null
+++ b/SOURCES/0386-glusterd-increase-the-StartLimitBurst.patch
@@ -0,0 +1,39 @@
+From 02e7afdfb740db7cfa1a2f0f79933172d172ff27 Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Tue, 7 Jan 2020 15:32:13 +0530
+Subject: [PATCH 386/449] glusterd: increase the StartLimitBurst
+
+Based on https://bugzilla.redhat.com/show_bug.cgi?id=1782200#c6,
+the limit is being increased.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23970/
+> fixes: bz#1782200
+> Change-Id: Ia885c7bdb2a90f0946c5268da894f6a4da5a69b7
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1663557
+Change-Id: Ia885c7bdb2a90f0946c5268da894f6a4da5a69b7
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202252
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/systemd/glusterd.service.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
+index b944762..699aea3 100644
+--- a/extras/systemd/glusterd.service.in
++++ b/extras/systemd/glusterd.service.in
+@@ -17,7 +17,7 @@ TimeoutSec=300
+ SuccessExitStatus=15
+ Restart=on-abnormal
+ RestartSec=60
+-StartLimitBurst=3
++StartLimitBurst=6
+ StartLimitIntervalSec=3600
+ StartLimitInterval=3600
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0387-To-fix-readdir-ahead-memory-leak.patch b/SOURCES/0387-To-fix-readdir-ahead-memory-leak.patch
new file mode 100644
index 0000000..b685215
--- /dev/null
+++ b/SOURCES/0387-To-fix-readdir-ahead-memory-leak.patch
@@ -0,0 +1,47 @@
+From d54f087a2484695ff7ac214d39f2750fddcef2d5 Mon Sep 17 00:00:00 2001
+From: HuangShujun <549702281@qq.com>
+Date: Thu, 5 Dec 2019 10:07:10 +0200
+Subject: [PATCH 387/449] To fix readdir-ahead memory leak
+
+The glusterfs client process leaks memory if several files are created
+under one folder and the folder is then deleted. According to the
+statedump, the ref counts held by readdir-ahead are greater than zero
+in the inode table.
+
+Readdir-ahead gets the parent inode via inode_parent in
+rda_mark_inode_dirty on each rda_writev_cbk; the parent folder's ref
+count is increased in inode_parent, but readdir-ahead never unrefs it.
+
+The fix is to unref the parent inode at the end of
+rda_mark_inode_dirty.
+
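+A minimal sketch of the corrected flow (illustrative only, using the
+libglusterfs inode_parent()/inode_unref() APIs, not the exact xlator
+code):
+
+    parent = inode_parent(inode, NULL, NULL); /* takes a ref on parent */
+    if (parent) {
+        /* ... mark the parent dirty under parent->lock ... */
+        inode_unref(parent); /* drop the ref taken by inode_parent() */
+    }
+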
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/23815
+> Fixes: bz#1779055
+> Signed-off-by: HuangShujun <549702281@qq.com>
+> Change-Id: Iee68ab1089cbc2fbc4185b93720fb1f66ee89524
+
+BUG: 1781550
+Change-Id: Iee68ab1089cbc2fbc4185b93720fb1f66ee89524
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202312
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/readdir-ahead/src/readdir-ahead.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xlators/performance/readdir-ahead/src/readdir-ahead.c b/xlators/performance/readdir-ahead/src/readdir-ahead.c
+index 7fd4f8d..933941d 100644
+--- a/xlators/performance/readdir-ahead/src/readdir-ahead.c
++++ b/xlators/performance/readdir-ahead/src/readdir-ahead.c
+@@ -254,6 +254,7 @@ rda_mark_inode_dirty(xlator_t *this, inode_t *inode)
+             }
+         }
+         UNLOCK(&parent->lock);
++        inode_unref(parent);
+     }
+ 
+     return;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0387-cluster-afr-Delay-post-op-for-fsync.patch b/SOURCES/0387-cluster-afr-Delay-post-op-for-fsync.patch
deleted file mode 100644
index 3913c14..0000000
--- a/SOURCES/0387-cluster-afr-Delay-post-op-for-fsync.patch
+++ /dev/null
@@ -1,440 +0,0 @@
-From 399fad1ac0f9273483270e8af06a5b2d28927533 Mon Sep 17 00:00:00 2001
-From: Pranith Kumar K <pkarampu@redhat.com>
-Date: Fri, 29 May 2020 14:24:53 +0530
-Subject: [PATCH 387/392] cluster/afr: Delay post-op for fsync
-
-Problem:
-AFR doesn't delay the post-op for the fsync fop. For fsync-heavy
-workloads this leads to an unnecessary fxattrop/finodelk for every
-fsync, resulting in bad performance.
-
-Fix:
-Delay the post-op for fsync as well. Add a special flag in xdata to
-indicate that afr shouldn't delay the post-op in cases where either
-the process will terminate or a graph switch will happen; otherwise
-it leads to unnecessary heals when the graph switch or process
-termination happens before the delayed post-op completes.
-
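-Callers that know no further writes will follow (fd migration on a
-graph switch, rebalance) request an immediate post-op roughly as
-follows (a sketch based on the hunks below):
-
-    xdata = dict_new();
-    if (!xdata || dict_set_int8(xdata, "last-fsync", 1))
-        gf_log(...); /* log, but still fsync without the flag */
-    ret = syncop_fsync(subvol, fd, 0, NULL, NULL, xdata, NULL);
-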
-> Upstream-patch: https://review.gluster.org/c/glusterfs/+/24473
-> Fixes: #1253
-
-BUG: 1848896
-Change-Id: I531940d13269a111c49e0510d49514dc169f4577
-Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
-(cherry picked from commit 3ed98fc9dcb39223032e343fd5b0ad17fa3cae14)
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203694
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Tested-by: Karthik Subrahmanya <ksubrahm@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- api/src/glfs-resolve.c                         |  14 ++-
- tests/basic/afr/durability-off.t               |   2 +
- tests/basic/gfapi/gfapi-graph-switch-open-fd.t |  44 +++++++++
- tests/basic/gfapi/gfapi-keep-writing.c         | 129 +++++++++++++++++++++++++
- xlators/cluster/afr/src/afr-inode-write.c      |  11 ++-
- xlators/cluster/afr/src/afr-transaction.c      |   9 +-
- xlators/cluster/afr/src/afr.h                  |   2 +-
- xlators/cluster/dht/src/dht-rebalance.c        |  15 ++-
- xlators/mount/fuse/src/fuse-bridge.c           |  23 ++++-
- 9 files changed, 239 insertions(+), 10 deletions(-)
- create mode 100644 tests/basic/gfapi/gfapi-graph-switch-open-fd.t
- create mode 100644 tests/basic/gfapi/gfapi-keep-writing.c
-
-diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
-index a79f490..062b7dc 100644
---- a/api/src/glfs-resolve.c
-+++ b/api/src/glfs-resolve.c
-@@ -722,6 +722,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
-         0,
-     };
-     char uuid1[64];
-+    dict_t *xdata = NULL;
- 
-     oldinode = oldfd->inode;
-     oldsubvol = oldinode->table->xl;
-@@ -730,7 +731,15 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
-         return fd_ref(oldfd);
- 
-     if (!oldsubvol->switched) {
--        ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, NULL, NULL);
-+        xdata = dict_new();
-+        if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
-+            gf_msg(fs->volname, GF_LOG_WARNING, ENOMEM, API_MSG_FSYNC_FAILED,
-+                   "last-fsync set failed on %s graph %s (%d)",
-+                   uuid_utoa_r(oldfd->inode->gfid, uuid1),
-+                   graphid_str(oldsubvol), oldsubvol->graph->id);
-+        }
-+
-+        ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, xdata, NULL);
-         DECODE_SYNCOP_ERR(ret);
-         if (ret) {
-             gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
-@@ -809,6 +818,9 @@ out:
-         newfd = NULL;
-     }
- 
-+    if (xdata)
-+        dict_unref(xdata);
-+
-     return newfd;
- }
- 
-diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
-index 155ffa0..6e0f18b 100644
---- a/tests/basic/afr/durability-off.t
-+++ b/tests/basic/afr/durability-off.t
-@@ -26,6 +26,8 @@ TEST $CLI volume heal $V0
- EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
- EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
- 
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
- #Test that fsyncs happen when durability is on
- TEST $CLI volume set $V0 cluster.ensure-durability on
- TEST $CLI volume set $V0 performance.strict-write-ordering on
-diff --git a/tests/basic/gfapi/gfapi-graph-switch-open-fd.t b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
-new file mode 100644
-index 0000000..2e666be
---- /dev/null
-+++ b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
-@@ -0,0 +1,44 @@
-+#!/bin/bash
-+
-+. $(dirname $0)/../../include.rc
-+. $(dirname $0)/../../volume.rc
-+
-+cleanup;
-+
-+TEST glusterd
-+
-+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{0..2};
-+EXPECT 'Created' volinfo_field $V0 'Status';
-+
-+TEST $CLI volume start $V0;
-+EXPECT 'Started' volinfo_field $V0 'Status';
-+
-+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
-+TEST touch $M0/sync
-+logdir=`gluster --print-logdir`
-+
-+TEST build_tester $(dirname $0)/gfapi-keep-writing.c -lgfapi
-+
-+
-+#Launch a program to keep doing writes on an fd
-+./$(dirname $0)/gfapi-keep-writing ${H0} $V0 $logdir/gfapi-async-calls-test.log sync &
-+p=$!
-+sleep 1 #Let some writes go through
-+#Check if graph switch will lead to any pending markers for ever
-+TEST $CLI volume set $V0 performance.quick-read off
-+TEST $CLI volume set $V0 performance.io-cache off
-+TEST $CLI volume set $V0 performance.stat-prefetch off
-+TEST $CLI volume set $V0 performance.read-ahead off
-+
-+
-+TEST rm -f $M0/sync #Make sure the glfd is closed
-+TEST wait #Wait for background process to die
-+#Goal is to check if there is permanent FOOL changelog
-+sleep 5
-+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick0/glfs_test.txt trusted.afr.dirty
-+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick1/glfs_test.txt trusted.afr.dirty
-+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick2/glfs_test.txt trusted.afr.dirty
-+
-+cleanup_tester $(dirname $0)/gfapi-async-calls-test
-+
-+cleanup;
-diff --git a/tests/basic/gfapi/gfapi-keep-writing.c b/tests/basic/gfapi/gfapi-keep-writing.c
-new file mode 100644
-index 0000000..91b59ce
---- /dev/null
-+++ b/tests/basic/gfapi/gfapi-keep-writing.c
-@@ -0,0 +1,129 @@
-+#include <fcntl.h>
-+#include <unistd.h>
-+#include <time.h>
-+#include <limits.h>
-+#include <string.h>
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <errno.h>
-+#include <glusterfs/api/glfs.h>
-+#include <glusterfs/api/glfs-handles.h>
-+
-+#define LOG_ERR(msg)                                                           \
-+    do {                                                                       \
-+        fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno));            \
-+    } while (0)
-+
-+glfs_t *
-+init_glfs(const char *hostname, const char *volname, const char *logfile)
-+{
-+    int ret = -1;
-+    glfs_t *fs = NULL;
-+
-+    fs = glfs_new(volname);
-+    if (!fs) {
-+        LOG_ERR("glfs_new failed");
-+        return NULL;
-+    }
-+
-+    ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
-+    if (ret < 0) {
-+        LOG_ERR("glfs_set_volfile_server failed");
-+        goto out;
-+    }
-+
-+    ret = glfs_set_logging(fs, logfile, 7);
-+    if (ret < 0) {
-+        LOG_ERR("glfs_set_logging failed");
-+        goto out;
-+    }
-+
-+    ret = glfs_init(fs);
-+    if (ret < 0) {
-+        LOG_ERR("glfs_init failed");
-+        goto out;
-+    }
-+
-+    ret = 0;
-+out:
-+    if (ret) {
-+        glfs_fini(fs);
-+        fs = NULL;
-+    }
-+
-+    return fs;
-+}
-+
-+int
-+glfs_test_function(const char *hostname, const char *volname,
-+                   const char *logfile, const char *syncfile)
-+{
-+    int ret = -1;
-+    int flags = O_CREAT | O_RDWR;
-+    glfs_t *fs = NULL;
-+    glfs_fd_t *glfd = NULL;
-+    const char *buff = "This is from my prog\n";
-+    const char *filename = "glfs_test.txt";
-+    struct stat buf = {0};
-+
-+    fs = init_glfs(hostname, volname, logfile);
-+    if (fs == NULL) {
-+        LOG_ERR("init_glfs failed");
-+        return -1;
-+    }
-+
-+    glfd = glfs_creat(fs, filename, flags, 0644);
-+    if (glfd == NULL) {
-+        LOG_ERR("glfs_creat failed");
-+        goto out;
-+    }
-+
-+    while (glfs_stat(fs, syncfile, &buf) == 0) {
-+        ret = glfs_write(glfd, buff, strlen(buff), flags);
-+        if (ret < 0) {
-+            LOG_ERR("glfs_write failed");
-+            goto out;
-+        }
-+    }
-+
-+    ret = glfs_close(glfd);
-+    if (ret < 0) {
-+        LOG_ERR("glfs_write failed");
-+        goto out;
-+    }
-+
-+out:
-+    ret = glfs_fini(fs);
-+    if (ret) {
-+        LOG_ERR("glfs_fini failed");
-+    }
-+
-+    return ret;
-+}
-+
-+int
-+main(int argc, char *argv[])
-+{
-+    int ret = 0;
-+    char *hostname = NULL;
-+    char *volname = NULL;
-+    char *logfile = NULL;
-+    char *syncfile = NULL;
-+
-+    if (argc != 5) {
-+        fprintf(stderr, "Invalid argument\n");
-+        exit(1);
-+    }
-+
-+    hostname = argv[1];
-+    volname = argv[2];
-+    logfile = argv[3];
-+    syncfile = argv[4];
-+
-+    ret = glfs_test_function(hostname, volname, logfile, syncfile);
-+    if (ret) {
-+        LOG_ERR("glfs_test_function failed");
-+    }
-+
-+    return ret;
-+}
-diff --git a/xlators/cluster/afr/src/afr-inode-write.c b/xlators/cluster/afr/src/afr-inode-write.c
-index 7fcc9d4..df82b6e 100644
---- a/xlators/cluster/afr/src/afr-inode-write.c
-+++ b/xlators/cluster/afr/src/afr-inode-write.c
-@@ -2492,6 +2492,7 @@ afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
-     call_frame_t *transaction_frame = NULL;
-     int ret = -1;
-     int32_t op_errno = ENOMEM;
-+    int8_t last_fsync = 0;
- 
-     transaction_frame = copy_frame(frame);
-     if (!transaction_frame)
-@@ -2501,10 +2502,16 @@ afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
-     if (!local)
-         goto out;
- 
--    if (xdata)
-+    if (xdata) {
-         local->xdata_req = dict_copy_with_ref(xdata, NULL);
--    else
-+        if (dict_get_int8(xdata, "last-fsync", &last_fsync) == 0) {
-+            if (last_fsync) {
-+                local->transaction.disable_delayed_post_op = _gf_true;
-+            }
-+        }
-+    } else {
-         local->xdata_req = dict_new();
-+    }
- 
-     if (!local->xdata_req)
-         goto out;
-diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
-index 8e65ae2..ffd0ab8 100644
---- a/xlators/cluster/afr/src/afr-transaction.c
-+++ b/xlators/cluster/afr/src/afr-transaction.c
-@@ -2385,8 +2385,13 @@ afr_is_delayed_changelog_post_op_needed(call_frame_t *frame, xlator_t *this,
-         goto out;
-     }
- 
--    if ((local->op != GF_FOP_WRITE) && (local->op != GF_FOP_FXATTROP)) {
--        /*Only allow writes but shard does [f]xattrops on writes, so
-+    if (local->transaction.disable_delayed_post_op) {
-+        goto out;
-+    }
-+
-+    if ((local->op != GF_FOP_WRITE) && (local->op != GF_FOP_FXATTROP) &&
-+        (local->op != GF_FOP_FSYNC)) {
-+        /*Only allow writes/fsyncs but shard does [f]xattrops on writes, so
-          * they are fine too*/
-         goto out;
-     }
-diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
-index e731cfa..6bc4721 100644
---- a/xlators/cluster/afr/src/afr.h
-+++ b/xlators/cluster/afr/src/afr.h
-@@ -854,7 +854,7 @@ typedef struct _afr_local {
- 
-         int (*unwind)(call_frame_t *frame, xlator_t *this);
- 
--        /* post-op hook */
-+        gf_boolean_t disable_delayed_post_op;
-     } transaction;
- 
-     syncbarrier_t barrier;
-diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
-index 8f31dca..145e616 100644
---- a/xlators/cluster/dht/src/dht-rebalance.c
-+++ b/xlators/cluster/dht/src/dht-rebalance.c
-@@ -1564,6 +1564,7 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
-     xlator_t *old_target = NULL;
-     xlator_t *hashed_subvol = NULL;
-     fd_t *linkto_fd = NULL;
-+    dict_t *xdata = NULL;
- 
-     if (from == to) {
-         gf_msg_debug(this->name, 0,
-@@ -1882,7 +1883,15 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
- 
-     /* TODO: Sync the locks */
- 
--    ret = syncop_fsync(to, dst_fd, 0, NULL, NULL, NULL, NULL);
-+    xdata = dict_new();
-+    if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
-+        gf_log(this->name, GF_LOG_ERROR,
-+               "%s: failed to set last-fsync flag on "
-+               "%s (%s)",
-+               loc->path, to->name, strerror(ENOMEM));
-+    }
-+
-+    ret = syncop_fsync(to, dst_fd, 0, NULL, NULL, xdata, NULL);
-     if (ret) {
-         gf_log(this->name, GF_LOG_WARNING, "%s: failed to fsync on %s (%s)",
-                loc->path, to->name, strerror(-ret));
-@@ -2356,11 +2365,15 @@ out:
- 
-     if (dst_fd)
-         syncop_close(dst_fd);
-+
-     if (src_fd)
-         syncop_close(src_fd);
-     if (linkto_fd)
-         syncop_close(linkto_fd);
- 
-+    if (xdata)
-+        dict_unref(xdata);
-+
-     loc_wipe(&tmp_loc);
-     loc_wipe(&parent_loc);
- 
-diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
-index 6e99053..1592067 100644
---- a/xlators/mount/fuse/src/fuse-bridge.c
-+++ b/xlators/mount/fuse/src/fuse-bridge.c
-@@ -5551,6 +5551,7 @@ fuse_migrate_fd(xlator_t *this, fd_t *basefd, xlator_t *old_subvol,
-     char create_in_progress = 0;
-     fuse_fd_ctx_t *basefd_ctx = NULL;
-     fd_t *oldfd = NULL;
-+    dict_t *xdata = NULL;
- 
-     basefd_ctx = fuse_fd_ctx_get(this, basefd);
-     GF_VALIDATE_OR_GOTO("glusterfs-fuse", basefd_ctx, out);
-@@ -5587,10 +5588,23 @@ fuse_migrate_fd(xlator_t *this, fd_t *basefd, xlator_t *old_subvol,
-     }
- 
-     if (oldfd->inode->table->xl == old_subvol) {
--        if (IA_ISDIR(oldfd->inode->ia_type))
-+        if (IA_ISDIR(oldfd->inode->ia_type)) {
-             ret = syncop_fsyncdir(old_subvol, oldfd, 0, NULL, NULL);
--        else
--            ret = syncop_fsync(old_subvol, oldfd, 0, NULL, NULL, NULL, NULL);
-+        } else {
-+            xdata = dict_new();
-+            if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
-+                gf_log("glusterfs-fuse", GF_LOG_WARNING,
-+                       "last-fsync set failed (%s) on fd (%p)"
-+                       "(basefd:%p basefd-inode.gfid:%s) "
-+                       "(old-subvolume:%s-%d new-subvolume:%s-%d)",
-+                       strerror(ENOMEM), oldfd, basefd,
-+                       uuid_utoa(basefd->inode->gfid), old_subvol->name,
-+                       old_subvol->graph->id, new_subvol->name,
-+                       new_subvol->graph->id);
-+            }
-+
-+            ret = syncop_fsync(old_subvol, oldfd, 0, NULL, NULL, xdata, NULL);
-+        }
- 
-         if (ret < 0) {
-             gf_log("glusterfs-fuse", GF_LOG_WARNING,
-@@ -5645,6 +5659,9 @@ out:
- 
-     fd_unref(oldfd);
- 
-+    if (xdata)
-+        dict_unref(xdata);
-+
-     return ret;
- }
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch b/SOURCES/0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
index 094a484..dc23ba8 100644
--- a/SOURCES/0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
+++ b/SOURCES/0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
@@ -1,7 +1,7 @@
-From be6fafebe1e391e9d9f14d9aed18adbfda8a262b Mon Sep 17 00:00:00 2001
+From fbda9baaf7231e3237277348cc7e873f3113fd14 Mon Sep 17 00:00:00 2001
 From: l17zhou <cynthia.zhou@nokia-sbell.com.cn>
 Date: Mon, 4 Nov 2019 08:45:52 +0200
-Subject: [PATCH 388/392] rpc: Cleanup SSL specific data at the time of freeing
+Subject: [PATCH 388/449] rpc: Cleanup SSL specific data at the time of freeing
  rpc object
 
 Problem: At the time of cleanup rpc object ssl specific data
@@ -17,9 +17,9 @@ Solution: To avoid the leak cleanup ssl specific data at the
 > (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23650/)
 
 Change-Id: I37f598673ae2d7a33c75f39eb8843ccc6dffaaf0
-BUG: 1848891
+BUG: 1786516
 Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203698
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202308
 Tested-by: RHGS Build Bot <nigelb@redhat.com>
 Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
 ---
diff --git a/SOURCES/0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch b/SOURCES/0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
new file mode 100644
index 0000000..7f20fb2
--- /dev/null
+++ b/SOURCES/0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
@@ -0,0 +1,297 @@
+From 50318713486e79d9258cf22e656caff402256dde Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Sun, 20 Oct 2019 22:01:01 +0530
+Subject: [PATCH 389/449] posix: Avoid diskpace error in case of overwriting
+ the data
+
+Problem: Sometimes fops like posix_writev, posix_fallocate, and
+         posix_zerofill fail and throw ENOSPC if the storage.reserve
+         threshold limit has been reached, even when the fop is only
+         overwriting existing data
+
+Solution: Retry the fop in the overwrite case if the disk-space check
+          fails
+
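+In other words, a write that lands entirely within the file's current
+size cannot consume additional reserved space, so the ENOSPC from the
+reserve check can be retried (a sketch of the condition used in the
+hunks below):
+
+    if (op_errno == ENOSPC && priv->disk_space_full &&
+        offset + len <= statbuf.st_size)
+        goto overwrite; /* retry: pure overwrite, no growth */
+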
+> Credits: kinsu <vpolakis@gmail.com>
+> Change-Id: I987d73bcf47ed1bb27878df40c39751296e95fe8
+> Updates: #745
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit ca3e5905ac02fb9c373ac3de10b44f061d04cd6f)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23572/)
+
+Change-Id: I987d73bcf47ed1bb27878df40c39751296e95fe8
+BUG: 1787331
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202307
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/posix/bug-1651445.t                 |   1 +
+ xlators/storage/posix/src/posix-entry-ops.c    |   1 -
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 141 ++++++++++++++++++++++---
+ 3 files changed, 126 insertions(+), 17 deletions(-)
+
+diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t
+index 5248d47..4d08b69 100644
+--- a/tests/bugs/posix/bug-1651445.t
++++ b/tests/bugs/posix/bug-1651445.t
+@@ -33,6 +33,7 @@ sleep 5
+ # setup_lvm create lvm partition of 150M and 40M are reserve so after
+ # consuming more than 110M next dd should fail
+ TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
++TEST dd if=/dev/urandom of=$M0/a  bs=1022 count=1  oflag=seek_bytes,sync seek=102 conv=notrunc
+ 
+ rm -rf $M0/*
+ 
+diff --git a/xlators/storage/posix/src/posix-entry-ops.c b/xlators/storage/posix/src/posix-entry-ops.c
+index 283b305..bea0bbf 100644
+--- a/xlators/storage/posix/src/posix-entry-ops.c
++++ b/xlators/storage/posix/src/posix-entry-ops.c
+@@ -1634,7 +1634,6 @@ posix_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ 
+     priv = this->private;
+     VALIDATE_OR_GOTO(priv, out);
+-    DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, op_ret, op_errno, out);
+ 
+     SET_FS_ID(frame->root->uid, frame->root->gid);
+     MAKE_ENTRY_HANDLE(real_oldpath, par_oldpath, this, oldloc, NULL);
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index a2a518f..bcce06e 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -692,6 +692,10 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+     gf_boolean_t locked = _gf_false;
+     posix_inode_ctx_t *ctx = NULL;
+     struct posix_private *priv = NULL;
++    gf_boolean_t check_space_error = _gf_false;
++    struct stat statbuf = {
++        0,
++    };
+ 
+     DECLARE_OLD_FS_ID_VAR;
+ 
+@@ -711,7 +715,10 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+     if (priv->disk_reserve)
+         posix_disk_space_check(this);
+ 
+-    DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, ret, ret, out);
++    DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, ret, ret, unlock);
++
++overwrite:
++    check_space_error = _gf_true;
+ 
+     ret = posix_fd_ctx_get(fd, this, &pfd, &op_errno);
+     if (ret < 0) {
+@@ -735,7 +742,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+         ret = -errno;
+         gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_FSTAT_FAILED,
+                "fallocate (fstat) failed on fd=%p", fd);
+-        goto out;
++        goto unlock;
+     }
+ 
+     if (xdata) {
+@@ -745,7 +752,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+             gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+                    "file state check failed, fd %p", fd);
+             ret = -EIO;
+-            goto out;
++            goto unlock;
+         }
+     }
+ 
+@@ -756,7 +763,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+                "fallocate failed on %s offset: %jd, "
+                "len:%zu, flags: %d",
+                uuid_utoa(fd->inode->gfid), offset, len, flags);
+-        goto out;
++        goto unlock;
+     }
+ 
+     ret = posix_fdstat(this, fd->inode, pfd->fd, statpost);
+@@ -764,16 +771,47 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+         ret = -errno;
+         gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_FSTAT_FAILED,
+                "fallocate (fstat) failed on fd=%p", fd);
+-        goto out;
++        goto unlock;
+     }
+ 
+     posix_set_ctime(frame, this, NULL, pfd->fd, fd->inode, statpost);
+ 
+-out:
++unlock:
+     if (locked) {
+         pthread_mutex_unlock(&ctx->write_atomic_lock);
+         locked = _gf_false;
+     }
++
++    if (op_errno == ENOSPC && priv->disk_space_full && !check_space_error) {
++#ifdef FALLOC_FL_KEEP_SIZE
++        if (flags & FALLOC_FL_KEEP_SIZE) {
++            goto overwrite;
++        }
++#endif
++        ret = posix_fd_ctx_get(fd, this, &pfd, &op_errno);
++        if (ret < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, ret, P_MSG_PFD_NULL,
++                   "pfd is NULL from fd=%p", fd);
++            goto out;
++        }
++
++        if (sys_fstat(pfd->fd, &statbuf) < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, op_errno, P_MSG_FILE_OP_FAILED,
++                   "%d", pfd->fd);
++            goto out;
++        }
++
++        if (offset + len <= statbuf.st_size) {
++            gf_msg_debug(this->name, 0,
++                         "io vector size will not"
++                         " change disk size so allow overwrite for"
++                         " fd %d",
++                         pfd->fd);
++            goto overwrite;
++        }
++    }
++
++out:
+     SET_TO_OLD_FS_ID();
+     if (ret == ENOSPC)
+         ret = -ENOSPC;
+@@ -1083,25 +1121,57 @@ posix_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+     int op_ret = -1;
+     int op_errno = EINVAL;
+     dict_t *rsp_xdata = NULL;
++    gf_boolean_t check_space_error = _gf_false;
++    struct posix_fd *pfd = NULL;
++    struct stat statbuf = {
++        0,
++    };
+ 
+-    VALIDATE_OR_GOTO(frame, out);
+-    VALIDATE_OR_GOTO(this, out);
++    VALIDATE_OR_GOTO(frame, unwind);
++    VALIDATE_OR_GOTO(this, unwind);
+ 
+     priv = this->private;
+     DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, op_ret, op_errno, out);
+ 
++overwrite:
++    check_space_error = _gf_true;
+     ret = posix_do_zerofill(frame, this, fd, offset, len, &statpre, &statpost,
+                             xdata, &rsp_xdata);
+     if (ret < 0) {
+         op_ret = -1;
+         op_errno = -ret;
+-        goto out;
++        goto unwind;
+     }
+ 
+     STACK_UNWIND_STRICT(zerofill, frame, 0, 0, &statpre, &statpost, rsp_xdata);
+     return 0;
+ 
+ out:
++    if (op_errno == ENOSPC && priv->disk_space_full && !check_space_error) {
++        ret = posix_fd_ctx_get(fd, this, &pfd, &op_errno);
++        if (ret < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, ret, P_MSG_PFD_NULL,
++                   "pfd is NULL from fd=%p", fd);
++            goto out;
++        }
++
++        if (sys_fstat(pfd->fd, &statbuf) < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, op_errno, P_MSG_FILE_OP_FAILED,
++                   "%d", pfd->fd);
++            goto out;
++        }
++
++        if (offset + len <= statbuf.st_size) {
++            gf_msg_debug(this->name, 0,
++                         "io vector size will not"
++                         " change disk size so allow overwrite for"
++                         " fd %d",
++                         pfd->fd);
++            goto overwrite;
++        }
++    }
++
++unwind:
+     STACK_UNWIND_STRICT(zerofill, frame, op_ret, op_errno, NULL, NULL,
+                         rsp_xdata);
+     return 0;
+@@ -1857,19 +1927,28 @@ posix_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+     gf_boolean_t write_append = _gf_false;
+     gf_boolean_t update_atomic = _gf_false;
+     posix_inode_ctx_t *ctx = NULL;
++    gf_boolean_t check_space_error = _gf_false;
++    struct stat statbuf = {
++        0,
++    };
++    int totlen = 0;
++    int idx = 0;
+ 
+-    VALIDATE_OR_GOTO(frame, out);
+-    VALIDATE_OR_GOTO(this, out);
+-    VALIDATE_OR_GOTO(fd, out);
+-    VALIDATE_OR_GOTO(fd->inode, out);
+-    VALIDATE_OR_GOTO(vector, out);
+-    VALIDATE_OR_GOTO(this->private, out);
++    VALIDATE_OR_GOTO(frame, unwind);
++    VALIDATE_OR_GOTO(this, unwind);
++    VALIDATE_OR_GOTO(fd, unwind);
++    VALIDATE_OR_GOTO(fd->inode, unwind);
++    VALIDATE_OR_GOTO(vector, unwind);
++    VALIDATE_OR_GOTO(this->private, unwind);
+ 
+     priv = this->private;
+ 
+-    VALIDATE_OR_GOTO(priv, out);
++    VALIDATE_OR_GOTO(priv, unwind);
+     DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, op_ret, op_errno, out);
+ 
++overwrite:
++
++    check_space_error = _gf_true;
+     if ((fd->inode->ia_type == IA_IFBLK) || (fd->inode->ia_type == IA_IFCHR)) {
+         gf_msg(this->name, GF_LOG_ERROR, EINVAL, P_MSG_INVALID_ARGUMENT,
+                "writev received on a block/char file (%s)",
+@@ -2011,6 +2090,36 @@ out:
+         locked = _gf_false;
+     }
+ 
++    if (op_errno == ENOSPC && priv->disk_space_full && !check_space_error) {
++        ret = posix_fd_ctx_get(fd, this, &pfd, &op_errno);
++        if (ret < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, ret, P_MSG_PFD_NULL,
++                   "pfd is NULL from fd=%p", fd);
++            goto unwind;
++        }
++
++        if (sys_fstat(pfd->fd, &statbuf) < 0) {
++            gf_msg(this->name, GF_LOG_WARNING, op_errno, P_MSG_FILE_OP_FAILED,
++                   "%d", pfd->fd);
++            goto unwind;
++        }
++
++        for (idx = 0; idx < count; idx++) {
++            totlen = vector[idx].iov_len;
++        }
++
++        if ((offset + totlen <= statbuf.st_size) &&
++            !(statbuf.st_blocks * statbuf.st_blksize < statbuf.st_size)) {
++            gf_msg_debug(this->name, 0,
++                         "io vector size will not"
++                         " change disk size so allow overwrite for"
++                         " fd %d",
++                         pfd->fd);
++            goto overwrite;
++        }
++    }
++
++unwind:
+     STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, &preop, &postop,
+                         rsp_xdata);
+ 
+-- 
+1.8.3.1
+
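
The retry logic added above reduces to one question: does the failed write
stay entirely inside already-allocated space, so that retrying it cannot
consume new disk blocks? A compact, self-contained sketch of that decision
(write_is_pure_overwrite is a hypothetical name, not a function in the
tree; POSIX defines st_blocks in 512-byte units, used here in place of the
patch's st_blksize, so the sparseness test remains an approximation):

#include <sys/stat.h>
#include <sys/uio.h>

static int
write_is_pure_overwrite(const struct stat *st, off_t offset,
                        const struct iovec *vector, int count)
{
    size_t totlen = 0;
    int idx = 0;

    /* Sum the io-vector lengths to get the total size of the write. */
    for (idx = 0; idx < count; idx++)
        totlen += vector[idx].iov_len;

    /* Retry is safe only if the write ends within the current file
     * size and the file is not sparse; otherwise even an overwrite
     * could allocate blocks on a full disk. */
    return (offset + (off_t)totlen <= st->st_size) &&
           !(st->st_blocks * 512 < st->st_size);
}
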
diff --git a/SOURCES/0389-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch b/SOURCES/0389-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
deleted file mode 100644
index a4f9bf2..0000000
--- a/SOURCES/0389-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From d3558cfbded7e973fae45ce2196767611336e351 Mon Sep 17 00:00:00 2001
-From: Mohit Agrawal <moagrawa@redhat.com>
-Date: Mon, 8 Jun 2020 13:27:50 +0530
-Subject: [PATCH 389/392] socket: Resolve ssl_ctx leak for a brick while only
- mgmt SSL is enabled
-
-Problem: While only mgmt SSL is enabled for a brick process use_ssl flag
-         is false for a brick process and socket api's cleanup ssl_ctx only
-         while use_ssl and ssl_ctx both are valid
-
-Solution: To avoid a leak check only ssl_ctx, if it is valid cleanup
-          ssl_ctx
-
-> Fixes: #1196
-> Change-Id: I2f4295478f4149dcb7d608ea78ee5104f28812c3
-> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
-> (Cherry pick from commit 9873baee34afdf0c20f5fc98a7dbf2a9f07447e2)
-> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24366/)
-
-BUG: 1848894
-Change-Id: I2f4295478f4149dcb7d608ea78ee5104f28812c3
-Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203705
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- rpc/rpc-transport/socket/src/socket.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
-index 226b2e2..54cd5df 100644
---- a/rpc/rpc-transport/socket/src/socket.c
-+++ b/rpc/rpc-transport/socket/src/socket.c
-@@ -1163,7 +1163,7 @@ __socket_reset(rpc_transport_t *this)
-         SSL_free(priv->ssl_ssl);
-         priv->ssl_ssl = NULL;
-     }
--    if (priv->use_ssl && priv->ssl_ctx) {
-+    if (priv->ssl_ctx) {
-         SSL_CTX_free(priv->ssl_ctx);
-         priv->ssl_ctx = NULL;
-     }
-@@ -4685,7 +4685,7 @@ fini(rpc_transport_t *this)
-             SSL_free(priv->ssl_ssl);
-             priv->ssl_ssl = NULL;
-         }
--        if (priv->use_ssl && priv->ssl_ctx) {
-+        if (priv->ssl_ctx) {
-             SSL_CTX_free(priv->ssl_ctx);
-             priv->ssl_ctx = NULL;
-         }
--- 
-1.8.3.1
-
diff --git a/SOURCES/0390-glusterd-deafult-options-after-volume-reset.patch b/SOURCES/0390-glusterd-deafult-options-after-volume-reset.patch
new file mode 100644
index 0000000..d95ce71
--- /dev/null
+++ b/SOURCES/0390-glusterd-deafult-options-after-volume-reset.patch
@@ -0,0 +1,93 @@
+From 86df0ced1cac0e3c48f6149bb2f5442f8548f89e Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Wed, 25 Dec 2019 21:56:32 +0530
+Subject: [PATCH 390/449] glusterd: default options after volume reset
+
+Problem: the default option transport.address-family disappears
+from the volume info output after a volume reset.
+
+Cause: from 3.8.0 onwards the volume option transport.address-family
+has a default value, so any volume which is created will have this
+option set and volume info will show it in its output. But volume
+reset does not handle this option.
+
+Solution: In glusterd_enable_default_options(), we should add this
+option along with the other default options. This function is called
+by glusterd_options_reset() when the volume reset command is run.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23921/
+> fixes: bz#1786478
+> Change-Id: I58f7aa24cf01f308c4efe6cae748cc3bc8b99b1d
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1781710
+Change-Id: I58f7aa24cf01f308c4efe6cae748cc3bc8b99b1d
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202258
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/glusterd/optimized-basic-testcases.t |  5 +++++
+ xlators/mgmt/glusterd/src/glusterd-utils.c      | 23 +++++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
+index d700b5e..c7e8c32 100644
+--- a/tests/bugs/glusterd/optimized-basic-testcases.t
++++ b/tests/bugs/glusterd/optimized-basic-testcases.t
+@@ -69,6 +69,11 @@ TEST pidof glusterd;
+ TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+ EXPECT 'Created' volinfo_field $V0 'Status';
+ 
++#bug-1786478 - default volume option after volume reset
++addr_family=`volinfo_field $V0 'transport.address-family'`
++TEST $CLI volume reset $V0
++EXPECT $addr_family  volinfo_field $V0 'transport.address-family'
++
+ #bug-955588 - uuid validation
+ 
+ uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index c92cdf3..6654741 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -13032,6 +13032,11 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
+     int ret = 0;
+     xlator_t *this = NULL;
+     glusterd_conf_t *conf = NULL;
++#ifdef IPV6_DEFAULT
++    char *addr_family = "inet6";
++#else
++    char *addr_family = "inet";
++#endif
+ 
+     this = THIS;
+     GF_ASSERT(this);
+@@ -13109,6 +13114,24 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
+             }
+         }
+     }
++
++    if (conf->op_version >= GD_OP_VERSION_3_9_0) {
++        if (!option || !strcmp("transport.address-family", option)) {
++            if (volinfo->transport_type == GF_TRANSPORT_TCP) {
++                ret = dict_set_dynstr_with_alloc(
++                    volinfo->dict, "transport.address-family", addr_family);
++                if (ret) {
++                    gf_msg(this->name, GF_LOG_ERROR, errno,
++                           GD_MSG_DICT_SET_FAILED,
++                           "failed to set transport."
++                           "address-family on %s",
++                           volinfo->volname);
++                    goto out;
++                }
++            }
++        }
++    }
++
+     if (conf->op_version >= GD_OP_VERSION_7_0) {
+         ret = dict_set_dynstr_with_alloc(volinfo->dict,
+                                          "storage.fips-mode-rchecksum", "on");
+-- 
+1.8.3.1
+
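
The fix above follows the existing shape of glusterd_enable_default_options():
gate on an op-version, honour an optional specific-option filter, then seed
the volume dict with the compiled-in default. A reduced sketch of that
control flow, with a plain struct standing in for glusterd's dict_t
machinery (all names here are illustrative, not from the tree):

#include <stdio.h>
#include <string.h>

#define GD_OP_VERSION_3_9_0 30900

struct volinfo {
    const char *volname;
    const char *address_family; /* stands in for volinfo->dict */
    int transport_is_tcp;
};

static int
enable_default_address_family(struct volinfo *v, const char *option,
                              int op_version)
{
#ifdef IPV6_DEFAULT
    const char *addr_family = "inet6";
#else
    const char *addr_family = "inet";
#endif

    /* Only for clusters at op-version >= 3.9.0, only when resetting
     * all options or this specific option, and only for TCP. */
    if (op_version < GD_OP_VERSION_3_9_0)
        return 0;
    if (option && strcmp(option, "transport.address-family"))
        return 0;
    if (!v->transport_is_tcp)
        return 0;

    v->address_family = addr_family;
    printf("restored transport.address-family=%s on %s\n",
           addr_family, v->volname);
    return 0;
}
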
diff --git a/SOURCES/0390-tests-Avoid-ssl-authz.t-failure.patch b/SOURCES/0390-tests-Avoid-ssl-authz.t-failure.patch
deleted file mode 100644
index 443cdb9..0000000
--- a/SOURCES/0390-tests-Avoid-ssl-authz.t-failure.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From b68fa363c5981441c20fbc78b6dc00437bd698a7 Mon Sep 17 00:00:00 2001
-From: Mohit Agrawal <moagrawa@redhat.com>
-Date: Mon, 22 Jun 2020 11:35:29 +0530
-Subject: [PATCH 390/392] tests: Avoid ssl-authz.t failure
-
-Problem: ssl-authz.t is failing at the time of checking memory
-         consumption if brick is consuming more than 5M
-
-Solution: Update the check to avoid a failure.
-
-> Change-Id: Iffb031f0695a7da83d5a2f6bac8863dad225317e
-> Fixes: bz#1811631
-> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
-> Reviewd on upstream link https://review.gluster.org/#/c/glusterfs/+/24221/)
-> (Cherry pick from commit fb20713b380e1df8d7f9e9df96563be2f9144fd6)
-
-BUG: 1848894
-Change-Id: I4fc5d2e2597abfafc1e26d908c8c4184ab82afd5
-Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203844
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- tests/features/ssl-authz.t | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/tests/features/ssl-authz.t b/tests/features/ssl-authz.t
-index 132b598..ab05c49 100755
---- a/tests/features/ssl-authz.t
-+++ b/tests/features/ssl-authz.t
-@@ -67,13 +67,15 @@ echo "Memory consumption for glusterfsd process"
- for i in $(seq 1 100); do
-         gluster v heal $V0 info >/dev/null
- done
-+#Wait to cleanup memory
-+sleep 10
- 
- end=`pmap -x $glusterfsd_pid | grep total | awk -F " " '{print $4}'`
- diff=$((end-start))
- 
--# If memory consumption is more than 5M some leak in SSL code path
-+# If memory consumption is more than 15M some leak in SSL code path
- 
--TEST [ $diff -lt 5000 ]
-+TEST [ $diff -lt 15000 ]
- 
- 
- # Set ssl-allow to a wildcard that includes our identity.
--- 
-1.8.3.1
-
diff --git a/SOURCES/0391-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch b/SOURCES/0391-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
deleted file mode 100644
index 414f259..0000000
--- a/SOURCES/0391-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From e9cb714d66a7926a746b8cd5f9288d59aefee918 Mon Sep 17 00:00:00 2001
-From: Kinglong Mee <kinglongmee@gmail.com>
-Date: Mon, 18 Mar 2019 20:47:54 +0800
-Subject: [PATCH 391/392] cluster-syncop: avoid duplicate unlock of
- inodelk/entrylk
-
-When using ec, there are many messages at brick log as,
-
-[inodelk.c:514:__inode_unlock_lock] 0-test-locks:  Matching lock not found for unlock 0-9223372036854775807, lo=68e040a84b7f0000 on 0x7f208c006f78
-[MSGID: 115053] [server-rpc-fops_v2.c:280:server4_inodelk_cbk] 0-test-server: 2557439: INODELK <gfid:df4e41be-723f-4289-b7af-b4272b3e880c> (df4e41be-723f-4289-b7af-b4272b3e880c), client: CTX_ID:67d4a7f3-605a-4965-89a5-31309d62d1fa-GRAPH_ID:0-PID:1659-HOST:openfs-node2-PC_NAME:test-client-1-RECON_NO:-28, error-xlator: test-locks [Invalid argument]
-
-> Change-Id: Ib164d29ebb071f620a4ca9679c4345ef7c88512a
-> Updates: bz#1689920
-> Signed-off-by: Kinglong Mee <mijinlong@open-fs.com>
-> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22377/
-
-BUG: 1848890
-Change-Id: Ib164d29ebb071f620a4ca9679c4345ef7c88512a
-Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203852
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- libglusterfs/src/cluster-syncop.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/libglusterfs/src/cluster-syncop.c b/libglusterfs/src/cluster-syncop.c
-index 5a08f26..6ee89dd 100644
---- a/libglusterfs/src/cluster-syncop.c
-+++ b/libglusterfs/src/cluster-syncop.c
-@@ -1203,6 +1203,10 @@ cluster_tiebreaker_inodelk(xlator_t **subvols, unsigned char *on,
-             if (num_success) {
-                 FOP_SEQ(subvols, on, numsubvols, replies, locked_on, frame,
-                         inodelk, dom, &loc, F_SETLKW, &flock, NULL);
-+            } else {
-+                loc_wipe(&loc);
-+                memset(locked_on, 0, numsubvols);
-+                return 0;
-             }
-             break;
-         }
-@@ -1244,7 +1248,9 @@ cluster_tiebreaker_entrylk(xlator_t **subvols, unsigned char *on,
-                         entrylk, dom, &loc, name, ENTRYLK_LOCK, ENTRYLK_WRLCK,
-                         NULL);
-             } else {
-+                loc_wipe(&loc);
-                 memset(locked_on, 0, numsubvols);
-+                return 0;
-             }
-             break;
-         }
--- 
-1.8.3.1
-
diff --git a/SOURCES/0391-glusterd-unlink-the-file-after-killing-the-process.patch b/SOURCES/0391-glusterd-unlink-the-file-after-killing-the-process.patch
new file mode 100644
index 0000000..2a88254
--- /dev/null
+++ b/SOURCES/0391-glusterd-unlink-the-file-after-killing-the-process.patch
@@ -0,0 +1,39 @@
+From d23859d5cbd5823b2587811aa57030436ce9e74c Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Tue, 17 Dec 2019 15:52:30 +0530
+Subject: [PATCH 391/449] glusterd: unlink the file after killing the process
+
+In glusterd_proc_stop(), after killing the pid
+we should remove the pidfile.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23890/
+> fixes: bz#1784375
+> Change-Id: Ib6367aed590932c884b0f6f892fc40542aa19686
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1784211
+Change-Id: Ib6367aed590932c884b0f6f892fc40542aa19686
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202257
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
+index f55a5fd..a05c90d 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
++++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
+@@ -107,6 +107,8 @@ glusterd_proc_stop(glusterd_proc_t *proc, int sig, int flags)
+                        "service, reason:%s",
+                        proc->name, strerror(errno));
+         }
++    } else {
++        (void)glusterd_unlink_file(proc->pidfile);
+     }
+     if (flags != PROC_STOP_FORCE)
+         goto out;
+-- 
+1.8.3.1
+
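
The two added lines implement a common daemon-management rule: once the
kill has been delivered, the pidfile is stale and should be removed so a
later status check does not mistake a dead pid for a running one. A
standalone sketch of the same pattern using plain kill(2)/unlink(2)
instead of glusterd's wrappers (proc_stop is an illustrative name):

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int
proc_stop(pid_t pid, int sig, const char *pidfile)
{
    if (kill(pid, sig) < 0) {
        perror("kill"); /* process may already be gone */
        return -1;
    }
    /* The kill succeeded, so the pidfile no longer describes a live
     * process; drop it, ignoring errors just as the patch does. */
    (void)unlink(pidfile);
    return 0;
}
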
diff --git a/SOURCES/0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch b/SOURCES/0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
new file mode 100644
index 0000000..e295e4f
--- /dev/null
+++ b/SOURCES/0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
@@ -0,0 +1,187 @@
+From a30a5fdef2e252eba9f44a3c671de8f3aa4f17d7 Mon Sep 17 00:00:00 2001
+From: Vishal Pandey <vpandey@redhat.com>
+Date: Tue, 19 Nov 2019 11:39:22 +0530
+Subject: [PATCH 392/449] glusterd: Brick process fails to come up with
+ brickmux on
+
+Issue:
+1- In a cluster of 3 Nodes N1, N2, N3. Create 3 volumes vol1,
+vol2, vol3 with 3 bricks (one from each node)
+2- Set cluster.brick-multiplex on
+3- Start all 3 volumes
+4- Check if all bricks on a node are running on same port
+5- Kill N1
+6- Set performance.readdir-ahead for volumes vol1, vol2, vol3
+7- Bring N1 up and check volume status
+8- Brick processes fail to come up on N1.
+
+Root Cause -
+Since there is a diff in volfile versions on N1 as compared
+to N2 and N3, glusterd_import_friend_volume() is called.
+glusterd_import_friend_volume() copies the new_volinfo, deletes
+old_volinfo and then calls glusterd_start_bricks().
+glusterd_start_bricks() looks for the volfiles and sends an rpc
+request to glusterfs_handle_attach(). Now, since the volinfo
+has been deleted by glusterd_delete_stale_volume()
+from the priv->volumes list before glusterd_start_bricks(), and
+glusterd_create_volfiles_and_notify_services() and
+glusterd_list_add_order() are called after glusterd_start_bricks(),
+the attach RPC req gets an empty volfile path
+and that causes the brick to crash.
+
+Fix- Call glusterd_list_add_order() and
+glusterd_create_volfiles_and_notify_services() before the
+glusterd_start_bricks() call is made in glusterd_import_friend_volume().
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23724/
+> Change-Id: Idfe0e8710f7eb77ca3ddfa1cabeb45b2987f41aa
+> Fixes: bz#1773856
+> Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+
+BUG: 1683602
+Change-Id: Idfe0e8710f7eb77ca3ddfa1cabeb45b2987f41aa
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202255
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../glusterd/brick-mux-validation-in-cluster.t     | 61 +++++++++++++++++++++-
+ xlators/mgmt/glusterd/src/glusterd-utils.c         | 28 +++++-----
+ 2 files changed, 75 insertions(+), 14 deletions(-)
+
+diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+index 4e57038..f088dbb 100644
+--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
++++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+@@ -7,6 +7,20 @@ function count_brick_processes {
+         pgrep glusterfsd | wc -l
+ }
+ 
++function count_brick_pids {
++        $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
++                                     | grep -v "N/A" | sort | uniq | wc -l
++}
++
++function count_N/A_brick_pids {
++        $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
++                                     | grep -- '\-1' | sort | uniq | wc -l
++}
++
++function check_peers {
++        $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
++}
++
+ cleanup;
+ 
+ TEST launch_cluster 3
+@@ -48,4 +62,49 @@ TEST $CLI_1 volume stop $V1
+ 
+ EXPECT 3 count_brick_processes
+ 
+-cleanup
++TEST $CLI_1 volume stop $META_VOL
++
++TEST $CLI_1 volume delete $META_VOL
++TEST $CLI_1 volume delete $V0
++TEST $CLI_1 volume delete $V1
++
++#bug-1773856 - Brick process fails to come up with brickmux on
++
++TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
++TEST $CLI_1 volume start $V0
++
++
++EXPECT 3 count_brick_processes
++
++#create and start a new volume
++TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
++TEST $CLI_1 volume start $V1
++
++EXPECT 3 count_brick_processes
++
++V2=patchy2
++TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
++TEST $CLI_1 volume start $V2
++
++EXPECT 3 count_brick_processes
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
++
++TEST kill_node 1
++
++sleep 10
++
++EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
++
++$CLI_2 volume set $V0 performance.readdir-ahead on
++$CLI_2 volume set $V1 performance.readdir-ahead on
++
++TEST $glusterd_1;
++
++sleep 10
++
++EXPECT 4 count_brick_processes
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0  count_N/A_brick_pids
++
++cleanup;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 6654741..1b78812 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -4988,16 +4988,6 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
+         glusterd_volinfo_unref(old_volinfo);
+     }
+ 
+-    if (glusterd_is_volume_started(new_volinfo)) {
+-        (void)glusterd_start_bricks(new_volinfo);
+-        if (glusterd_is_snapd_enabled(new_volinfo)) {
+-            svc = &(new_volinfo->snapd.svc);
+-            if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
+-                gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+-            }
+-        }
+-    }
+-
+     ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
+     if (ret) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
+@@ -5007,19 +4997,31 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
+         goto out;
+     }
+ 
+-    ret = glusterd_create_volfiles_and_notify_services(new_volinfo);
++    ret = glusterd_create_volfiles(new_volinfo);
+     if (ret)
+         goto out;
+ 
++    glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes,
++                            glusterd_compare_volume_name);
++
++    if (glusterd_is_volume_started(new_volinfo)) {
++        (void)glusterd_start_bricks(new_volinfo);
++        if (glusterd_is_snapd_enabled(new_volinfo)) {
++            svc = &(new_volinfo->snapd.svc);
++            if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
++                gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
++            }
++        }
++    }
++
+     ret = glusterd_import_quota_conf(peer_data, count, new_volinfo, "volume");
+     if (ret) {
+         gf_event(EVENT_IMPORT_QUOTA_CONF_FAILED, "volume=%s",
+                  new_volinfo->volname);
+         goto out;
+     }
+-    glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes,
+-                            glusterd_compare_volume_name);
+ 
++    ret = glusterd_fetchspec_notify(this);
+ out:
+     gf_msg_debug("glusterd", 0, "Returning with ret: %d", ret);
+     return ret;
+-- 
+1.8.3.1
+
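
The root cause above is purely an ordering bug: the attach RPC resolves the
volfile through priv->volumes, so the volinfo must already be listed (and
its volfiles written) before any brick is started. The corrected sequence,
reduced to stubs that stand in for the glusterd calls in the patch:

#include <stdio.h>

struct volinfo { const char *volname; int started; };

/* Illustrative stand-ins for the real glusterd functions. */
static int store_volinfo(struct volinfo *v)   { (void)v; return 0; }
static int create_volfiles(struct volinfo *v) { (void)v; return 0; }
static void list_add_order(struct volinfo *v) { (void)v; }
static void start_bricks(struct volinfo *v)
{ printf("starting bricks of %s\n", v->volname); }

static int
import_friend_volume(struct volinfo *v)
{
    if (store_volinfo(v))
        return -1;
    if (create_volfiles(v))  /* 1. volfiles on disk            */
        return -1;
    list_add_order(v);       /* 2. volume resolvable via list  */
    if (v->started)
        start_bricks(v);     /* 3. only now attach the bricks  */
    return 0;
}
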
diff --git a/SOURCES/0392-glusterd-unlink-the-file-after-killing-the-process.patch b/SOURCES/0392-glusterd-unlink-the-file-after-killing-the-process.patch
deleted file mode 100644
index 03d67f8..0000000
--- a/SOURCES/0392-glusterd-unlink-the-file-after-killing-the-process.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 6a2cef546457e6dc9a2268cc2f5cc11b850b7f5c Mon Sep 17 00:00:00 2001
-From: Sanju Rakonde <srakonde@redhat.com>
-Date: Tue, 17 Dec 2019 15:52:30 +0530
-Subject: [PATCH 392/392] glusterd: unlink the file after killing the process
-
-In glusterd_proc_stop(), after killing the pid
-we should remove the pidfile.
-
-> upstream patch: https://review.gluster.org/#/c/glusterfs/+/23890/
-> fixes: bz#1784375
-> Change-Id: Ib6367aed590932c884b0f6f892fc40542aa19686
-> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
-
-BUG: 1849533
-Change-Id: Ib6367aed590932c884b0f6f892fc40542aa19686
-Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
-Reviewed-on: https://code.engineering.redhat.com/gerrit/203871
-Tested-by: RHGS Build Bot <nigelb@redhat.com>
-Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
----
- xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
-index f55a5fd..a05c90d 100644
---- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
-+++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
-@@ -107,6 +107,8 @@ glusterd_proc_stop(glusterd_proc_t *proc, int sig, int flags)
-                        "service, reason:%s",
-                        proc->name, strerror(errno));
-         }
-+    } else {
-+        (void)glusterd_unlink_file(proc->pidfile);
-     }
-     if (flags != PROC_STOP_FORCE)
-         goto out;
--- 
-1.8.3.1
-
diff --git a/SOURCES/0393-afr-restore-timestamp-of-files-during-metadata-heal.patch b/SOURCES/0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
new file mode 100644
index 0000000..bb93180
--- /dev/null
+++ b/SOURCES/0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
@@ -0,0 +1,129 @@
+From b528c21e6fedc9ac841942828b82e0c808da5efb Mon Sep 17 00:00:00 2001
+From: Sheetal Pamecha <spamecha@redhat.com>
+Date: Thu, 2 Jan 2020 12:05:12 +0530
+Subject: [PATCH 393/449] afr: restore timestamp of files during metadata heal
+
+For files: During metadata heal, we restore timestamps
+only for non-regular (char, block etc.) files.
+Extending it to regular files as well, since timestamps
+are also updated via the touch command.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23953/
+> fixes: bz#1787274
+> Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
+> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+
+BUG: 1761531
+Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
+Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202332
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../bug-1761531-metadata-heal-restore-time.t       | 74 ++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-self-heal-metadata.c   |  8 +--
+ 2 files changed, 76 insertions(+), 6 deletions(-)
+ create mode 100644 tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
+
+diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
+new file mode 100644
+index 0000000..7e24eae
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
+@@ -0,0 +1,74 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++cleanup
++
++GET_MDATA_PATH=$(dirname $0)/../../utils
++build_tester $GET_MDATA_PATH/get-mdata-xattr.c
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
++TEST $CLI volume start $V0
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
++
++TEST touch $M0/a
++sleep 1
++TEST kill_brick $V0 $H0 $B0/brick0
++TEST touch $M0/a
++
++EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
++
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
++
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++mtime0=$(get_mtime $B0/brick0/a)
++mtime1=$(get_mtime $B0/brick1/a)
++TEST [ $mtime0 -eq $mtime1 ]
++
++ctime0=$(get_ctime $B0/brick0/a)
++ctime1=$(get_ctime $B0/brick1/a)
++TEST [ $ctime0 -eq $ctime1 ]
++
++###############################################################################
++# Repeat the test with ctime feature disabled.
++TEST $CLI volume set $V0 features.ctime off
++
++TEST touch $M0/b
++sleep 1
++TEST kill_brick $V0 $H0 $B0/brick0
++TEST touch $M0/b
++
++EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
++
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
++
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++mtime2=$(get_mtime $B0/brick0/b)
++mtime3=$(get_mtime $B0/brick1/b)
++TEST [ $mtime2 -eq $mtime3 ]
++
++TEST rm $GET_MDATA_PATH/get-mdata-xattr
++
++TEST force_umount $M0
++TEST $CLI volume stop $V0
++TEST $CLI volume delete $V0
++
++cleanup
+diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
+index ecfa791..f4e31b6 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
++++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
+@@ -421,12 +421,8 @@ afr_selfheal_metadata(call_frame_t *frame, xlator_t *this, inode_t *inode)
+         if (ret)
+             goto unlock;
+ 
+-        /* Restore atime/mtime for files that don't need data heal as
+-         * restoring timestamps happens only as a part of data-heal.
+-         */
+-        if (!IA_ISREG(locked_replies[source].poststat.ia_type))
+-            afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
+-                                      locked_replies);
++        afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
++                                  locked_replies);
+ 
+         ret = afr_selfheal_undo_pending(
+             frame, this, inode, sources, sinks, healed_sinks, undid_pending,
+-- 
+1.8.3.1
+
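
Restoring a file's timestamps from the healed source amounts to reading
them off the good copy and applying them to the sink. A minimal userspace
analogue of afr_selfheal_restore_time() using plain stat(2)/utimensat(2)
(restore_time is an illustrative name; ctime cannot be set directly):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static int
restore_time(const char *source, const char *sink)
{
    struct stat st;
    struct timespec times[2];

    if (stat(source, &st) < 0) {
        perror("stat");
        return -1;
    }
    times[0] = st.st_atim; /* atime from the healed source */
    times[1] = st.st_mtim; /* mtime from the healed source */

    if (utimensat(AT_FDCWD, sink, times, 0) < 0) {
        perror("utimensat");
        return -1;
    }
    return 0;
}
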
diff --git a/SOURCES/0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch b/SOURCES/0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
new file mode 100644
index 0000000..96a8f74
--- /dev/null
+++ b/SOURCES/0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
@@ -0,0 +1,38 @@
+From 768a6d9bca86c0a50128b8776c11ef2b6d36388d Mon Sep 17 00:00:00 2001
+From: Vishal Pandey <vpandey@redhat.com>
+Date: Thu, 21 Nov 2019 12:56:34 +0530
+Subject: [PATCH 394/449] man/gluster: Add volume top command to gluster man
+ page
+
+> Upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23735/
+> Change-Id: Ib74607d2b2e5a1a0316221f1176a7dcccea632d4
+> Fixes: bz#1774866
+> Signed-off-by: Vishal Pandey <vpandey@redhat.com>
+
+BUG: 1754391
+Change-Id: Ib74607d2b2e5a1a0316221f1176a7dcccea632d4
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202333
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ doc/gluster.8 | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/doc/gluster.8 b/doc/gluster.8
+index da6472d..88cbf44 100644
+--- a/doc/gluster.8
++++ b/doc/gluster.8
+@@ -113,6 +113,9 @@ Rotate the log file for corresponding volume/brick.
+ \fB\ volume profile <VOLNAME> {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs] \fR
+ Profile operations on the volume. Once started, volume profile <volname> info provides cumulative statistics of the FOPs performed.
+ .TP
++\fB\ volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] | {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>] \fR
++Generates a profile of a volume representing the performance and bottlenecks/hotspots of each brick.
++.TP
+ \fB\ volume statedump <VOLNAME> [[nfs|quotad] [all|mem|iobuf|callpool|priv|fd|inode|history]... | [client <hostname:process-id>]] \fR
+ Dumps the in memory state of the specified process or the bricks of the volume.
+ .TP
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0395-Cli-Removing-old-log-rotate-command.patch b/SOURCES/0395-Cli-Removing-old-log-rotate-command.patch
new file mode 100644
index 0000000..0918777
--- /dev/null
+++ b/SOURCES/0395-Cli-Removing-old-log-rotate-command.patch
@@ -0,0 +1,111 @@
+From 5b3fcc8db86b4dc7af1eb63315ca2ff41c60fdea Mon Sep 17 00:00:00 2001
+From: kshithijiyer <kshithij.ki@gmail.com>
+Date: Sat, 30 Nov 2019 15:25:11 +0530
+Subject: [PATCH 395/449] [Cli] Removing old log rotate command.
+
+The old command for log rotate is still present; remove
+it completely. Also add a testcase to test the
+log rotate command with both the old and the new syntax,
+and fix testcases which use the old syntax to use the new
+one.
+
+Code to be removed:
+1. In cli-cmd-volume.c from struct cli_cmd volume_cmds[]:
+{"volume log rotate <VOLNAME> [BRICK]", cli_cmd_log_rotate_cbk,
+ "rotate the log file for corresponding volume/brick"
+ " NOTE: This is an old syntax, will be deprecated from next release."},
+
+2. In cli-cmd-volume.c from cli_cmd_log_rotate_cbk():
+ ||(strcmp("rotate", words[2]) == 0)))
+
+3. In cli-cmd-parser.c from cli_cmd_log_rotate_parse()
+if (strcmp("rotate", words[2]) == 0)
+   volname = (char *)words[3];
+else
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23392/
+> fixes: bz#1750387
+> Change-Id: I56e4d295044e8d5fd1fc0d848bc87e135e9e32b4
+> Signed-off-by: kshithijiyer <kshithij.ki@gmail.com>
+
+BUG: 1784415
+Change-Id: I56e4d295044e8d5fd1fc0d848bc87e135e9e32b4
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202334
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c                        | 2 --
+ cli/src/cli-cmd-volume.c                        | 7 +------
+ tests/bugs/glusterd/optimized-basic-testcases.t | 3 ++-
+ tests/bugs/glusterfs-server/bug-852147.t        | 2 +-
+ 4 files changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index 4456a7b..ac0a263 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -2592,8 +2592,6 @@ cli_cmd_log_rotate_parse(const char **words, int wordcount, dict_t **options)
+ 
+     if (strcmp("rotate", words[3]) == 0)
+         volname = (char *)words[2];
+-    else if (strcmp("rotate", words[2]) == 0)
+-        volname = (char *)words[3];
+     GF_ASSERT(volname);
+ 
+     ret = dict_set_str(dict, "volname", volname);
+diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
+index 754d333..f33fc99 100644
+--- a/cli/src/cli-cmd-volume.c
++++ b/cli/src/cli-cmd-volume.c
+@@ -2349,8 +2349,7 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
+         goto out;
+     }
+ 
+-    if (!((strcmp("rotate", words[2]) == 0) ||
+-          (strcmp("rotate", words[3]) == 0))) {
++    if (!(strcmp("rotate", words[3]) == 0)) {
+         cli_usage_out(word->pattern);
+         parse_error = 1;
+         goto out;
+@@ -3401,10 +3400,6 @@ struct cli_cmd volume_cmds[] = {
+     {"volume log <VOLNAME> rotate [BRICK]", cli_cmd_log_rotate_cbk,
+      "rotate the log file for corresponding volume/brick"},
+ 
+-    {"volume log rotate <VOLNAME> [BRICK]", cli_cmd_log_rotate_cbk,
+-     "rotate the log file for corresponding volume/brick"
+-     " NOTE: This is an old syntax, will be deprecated from next release."},
+-
+     {"volume sync <HOSTNAME> [all|<VOLNAME>]", cli_cmd_sync_volume_cbk,
+      "sync the volume information from a peer"},
+ 
+diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
+index c7e8c32..862f329 100644
+--- a/tests/bugs/glusterd/optimized-basic-testcases.t
++++ b/tests/bugs/glusterd/optimized-basic-testcases.t
+@@ -129,7 +129,8 @@ TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
+ 
+ #bug-1022055 - validate log rotate command
+ 
+-TEST $CLI volume log rotate $V0;
++TEST ! $CLI volume log rotate $V0;
++TEST $CLI volume log $V0 rotate;
+ 
+ #bug-1092841 - validating barrier enable/disable
+ 
+diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t
+index c644cfa..75db2a2 100755
+--- a/tests/bugs/glusterfs-server/bug-852147.t
++++ b/tests/bugs/glusterfs-server/bug-852147.t
+@@ -66,7 +66,7 @@ ren_file=$log_file".*"
+ rm -rf $ren_file
+ 
+ #Initiating log rotate
+-TEST $CLI volume log rotate $V0
++TEST $CLI volume log $V0 rotate
+ 
+ #Capturing new log file's size
+ new_file_size=`file-size $log_file`
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0396-Updating-gluster-manual.patch b/SOURCES/0396-Updating-gluster-manual.patch
new file mode 100644
index 0000000..bb33d10
--- /dev/null
+++ b/SOURCES/0396-Updating-gluster-manual.patch
@@ -0,0 +1,56 @@
+From 728aab1c1cfcf352d4ca1fde0b80044dc24bd9fa Mon Sep 17 00:00:00 2001
+From: Rishubh Jain <risjain@redhat.com>
+Date: Sun, 18 Aug 2019 18:02:57 +0530
+Subject: [PATCH 396/449] Updating gluster manual.
+
+Adding disperse-data to gluster manual under
+volume create command
+
+> Upstream Patch Link: https://review.gluster.org/#/c/glusterfs/+/23258/
+> Change-Id: Ic9eb47c9e71a1d7a11af9394c615c8e90f8d1d69
+> Fixes: bz#1668239
+> Signed-off-by: Rishubh Jain <risjain@redhat.com>
+> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+
+BUG: 1667954
+Change-Id: Ic9eb47c9e71a1d7a11af9394c615c8e90f8d1d69
+Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202342
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ doc/gluster.8                          | 2 +-
+ tests/basic/glusterd/disperse-create.t | 4 ++++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/doc/gluster.8 b/doc/gluster.8
+index 88cbf44..66bdb48 100644
+--- a/doc/gluster.8
++++ b/doc/gluster.8
+@@ -41,7 +41,7 @@ List all volumes in cluster
+ \fB\ volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad|tierd]] [detail|clients|mem|inode|fd|callpool|tasks|client-list] \fR
+ Display status of all or specified volume(s)/brick
+ .TP
+-\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] [disperse [<COUNT>]] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... \fR
++\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... <TA-BRICK> \fR
+ Create a new volume of the specified type using the specified bricks and transport type (the default transport type is tcp).
+ To create a volume with both transports (tcp and rdma), give 'transport tcp,rdma' as an option.
+ .TP
+diff --git a/tests/basic/glusterd/disperse-create.t b/tests/basic/glusterd/disperse-create.t
+index 384c675..db8a621 100644
+--- a/tests/basic/glusterd/disperse-create.t
++++ b/tests/basic/glusterd/disperse-create.t
+@@ -20,6 +20,10 @@ TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/b7 $H0:$B0/b8 $H0:$B
+ EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+ 
+ TEST $CLI volume delete $V0
++TEST $CLI volume create $V0 disperse-data 2 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
++EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
++
++TEST $CLI volume delete $V0
+ TEST $CLI volume create $V0 redundancy 1 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
+ EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch b/SOURCES/0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
new file mode 100644
index 0000000..6694813
--- /dev/null
+++ b/SOURCES/0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
@@ -0,0 +1,52 @@
+From 73cef29731c0d7b8b4f3b880c032dc232b8fcc31 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Thu, 4 Jun 2020 16:06:44 +0530
+Subject: [PATCH 397/449] mgmt/brick-mux: Avoid sending two response when
+ attach is failed.
+
+We were sending two responses back to glusterd when an attach
+failed: one from the handler function glusterfs_handle_attach and
+another from rpcsvc_check_and_reply_error. This was causing problems
+like ref leaks, transport disconnects etc.
+
+> Change-Id: I3bb5b59959530760b568d52becb519499b3dcd2b
+> updates: bz#1785143
+> Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+> (Cherry pick from commit 42f484dcecd9942611396d9bd2ad3a39019b0e1f)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23906/)
+
+Change-Id: I3bb5b59959530760b568d52becb519499b3dcd2b
+BUG: 1776901
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202346
+Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfsd/src/glusterfsd-mgmt.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
+index 15acc10..61d1b21 100644
+--- a/glusterfsd/src/glusterfsd-mgmt.c
++++ b/glusterfsd/src/glusterfsd-mgmt.c
+@@ -954,7 +954,15 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
+             ret = -1;
+         }
+ 
+-        glusterfs_translator_info_response_send(req, ret, NULL, NULL);
++        ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
++        if (ret) {
++            /* Response sent back to glusterd, req is already destroyed. So
++             * resetting the ret to 0. Otherwise another response will be
++             * send from rpcsvc_check_and_reply_error. Which will lead to
++             * double resource leak.
++             */
++            ret = 0;
++        }
+ 
+     out:
+         UNLOCK(&ctx->volfile_lock);
+-- 
+1.8.3.1
+
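
The rule being enforced above is general to RPC handlers: once a handler
has sent its own reply, it must report success upward, otherwise generic
error-reply machinery answers the same (already destroyed) request a
second time. Schematically, with illustrative stand-ins for the rpcsvc
pieces:

#include <stdio.h>

struct request { int id; };

/* Stand-in for glusterfs_translator_info_response_send(). */
static int send_response(struct request *req, int op_ret)
{ printf("reply to req %d: %d\n", req->id, op_ret); return 0; }

static int
handle_attach(struct request *req, int op_ret)
{
    int ret = send_response(req, op_ret);
    if (ret) {
        /* The reply path already consumed the request; swallow the
         * error so the caller does not send a second reply. */
        ret = 0;
    }
    return ret;
}
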
diff --git a/SOURCES/0398-ec-change-error-message-for-heal-commands-for-disper.patch b/SOURCES/0398-ec-change-error-message-for-heal-commands-for-disper.patch
new file mode 100644
index 0000000..5779539
--- /dev/null
+++ b/SOURCES/0398-ec-change-error-message-for-heal-commands-for-disper.patch
@@ -0,0 +1,75 @@
+From 03d2c7b52da5efd6ad660315a0548c8b91e51439 Mon Sep 17 00:00:00 2001
+From: Sheetal Pamecha <spamecha@redhat.com>
+Date: Sun, 22 Dec 2019 22:52:30 +0530
+Subject: [PATCH 398/449] ec: change error message for heal commands for
+ disperse volume
+
+Currently, when we issue heal statistics or similar commands
+for a disperse volume, they fail with the message "Volume is not of
+type replicate." Adding the message "this command is supported for
+volumes of type replicate" to reflect supportability and give a
+better understanding of heal functionality for disperse volumes.
+
+> Upstream Patch Link: https://review.gluster.org/#/c/glusterfs/+/23916/
+> fixes: bz#1785998
+> Change-Id: I9688a9fdf427cb6f657cfd5b8db2f76a6c56f6e2
+> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+
+BUG: 1487177
+Change-Id: I9688a9fdf427cb6f657cfd5b8db2f76a6c56f6e2
+Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202344
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ heal/src/glfs-heal.c                            | 15 ++++++++++-----
+ xlators/mgmt/glusterd/src/glusterd-volume-ops.c |  4 +++-
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
+index 7e37e47..125b12c 100644
+--- a/heal/src/glfs-heal.c
++++ b/heal/src/glfs-heal.c
+@@ -1726,14 +1726,19 @@ main(int argc, char **argv)
+         goto out;
+     }
+ 
++    char *var_str = (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
++                     heal_op == GF_SHD_OP_HEAL_SUMMARY)
++                        ? "replicate/disperse"
++                        : "replicate";
++
+     ret = glfsh_validate_volume(top_subvol, heal_op);
+     if (ret < 0) {
+         ret = -EINVAL;
+-        gf_asprintf(&op_errstr, "Volume %s is not of type %s", volname,
+-                    (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
+-                     heal_op == GF_SHD_OP_HEAL_SUMMARY)
+-                        ? "replicate/disperse"
+-                        : "replicate");
++        gf_asprintf(&op_errstr,
++                    "This command is supported "
++                    "for only volumes of %s type. Volume %s "
++                    "is not of type %s",
++                    var_str, volname, var_str);
+         goto out;
+     }
+     rootloc.inode = inode_ref(top_subvol->itable->root);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+index 076bc80..93042ab 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+@@ -2008,7 +2008,9 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
+             if (!glusterd_is_volume_replicate(volinfo)) {
+                 ret = -1;
+                 snprintf(msg, sizeof(msg),
+-                         "Volume %s is not of type "
++                         "This command is supported "
++                         "for only volumes of replicate "
++                         "type. Volume %s is not of type "
+                          "replicate",
+                          volinfo->volname);
+                 *op_errstr = gf_strdup(msg);
+-- 
+1.8.3.1
+
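
The reworded error centralises the "which volume types support this heal
op" string so it can appear twice in one message. In outline (the enum
and function names here are illustrative):

#include <stdio.h>

enum heal_op { OP_INDEX_SUMMARY, OP_HEAL_SUMMARY, OP_STATISTICS };

static void
print_unsupported(enum heal_op op, const char *volname)
{
    const char *var_str = (op == OP_INDEX_SUMMARY || op == OP_HEAL_SUMMARY)
                              ? "replicate/disperse"
                              : "replicate";

    fprintf(stderr,
            "This command is supported for only volumes of %s type. "
            "Volume %s is not of type %s\n",
            var_str, volname, var_str);
}
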
diff --git a/SOURCES/0399-glusterd-coverity-fixes.patch b/SOURCES/0399-glusterd-coverity-fixes.patch
new file mode 100644
index 0000000..8052a46
--- /dev/null
+++ b/SOURCES/0399-glusterd-coverity-fixes.patch
@@ -0,0 +1,79 @@
+From 1ebd2a3227469b1775f19c8f78af7d3d19f749a3 Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee <amukherj@redhat.com>
+Date: Fri, 26 Apr 2019 08:47:12 +0530
+Subject: [PATCH 399/449] glusterd: coverity fixes
+
+1400775 - USE_AFTER_FREE
+1400742 - Missing Unlock
+1400736 - CHECKED_RETURN
+1398470 - Missing Unlock
+
+Missing Unlock is the tricky one: we had an annotation added, but
+coverity still continued to complain. Added pthread_mutex_unlock to
+release the lock before destroying it, to see if it makes coverity
+happy.
+
+> upstream patch link:  https://review.gluster.org/#/c/glusterfs/+/22634/
+> Updates: bz#789278
+> Change-Id: I1d892612a17f805144d96c1b15004a85a1639414
+> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
+
+BUG: 1787310
+Change-Id: I1d892612a17f805144d96c1b15004a85a1639414
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202343
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 1 +
+ xlators/mgmt/glusterd/src/glusterd-sm.c         | 1 -
+ xlators/mgmt/glusterd/src/glusterd-utils.c      | 8 +++++++-
+ 3 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+index f24c86e..8c1feeb 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+@@ -48,6 +48,7 @@ glusterd_peerinfo_destroy(struct rcu_head *head)
+     }
+ 
+     glusterd_sm_tr_log_delete(&peerinfo->sm_log);
++    pthread_mutex_unlock(&peerinfo->delete_lock);
+     pthread_mutex_destroy(&peerinfo->delete_lock);
+     GF_FREE(peerinfo);
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
+index 54a7bd1..044da3d 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
+@@ -868,7 +868,6 @@ glusterd_ac_friend_remove(glusterd_friend_sm_event_t *event, void *ctx)
+                "Cleanup returned: %d", ret);
+     }
+ out:
+-    /* coverity[ LOCK] */
+     return 0;
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 1b78812..a1299bc 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -5840,7 +5840,13 @@ attach_brick_callback(struct rpc_req *req, struct iovec *iov, int count,
+         /* PID file is copied once brick has attached
+            successfully
+         */
+-        glusterd_copy_file(pidfile1, pidfile2);
++        ret = glusterd_copy_file(pidfile1, pidfile2);
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
++                   "Could not copy file %s to %s", pidfile1, pidfile2);
++            goto out;
++        }
++
+         brickinfo->status = GF_BRICK_STARTED;
+         brickinfo->rpc = rpc_clnt_ref(other_brick->rpc);
+         gf_log(THIS->name, GF_LOG_INFO, "brick %s is attached successfully",
+-- 
+1.8.3.1
+
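
The Missing Unlock fix relies on the POSIX rule that destroying a locked
mutex is undefined behaviour: releasing it immediately before
pthread_mutex_destroy() is both correct and enough to quiet the checker.
A standalone sketch of the teardown pattern (struct and function names
are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct peer {
    pthread_mutex_t delete_lock;
    /* ... other peerinfo state ... */
};

static void
peer_destroy(struct peer *p)
{
    /* The destroying thread may still hold delete_lock here, so
     * release it before destroying, as the patch does. */
    pthread_mutex_unlock(&p->delete_lock);
    pthread_mutex_destroy(&p->delete_lock);
    free(p);
}
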
diff --git a/SOURCES/0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch b/SOURCES/0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
new file mode 100644
index 0000000..dd1ea52
--- /dev/null
+++ b/SOURCES/0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
@@ -0,0 +1,98 @@
+From 12ed9226fa24d073ab2b89692194b454a194c379 Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Thu, 4 Jun 2020 15:14:29 +0530
+Subject: [PATCH 400/449] cli: throw a warning if replica count greater than 3
+
+As volumes with a replica count greater than 3 are not
+supported, a warning message is thrown to the user
+while creating a volume with a replica count greater
+than 3 or while converting a volume to a replica > 3
+volume by add-brick/remove-brick operations.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1763129
+Change-Id: I5a32a5a2d99b5175fb692dfcab27396089f24b72
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202338
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index ac0a263..5e7ce53 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -619,6 +619,23 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
+                     }
+                 }
+             }
++
++            if (replica_count > 3) {
++                if (strcmp(words[wordcount - 1], "force")) {
++                    question =
++                        "Volumes with replica count greater than 3 are "
++                        "not supported. \nDo you still want to continue?\n";
++                    answer = cli_cmd_get_confirmation(state, question);
++                    if (GF_ANSWER_NO == answer) {
++                        gf_log("cli", GF_LOG_ERROR,
++                               "Volume create "
++                               "cancelled, exiting");
++                        ret = -1;
++                        goto out;
++                    }
++                }
++            }
++
+             ret = dict_set_int32(dict, "replica-count", replica_count);
+             if (ret)
+                 goto out;
+@@ -1815,6 +1832,20 @@ cli_cmd_volume_add_brick_parse(struct cli_state *state, const char **words,
+                     goto out;
+                 }
+             }
++        } else if (count > 3) {
++            if (strcmp(words[wordcount - 1], "force")) {
++                question =
++                    "Volumes with replica count greater than 3 are "
++                    "not supported. \nDo you still want to continue?\n";
++                answer = cli_cmd_get_confirmation(state, question);
++                if (GF_ANSWER_NO == answer) {
++                    gf_log("cli", GF_LOG_ERROR,
++                           "add-brick "
++                           "cancelled, exiting");
++                    ret = -1;
++                    goto out;
++                }
++            }
+         }
+     } else if ((strcmp(w, "stripe")) == 0) {
+         cli_err("stripe option not supported");
+@@ -2082,6 +2113,20 @@ cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words,
+                     goto out;
+                 }
+             }
++        } else if (count > 3) {
++            if (strcmp(words[wordcount - 1], "force")) {
++                ques =
++                    "Volumes with replica count greater than 3 are "
++                    "not supported. \nDo you still want to continue?\n";
++                answer = cli_cmd_get_confirmation(state, ques);
++                if (GF_ANSWER_NO == answer) {
++                    gf_log("cli", GF_LOG_ERROR,
++                           "Remove-brick "
++                           "cancelled, exiting");
++                    ret = -1;
++                    goto out;
++                }
++            }
+         }
+ 
+         ret = dict_set_int32(dict, "replica-count", count);
+-- 
+1.8.3.1
+
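
All three hunks share one guard: if the last word of the command is not
"force", prompt for confirmation and abort on "no". The skeleton of that
guard, with a hypothetical prompt helper in place of
cli_cmd_get_confirmation():

#include <stdio.h>
#include <string.h>

static int
confirm(const char *question)
{
    char buf[8] = "";

    fputs(question, stdout);
    if (!fgets(buf, sizeof(buf), stdin))
        return 0;
    return buf[0] == 'y' || buf[0] == 'Y';
}

static int
check_replica_count(int replica_count, const char *last_word)
{
    if (replica_count > 3 && strcmp(last_word, "force")) {
        if (!confirm("Volumes with replica count greater than 3 are "
                     "not supported.\nDo you still want to continue?\n"))
            return -1; /* user cancelled */
    }
    return 0;
}
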
diff --git a/SOURCES/0401-cli-change-the-warning-message.patch b/SOURCES/0401-cli-change-the-warning-message.patch
new file mode 100644
index 0000000..5c3e895
--- /dev/null
+++ b/SOURCES/0401-cli-change-the-warning-message.patch
@@ -0,0 +1,70 @@
+From 704bf84d432e1eea1534e35ee27d4116a7273146 Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Thu, 4 Jun 2020 16:15:35 +0530
+Subject: [PATCH 401/449] cli: change the warning message
+
+While creating a replica 2 volume or converting
+a volume to a replica 2 volume, we issue a warning
+saying "replica 2 volumes are prone to split brain".
+As the support for replica 2 volumes has been deprecated,
+the warning message should be changed accordingly to
+reflect the same.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1763124
+Change-Id: If55e5412cda2e4a21a6359492d8d704dd702530d
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202348
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index 5e7ce53..7446b95 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -603,8 +603,8 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
+             if (replica_count == 2) {
+                 if (strcmp(words[wordcount - 1], "force")) {
+                     question =
+-                        "Replica 2 volumes are prone"
+-                        " to split-brain. Use "
++                        "Support for replica 2 volumes stands deprecated as "
++                        "they are prone to split-brain. Use "
+                         "Arbiter or Replica 3 to "
+                         "avoid this.\n"
+                         "Do you still want to "
+@@ -1817,9 +1817,9 @@ cli_cmd_volume_add_brick_parse(struct cli_state *state, const char **words,
+         if (count == 2) {
+             if (strcmp(words[wordcount - 1], "force")) {
+                 question =
+-                    "Replica 2 volumes are prone to "
+-                    "split-brain. Use Arbiter or "
+-                    "Replica 3 to avaoid this. See: "
++                    "Support for replica 2 volumes stands deprecated as they "
++                    "are prone to split-brain. Use Arbiter or "
++                    "Replica 3 to avoid this. See: "
+                     "http://docs.gluster.org/en/latest/Administrator%20Guide/"
+                     "Split%20brain%20and%20ways%20to%20deal%20with%20it/."
+                     "\nDo you still want to continue?\n";
+@@ -2098,9 +2098,9 @@ cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words,
+         if (count == 2) {
+             if (strcmp(words[wordcount - 1], "force")) {
+                 ques =
+-                    "Replica 2 volumes are prone to "
+-                    "split-brain. Use Arbiter or Replica 3 "
+-                    "to avaoid this. See: "
++                    "Support for replica 2 volumes stands deprecated as they "
++                    "are prone to split-brain. Use Arbiter or Replica 3 "
++                    "to avoid this. See: "
+                     "http://docs.gluster.org/en/latest/Administrator%20Guide/"
+                     "Split%20brain%20and%20ways%20to%20deal%20with%20it/."
+                     "\nDo you still want to continue?\n";
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0402-afr-wake-up-index-healer-threads.patch b/SOURCES/0402-afr-wake-up-index-healer-threads.patch
new file mode 100644
index 0000000..34ca329
--- /dev/null
+++ b/SOURCES/0402-afr-wake-up-index-healer-threads.patch
@@ -0,0 +1,198 @@
+From ecaa0f10820f4b6e803021919ce59a43aedf356b Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Thu, 4 Jun 2020 16:15:35 +0530
+Subject: [PATCH 402/449] afr: wake up index healer threads
+
+...whenever shd is re-enabled after being disabled, or there is a change in
+`cluster.heal-timeout`, without needing to restart shd or wait for the
+current `cluster.heal-timeout` seconds to expire.
+
+> Upstream patch link:https://review.gluster.org/#/c/glusterfs/+/23288/
+> Change-Id: Ia5ebd7c8e9f5b54cba3199c141fdd1af2f9b9bfe
+> fixes: bz#1744548
+> Reported-by: Glen Kiessling <glenk1973@hotmail.com>
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+
+BUG: 1764091
+Change-Id: I42aa0807f09b5a09510fe9efb4a1697dad3410a3
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202368
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/replicate/bug-1744548-heal-timeout.t | 42 +++++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-common.c            |  6 ++--
+ xlators/cluster/afr/src/afr-self-heald.c        | 14 ++++++---
+ xlators/cluster/afr/src/afr-self-heald.h        |  3 --
+ xlators/cluster/afr/src/afr.c                   | 10 ++++++
+ xlators/cluster/afr/src/afr.h                   |  2 ++
+ 6 files changed, 66 insertions(+), 11 deletions(-)
+ create mode 100644 tests/bugs/replicate/bug-1744548-heal-timeout.t
+
+diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+new file mode 100644
+index 0000000..3cb73bc
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+@@ -0,0 +1,42 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume heal $V0 disable
++TEST $CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++TEST ! $CLI volume heal $V0
++
++# Enable shd and verify that index crawl is triggered immediately.
++TEST $CLI volume profile $V0 start
++TEST $CLI volume profile $V0 info clear
++TEST $CLI volume heal $V0 enable
++TEST $CLI volume heal $V0
++# Each brick does 3 opendirs, corresponding to dirty, xattrop and entry-changes
++COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
++TEST [ "$COUNT" == "333" ]
++
++# Check that a change in heal-timeout is honoured immediately.
++TEST $CLI volume set $V0 cluster.heal-timeout 5
++sleep 10
++COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
++# Two crawls must have happened.
++TEST [ "$COUNT" == "666" ]
++
++# shd must not heal if it is disabled and heal-timeout is changed.
++TEST $CLI volume heal $V0 disable
++TEST $CLI volume profile $V0 info clear
++TEST $CLI volume set $V0 cluster.heal-timeout 6
++sleep 6
++COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
++TEST [ -z $COUNT ]
++cleanup;
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 3690b84..eef7fd2 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -5613,10 +5613,8 @@ afr_notify(xlator_t *this, int32_t event, void *data, void *data2)
+          * b) Already heard from everyone, but we now got a child-up
+          *    event.
+          */
+-        if (have_heard_from_all && priv->shd.iamshd) {
+-            for (i = 0; i < priv->child_count; i++)
+-                if (priv->child_up[i])
+-                    afr_selfheal_childup(this, i);
++        if (have_heard_from_all) {
++            afr_selfheal_childup(this, priv);
+         }
+     }
+ out:
+diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
+index 7eb1207..95ac5f2 100644
+--- a/xlators/cluster/afr/src/afr-self-heald.c
++++ b/xlators/cluster/afr/src/afr-self-heald.c
+@@ -1258,12 +1258,18 @@ out:
+     return ret;
+ }
+ 
+-int
+-afr_selfheal_childup(xlator_t *this, int subvol)
++void
++afr_selfheal_childup(xlator_t *this, afr_private_t *priv)
+ {
+-    afr_shd_index_healer_spawn(this, subvol);
++    int subvol = 0;
+ 
+-    return 0;
++    if (!priv->shd.iamshd)
++        return;
++    for (subvol = 0; subvol < priv->child_count; subvol++)
++        if (priv->child_up[subvol])
++            afr_shd_index_healer_spawn(this, subvol);
++
++    return;
+ }
+ 
+ int
+diff --git a/xlators/cluster/afr/src/afr-self-heald.h b/xlators/cluster/afr/src/afr-self-heald.h
+index 7de7c43..1990539 100644
+--- a/xlators/cluster/afr/src/afr-self-heald.h
++++ b/xlators/cluster/afr/src/afr-self-heald.h
+@@ -60,9 +60,6 @@ typedef struct {
+ } afr_self_heald_t;
+ 
+ int
+-afr_selfheal_childup(xlator_t *this, int subvol);
+-
+-int
+ afr_selfheal_daemon_init(xlator_t *this);
+ 
+ int
+diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
+index 33258a0..8f9e71f 100644
+--- a/xlators/cluster/afr/src/afr.c
++++ b/xlators/cluster/afr/src/afr.c
+@@ -141,6 +141,7 @@ reconfigure(xlator_t *this, dict_t *options)
+     afr_private_t *priv = NULL;
+     xlator_t *read_subvol = NULL;
+     int read_subvol_index = -1;
++    int timeout_old = 0;
+     int ret = -1;
+     int index = -1;
+     char *qtype = NULL;
+@@ -150,6 +151,7 @@ reconfigure(xlator_t *this, dict_t *options)
+     char *locking_scheme = NULL;
+     gf_boolean_t consistent_io = _gf_false;
+     gf_boolean_t choose_local_old = _gf_false;
++    gf_boolean_t enabled_old = _gf_false;
+ 
+     priv = this->private;
+ 
+@@ -255,11 +257,13 @@ reconfigure(xlator_t *this, dict_t *options)
+     GF_OPTION_RECONF("ensure-durability", priv->ensure_durability, options,
+                      bool, out);
+ 
++    enabled_old = priv->shd.enabled;
+     GF_OPTION_RECONF("self-heal-daemon", priv->shd.enabled, options, bool, out);
+ 
+     GF_OPTION_RECONF("iam-self-heal-daemon", priv->shd.iamshd, options, bool,
+                      out);
+ 
++    timeout_old = priv->shd.timeout;
+     GF_OPTION_RECONF("heal-timeout", priv->shd.timeout, options, int32, out);
+ 
+     GF_OPTION_RECONF("consistent-metadata", priv->consistent_metadata, options,
+@@ -283,6 +287,12 @@ reconfigure(xlator_t *this, dict_t *options)
+         consistent_io = _gf_false;
+     priv->consistent_io = consistent_io;
+ 
++    if (priv->shd.enabled) {
++        if ((priv->shd.enabled != enabled_old) ||
++            (timeout_old != priv->shd.timeout))
++            afr_selfheal_childup(this, priv);
++    }
++
+     ret = 0;
+ out:
+     return ret;
+diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
+index e731cfa..18f1a6a 100644
+--- a/xlators/cluster/afr/src/afr.h
++++ b/xlators/cluster/afr/src/afr.h
+@@ -1332,4 +1332,6 @@ afr_lookup_has_quorum(call_frame_t *frame, xlator_t *this,
+ void
+ afr_mark_new_entry_changelog(call_frame_t *frame, xlator_t *this);
+ 
++void
++afr_selfheal_childup(xlator_t *this, afr_private_t *priv);
+ #endif /* __AFR_H__ */
+-- 
+1.8.3.1
+
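In CLI terms, the behaviour this patch introduces can be observed with volume profile, much like the new .t above does; the volume name here is an assumption:

#!/bin/bash
VOL=testvol    # assumed volume name
gluster volume profile $VOL start
gluster volume profile $VOL info clear

# Re-enabling shd should trigger an index crawl immediately...
gluster volume heal $VOL enable
gluster volume profile $VOL info incremental | grep OPENDIR

# ...and a heal-timeout change is honoured without restarting shd:
gluster volume set $VOL cluster.heal-timeout 5
sleep 10    # roughly two crawl periods
gluster volume profile $VOL info incremental | grep OPENDIR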
diff --git a/SOURCES/0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch b/SOURCES/0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
new file mode 100644
index 0000000..569bdc0
--- /dev/null
+++ b/SOURCES/0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
@@ -0,0 +1,84 @@
+From b311385a3c4bd56d69d1fa7e9bd3d9a2ae5c344e Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Mon, 7 Oct 2019 12:27:01 +0530
+Subject: [PATCH 403/449] Fix spurious failure in bug-1744548-heal-timeout.t
+
+The script assumed that the heal would have been triggered
+by the time the test executed, which may not be the case.
+That race can lead to the following failures:
+
+...
+18:29:45 not ok  14 [     85/      1] <  26> '[ 331 == 333 ]' -> ''
+...
+18:29:45 not ok  16 [  10097/      1] <  33> '[ 668 == 666 ]' -> ''
+
+Heal on the 3rd brick didn't start completely the first time the command was
+executed, so the extra count got added to the next profile info.
+
+Fixed it by depending on cumulative stats and waiting until the count is
+satisfied, using EXPECT_WITHIN.
+
+> Upstream patch link:https://review.gluster.org/23523
+>fixes: bz#1759002
+>Change-Id: I3b410671c902d6b1458a757fa245613cb29d967d
+>Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1764091
+Change-Id: Ic4d16b6c8a1bbc35735567d60fd0383456b9f534
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202369
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/replicate/bug-1744548-heal-timeout.t | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+index 3cb73bc..0aaa3ea 100644
+--- a/tests/bugs/replicate/bug-1744548-heal-timeout.t
++++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+@@ -4,6 +4,11 @@
+ . $(dirname $0)/../../volume.rc
+ . $(dirname $0)/../../afr.rc
+ 
++function get_cumulative_opendir_count {
++#sed 'n:d' prints odd-numbered lines
++    $CLI volume profile $V0 info |grep OPENDIR|sed 'n;d' | awk '{print $8}'|tr -d '\n'
++}
++
+ cleanup;
+ 
+ TEST glusterd;
+@@ -20,23 +25,23 @@ TEST ! $CLI volume heal $V0
+ TEST $CLI volume profile $V0 start
+ TEST $CLI volume profile $V0 info clear
+ TEST $CLI volume heal $V0 enable
+-TEST $CLI volume heal $V0
+ # Each brick does 3 opendirs, corresponding to dirty, xattrop and entry-changes
+-COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+-TEST [ "$COUNT" == "333" ]
++EXPECT_WITHIN $HEAL_TIMEOUT "^333$" get_cumulative_opendir_count
+ 
+ # Check that a change in heal-timeout is honoured immediately.
+ TEST $CLI volume set $V0 cluster.heal-timeout 5
+ sleep 10
+-COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+ # Two crawls must have happened.
+-TEST [ "$COUNT" == "666" ]
++EXPECT_WITHIN $HEAL_TIMEOUT "^999$" get_cumulative_opendir_count
+ 
+ # shd must not heal if it is disabled and heal-timeout is changed.
+ TEST $CLI volume heal $V0 disable
++#Wait for configuration update and any opendir fops to complete
++sleep 10
+ TEST $CLI volume profile $V0 info clear
+ TEST $CLI volume set $V0 cluster.heal-timeout 6
+-sleep 6
++#Better to wait for more than 6 seconds to account for configuration updates
++sleep 10
+ COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+ TEST [ -z $COUNT ]
+ cleanup;
+-- 
+1.8.3.1
+
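Outside the test framework, this fix amounts to polling the cumulative count until it reaches the expected value rather than sampling once; a sketch, with the volume name and timeout as assumptions:

#!/bin/bash
VOL=testvol

opendir_count() {
    # "sed 'n;d'" keeps odd-numbered lines, i.e. the Cumulative rows at this
    # point in the series (the filter is reworked again in the next patch).
    gluster volume profile $VOL info | grep OPENDIR | sed 'n;d' \
        | awk '{print $8}' | tr -d '\n'
}

wait_for_count() {
    local expected=$1 timeout=${2:-120} i
    for ((i = 0; i < timeout; i++)); do
        [ "$(opendir_count)" = "$expected" ] && return 0
        sleep 1
    done
    return 1
}

wait_for_count 333 && echo "first crawl done"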
diff --git a/SOURCES/0404-tests-Fix-spurious-failure.patch b/SOURCES/0404-tests-Fix-spurious-failure.patch
new file mode 100644
index 0000000..9cbb6ea
--- /dev/null
+++ b/SOURCES/0404-tests-Fix-spurious-failure.patch
@@ -0,0 +1,38 @@
+From b65ca1045910bc18c601681788eb322dbb8ec2fa Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Mon, 14 Oct 2019 10:29:31 +0530
+Subject: [PATCH 404/449] tests: Fix spurious failure
+
+> Upstream patch:https://review.gluster.org/23546
+> fixes: bz#1759002
+> Change-Id: I4d49e1c2ca9b3c1d74b9dd5a30f1c66983a76529
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1764091
+Change-Id: I8b66f08cce7a87788867c6373aed71d6fc65155f
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202370
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/replicate/bug-1744548-heal-timeout.t | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+index 0aaa3ea..c208112 100644
+--- a/tests/bugs/replicate/bug-1744548-heal-timeout.t
++++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+@@ -5,8 +5,8 @@
+ . $(dirname $0)/../../afr.rc
+ 
+ function get_cumulative_opendir_count {
+-#sed 'n:d' prints odd-numbered lines
+-    $CLI volume profile $V0 info |grep OPENDIR|sed 'n;d' | awk '{print $8}'|tr -d '\n'
++#sed command prints content between Cumulative and Interval, this keeps content from Cumulative stats
++    $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+ }
+ 
+ cleanup;
+-- 
+1.8.3.1
+
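The sed range expression is the crux of this fix: "-n '/^Cumulative/,/^Interval/p'" prints only the lines from a "Cumulative" header up to the next "Interval" header, so per-interval rows are excluded. A self-contained illustration on fabricated profile output:

#!/bin/bash
# The excerpt below is fabricated to mimic the shape of profile output.
profile_excerpt() {
    cat <<'EOF'
Cumulative Stats:
      12 OPENDIR
Interval 3 Stats:
       4 OPENDIR
EOF
}

# Keeps only the cumulative OPENDIR row ("      12 OPENDIR"):
profile_excerpt | sed -n '/^Cumulative/,/^Interval/p' | grep OPENDIR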
diff --git a/SOURCES/0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch b/SOURCES/0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
new file mode 100644
index 0000000..765c154
--- /dev/null
+++ b/SOURCES/0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
@@ -0,0 +1,175 @@
+From 9c5f5b4ffd49e8c8631defb7b6873248bbfdaf9c Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Tue, 23 Jul 2019 13:16:04 +0000
+Subject: [PATCH 405/449] [core] fix return of local in __nlc_inode_ctx_get
+
+__nlc_inode_ctx_get assigns a value to nlc_pe_p that is never used by
+its parent function or any of its predecessors; hence remove the
+assignment, and also that function argument, as it is not used
+anywhere.
+
+> fixes: bz#1732496
+> Change-Id: I5b950e1e251bd50a646616da872a4efe9d2ff8c9
+> (Cherry pick from commit 84a55090123a7e3124100e5564da8c521c3c22ab )
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23093/)
+
+BUG: 1686897
+
+Change-Id: I5b950e1e251bd50a646616da872a4efe9d2ff8c9
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202372
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/nl-cache/src/nl-cache-helper.c | 36 +++++++++-------------
+ 1 file changed, 14 insertions(+), 22 deletions(-)
+
+diff --git a/xlators/performance/nl-cache/src/nl-cache-helper.c b/xlators/performance/nl-cache/src/nl-cache-helper.c
+index 009f33a..4314038 100644
+--- a/xlators/performance/nl-cache/src/nl-cache-helper.c
++++ b/xlators/performance/nl-cache/src/nl-cache-helper.c
+@@ -145,12 +145,10 @@ nlc_disable_cache(xlator_t *this)
+ }
+ 
+ static int
+-__nlc_inode_ctx_get(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p,
+-                    nlc_pe_t **nlc_pe_p)
++__nlc_inode_ctx_get(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p)
+ {
+     int ret = 0;
+     nlc_ctx_t *nlc_ctx = NULL;
+-    nlc_pe_t *nlc_pe = NULL;
+     uint64_t nlc_ctx_int = 0;
+     uint64_t nlc_pe_int = 0;
+ 
+@@ -159,10 +157,6 @@ __nlc_inode_ctx_get(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p,
+         nlc_ctx = (void *)(long)(nlc_ctx_int);
+         *nlc_ctx_p = nlc_ctx;
+     }
+-    if (ret == 0 && nlc_pe_p) {
+-        nlc_pe = (void *)(long)(nlc_pe_int);
+-        *nlc_pe_p = nlc_pe;
+-    }
+     return ret;
+ }
+ 
+@@ -186,14 +180,13 @@ nlc_inode_ctx_set(xlator_t *this, inode_t *inode, nlc_ctx_t *nlc_ctx,
+ }
+ 
+ static void
+-nlc_inode_ctx_get(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p,
+-                  nlc_pe_t **nlc_pe_p)
++nlc_inode_ctx_get(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p)
+ {
+     int ret = 0;
+ 
+     LOCK(&inode->lock);
+     {
+-        ret = __nlc_inode_ctx_get(this, inode, nlc_ctx_p, nlc_pe_p);
++        ret = __nlc_inode_ctx_get(this, inode, nlc_ctx_p);
+         if (ret < 0)
+             gf_msg_debug(this->name, 0,
+                          "inode ctx get failed for "
+@@ -290,8 +283,7 @@ out:
+ }
+ 
+ static nlc_ctx_t *
+-nlc_inode_ctx_get_set(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p,
+-                      nlc_pe_t **nlc_pe_p)
++nlc_inode_ctx_get_set(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p)
+ {
+     int ret = 0;
+     nlc_ctx_t *nlc_ctx = NULL;
+@@ -301,7 +293,7 @@ nlc_inode_ctx_get_set(xlator_t *this, inode_t *inode, nlc_ctx_t **nlc_ctx_p,
+ 
+     LOCK(&inode->lock);
+     {
+-        ret = __nlc_inode_ctx_get(this, inode, &nlc_ctx, nlc_pe_p);
++        ret = __nlc_inode_ctx_get(this, inode, &nlc_ctx);
+         if (nlc_ctx)
+             goto unlock;
+ 
+@@ -410,7 +402,7 @@ nlc_set_dir_state(xlator_t *this, inode_t *inode, uint64_t state)
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get_set(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get_set(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -430,7 +422,7 @@ nlc_cache_timeout_handler(struct gf_tw_timer_list *timer, void *data,
+     nlc_timer_data_t *tmp = data;
+     nlc_ctx_t *nlc_ctx = NULL;
+ 
+-    nlc_inode_ctx_get(tmp->this, tmp->inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(tmp->this, tmp->inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -696,7 +688,7 @@ nlc_inode_clear_cache(xlator_t *this, inode_t *inode, int reason)
+ {
+     nlc_ctx_t *nlc_ctx = NULL;
+ 
+-    nlc_inode_ctx_get(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -883,7 +875,7 @@ nlc_dir_add_ne(xlator_t *this, inode_t *inode, const char *name)
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get_set(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get_set(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -914,7 +906,7 @@ nlc_dir_remove_pe(xlator_t *this, inode_t *parent, inode_t *entry_ino,
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get(this, parent, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(this, parent, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -945,7 +937,7 @@ nlc_dir_add_pe(xlator_t *this, inode_t *inode, inode_t *entry_ino,
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get_set(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get_set(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -1051,7 +1043,7 @@ nlc_is_negative_lookup(xlator_t *this, loc_t *loc)
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -1102,7 +1094,7 @@ nlc_get_real_file_name(xlator_t *this, loc_t *loc, const char *fname,
+         goto out;
+     }
+ 
+-    nlc_inode_ctx_get(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(this, inode, &nlc_ctx);
+     if (!nlc_ctx)
+         goto out;
+ 
+@@ -1152,7 +1144,7 @@ nlc_dump_inodectx(xlator_t *this, inode_t *inode)
+     nlc_ne_t *ne = NULL;
+     nlc_ne_t *tmp1 = NULL;
+ 
+-    nlc_inode_ctx_get(this, inode, &nlc_ctx, NULL);
++    nlc_inode_ctx_get(this, inode, &nlc_ctx);
+ 
+     if (!nlc_ctx)
+         goto out;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0406-afr-support-split-brain-CLI-for-replica-3.patch b/SOURCES/0406-afr-support-split-brain-CLI-for-replica-3.patch
new file mode 100644
index 0000000..4b57e8a
--- /dev/null
+++ b/SOURCES/0406-afr-support-split-brain-CLI-for-replica-3.patch
@@ -0,0 +1,185 @@
+From a75bb15fbe64f14580c44b8a33314c8bbeffdede Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Thu, 4 Jun 2020 18:54:46 +0530
+Subject: [PATCH 406/449] afr: support split-brain CLI for replica 3
+
+Patch in upstream master: https://review.gluster.org/#/c/glusterfs/+/23502/
+
+Ever since we added quorum checks for lookups in afr via commit
+bd44d59741bb8c0f5d7a62c5b1094179dd0ce8a4, the split-brain resolution
+commands would not work for replica 3 because there would be no
+readables for the lookup fop.
+
+The argument was that split-brains do not occur in replica 3, but we do
+see (data/metadata) split-brain cases once in a while, which indicates that
+there are a few bugs/corner cases yet to be discovered and fixed.
+
+Fortunately, commit 8016d51a3bbd410b0b927ed66be50a09574b7982 added
+GF_CLIENT_PID_GLFS_HEAL as the pid for all fops made by glfsheal. If we
+leverage this and allow lookups in afr when the pid is GF_CLIENT_PID_GLFS_HEAL,
+split-brain resolution commands will work for replica 3 volumes too.
+
+Likewise, the check is added in shard_lookup as well to permit resolving
+split-brains by specifying "/.shard/shard-file.xx" as the file name
+(which previously used to fail with EPERM).
+
+BUG: 1759875
+Change-Id: I203735b909c7d30fc4faaf3ecd4f5b6b379ab266
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202375
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../replicate/bug-1756938-replica-3-sbrain-cli.t   | 111 +++++++++++++++++++++
+ xlators/cluster/afr/src/afr-common.c               |   3 +-
+ xlators/features/shard/src/shard.c                 |   3 +-
+ 3 files changed, 115 insertions(+), 2 deletions(-)
+ create mode 100644 tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
+
+diff --git a/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
+new file mode 100644
+index 0000000..c1bdf34
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
+@@ -0,0 +1,111 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume set $V0 features.shard enable
++TEST $CLI volume set $V0 features.shard-block-size 4MB
++
++TEST $CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
++
++#Create split-brain by setting afr xattrs/gfids manually.
++#file1 is non-sharded and will be in data split-brain.
++#file2 will have one shard which will be in data split-brain.
++#file3 will have one shard which will be in gfid split-brain.
++#file4 will have one shard which will be in data & metadata split-brain.
++TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024 oflag=direct
++TEST dd if=/dev/zero of=$M0/file2 bs=1M count=6 oflag=direct
++TEST dd if=/dev/zero of=$M0/file3 bs=1M count=6 oflag=direct
++TEST dd if=/dev/zero of=$M0/file4 bs=1M count=6 oflag=direct
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++
++#-------------------------------------------------------------------------------
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/file1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/file1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/file1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/file1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/file1
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/file1
++
++#-------------------------------------------------------------------------------
++gfid_f2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file2))
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
++
++#-------------------------------------------------------------------------------
++TESTS_EXPECTED_IN_LOOP=5
++function assign_new_gfid {
++    brickpath=$1
++    filename=$2
++    gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/$filename))
++    gfid_shard=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/.shard/$gfid.1))
++
++    TEST rm $brickpath/.glusterfs/${gfid_shard:0:2}/${gfid_shard:2:2}/$gfid_shard
++    TEST setfattr -x trusted.gfid $brickpath/.shard/$gfid.1
++    new_gfid=$(get_random_gfid)
++    new_gfid_str=$(gf_gfid_xattr_to_str $new_gfid)
++    TEST setfattr -n trusted.gfid -v $new_gfid $brickpath/.shard/$gfid.1
++    TEST mkdir -p $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}
++    TEST ln $brickpath/.shard/$gfid.1 $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}/$new_gfid_str
++}
++assign_new_gfid $B0/$V0"1" file3
++assign_new_gfid $B0/$V0"2" file3
++
++#-------------------------------------------------------------------------------
++gfid_f4=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file4))
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
++TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
++
++#-------------------------------------------------------------------------------
++#Add entry to xattrop dir on first brick and check for split-brain.
++xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
++base_entry_b0=`ls $xattrop_dir0`
++
++gfid_f1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file1))
++TEST ln  $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f1
++
++gfid_f2_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f2.1))
++TEST ln  $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f2_shard1
++
++gfid_f3=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file3))
++gfid_f3_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f3.1))
++TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f3_shard1
++
++gfid_f4_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f4.1))
++TEST ln  $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f4_shard1
++
++#-------------------------------------------------------------------------------
++#gfid split-brain won't show up in split-brain count.
++EXPECT "3" afr_get_split_brain_count $V0
++EXPECT_NOT "^0$" get_pending_heal_count $V0
++
++#Resolve split-brains
++TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file1
++GFIDSTR="gfid:$gfid_f2_shard1"
++TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
++TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f3.1
++TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f4.1
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++cleanup;
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index eef7fd2..32127c6 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -2250,7 +2250,8 @@ afr_attempt_readsubvol_set(call_frame_t *frame, xlator_t *this,
+     if ((spb_choice >= 0) &&
+         (AFR_COUNT(success_replies, child_count) == child_count)) {
+         *read_subvol = spb_choice;
+-    } else if (!priv->quorum_count) {
++    } else if (!priv->quorum_count ||
++               frame->root->pid == GF_CLIENT_PID_GLFS_HEAL) {
+         *read_subvol = afr_first_up_child(frame, this);
+     } else if (priv->quorum_count &&
+                afr_has_quorum(data_readable, this, NULL)) {
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index 2e2ef5d..16d557b 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -1472,7 +1472,8 @@ int shard_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc,
+   shard_local_t *local = NULL;
+ 
+   this->itable = loc->inode->table;
+-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
++  if ((frame->root->pid != GF_CLIENT_PID_GSYNCD) &&
++      (frame->root->pid != GF_CLIENT_PID_GLFS_HEAL)) {
+     SHARD_ENTRY_FOP_CHECK(loc, op_errno, err);
+   }
+ 
+-- 
+1.8.3.1
+
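With the pid checks relaxed as above, the standard resolution commands work on replica 3, including for shard blocks addressed by gfid or by their /.shard path; a sketch in which the volume name, brick, and gfid are assumptions:

#!/bin/bash
VOL=testvol
SRC=server1:/bricks/testvol/b1    # chosen source brick

# Plain file in data split-brain:
gluster volume heal $VOL split-brain source-brick $SRC /file1

# Shard block addressed by its (hypothetical) gfid:
gluster volume heal $VOL split-brain source-brick $SRC \
    gfid:01234567-89ab-cdef-0123-456789abcdef

# Shard block addressed by its /.shard path (gfid of the base file):
gluster volume heal $VOL split-brain source-brick $SRC \
    /.shard/01234567-89ab-cdef-0123-456789abcdef.1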
diff --git a/SOURCES/0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch b/SOURCES/0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
new file mode 100644
index 0000000..459462d
--- /dev/null
+++ b/SOURCES/0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
@@ -0,0 +1,60 @@
+From de31f2b0cb09a59941892c9981cb8a8b3aced9ec Mon Sep 17 00:00:00 2001
+From: kshithijiyer <kshithij.ki@gmail.com>
+Date: Tue, 24 Dec 2019 13:02:21 +0530
+Subject: [PATCH 407/449] [geo-rep] Improving help message in
+ schedule_georep.py.in
+
+The SLAVE positional argument doesn't make clear what it is, compared
+to mastervol and slavevol, in schedule_georep.py.in. It would be better
+to change it to something like
+"Slave hostname (<username>@SLAVEHOST or SLAVEHOST)".
+
+Present:
+----------
+positional arguments:
+  mastervol            Master Volume Name
+  SLAVE                SLAVEHOST or root@SLAVEHOST or user@SLAVEHOST
+  slavevol             Slave Volume Name
+
+Suggested:
+-----------
+positional arguments:
+  mastervol            Master Volume Name
+  SLAVE                Slave hostname (<username>@SLAVEHOST or SLAVEHOST)
+  slavevol             Slave Volume Name
+
+Backport of:
+    >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/23919/
+    >fixes: bz#1786276
+    >Change-Id: I73d52247997d623f77d55e51cbb6eccc08eb95ff
+    >Signed-off-by: kshithijiyer <kshithij.ki@gmail.com>
+
+BUG: 1787994
+Change-Id: I73d52247997d623f77d55e51cbb6eccc08eb95ff
+Signed-off-by: kshithijiyer <kshithij.ki@gmail.com>
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202454
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/geo-rep/schedule_georep.py.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in
+index f29ae02..ac93716 100644
+--- a/extras/geo-rep/schedule_georep.py.in
++++ b/extras/geo-rep/schedule_georep.py.in
+@@ -459,8 +459,8 @@ if __name__ == "__main__":
+                             description=__doc__)
+     parser.add_argument("mastervol", help="Master Volume Name")
+     parser.add_argument("slave",
+-                        help="SLAVEHOST or root@SLAVEHOST "
+-                        "or user@SLAVEHOST",
++                        help="Slave hostname "
++                        "(<username>@SLAVEHOST or SLAVEHOST)",
+                         metavar="SLAVE")
+     parser.add_argument("slavevol", help="Slave Volume Name")
+     parser.add_argument("--interval", help="Interval in Seconds. "
+-- 
+1.8.3.1
+
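For reference, a hypothetical invocation showing where the SLAVE positional argument sits (the installed script path and account name are assumptions):

#!/bin/bash
python /usr/share/glusterfs/scripts/schedule_georep.py \
    mastervol geoaccount@slavehost slavevol --interval 60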
diff --git a/SOURCES/0408-geo-rep-Fix-ssh-port-validation.patch b/SOURCES/0408-geo-rep-Fix-ssh-port-validation.patch
new file mode 100644
index 0000000..9fad8d1
--- /dev/null
+++ b/SOURCES/0408-geo-rep-Fix-ssh-port-validation.patch
@@ -0,0 +1,107 @@
+From 07ab5a460da007fc3809b1a943614d1c7f5fcfef Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Fri, 17 Jan 2020 11:03:46 +0000
+Subject: [PATCH 408/449] geo-rep: Fix ssh-port validation
+
+If a non-standard ssh port is used, Geo-rep can be configured to use it via
+the ssh-port config option; the value should be within the allowed port
+range and non-negative.
+
+At present it accepts negative values and values outside the allowed port
+range, which is incorrect.
+
+Many Linux kernels use the port range 32768 to 61000.
+IANA suggests it should be in the range 1 to 2^16 - 1, so keeping the same.
+
+$ gluster volume geo-replication master 127.0.0.1::slave config ssh-port -22
+geo-replication config updated successfully
+$ gluster volume geo-replication master 127.0.0.1::slave config ssh-port 22222222
+geo-replication config updated successfully
+
+This patch fixes the above issue and adds a few validations around this
+in the test cases.
+Backport of:
+    >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24035/
+    >Change-Id: I9875ab3f00d7257370fbac6f5ed4356d2fed3f3c
+    >Fixes: bz#1792276
+    >Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+
+BUG: 1796814
+Change-Id: I9875ab3f00d7257370fbac6f5ed4356d2fed3f3c
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202453
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ geo-replication/gsyncd.conf.in                     |  4 +++-
+ tests/00-geo-rep/00-georep-verify-non-root-setup.t | 16 ++++++++++++++++
+ tests/00-geo-rep/georep-basic-dr-rsync.t           | 13 +++++++++++++
+ 3 files changed, 32 insertions(+), 1 deletion(-)
+
+diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
+index 9155cd8..11e57fd 100644
+--- a/geo-replication/gsyncd.conf.in
++++ b/geo-replication/gsyncd.conf.in
+@@ -266,7 +266,9 @@ allowed_values=ERROR,INFO,WARNING,DEBUG
+ 
+ [ssh-port]
+ value=22
+-validation=int
++validation=minmax
++min=1
++max=65535
+ help=Set SSH port
+ type=int
+ 
+diff --git a/tests/00-geo-rep/00-georep-verify-non-root-setup.t b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
+index c9fd8b2..12f0c01 100644
+--- a/tests/00-geo-rep/00-georep-verify-non-root-setup.t
++++ b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
+@@ -223,6 +223,22 @@ TEST $GEOREP_CLI  $master $slave_url resume
+ #Validate failure of volume stop when geo-rep is running
+ TEST ! $CLI volume stop $GMV0
+ 
++#Negative test for ssh-port
++#Port should be integer and between 1-65535 range
++
++TEST ! $GEOREP_CLI $master $slave_url config ssh-port -22
++
++TEST ! $GEOREP_CLI $master $slave_url config ssh-port abc
++
++TEST ! $GEOREP_CLI $master $slave_url config ssh-port 6875943
++
++TEST ! $GEOREP_CLI $master $slave_url config ssh-port 4.5
++
++TEST ! $GEOREP_CLI $master $slave_url config ssh-port 22a
++
++#Config Set ssh-port to validate int validation
++TEST $GEOREP_CLI $master $slave config ssh-port 22
++
+ #Hybrid directory rename test BZ#1763439
+ TEST $GEOREP_CLI $master $slave_url config change_detector xsync
+ mkdir ${master_mnt}/dir1
+diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t
+index b6fbf18..d785aa5 100644
+--- a/tests/00-geo-rep/georep-basic-dr-rsync.t
++++ b/tests/00-geo-rep/georep-basic-dr-rsync.t
+@@ -71,6 +71,19 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Created"
+ #Config gluster-command-dir
+ TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+ 
++#Negative test for ssh-port
++#Port should be integer and between 1-65535 range
++
++TEST ! $GEOREP_CLI $master $slave config ssh-port -22
++
++TEST ! $GEOREP_CLI $master $slave config ssh-port abc
++
++TEST ! $GEOREP_CLI $master $slave config ssh-port 6875943
++
++TEST ! $GEOREP_CLI $master $slave config ssh-port 4.5
++
++TEST ! $GEOREP_CLI $master $slave config ssh-port 22a
++
+ #Config Set ssh-port to validate int validation
+ TEST $GEOREP_CLI $master $slave config ssh-port 22
+ 
+-- 
+1.8.3.1
+
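After this change the minmax validation applies, so any value outside 1-65535, or not an integer, is rejected; a sketch mirroring the new negative tests (volume and host names are assumptions):

#!/bin/bash
GEOREP="gluster volume geo-replication mastervol slavehost::slavevol"

$GEOREP config ssh-port 2222        # accepted: integer within 1-65535
$GEOREP config ssh-port -22         # now rejected: negative
$GEOREP config ssh-port 6875943     # now rejected: above 65535
$GEOREP config ssh-port 4.5         # now rejected: not an integer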
diff --git a/SOURCES/0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch b/SOURCES/0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
new file mode 100644
index 0000000..ca1c25a
--- /dev/null
+++ b/SOURCES/0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
@@ -0,0 +1,52 @@
+From a92b4f6373cb18544325436cf86abfebd6780d79 Mon Sep 17 00:00:00 2001
+From: Homma <homma@allworks.co.jp>
+Date: Fri, 5 Jul 2019 16:10:41 +0530
+Subject: [PATCH 409/449] system/posix-acl: update ctx only if iatt is non-NULL
+
+We need to safeguard against possible zeroing out of the iatt
+structure in the acl ctx, which can cause many issues.
+
+> upstream patch: https://review.gluster.org/#/c/glusterfs/+/23003/
+> fixes: 1668286
+> Change-Id: Ie81a57d7453a6624078de3be8c0845bf4d432773
+> Signed-off-by: Amar Tumballi <amarts@redhat.com>
+
+BUG: 1781649
+Change-Id: I655b61551d30215b9f23cafc3ef9a5c0d98a43d0
+Signed-off-by: Raghavendra M <raghavendra@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202446
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/system/posix-acl/src/posix-acl.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/xlators/system/posix-acl/src/posix-acl.c b/xlators/system/posix-acl/src/posix-acl.c
+index 38e48b8..c6ba281 100644
+--- a/xlators/system/posix-acl/src/posix-acl.c
++++ b/xlators/system/posix-acl/src/posix-acl.c
+@@ -875,6 +875,13 @@ posix_acl_ctx_update(inode_t *inode, xlator_t *this, struct iatt *buf,
+     int ret = 0;
+     int i = 0;
+ 
++    if (!buf || !buf->ia_ctime) {
++        /* No need to update ctx if buf is empty */
++        gf_log_callingfn(this->name, GF_LOG_DEBUG, "iatt struct is empty (%d)",
++                         fop);
++        goto out;
++    }
++
+     LOCK(&inode->lock);
+     {
+         ctx = __posix_acl_ctx_get(inode, this, _gf_true);
+@@ -928,6 +935,7 @@ posix_acl_ctx_update(inode_t *inode, xlator_t *this, struct iatt *buf,
+     }
+ unlock:
+     UNLOCK(&inode->lock);
++out:
+     return ret;
+ }
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch b/SOURCES/0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
new file mode 100644
index 0000000..97bdc78
--- /dev/null
+++ b/SOURCES/0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
@@ -0,0 +1,249 @@
+From 2b2eb846c49caba13ab92ec66af20292e7780fc1 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Tue, 11 Feb 2020 14:34:48 +0530
+Subject: [PATCH 410/449] afr: prevent spurious entry heals leading to gfid
+ split-brain
+
+Problem:
+In a hyperconverged setup with granular-entry-heal enabled, if a file is
+recreated while one of the bricks is down, and an index heal is triggered
+(with the brick still down), entry-self heal was doing a spurious heal
+with just the 2 good bricks. It was doing a post-op leading to removal
+of the filename from .glusterfs/indices/entry-changes as well as
+erroneous setting of afr xattrs on the parent. When the brick came up,
+the xattrs were cleared, resulting in the renamed file not getting
+healed and leading to gfid split-brain and EIO on the mount.
+
+Fix:
+Proceed with entry heal only when shd can connect to all bricks of the replica,
+just like in data and metadata heal.
+
+BUG: 1804164
+
+> Upstream patch:https://review.gluster.org/#/c/glusterfs/+/24109/
+> fixes: bz#1801624
+> Change-Id: I916ae26ad1fabf259bc6362da52d433b7223b17e
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+
+Change-Id: I23f57e543cff1e3f35eb8dbc60a2babfae6838c7
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202395
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../bug-1433571-undo-pending-only-on-up-bricks.t   | 18 ++-----
+ tests/bugs/replicate/bug-1801624-entry-heal.t      | 58 ++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-common.c               |  4 +-
+ xlators/cluster/afr/src/afr-self-heal-common.c     |  8 +--
+ xlators/cluster/afr/src/afr-self-heal-entry.c      |  6 +--
+ xlators/cluster/afr/src/afr-self-heal-name.c       |  2 +-
+ xlators/cluster/afr/src/afr-self-heal.h            |  2 -
+ 7 files changed, 69 insertions(+), 29 deletions(-)
+ create mode 100644 tests/bugs/replicate/bug-1801624-entry-heal.t
+
+diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
+index 0767f47..10ce013 100644
+--- a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
++++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
+@@ -49,25 +49,15 @@ TEST $CLI volume start $V0 force
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+ 
+-#Kill brick 0 and turn on the client side heal and do ls to trigger the heal.
+-#The pending xattrs on bricks 1 & 2 should have pending entry on brick 0.
+-TEST kill_brick $V0 $H0 $B0/${V0}0
++# We were killing one brick and checking that entry heal does not reset the
++# pending xattrs for the down brick. Now that we need all bricks to be up for
++# entry heal, I'm removing that test from the .t
++
+ TEST $CLI volume set $V0 cluster.data-self-heal on
+ TEST $CLI volume set $V0 cluster.metadata-self-heal on
+ TEST $CLI volume set $V0 cluster.entry-self-heal on
+ 
+ TEST ls $M0
+-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+-
+-#Bring back all the bricks and trigger the heal again by doing ls. Now the
+-#pending xattrs on all the bricks should be 0.
+-TEST $CLI volume start $V0 force
+-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+-TEST ls $M0
+-
+ TEST cat $M0/f1
+ TEST cat $M0/f2
+ TEST cat $M0/f3
+diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
+new file mode 100644
+index 0000000..94b4651
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
+@@ -0,0 +1,58 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
++TEST $CLI volume set $V0 heal-timeout 5
++TEST $CLI volume start $V0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
++TEST $CLI volume heal $V0 granular-entry-heal enable
++
++TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
++echo "Data">$M0/FILE
++ret=$?
++TEST [ $ret -eq 0 ]
++
++# Re-create the file when a brick is down.
++TEST kill_brick $V0 $H0 $B0/brick1
++TEST rm $M0/FILE
++echo "New Data">$M0/FILE
++ret=$?
++TEST [ $ret -eq 0 ]
++EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
++
++# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
++$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
++TEST sleep 5 # give index heal a chance to do one run.
++brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
++brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
++TEST [ $brick0_pending -eq "000000000000000000000002" ]
++TEST [ $brick2_pending -eq "000000000000000000000002" ]
++EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
++EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
++
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++$CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
++
++# No gfid-split-brain (i.e. EIO) must be seen. Try on fresh mount to avoid cached values.
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
++TEST cat $M0/FILE
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++cleanup;
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 32127c6..5806556 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -6629,7 +6629,7 @@ afr_fav_child_reset_sink_xattrs(void *opaque)
+         ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name, 0, 0,
+                                    locked_on);
+         {
+-            if (ret < AFR_SH_MIN_PARTICIPANTS)
++            if (ret < priv->child_count)
+                 goto data_unlock;
+             ret = __afr_selfheal_data_prepare(
+                 heal_frame, this, inode, locked_on, sources, sinks,
+@@ -6646,7 +6646,7 @@ afr_fav_child_reset_sink_xattrs(void *opaque)
+         ret = afr_selfheal_inodelk(heal_frame, this, inode, this->name,
+                                    LLONG_MAX - 1, 0, locked_on);
+         {
+-            if (ret < AFR_SH_MIN_PARTICIPANTS)
++            if (ret < priv->child_count)
+                 goto mdata_unlock;
+             ret = __afr_selfheal_metadata_prepare(
+                 heal_frame, this, inode, locked_on, sources, sinks,
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index 81ef38a..ce1ea50 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -1575,7 +1575,6 @@ afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
+     char *accused = NULL;      /* Accused others without any self-accusal */
+     char *pending = NULL;      /* Have pending operations on others */
+     char *self_accused = NULL; /* Accused itself */
+-    int min_participants = -1;
+ 
+     priv = this->private;
+ 
+@@ -1599,12 +1598,7 @@ afr_selfheal_find_direction(call_frame_t *frame, xlator_t *this,
+         }
+     }
+ 
+-    if (type == AFR_DATA_TRANSACTION || type == AFR_METADATA_TRANSACTION) {
+-        min_participants = priv->child_count;
+-    } else {
+-        min_participants = AFR_SH_MIN_PARTICIPANTS;
+-    }
+-    if (afr_success_count(replies, priv->child_count) < min_participants) {
++    if (afr_success_count(replies, priv->child_count) < priv->child_count) {
+         /* Treat this just like locks not being acquired */
+         return -ENOTCONN;
+     }
+diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
+index 3ce882e..40be898 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
+@@ -597,7 +597,7 @@ afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+     ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
+                                locked_on);
+     {
+-        if (ret < AFR_SH_MIN_PARTICIPANTS) {
++        if (ret < priv->child_count) {
+             gf_msg_debug(this->name, 0,
+                          "%s: Skipping "
+                          "entry self-heal as only %d sub-volumes "
+@@ -991,7 +991,7 @@ __afr_selfheal_entry(call_frame_t *frame, xlator_t *this, fd_t *fd,
+     ret = afr_selfheal_entrylk(frame, this, fd->inode, this->name, NULL,
+                                data_lock);
+     {
+-        if (ret < AFR_SH_MIN_PARTICIPANTS) {
++        if (ret < priv->child_count) {
+             gf_msg_debug(this->name, 0,
+                          "%s: Skipping "
+                          "entry self-heal as only %d sub-volumes could "
+@@ -1115,7 +1115,7 @@ afr_selfheal_entry(call_frame_t *frame, xlator_t *this, inode_t *inode)
+     ret = afr_selfheal_tie_breaker_entrylk(frame, this, inode, priv->sh_domain,
+                                            NULL, locked_on);
+     {
+-        if (ret < AFR_SH_MIN_PARTICIPANTS) {
++        if (ret < priv->child_count) {
+             gf_msg_debug(this->name, 0,
+                          "%s: Skipping "
+                          "entry self-heal as only %d sub-volumes could "
+diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
+index 36640b5..7d4f208 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-name.c
++++ b/xlators/cluster/afr/src/afr-self-heal-name.c
+@@ -514,7 +514,7 @@ afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+     ret = afr_selfheal_entrylk(frame, this, parent, this->name, bname,
+                                locked_on);
+     {
+-        if (ret < AFR_SH_MIN_PARTICIPANTS) {
++        if (ret < priv->child_count) {
+             ret = -ENOTCONN;
+             goto unlock;
+         }
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index 6555ec5..8234cec 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -11,8 +11,6 @@
+ #ifndef _AFR_SELFHEAL_H
+ #define _AFR_SELFHEAL_H
+ 
+-#define AFR_SH_MIN_PARTICIPANTS 2
+-
+ /* Perform fop on all UP subvolumes and wait for all callbacks to return */
+ 
+ #define AFR_ONALL(frame, rfn, fop, args...)                                    \
+-- 
+1.8.3.1
+
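The observable effect, as the new .t checks: with a brick still down, launching index heal must leave the parent directory's pending xattrs and the granular entry indices untouched. A sketch for inspecting this by hand (brick path assumed):

#!/bin/bash
BRICK=/bricks/testvol/b0    # a brick that stayed up

# Pending afr xattrs on the parent should still blame the down brick:
getfattr -d -m 'trusted.afr.' -e hex $BRICK/

# The granular entry index for the root dir should still list the file:
ls $BRICK/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/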
diff --git a/SOURCES/0411-tools-glusterfind-validate-session-name.patch b/SOURCES/0411-tools-glusterfind-validate-session-name.patch
new file mode 100644
index 0000000..db633f2
--- /dev/null
+++ b/SOURCES/0411-tools-glusterfind-validate-session-name.patch
@@ -0,0 +1,116 @@
+From 854defb4ff5e0d53f51545d20796aff662f9850f Mon Sep 17 00:00:00 2001
+From: Saravanakumar Arumugam <sarumuga@redhat.com>
+Date: Thu, 9 Jul 2015 15:56:28 +0530
+Subject: [PATCH 411/449] tools/glusterfind: validate session name
+
+Validate a session name (during create) for the following:
+1. Minimum 2 characters in length.
+2. Maximum 256 characters.
+3. No special characters allowed apart from underscore and hyphen.
+
+Also, validate the volume (except when using glusterfind list).
+
+>Change-Id: I1b1e64e218f93d0a531d3cf69fc2ce7e2ed11d01
+>BUG: 1241494
+>Signed-off-by: Saravanakumar Arumugam <sarumuga@redhat.com>
+>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+
+backport of https://review.gluster.org/#/c/glusterfs/+/11602/
+
+BUG: 1234220
+Change-Id: I1b1e64e218f93d0a531d3cf69fc2ce7e2ed11d01
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202469
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tools/glusterfind/src/main.py | 50 ++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 42 insertions(+), 8 deletions(-)
+
+diff --git a/tools/glusterfind/src/main.py b/tools/glusterfind/src/main.py
+index 5ca1fec..4b5466d 100644
+--- a/tools/glusterfind/src/main.py
++++ b/tools/glusterfind/src/main.py
+@@ -23,6 +23,7 @@ import tempfile
+ import signal
+ from datetime import datetime
+ import codecs
++import re
+ 
+ from utils import execute, is_host_local, mkdirp, fail
+ from utils import setup_logger, human_time, handle_rm_error
+@@ -520,11 +521,8 @@ def write_output(outfile, outfilemerger, field_separator):
+                 else:
+                     gfind_write(f, row[0], field_separator, p_rep)
+ 
+-def mode_create(session_dir, args):
+-    logger.debug("Init is called - Session: %s, Volume: %s"
+-                 % (args.session, args.volume))
+-
+-    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
++def validate_volume(volume):
++    cmd = ["gluster", 'volume', 'info', volume, "--xml"]
+     _, data, _ = execute(cmd,
+                          exit_msg="Failed to Run Gluster Volume Info",
+                          logger=logger)
+@@ -532,11 +530,42 @@ def mode_create(session_dir, args):
+         tree = etree.fromstring(data)
+         statusStr = tree.find('volInfo/volumes/volume/statusStr').text
+     except (ParseError, AttributeError) as e:
+-        fail("Invalid Volume: %s" % e, logger=logger)
+-
++        fail("Invalid Volume: Check the Volume name! %s" % e)
+     if statusStr != "Started":
+-        fail("Volume %s is not online" % args.volume, logger=logger)
++        fail("Volume %s is not online" % volume)
++
++# The rules for a valid session name.
++SESSION_NAME_RULES = {
++    'min_length': 2,
++    'max_length': 256,  # same as maximum volume length
++    # Specifies all alphanumeric characters, underscore, hyphen.
++    'valid_chars': r'0-9a-zA-Z_-',
++}
++
++
++# checks valid session name, fail otherwise
++def validate_session_name(session):
++    # Check for minimum length
++    if len(session) < SESSION_NAME_RULES['min_length']:
++        fail('session_name must be at least ' +
++                 str(SESSION_NAME_RULES['min_length']) + ' characters long.')
++    # Check for maximum length
++    if len(session) > SESSION_NAME_RULES['max_length']:
++        fail('session_name must not exceed ' +
++                 str(SESSION_NAME_RULES['max_length']) + ' characters length.')
++
++    # Matches strings composed entirely of characters specified within
++    if not re.match(r'^[' + SESSION_NAME_RULES['valid_chars'] +
++                        ']+$', session):
++        fail('Session name can only contain these characters: ' +
++                         SESSION_NAME_RULES['valid_chars'])
++
++
++def mode_create(session_dir, args):
++    validate_session_name(args.session)
+ 
++    logger.debug("Init is called - Session: %s, Volume: %s"
++                 % (args.session, args.volume))
+     mkdirp(session_dir, exit_on_err=True, logger=logger)
+     mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True,
+            logger=logger)
+@@ -850,6 +879,11 @@ def main():
+                 args.mode not in ["create", "list", "query"]:
+             fail("Invalid session %s" % args.session)
+ 
++        # volume involved, validate the volume first
++        if args.mode not in ["list"]:
++            validate_volume(args.volume)
++
++
+         # "default" is a system defined session name
+         if args.mode in ["create", "post", "pre", "delete"] and \
+                 args.session == "default":
+-- 
+1.8.3.1
+
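The rules in action (the volume name is an assumption):

#!/bin/bash
glusterfind create nightly_backup-01 myvol    # ok: alphanumerics, _, -
glusterfind create s myvol                    # fails: fewer than 2 characters
glusterfind create 'bad name!' myvol          # fails: space and '!' not allowed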
diff --git a/SOURCES/0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch b/SOURCES/0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
new file mode 100644
index 0000000..865fddf
--- /dev/null
+++ b/SOURCES/0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
@@ -0,0 +1,46 @@
+From 0769c5ddc78ea37b9a43ac35dd71ec8cea4b8da8 Mon Sep 17 00:00:00 2001
+From: yinkui <13965432176@163.com>
+Date: Fri, 16 Aug 2019 10:15:07 +0800
+Subject: [PATCH 412/449] gluster-smb: add smb parameter when accessing
+ gluster by cifs
+
+Backport of https://review.gluster.org/23240
+
+Change-Id: I9ff54f2ca6f86bb5b2f4740485a0159e1fd7785f
+BUG: 1783232
+Signed-off-by: yinkui <13965432176@163.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202472
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/hook-scripts/set/post/S30samba-set.sh     | 1 +
+ extras/hook-scripts/start/post/S30samba-start.sh | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/extras/hook-scripts/set/post/S30samba-set.sh b/extras/hook-scripts/set/post/S30samba-set.sh
+index d2a62d3..e73f00f 100755
+--- a/extras/hook-scripts/set/post/S30samba-set.sh
++++ b/extras/hook-scripts/set/post/S30samba-set.sh
+@@ -90,6 +90,7 @@ function add_samba_share () {
+         STRING+="path = /\n"
+         STRING+="read only = no\n"
+         STRING+="guest ok = yes\n"
++        STRING+="kernel share modes = no\n"
+         printf "$STRING"  >> ${CONFIGFILE}
+ }
+ 
+diff --git a/extras/hook-scripts/start/post/S30samba-start.sh b/extras/hook-scripts/start/post/S30samba-start.sh
+index 2854bdd..0d5a5ed 100755
+--- a/extras/hook-scripts/start/post/S30samba-start.sh
++++ b/extras/hook-scripts/start/post/S30samba-start.sh
+@@ -89,6 +89,7 @@ function add_samba_share () {
+         STRING+="path = /\n"
+         STRING+="read only = no\n"
+         STRING+="guest ok = yes\n"
++        STRING+="kernel share modes = no\n"
+         printf "$STRING"  >> "${CONFIGFILE}"
+ }
+ 
+-- 
+1.8.3.1
+
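Roughly the share section the hook now appends for a started volume; the lines above "glusterfs:loglevel" are reconstructed from the hook script and should be treated as assumptions, as should the volume name:

#!/bin/bash
VOL=testvol
cat <<EOF
[gluster-$VOL]
comment = For samba share of volume $VOL
vfs objects = glusterfs
glusterfs:volume = $VOL
glusterfs:logfile = /var/log/samba/glusterfs-$VOL.%M.log
glusterfs:loglevel = 7
path = /
read only = no
guest ok = yes
kernel share modes = no
EOF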
diff --git a/SOURCES/0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch b/SOURCES/0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
new file mode 100644
index 0000000..1ff6348
--- /dev/null
+++ b/SOURCES/0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
@@ -0,0 +1,46 @@
+From aec3dd00fa76547316fddd07e6ded428d945986c Mon Sep 17 00:00:00 2001
+From: Anoop C S <anoopcs@redhat.com>
+Date: Fri, 22 Nov 2019 17:36:55 +0530
+Subject: [PATCH 413/449] extras/hooks: Remove smb.conf parameter allowing
+ guest access
+
+Backport of https://review.gluster.org/23745
+
+Change-Id: I88f494f16153d27ab6e2f2faf4d557e075671b10
+BUG: 1775637
+Signed-off-by: Anoop C S <anoopcs@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202473
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/hook-scripts/set/post/S30samba-set.sh     | 1 -
+ extras/hook-scripts/start/post/S30samba-start.sh | 1 -
+ 2 files changed, 2 deletions(-)
+
+diff --git a/extras/hook-scripts/set/post/S30samba-set.sh b/extras/hook-scripts/set/post/S30samba-set.sh
+index e73f00f..854f131 100755
+--- a/extras/hook-scripts/set/post/S30samba-set.sh
++++ b/extras/hook-scripts/set/post/S30samba-set.sh
+@@ -89,7 +89,6 @@ function add_samba_share () {
+         STRING+="glusterfs:loglevel = 7\n"
+         STRING+="path = /\n"
+         STRING+="read only = no\n"
+-        STRING+="guest ok = yes\n"
+         STRING+="kernel share modes = no\n"
+         printf "$STRING"  >> ${CONFIGFILE}
+ }
+diff --git a/extras/hook-scripts/start/post/S30samba-start.sh b/extras/hook-scripts/start/post/S30samba-start.sh
+index 0d5a5ed..cac0cbf 100755
+--- a/extras/hook-scripts/start/post/S30samba-start.sh
++++ b/extras/hook-scripts/start/post/S30samba-start.sh
+@@ -88,7 +88,6 @@ function add_samba_share () {
+         STRING+="glusterfs:loglevel = 7\n"
+         STRING+="path = /\n"
+         STRING+="read only = no\n"
+-        STRING+="guest ok = yes\n"
+         STRING+="kernel share modes = no\n"
+         printf "$STRING"  >> "${CONFIGFILE}"
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch b/SOURCES/0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
new file mode 100644
index 0000000..67b71dd
--- /dev/null
+++ b/SOURCES/0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
@@ -0,0 +1,90 @@
+From 5b549cbf3f1873054c6d187b09aa9f9313971b1f Mon Sep 17 00:00:00 2001
+From: Kinglong Mee <kinglongmee@gmail.com>
+Date: Mon, 18 Mar 2019 20:47:54 +0800
+Subject: [PATCH 414/449] cluster-syncop: avoid duplicate unlock of
+ inodelk/entrylk
+
+When using ec, there are many spam messages in brick and client
+log files.
+
+When shd does entry heal, it takes a lock on a directory using
+cluster_tiebreaker_inodelk(). If it does not get the lock on all
+the bricks because other clients hold locks on some of them,
+it releases the locks it did get and then tries blocking locks
+(if at least one of the previous attempts was successful).
+
+The problem arises here: when we do not get locks on all the
+required bricks, we end up sending the unlock request twice to
+those bricks where we did get the locks.
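+
+To illustrate the fix (a hedged, standalone C sketch, not the patch
+code itself; unlock_subvol() here is a hypothetical stand-in):
+releasing the recorded locks and immediately wiping the bookkeeping
+makes any later cleanup pass a harmless no-op.
+
+    #include <stdio.h>
+    #include <string.h>
+
+    static void unlock_subvol(int i) { printf("unlock %d\n", i); }
+
+    /* Release every lock recorded in locked_on[], then clear the
+     * map so a second pass cannot unlock the same brick twice. */
+    static void release_locks(unsigned char *locked_on, int count)
+    {
+        for (int i = 0; i < count; i++)
+            if (locked_on[i])
+                unlock_subvol(i);
+        memset(locked_on, 0, count);
+    }
+
+    int main(void)
+    {
+        unsigned char locked_on[3] = {1, 0, 1};
+        release_locks(locked_on, 3);
+        release_locks(locked_on, 3); /* no output: map was cleared */
+        return 0;
+    }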
+
+BUG: 1750211
+> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/22377/
+> Change-Id: Ib164d29ebb071f620a4ca9679c4345ef7c88512a
+> Updates: bz#1689920
+> Signed-off-by: Kinglong Mee <mijinlong@open-fs.com>
+
+Change-Id: I1647548ba75fdd27fd4e20dec08db67774f43375
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202477
+Tested-by: Ashish Pandey <aspandey@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/cluster-syncop.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/libglusterfs/src/cluster-syncop.c b/libglusterfs/src/cluster-syncop.c
+index 5a08f26..6ee89dd 100644
+--- a/libglusterfs/src/cluster-syncop.c
++++ b/libglusterfs/src/cluster-syncop.c
+@@ -1203,6 +1203,10 @@ cluster_tiebreaker_inodelk(xlator_t **subvols, unsigned char *on,
+             if (num_success) {
+                 FOP_SEQ(subvols, on, numsubvols, replies, locked_on, frame,
+                         inodelk, dom, &loc, F_SETLKW, &flock, NULL);
++            } else {
++                loc_wipe(&loc);
++                memset(locked_on, 0, numsubvols);
++                return 0;
+             }
+             break;
+         }
+@@ -1244,7 +1248,9 @@ cluster_tiebreaker_entrylk(xlator_t **subvols, unsigned char *on,
+                         entrylk, dom, &loc, name, ENTRYLK_LOCK, ENTRYLK_WRLCK,
+                         NULL);
+             } else {
++                loc_wipe(&loc);
+                 memset(locked_on, 0, numsubvols);
++                return 0;
+             }
+             break;
+         }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0415-dht-Fix-stale-layout-and-create-issue.patch b/SOURCES/0415-dht-Fix-stale-layout-and-create-issue.patch
new file mode 100644
index 0000000..476a8cc
--- /dev/null
+++ b/SOURCES/0415-dht-Fix-stale-layout-and-create-issue.patch
@@ -0,0 +1,546 @@
+From ba23e6d8f4eff11a228816149a8a1ccd6df41146 Mon Sep 17 00:00:00 2001
+From: Susant Palai <spalai@redhat.com>
+Date: Fri, 27 Dec 2019 12:06:19 +0530
+Subject: [PATCH 415/449] dht: Fix stale-layout and create issue
+
+Problem: With lookup-optimize set to on by default, a client with a
+stale layout can create a new file on the wrong subvol. This can lead
+to duplicate files if two different clients attempt to create the
+same file with two different layouts.
+
+Solution: Send the in-memory layout to be cross-checked at posix before
+committing a "create". In case of a mismatch, sync the client layout
+with that of the server and attempt the create fop one more time.
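+
+The server-side check amounts to comparing the layout bytes the client
+sent with the parent directory's on-disk xattr. A hedged, plain-libc
+sketch of that comparison (hypothetical names, not the actual posix
+code):
+
+    #include <string.h>
+    #include <sys/types.h>
+    #include <sys/xattr.h>
+
+    /* Return 1 if the layout blob the client sent no longer matches
+     * the one stored on disk for the parent directory. */
+    static int layout_is_stale(const char *parent, const char *key,
+                               const void *sent, size_t sent_len)
+    {
+        char on_disk[4096];
+        ssize_t size = lgetxattr(parent, key, on_disk, sizeof(on_disk));
+
+        if (size < 0)
+            return 0;              /* no xattr: nothing to compare */
+        return (size != (ssize_t)sent_len) ||
+               memcmp(on_disk, sent, size) != 0;
+    }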
+
+test: Manual, testcase (attached)
+
+(Backport of https://review.gluster.org/#/c/glusterfs/+/23927/)
+
+BUG: 1748865
+Change-Id: I6c82c97418654ae8eb3b81ab65f1247aa4002ceb
+Signed-off-by: Susant Palai <spalai@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202465
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/distribute/bug-1786679.t              |  69 +++++++++++
+ xlators/cluster/dht/src/dht-common.c             | 147 ++++++++++++++++++++---
+ xlators/cluster/dht/src/dht-common.h             |   6 +
+ xlators/protocol/client/src/client-rpc-fops_v2.c |   9 +-
+ xlators/storage/posix/src/posix-entry-ops.c      |  29 ++++-
+ xlators/storage/posix/src/posix-helpers.c        |  76 ++++++++++++
+ xlators/storage/posix/src/posix.h                |   4 +
+ 7 files changed, 321 insertions(+), 19 deletions(-)
+ create mode 100755 tests/bugs/distribute/bug-1786679.t
+
+diff --git a/tests/bugs/distribute/bug-1786679.t b/tests/bugs/distribute/bug-1786679.t
+new file mode 100755
+index 0000000..219ce51
+--- /dev/null
++++ b/tests/bugs/distribute/bug-1786679.t
+@@ -0,0 +1,69 @@
++#!/bin/bash
++
++SCRIPT_TIMEOUT=250
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../dht.rc
++
++
++# create 2 subvols
++# create a dir
++# create a file
++# change layout
++# remove the file
++# execute create from a different mount
++# Without the patch, the file will be present on both of the bricks
++
++cleanup
++
++function get_layout () {
++
++layout=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1 | grep dht | gawk -F"=" '{print $2}'`
++
++echo $layout
++
++}
++
++function set_layout()
++{
++    setfattr -n  "trusted.glusterfs.dht" -v $1 $2
++}
++
++TEST glusterd
++TEST pidof glusterd
++
++BRICK1=$B0/${V0}-0
++BRICK2=$B0/${V0}-1
++
++TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
++TEST $CLI volume start $V0
++
++# Mount over FUSE and create a directory and a file
++TEST glusterfs -s $H0 --volfile-id $V0 $M0
++TEST mkdir $M0/dir
++TEST touch $M0/dir/file
++TEST ! stat "$BRICK1/dir/file"
++TEST stat "$BRICK2/dir/file"
++
++layout1="$(get_layout "$BRICK1/dir")"
++layout2="$(get_layout "$BRICK2/dir")"
++
++TEST set_layout $layout1 "$BRICK2/dir"
++TEST set_layout $layout2 "$BRICK1/dir"
++
++TEST rm $M0/dir/file -f
++TEST gluster v set $V0 client-log-level DEBUG
++
++#Without the patch in place, this client will create the file in $BRICK2,
++#which will lead to the file existing on both bricks when a new client
++#creates the file with the same name
++TEST touch $M0/dir/file
++
++TEST glusterfs -s $H0 --volfile-id $V0 $M1
++TEST touch $M1/dir/file
++
++TEST stat "$BRICK1/dir/file"
++TEST ! stat "$BRICK2/dir/file"
++
++cleanup
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index 7890e7a..6aa18f3 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -8262,6 +8262,11 @@ dht_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+     xlator_t *prev = NULL;
+     int ret = -1;
+     dht_local_t *local = NULL;
++    gf_boolean_t parent_layout_changed = _gf_false;
++    char pgfid[GF_UUID_BUF_SIZE] = {0};
++    xlator_t *subvol = NULL;
++
++    local = frame->local;
+ 
+     local = frame->local;
+     if (!local) {
+@@ -8270,8 +8275,69 @@ dht_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+         goto out;
+     }
+ 
+-    if (op_ret == -1)
++    if (op_ret == -1) {
++        local->op_errno = op_errno;
++        parent_layout_changed = (xdata &&
++                                 dict_get(xdata, GF_PREOP_CHECK_FAILED))
++                                    ? _gf_true
++                                    : _gf_false;
++
++        if (parent_layout_changed) {
++            if (local && local->lock[0].layout.parent_layout.locks) {
++                /* Returning failure as the layout could not be fixed even under
++                 * the lock */
++                goto out;
++            }
++
++            gf_uuid_unparse(local->loc.parent->gfid, pgfid);
++            gf_msg(this->name, GF_LOG_INFO, 0, DHT_MSG_PARENT_LAYOUT_CHANGED,
++                   "create (%s/%s) (path: %s): parent layout "
++                   "changed. Attempting a layout refresh and then a "
++                   "retry",
++                   pgfid, local->loc.name, local->loc.path);
++
++            /*
++              dht_refresh_layout needs directory info in local->loc.Hence,
++              storing the parent_loc in local->loc and storing the create
++              context in local->loc2. We will restore this information in
++              dht_creation_do.
++             */
++
++            loc_wipe(&local->loc2);
++
++            ret = loc_copy(&local->loc2, &local->loc);
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, ENOMEM, DHT_MSG_NO_MEMORY,
++                       "loc_copy failed %s", local->loc.path);
++
++                goto out;
++            }
++
++            loc_wipe(&local->loc);
++
++            ret = dht_build_parent_loc(this, &local->loc, &local->loc2,
++                                       &op_errno);
++
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, ENOMEM, DHT_MSG_LOC_FAILED,
++                       "parent loc build failed");
++                goto out;
++            }
++
++            subvol = dht_subvol_get_hashed(this, &local->loc2);
++
++            ret = dht_create_lock(frame, subvol);
++            if (ret < 0) {
++                gf_msg(this->name, GF_LOG_ERROR, 0, DHT_MSG_INODE_LK_ERROR,
++                       "locking parent failed");
++                goto out;
++            }
++
++            return 0;
++        }
++
+         goto out;
++    }
+ 
+     prev = cookie;
+ 
+@@ -8392,6 +8458,8 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
+         gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
+                      subvol->name);
+ 
++        dht_set_parent_layout_in_dict(loc, this, local);
++
+         STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
+                           subvol->fops->create, loc, flags, mode, umask, fd,
+                           params);
+@@ -8400,10 +8468,6 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
+         avail_subvol = dht_free_disk_available_subvol(this, subvol, local);
+ 
+         if (avail_subvol != subvol) {
+-            local->params = dict_ref(params);
+-            local->flags = flags;
+-            local->mode = mode;
+-            local->umask = umask;
+             local->cached_subvol = avail_subvol;
+             local->hashed_subvol = subvol;
+ 
+@@ -8419,6 +8483,8 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
+         gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
+                      subvol->name);
+ 
++        dht_set_parent_layout_in_dict(loc, this, local);
++
+         STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
+                           subvol->fops->create, loc, flags, mode, umask, fd,
+                           params);
+@@ -8680,6 +8746,60 @@ err:
+ }
+ 
+ int
++dht_set_parent_layout_in_dict(loc_t *loc, xlator_t *this, dht_local_t *local)
++{
++    dht_conf_t *conf = this->private;
++    dht_layout_t *parent_layout = NULL;
++    int *parent_disk_layout = NULL;
++    xlator_t *hashed_subvol = NULL;
++    char pgfid[GF_UUID_BUF_SIZE] = {0};
++    int ret = 0;
++
++    gf_uuid_unparse(loc->parent->gfid, pgfid);
++
++    parent_layout = dht_layout_get(this, loc->parent);
++    hashed_subvol = dht_subvol_get_hashed(this, loc);
++
++    ret = dht_disk_layout_extract_for_subvol(this, parent_layout, hashed_subvol,
++                                             &parent_disk_layout);
++    if (ret == -1) {
++        gf_msg(this->name, GF_LOG_WARNING, local->op_errno,
++               DHT_MSG_PARENT_LAYOUT_CHANGED,
++               "%s (%s/%s) (path: %s): "
++               "extracting in-memory layout of parent failed. ",
++               gf_fop_list[local->fop], pgfid, loc->name, loc->path);
++        goto err;
++    }
++
++    ret = dict_set_str_sizen(local->params, GF_PREOP_PARENT_KEY,
++                             conf->xattr_name);
++    if (ret < 0) {
++        gf_msg(this->name, GF_LOG_WARNING, local->op_errno,
++               DHT_MSG_PARENT_LAYOUT_CHANGED,
++               "%s (%s/%s) (path: %s): "
++               "setting %s key in params dictionary failed. ",
++               gf_fop_list[local->fop], pgfid, loc->name, loc->path,
++               GF_PREOP_PARENT_KEY);
++        goto err;
++    }
++
++    ret = dict_set_bin(local->params, conf->xattr_name, parent_disk_layout,
++                       4 * 4);
++    if (ret < 0) {
++        gf_msg(this->name, GF_LOG_WARNING, local->op_errno,
++               DHT_MSG_PARENT_LAYOUT_CHANGED,
++               "%s (%s/%s) (path: %s): "
++               "setting parent-layout in params dictionary failed. ",
++               gf_fop_list[local->fop], pgfid, loc->name, loc->path);
++        goto err;
++    }
++
++err:
++    dht_layout_unref(this, parent_layout);
++    return ret;
++}
++
++int
+ dht_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+            mode_t mode, mode_t umask, fd_t *fd, dict_t *params)
+ {
+@@ -8705,6 +8825,11 @@ dht_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+         goto err;
+     }
+ 
++    local->params = dict_ref(params);
++    local->flags = flags;
++    local->mode = mode;
++    local->umask = umask;
++
+     if (dht_filter_loc_subvol_key(this, loc, &local->loc, &subvol)) {
+         gf_msg(this->name, GF_LOG_INFO, 0, DHT_MSG_SUBVOL_INFO,
+                "creating %s on %s (got create on %s)", local->loc.path,
+@@ -8720,10 +8845,6 @@ dht_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ 
+         if (hashed_subvol && (hashed_subvol != subvol)) {
+             /* Create the linkto file and then the data file */
+-            local->params = dict_ref(params);
+-            local->flags = flags;
+-            local->mode = mode;
+-            local->umask = umask;
+             local->cached_subvol = subvol;
+             local->hashed_subvol = hashed_subvol;
+ 
+@@ -8736,6 +8857,9 @@ dht_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+          * file as we expect a lookup everywhere if there are problems
+          * with the parent layout
+          */
++
++        dht_set_parent_layout_in_dict(loc, this, local);
++
+         STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
+                           subvol->fops->create, &local->loc, flags, mode, umask,
+                           fd, params);
+@@ -8787,11 +8911,6 @@ dht_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+                     goto err;
+                 }
+ 
+-                local->params = dict_ref(params);
+-                local->flags = flags;
+-                local->mode = mode;
+-                local->umask = umask;
+-
+                 loc_wipe(&local->loc);
+ 
+                 ret = dht_build_parent_loc(this, &local->loc, loc, &op_errno);
+diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
+index 8e65111..1b3e826 100644
+--- a/xlators/cluster/dht/src/dht-common.h
++++ b/xlators/cluster/dht/src/dht-common.h
+@@ -1549,4 +1549,10 @@ dht_check_remote_fd_failed_error(dht_local_t *local, int op_ret, int op_errno);
+ int
+ dht_dir_layout_error_check(xlator_t *this, inode_t *inode);
+ 
++int32_t
++dht_create_lock(call_frame_t *frame, xlator_t *subvol);
++
++int
++dht_set_parent_layout_in_dict(loc_t *loc, xlator_t *this, dht_local_t *local);
++
+ #endif /* _DHT_H */
+diff --git a/xlators/protocol/client/src/client-rpc-fops_v2.c b/xlators/protocol/client/src/client-rpc-fops_v2.c
+index 2673b6e..613dda8 100644
+--- a/xlators/protocol/client/src/client-rpc-fops_v2.c
++++ b/xlators/protocol/client/src/client-rpc-fops_v2.c
+@@ -2094,11 +2094,12 @@ client4_0_create_cbk(struct rpc_req *req, struct iovec *iov, int count,
+         goto out;
+     }
+ 
++    ret = client_post_create_v2(this, &rsp, &stbuf, &preparent, &postparent,
++                                local, &xdata);
++    if (ret < 0)
++        goto out;
++
+     if (-1 != rsp.op_ret) {
+-        ret = client_post_create_v2(this, &rsp, &stbuf, &preparent, &postparent,
+-                                    local, &xdata);
+-        if (ret < 0)
+-            goto out;
+         ret = client_add_fd_to_saved_fds(frame->this, fd, &local->loc,
+                                          local->flags, rsp.fd, 0);
+         if (ret) {
+diff --git a/xlators/storage/posix/src/posix-entry-ops.c b/xlators/storage/posix/src/posix-entry-ops.c
+index bea0bbf..65650b3 100644
+--- a/xlators/storage/posix/src/posix-entry-ops.c
++++ b/xlators/storage/posix/src/posix-entry-ops.c
+@@ -2070,6 +2070,8 @@ posix_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+     gf_boolean_t entry_created = _gf_false, gfid_set = _gf_false;
+     mode_t mode_bit = 0;
+ 
++    dict_t *xdata_rsp = dict_ref(xdata);
++
+     DECLARE_OLD_FS_ID_VAR;
+ 
+     VALIDATE_OR_GOTO(frame, out);
+@@ -2118,6 +2120,28 @@ posix_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+         was_present = 0;
+     }
+ 
++    if (!was_present) {
++        if (posix_is_layout_stale(xdata, par_path, this)) {
++            op_ret = -1;
++            op_errno = EIO;
++            if (!xdata_rsp) {
++                xdata_rsp = dict_new();
++                if (!xdata_rsp) {
++                    op_errno = ENOMEM;
++                    goto out;
++                }
++            }
++
++            if (dict_set_int32_sizen(xdata_rsp, GF_PREOP_CHECK_FAILED, 1) ==
++                -1) {
++                gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_DICT_SET_FAILED,
++                       "setting key %s in dict failed", GF_PREOP_CHECK_FAILED);
++            }
++
++            goto out;
++        }
++    }
++
+     if (priv->o_direct)
+         _flags |= O_DIRECT;
+ 
+@@ -2239,7 +2263,10 @@ out:
+ 
+     STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd,
+                         (loc) ? loc->inode : NULL, &stbuf, &preparent,
+-                        &postparent, xdata);
++                        &postparent, xdata_rsp);
++
++    if (xdata_rsp)
++        dict_unref(xdata_rsp);
+ 
+     return 0;
+ }
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 35dd3b6..2c27d22 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -3559,3 +3559,79 @@ posix_update_iatt_buf(struct iatt *buf, int fd, char *loc, dict_t *xattr_req)
+         }
+     }
+ }
++
++gf_boolean_t
++posix_is_layout_stale(dict_t *xdata, char *par_path, xlator_t *this)
++{
++    int op_ret = 0;
++    ssize_t size = 0;
++    char value_buf[4096] = {
++        0,
++    };
++    gf_boolean_t have_val = _gf_false;
++    data_t *arg_data = NULL;
++    char *xattr_name = NULL;
++    gf_boolean_t is_stale = _gf_false;
++
++    op_ret = dict_get_str_sizen(xdata, GF_PREOP_PARENT_KEY, &xattr_name);
++    if (xattr_name == NULL) {
++        op_ret = 0;
++        goto out;
++    }
++
++    arg_data = dict_get(xdata, xattr_name);
++    if (!arg_data) {
++        op_ret = 0;
++        goto out;
++    }
++
++    size = sys_lgetxattr(par_path, xattr_name, value_buf,
++                         sizeof(value_buf) - 1);
++
++    if (size >= 0) {
++        have_val = _gf_true;
++    } else {
++        if (errno == ERANGE) {
++            gf_msg(this->name, GF_LOG_INFO, errno, P_MSG_PREOP_CHECK_FAILED,
++                   "getxattr on key (%s) path (%s) failed due to"
++                   " buffer overflow",
++                   xattr_name, par_path);
++            size = sys_lgetxattr(par_path, xattr_name, NULL, 0);
++        }
++        if (size < 0) {
++            op_ret = -1;
++            gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_PREOP_CHECK_FAILED,
++                   "getxattr on key (%s)  failed, path : %s", xattr_name,
++                   par_path);
++            goto out;
++        }
++    }
++
++    if (!have_val) {
++        size = sys_lgetxattr(par_path, xattr_name, value_buf, size);
++        if (size < 0) {
++            gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_PREOP_CHECK_FAILED,
++                   "getxattr on key (%s) failed (%s)", xattr_name,
++                   strerror(errno));
++            goto out;
++        }
++    }
++
++    if ((arg_data->len != size) || (memcmp(arg_data->data, value_buf, size))) {
++        gf_msg(this->name, GF_LOG_INFO, EIO, P_MSG_PREOP_CHECK_FAILED,
++               "failing preop as on-disk xattr value differs from argument "
++               "value for key %s",
++               xattr_name);
++        op_ret = -1;
++    }
++
++out:
++    dict_del_sizen(xdata, xattr_name);
++    dict_del_sizen(xdata, GF_PREOP_PARENT_KEY);
++
++    if (op_ret == -1) {
++        is_stale = _gf_true;
++    }
++
++    return is_stale;
++}
+diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
+index dd51062..ac9d83c 100644
+--- a/xlators/storage/posix/src/posix.h
++++ b/xlators/storage/posix/src/posix.h
+@@ -671,4 +671,8 @@ posix_spawn_ctx_janitor_thread(xlator_t *this);
+ 
+ void
+ posix_update_iatt_buf(struct iatt *buf, int fd, char *loc, dict_t *xdata);
++
++gf_boolean_t
++posix_is_layout_stale(dict_t *xdata, char *par_path, xlator_t *this);
++
+ #endif /* _POSIX_H */
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch b/SOURCES/0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
new file mode 100644
index 0000000..1954e6a
--- /dev/null
+++ b/SOURCES/0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
@@ -0,0 +1,72 @@
+From 63cfdd987b1dfbf97486f0f884380faee0ae25d0 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Wed, 4 Sep 2019 11:27:30 +0530
+Subject: [PATCH 416/449] tests: fix spurious failure of
+ bug-1402841.t-mt-dir-scan-race.t
+
+Upstream patch: https://review.gluster.org/23352
+
+Problem:
+Since commit 600ba94183333c4af9b4a09616690994fd528478, shd starts
+healing as soon as it is toggled from disabled to enabled. This was
+causing the following line in the .t to fail on a 'fast' machine (always
+on my laptop and sometimes on the jenkins slaves).
+
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+
+because by the time shd was disabled, the heal had already completed.
+
+Fix:
+Increase the no. of files to be healed and make it a variable called
+FILE_COUNT, in case we need to bump it up further as the machines
+become even faster. Also create pending metadata heals to increase the
+time taken to heal a file.
+
+>fixes: bz#1748744
+>Change-Id: I5a26b08e45b8c19bce3c01ce67bdcc28ed48198d
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+
+BUG: 1844359
+Change-Id: Ie3676c6c2c27e7574b958d2eaac23801dfaed3a9
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202481
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
+index 6351ba2..a1b9a85 100755
+--- a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
++++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
+@@ -3,6 +3,8 @@
+ . $(dirname $0)/../../volume.rc
+ cleanup;
+ 
++FILE_COUNT=500
++
+ TEST glusterd
+ TEST pidof glusterd
+ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+@@ -11,15 +13,14 @@ TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
+ TEST $CLI volume start $V0
+ 
+ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+-touch $M0/file{1..200}
+-
++for i in `seq 1 $FILE_COUNT`;  do touch $M0/file$i; done
+ TEST kill_brick $V0 $H0 $B0/${V0}1
+-for i in {1..200}; do echo hello>$M0/file$i; done
++for i in `seq 1 $FILE_COUNT`; do echo hello>$M0/file$i; chmod -x $M0/file$i; done
+ TEST $CLI volume start $V0 force
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+ 
+-EXPECT "200" get_pending_heal_count $V0
++EXPECT "$FILE_COUNT" get_pending_heal_count $V0
+ TEST $CLI volume set $V0 self-heal-daemon on
+ 
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0417-events-fix-IPv6-memory-corruption.patch b/SOURCES/0417-events-fix-IPv6-memory-corruption.patch
new file mode 100644
index 0000000..cefb5bf
--- /dev/null
+++ b/SOURCES/0417-events-fix-IPv6-memory-corruption.patch
@@ -0,0 +1,188 @@
+From 5e231ceb35bb763d6fafc7c3efe1c3c582929cc2 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Tue, 14 Jan 2020 13:28:47 +0100
+Subject: [PATCH 417/449] events: fix IPv6 memory corruption
+
+When an event was generated and the target host was resolved to an IPv6
+address, there was a memory overflow when that address was copied to a
+fixed IPv4 structure (IPv6 addresses are longer than IPv4 ones).
+
+This fix correctly handles both IPv4 and IPv6 addresses returned by
+getaddrinfo().
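+
+A hedged, self-contained sketch of the same technique in plain C (not
+the events.c code itself): resolve with AF_UNSPEC and pass the
+resulting ai_addr/ai_addrlen straight to sendto(), so IPv4 and IPv6
+take the same path.
+
+    #include <netdb.h>
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+
+    /* Send one UDP datagram to host:port, IPv4 or IPv6 alike. */
+    static int send_event(const char *host, const char *port,
+                          const char *msg)
+    {
+        struct addrinfo hints, *res;
+        int sock, ret = -1;
+
+        memset(&hints, 0, sizeof(hints));
+        hints.ai_family = AF_UNSPEC;    /* v4 and v6 */
+        hints.ai_socktype = SOCK_DGRAM;
+
+        if (getaddrinfo(host, port, &hints, &res) != 0)
+            return -1;
+        sock = socket(res->ai_family, res->ai_socktype, 0);
+        if (sock >= 0) {
+            /* ai_addr/ai_addrlen fit whichever family resolved */
+            if (sendto(sock, msg, strlen(msg), 0, res->ai_addr,
+                       res->ai_addrlen) > 0)
+                ret = 0;
+            close(sock);
+        }
+        freeaddrinfo(res);
+        return ret;
+    }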
+
+Backport of:
+> Upstream-patch-link: https://review.gluster.org/24014
+> Change-Id: I5864a0c6e6f1b405bd85988529570140cf23b250
+> Fixes: bz#1790870
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1792873
+Change-Id: I5864a0c6e6f1b405bd85988529570140cf23b250
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202486
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/events.c | 56 +++++++++++++----------------------------------
+ 1 file changed, 15 insertions(+), 41 deletions(-)
+
+diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
+index 4e2f8f9..6d1e383 100644
+--- a/libglusterfs/src/events.c
++++ b/libglusterfs/src/events.c
+@@ -34,7 +34,6 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     int ret = 0;
+     int sock = -1;
+     char *eventstr = NULL;
+-    struct sockaddr_in server;
+     va_list arguments;
+     char *msg = NULL;
+     glusterfs_ctx_t *ctx = NULL;
+@@ -42,11 +41,10 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     struct addrinfo hints;
+     struct addrinfo *result = NULL;
+     xlator_t *this = THIS;
+-    int sin_family = AF_INET;
+     char *volfile_server_transport = NULL;
+ 
+     /* Global context */
+-    ctx = THIS->ctx;
++    ctx = this->ctx;
+ 
+     if (event < 0 || event >= EVENT_LAST) {
+         ret = EVENT_ERROR_INVALID_INPUTS;
+@@ -60,48 +58,31 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+         goto out;
+     }
+ 
+-    memset(&hints, 0, sizeof(hints));
+-    hints.ai_family = AF_UNSPEC;
+-
+     if (ctx) {
+         volfile_server_transport = ctx->cmd_args.volfile_server_transport;
+     }
+-
+     if (!volfile_server_transport) {
+         volfile_server_transport = "tcp";
+     }
+-    /* Get Host name to send message */
++
++    /* host = NULL returns localhost */
++    host = NULL;
+     if (ctx && ctx->cmd_args.volfile_server &&
+         (strcmp(volfile_server_transport, "unix"))) {
+         /* If it is client code then volfile_server is set
+            use that information to push the events. */
+-        if ((getaddrinfo(ctx->cmd_args.volfile_server, NULL, &hints,
+-                         &result)) != 0) {
+-            ret = EVENT_ERROR_RESOLVE;
+-            goto out;
+-        }
+-
+-        if (get_ip_from_addrinfo(result, &host) == NULL) {
+-            ret = EVENT_ERROR_RESOLVE;
+-            goto out;
+-        }
+-
+-        sin_family = result->ai_family;
+-    } else {
+-        /* Localhost, Use the defined IP for localhost */
+-        host = gf_strdup(EVENT_HOST);
++        host = ctx->cmd_args.volfile_server;
+     }
+ 
+-    /* Socket Configurations */
+-    server.sin_family = sin_family;
+-    server.sin_port = htons(EVENT_PORT);
+-    ret = inet_pton(server.sin_family, host, &server.sin_addr);
+-    if (ret <= 0) {
+-        gf_msg(this->name, GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+-               "inet_pton failed with return code %d", ret);
++    memset(&hints, 0, sizeof(hints));
++    hints.ai_family = AF_UNSPEC;
++    hints.ai_socktype = SOCK_DGRAM;
++    hints.ai_flags = AI_ADDRCONFIG;
++
++    if ((getaddrinfo(host, TOSTRING(EVENT_PORT), &hints, &result)) != 0) {
++        ret = EVENT_ERROR_RESOLVE;
+         goto out;
+     }
+-    memset(&server.sin_zero, '\0', sizeof(server.sin_zero));
+ 
+     va_start(arguments, fmt);
+     ret = gf_vasprintf(&msg, fmt, arguments);
+@@ -113,15 +94,15 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     }
+ 
+     ret = gf_asprintf(&eventstr, "%u %d %s", (unsigned)time(NULL), event, msg);
+-
++    GF_FREE(msg);
+     if (ret <= 0) {
+         ret = EVENT_ERROR_MSG_FORMAT;
+         goto out;
+     }
+ 
+     /* Send Message */
+-    if (sendto(sock, eventstr, strlen(eventstr), 0, (struct sockaddr *)&server,
+-               sizeof(server)) <= 0) {
++    if (sendto(sock, eventstr, strlen(eventstr), 0, result->ai_addr,
++               result->ai_addrlen) <= 0) {
+         ret = EVENT_ERROR_SEND;
+         goto out;
+     }
+@@ -133,17 +114,10 @@ out:
+         sys_close(sock);
+     }
+ 
+-    /* Allocated by gf_vasprintf */
+-    if (msg)
+-        GF_FREE(msg);
+-
+     /* Allocated by gf_asprintf */
+     if (eventstr)
+         GF_FREE(eventstr);
+ 
+-    if (host)
+-        GF_FREE(host);
+-
+     if (result)
+         freeaddrinfo(result);
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0418-md-cache-avoid-clearing-cache-when-not-necessary.patch b/SOURCES/0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
new file mode 100644
index 0000000..45622d9
--- /dev/null
+++ b/SOURCES/0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
@@ -0,0 +1,461 @@
+From 7ad8c03a28fca67150972cda964ebe9233766b54 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Mon, 30 Mar 2020 11:09:39 +0200
+Subject: [PATCH 418/449] md-cache: avoid clearing cache when not necessary
+
+mdc_inode_xatt_set() blindly cleared the current cache when the dict
+was not NULL, even if no xattr was requested.
+
+This patch fixes this by only calling mdc_inode_xatt_set() when we have
+explicitly requested something to cache.
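+
+The shape of the fix, as a hedged standalone C sketch (the names are
+hypothetical, not the md-cache API): the request-preparation step
+records whether anything was actually asked for, and the reply path
+only touches the cache when that flag is set.
+
+    #include <stdbool.h>
+    #include <stddef.h>
+
+    struct request {
+        bool update_cache; /* true only if xattrs were requested */
+    };
+
+    static void cache_store(void *xattrs) { (void)xattrs; }
+
+    /* Reply path: consult the flag before clearing/updating the
+     * cache, instead of acting whenever a dict is present. */
+    static void on_reply(struct request *req, void *xattrs)
+    {
+        if (req->update_cache && xattrs != NULL)
+            cache_store(xattrs);
+    }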
+
+Backport of:
+> Upstream-patch-link: https://review.gluster.org/24267
+> Change-Id: Idc91a4693f1ff39f7059acde26682ccc361b947d
+> Fixes: #1140
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1815434
+Change-Id: Idc91a4693f1ff39f7059acde26682ccc361b947d
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202487
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/md-cache/src/md-cache.c | 165 ++++++++++++++++------------
+ 1 file changed, 93 insertions(+), 72 deletions(-)
+
+diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c
+index a6b363f..bbbee3b 100644
+--- a/xlators/performance/md-cache/src/md-cache.c
++++ b/xlators/performance/md-cache/src/md-cache.c
+@@ -133,6 +133,7 @@ struct mdc_local {
+     char *key;
+     dict_t *xattr;
+     uint64_t incident_time;
++    bool update_cache;
+ };
+ 
+ int
+@@ -969,7 +970,7 @@ out:
+     return ret;
+ }
+ 
+-void
++static bool
+ mdc_load_reqs(xlator_t *this, dict_t *dict)
+ {
+     struct mdc_conf *conf = this->private;
+@@ -978,6 +979,7 @@ mdc_load_reqs(xlator_t *this, dict_t *dict)
+     char *tmp = NULL;
+     char *tmp1 = NULL;
+     int ret = 0;
++    bool loaded = false;
+ 
+     tmp1 = conf->mdc_xattr_str;
+     if (!tmp1)
+@@ -995,13 +997,17 @@ mdc_load_reqs(xlator_t *this, dict_t *dict)
+             conf->mdc_xattr_str = NULL;
+             gf_msg("md-cache", GF_LOG_ERROR, 0, MD_CACHE_MSG_NO_XATTR_CACHE,
+                    "Disabled cache for xattrs, dict_set failed");
++            goto out;
+         }
+         pattern = strtok_r(NULL, ",", &tmp);
+     }
+ 
+-    GF_FREE(mdc_xattr_str);
++    loaded = true;
++
+ out:
+-    return;
++    GF_FREE(mdc_xattr_str);
++
++    return loaded;
+ }
+ 
+ struct checkpair {
+@@ -1092,6 +1098,25 @@ err:
+     return ret;
+ }
+ 
++static dict_t *
++mdc_prepare_request(xlator_t *this, mdc_local_t *local, dict_t *xdata)
++{
++    if (xdata == NULL) {
++        xdata = dict_new();
++        if (xdata == NULL) {
++            local->update_cache = false;
++
++            return NULL;
++        }
++    } else {
++        dict_ref(xdata);
++    }
++
++    local->update_cache = mdc_load_reqs(this, xdata);
++
++    return xdata;
++}
++
+ int
+ mdc_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+                int32_t op_ret, int32_t op_errno, struct statvfs *buf,
+@@ -1201,7 +1226,9 @@ mdc_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ 
+     if (local->loc.inode) {
+         mdc_inode_iatt_set(this, local->loc.inode, stbuf, local->incident_time);
+-        mdc_inode_xatt_set(this, local->loc.inode, dict);
++        if (local->update_cache) {
++            mdc_inode_xatt_set(this, local->loc.inode, dict);
++        }
+     }
+ out:
+     MDC_STACK_UNWIND(lookup, frame, op_ret, op_errno, inode, stbuf, dict,
+@@ -1220,7 +1247,6 @@ mdc_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+         0,
+     };
+     dict_t *xattr_rsp = NULL;
+-    dict_t *xattr_alloc = NULL;
+     mdc_local_t *local = NULL;
+     struct mdc_conf *conf = this->private;
+ 
+@@ -1271,18 +1297,18 @@ mdc_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+     return 0;
+ 
+ uncached:
+-    if (!xdata)
+-        xdata = xattr_alloc = dict_new();
+-    if (xdata)
+-        mdc_load_reqs(this, xdata);
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     STACK_WIND(frame, mdc_lookup_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->lookup, loc, xdata);
+ 
+     if (xattr_rsp)
+         dict_unref(xattr_rsp);
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ }
+ 
+@@ -1305,7 +1331,9 @@ mdc_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+     }
+ 
+     mdc_inode_iatt_set(this, local->loc.inode, buf, local->incident_time);
+-    mdc_inode_xatt_set(this, local->loc.inode, xdata);
++    if (local->update_cache) {
++        mdc_inode_xatt_set(this, local->loc.inode, xdata);
++    }
+ 
+ out:
+     MDC_STACK_UNWIND(stat, frame, op_ret, op_errno, buf, xdata);
+@@ -1319,7 +1347,6 @@ mdc_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+     int ret;
+     struct iatt stbuf;
+     mdc_local_t *local = NULL;
+-    dict_t *xattr_alloc = NULL;
+     struct mdc_conf *conf = this->private;
+ 
+     local = mdc_local_get(frame, loc->inode);
+@@ -1343,17 +1370,16 @@ mdc_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+     return 0;
+ 
+ uncached:
+-    if (!xdata)
+-        xdata = xattr_alloc = dict_new();
+-    if (xdata)
+-        mdc_load_reqs(this, xdata);
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     GF_ATOMIC_INC(conf->mdc_counter.stat_miss);
+     STACK_WIND(frame, mdc_stat_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->stat, loc, xdata);
+ 
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ }
+ 
+@@ -1376,7 +1402,9 @@ mdc_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+     }
+ 
+     mdc_inode_iatt_set(this, local->fd->inode, buf, local->incident_time);
+-    mdc_inode_xatt_set(this, local->fd->inode, xdata);
++    if (local->update_cache) {
++        mdc_inode_xatt_set(this, local->fd->inode, xdata);
++    }
+ 
+ out:
+     MDC_STACK_UNWIND(fstat, frame, op_ret, op_errno, buf, xdata);
+@@ -1390,7 +1418,6 @@ mdc_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+     int ret;
+     struct iatt stbuf;
+     mdc_local_t *local = NULL;
+-    dict_t *xattr_alloc = NULL;
+     struct mdc_conf *conf = this->private;
+ 
+     local = mdc_local_get(frame, fd->inode);
+@@ -1409,17 +1436,16 @@ mdc_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+     return 0;
+ 
+ uncached:
+-    if (!xdata)
+-        xdata = xattr_alloc = dict_new();
+-    if (xdata)
+-        mdc_load_reqs(this, xdata);
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     GF_ATOMIC_INC(conf->mdc_counter.stat_miss);
+     STACK_WIND(frame, mdc_fstat_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ 
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ }
+ 
+@@ -2393,7 +2419,9 @@ mdc_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+         goto out;
+     }
+ 
+-    mdc_inode_xatt_set(this, local->loc.inode, xdata);
++    if (local->update_cache) {
++        mdc_inode_xatt_set(this, local->loc.inode, xdata);
++    }
+ 
+ out:
+     MDC_STACK_UNWIND(getxattr, frame, op_ret, op_errno, xattr, xdata);
+@@ -2410,7 +2438,6 @@ mdc_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *key,
+     mdc_local_t *local = NULL;
+     dict_t *xattr = NULL;
+     struct mdc_conf *conf = this->private;
+-    dict_t *xattr_alloc = NULL;
+     gf_boolean_t key_satisfied = _gf_true;
+ 
+     local = mdc_local_get(frame, loc->inode);
+@@ -2443,18 +2470,17 @@ mdc_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *key,
+ 
+ uncached:
+     if (key_satisfied) {
+-        if (!xdata)
+-            xdata = xattr_alloc = dict_new();
+-        if (xdata)
+-            mdc_load_reqs(this, xdata);
++        xdata = mdc_prepare_request(this, local, xdata);
+     }
+ 
+     GF_ATOMIC_INC(conf->mdc_counter.xattr_miss);
+     STACK_WIND(frame, mdc_getxattr_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->getxattr, loc, key, xdata);
+ 
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++    if (key_satisfied && (xdata != NULL)) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ }
+ 
+@@ -2481,7 +2507,9 @@ mdc_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+         goto out;
+     }
+ 
+-    mdc_inode_xatt_set(this, local->fd->inode, xdata);
++    if (local->update_cache) {
++        mdc_inode_xatt_set(this, local->fd->inode, xdata);
++    }
+ 
+ out:
+     MDC_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, xattr, xdata);
+@@ -2498,7 +2526,6 @@ mdc_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *key,
+     dict_t *xattr = NULL;
+     int op_errno = ENODATA;
+     struct mdc_conf *conf = this->private;
+-    dict_t *xattr_alloc = NULL;
+     gf_boolean_t key_satisfied = _gf_true;
+ 
+     local = mdc_local_get(frame, fd->inode);
+@@ -2531,18 +2558,17 @@ mdc_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *key,
+ 
+ uncached:
+     if (key_satisfied) {
+-        if (!xdata)
+-            xdata = xattr_alloc = dict_new();
+-        if (xdata)
+-            mdc_load_reqs(this, xdata);
++        xdata = mdc_prepare_request(this, local, xdata);
+     }
+ 
+     GF_ATOMIC_INC(conf->mdc_counter.xattr_miss);
+     STACK_WIND(frame, mdc_fgetxattr_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->fgetxattr, fd, key, xdata);
+ 
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++    if (key_satisfied && (xdata != NULL)) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ }
+ 
+@@ -2752,27 +2778,22 @@ int
+ mdc_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+             dict_t *xdata)
+ {
+-    dict_t *xattr_alloc = NULL;
+     mdc_local_t *local = NULL;
+ 
+     local = mdc_local_get(frame, loc->inode);
+ 
+     loc_copy(&local->loc, loc);
+ 
+-    if (!xdata)
+-        xdata = xattr_alloc = dict_new();
+-
+-    if (xdata) {
+-        /* Tell readdir-ahead to include these keys in xdata when it
+-         * internally issues readdirp() in it's opendir_cbk */
+-        mdc_load_reqs(this, xdata);
+-    }
++    /* Tell readdir-ahead to include these keys in xdata when it
++     * internally issues readdirp() in its opendir_cbk */
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     STACK_WIND(frame, mdc_opendir_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ 
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
+ 
+     return 0;
+ }
+@@ -2800,7 +2821,9 @@ mdc_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+             continue;
+         mdc_inode_iatt_set(this, entry->inode, &entry->d_stat,
+                            local->incident_time);
+-        mdc_inode_xatt_set(this, entry->inode, entry->dict);
++        if (local->update_cache) {
++            mdc_inode_xatt_set(this, entry->inode, entry->dict);
++        }
+     }
+ 
+ unwind:
+@@ -2812,7 +2835,6 @@ int
+ mdc_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+              off_t offset, dict_t *xdata)
+ {
+-    dict_t *xattr_alloc = NULL;
+     mdc_local_t *local = NULL;
+ 
+     local = mdc_local_get(frame, fd->inode);
+@@ -2821,15 +2843,15 @@ mdc_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ 
+     local->fd = fd_ref(fd);
+ 
+-    if (!xdata)
+-        xdata = xattr_alloc = dict_new();
+-    if (xdata)
+-        mdc_load_reqs(this, xdata);
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     STACK_WIND(frame, mdc_readdirp_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->readdirp, fd, size, offset, xdata);
+-    if (xattr_alloc)
+-        dict_unref(xattr_alloc);
++
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
++
+     return 0;
+ out:
+     MDC_STACK_UNWIND(readdirp, frame, -1, ENOMEM, NULL, NULL);
+@@ -2860,7 +2882,6 @@ int
+ mdc_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+             off_t offset, dict_t *xdata)
+ {
+-    int need_unref = 0;
+     mdc_local_t *local = NULL;
+     struct mdc_conf *conf = this->private;
+ 
+@@ -2876,19 +2897,14 @@ mdc_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+         return 0;
+     }
+ 
+-    if (!xdata) {
+-        xdata = dict_new();
+-        need_unref = 1;
+-    }
+-
+-    if (xdata)
+-        mdc_load_reqs(this, xdata);
++    xdata = mdc_prepare_request(this, local, xdata);
+ 
+     STACK_WIND(frame, mdc_readdirp_cbk, FIRST_CHILD(this),
+                FIRST_CHILD(this)->fops->readdirp, fd, size, offset, xdata);
+ 
+-    if (need_unref && xdata)
++    if (xdata != NULL) {
+         dict_unref(xdata);
++    }
+ 
+     return 0;
+ unwind:
+@@ -3468,7 +3484,12 @@ mdc_register_xattr_inval(xlator_t *this)
+         goto out;
+     }
+ 
+-    mdc_load_reqs(this, xattr);
++    if (!mdc_load_reqs(this, xattr)) {
++        gf_msg(this->name, GF_LOG_WARNING, ENOMEM, MD_CACHE_MSG_NO_MEMORY,
++               "failed to populate cache entries");
++        ret = -1;
++        goto out;
++    }
+ 
+     frame = create_frame(this, this->ctx->pool);
+     if (!frame) {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0419-cluster-afr-fix-race-when-bricks-come-up.patch b/SOURCES/0419-cluster-afr-fix-race-when-bricks-come-up.patch
new file mode 100644
index 0000000..ea8c2ea
--- /dev/null
+++ b/SOURCES/0419-cluster-afr-fix-race-when-bricks-come-up.patch
@@ -0,0 +1,129 @@
+From b9b479de2a7fd1c5eefa7aa1142e0a39e0c96ca9 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Sun, 1 Mar 2020 19:49:04 +0100
+Subject: [PATCH 419/449] cluster/afr: fix race when bricks come up
+
+There was a problem when self-heal was sending lookups at the same
+time that one of the bricks was coming up. In this case there was a
+chance that the number of 'up' bricks changed in the middle of sending
+the requests to subvolumes, which caused a discrepancy between the
+expected number of replies and the actual number of sent requests.
+
+This discrepancy caused AFR to continue executing requests before all
+replies were received. Eventually, the frame of the pending request
+was destroyed when the operation terminated, causing a use-after-free
+issue when the answer was finally received.
+
+In theory the same thing could happen in the reverse direction, i.e.
+AFR waits for more replies than requests sent, causing a hang.
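+
+The core of the fix, as a hedged standalone sketch (send_request() is
+a hypothetical stand-in): snapshot the shared up-map once, so the
+reply count and the dispatch loop always agree even if a brick flips
+state in between.
+
+    #include <string.h>
+
+    static void send_request(int i) { (void)i; }
+
+    /* Count and dispatch from a private copy of the shared map. */
+    static int dispatch(const unsigned char *shared_up, int n)
+    {
+        unsigned char up[n];          /* C99 VLA snapshot */
+        int count = 0;
+
+        memcpy(up, shared_up, n);     /* copy exactly once */
+        for (int i = 0; i < n; i++)
+            if (up[i])
+                count++;              /* replies to wait for */
+        for (int i = 0; i < n; i++)
+            if (up[i])
+                send_request(i);
+        return count;
+    }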
+
+Backport of:
+> Upstream-patch-link: https://review.gluster.org/24191
+> Change-Id: I7ed6108554ca379d532efb1a29b2de8085410b70
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+> Fixes: bz#1808875
+
+BUG: 1794663
+Change-Id: I7ed6108554ca379d532efb1a29b2de8085410b70
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202489
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/afr/src/afr-self-heal-common.c | 6 +++---
+ xlators/cluster/afr/src/afr-self-heal-name.c   | 4 +++-
+ xlators/cluster/afr/src/afr-self-heal.h        | 7 +++++--
+ 3 files changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index ce1ea50..d942ccf 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -1869,12 +1869,12 @@ int
+ afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
+                                struct afr_reply *replies)
+ {
+-    afr_private_t *priv = NULL;
++    afr_local_t *local = NULL;
+ 
+-    priv = frame->this->private;
++    local = frame->local;
+ 
+     return afr_selfheal_unlocked_discover_on(frame, inode, gfid, replies,
+-                                             priv->child_up);
++                                             local->child_up);
+ }
+ 
+ unsigned int
+diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
+index 7d4f208..dace071 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-name.c
++++ b/xlators/cluster/afr/src/afr-self-heal-name.c
+@@ -560,13 +560,15 @@ afr_selfheal_name_unlocked_inspect(call_frame_t *frame, xlator_t *this,
+     struct afr_reply *replies = NULL;
+     inode_t *inode = NULL;
+     int first_idx = -1;
++    afr_local_t *local = NULL;
+ 
+     priv = this->private;
++    local = frame->local;
+ 
+     replies = alloca0(sizeof(*replies) * priv->child_count);
+ 
+     inode = afr_selfheal_unlocked_lookup_on(frame, parent, bname, replies,
+-                                            priv->child_up, NULL);
++                                            local->child_up, NULL);
+     if (!inode)
+         return -ENOMEM;
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index 8234cec..f7ecf5d 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -46,13 +46,16 @@
+         afr_local_t *__local = frame->local;                                   \
+         afr_private_t *__priv = frame->this->private;                          \
+         int __i = 0;                                                           \
+-        int __count = AFR_COUNT(list, __priv->child_count);                    \
++        int __count = 0;                                                       \
++        unsigned char *__list = alloca(__priv->child_count);                   \
+                                                                                \
++        memcpy(__list, list, sizeof(*__list) * __priv->child_count);           \
++        __count = AFR_COUNT(__list, __priv->child_count);                      \
+         __local->barrier.waitfor = __count;                                    \
+         afr_local_replies_wipe(__local, __priv);                               \
+                                                                                \
+         for (__i = 0; __i < __priv->child_count; __i++) {                      \
+-            if (!list[__i])                                                    \
++            if (!__list[__i])                                                  \
+                 continue;                                                      \
+             STACK_WIND_COOKIE(frame, rfn, (void *)(long)__i,                   \
+                               __priv->children[__i],                           \
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch b/SOURCES/0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
new file mode 100644
index 0000000..cb27b33
--- /dev/null
+++ b/SOURCES/0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
@@ -0,0 +1,46 @@
+From 42a05c7f8464f529f53bced31a64ea373e16f58b Mon Sep 17 00:00:00 2001
+From: Hari Gowtham <hgowtham@redhat.com>
+Date: Thu, 24 Oct 2019 17:40:44 +0530
+Subject: [PATCH 420/449] scripts: quota_fsck script TypeError: %d format:not
+ dict
+
+Problem: One of the prints in the script has been using
+%i as the format for printing, which doesn't work.
+
+Fix: use %s as the format in place of %i.
+
+>Fixes: bz#1764129
+>Change-Id: I4480ede7bf62906ddedbe5f880a1e89c76946641
+>Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
+>Upstream patch: https://review.gluster.org/#/c/glusterfs/+/23586/
+
+BUG: 1786681
+Change-Id: I4480ede7bf62906ddedbe5f880a1e89c76946641
+Signed-off-by: hari gowtham <hgowtham@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202484
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunny Kumar <sunkumar@redhat.com>
+---
+ extras/quota/quota_fsck.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
+index 485a37a..174f2a2 100755
+--- a/extras/quota/quota_fsck.py
++++ b/extras/quota/quota_fsck.py
+@@ -58,10 +58,10 @@ def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None):
+     elif log_type == QUOTA_SIZE_MISMATCH:
+         print("mismatch")
+         if dir_size is not None:
+-            print('%24s %60s %12s %12s' % ("Size Mismatch", path, 
++            print('%24s %60s %12s %12s' % ("Size Mismatch", path,
+                 xattr_dict, dir_size))
+         else:
+-            print('%-24s %-60s %-12i %-12i' % ("Size Mismatch", path, xattr_dict,
++            print('%-24s %-60s %-12s %-12s' % ("Size Mismatch", path, xattr_dict,
+                    stbuf.st_size))
+ 
+ def size_differs_lot(s1, s2):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0421-Improve-logging-in-EC-client-and-lock-translator.patch b/SOURCES/0421-Improve-logging-in-EC-client-and-lock-translator.patch
new file mode 100644
index 0000000..06f0304
--- /dev/null
+++ b/SOURCES/0421-Improve-logging-in-EC-client-and-lock-translator.patch
@@ -0,0 +1,93 @@
+From 8267e5e97327633bf21fd02df8d52e3a97f0f9ea Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Wed, 4 Dec 2019 17:06:18 +0530
+Subject: [PATCH 421/449] Improve logging in EC, client and lock translator
+
+BUG: 1787294
+> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/23814/
+> Change-Id: I98af8672a25ff9fd9dba91a2e1384719f9155255
+> Fixes: bz#1779760
+
+Change-Id: I5cb04993f12d6248f2349a0c5a9e2c0ceecaf528
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202533
+Tested-by: Ashish Pandey <aspandey@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/ec/src/ec-combine.c  |  5 +++--
+ xlators/cluster/ec/src/ec-common.c   |  2 +-
+ xlators/features/locks/src/inodelk.c | 12 ++++++++----
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-combine.c b/xlators/cluster/ec/src/ec-combine.c
+index c5af2ab..99e5534 100644
+--- a/xlators/cluster/ec/src/ec-combine.c
++++ b/xlators/cluster/ec/src/ec-combine.c
+@@ -179,13 +179,14 @@ ec_iatt_combine(ec_fop_data_t *fop, struct iatt *dst, struct iatt *src,
+                    "links: %u-%u, uid: %u-%u, gid: %u-%u, "
+                    "rdev: %" PRIu64 "-%" PRIu64 ", size: %" PRIu64 "-%" PRIu64
+                    ", "
+-                   "mode: %o-%o)",
++                   "mode: %o-%o), %s",
+                    dst[i].ia_ino, src[i].ia_ino, dst[i].ia_nlink,
+                    src[i].ia_nlink, dst[i].ia_uid, src[i].ia_uid, dst[i].ia_gid,
+                    src[i].ia_gid, dst[i].ia_rdev, src[i].ia_rdev,
+                    dst[i].ia_size, src[i].ia_size,
+                    st_mode_from_ia(dst[i].ia_prot, dst[i].ia_type),
+-                   st_mode_from_ia(src[i].ia_prot, dst[i].ia_type));
++                   st_mode_from_ia(src[i].ia_prot, dst[i].ia_type),
++                   ec_msg_str(fop));
+ 
+             return 0;
+         }
+diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
+index 5cae37b..e580bfb 100644
+--- a/xlators/cluster/ec/src/ec-common.c
++++ b/xlators/cluster/ec/src/ec-common.c
+@@ -2240,7 +2240,7 @@ ec_unlocked(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ 
+     if (op_ret < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, op_errno, EC_MSG_UNLOCK_FAILED,
+-               "entry/inode unlocking failed (%s)", ec_fop_name(link->fop->id));
++               "entry/inode unlocking failed :(%s)", ec_msg_str(link->fop));
+     } else {
+         ec_trace("UNLOCKED", link->fop, "lock=%p", link->lock);
+     }
+diff --git a/xlators/features/locks/src/inodelk.c b/xlators/features/locks/src/inodelk.c
+index df00ede..a9c42f1 100644
+--- a/xlators/features/locks/src/inodelk.c
++++ b/xlators/features/locks/src/inodelk.c
+@@ -502,22 +502,26 @@ static pl_inode_lock_t *
+ __inode_unlock_lock(xlator_t *this, pl_inode_lock_t *lock, pl_dom_list_t *dom)
+ {
+     pl_inode_lock_t *conf = NULL;
++    inode_t *inode = NULL;
++
++    inode = lock->pl_inode->inode;
+ 
+     conf = find_matching_inodelk(lock, dom);
+     if (!conf) {
+         gf_log(this->name, GF_LOG_ERROR,
+                " Matching lock not found for unlock %llu-%llu, by %s "
+-               "on %p",
++               "on %p for gfid:%s",
+                (unsigned long long)lock->fl_start,
+                (unsigned long long)lock->fl_end, lkowner_utoa(&lock->owner),
+-               lock->client);
++               lock->client, inode ? uuid_utoa(inode->gfid) : "UNKNOWN");
+         goto out;
+     }
+     __delete_inode_lock(conf);
+     gf_log(this->name, GF_LOG_DEBUG,
+-           " Matching lock found for unlock %llu-%llu, by %s on %p",
++           " Matching lock found for unlock %llu-%llu, by %s on %p for gfid:%s",
+            (unsigned long long)lock->fl_start, (unsigned long long)lock->fl_end,
+-           lkowner_utoa(&lock->owner), lock->client);
++           lkowner_utoa(&lock->owner), lock->client,
++           inode ? uuid_utoa(inode->gfid) : "UNKNOWN");
+ 
+ out:
+     return conf;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch b/SOURCES/0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
new file mode 100644
index 0000000..400ba67
--- /dev/null
+++ b/SOURCES/0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
@@ -0,0 +1,256 @@
+From 8b11ac1575ef167af2a47a96f7b7ed0f32bb5897 Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Fri, 5 Jun 2020 17:20:04 +0530
+Subject: [PATCH 422/449] cluster/afr: Prioritize ENOSPC over other errors
+
+Backport of: https://review.gluster.org/#/c/glusterfs/+/24477/
+
+Problem:
+In a replicate/arbiter volume if file creations or writes fails on
+quorum number of bricks and on one brick it is due to ENOSPC and
+on other brick it fails for a different reason, it may fail with
+errors other than ENOSPC in some cases.
+
+Fix:
+Prioritize ENOSPC over other lower-priority errors, and do not set
+op_errno in posix_gfid_set if op_ret is 0, to avoid returning an
+errno which can be misinterpreted by __afr_dir_write_finalize().
+
+Also remove the function afr_has_arbiter_fop_cbk_quorum(), which
+might consider a successful reply from a single brick as quorum
+success in some cases, whereas we always need the fop to succeed
+on a quorum number of bricks in an arbiter configuration.
+
+Change-Id: I4dd2bff17e6812bc7c8372130976e365e2407d88
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+BUG: 1837467
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202526
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../bugs/replicate/issue-1254-prioritize-enospc.t  | 80 ++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-common.c               |  4 +-
+ xlators/cluster/afr/src/afr-transaction.c          | 48 +------------
+ xlators/storage/posix/src/posix-helpers.c          |  2 +-
+ 4 files changed, 86 insertions(+), 48 deletions(-)
+ create mode 100644 tests/bugs/replicate/issue-1254-prioritize-enospc.t
+
+diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
+new file mode 100644
+index 0000000..fab94b7
+--- /dev/null
++++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
+@@ -0,0 +1,80 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++cleanup
++
++function create_bricks {
++    TEST truncate -s 100M $B0/brick0
++    TEST truncate -s 100M $B0/brick1
++    TEST truncate -s 20M $B0/brick2
++    LO1=`SETUP_LOOP $B0/brick0`
++    TEST [ $? -eq 0 ]
++    TEST MKFS_LOOP $LO1
++    LO2=`SETUP_LOOP $B0/brick1`
++    TEST [ $? -eq 0 ]
++    TEST MKFS_LOOP $LO2
++    LO3=`SETUP_LOOP $B0/brick2`
++    TEST [ $? -eq 0 ]
++    TEST MKFS_LOOP $LO3
++    TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
++    TEST MOUNT_LOOP $LO1 $B0/${V0}0
++    TEST MOUNT_LOOP $LO2 $B0/${V0}1
++    TEST MOUNT_LOOP $LO3 $B0/${V0}2
++}
++
++function create_files {
++        local i=1
++        while (true)
++        do
++                touch $M0/file$i
++                if [ -e $B0/${V0}2/file$i ];
++                then
++                        ((i++))
++                else
++                        break
++                fi
++        done
++}
++
++TESTS_EXPECTED_IN_LOOP=13
++
++#Arbiter volume: Check for ENOSPC when arbiter brick becomes full#
++TEST glusterd
++create_bricks
++TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume start $V0
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
++
++create_files
++TEST kill_brick $V0 $H0 $B0/${V0}1
++error1=$(touch $M0/file-1 2>&1)
++EXPECT "No space left on device" echo $error1
++error2=$(mkdir $M0/dir-1 2>&1)
++EXPECT "No space left on device" echo $error2
++error3=$((echo "Test" > $M0/file-3) 2>&1)
++EXPECT "No space left on device" echo $error3
++
++cleanup
++
++#Replica-3 volume: Check for ENOSPC when one of the bricks becomes full#
++#Keeping the third brick of lower size to simulate disk full scenario#
++TEST glusterd
++create_bricks
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume start $V0
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
++
++create_files
++TEST kill_brick $V0 $H0 $B0/${V0}1
++error1=$(touch $M0/file-1 2>&1)
++EXPECT "No space left on device" echo $error1
++error2=$(mkdir $M0/dir-1 2>&1)
++EXPECT "No space left on device" echo $error2
++error3=$((cat /dev/zero > $M0/file1) 2>&1)
++EXPECT "No space left on device" echo $error3
++
++cleanup
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 5806556..59710aa 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -2464,7 +2464,7 @@ error:
+  * others in that they must be given higher priority while
+  * returning to the user.
+  *
+- * The hierarchy is ENODATA > ENOENT > ESTALE > others
++ * The hierarchy is ENODATA > ENOENT > ESTALE > ENOSPC > others
+  */
+ 
+ int
+@@ -2476,6 +2476,8 @@ afr_higher_errno(int32_t old_errno, int32_t new_errno)
+         return ENOENT;
+     if (old_errno == ESTALE || new_errno == ESTALE)
+         return ESTALE;
++    if (old_errno == ENOSPC || new_errno == ENOSPC)
++        return ENOSPC;
+ 
+     return new_errno;
+ }
+diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
+index 15f3a7e..8e65ae2 100644
+--- a/xlators/cluster/afr/src/afr-transaction.c
++++ b/xlators/cluster/afr/src/afr-transaction.c
+@@ -514,42 +514,6 @@ afr_compute_pre_op_sources(call_frame_t *frame, xlator_t *this)
+                 local->transaction.pre_op_sources[j] = 0;
+ }
+ 
+-gf_boolean_t
+-afr_has_arbiter_fop_cbk_quorum(call_frame_t *frame)
+-{
+-    afr_local_t *local = NULL;
+-    afr_private_t *priv = NULL;
+-    xlator_t *this = NULL;
+-    gf_boolean_t fop_failed = _gf_false;
+-    unsigned char *pre_op_sources = NULL;
+-    int i = 0;
+-
+-    local = frame->local;
+-    this = frame->this;
+-    priv = this->private;
+-    pre_op_sources = local->transaction.pre_op_sources;
+-
+-    /* If the fop failed on the brick, it is not a source. */
+-    for (i = 0; i < priv->child_count; i++)
+-        if (local->transaction.failed_subvols[i])
+-            pre_op_sources[i] = 0;
+-
+-    switch (AFR_COUNT(pre_op_sources, priv->child_count)) {
+-        case 1:
+-            if (pre_op_sources[ARBITER_BRICK_INDEX])
+-                fop_failed = _gf_true;
+-            break;
+-        case 0:
+-            fop_failed = _gf_true;
+-            break;
+-    }
+-
+-    if (fop_failed)
+-        return _gf_false;
+-
+-    return _gf_true;
+-}
+-
+ void
+ afr_txn_arbitrate_fop(call_frame_t *frame, xlator_t *this)
+ {
+@@ -968,12 +932,8 @@ afr_need_dirty_marking(call_frame_t *frame, xlator_t *this)
+         priv->child_count)
+         return _gf_false;
+ 
+-    if (priv->arbiter_count) {
+-        if (!afr_has_arbiter_fop_cbk_quorum(frame))
+-            need_dirty = _gf_true;
+-    } else if (!afr_has_fop_cbk_quorum(frame)) {
++    if (!afr_has_fop_cbk_quorum(frame))
+         need_dirty = _gf_true;
+-    }
+ 
+     return need_dirty;
+ }
+@@ -1023,12 +983,8 @@ afr_handle_quorum(call_frame_t *frame, xlator_t *this)
+      * no split-brain with the fix. The problem is eliminated completely.
+      */
+ 
+-    if (priv->arbiter_count) {
+-        if (afr_has_arbiter_fop_cbk_quorum(frame))
+-            return;
+-    } else if (afr_has_fop_cbk_quorum(frame)) {
++    if (afr_has_fop_cbk_quorum(frame))
+         return;
+-    }
+ 
+     if (afr_need_dirty_marking(frame, this))
+         goto set_response;
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 2c27d22..949c799 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -1059,7 +1059,7 @@ verify_handle:
+         ret = posix_handle_soft(this, path, loc, uuid_curr, &stat);
+ 
+ out:
+-    if (!(*op_errno))
++    if (ret && !(*op_errno))
+         *op_errno = errno;
+     return ret;
+ }
+-- 
+1.8.3.1
+
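
For reference, the error fold that the patch above installs can be
exercised in isolation. This standalone sketch mirrors the patched
afr_higher_errno() hierarchy (ENODATA > ENOENT > ESTALE > ENOSPC >
others); the main() driver is illustrative only, since the real
callers fold per-brick errors in paths such as
__afr_dir_write_finalize():

    #include <errno.h>
    #include <stdio.h>

    /* Mirror of the patched afr_higher_errno() hierarchy. */
    static int higher_errno(int old_errno, int new_errno)
    {
        if (old_errno == ENODATA || new_errno == ENODATA)
            return ENODATA;
        if (old_errno == ENOENT || new_errno == ENOENT)
            return ENOENT;
        if (old_errno == ESTALE || new_errno == ESTALE)
            return ESTALE;
        if (old_errno == ENOSPC || new_errno == ENOSPC)
            return ENOSPC;
        return new_errno;
    }

    int main(void)
    {
        /* One brick failed with EIO, another with ENOSPC: before the
         * patch the fold could report EIO, now ENOSPC always wins. */
        int per_brick[] = { EIO, ENOSPC };
        int op_errno = per_brick[0];
        for (int i = 1; i < 2; i++)
            op_errno = higher_errno(op_errno, per_brick[i]);
        printf("reported errno: %d (ENOSPC=%d)\n", op_errno, ENOSPC);
        return 0;
    }
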
diff --git a/SOURCES/0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch b/SOURCES/0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
new file mode 100644
index 0000000..6a547ea
--- /dev/null
+++ b/SOURCES/0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
@@ -0,0 +1,128 @@
+From c140d30382306d08eaf2bc5c53e5be26d3e381e1 Mon Sep 17 00:00:00 2001
+From: Kotresh HR <khiremat@redhat.com>
+Date: Mon, 18 Nov 2019 05:24:33 -0500
+Subject: [PATCH 423/449] ctime: Fix ctime inconsistency with utimensat
+
+Problem:
+When touch is used to create a file, the ctime does not match the
+atime and mtime, which ideally should all match. They differ in
+the nanoseconds.
+
+Cause:
+When touch modifies atime or mtime to the current time (UTIME_NOW),
+that time is taken from the kernel. The ctime is also updated then,
+but the time used to update it comes from the utime xlator's own
+clock. Hence the difference in nanoseconds.
+
+Fix:
+When utimensat uses UTIME_NOW, use the current time from the kernel.
+
+>fixes: bz#1773530
+>Change-Id: I9ccfa47dcd39df23396852b4216f1773c49250ce
+>Signed-off-by: Kotresh HR <khiremat@redhat.com>
+
+backport of: https://review.gluster.org/#/c/glusterfs/+/23719/
+BUG: 1761932
+Change-Id: I9ccfa47dcd39df23396852b4216f1773c49250ce
+Signed-off-by: Kotresh HR <khiremat@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202541
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/xlator.h            |  2 ++
+ tests/basic/ctime/ctime-utimesat.t             | 28 ++++++++++++++++++++++++++
+ xlators/features/utime/src/utime-gen-fops-c.py | 10 +++++++++
+ xlators/mount/fuse/src/fuse-bridge.c           |  8 ++++++++
+ 4 files changed, 48 insertions(+)
+ create mode 100644 tests/basic/ctime/ctime-utimesat.t
+
+diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
+index da551e9..db04c4d 100644
+--- a/libglusterfs/src/glusterfs/xlator.h
++++ b/libglusterfs/src/glusterfs/xlator.h
+@@ -35,6 +35,8 @@
+ #define GF_SET_ATTR_ATIME 0x10
+ #define GF_SET_ATTR_MTIME 0x20
+ #define GF_SET_ATTR_CTIME 0x40
++#define GF_ATTR_ATIME_NOW 0x80
++#define GF_ATTR_MTIME_NOW 0x100
+ 
+ #define gf_attr_mode_set(mode) ((mode)&GF_SET_ATTR_MODE)
+ #define gf_attr_uid_set(mode) ((mode)&GF_SET_ATTR_UID)
+diff --git a/tests/basic/ctime/ctime-utimesat.t b/tests/basic/ctime/ctime-utimesat.t
+new file mode 100644
+index 0000000..540e57a
+--- /dev/null
++++ b/tests/basic/ctime/ctime-utimesat.t
+@@ -0,0 +1,28 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.read-after-open off
++TEST $CLI volume set $V0 performance.open-behind off
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $CLI volume set $V0 performance.io-cache off
++
++TEST $CLI volume start $V0
++
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
++
++touch $M0/FILE
++
++atime=$(stat -c "%.X" $M0/FILE)
++EXPECT $atime stat -c "%.Y" $M0/FILE
++EXPECT $atime stat -c "%.Z" $M0/FILE
++
++cleanup
+diff --git a/xlators/features/utime/src/utime-gen-fops-c.py b/xlators/features/utime/src/utime-gen-fops-c.py
+index 8730a51..9fb3e1b 100755
+--- a/xlators/features/utime/src/utime-gen-fops-c.py
++++ b/xlators/features/utime/src/utime-gen-fops-c.py
+@@ -95,6 +95,16 @@ gf_utime_@NAME@ (call_frame_t *frame, xlator_t *this,
+                 frame->root->flags |= MDATA_CTIME;
+         }
+ 
++        if (valid & (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME)) {
++            if (valid & GF_ATTR_ATIME_NOW) {
++                frame->root->ctime.tv_sec = stbuf->ia_atime;
++                frame->root->ctime.tv_nsec = stbuf->ia_atime_nsec;
++            } else if (valid & GF_ATTR_MTIME_NOW) {
++                frame->root->ctime.tv_sec = stbuf->ia_mtime;
++                frame->root->ctime.tv_nsec = stbuf->ia_mtime_nsec;
++            }
++        }
++
+         STACK_WIND (frame, gf_utime_@NAME@_cbk, FIRST_CHILD(this),
+                     FIRST_CHILD(this)->fops->@NAME@, @SHORT_ARGS@);
+         return 0;
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index 6e99053..fdeec49 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -1706,6 +1706,14 @@ fattr_to_gf_set_attr(int32_t valid)
+         gf_valid |= GF_SET_ATTR_CTIME;
+ #endif
+ 
++#if FUSE_KERNEL_MINOR_VERSION >= 9
++    if (valid & FATTR_ATIME_NOW)
++        gf_valid |= GF_ATTR_ATIME_NOW;
++
++    if (valid & FATTR_MTIME_NOW)
++        gf_valid |= GF_ATTR_MTIME_NOW;
++#endif
++
+     if (valid & FATTR_SIZE)
+         gf_valid |= GF_SET_ATTR_SIZE;
+ 
+-- 
+1.8.3.1
+
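
The heart of the fix above is a flag check in the utime xlator: when
the kernel signalled UTIME_NOW, reuse the kernel-supplied timestamp as
the ctime instead of the xlator's own clock. Below is a minimal sketch
using the flag values the patch adds to xlator.h; pick_ctime() and the
struct timespec plumbing are simplifications, not the generated fop
code:

    #include <stdio.h>
    #include <time.h>

    /* Flag values as the patch defines them in xlator.h. */
    #define GF_SET_ATTR_ATIME 0x10
    #define GF_SET_ATTR_MTIME 0x20
    #define GF_ATTR_ATIME_NOW 0x80
    #define GF_ATTR_MTIME_NOW 0x100

    /* If the kernel said UTIME_NOW, reuse its timestamp as the ctime so
     * atime, mtime and ctime match to the nanosecond; otherwise keep the
     * xlator's own notion of "now" (the pre-patch behaviour). */
    static struct timespec pick_ctime(int valid, struct timespec atime,
                                      struct timespec mtime,
                                      struct timespec xlator_now)
    {
        if (valid & (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME)) {
            if (valid & GF_ATTR_ATIME_NOW)
                return atime;
            if (valid & GF_ATTR_MTIME_NOW)
                return mtime;
        }
        return xlator_now;
    }

    int main(void)
    {
        struct timespec kernel_now = { .tv_sec = 100, .tv_nsec = 999 };
        struct timespec xlator_now = { .tv_sec = 100, .tv_nsec = 123 };
        struct timespec c = pick_ctime(GF_SET_ATTR_ATIME | GF_ATTR_ATIME_NOW,
                                       kernel_now, kernel_now, xlator_now);
        printf("ctime.tv_nsec = %ld\n", (long)c.tv_nsec);  /* 999, not 123 */
        return 0;
    }
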
diff --git a/SOURCES/0424-afr-make-heal-info-lockless.patch b/SOURCES/0424-afr-make-heal-info-lockless.patch
new file mode 100644
index 0000000..593fa34
--- /dev/null
+++ b/SOURCES/0424-afr-make-heal-info-lockless.patch
@@ -0,0 +1,884 @@
+From 54d4ea44fec96560aad9c41f7e4f5aad164ffb8b Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Fri, 5 Jun 2020 14:14:15 +0530
+Subject: [PATCH 424/449] afr: make heal info lockless
+
+Changes in locks xlator:
+Added support for per-domain inodelk count requests.
+Caller needs to set GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS key in the
+dict and then set each key with name
+'GLUSTERFS_INODELK_DOM_PREFIX:<domain name>'.
+In the response dict, the xlator will send the per domain count as
+values for each of these keys.
+
+Changes in AFR:
+Replaced afr_selfheal_locked_inspect() with afr_lockless_inspect(). Logic
+has been added to make the latter behave the same as the former, thus
+preserving the current heal info output behaviour.
+
+> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/23771/
+> fixes: bz#1774011
+> Change-Id: Ie9e83c162aa77f44a39c2ba7115de558120ada4d
+
+BUG: 1721355
+Change-Id: I8ed4b504880b19e00068312efd90cd0706787404
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202490
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Karthik Subrahmanya <ksubrahm@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ heal/src/glfs-heal.c                           |  17 +-
+ libglusterfs/src/glusterfs/glusterfs.h         |   2 +
+ xlators/cluster/afr/src/afr-common.c           | 367 +++++++++++--------------
+ xlators/cluster/afr/src/afr-self-heal-common.c |  43 ++-
+ xlators/cluster/afr/src/afr-self-heal.h        |   3 +-
+ xlators/features/locks/src/common.h            |   4 +
+ xlators/features/locks/src/locks.h             |   8 +
+ xlators/features/locks/src/posix.c             | 117 +++++++-
+ 8 files changed, 338 insertions(+), 223 deletions(-)
+
+diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
+index 125b12c..5af9e31 100644
+--- a/heal/src/glfs-heal.c
++++ b/heal/src/glfs-heal.c
+@@ -775,7 +775,8 @@ static int
+ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
+                       uint64_t *offset, num_entries_t *num_entries,
+                       print_status glfsh_print_status,
+-                      gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
++                      gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode,
++                      dict_t *xattr_req)
+ {
+     gf_dirent_t *entry = NULL;
+     gf_dirent_t *tmp = NULL;
+@@ -807,7 +808,7 @@ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
+ 
+         gf_uuid_parse(entry->d_name, gfid);
+         gf_uuid_copy(loc.gfid, gfid);
+-        ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, NULL, NULL);
++        ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, xattr_req, NULL);
+         if (ret) {
+             if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) && (ret == -ENOTCONN))
+                 goto out;
+@@ -876,19 +877,19 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+         if (heal_op == GF_SHD_OP_INDEX_SUMMARY) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_heal_status,
+-                                        ignore, mode);
++                                        ignore, mode, xattr_req);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_spb_status,
+-                                        ignore, mode);
++                                        ignore, mode, xattr_req);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_HEAL_SUMMARY) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_summary_status,
+-                                        ignore, mode);
++                                        ignore, mode, xattr_req);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
+@@ -897,7 +898,7 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+         } else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_heal_status_boolean,
+-                                        ignore, mode);
++                                        ignore, mode, xattr_req);
+             if (ret < 0)
+                 goto out;
+         }
+@@ -951,6 +952,10 @@ glfsh_print_pending_heals_type(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+     int32_t op_errno = 0;
+     gf_boolean_t ignore = _gf_false;
+ 
++    ret = dict_set_str(xattr_req, "index-vgfid", vgfid);
++    if (ret)
++        return ret;
++
+     if (!strcmp(vgfid, GF_XATTROP_DIRTY_GFID))
+         ignore = _gf_true;
+ 
+diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
+index 3b594c0..177a020 100644
+--- a/libglusterfs/src/glusterfs/glusterfs.h
++++ b/libglusterfs/src/glusterfs/glusterfs.h
+@@ -217,6 +217,8 @@ enum gf_internal_fop_indicator {
+ #define GLUSTERFS_POSIXLK_COUNT "glusterfs.posixlk-count"
+ #define GLUSTERFS_PARENT_ENTRYLK "glusterfs.parent-entrylk"
+ #define GLUSTERFS_INODELK_DOM_COUNT "glusterfs.inodelk-dom-count"
++#define GLUSTERFS_INODELK_DOM_PREFIX "glusterfs.inodelk-dom-prefix"
++#define GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS "glusterfs.multi-dom-lk-cnt-req"
+ #define GFID_TO_PATH_KEY "glusterfs.gfid2path"
+ #define GF_XATTR_STIME_PATTERN "trusted.glusterfs.*.stime"
+ #define GF_XATTR_XTIME_PATTERN "trusted.glusterfs.*.xtime"
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 59710aa..c355ec5 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -5908,259 +5908,218 @@ out:
+     return _gf_true;
+ }
+ 
+-int
+-afr_selfheal_locked_metadata_inspect(call_frame_t *frame, xlator_t *this,
+-                                     inode_t *inode, gf_boolean_t *msh,
+-                                     unsigned char *pending)
++static dict_t *
++afr_set_heal_info(char *status)
+ {
++    dict_t *dict = NULL;
+     int ret = -1;
+-    unsigned char *locked_on = NULL;
+-    unsigned char *sources = NULL;
+-    unsigned char *sinks = NULL;
+-    unsigned char *healed_sinks = NULL;
+-    unsigned char *undid_pending = NULL;
+-    struct afr_reply *locked_replies = NULL;
+-
+-    afr_private_t *priv = this->private;
+ 
+-    locked_on = alloca0(priv->child_count);
+-    sources = alloca0(priv->child_count);
+-    sinks = alloca0(priv->child_count);
+-    healed_sinks = alloca0(priv->child_count);
+-    undid_pending = alloca0(priv->child_count);
++    dict = dict_new();
++    if (!dict) {
++        ret = -ENOMEM;
++        goto out;
++    }
+ 
+-    locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
++    ret = dict_set_dynstr_sizen(dict, "heal-info", status);
++    if (ret)
++        gf_msg("", GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
++               "Failed to set heal-info key to "
++               "%s",
++               status);
++out:
++    /* Any error other than EINVAL, dict_set_dynstr frees status */
++    if (ret == -ENOMEM || ret == -EINVAL) {
++        GF_FREE(status);
++    }
+ 
+-    ret = afr_selfheal_inodelk(frame, this, inode, this->name, LLONG_MAX - 1, 0,
+-                               locked_on);
+-    {
+-        if (ret == 0) {
+-            /* Not a single lock */
+-            ret = -afr_final_errno(frame->local, priv);
+-            if (ret == 0)
+-                ret = -ENOTCONN; /* all invalid responses */
+-            goto out;
+-        }
+-        ret = __afr_selfheal_metadata_prepare(
+-            frame, this, inode, locked_on, sources, sinks, healed_sinks,
+-            undid_pending, locked_replies, pending);
+-        *msh = afr_decide_heal_info(priv, sources, ret);
++    if (ret && dict) {
++        dict_unref(dict);
++        dict = NULL;
+     }
+-    afr_selfheal_uninodelk(frame, this, inode, this->name, LLONG_MAX - 1, 0,
+-                           locked_on);
+-out:
+-    if (locked_replies)
+-        afr_replies_wipe(locked_replies, priv->child_count);
+-    return ret;
++    return dict;
+ }
+ 
+-int
+-afr_selfheal_locked_data_inspect(call_frame_t *frame, xlator_t *this, fd_t *fd,
+-                                 gf_boolean_t *dsh, unsigned char *pflag)
++static gf_boolean_t
++afr_is_dirty_count_non_unary_for_txn(xlator_t *this, struct afr_reply *replies,
++                                     afr_transaction_type type)
+ {
+-    int ret = -1;
+-    unsigned char *data_lock = NULL;
+-    unsigned char *sources = NULL;
+-    unsigned char *sinks = NULL;
+-    unsigned char *healed_sinks = NULL;
+-    unsigned char *undid_pending = NULL;
+-    afr_private_t *priv = NULL;
+-    struct afr_reply *locked_replies = NULL;
+-    inode_t *inode = fd->inode;
++    afr_private_t *priv = this->private;
++    int *dirty = alloca0(priv->child_count * sizeof(int));
++    int i = 0;
+ 
+-    priv = this->private;
+-    data_lock = alloca0(priv->child_count);
+-    sources = alloca0(priv->child_count);
+-    sinks = alloca0(priv->child_count);
+-    healed_sinks = alloca0(priv->child_count);
+-    undid_pending = alloca0(priv->child_count);
++    afr_selfheal_extract_xattr(this, replies, type, dirty, NULL);
++    for (i = 0; i < priv->child_count; i++) {
++        if (dirty[i] > 1)
++            return _gf_true;
++    }
+ 
+-    locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
++    return _gf_false;
++}
+ 
+-    ret = afr_selfheal_inodelk(frame, this, inode, this->name, 0, 0, data_lock);
+-    {
+-        if (ret == 0) {
+-            ret = -afr_final_errno(frame->local, priv);
+-            if (ret == 0)
+-                ret = -ENOTCONN; /* all invalid responses */
+-            goto out;
+-        }
+-        ret = __afr_selfheal_data_prepare(frame, this, inode, data_lock,
+-                                          sources, sinks, healed_sinks,
+-                                          undid_pending, locked_replies, pflag);
+-        *dsh = afr_decide_heal_info(priv, sources, ret);
++static gf_boolean_t
++afr_is_dirty_count_non_unary(xlator_t *this, struct afr_reply *replies,
++                             ia_type_t ia_type)
++{
++    gf_boolean_t data_chk = _gf_false;
++    gf_boolean_t mdata_chk = _gf_false;
++    gf_boolean_t entry_chk = _gf_false;
++
++    switch (ia_type) {
++        case IA_IFDIR:
++            mdata_chk = _gf_true;
++            entry_chk = _gf_true;
++            break;
++        case IA_IFREG:
++            mdata_chk = _gf_true;
++            data_chk = _gf_true;
++            break;
++        default:
++            /*IA_IFBLK, IA_IFCHR, IA_IFLNK, IA_IFIFO, IA_IFSOCK*/
++            mdata_chk = _gf_true;
++            break;
+     }
+-    afr_selfheal_uninodelk(frame, this, inode, this->name, 0, 0, data_lock);
+-out:
+-    if (locked_replies)
+-        afr_replies_wipe(locked_replies, priv->child_count);
+-    return ret;
++
++    if (data_chk && afr_is_dirty_count_non_unary_for_txn(
++                        this, replies, AFR_DATA_TRANSACTION)) {
++        return _gf_true;
++    } else if (mdata_chk && afr_is_dirty_count_non_unary_for_txn(
++                                this, replies, AFR_METADATA_TRANSACTION)) {
++        return _gf_true;
++    } else if (entry_chk && afr_is_dirty_count_non_unary_for_txn(
++                                this, replies, AFR_ENTRY_TRANSACTION)) {
++        return _gf_true;
++    }
++
++    return _gf_false;
+ }
+ 
+-int
+-afr_selfheal_locked_entry_inspect(call_frame_t *frame, xlator_t *this,
+-                                  inode_t *inode, gf_boolean_t *esh,
+-                                  unsigned char *pflag)
++static int
++afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
++                       char *index_vgfid, ia_type_t ia_type, gf_boolean_t *esh,
++                       gf_boolean_t *dsh, gf_boolean_t *msh)
+ {
+     int ret = -1;
+-    int source = -1;
++    GF_UNUSED int ret1 = 0;
++    int i = 0;
++    int io_domain_lk_count = 0;
++    int shd_domain_lk_count = 0;
+     afr_private_t *priv = NULL;
+-    unsigned char *locked_on = NULL;
+-    unsigned char *data_lock = NULL;
+-    unsigned char *sources = NULL;
+-    unsigned char *sinks = NULL;
+-    unsigned char *healed_sinks = NULL;
+-    struct afr_reply *locked_replies = NULL;
+-    gf_boolean_t granular_locks = _gf_false;
++    char *key1 = NULL;
++    char *key2 = NULL;
+ 
+     priv = this->private;
+-    granular_locks = priv->granular_locks; /*Assign to local variable so that
+-                                             reconfigure doesn't change this
+-                                             value between locking and unlocking
+-                                             below*/
+-    locked_on = alloca0(priv->child_count);
+-    data_lock = alloca0(priv->child_count);
+-    sources = alloca0(priv->child_count);
+-    sinks = alloca0(priv->child_count);
+-    healed_sinks = alloca0(priv->child_count);
+-
+-    locked_replies = alloca0(sizeof(*locked_replies) * priv->child_count);
++    key1 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
++                   strlen(this->name));
++    key2 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
++                   strlen(priv->sh_domain));
++    sprintf(key1, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, this->name);
++    sprintf(key2, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, priv->sh_domain);
+ 
+-    if (!granular_locks) {
+-        ret = afr_selfheal_tryentrylk(frame, this, inode, priv->sh_domain, NULL,
+-                                      locked_on);
+-    }
+-    {
+-        if (!granular_locks && ret == 0) {
+-            ret = -afr_final_errno(frame->local, priv);
+-            if (ret == 0)
+-                ret = -ENOTCONN; /* all invalid responses */
+-            goto out;
++    for (i = 0; i < priv->child_count; i++) {
++        if ((replies[i].valid != 1) || (replies[i].op_ret != 0))
++            continue;
++        if (!io_domain_lk_count) {
++            ret1 = dict_get_int32(replies[i].xdata, key1, &io_domain_lk_count);
+         }
++        if (!shd_domain_lk_count) {
++            ret1 = dict_get_int32(replies[i].xdata, key2, &shd_domain_lk_count);
++        }
++    }
+ 
+-        ret = afr_selfheal_entrylk(frame, this, inode, this->name, NULL,
+-                                   data_lock);
+-        {
+-            if (ret == 0) {
+-                ret = -afr_final_errno(frame->local, priv);
+-                if (ret == 0)
+-                    ret = -ENOTCONN;
+-                /* all invalid responses */
+-                goto unlock;
+-            }
+-            ret = __afr_selfheal_entry_prepare(frame, this, inode, data_lock,
+-                                               sources, sinks, healed_sinks,
+-                                               locked_replies, &source, pflag);
+-            if ((ret == 0) && (*pflag & PFLAG_SBRAIN))
+-                ret = -EIO;
+-            *esh = afr_decide_heal_info(priv, sources, ret);
++    if (!strcmp(index_vgfid, GF_XATTROP_INDEX_GFID)) {
++        if (shd_domain_lk_count) {
++            ret = -EAGAIN; /*For 'possibly-healing'. */
++        } else {
++            ret = 0; /*needs heal. Just set a non -ve value so that it is
++                       assumed as the source index.*/
++        }
++    } else if (!strcmp(index_vgfid, GF_XATTROP_DIRTY_GFID)) {
++        if ((afr_is_dirty_count_non_unary(this, replies, ia_type)) ||
++            (!io_domain_lk_count)) {
++            /* Needs heal. */
++            ret = 0;
++        } else {
++            /* No heal needed. */
++            *dsh = *esh = *msh = 0;
+         }
+-        afr_selfheal_unentrylk(frame, this, inode, this->name, NULL, data_lock,
+-                               NULL);
+     }
+-unlock:
+-    if (!granular_locks)
+-        afr_selfheal_unentrylk(frame, this, inode, priv->sh_domain, NULL,
+-                               locked_on, NULL);
+-out:
+-    if (locked_replies)
+-        afr_replies_wipe(locked_replies, priv->child_count);
+     return ret;
+ }
+ 
++/*return EIO, EAGAIN or pending*/
+ int
+-afr_selfheal_locked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+-                            inode_t **inode, gf_boolean_t *entry_selfheal,
+-                            gf_boolean_t *data_selfheal,
+-                            gf_boolean_t *metadata_selfheal,
+-                            unsigned char *pending)
+-
++afr_lockless_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
++                     inode_t **inode, char *index_vgfid,
++                     gf_boolean_t *entry_selfheal, gf_boolean_t *data_selfheal,
++                     gf_boolean_t *metadata_selfheal, unsigned char *pending)
+ {
+     int ret = -1;
+-    fd_t *fd = NULL;
++    int i = 0;
++    afr_private_t *priv = NULL;
++    struct afr_reply *replies = NULL;
+     gf_boolean_t dsh = _gf_false;
+     gf_boolean_t msh = _gf_false;
+     gf_boolean_t esh = _gf_false;
++    unsigned char *sources = NULL;
++    unsigned char *sinks = NULL;
++    unsigned char *valid_on = NULL;
++    uint64_t *witness = NULL;
++
++    priv = this->private;
++    replies = alloca0(sizeof(*replies) * priv->child_count);
++    sources = alloca0(sizeof(*sources) * priv->child_count);
++    sinks = alloca0(sizeof(*sinks) * priv->child_count);
++    witness = alloca0(sizeof(*witness) * priv->child_count);
++    valid_on = alloca0(sizeof(*valid_on) * priv->child_count);
+ 
+     ret = afr_selfheal_unlocked_inspect(frame, this, gfid, inode, &dsh, &msh,
+-                                        &esh);
++                                        &esh, replies);
+     if (ret)
+         goto out;
+-
+-    /* For every heal type hold locks and check if it indeed needs heal */
+-
+-    /* Heal-info does an open() on the file being examined so that the
+-     * current eager-lock holding client, if present, at some point sees
+-     * open-fd count being > 1 and releases the eager-lock so that heal-info
+-     * doesn't remain blocked forever until IO completes.
+-     */
+-    if ((*inode)->ia_type == IA_IFREG) {
+-        ret = afr_selfheal_data_open(this, *inode, &fd);
+-        if (ret < 0) {
+-            gf_msg_debug(this->name, -ret, "%s: Failed to open",
+-                         uuid_utoa((*inode)->gfid));
+-            goto out;
++    for (i = 0; i < priv->child_count; i++) {
++        if (replies[i].valid && replies[i].op_ret == 0) {
++            valid_on[i] = 1;
+         }
+     }
+-
+     if (msh) {
+-        ret = afr_selfheal_locked_metadata_inspect(frame, this, *inode, &msh,
+-                                                   pending);
+-        if (ret == -EIO)
++        ret = afr_selfheal_find_direction(frame, this, replies,
++                                          AFR_METADATA_TRANSACTION, valid_on,
++                                          sources, sinks, witness, pending);
++        if (*pending & PFLAG_SBRAIN)
++            ret = -EIO;
++        if (ret)
+             goto out;
+     }
+-
+     if (dsh) {
+-        ret = afr_selfheal_locked_data_inspect(frame, this, fd, &dsh, pending);
+-        if (ret == -EIO || (ret == -EAGAIN))
++        ret = afr_selfheal_find_direction(frame, this, replies,
++                                          AFR_DATA_TRANSACTION, valid_on,
++                                          sources, sinks, witness, pending);
++        if (*pending & PFLAG_SBRAIN)
++            ret = -EIO;
++        if (ret)
+             goto out;
+     }
+-
+     if (esh) {
+-        ret = afr_selfheal_locked_entry_inspect(frame, this, *inode, &esh,
+-                                                pending);
++        ret = afr_selfheal_find_direction(frame, this, replies,
++                                          AFR_ENTRY_TRANSACTION, valid_on,
++                                          sources, sinks, witness, pending);
++        if (*pending & PFLAG_SBRAIN)
++            ret = -EIO;
++        if (ret)
++            goto out;
+     }
+ 
++    ret = afr_update_heal_status(this, replies, index_vgfid, (*inode)->ia_type,
++                                 &esh, &dsh, &msh);
+ out:
+     *data_selfheal = dsh;
+     *entry_selfheal = esh;
+     *metadata_selfheal = msh;
+-    if (fd)
+-        fd_unref(fd);
++    if (replies)
++        afr_replies_wipe(replies, priv->child_count);
+     return ret;
+ }
+ 
+-static dict_t *
+-afr_set_heal_info(char *status)
+-{
+-    dict_t *dict = NULL;
+-    int ret = -1;
+-
+-    dict = dict_new();
+-    if (!dict) {
+-        ret = -ENOMEM;
+-        goto out;
+-    }
+-
+-    ret = dict_set_dynstr_sizen(dict, "heal-info", status);
+-    if (ret)
+-        gf_msg("", GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
+-               "Failed to set heal-info key to "
+-               "%s",
+-               status);
+-out:
+-    /* Any error other than EINVAL, dict_set_dynstr frees status */
+-    if (ret == -ENOMEM || ret == -EINVAL) {
+-        GF_FREE(status);
+-    }
+-
+-    if (ret && dict) {
+-        dict_unref(dict);
+-        dict = NULL;
+-    }
+-    return dict;
+-}
+-
+ int
+ afr_get_heal_info(call_frame_t *frame, xlator_t *this, loc_t *loc)
+ {
+@@ -6174,10 +6133,18 @@ afr_get_heal_info(call_frame_t *frame, xlator_t *this, loc_t *loc)
+     inode_t *inode = NULL;
+     char *substr = NULL;
+     char *status = NULL;
++    afr_local_t *local = NULL;
++    char *index_vgfid = NULL;
++
++    local = frame->local;
++    if (dict_get_str(local->xdata_req, "index-vgfid", &index_vgfid)) {
++        ret = -1;
++        goto out;
++    }
+ 
+-    ret = afr_selfheal_locked_inspect(frame, this, loc->gfid, &inode,
+-                                      &entry_selfheal, &data_selfheal,
+-                                      &metadata_selfheal, &pending);
++    ret = afr_lockless_inspect(frame, this, loc->gfid, &inode, index_vgfid,
++                               &entry_selfheal, &data_selfheal,
++                               &metadata_selfheal, &pending);
+ 
+     if (ret == -ENOMEM) {
+         ret = -1;
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index d942ccf..1608f75 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -1827,6 +1827,37 @@ afr_selfheal_unlocked_lookup_on(call_frame_t *frame, inode_t *parent,
+     return inode;
+ }
+ 
++static int
++afr_set_multi_dom_lock_count_request(xlator_t *this, dict_t *dict)
++{
++    int ret = 0;
++    afr_private_t *priv = NULL;
++    char *key1 = NULL;
++    char *key2 = NULL;
++
++    priv = this->private;
++    key1 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
++                   strlen(this->name));
++    key2 = alloca0(strlen(GLUSTERFS_INODELK_DOM_PREFIX) + 2 +
++                   strlen(priv->sh_domain));
++
++    ret = dict_set_uint32(dict, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS, 1);
++    if (ret)
++        return ret;
++
++    sprintf(key1, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, this->name);
++    ret = dict_set_uint32(dict, key1, 1);
++    if (ret)
++        return ret;
++
++    sprintf(key2, "%s:%s", GLUSTERFS_INODELK_DOM_PREFIX, priv->sh_domain);
++    ret = dict_set_uint32(dict, key2, 1);
++    if (ret)
++        return ret;
++
++    return 0;
++}
++
+ int
+ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+                                   uuid_t gfid, struct afr_reply *replies,
+@@ -1851,6 +1882,11 @@ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+         return -ENOMEM;
+     }
+ 
++    if (afr_set_multi_dom_lock_count_request(frame->this, xattr_req)) {
++        dict_unref(xattr_req);
++        return -1;
++    }
++
+     loc.inode = inode_ref(inode);
+     gf_uuid_copy(loc.gfid, gfid);
+ 
+@@ -2241,7 +2277,8 @@ int
+ afr_selfheal_unlocked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+                               inode_t **link_inode, gf_boolean_t *data_selfheal,
+                               gf_boolean_t *metadata_selfheal,
+-                              gf_boolean_t *entry_selfheal)
++                              gf_boolean_t *entry_selfheal,
++                              struct afr_reply *replies_dst)
+ {
+     afr_private_t *priv = NULL;
+     inode_t *inode = NULL;
+@@ -2377,6 +2414,8 @@ afr_selfheal_unlocked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+ 
+     ret = 0;
+ out:
++    if (replies && replies_dst)
++        afr_replies_copy(replies_dst, replies, priv->child_count);
+     if (inode)
+         inode_unref(inode);
+     if (replies)
+@@ -2493,7 +2532,7 @@ afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid)
+ 
+     ret = afr_selfheal_unlocked_inspect(frame, this, gfid, &inode,
+                                         &data_selfheal, &metadata_selfheal,
+-                                        &entry_selfheal);
++                                        &entry_selfheal, NULL);
+     if (ret)
+         goto out;
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index f7ecf5d..b39af02 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -327,7 +327,8 @@ int
+ afr_selfheal_unlocked_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+                               inode_t **link_inode, gf_boolean_t *data_selfheal,
+                               gf_boolean_t *metadata_selfheal,
+-                              gf_boolean_t *entry_selfheal);
++                              gf_boolean_t *entry_selfheal,
++                              struct afr_reply *replies);
+ 
+ int
+ afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid);
+diff --git a/xlators/features/locks/src/common.h b/xlators/features/locks/src/common.h
+index 3a74967..ea86b96 100644
+--- a/xlators/features/locks/src/common.h
++++ b/xlators/features/locks/src/common.h
+@@ -45,6 +45,10 @@
+                 fd_unref(__local->fd);                                         \
+             if (__local->inode)                                                \
+                 inode_unref(__local->inode);                                   \
++            if (__local->xdata) {                                              \
++                dict_unref(__local->xdata);                                    \
++                __local->xdata = NULL;                                         \
++            }                                                                  \
+             mem_put(__local);                                                  \
+         }                                                                      \
+     } while (0)
+diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
+index b817960..aa267de 100644
+--- a/xlators/features/locks/src/locks.h
++++ b/xlators/features/locks/src/locks.h
+@@ -239,6 +239,7 @@ typedef struct {
+     gf_boolean_t inodelk_count_req;
+     gf_boolean_t posixlk_count_req;
+     gf_boolean_t parent_entrylk_req;
++    gf_boolean_t multiple_dom_lk_requests;
+     int update_mlock_enforced_flag;
+ } pl_local_t;
+ 
+@@ -260,6 +261,13 @@ typedef struct _locks_ctx {
+     struct list_head metalk_list;
+ } pl_ctx_t;
+ 
++typedef struct _multi_dom_lk_data {
++    xlator_t *this;
++    inode_t *inode;
++    dict_t *xdata_rsp;
++    gf_boolean_t keep_max;
++} multi_dom_lk_data;
++
+ typedef enum { DECREMENT, INCREMENT } pl_count_op_t;
+ 
+ pl_ctx_t *
+diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
+index 4592240..9a14c64 100644
+--- a/xlators/features/locks/src/posix.c
++++ b/xlators/features/locks/src/posix.c
+@@ -150,13 +150,20 @@ fetch_pathinfo(xlator_t *, inode_t *, int32_t *, char **);
+ gf_boolean_t
+ pl_has_xdata_requests(dict_t *xdata)
+ {
+-    static char *reqs[] = {GLUSTERFS_ENTRYLK_COUNT,     GLUSTERFS_INODELK_COUNT,
+-                           GLUSTERFS_INODELK_DOM_COUNT, GLUSTERFS_POSIXLK_COUNT,
+-                           GLUSTERFS_PARENT_ENTRYLK,    NULL};
+-    static int reqs_size[] = {
+-        SLEN(GLUSTERFS_ENTRYLK_COUNT),     SLEN(GLUSTERFS_INODELK_COUNT),
+-        SLEN(GLUSTERFS_INODELK_DOM_COUNT), SLEN(GLUSTERFS_POSIXLK_COUNT),
+-        SLEN(GLUSTERFS_PARENT_ENTRYLK),    0};
++    static char *reqs[] = {GLUSTERFS_ENTRYLK_COUNT,
++                           GLUSTERFS_INODELK_COUNT,
++                           GLUSTERFS_INODELK_DOM_COUNT,
++                           GLUSTERFS_POSIXLK_COUNT,
++                           GLUSTERFS_PARENT_ENTRYLK,
++                           GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS,
++                           NULL};
++    static int reqs_size[] = {SLEN(GLUSTERFS_ENTRYLK_COUNT),
++                              SLEN(GLUSTERFS_INODELK_COUNT),
++                              SLEN(GLUSTERFS_INODELK_DOM_COUNT),
++                              SLEN(GLUSTERFS_POSIXLK_COUNT),
++                              SLEN(GLUSTERFS_PARENT_ENTRYLK),
++                              SLEN(GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS),
++                              0};
+     int i = 0;
+ 
+     if (!xdata)
+@@ -169,12 +176,22 @@ pl_has_xdata_requests(dict_t *xdata)
+     return _gf_false;
+ }
+ 
++static int
++dict_delete_domain_key(dict_t *dict, char *key, data_t *value, void *data)
++{
++    dict_del(dict, key);
++    return 0;
++}
++
+ void
+ pl_get_xdata_requests(pl_local_t *local, dict_t *xdata)
+ {
+     if (!local || !xdata)
+         return;
+ 
++    GF_ASSERT(local->xdata == NULL);
++    local->xdata = dict_copy_with_ref(xdata, NULL);
++
+     if (dict_get_sizen(xdata, GLUSTERFS_ENTRYLK_COUNT)) {
+         local->entrylk_count_req = 1;
+         dict_del_sizen(xdata, GLUSTERFS_ENTRYLK_COUNT);
+@@ -183,6 +200,12 @@ pl_get_xdata_requests(pl_local_t *local, dict_t *xdata)
+         local->inodelk_count_req = 1;
+         dict_del_sizen(xdata, GLUSTERFS_INODELK_COUNT);
+     }
++    if (dict_get_sizen(xdata, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS)) {
++        local->multiple_dom_lk_requests = 1;
++        dict_del_sizen(xdata, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS);
++        dict_foreach_fnmatch(xdata, GLUSTERFS_INODELK_DOM_PREFIX "*",
++                             dict_delete_domain_key, NULL);
++    }
+ 
+     local->inodelk_dom_count_req = dict_get_sizen(xdata,
+                                                   GLUSTERFS_INODELK_DOM_COUNT);
+@@ -210,7 +233,7 @@ pl_needs_xdata_response(pl_local_t *local)
+ 
+     if (local->parent_entrylk_req || local->entrylk_count_req ||
+         local->inodelk_dom_count_req || local->inodelk_count_req ||
+-        local->posixlk_count_req)
++        local->posixlk_count_req || local->multiple_dom_lk_requests)
+         return _gf_true;
+ 
+     return _gf_false;
+@@ -411,6 +434,75 @@ pl_posixlk_xattr_fill(xlator_t *this, inode_t *inode, dict_t *dict,
+ }
+ 
+ void
++pl_inodelk_xattr_fill_each(xlator_t *this, inode_t *inode, dict_t *dict,
++                           char *domname, gf_boolean_t keep_max, char *key)
++{
++    int32_t count = 0;
++    int32_t maxcount = -1;
++    int ret = -1;
++
++    if (keep_max) {
++        ret = dict_get_int32(dict, key, &maxcount);
++        if (ret < 0)
++            gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
++                         GLUSTERFS_INODELK_COUNT);
++    }
++    count = get_inodelk_count(this, inode, domname);
++    if (maxcount >= count)
++        return;
++
++    ret = dict_set_int32(dict, key, count);
++    if (ret < 0) {
++        gf_msg_debug(this->name, 0,
++                     "Failed to set count for "
++                     "key %s",
++                     key);
++    }
++
++    return;
++}
++
++static int
++pl_inodelk_xattr_fill_multiple(dict_t *this, char *key, data_t *value,
++                               void *data)
++{
++    multi_dom_lk_data *d = data;
++    char *tmp_key = NULL;
++    char *save_ptr = NULL;
++
++    tmp_key = gf_strdup(key);
++    strtok_r(tmp_key, ":", &save_ptr);
++    if (!*save_ptr) {
++        gf_msg(THIS->name, GF_LOG_ERROR, 0, EINVAL,
++               "Could not tokenize domain string from key %s", key);
++        return -1;
++    }
++
++    pl_inodelk_xattr_fill_each(d->this, d->inode, d->xdata_rsp, save_ptr,
++                               d->keep_max, key);
++    if (tmp_key)
++        GF_FREE(tmp_key);
++
++    return 0;
++}
++
++void
++pl_fill_multiple_dom_lk_requests(xlator_t *this, pl_local_t *local,
++                                 inode_t *inode, dict_t *dict,
++                                 gf_boolean_t keep_max)
++{
++    multi_dom_lk_data data;
++
++    data.this = this;
++    data.inode = inode;
++    data.xdata_rsp = dict;
++    data.keep_max = keep_max;
++
++    dict_foreach_fnmatch(local->xdata, GLUSTERFS_INODELK_DOM_PREFIX "*",
++                         pl_inodelk_xattr_fill_multiple, &data);
++}
++
++void
+ pl_set_xdata_response(xlator_t *this, pl_local_t *local, inode_t *parent,
+                       inode_t *inode, char *name, dict_t *xdata,
+                       gf_boolean_t max_lock)
+@@ -437,6 +529,9 @@ pl_set_xdata_response(xlator_t *this, pl_local_t *local, inode_t *parent,
+ 
+     if (local->posixlk_count_req)
+         pl_posixlk_xattr_fill(this, inode, xdata, max_lock);
++
++    if (local->multiple_dom_lk_requests)
++        pl_fill_multiple_dom_lk_requests(this, local, inode, xdata, max_lock);
+ }
+ 
+ /* Checks whether the region where fop is acting upon conflicts
+@@ -773,9 +868,6 @@ pl_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ {
+     pl_local_t *local = frame->local;
+ 
+-    if (local->xdata)
+-        dict_unref(local->xdata);
+-
+     pl_track_io_fop_count(local, this, DECREMENT);
+ 
+     if (local->op == GF_FOP_TRUNCATE)
+@@ -932,9 +1024,6 @@ unwind:
+                "ret: %d, error: %s",
+                op_ret, strerror(op_errno));
+ 
+-        if (local->xdata)
+-            dict_unref(local->xdata);
+-
+         switch (local->op) {
+             case GF_FOP_TRUNCATE:
+                 PL_STACK_UNWIND(truncate, xdata, frame, op_ret, op_errno, buf,
+-- 
+1.8.3.1
+
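
The request side of the per-domain lock-count protocol introduced above
is just a marker key plus one 'prefix:domain' key per domain, exactly
as afr_set_multi_dom_lock_count_request() builds them. A standalone
sketch follows, with printf standing in for dict_t and hypothetical
domain names (in AFR the real ones are this->name and priv->sh_domain):

    #include <stdio.h>

    #define GLUSTERFS_INODELK_DOM_PREFIX "glusterfs.inodelk-dom-prefix"
    #define GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS "glusterfs.multi-dom-lk-cnt-req"

    int main(void)
    {
        /* Hypothetical domain names; in AFR these are this->name (the io
         * domain) and priv->sh_domain (the self-heal domain). */
        const char *domains[] = { "testvol-replicate-0",
                                  "testvol-replicate-0:self-heal" };
        char key[256];

        /* Marker key telling the locks xlator to look for per-domain keys. */
        printf("%s = 1\n", GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS);

        /* One "prefix:domain" key per domain; the response dict carries
         * the inodelk count for each domain under the same key. */
        for (int i = 0; i < 2; i++) {
            snprintf(key, sizeof(key), "%s:%s",
                     GLUSTERFS_INODELK_DOM_PREFIX, domains[i]);
            printf("%s = 1\n", key);
        }
        return 0;
    }
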
diff --git a/SOURCES/0425-tests-Fix-spurious-self-heald.t-failure.patch b/SOURCES/0425-tests-Fix-spurious-self-heald.t-failure.patch
new file mode 100644
index 0000000..7bfc04a
--- /dev/null
+++ b/SOURCES/0425-tests-Fix-spurious-self-heald.t-failure.patch
@@ -0,0 +1,187 @@
+From 2c582ea6c76031463501b31d9250e739d5aeda79 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Fri, 5 Jun 2020 14:28:11 +0530
+Subject: [PATCH 425/449] tests: Fix spurious self-heald.t failure
+
+Problem:
+The heal-info code assumes that all indices in the xattrop directory
+definitely need heal, but there is one corner case: the very first
+xattrop on a file adds the gfid to the 'xattrop' index in the fop
+path, and the _cbk path removes it again because the fop is a
+zero-xattr xattrop in the success case. heal-info could read these
+gfids in the meantime and show the files as needing heal.
+
+Fix:
+Check the pending flag to decide whether the file definitely needs
+heal, instead of relying on which index is being crawled at the moment.
+
+> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24110/
+> fixes: bz#1801623
+> Change-Id: I79f00dc7366fedbbb25ec4bec838dba3b34c7ad5
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1721355
+Change-Id: I7efdf45a5158fadfdbdd21c91837f193d80fa6c7
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202491
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
+---
+ heal/src/glfs-heal.c                 | 17 ++++++----------
+ xlators/cluster/afr/src/afr-common.c | 38 ++++++++++++++----------------------
+ 2 files changed, 21 insertions(+), 34 deletions(-)
+
+diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
+index 5af9e31..125b12c 100644
+--- a/heal/src/glfs-heal.c
++++ b/heal/src/glfs-heal.c
+@@ -775,8 +775,7 @@ static int
+ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
+                       uint64_t *offset, num_entries_t *num_entries,
+                       print_status glfsh_print_status,
+-                      gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode,
+-                      dict_t *xattr_req)
++                      gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
+ {
+     gf_dirent_t *entry = NULL;
+     gf_dirent_t *tmp = NULL;
+@@ -808,7 +807,7 @@ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
+ 
+         gf_uuid_parse(entry->d_name, gfid);
+         gf_uuid_copy(loc.gfid, gfid);
+-        ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, xattr_req, NULL);
++        ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, NULL, NULL);
+         if (ret) {
+             if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) && (ret == -ENOTCONN))
+                 goto out;
+@@ -877,19 +876,19 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+         if (heal_op == GF_SHD_OP_INDEX_SUMMARY) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_heal_status,
+-                                        ignore, mode, xattr_req);
++                                        ignore, mode);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_spb_status,
+-                                        ignore, mode, xattr_req);
++                                        ignore, mode);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_HEAL_SUMMARY) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_print_summary_status,
+-                                        ignore, mode, xattr_req);
++                                        ignore, mode);
+             if (ret < 0)
+                 goto out;
+         } else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
+@@ -898,7 +897,7 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+         } else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
+             ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
+                                         num_entries, glfsh_heal_status_boolean,
+-                                        ignore, mode, xattr_req);
++                                        ignore, mode);
+             if (ret < 0)
+                 goto out;
+         }
+@@ -952,10 +951,6 @@ glfsh_print_pending_heals_type(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
+     int32_t op_errno = 0;
+     gf_boolean_t ignore = _gf_false;
+ 
+-    ret = dict_set_str(xattr_req, "index-vgfid", vgfid);
+-    if (ret)
+-        return ret;
+-
+     if (!strcmp(vgfid, GF_XATTROP_DIRTY_GFID))
+         ignore = _gf_true;
+ 
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index c355ec5..89e2483 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -5995,8 +5995,8 @@ afr_is_dirty_count_non_unary(xlator_t *this, struct afr_reply *replies,
+ 
+ static int
+ afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
+-                       char *index_vgfid, ia_type_t ia_type, gf_boolean_t *esh,
+-                       gf_boolean_t *dsh, gf_boolean_t *msh)
++                       ia_type_t ia_type, gf_boolean_t *esh, gf_boolean_t *dsh,
++                       gf_boolean_t *msh, unsigned char pending)
+ {
+     int ret = -1;
+     GF_UNUSED int ret1 = 0;
+@@ -6026,14 +6026,7 @@ afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
+         }
+     }
+ 
+-    if (!strcmp(index_vgfid, GF_XATTROP_INDEX_GFID)) {
+-        if (shd_domain_lk_count) {
+-            ret = -EAGAIN; /*For 'possibly-healing'. */
+-        } else {
+-            ret = 0; /*needs heal. Just set a non -ve value so that it is
+-                       assumed as the source index.*/
+-        }
+-    } else if (!strcmp(index_vgfid, GF_XATTROP_DIRTY_GFID)) {
++    if (!pending) {
+         if ((afr_is_dirty_count_non_unary(this, replies, ia_type)) ||
+             (!io_domain_lk_count)) {
+             /* Needs heal. */
+@@ -6042,6 +6035,13 @@ afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
+             /* No heal needed. */
+             *dsh = *esh = *msh = 0;
+         }
++    } else {
++        if (shd_domain_lk_count) {
++            ret = -EAGAIN; /*For 'possibly-healing'. */
++        } else {
++            ret = 0; /*needs heal. Just set a non -ve value so that it is
++                       assumed as the source index.*/
++        }
+     }
+     return ret;
+ }
+@@ -6049,8 +6049,8 @@ afr_update_heal_status(xlator_t *this, struct afr_reply *replies,
+ /*return EIO, EAGAIN or pending*/
+ int
+ afr_lockless_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+-                     inode_t **inode, char *index_vgfid,
+-                     gf_boolean_t *entry_selfheal, gf_boolean_t *data_selfheal,
++                     inode_t **inode, gf_boolean_t *entry_selfheal,
++                     gf_boolean_t *data_selfheal,
+                      gf_boolean_t *metadata_selfheal, unsigned char *pending)
+ {
+     int ret = -1;
+@@ -6109,8 +6109,8 @@ afr_lockless_inspect(call_frame_t *frame, xlator_t *this, uuid_t gfid,
+             goto out;
+     }
+ 
+-    ret = afr_update_heal_status(this, replies, index_vgfid, (*inode)->ia_type,
+-                                 &esh, &dsh, &msh);
++    ret = afr_update_heal_status(this, replies, (*inode)->ia_type, &esh, &dsh,
++                                 &msh, *pending);
+ out:
+     *data_selfheal = dsh;
+     *entry_selfheal = esh;
+@@ -6133,16 +6133,8 @@ afr_get_heal_info(call_frame_t *frame, xlator_t *this, loc_t *loc)
+     inode_t *inode = NULL;
+     char *substr = NULL;
+     char *status = NULL;
+-    afr_local_t *local = NULL;
+-    char *index_vgfid = NULL;
+-
+-    local = frame->local;
+-    if (dict_get_str(local->xdata_req, "index-vgfid", &index_vgfid)) {
+-        ret = -1;
+-        goto out;
+-    }
+ 
+-    ret = afr_lockless_inspect(frame, this, loc->gfid, &inode, index_vgfid,
++    ret = afr_lockless_inspect(frame, this, loc->gfid, &inode,
+                                &entry_selfheal, &data_selfheal,
+                                &metadata_selfheal, &pending);
+ 
+-- 
+1.8.3.1
+
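
After this patch the heal-info verdict no longer depends on which index
directory is being crawled; it hinges on the pending xattrs plus the
dirty and lock counts. Here is a simplified standalone sketch of the
decision in the patched afr_update_heal_status(), with boolean inputs
replacing the reply/xattr parsing (an illustration of the logic, not
the real signature):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* pending:         any afr pending xattrs were seen in the replies
     * dirty_non_unary: some brick's dirty counter is greater than 1
     * io_lk / shd_lk:  inodelk counts in the io and self-heal domains */
    static int heal_status(bool pending, bool dirty_non_unary,
                           int io_lk, int shd_lk, bool *needs_heal)
    {
        if (!pending) {
            /* A dirty count > 1 or no live IO holding the lock means the
             * dirty marking is real; otherwise it is a transient index
             * entry of the kind self-heald.t was tripping over. */
            *needs_heal = dirty_non_unary || io_lk == 0;
            return 0;
        }
        if (shd_lk)
            return -EAGAIN;  /* self-heal daemon is on it: possibly healing */
        *needs_heal = true;
        return 0;
    }

    int main(void)
    {
        bool heal = false;
        printf("ret=%d\n", heal_status(true, false, 1, 1, &heal)); /* -EAGAIN */
        heal_status(false, false, 1, 0, &heal);
        printf("transient entry needs heal? %d\n", heal);          /* 0 */
        return 0;
    }
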
diff --git a/SOURCES/0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch b/SOURCES/0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
new file mode 100644
index 0000000..a96b66e
--- /dev/null
+++ b/SOURCES/0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
@@ -0,0 +1,216 @@
+From 91936fe5ef854bd9d2f91e643795d0e7791b97ba Mon Sep 17 00:00:00 2001
+From: Harpreet Kaur <hlalwani@redhat.com>
+Date: Mon, 7 Jan 2019 16:38:25 +0530
+Subject: [PATCH 426/449] geo-rep: Fix for "Transport End Point not connected"
+ issue
+
+problem: The geo-rep gsyncd process mounts the master and slave
+         volumes on the master and slave nodes respectively and
+         starts the sync, but it doesn't wait for the mount to be
+         in a ready state to accept I/O. The gluster mount is
+         considered ready when all the distribute sub-volumes are
+         up. If they are not all up yet, a lookup can fail with
+         ENOTCONN when the file lives on a subvol that is still
+         down.
+
+solution: Added a virtual xattr "dht.subvol.status" which returns "1"
+          if all subvols are up and "0" otherwise. After a fresh
+          mount, geo-rep polls this virtual xattr to check whether
+          all subvols are up before starting the I/O (a usage sketch
+          follows this patch).
+
+>fixes: bz#1664335
+>Change-Id: If3ad01d728b1372da7c08ccbe75a45bdc1ab2a91
+>Signed-off-by: Harpreet Kaur <hlalwani@redhat.com>
+>Signed-off-by: Kotresh HR <khiremat@redhat.com>
+
+backport of https://review.gluster.org/#/c/glusterfs/+/22001/
+BUG: 1640573
+Change-Id: If3ad01d728b1372da7c08ccbe75a45bdc1ab2a91
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202554
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ geo-replication/syncdaemon/resource.py   | 11 ++++++
+ geo-replication/syncdaemon/syncdutils.py | 20 +++++++++--
+ xlators/cluster/dht/src/dht-common.c     | 59 ++++++++++++++++++++++++++++++++
+ xlators/cluster/dht/src/dht-common.h     |  4 +++
+ 4 files changed, 91 insertions(+), 3 deletions(-)
+
+diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
+index 189d8a1..0c61de9 100644
+--- a/geo-replication/syncdaemon/resource.py
++++ b/geo-replication/syncdaemon/resource.py
+@@ -37,6 +37,7 @@ from syncdutils import ChangelogException, ChangelogHistoryNotAvailable
+ from syncdutils import get_changelog_log_level, get_rsync_version
+ from syncdutils import CHANGELOG_AGENT_CLIENT_VERSION
+ from syncdutils import GX_GFID_CANONICAL_LEN
++from syncdutils import gf_mount_ready
+ from gsyncdstatus import GeorepStatus
+ from syncdutils import lf, Popen, sup
+ from syncdutils import Xattr, matching_disk_gfid, get_gfid_from_mnt
+@@ -950,6 +951,16 @@ class Mounter(object):
+                 logging.exception('mount cleanup failure:')
+                 rv = 200
+             os._exit(rv)
++
++        #Polling the dht.subvol.status value.
++        RETRIES = 10
++        while not gf_mount_ready():
++            if RETRIES < 0:
++                logging.error('Subvols are not up')
++                break
++            RETRIES -= 1
++            time.sleep(0.2)
++
+         logging.debug('auxiliary glusterfs mount prepared')
+ 
+ 
+diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
+index b08098e..7560fa1 100644
+--- a/geo-replication/syncdaemon/syncdutils.py
++++ b/geo-replication/syncdaemon/syncdutils.py
+@@ -21,8 +21,8 @@ import subprocess
+ import socket
+ from subprocess import PIPE
+ from threading import Lock, Thread as baseThread
+-from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ECONNABORTED
+-from errno import EINTR, ENOENT, ESTALE, EBUSY, errorcode
++from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED
++from errno import EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode
+ from signal import signal, SIGTERM
+ import select as oselect
+ from os import waitpid as owaitpid
+@@ -55,6 +55,8 @@ from rconf import rconf
+ 
+ from hashlib import sha256 as sha256
+ 
++ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP')
++
+ # auxiliary gfid based access prefix
+ _CL_AUX_GFID_PFX = ".gfid/"
+ ROOT_GFID = "00000000-0000-0000-0000-000000000001"
+@@ -100,6 +102,19 @@ def unescape_space_newline(s):
+             .replace(NEWLINE_ESCAPE_CHAR, "\n")\
+             .replace(PERCENTAGE_ESCAPE_CHAR, "%")
+ 
++# gf_mount_ready() returns 1 if all subvols are up, else 0
++def gf_mount_ready():
++    ret = errno_wrap(Xattr.lgetxattr,
++                     ['.', 'dht.subvol.status', 16],
++                     [ENOENT, ENOTSUP, ENODATA], [ENOMEM])
++
++    if isinstance(ret, int):
++       logging.error("failed to get the xattr value")
++       return 1
++    ret = ret.rstrip('\x00')
++    if ret == "1":
++       return 1
++    return 0
+ 
+ def norm(s):
+     if s:
+@@ -564,7 +579,6 @@ def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]):
+ def lstat(e):
+     return errno_wrap(os.lstat, [e], [ENOENT], [ESTALE, EBUSY])
+ 
+-
+ def get_gfid_from_mnt(gfidpath):
+     return errno_wrap(Xattr.lgetxattr,
+                       [gfidpath, 'glusterfs.gfid.string',
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index 6aa18f3..23cc80c 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -4858,6 +4858,60 @@ out:
+     return 0;
+ }
+ 
++/* Virtual Xattr which returns 1 if all subvols are up,
++   else returns 0. Geo-rep then uses this virtual xattr
++   after a fresh mount and starts the I/O.
++*/
++
++enum dht_vxattr_subvol {
++    DHT_VXATTR_SUBVOLS_UP = 1,
++    DHT_VXATTR_SUBVOLS_DOWN = 0,
++};
++
++int
++dht_vgetxattr_subvol_status(call_frame_t *frame, xlator_t *this,
++                            const char *key)
++{
++    dht_local_t *local = NULL;
++    int ret = -1;
++    int op_errno = ENODATA;
++    int value = DHT_VXATTR_SUBVOLS_UP;
++    int i = 0;
++    dht_conf_t *conf = NULL;
++
++    conf = this->private;
++    local = frame->local;
++
++    if (!key) {
++        op_errno = EINVAL;
++        goto out;
++    }
++    local->xattr = dict_new();
++    if (!local->xattr) {
++        op_errno = ENOMEM;
++        goto out;
++    }
++    for (i = 0; i < conf->subvolume_cnt; i++) {
++        if (!conf->subvolume_status[i]) {
++            value = DHT_VXATTR_SUBVOLS_DOWN;
++            gf_msg_debug(this->name, 0, "subvol %s is down ",
++                         conf->subvolumes[i]->name);
++            break;
++        }
++    }
++    ret = dict_set_int8(local->xattr, (char *)key, value);
++    if (ret < 0) {
++        op_errno = -ret;
++        ret = -1;
++        goto out;
++    }
++    ret = 0;
++
++out:
++    DHT_STACK_UNWIND(getxattr, frame, ret, op_errno, local->xattr, NULL);
++    return 0;
++}
++
+ int
+ dht_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *key,
+              dict_t *xdata)
+@@ -4915,6 +4969,11 @@ dht_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *key,
+         goto err;
+     }
+ 
++    if (strncmp(key, DHT_SUBVOL_STATUS_KEY, SLEN(DHT_SUBVOL_STATUS_KEY)) == 0) {
++        dht_vgetxattr_subvol_status(frame, this, key);
++        return 0;
++    }
++
+     /* skip over code which is irrelevant if !DHT_IS_DIR(layout) */
+     if (!DHT_IS_DIR(layout))
+         goto no_dht_is_dir;
+diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
+index 1b3e826..9ec5b51 100644
+--- a/xlators/cluster/dht/src/dht-common.h
++++ b/xlators/cluster/dht/src/dht-common.h
+@@ -45,6 +45,10 @@
+ #define DHT_DIR_STAT_BLOCKS 8
+ #define DHT_DIR_STAT_SIZE 4096
+ 
++/* Virtual xattr for subvols status */
++
++#define DHT_SUBVOL_STATUS_KEY "dht.subvol.status"
++
+ /* Virtual xattrs for debugging */
+ 
+ #define DHT_DBG_HASHED_SUBVOL_PATTERN "dht.file.hashed-subvol.*"
+-- 
+1.8.3.1
+
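A minimal C sketch of what the new "dht.subvol.status" virtual xattr
looks like from a client, mirroring what gf_mount_ready() does from
Python in this patch. The mount path and the retry policy are
assumptions for the example, not part of the patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/xattr.h>

/* Poll the DHT virtual xattr on a fresh glusterfs mount until all
 * subvols report up ("1"), as the gsyncd Mounter now does. */
int
wait_for_subvols(const char *mnt)
{
    char val[16];
    int retries = 10;

    while (retries-- > 0) {
        ssize_t n = lgetxattr(mnt, "dht.subvol.status", val,
                              sizeof(val) - 1);
        if (n > 0) {
            val[n] = '\0';          /* value arrives as "1" or "0" */
            if (strcmp(val, "1") == 0)
                return 0;           /* all subvols up: safe to start I/O */
        }
        usleep(200000);             /* 0.2s, matching the gsyncd poll */
    }
    return -1;  /* subvols still not up; gsyncd logs and carries on */
}

int
main(void)
{
    return wait_for_subvols("/mnt/gluster") ? 1 : 0;
}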
diff --git a/SOURCES/0427-storage-posix-Fixing-a-coverity-issue.patch b/SOURCES/0427-storage-posix-Fixing-a-coverity-issue.patch
new file mode 100644
index 0000000..ebeb556
--- /dev/null
+++ b/SOURCES/0427-storage-posix-Fixing-a-coverity-issue.patch
@@ -0,0 +1,38 @@
+From 3943fce5818a353117fc1c492e6383434d742979 Mon Sep 17 00:00:00 2001
+From: Barak Sason <bsasonro@redhat.com>
+Date: Sun, 18 Aug 2019 17:52:04 +0300
+Subject: [PATCH 427/449] storage/posix - Fixing a coverity issue
+
+Fixed a resource leak of variable 'pfd'
+
+backport of https://review.gluster.org/#/c/glusterfs/+/23261/
+>CID: 1400673
+>Updates: bz#789278
+>Change-Id: I78e1e8a89e0604b56e35a75c25d436b35db096c3
+>Signed-off-by: Barak Sason <bsasonro@redhat.com>
+
+BUG: 1787310
+Change-Id: I78e1e8a89e0604b56e35a75c25d436b35db096c3
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202563
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index bcce06e..5748b9f 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -1603,6 +1603,7 @@ posix_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+         if (op_ret == -1) {
+             gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_FSTAT_FAILED,
+                    "pre-operation fstat failed on fd=%p", fd);
++            GF_FREE(pfd);
+             goto out;
+         }
+ 
+-- 
+1.8.3.1
+
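The one-line fix above releases 'pfd' on the error path of
posix_open() where the pre-operation fstat fails. The general shape of
the leak and its fix, with names simplified (a sketch, not the posix
xlator code):

#include <stdlib.h>

struct pfd { int fd; };

/* An allocation succeeds, a later pre-operation check fails, and the
 * error path must free the allocation before jumping to 'out' --
 * otherwise the struct leaks, which is what Coverity flagged. */
int
open_with_preop_check(int raw_fd)
{
    int op_ret = -1;
    struct pfd *pfd = calloc(1, sizeof(*pfd));
    if (!pfd)
        goto out;

    pfd->fd = raw_fd;
    if (raw_fd < 0) {       /* stands in for the failing fstat() */
        free(pfd);          /* the added GF_FREE(pfd) equivalent */
        goto out;
    }

    op_ret = 0;             /* on success, pfd ownership moves on */
out:
    return op_ret;
}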
diff --git a/SOURCES/0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch b/SOURCES/0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
new file mode 100644
index 0000000..dba8f3c
--- /dev/null
+++ b/SOURCES/0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
@@ -0,0 +1,48 @@
+From 7e1bf1e338a6effe209f57b1b92a70d5d25a73bf Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan <jthottan@redhat.com>
+Date: Mon, 26 Aug 2019 11:32:18 +0530
+Subject: [PATCH 428/449] glusterd/ganesha: fixing resource leak in
+ tear_down_cluster()
+
+backport of https://review.gluster.org/#/c/glusterfs/+/23295/
+>CID: 1370947
+>Updates: bz#789278
+>Change-Id: Ib694056430ff0536ed705a0e77e5ace22486891e
+>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
+
+BUG: 1787310
+Change-Id: Ib694056430ff0536ed705a0e77e5ace22486891e
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202561
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-ganesha.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+index 0a16925..06f028f 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+@@ -659,10 +659,18 @@ tear_down_cluster(gf_boolean_t run_teardown)
+                          "Failed to close dir %s. Reason :"
+                          " %s",
+                          CONFDIR, strerror(errno));
++            goto exit;
+         }
+     }
+ 
+ out:
++    if (dir && sys_closedir(dir)) {
++        gf_msg_debug(THIS->name, 0,
++                     "Failed to close dir %s. Reason :"
++                     " %s",
++                     CONFDIR, strerror(errno));
++    }
++exit:
+     return ret;
+ }
+ 
+-- 
+1.8.3.1
+
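The fix above introduces a second cleanup label: paths that have
already closed (or tried to close) the directory jump to 'exit' and
skip the sys_closedir() now performed at 'out', so the handle is
released exactly once on every path. A simplified stand-in for the
label ordering (not the glusterd-ganesha code itself):

#include <dirent.h>
#include <stdio.h>

int
teardown_sketch(const char *confdir)
{
    int ret = -1;
    DIR *dir = opendir(confdir);
    if (!dir)
        goto out;

    /* ... per-entry teardown work would set ret here ... */
    ret = 0;

    if (closedir(dir)) {
        perror("failed to close dir");
        goto exit;      /* already consumed: skip the closedir at out */
    }
    dir = NULL;
out:
    if (dir && closedir(dir))
        perror("failed to close dir");
exit:
    return ret;
}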
diff --git a/SOURCES/0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch b/SOURCES/0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
new file mode 100644
index 0000000..8ac6529
--- /dev/null
+++ b/SOURCES/0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
@@ -0,0 +1,61 @@
+From 1370db202a2a60810409f74c390448bf8fbd6998 Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Sun, 9 Feb 2020 15:09:30 +0200
+Subject: [PATCH 429/449] dht/rebalance - fixing failure occurace due to
+ rebalance stop
+
+Problem description:
+When stopping rebalance, the following error messages appear in the
+rebalance log file:
+[2020-01-28 14:31:42.452070] W [dht-rebalance.c:3447:gf_defrag_process_dir] 0-distrep-dht: Found error from gf_defrag_get_entry
+[2020-01-28 14:31:42.452764] E [MSGID: 109111] [dht-rebalance.c:3971:gf_defrag_fix_layout] 0-distrep-dht: gf_defrag_process_dir failed for directory: /0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31
+[2020-01-28 14:31:42.453498] E [MSGID: 109016] [dht-rebalance.c:3906:gf_defrag_fix_layout] 0-distrep-dht: Fix layout failed for /0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30
+
+In order to avoid seeing these error messages, a modification to the
+error handling mechanism has been made.
+In addition, several log messages have been added in order to improve debugging efficiency.
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24103/
+>fixes: bz#1800956
+>Change-Id: Ifc82dae79ab3da9fe22ee25088a2a6b855afcfcf
+>Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+
+BUG: 1286171
+Change-Id: Ifc82dae79ab3da9fe22ee25088a2a6b855afcfcf
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202562
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 8f31dca..88b6b54 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -3479,6 +3479,10 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+                                       migrate_data, dir_dfmeta, xattr_req,
+                                       &should_commit_hash, perrno);
+ 
++            if (defrag->defrag_status == GF_DEFRAG_STATUS_STOPPED) {
++                goto out;
++            }
++
+             if (ret) {
+                 gf_log(this->name, GF_LOG_WARNING,
+                        "Found "
+@@ -3935,6 +3939,10 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+             ret = gf_defrag_fix_layout(this, defrag, &entry_loc, fix_layout,
+                                        migrate_data);
+ 
++            if (defrag->defrag_status == GF_DEFRAG_STATUS_STOPPED) {
++                goto out;
++            }
++
+             if (ret && ret != 2) {
+                 gf_msg(this->name, GF_LOG_ERROR, 0, DHT_MSG_LAYOUT_FIX_FAILED,
+                        "Fix layout failed for %s", entry_loc.path);
+-- 
+1.8.3.1
+
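The two added checks above make the defrag walkers return early once
defrag_status flips to GF_DEFRAG_STATUS_STOPPED, before a non-zero
child return is interpreted (and logged) as a failure. A compact
sketch of the pattern during a recursive directory walk, with the
status enum and walker simplified (illustration only):

#include <stdio.h>

enum defrag_status { DEFRAG_RUNNING, DEFRAG_STOPPED };

/* After each recursive step, check for a stop request before treating
 * the result as an error; an admin-issued stop then unwinds quietly
 * instead of producing "Fix layout failed" messages. */
int
fix_layout_sketch(volatile enum defrag_status *status, int depth)
{
    if (depth == 0)
        return 0;

    int ret = fix_layout_sketch(status, depth - 1);

    if (*status == DEFRAG_STOPPED)
        return 0;   /* stop requested: unwind, no error reporting */

    if (ret) {
        fprintf(stderr, "Fix layout failed at depth %d\n", depth);
        return ret;
    }
    return 0;
}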
diff --git a/SOURCES/0430-Fix-some-Null-pointer-dereference-coverity-issues.patch b/SOURCES/0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
new file mode 100644
index 0000000..6ff69e8
--- /dev/null
+++ b/SOURCES/0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
@@ -0,0 +1,291 @@
+From 7fe500a03d42dba6082c28ef7284c950c44fbfa3 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Wed, 22 May 2019 17:46:19 +0200
+Subject: [PATCH 430/449] Fix some "Null pointer dereference" coverity issues
+
+This patch fixes the following CID's:
+
+  * 1124829
+  * 1274075
+  * 1274083
+  * 1274128
+  * 1274135
+  * 1274141
+  * 1274143
+  * 1274197
+  * 1274205
+  * 1274210
+  * 1274211
+  * 1288801
+  * 1398629
+
+Backport of:
+> Upstream-patch-link: https://review.gluster.org/22767
+> Change-Id: Ia7c86cfab3245b20777ffa296e1a59748040f558
+> Updates: bz#789278
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1787310
+Change-Id: Ia7c86cfab3245b20777ffa296e1a59748040f558
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202616
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-system.c                     |  2 +-
+ cli/src/cli-xml-output.c                     |  2 +-
+ glusterfsd/src/glusterfsd.c                  | 24 +++++++++++++-----------
+ libglusterfs/src/inode.c                     |  3 +++
+ rpc/rpc-lib/src/rpcsvc.c                     |  4 ++++
+ xlators/cluster/dht/src/dht-shared.c         |  4 ++++
+ xlators/cluster/dht/src/switch.c             |  9 +++++++++
+ xlators/features/trash/src/trash.c           |  2 +-
+ xlators/mgmt/glusterd/src/glusterd-geo-rep.c |  7 +++++--
+ xlators/nfs/server/src/mount3.c              |  6 ++++++
+ xlators/protocol/client/src/client.c         |  7 ++++++-
+ xlators/storage/posix/src/posix-helpers.c    |  3 +++
+ 12 files changed, 56 insertions(+), 17 deletions(-)
+
+diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
+index 8cd1542..cb3a9ea 100644
+--- a/cli/src/cli-cmd-system.c
++++ b/cli/src/cli-cmd-system.c
+@@ -446,7 +446,7 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
+     dict_t *dict = NULL;
+     cli_local_t *local = NULL;
+ 
+-    if (wordcount < 3) {
++    if ((wordcount < 3) || (words[2] == NULL)) {
+         cli_usage_out(word->pattern);
+         goto out;
+     }
+diff --git a/cli/src/cli-xml-output.c b/cli/src/cli-xml-output.c
+index 006e2fb..903997c 100644
+--- a/cli/src/cli-xml-output.c
++++ b/cli/src/cli-xml-output.c
+@@ -64,7 +64,7 @@ cli_begin_xml_output(xmlTextWriterPtr *writer, xmlDocPtr *doc)
+     int ret = -1;
+ 
+     *writer = xmlNewTextWriterDoc(doc, 0);
+-    if (writer == NULL) {
++    if (*writer == NULL) {
+         ret = -1;
+         goto out;
+     }
+diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
+index 974fb88..9821180 100644
+--- a/glusterfsd/src/glusterfsd.c
++++ b/glusterfsd/src/glusterfsd.c
+@@ -1235,19 +1235,21 @@ parse_opts(int key, char *arg, struct argp_state *state)
+         case ARGP_BRICK_PORT_KEY:
+             n = 0;
+ 
+-            port_str = strtok_r(arg, ",", &tmp_str);
+-            if (gf_string2uint_base10(port_str, &n) == 0) {
+-                cmd_args->brick_port = n;
+-                port_str = strtok_r(NULL, ",", &tmp_str);
+-                if (port_str) {
+-                    if (gf_string2uint_base10(port_str, &n) == 0) {
+-                        cmd_args->brick_port2 = n;
+-                        break;
++            if (arg != NULL) {
++                port_str = strtok_r(arg, ",", &tmp_str);
++                if (gf_string2uint_base10(port_str, &n) == 0) {
++                    cmd_args->brick_port = n;
++                    port_str = strtok_r(NULL, ",", &tmp_str);
++                    if (port_str) {
++                        if (gf_string2uint_base10(port_str, &n) == 0) {
++                            cmd_args->brick_port2 = n;
++                            break;
++                        }
++                        argp_failure(state, -1, 0,
++                                     "wrong brick (listen) port %s", arg);
+                     }
+-                    argp_failure(state, -1, 0, "wrong brick (listen) port %s",
+-                                 arg);
++                    break;
+                 }
+-                break;
+             }
+ 
+             argp_failure(state, -1, 0, "unknown brick (listen) port %s", arg);
+diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
+index 9dbb25b..4c3c546 100644
+--- a/libglusterfs/src/inode.c
++++ b/libglusterfs/src/inode.c
+@@ -899,6 +899,9 @@ inode_resolve(inode_table_t *table, char *path)
+ 
+     parent = inode_ref(table->root);
+     str = tmp = gf_strdup(path);
++    if (str == NULL) {
++        goto out;
++    }
+ 
+     while (1) {
+         bname = strtok_r(str, "/", &saveptr);
+diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
+index 5a35139..b058932 100644
+--- a/rpc/rpc-lib/src/rpcsvc.c
++++ b/rpc/rpc-lib/src/rpcsvc.c
+@@ -2874,6 +2874,10 @@ rpcsvc_transport_peer_check_search(dict_t *options, char *pattern, char *ip,
+     }
+ 
+     dup_addrstr = gf_strdup(addrstr);
++    if (dup_addrstr == NULL) {
++        ret = -1;
++        goto err;
++    }
+     addrtok = strtok_r(dup_addrstr, ",", &svptr);
+     while (addrtok) {
+         /* CASEFOLD not present on Solaris */
+diff --git a/xlators/cluster/dht/src/dht-shared.c b/xlators/cluster/dht/src/dht-shared.c
+index ea4b7c6..58e3339 100644
+--- a/xlators/cluster/dht/src/dht-shared.c
++++ b/xlators/cluster/dht/src/dht-shared.c
+@@ -278,6 +278,10 @@ dht_parse_decommissioned_bricks(xlator_t *this, dht_conf_t *conf,
+         goto out;
+ 
+     dup_brick = gf_strdup(bricks);
++    if (dup_brick == NULL) {
++        goto out;
++    }
++
+     node = strtok_r(dup_brick, ",", &tmpstr);
+     while (node) {
+         for (i = 0; i < conf->subvolume_cnt; i++) {
+diff --git a/xlators/cluster/dht/src/switch.c b/xlators/cluster/dht/src/switch.c
+index a782fcd..207d109 100644
+--- a/xlators/cluster/dht/src/switch.c
++++ b/xlators/cluster/dht/src/switch.c
+@@ -610,9 +610,15 @@ set_switch_pattern(xlator_t *this, dht_conf_t *conf, const char *pattern_str)
+     /* Get the pattern for considering switch case.
+        "option block-size *avi:10MB" etc */
+     option_string = gf_strdup(pattern_str);
++    if (option_string == NULL) {
++        goto err;
++    }
+     switch_str = strtok_r(option_string, ";", &tmp_str);
+     while (switch_str) {
+         dup_str = gf_strdup(switch_str);
++        if (dup_str == NULL) {
++            goto err;
++        }
+         switch_opt = GF_CALLOC(1, sizeof(struct switch_struct),
+                                gf_switch_mt_switch_struct);
+         if (!switch_opt) {
+@@ -647,6 +653,9 @@ set_switch_pattern(xlator_t *this, dht_conf_t *conf, const char *pattern_str)
+ 
+         if (childs) {
+             dup_childs = gf_strdup(childs);
++            if (dup_childs == NULL) {
++                goto err;
++            }
+             child = strtok_r(dup_childs, ",", &tmp);
+             while (child) {
+                 if (gf_switch_valid_child(this, child)) {
+diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
+index d668436..f96ed73 100644
+--- a/xlators/features/trash/src/trash.c
++++ b/xlators/features/trash/src/trash.c
+@@ -170,7 +170,7 @@ store_eliminate_path(char *str, trash_elim_path **eliminate)
+     int ret = 0;
+     char *strtokptr = NULL;
+ 
+-    if (eliminate == NULL) {
++    if ((str == NULL) || (eliminate == NULL)) {
+         ret = EINVAL;
+         goto out;
+     }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+index 0f40bea..85c06c1 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
++++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+@@ -5981,7 +5981,7 @@ glusterd_get_slave_info(char *slave, char **slave_url, char **hostname,
+     GF_ASSERT(this);
+ 
+     ret = glusterd_urltransform_single(slave, "normalize", &linearr);
+-    if (ret == -1) {
++    if ((ret == -1) || (linearr[0] == NULL)) {
+         ret = snprintf(errmsg, sizeof(errmsg) - 1, "Invalid Url: %s", slave);
+         errmsg[ret] = '\0';
+         *op_errstr = gf_strdup(errmsg);
+@@ -5992,7 +5992,10 @@ glusterd_get_slave_info(char *slave, char **slave_url, char **hostname,
+ 
+     tmp = strtok_r(linearr[0], "/", &save_ptr);
+     tmp = strtok_r(NULL, "/", &save_ptr);
+-    slave = strtok_r(tmp, ":", &save_ptr);
++    slave = NULL;
++    if (tmp != NULL) {
++        slave = strtok_r(tmp, ":", &save_ptr);
++    }
+     if (slave) {
+         ret = glusterd_geo_rep_parse_slave(slave, hostname, op_errstr);
+         if (ret) {
+diff --git a/xlators/nfs/server/src/mount3.c b/xlators/nfs/server/src/mount3.c
+index 396809c..734453c 100644
+--- a/xlators/nfs/server/src/mount3.c
++++ b/xlators/nfs/server/src/mount3.c
+@@ -3205,6 +3205,12 @@ mnt3_export_parse_auth_param(struct mnt3_export *exp, char *exportpath)
+     struct host_auth_spec *host = NULL;
+     int ret = 0;
+ 
++    if (exportpath == NULL) {
++        gf_msg(GF_MNT, GF_LOG_ERROR, EINVAL, NFS_MSG_PARSE_HOSTSPEC_FAIL,
++               "Export path is NULL");
++        return -1;
++    }
++
+     /* Using exportpath directly in strtok_r because we want
+      * to strip off AUTH parameter from exportpath. */
+     token = strtok_r(exportpath, "(", &savPtr);
+diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
+index e156d4d..ed855ca 100644
+--- a/xlators/protocol/client/src/client.c
++++ b/xlators/protocol/client/src/client.c
+@@ -1222,9 +1222,12 @@ client_set_remote_options(char *value, xlator_t *this)
+     char *remote_port_str = NULL;
+     char *tmp = NULL;
+     int remote_port = 0;
+-    int ret = 0;
++    int ret = -1;
+ 
+     dup_value = gf_strdup(value);
++    if (dup_value == NULL) {
++        goto out;
++    }
+     host = strtok_r(dup_value, ":", &tmp);
+     subvol = strtok_r(NULL, ":", &tmp);
+     remote_port_str = strtok_r(NULL, ":", &tmp);
+@@ -1238,6 +1241,7 @@ client_set_remote_options(char *value, xlator_t *this)
+         if (ret) {
+             gf_msg(this->name, GF_LOG_WARNING, 0, PC_MSG_DICT_SET_FAILED,
+                    "failed to set remote-host with %s", host);
++            GF_FREE(host_dup);
+             goto out;
+         }
+     }
+@@ -1252,6 +1256,7 @@ client_set_remote_options(char *value, xlator_t *this)
+         if (ret) {
+             gf_msg(this->name, GF_LOG_WARNING, 0, PC_MSG_DICT_SET_FAILED,
+                    "failed to set remote-host with %s", host);
++            GF_FREE(subvol_dup);
+             goto out;
+         }
+     }
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 949c799..2336add 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -390,6 +390,9 @@ _posix_get_marker_quota_contributions(posix_xattr_filler_t *filler, char *key)
+     int i = 0, ret = 0;
+ 
+     tmp_key = ptr = gf_strdup(key);
++    if (tmp_key == NULL) {
++        return -1;
++    }
+     for (i = 0; i < 4; i++) {
+         token = strtok_r(tmp_key, ".", &saveptr);
+         tmp_key = NULL;
+-- 
+1.8.3.1
+
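Most of the CIDs fixed above share one shape: the result of
gf_strdup() or strtok_r() is used without a NULL check, so a failed
allocation or a malformed input string dereferences NULL. A distilled
version of the hardened pattern, with plain libc names standing in for
gf_strdup()/GF_FREE() (a sketch, not any one of the patched call
sites):

#include <stdlib.h>
#include <string.h>

/* Parse "host:subvol"; the caller frees *host and *subvol. */
int
parse_host_subvol(const char *value, char **host, char **subvol)
{
    char *saveptr = NULL;
    char *dup = strdup(value);

    *host = *subvol = NULL;
    if (dup == NULL)            /* the allocation check the patch adds */
        return -1;

    char *tok = strtok_r(dup, ":", &saveptr);
    if (tok)                    /* check each token before using it */
        *host = strdup(tok);

    tok = strtok_r(NULL, ":", &saveptr);
    if (tok)
        *subvol = strdup(tok);  /* a missing field stays NULL */

    free(dup);
    return (*host != NULL) ? 0 : -1;
}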
diff --git a/SOURCES/0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch b/SOURCES/0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
new file mode 100644
index 0000000..341cfc1
--- /dev/null
+++ b/SOURCES/0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
@@ -0,0 +1,638 @@
+From d7c52ddd2cbadb1d9a55767c2f7fe6ba38d9a2ed Mon Sep 17 00:00:00 2001
+From: Sheetal Pamecha <spamecha@redhat.com>
+Date: Wed, 20 Nov 2019 12:42:12 +0530
+Subject: [PATCH 431/449] glusterd: check for same node while adding bricks in
+ disperse volume
+
+The optimal way to configure disperse and replicate volumes
+is to have every brick on a different node.
+
+The create operation already fails with a "not optimal" message
+unless the user passes 'force' to override this behavior. This
+patch implements the same check for the add-brick operation, to
+avoid the situation where all of the added bricks end up on the
+same host; the error can likewise be overridden with 'force'.
+
+> Upstream Patch Link: https://review.gluster.org/#/c/glusterfs/+/23729
+> fixes: #1047
+> Change-Id: I3ee9c97c1a14b73f4532893bc00187ef9355238b
+> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+
+BUG: 1524457
+Change-Id: I3ee9c97c1a14b73f4532893bc00187ef9355238b
+Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202621
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-brick-ops.c  |  20 +-
+ xlators/mgmt/glusterd/src/glusterd-utils.c      | 224 ++++++++++++++++++
+ xlators/mgmt/glusterd/src/glusterd-utils.h      |   4 +
+ xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 293 +++---------------------
+ 4 files changed, 276 insertions(+), 265 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+index c5141de..d424f31 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+@@ -21,7 +21,6 @@
+ #include "glusterd-messages.h"
+ #include "glusterd-server-quorum.h"
+ #include <glusterfs/run.h>
+-#include "glusterd-volgen.h"
+ #include <glusterfs/syscall.h>
+ #include <sys/signal.h>
+ 
+@@ -1575,6 +1574,25 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+ 
+     is_force = dict_get_str_boolean(dict, "force", _gf_false);
+ 
++    /* Check brick order if the volume type is replicate or disperse. If
++     * force at the end of command not given then check brick order.
++     */
++
++    if (!is_force) {
++        if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) ||
++            (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
++            ret = glusterd_check_brick_order(dict, msg, volinfo->type);
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
++                       "Not adding brick because of "
++                       "bad brick order. %s",
++                       msg);
++                *op_errstr = gf_strdup(msg);
++                goto out;
++            }
++        }
++    }
++
+     if (volinfo->replica_count < replica_count && !is_force) {
+         cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+         {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index a1299bc..14e23d1 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -14759,3 +14759,227 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo)
+         return _gf_true;
+     return _gf_false;
+ }
++
++static gf_ai_compare_t
++glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
++{
++    int ret = -1;
++    struct addrinfo *tmp1 = NULL;
++    struct addrinfo *tmp2 = NULL;
++    char firstip[NI_MAXHOST] = {0.};
++    char nextip[NI_MAXHOST] = {
++        0,
++    };
++
++    for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) {
++        ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST,
++                          NULL, 0, NI_NUMERICHOST);
++        if (ret)
++            return GF_AI_COMPARE_ERROR;
++        for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) {
++            ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip,
++                              NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
++            if (ret)
++                return GF_AI_COMPARE_ERROR;
++            if (!strcmp(firstip, nextip)) {
++                return GF_AI_COMPARE_MATCH;
++            }
++        }
++    }
++    return GF_AI_COMPARE_NO_MATCH;
++}
++
++/* Check for non optimal brick order for Replicate/Disperse :
++ * Checks if bricks belonging to a replicate or disperse
++ * volume are present on the same server
++ */
++int32_t
++glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type)
++{
++    int ret = -1;
++    int i = 0;
++    int j = 0;
++    int k = 0;
++    xlator_t *this = NULL;
++    addrinfo_list_t *ai_list = NULL;
++    addrinfo_list_t *ai_list_tmp1 = NULL;
++    addrinfo_list_t *ai_list_tmp2 = NULL;
++    char *brick = NULL;
++    char *brick_list = NULL;
++    char *brick_list_dup = NULL;
++    char *brick_list_ptr = NULL;
++    char *tmpptr = NULL;
++    char *volname = NULL;
++    int32_t brick_count = 0;
++    int32_t sub_count = 0;
++    struct addrinfo *ai_info = NULL;
++    char brick_addr[128] = {
++        0,
++    };
++    int addrlen = 0;
++
++    const char failed_string[2048] =
++        "Failed to perform brick order "
++        "check. Use 'force' at the end of the command"
++        " if you want to override this behavior. ";
++    const char found_string[2048] =
++        "Multiple bricks of a %s "
++        "volume are present on the same server. This "
++        "setup is not optimal. Bricks should be on "
++        "different nodes to have best fault tolerant "
++        "configuration. Use 'force' at the end of the "
++        "command if you want to override this "
++        "behavior. ";
++
++    this = THIS;
++
++    GF_ASSERT(this);
++
++    ai_list = MALLOC(sizeof(addrinfo_list_t));
++    ai_list->info = NULL;
++    CDS_INIT_LIST_HEAD(&ai_list->list);
++
++    ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++               "Unable to get volume name");
++        goto out;
++    }
++
++    ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++               "Bricks check : Could not "
++               "retrieve bricks list");
++        goto out;
++    }
++
++    ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++               "Bricks check : Could not "
++               "retrieve brick count");
++        goto out;
++    }
++
++    if (type != GF_CLUSTER_TYPE_DISPERSE) {
++        ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
++                              &sub_count);
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++                   "Bricks check : Could"
++                   " not retrieve replica count");
++            goto out;
++        }
++        gf_msg_debug(this->name, 0,
++                     "Replicate cluster type "
++                     "found. Checking brick order.");
++    } else {
++        ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"),
++                              &sub_count);
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++                   "Bricks check : Could"
++                   " not retrieve disperse count");
++            goto out;
++        }
++        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND,
++               "Disperse cluster type"
++               " found. Checking brick order.");
++    }
++    brick_list_dup = brick_list_ptr = gf_strdup(brick_list);
++    /* Resolve hostnames and get addrinfo */
++    while (i < brick_count) {
++        ++i;
++        brick = strtok_r(brick_list_dup, " \n", &tmpptr);
++        brick_list_dup = tmpptr;
++        if (brick == NULL)
++            goto check_failed;
++        tmpptr = strrchr(brick, ':');
++        if (tmpptr == NULL)
++            goto check_failed;
++        addrlen = strlen(brick) - strlen(tmpptr);
++        strncpy(brick_addr, brick, addrlen);
++        brick_addr[addrlen] = '\0';
++        ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info);
++        if (ret != 0) {
++            ret = 0;
++            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
++                   "unable to resolve host name for addr %s", brick_addr);
++            goto out;
++        }
++        ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t));
++        if (ai_list_tmp1 == NULL) {
++            ret = 0;
++            gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
++                   "failed to allocate "
++                   "memory");
++            freeaddrinfo(ai_info);
++            goto out;
++        }
++        ai_list_tmp1->info = ai_info;
++        cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list);
++        ai_list_tmp1 = NULL;
++    }
++
++    i = 0;
++    ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
++
++    /* Check for bad brick order */
++    while (i < brick_count) {
++        ++i;
++        ai_info = ai_list_tmp1->info;
++        ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t,
++                                      list);
++        if (0 == i % sub_count) {
++            j = 0;
++            continue;
++        }
++        ai_list_tmp2 = ai_list_tmp1;
++        k = j;
++        while (k < sub_count - 1) {
++            ++k;
++            ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info);
++            if (GF_AI_COMPARE_ERROR == ret)
++                goto check_failed;
++            if (GF_AI_COMPARE_MATCH == ret)
++                goto found_bad_brick_order;
++            ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next,
++                                          addrinfo_list_t, list);
++        }
++        ++j;
++    }
++    gf_msg_debug(this->name, 0, "Brick order okay");
++    ret = 0;
++    goto out;
++
++check_failed:
++    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL,
++           "Failed bad brick order check");
++    snprintf(err_str, sizeof(failed_string), failed_string);
++    ret = -1;
++    goto out;
++
++found_bad_brick_order:
++    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER,
++           "Bad brick order found");
++    if (type == GF_CLUSTER_TYPE_DISPERSE) {
++        snprintf(err_str, sizeof(found_string), found_string, "disperse");
++    } else {
++        snprintf(err_str, sizeof(found_string), found_string, "replicate");
++    }
++
++    ret = -1;
++out:
++    ai_list_tmp2 = NULL;
++    GF_FREE(brick_list_ptr);
++    cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list)
++    {
++        if (ai_list_tmp1->info)
++            freeaddrinfo(ai_list_tmp1->info);
++        free(ai_list_tmp2);
++        ai_list_tmp2 = ai_list_tmp1;
++    }
++    free(ai_list_tmp2);
++    return ret;
++}
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
+index ead16b2..e2e2454 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
+@@ -881,4 +881,8 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo);
+ 
+ char *
+ search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
++
++int32_t
++glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type);
++
+ #endif
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+index 93042ab..8da2ff3 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+@@ -41,240 +41,6 @@
+ #define glusterd_op_start_volume_args_get(dict, volname, flags)                \
+     glusterd_op_stop_volume_args_get(dict, volname, flags)
+ 
+-gf_ai_compare_t
+-glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
+-{
+-    int ret = -1;
+-    struct addrinfo *tmp1 = NULL;
+-    struct addrinfo *tmp2 = NULL;
+-    char firstip[NI_MAXHOST] = {0.};
+-    char nextip[NI_MAXHOST] = {
+-        0,
+-    };
+-
+-    for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) {
+-        ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST,
+-                          NULL, 0, NI_NUMERICHOST);
+-        if (ret)
+-            return GF_AI_COMPARE_ERROR;
+-        for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) {
+-            ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip,
+-                              NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
+-            if (ret)
+-                return GF_AI_COMPARE_ERROR;
+-            if (!strcmp(firstip, nextip)) {
+-                return GF_AI_COMPARE_MATCH;
+-            }
+-        }
+-    }
+-    return GF_AI_COMPARE_NO_MATCH;
+-}
+-
+-/* Check for non optimal brick order for replicate :
+- * Checks if bricks belonging to a replicate volume
+- * are present on the same server
+- */
+-int32_t
+-glusterd_check_brick_order(dict_t *dict, char *err_str)
+-{
+-    int ret = -1;
+-    int i = 0;
+-    int j = 0;
+-    int k = 0;
+-    xlator_t *this = NULL;
+-    addrinfo_list_t *ai_list = NULL;
+-    addrinfo_list_t *ai_list_tmp1 = NULL;
+-    addrinfo_list_t *ai_list_tmp2 = NULL;
+-    char *brick = NULL;
+-    char *brick_list = NULL;
+-    char *brick_list_dup = NULL;
+-    char *brick_list_ptr = NULL;
+-    char *tmpptr = NULL;
+-    char *volname = NULL;
+-    int32_t brick_count = 0;
+-    int32_t type = GF_CLUSTER_TYPE_NONE;
+-    int32_t sub_count = 0;
+-    struct addrinfo *ai_info = NULL;
+-    char brick_addr[128] = {
+-        0,
+-    };
+-    int addrlen = 0;
+-
+-    const char failed_string[2048] =
+-        "Failed to perform brick order "
+-        "check. Use 'force' at the end of the command"
+-        " if you want to override this behavior. ";
+-    const char found_string[2048] =
+-        "Multiple bricks of a %s "
+-        "volume are present on the same server. This "
+-        "setup is not optimal. Bricks should be on "
+-        "different nodes to have best fault tolerant "
+-        "configuration. Use 'force' at the end of the "
+-        "command if you want to override this "
+-        "behavior. ";
+-
+-    this = THIS;
+-
+-    GF_ASSERT(this);
+-
+-    ai_list = MALLOC(sizeof(addrinfo_list_t));
+-    ai_list->info = NULL;
+-    CDS_INIT_LIST_HEAD(&ai_list->list);
+-
+-    ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+-    if (ret) {
+-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-               "Unable to get volume name");
+-        goto out;
+-    }
+-
+-    ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
+-    if (ret) {
+-        snprintf(err_str, 512, "Unable to get type of volume %s", volname);
+-        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
+-               err_str);
+-        goto out;
+-    }
+-
+-    ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list);
+-    if (ret) {
+-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-               "Bricks check : Could not "
+-               "retrieve bricks list");
+-        goto out;
+-    }
+-
+-    ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
+-    if (ret) {
+-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-               "Bricks check : Could not "
+-               "retrieve brick count");
+-        goto out;
+-    }
+-
+-    if (type != GF_CLUSTER_TYPE_DISPERSE) {
+-        ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+-                              &sub_count);
+-        if (ret) {
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-                   "Bricks check : Could"
+-                   " not retrieve replica count");
+-            goto out;
+-        }
+-        gf_msg_debug(this->name, 0,
+-                     "Replicate cluster type "
+-                     "found. Checking brick order.");
+-    } else {
+-        ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"),
+-                              &sub_count);
+-        if (ret) {
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-                   "Bricks check : Could"
+-                   " not retrieve disperse count");
+-            goto out;
+-        }
+-        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND,
+-               "Disperse cluster type"
+-               " found. Checking brick order.");
+-    }
+-
+-    brick_list_dup = brick_list_ptr = gf_strdup(brick_list);
+-    /* Resolve hostnames and get addrinfo */
+-    while (i < brick_count) {
+-        ++i;
+-        brick = strtok_r(brick_list_dup, " \n", &tmpptr);
+-        brick_list_dup = tmpptr;
+-        if (brick == NULL)
+-            goto check_failed;
+-        tmpptr = strrchr(brick, ':');
+-        if (tmpptr == NULL)
+-            goto check_failed;
+-        addrlen = strlen(brick) - strlen(tmpptr);
+-        strncpy(brick_addr, brick, addrlen);
+-        brick_addr[addrlen] = '\0';
+-        ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info);
+-        if (ret != 0) {
+-            ret = 0;
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
+-                   "unable to resolve host name for addr %s", brick_addr);
+-            goto out;
+-        }
+-        ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t));
+-        if (ai_list_tmp1 == NULL) {
+-            ret = 0;
+-            gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+-                   "failed to allocate "
+-                   "memory");
+-            freeaddrinfo(ai_info);
+-            goto out;
+-        }
+-        ai_list_tmp1->info = ai_info;
+-        cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list);
+-        ai_list_tmp1 = NULL;
+-    }
+-
+-    i = 0;
+-    ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
+-
+-    /* Check for bad brick order */
+-    while (i < brick_count) {
+-        ++i;
+-        ai_info = ai_list_tmp1->info;
+-        ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t,
+-                                      list);
+-        if (0 == i % sub_count) {
+-            j = 0;
+-            continue;
+-        }
+-        ai_list_tmp2 = ai_list_tmp1;
+-        k = j;
+-        while (k < sub_count - 1) {
+-            ++k;
+-            ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info);
+-            if (GF_AI_COMPARE_ERROR == ret)
+-                goto check_failed;
+-            if (GF_AI_COMPARE_MATCH == ret)
+-                goto found_bad_brick_order;
+-            ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next,
+-                                          addrinfo_list_t, list);
+-        }
+-        ++j;
+-    }
+-    gf_msg_debug(this->name, 0, "Brick order okay");
+-    ret = 0;
+-    goto out;
+-
+-check_failed:
+-    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL,
+-           "Failed bad brick order check");
+-    snprintf(err_str, sizeof(failed_string), failed_string);
+-    ret = -1;
+-    goto out;
+-
+-found_bad_brick_order:
+-    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER,
+-           "Bad brick order found");
+-    if (type == GF_CLUSTER_TYPE_DISPERSE) {
+-        snprintf(err_str, sizeof(found_string), found_string, "disperse");
+-    } else {
+-        snprintf(err_str, sizeof(found_string), found_string, "replicate");
+-    }
+-
+-    ret = -1;
+-out:
+-    ai_list_tmp2 = NULL;
+-    GF_FREE(brick_list_ptr);
+-    cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list)
+-    {
+-        if (ai_list_tmp1->info)
+-            freeaddrinfo(ai_list_tmp1->info);
+-        free(ai_list_tmp2);
+-        ai_list_tmp2 = ai_list_tmp1;
+-    }
+-    free(ai_list_tmp2);
+-    return ret;
+-}
+-
+ int
+ __glusterd_handle_create_volume(rpcsvc_request_t *req)
+ {
+@@ -1337,6 +1103,35 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
+         }
+     }
+ 
++    /*Check brick order if the volume type is replicate or disperse. If
++     * force at the end of command not given then check brick order.
++     */
++    if (is_origin_glusterd(dict)) {
++        ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
++        if (ret) {
++            snprintf(msg, sizeof(msg),
++                     "Unable to get type of "
++                     "volume %s",
++                     volname);
++            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
++                   msg);
++            goto out;
++        }
++
++        if (!is_force) {
++            if ((type == GF_CLUSTER_TYPE_REPLICATE) ||
++                (type == GF_CLUSTER_TYPE_DISPERSE)) {
++                ret = glusterd_check_brick_order(dict, msg, type);
++                if (ret) {
++                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
++                           "Not creating volume because of "
++                           "bad brick order");
++                    goto out;
++                }
++            }
++        }
++    }
++
+     while (i < brick_count) {
+         i++;
+         brick = strtok_r(brick_list, " \n", &tmpptr);
+@@ -1423,36 +1218,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
+         brick_info = NULL;
+     }
+ 
+-    /*Check brick order if the volume type is replicate or disperse. If
+-     * force at the end of command not given then check brick order.
+-     */
+-    if (is_origin_glusterd(dict)) {
+-        ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
+-        if (ret) {
+-            snprintf(msg, sizeof(msg),
+-                     "Unable to get type of "
+-                     "volume %s",
+-                     volname);
+-            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
+-                   msg);
+-            goto out;
+-        }
+-
+-        if (!is_force) {
+-            if ((type == GF_CLUSTER_TYPE_REPLICATE) ||
+-                (type == GF_CLUSTER_TYPE_DISPERSE)) {
+-                ret = glusterd_check_brick_order(dict, msg);
+-                if (ret) {
+-                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+-                           "Not "
+-                           "creating volume because of "
+-                           "bad brick order");
+-                    goto out;
+-                }
+-            }
+-        }
+-    }
+-
+     ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"),
+                           local_brick_count);
+     if (ret) {
+-- 
+1.8.3.1
+
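The pairing loop in glusterd_check_brick_order() is terse: bricks are
laid out consecutively, sub_count at a time, so each brick only needs
to be compared against the later bricks of its own replica/disperse
group, and 'i % sub_count == 0' marks a group boundary. A standalone
illustration of the same grouping, with hostname comparison standing
in for the getaddrinfo() resolution (host names invented):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* replica-3 volume: two groups of three bricks */
    const char *hosts[] = {"n1", "n2", "n3", "n1", "n2", "n2"};
    const int brick_count = 6, sub_count = 3;

    for (int i = 0; i < brick_count; i++) {
        int group_end = (i / sub_count + 1) * sub_count;
        for (int j = i + 1; j < group_end; j++) {
            if (strcmp(hosts[i], hosts[j]) == 0) {
                printf("bad order: bricks %d and %d share host %s\n",
                       i, j, hosts[i]);
                return 1;   /* glusterd would demand 'force' here */
            }
        }
    }
    printf("brick order okay\n");
    return 0;
}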
diff --git a/SOURCES/0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch b/SOURCES/0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
new file mode 100644
index 0000000..ef589de
--- /dev/null
+++ b/SOURCES/0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
@@ -0,0 +1,503 @@
+From aa215163cb7d806dc98bef2386a4e282a5e54a31 Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee <amukherj@redhat.com>
+Date: Thu, 25 Apr 2019 12:00:52 +0530
+Subject: [PATCH 432/449] glusterd: Fix coverity defects & put coverity
+ annotations
+
+Along with fixing a few defects, this adds the required annotations for
+the defects marked ignore/false positive/intentional in the coverity
+defect sheet. This should stop the per-component graph on the coverity
+glusterfs web page from showing those defects as open.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/22619/
+> Updates: bz#789278
+> Change-Id: I19461dc3603a3bd8f88866a1ab3db43d783af8e4
+> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
+
+BUG: 1787310
+Change-Id: I19461dc3603a3bd8f88866a1ab3db43d783af8e4
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202631
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-brick-ops.c     |  7 +++--
+ xlators/mgmt/glusterd/src/glusterd-geo-rep.c       |  3 +-
+ .../glusterd/src/glusterd-gfproxyd-svc-helper.c    |  2 +-
+ xlators/mgmt/glusterd/src/glusterd-handler.c       |  8 ++++-
+ xlators/mgmt/glusterd/src/glusterd-mountbroker.c   |  5 ++-
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c         |  8 +++++
+ xlators/mgmt/glusterd/src/glusterd-peer-utils.c    |  2 ++
+ xlators/mgmt/glusterd/src/glusterd-server-quorum.c |  1 +
+ xlators/mgmt/glusterd/src/glusterd-store.c         |  4 ---
+ xlators/mgmt/glusterd/src/glusterd-svc-helper.c    |  4 +--
+ xlators/mgmt/glusterd/src/glusterd-syncop.c        |  1 +
+ .../mgmt/glusterd/src/glusterd-tierd-svc-helper.c  |  4 +--
+ xlators/mgmt/glusterd/src/glusterd-utils.c         |  9 ++++--
+ xlators/mgmt/glusterd/src/glusterd-volgen.c        | 36 +++++++++++++---------
+ 14 files changed, 63 insertions(+), 31 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+index d424f31..121346c 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+@@ -2032,7 +2032,6 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
+         case GF_OP_CMD_STATUS:
+             ret = 0;
+             goto out;
+-
+         case GF_OP_CMD_DETACH_START:
+             if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+                 snprintf(msg, sizeof(msg),
+@@ -2044,7 +2043,7 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
+                        errstr);
+                 goto out;
+             }
+-
++        /* Fall through */
+         case GF_OP_CMD_START: {
+             if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
+                 dict_getn(dict, "replica-count", SLEN("replica-count"))) {
+@@ -2259,7 +2258,8 @@ out:
+         if (op_errstr)
+             *op_errstr = errstr;
+     }
+-
++    if (!op_errstr && errstr)
++        GF_FREE(errstr);
+     return ret;
+ }
+ 
+@@ -2687,6 +2687,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
+              * Update defrag_cmd as well or it will only be done
+              * for nodes on which the brick to be removed exists.
+              */
++            /* coverity[MIXED_ENUMS] */
+             volinfo->rebal.defrag_cmd = cmd;
+             volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
+             ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
+diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+index 85c06c1..5a91df4 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
++++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+@@ -4107,6 +4107,7 @@ gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave,
+ 
+ out:
+     sys_close(pfd);
++    /* coverity[INTEGER_OVERFLOW] */
+     return ret;
+ }
+ 
+@@ -4183,7 +4184,7 @@ stop_gsync(char *master, char *slave, char **msg, char *conf_path,
+ 
+ out:
+     sys_close(pfd);
+-
++    /* coverity[INTEGER_OVERFLOW] */
+     return ret;
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
+index 67e3f41..e338bf4 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
++++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
+@@ -111,7 +111,7 @@ glusterd_svc_get_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *svc_name,
+         goto out;
+     }
+ 
+-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
++    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+     tmp_fd = mkstemp(*tmpvol);
+     if (tmp_fd < 0) {
+         gf_msg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
+index 2e73c98..1f31e72 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
+@@ -930,6 +930,7 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
+ 
+     op_ctx = dict_new();
+     if (!op_ctx) {
++        ret = -1;
+         gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+                "Unable to set new dict");
+         goto out;
+@@ -956,6 +957,9 @@ out:
+     glusterd_friend_sm();
+     glusterd_op_sm();
+ 
++    if (ret)
++        GF_FREE(ctx);
++
+     return ret;
+ }
+ 
+@@ -3470,6 +3474,7 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
+     GF_ASSERT(this);
+ 
+     GF_ASSERT(options);
++    GF_VALIDATE_OR_GOTO(this->name, rpc, out);
+ 
+     if (force && rpc && *rpc) {
+         (void)rpc_clnt_unref(*rpc);
+@@ -3482,7 +3487,6 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
+         goto out;
+ 
+     ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
+-    *rpc = new_rpc;
+     if (ret)
+         goto out;
+     ret = rpc_clnt_start(new_rpc);
+@@ -3491,6 +3495,8 @@ out:
+         if (new_rpc) {
+             (void)rpc_clnt_unref(new_rpc);
+         }
++    } else {
++        *rpc = new_rpc;
+     }
+ 
+     gf_msg_debug(this->name, 0, "returning %d", ret);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
+index 332ddef..c017ccb 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
++++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
+@@ -334,7 +334,10 @@ make_ghadoop_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user,
+     if (ret == -1)
+         return ret;
+ 
+-    return parse_mount_pattern_desc(mspec, hadoop_mnt_desc);
++    ret = parse_mount_pattern_desc(mspec, hadoop_mnt_desc);
++    GF_FREE(hadoop_mnt_desc);
++
++    return ret;
+ }
+ 
+ static gf_boolean_t
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index 6475611..46fc607 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -2467,6 +2467,7 @@ glusterd_start_bricks(glusterd_volinfo_t *volinfo)
+         if (!brickinfo->start_triggered) {
+             pthread_mutex_lock(&brickinfo->restart_mutex);
+             {
++                /* coverity[SLEEP] */
+                 ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
+                                            _gf_false);
+             }
+@@ -3466,6 +3467,7 @@ _add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
+ 
+     switch (op) {
+         case GD_OP_REMOVE_TIER_BRICK:
++        /* Fall through */
+         case GD_OP_REMOVE_BRICK:
+             snprintf(key, sizeof(key), "task%d", index);
+             ret = _add_remove_bricks_to_dict(dict, volinfo, key);
+@@ -7550,6 +7552,7 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
+     glusterd_op_t op = GD_OP_NONE;
+     glusterd_req_ctx_t *req_ctx = NULL;
+     char *op_errstr = NULL;
++    gf_boolean_t free_req_ctx = _gf_false;
+ 
+     this = THIS;
+     priv = this->private;
+@@ -7558,6 +7561,9 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
+         req_ctx = ctx;
+     } else {
+         req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
++        if (!req_ctx)
++            goto out;
++        free_req_ctx = _gf_true;
+         op = glusterd_op_get_op();
+         req_ctx->op = op;
+         gf_uuid_copy(req_ctx->uuid, MY_UUID);
+@@ -7588,6 +7594,8 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
+     }
+ 
+ out:
++    if (ret && req_ctx && free_req_ctx)
++        GF_FREE(req_ctx);
+     gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ 
+     return ret;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+index 8c1feeb..1a65359 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+@@ -82,6 +82,7 @@ glusterd_peerinfo_cleanup(glusterd_peerinfo_t *peerinfo)
+     call_rcu(&peerinfo->rcu_head.head, glusterd_peerinfo_destroy);
+ 
+     if (quorum_action)
++        /* coverity[SLEEP] */
+         glusterd_do_quorum_action();
+     return 0;
+ }
+@@ -358,6 +359,7 @@ glusterd_uuid_to_hostname(uuid_t uuid)
+ 
+     if (!gf_uuid_compare(MY_UUID, uuid)) {
+         hostname = gf_strdup("localhost");
++        return hostname;
+     }
+     RCU_READ_LOCK;
+     if (!cds_list_empty(&priv->peers)) {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+index fd334e6..f378187 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
++++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+@@ -372,6 +372,7 @@ glusterd_do_volume_quorum_action(xlator_t *this, glusterd_volinfo_t *volinfo,
+             if (!brickinfo->start_triggered) {
+                 pthread_mutex_lock(&brickinfo->restart_mutex);
+                 {
++                    /* coverity[SLEEP] */
+                     ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
+                                                _gf_false);
+                 }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
+index b3b5ee9..4fa8116 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-store.c
++++ b/xlators/mgmt/glusterd/src/glusterd-store.c
+@@ -4764,10 +4764,6 @@ glusterd_store_retrieve_peers(xlator_t *this)
+          */
+         address = cds_list_entry(peerinfo->hostnames.next,
+                                  glusterd_peer_hostname_t, hostname_list);
+-        if (!address) {
+-            ret = -1;
+-            goto next;
+-        }
+         peerinfo->hostname = gf_strdup(address->hostname);
+ 
+         ret = glusterd_friend_add_from_peerinfo(peerinfo, 1, NULL);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+index ca19a75..1d1f42d 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+@@ -179,7 +179,7 @@ glusterd_svc_check_volfile_identical(char *svc_name,
+         goto out;
+     }
+ 
+-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
++    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+     tmp_fd = mkstemp(tmpvol);
+     if (tmp_fd < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+@@ -241,7 +241,7 @@ glusterd_svc_check_topology_identical(char *svc_name,
+         goto out;
+     }
+ 
+-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
++    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+     tmpfd = mkstemp(tmpvol);
+     if (tmpfd < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+index 618d8bc..9e47d14 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+@@ -1752,6 +1752,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+                 if (dict_get(op_ctx, "client-count"))
+                     break;
+             }
++            /* coverity[MIXED_ENUMS] */
+         } else if (cmd == GF_OP_CMD_DETACH_START) {
+             op = GD_OP_REMOVE_BRICK;
+             dict_del(req_dict, "rebalance-command");
+diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
+index 922eae7..59843a0 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
++++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
+@@ -116,7 +116,7 @@ glusterd_svc_check_tier_volfile_identical(char *svc_name,
+         goto out;
+     }
+ 
+-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
++    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+     tmp_fd = mkstemp(tmpvol);
+     if (tmp_fd < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+@@ -177,7 +177,7 @@ glusterd_svc_check_tier_topology_identical(char *svc_name,
+         goto out;
+     }
+ 
+-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
++    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+     tmpfd = mkstemp(tmpvol);
+     if (tmpfd < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 14e23d1..8b0fc9a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -908,6 +908,7 @@ glusterd_create_sub_tier_volinfo(glusterd_volinfo_t *volinfo,
+         (*dup_volinfo)->brick_count = tier_info->cold_brick_count;
+     }
+ out:
++    /* coverity[REVERSE_NULL] */
+     if (ret && *dup_volinfo) {
+         glusterd_volinfo_delete(*dup_volinfo);
+         *dup_volinfo = NULL;
+@@ -2738,6 +2739,7 @@ glusterd_readin_file(const char *filepath, int *line_count)
+     /* Reduce allocation to minimal size.  */
+     p = GF_REALLOC(lines, (counter + 1) * sizeof(char *));
+     if (!p) {
++        /* coverity[TAINTED_SCALAR] */
+         free_lines(lines, counter);
+         lines = NULL;
+         goto out;
+@@ -6782,6 +6784,7 @@ glusterd_restart_bricks(void *opaque)
+                 if (!brickinfo->start_triggered) {
+                     pthread_mutex_lock(&brickinfo->restart_mutex);
+                     {
++                        /* coverity[SLEEP] */
+                         glusterd_brick_start(volinfo, brickinfo, _gf_false,
+                                              _gf_false);
+                     }
+@@ -8886,7 +8889,7 @@ glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr)
+     kill(pid, SIGUSR1);
+ 
+     sleep(1);
+-
++    /* coverity[TAINTED_STRING] */
+     sys_unlink(dumpoptions_path);
+     ret = 0;
+ out:
+@@ -9012,6 +9015,7 @@ glusterd_quotad_statedump(char *options, int option_cnt, char **op_errstr)
+ 
+     sleep(1);
+ 
++    /* coverity[TAINTED_STRING] */
+     sys_unlink(dumpoptions_path);
+     ret = 0;
+ out:
+@@ -13423,7 +13427,7 @@ glusterd_get_global_options_for_all_vols(rpcsvc_request_t *req, dict_t *ctx,
+         if (key_fixed)
+             key = key_fixed;
+     }
+-
++    /* coverity[CONSTANT_EXPRESSION_RESULT] */
+     ALL_VOLUME_OPTION_CHECK("all", _gf_true, key, ret, op_errstr, out);
+ 
+     for (i = 0; valid_all_vol_opts[i].option; i++) {
+@@ -14153,6 +14157,7 @@ glusterd_disallow_op_for_tier(glusterd_volinfo_t *volinfo, glusterd_op_t op,
+             break;
+         case GD_OP_REMOVE_BRICK:
+             switch (cmd) {
++                /* coverity[MIXED_ENUMS] */
+                 case GF_DEFRAG_CMD_DETACH_START:
+                 case GF_OP_CMD_DETACH_COMMIT_FORCE:
+                 case GF_OP_CMD_DETACH_COMMIT:
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 539e8a5..6852f8e 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -322,7 +322,7 @@ volopt_trie_cbk(char *word, void *param)
+ }
+ 
+ static int
+-process_nodevec(struct trienodevec *nodevec, char **hint)
++process_nodevec(struct trienodevec *nodevec, char **outputhint, char *inputhint)
+ {
+     int ret = 0;
+     char *hint1 = NULL;
+@@ -331,14 +331,14 @@ process_nodevec(struct trienodevec *nodevec, char **hint)
+     trienode_t **nodes = nodevec->nodes;
+ 
+     if (!nodes[0]) {
+-        *hint = NULL;
++        *outputhint = NULL;
+         return 0;
+     }
+ 
+ #if 0
+         /* Limit as in git */
+         if (trienode_get_dist (nodes[0]) >= 6) {
+-                *hint = NULL;
++                *outputhint = NULL;
+                 return 0;
+         }
+ #endif
+@@ -347,23 +347,30 @@ process_nodevec(struct trienodevec *nodevec, char **hint)
+         return -1;
+ 
+     if (nodevec->cnt < 2 || !nodes[1]) {
+-        *hint = hint1;
++        *outputhint = hint1;
+         return 0;
+     }
+ 
+-    if (trienode_get_word(nodes[1], &hint2))
++    if (trienode_get_word(nodes[1], &hint2)) {
++        GF_FREE(hint1);
+         return -1;
++    }
+ 
+-    if (*hint)
+-        hintinfx = *hint;
+-    ret = gf_asprintf(hint, "%s or %s%s", hint1, hintinfx, hint2);
++    if (inputhint)
++        hintinfx = inputhint;
++    ret = gf_asprintf(outputhint, "%s or %s%s", hint1, hintinfx, hint2);
+     if (ret > 0)
+         ret = 0;
++    if (hint1)
++        GF_FREE(hint1);
++    if (hint2)
++        GF_FREE(hint2);
+     return ret;
+ }
+ 
+ static int
+-volopt_trie_section(int lvl, char **patt, char *word, char **hint, int hints)
++volopt_trie_section(int lvl, char **patt, char *word, char **outputhint,
++                    char *inputhint, int hints)
+ {
+     trienode_t *nodes[] = {NULL, NULL};
+     struct trienodevec nodevec = {nodes, 2};
+@@ -384,7 +391,7 @@ volopt_trie_section(int lvl, char **patt, char *word, char **hint, int hints)
+     nodevec.cnt = hints;
+     ret = trie_measure_vec(trie, word, &nodevec);
+     if (!ret && nodevec.nodes[0])
+-        ret = process_nodevec(&nodevec, hint);
++        ret = process_nodevec(&nodevec, outputhint, inputhint);
+ 
+     trie_destroy(trie);
+ 
+@@ -396,6 +403,7 @@ volopt_trie(char *key, char **hint)
+ {
+     char *patt[] = {NULL};
+     char *fullhint = NULL;
++    char *inputhint = NULL;
+     char *dot = NULL;
+     char *dom = NULL;
+     int len = 0;
+@@ -405,7 +413,7 @@ volopt_trie(char *key, char **hint)
+ 
+     dot = strchr(key, '.');
+     if (!dot)
+-        return volopt_trie_section(1, patt, key, hint, 2);
++        return volopt_trie_section(1, patt, key, hint, inputhint, 2);
+ 
+     len = dot - key;
+     dom = gf_strdup(key);
+@@ -413,7 +421,7 @@ volopt_trie(char *key, char **hint)
+         return -1;
+     dom[len] = '\0';
+ 
+-    ret = volopt_trie_section(0, NULL, dom, patt, 1);
++    ret = volopt_trie_section(0, NULL, dom, patt, inputhint, 1);
+     GF_FREE(dom);
+     if (ret) {
+         patt[0] = NULL;
+@@ -422,8 +430,8 @@ volopt_trie(char *key, char **hint)
+     if (!patt[0])
+         goto out;
+ 
+-    *hint = "...";
+-    ret = volopt_trie_section(1, patt, dot + 1, hint, 2);
++    inputhint = "...";
++    ret = volopt_trie_section(1, patt, dot + 1, hint, inputhint, 2);
+     if (ret)
+         goto out;
+     if (*hint) {
+-- 
+1.8.3.1
+
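
The glusterd_rpc_create hunk above moves the `*rpc = new_rpc` assignment into the success path so a caller never observes a half-initialized handle. A minimal standalone sketch of that "publish the out-parameter only on success" pattern follows; all names here (`conn_create` and friends) are illustrative stand-ins, not glusterd APIs.

    /* Assign the out-parameter only after full initialization; clean up
     * the partially built object on every error path. */
    #include <stdio.h>
    #include <stdlib.h>

    struct conn { int started; };

    static int conn_start(struct conn *c) { c->started = 1; return 0; }

    static int conn_create(struct conn **out)
    {
        struct conn *c = calloc(1, sizeof(*c));
        int ret = -1;

        if (!c)
            goto out;

        ret = conn_start(c);

    out:
        if (ret) {
            free(c);      /* cleanup on the error path only */
        } else {
            *out = c;     /* publish only a fully initialized object */
        }
        return ret;
    }

    int main(void)
    {
        struct conn *c = NULL;
        if (conn_create(&c) == 0) {
            printf("started=%d\n", c->started);
            free(c);
        }
        return 0;
    }
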
diff --git a/SOURCES/0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch b/SOURCES/0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
new file mode 100644
index 0000000..d980eb5
--- /dev/null
+++ b/SOURCES/0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
@@ -0,0 +1,54 @@
+From 0e3871a57b7a621444dc5cfd49935a1e412f6436 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Mon, 8 Jun 2020 13:27:50 +0530
+Subject: [PATCH 433/449] socket: Resolve ssl_ctx leak for a brick while only
+ mgmt SSL is enabled
+
+Problem: While only mgmt SSL is enabled for a brick process, the use_ssl
+         flag is false for that process, and the socket APIs clean up
+         ssl_ctx only when both use_ssl and ssl_ctx are valid.
+
+Solution: To avoid the leak, check only ssl_ctx; if it is valid, clean
+          up ssl_ctx.
+
+> Fixes: #1196
+> Change-Id: I2f4295478f4149dcb7d608ea78ee5104f28812c3
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit 9873baee34afdf0c20f5fc98a7dbf2a9f07447e2)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24366/)
+
+BUG: 1810924
+Change-Id: I2f4295478f4149dcb7d608ea78ee5104f28812c3
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202625
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ rpc/rpc-transport/socket/src/socket.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
+index 226b2e2..54cd5df 100644
+--- a/rpc/rpc-transport/socket/src/socket.c
++++ b/rpc/rpc-transport/socket/src/socket.c
+@@ -1163,7 +1163,7 @@ __socket_reset(rpc_transport_t *this)
+         SSL_free(priv->ssl_ssl);
+         priv->ssl_ssl = NULL;
+     }
+-    if (priv->use_ssl && priv->ssl_ctx) {
++    if (priv->ssl_ctx) {
+         SSL_CTX_free(priv->ssl_ctx);
+         priv->ssl_ctx = NULL;
+     }
+@@ -4685,7 +4685,7 @@ fini(rpc_transport_t *this)
+             SSL_free(priv->ssl_ssl);
+             priv->ssl_ssl = NULL;
+         }
+-        if (priv->use_ssl && priv->ssl_ctx) {
++        if (priv->ssl_ctx) {
+             SSL_CTX_free(priv->ssl_ctx);
+             priv->ssl_ctx = NULL;
+         }
+-- 
+1.8.3.1
+
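
The two hunks above drop the `use_ssl` guard so the TLS context is released whenever it was allocated, which is exactly the management-only-SSL case the commit message describes. A minimal sketch of the leak and its fix, using illustrative stand-in types rather than the real rpc-transport structures (the real code frees with SSL_CTX_free):

    #include <stdlib.h>

    struct transport_priv {
        int use_ssl;      /* data connection uses TLS */
        void *ssl_ctx;    /* allocated even for mgmt-only TLS */
    };

    static void priv_reset(struct transport_priv *priv)
    {
        /* Before the fix: if (priv->use_ssl && priv->ssl_ctx) — this
         * leaks the context when only the mgmt connection is encrypted. */
        if (priv->ssl_ctx) {
            free(priv->ssl_ctx);
            priv->ssl_ctx = NULL;
        }
    }

    int main(void)
    {
        struct transport_priv priv = { .use_ssl = 0,
                                       .ssl_ctx = malloc(16) };
        priv_reset(&priv);   /* frees despite use_ssl == 0 */
        return 0;
    }
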
diff --git a/SOURCES/0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch b/SOURCES/0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
new file mode 100644
index 0000000..3f038a3
--- /dev/null
+++ b/SOURCES/0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
@@ -0,0 +1,39 @@
+From 2ea3fc203671429d0aa9994e5bbd57f6a604523d Mon Sep 17 00:00:00 2001
+From: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+Date: Mon, 28 Oct 2019 17:43:28 +0800
+Subject: [PATCH 434/449] glusterd/ganesha: fix Coverity CID 1405785
+
+To avoid double free
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23630/
+> updates: bz#789278
+> Change-Id: I15ae54ed696295d4cb015668722e77983b062ccb
+> Signed-off-by: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+
+BUG: 1787310
+Change-Id: I15ae54ed696295d4cb015668722e77983b062ccb
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202623
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+index 06f028f..caba34f 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+@@ -659,8 +659,8 @@ tear_down_cluster(gf_boolean_t run_teardown)
+                          "Failed to close dir %s. Reason :"
+                          " %s",
+                          CONFDIR, strerror(errno));
+-            goto exit;
+         }
++        goto exit;
+     }
+ 
+ out:
+-- 
+1.8.3.1
+
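
The fix above moves `goto exit` out of the closedir error branch so the early-exit label is reached in every case, instead of falling through into cleanup that would release the same storage a second time. A hedged sketch of that control-flow shape, with illustrative names and paths:

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int teardown(void)
    {
        int ret = -1;
        DIR *dir = NULL;
        char *arg = strdup("teardown-arg");

        if (!arg)
            goto out;

        dir = opendir("/etc");
        if (dir) {
            free(arg);     /* this path already released arg ... */
            if (closedir(dir) == -1)
                fprintf(stderr, "failed to close dir /etc\n");
            ret = 0;
            goto exit;     /* ... so skip 'out' whether or not closedir
                            * failed; before the fix this goto sat only
                            * inside the closedir error branch */
        }

    out:
        free(arg);         /* common cleanup; must not run twice */
    exit:
        return ret;
    }

    int main(void) { return teardown(); }
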
diff --git a/SOURCES/0435-glusterd-coverity-fix.patch b/SOURCES/0435-glusterd-coverity-fix.patch
new file mode 100644
index 0000000..f587107
--- /dev/null
+++ b/SOURCES/0435-glusterd-coverity-fix.patch
@@ -0,0 +1,38 @@
+From 9425fd5a49a17a8f91c13632ae055a6510b0b44c Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Fri, 17 May 2019 14:27:58 +0530
+Subject: [PATCH 435/449] glusterd: coverity fix
+
+CID: 1401345 - Unused value
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/22737/
+> updates: bz#789278
+> Change-Id: I6b8f2611151ce0174042384b7632019c312ebae3
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1787310
+Change-Id: I6b8f2611151ce0174042384b7632019c312ebae3
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202622
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 8b0fc9a..2eb2a76 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -3740,7 +3740,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+          * deleting a volume, so we shouldn't be trying to create a
+          * fresh volume here which would lead to a stale entry
+          */
+-        if (stage_deleted == 0)
++        if (!ret && stage_deleted == 0)
+             *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
+         ret = 0;
+         goto out;
+-- 
+1.8.3.1
+
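
The one-line change above gates the status update on `ret`, because the dict lookup can fail and leave `stage_deleted` at whatever value it held before the call. A small sketch of the pattern, using a hypothetical lookup function in place of the dict API:

    #include <stdio.h>

    /* pretend lookup: returns 0 on success and fills *val */
    static int lookup_int(int present, int *val)
    {
        if (!present)
            return -1;   /* *val deliberately left untouched */
        *val = 0;
        return 0;
    }

    int main(void)
    {
        int stage_deleted = 0;   /* same default the caller starts with */
        int ret = lookup_int(0, &stage_deleted);

        /* Gate on ret: trust stage_deleted only if the lookup succeeded. */
        if (!ret && stage_deleted == 0)
            printf("volume update required\n");
        return 0;
    }
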
diff --git a/SOURCES/0436-glusterd-coverity-fixes.patch b/SOURCES/0436-glusterd-coverity-fixes.patch
new file mode 100644
index 0000000..799681f
--- /dev/null
+++ b/SOURCES/0436-glusterd-coverity-fixes.patch
@@ -0,0 +1,187 @@
+From 179213798496448316547506da65dbd9fd741dfa Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee <amukherj@redhat.com>
+Date: Wed, 24 Apr 2019 22:02:51 +0530
+Subject: [PATCH 436/449] glusterd: coverity fixes
+
+Addresses the following:
+
+* CID 1124776:  Resource leaks  (RESOURCE_LEAK) - Variable "aa" going out
+of scope leaks the storage it points to in glusterd-volgen.c
+
+* Bunch of CHECKED_RETURN defects in the callers of synctask_barrier_init
+
+* CID 1400742:  Program hangs  (LOCK) - adding annotation to fix this
+false positive
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/22615
+> Updates: bz#789278
+> Change-Id: I02f16e7eeb8c5cf72f7d0b29d00df4f03b3718b3
+> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
+
+BUG: 1787310
+Change-Id: I02f16e7eeb8c5cf72f7d0b29d00df4f03b3718b3
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202626
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-handler.c |  6 ++++++
+ xlators/mgmt/glusterd/src/glusterd-mgmt.c    | 24 +++++++++++++++++++-----
+ xlators/mgmt/glusterd/src/glusterd-syncop.c  | 22 ++++++++++++++++++----
+ xlators/mgmt/glusterd/src/glusterd-volgen.c  |  5 +++--
+ 4 files changed, 46 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
+index 1f31e72..b8799ab 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
+@@ -3458,6 +3458,12 @@ glusterd_friend_remove(uuid_t uuid, char *hostname)
+     ret = glusterd_peerinfo_cleanup(peerinfo);
+ out:
+     gf_msg_debug(THIS->name, 0, "returning %d", ret);
++    /* We don't need to do a mutex unlock of peerinfo->delete_lock as the same
++     * will be anyway destroyed within glusterd_peerinfo_cleanup, coverity
++     * though cries about it
++     */
++    /* coverity[LOCK] */
++
+     return ret;
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+index a4915f3..1e185d7 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
++++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+@@ -757,7 +757,10 @@ glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
+ 
+     /* Sending mgmt_v3 lock req to other nodes in the cluster */
+     gd_syncargs_init(&args, NULL);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1108,7 +1111,10 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
+ 
+     /* Sending Pre Validation req to other nodes in the cluster */
+     gd_syncargs_init(&args, req_dict);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1458,7 +1464,10 @@ glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ 
+     /* Sending brick op req to other nodes in the cluster */
+     gd_syncargs_init(&args, op_ctx);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1722,7 +1731,9 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ 
+     /* Sending commit req to other nodes in the cluster */
+     gd_syncargs_init(&args, op_ctx);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1963,7 +1974,10 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ 
+     /* Sending Post Validation req to other nodes in the cluster */
+     gd_syncargs_init(&args, req_dict);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+index 9e47d14..c78983a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+@@ -1191,7 +1191,12 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
+     struct syncargs args = {0};
+ 
+     this = THIS;
+-    synctask_barrier_init((&args));
++    GF_VALIDATE_OR_GOTO("glusterd", this, out);
++
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1321,7 +1326,10 @@ stage_done:
+     }
+ 
+     gd_syncargs_init(&args, aggr_dict);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     RCU_READ_LOCK;
+@@ -1449,7 +1457,10 @@ commit_done:
+     }
+ 
+     gd_syncargs_init(&args, op_ctx);
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+     origin_glusterd = is_origin_glusterd(req_dict);
+ 
+@@ -1541,7 +1552,10 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
+         goto out;
+     }
+ 
+-    synctask_barrier_init((&args));
++    ret = synctask_barrier_init((&args));
++    if (ret)
++        goto out;
++
+     peer_cnt = 0;
+ 
+     if (cluster_lock) {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 6852f8e..16346e7 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -4808,9 +4808,10 @@ nfs_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
+ 
+         if (ret != -1) {
+             ret = gf_canonicalize_path(vme->value);
+-            if (ret)
++            if (ret) {
++                GF_FREE(aa);
+                 return -1;
+-
++            }
+             ret = xlator_set_option(xl, aa, ret, vme->value);
+             GF_FREE(aa);
+         }
+-- 
+1.8.3.1
+
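
Most of the hunks above consume the return value of `synctask_barrier_init` before touching the barrier. The same checked-return idea, sketched with a POSIX barrier since the glusterfs syncbarrier internals are out of scope here:

    #include <pthread.h>
    #include <stdio.h>

    static int barrier_init(pthread_barrier_t *b, unsigned count)
    {
        /* pthread_barrier_init returns non-zero on failure, e.g. EINVAL */
        return pthread_barrier_init(b, NULL, count);
    }

    int main(void)
    {
        pthread_barrier_t b;
        int ret = barrier_init(&b, 1);

        if (ret) {   /* bail out instead of waiting on garbage */
            fprintf(stderr, "barrier init failed: %d\n", ret);
            return 1;
        }
        pthread_barrier_wait(&b);
        pthread_barrier_destroy(&b);
        return 0;
    }
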
diff --git a/SOURCES/0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch b/SOURCES/0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
new file mode 100644
index 0000000..21fcd8a
--- /dev/null
+++ b/SOURCES/0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
@@ -0,0 +1,48 @@
+From ffd428d07036531b7ed98c7393b87490aaa223ec Mon Sep 17 00:00:00 2001
+From: Niels de Vos <ndevos@redhat.com>
+Date: Fri, 3 May 2019 09:18:31 +0200
+Subject: [PATCH 437/449] glusterd: prevent use-after-free in
+ glusterd_op_ac_send_brick_op()
+
+Coverity reported that GF_FREE(req_ctx) could be called 2x on req_ctx.
+
+> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/22656/
+> Change-Id: I9120686e5920de8c27688e10de0db6aa26292064
+> CID: 1401115
+> Updates: bz#789278
+> Signed-off-by: Niels de Vos <ndevos@redhat.com>
+
+BUG: 1787310
+Change-Id: I9120686e5920de8c27688e10de0db6aa26292064
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202619
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index 46fc607..1e84f5f 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -7575,7 +7575,6 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
+             if (op_errstr == NULL)
+                 gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+             opinfo.op_errstr = op_errstr;
+-            GF_FREE(req_ctx);
+             goto out;
+         }
+     }
+@@ -7594,7 +7593,7 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
+     }
+ 
+ out:
+-    if (ret && req_ctx && free_req_ctx)
++    if (ret && free_req_ctx)
+         GF_FREE(req_ctx);
+     gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ 
+-- 
+1.8.3.1
+
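
With the duplicate `GF_FREE(req_ctx)` removed, ownership is decided once by the `free_req_ctx` flag and released at a single point. A compact sketch of that borrowed-versus-owned pattern, with illustrative names:

    #include <stdlib.h>

    struct req_ctx { int op; };

    static int send_brick_op(struct req_ctx *ctx)
    {
        struct req_ctx *req_ctx = ctx;
        int free_req_ctx = 0;   /* we own req_ctx only if we created it */
        int ret = -1;

        if (!req_ctx) {
            req_ctx = calloc(1, sizeof(*req_ctx));
            if (!req_ctx)
                goto out;
            free_req_ctx = 1;
        }

        ret = 0;                /* ... build and dispatch the payload ... */

    out:
        /* a single release point replaces the scattered frees */
        if (ret && free_req_ctx)
            free(req_ctx);
        return ret;
    }

    int main(void) { return send_brick_op(NULL); }
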
diff --git a/SOURCES/0438-dht-sparse-files-rebalance-enhancements.patch b/SOURCES/0438-dht-sparse-files-rebalance-enhancements.patch
new file mode 100644
index 0000000..6e10ce6
--- /dev/null
+++ b/SOURCES/0438-dht-sparse-files-rebalance-enhancements.patch
@@ -0,0 +1,324 @@
+From 7b2f1bd4e5a57ea3abd5f14a7d81b120735faecd Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Wed, 6 May 2020 13:28:40 +0300
+Subject: [PATCH 438/449] dht - sparse files rebalance enhancements
+
+Currently data migration in rebalance reads a sparse file sequentially,
+disregarding which segments are holes and which are data. This can lead
+to extremely long migration times for large sparse files.
+The data migration mechanism needs to be enhanced so that only data
+segments are read and migrated. This can be achieved using lseek to seek
+for holes and data in the file.
+This enhancement is a consequence of
+https://bugzilla.redhat.com/show_bug.cgi?id=1823703
+
+> fixes: #1222
+> Change-Id: If5f448a0c532926464e1f34f504c5c94749b08c3
+> Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+> (Cherry pick from commit 7b7559733ca0c25c63f9d56cb7f4650dbd694c40)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24409/)
+
+BUG: 1836099
+Change-Id: If5f448a0c532926464e1f34f504c5c94749b08c3
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202647
+Reviewed-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/basic/distribute/spare_file_rebalance.t |  51 ++++++++
+ xlators/cluster/dht/src/dht-rebalance.c       | 172 ++++++++++++--------------
+ 2 files changed, 130 insertions(+), 93 deletions(-)
+ create mode 100644 tests/basic/distribute/spare_file_rebalance.t
+
+diff --git a/tests/basic/distribute/spare_file_rebalance.t b/tests/basic/distribute/spare_file_rebalance.t
+new file mode 100644
+index 0000000..061c02f
+--- /dev/null
++++ b/tests/basic/distribute/spare_file_rebalance.t
+@@ -0,0 +1,51 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../dht.rc
++
++# Initialize
++#------------------------------------------------------------
++cleanup;
++
++# Start glusterd
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume info;
++
++# Create a volume
++TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
++
++# Verify volume creation
++EXPECT "$V0" volinfo_field $V0 'Volume Name';
++EXPECT 'Created' volinfo_field $V0 'Status';
++
++# Start volume and verify successful start
++TEST $CLI volume start $V0;
++EXPECT 'Started' volinfo_field $V0 'Status';
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
++
++#------------------------------------------------------------
++
++# Test case - Create sparse files on MP and verify
++# file info after rebalance
++#------------------------------------------------------------
++
++# Create some sparse files and get their size
++TEST cd $M0;
++dd if=/dev/urandom of=sparse_file bs=10k count=1 seek=2M
++cp --sparse=always sparse_file sparse_file_3;
++
++# Add a 3rd brick
++TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3;
++
++# Trigger rebalance
++TEST $CLI volume rebalance $V0 start force;
++EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed;
++
++# Compare original and rebalanced files
++TEST cd $B0/${V0}2
++TEST cmp sparse_file $B0/${V0}3/sparse_file_3
++EXPECT_WITHIN 30 "";
++
++cleanup;
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 88b6b54..d0c21b4 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -18,8 +18,8 @@
+ #include <glusterfs/events.h>
+ 
+ #define GF_DISK_SECTOR_SIZE 512
+-#define DHT_REBALANCE_PID 4242              /* Change it if required */
+-#define DHT_REBALANCE_BLKSIZE (1024 * 1024) /* 1 MB */
++#define DHT_REBALANCE_PID 4242        /* Change it if required */
++#define DHT_REBALANCE_BLKSIZE 1048576 /* 1 MB */
+ #define MAX_MIGRATE_QUEUE_COUNT 500
+ #define MIN_MIGRATE_QUEUE_COUNT 200
+ #define MAX_REBAL_TYPE_SIZE 16
+@@ -178,75 +178,6 @@ dht_strip_out_acls(dict_t *dict)
+     }
+ }
+ 
+-static int
+-dht_write_with_holes(xlator_t *to, fd_t *fd, struct iovec *vec, int count,
+-                     int32_t size, off_t offset, struct iobref *iobref,
+-                     int *fop_errno)
+-{
+-    int i = 0;
+-    int ret = -1;
+-    int start_idx = 0;
+-    int tmp_offset = 0;
+-    int write_needed = 0;
+-    int buf_len = 0;
+-    int size_pending = 0;
+-    char *buf = NULL;
+-
+-    /* loop through each vector */
+-    for (i = 0; i < count; i++) {
+-        buf = vec[i].iov_base;
+-        buf_len = vec[i].iov_len;
+-
+-        for (start_idx = 0; (start_idx + GF_DISK_SECTOR_SIZE) <= buf_len;
+-             start_idx += GF_DISK_SECTOR_SIZE) {
+-            if (mem_0filled(buf + start_idx, GF_DISK_SECTOR_SIZE) != 0) {
+-                write_needed = 1;
+-                continue;
+-            }
+-
+-            if (write_needed) {
+-                ret = syncop_write(
+-                    to, fd, (buf + tmp_offset), (start_idx - tmp_offset),
+-                    (offset + tmp_offset), iobref, 0, NULL, NULL);
+-                /* 'path' will be logged in calling function */
+-                if (ret < 0) {
+-                    gf_log(THIS->name, GF_LOG_WARNING, "failed to write (%s)",
+-                           strerror(-ret));
+-                    *fop_errno = -ret;
+-                    ret = -1;
+-                    goto out;
+-                }
+-
+-                write_needed = 0;
+-            }
+-            tmp_offset = start_idx + GF_DISK_SECTOR_SIZE;
+-        }
+-
+-        if ((start_idx < buf_len) || write_needed) {
+-            /* This means, last chunk is not yet written.. write it */
+-            ret = syncop_write(to, fd, (buf + tmp_offset),
+-                               (buf_len - tmp_offset), (offset + tmp_offset),
+-                               iobref, 0, NULL, NULL);
+-            if (ret < 0) {
+-                /* 'path' will be logged in calling function */
+-                gf_log(THIS->name, GF_LOG_WARNING, "failed to write (%s)",
+-                       strerror(-ret));
+-                *fop_errno = -ret;
+-                ret = -1;
+-                goto out;
+-            }
+-        }
+-
+-        size_pending = (size - buf_len);
+-        if (!size_pending)
+-            break;
+-    }
+-
+-    ret = size;
+-out:
+-    return ret;
+-}
+-
+ /*
+    return values:
+    -1 : failure
+@@ -1101,32 +1032,97 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
+     int ret = 0;
+     int count = 0;
+     off_t offset = 0;
++    off_t data_offset = 0;
++    off_t hole_offset = 0;
+     struct iovec *vector = NULL;
+     struct iobref *iobref = NULL;
+     uint64_t total = 0;
+     size_t read_size = 0;
++    size_t data_block_size = 0;
+     dict_t *xdata = NULL;
+     dht_conf_t *conf = NULL;
+ 
+     conf = this->private;
++
+     /* if file size is '0', no need to enter this loop */
+     while (total < ia_size) {
+-        read_size = (((ia_size - total) > DHT_REBALANCE_BLKSIZE)
+-                         ? DHT_REBALANCE_BLKSIZE
+-                         : (ia_size - total));
++        /* This is a regular file - read it sequentially */
++        if (!hole_exists) {
++            read_size = (((ia_size - total) > DHT_REBALANCE_BLKSIZE)
++                             ? DHT_REBALANCE_BLKSIZE
++                             : (ia_size - total));
++        } else {
++            /* This is a sparse file - read only the data segments in the file
++             */
++
++            /* If the previous data block is fully copied, find the next data
++             * segment
++             * starting at the offset of the last read and written byte,  */
++            if (data_block_size <= 0) {
++                ret = syncop_seek(from, src, offset, GF_SEEK_DATA, NULL,
++                                  &data_offset);
++                if (ret) {
++                    if (ret == -ENXIO)
++                        ret = 0; /* No more data segments */
++                    else
++                        *fop_errno = -ret; /* Error occurred */
++
++                    break;
++                }
++
++                /* If the position of the current data segment is greater than
++                 * the position of the next hole, find the next hole in order to
++                 * calculate the length of the new data segment */
++                if (data_offset > hole_offset) {
++                    /* Starting at the offset of the last data segment, find the
++                     * next hole */
++                    ret = syncop_seek(from, src, data_offset, GF_SEEK_HOLE,
++                                      NULL, &hole_offset);
++                    if (ret) {
++                        /* If an error occurred here it's a real error because
++                         * if the seek for a data segment was successful then
++                         * necessarily another hole must exist (EOF is a hole)
++                         */
++                        *fop_errno = -ret;
++                        break;
++                    }
++
++                    /* Calculate the total size of the current data block */
++                    data_block_size = hole_offset - data_offset;
++                }
++            } else {
++                /* There is still data in the current segment, move the
++                 * data_offset to the position of the last written byte */
++                data_offset = offset;
++            }
++
++            /* Calculate how much data needs to be read and written. If the data
++             * segment's length is bigger than DHT_REBALANCE_BLKSIZE, read and
++             * write DHT_REBALANCE_BLKSIZE data length and the rest in the
++             * next iteration(s) */
++            read_size = ((data_block_size > DHT_REBALANCE_BLKSIZE)
++                             ? DHT_REBALANCE_BLKSIZE
++                             : data_block_size);
++
++            /* Calculate the remaining size of the data block - maybe there's no
++             * need to seek for data in the next iteration */
++            data_block_size -= read_size;
++
++            /* Set offset to the offset of the data segment so read and write
++             * will have the correct position */
++            offset = data_offset;
++        }
+ 
+         ret = syncop_readv(from, src, read_size, offset, 0, &vector, &count,
+                            &iobref, NULL, NULL, NULL);
++
+         if (!ret || (ret < 0)) {
+             *fop_errno = -ret;
+             break;
+         }
+ 
+-        if (hole_exists) {
+-            ret = dht_write_with_holes(to, dst, vector, count, ret, offset,
+-                                       iobref, fop_errno);
+-        } else {
+-            if (!conf->force_migration && !dht_is_tier_xlator(this)) {
++        if (!conf->force_migration && !dht_is_tier_xlator(this)) {
++            if (!xdata) {
+                 xdata = dict_new();
+                 if (!xdata) {
+                     gf_msg("dht", GF_LOG_ERROR, 0, DHT_MSG_MIGRATE_FILE_FAILED,
+@@ -1146,7 +1142,7 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
+                  * https://github.com/gluster/glusterfs/issues/308
+                  * for more details.
+                  */
+-                ret = dict_set_int32(xdata, GF_AVOID_OVERWRITE, 1);
++                ret = dict_set_int32_sizen(xdata, GF_AVOID_OVERWRITE, 1);
+                 if (ret) {
+                     gf_msg("dht", GF_LOG_ERROR, 0, ENOMEM,
+                            "failed to set dict");
+@@ -1155,22 +1151,12 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
+                     break;
+                 }
+             }
+-
+-            ret = syncop_writev(to, dst, vector, count, offset, iobref, 0, NULL,
+-                                NULL, xdata, NULL);
+-            if (ret < 0) {
+-                *fop_errno = -ret;
+-            }
+-        }
+-
+-        if ((defrag && defrag->cmd == GF_DEFRAG_CMD_START_TIER) &&
+-            (gf_defrag_get_pause_state(&defrag->tier_conf) != TIER_RUNNING)) {
+-            gf_msg("tier", GF_LOG_INFO, 0, DHT_MSG_TIER_PAUSED,
+-                   "Migrate file paused");
+-            ret = -1;
+         }
+ 
++        ret = syncop_writev(to, dst, vector, count, offset, iobref, 0, NULL,
++                            NULL, xdata, NULL);
+         if (ret < 0) {
++            *fop_errno = -ret;
+             break;
+         }
+ 
+-- 
+1.8.3.1
+
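
The rewrite above replaces the sector-scanning `dht_write_with_holes` with `syncop_seek` calls that jump between data segments. The underlying mechanism is plain `lseek` with `SEEK_DATA`/`SEEK_HOLE`; below is a standalone, Linux-specific sketch that enumerates the data segments of a file (the default path is illustrative):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
        const char *path = argc > 1 ? argv[1] : "sparse_file";
        int fd = open(path, O_RDONLY);
        off_t data, hole = 0;

        if (fd < 0) {
            perror("open");
            return 1;
        }

        for (;;) {
            /* find the start of the next data segment */
            data = lseek(fd, hole, SEEK_DATA);
            if (data == (off_t)-1) {
                if (errno != ENXIO)   /* ENXIO: no data past this offset */
                    perror("lseek(SEEK_DATA)");
                break;
            }
            /* a data segment always has a following hole (EOF counts) */
            hole = lseek(fd, data, SEEK_HOLE);
            if (hole == (off_t)-1) {
                perror("lseek(SEEK_HOLE)");
                break;
            }
            /* only [data, hole) would be read and migrated */
            printf("data segment: %lld..%lld (%lld bytes)\n",
                   (long long)data, (long long)hole,
                   (long long)(hole - data));
        }
        close(fd);
        return 0;
    }
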
diff --git a/SOURCES/0439-cluster-afr-Delay-post-op-for-fsync.patch b/SOURCES/0439-cluster-afr-Delay-post-op-for-fsync.patch
new file mode 100644
index 0000000..dc1593b
--- /dev/null
+++ b/SOURCES/0439-cluster-afr-Delay-post-op-for-fsync.patch
@@ -0,0 +1,438 @@
+From 3ed98fc9dcb39223032e343fd5b0ad17fa3cae14 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Fri, 29 May 2020 14:24:53 +0530
+Subject: [PATCH 439/449] cluster/afr: Delay post-op for fsync
+
+Problem:
+AFR doesn't delay the post-op for the fsync fop. For fsync-heavy
+workloads this leads to an unnecessary fxattrop/finodelk for every
+fsync, resulting in poor performance.
+
+Fix:
+Delay the post-op for fsync as well. Add a special flag in xdata to
+indicate that afr shouldn't delay the post-op in cases where either the
+process will terminate or a graph switch would happen. Otherwise it
+leads to unnecessary heals when the graph switch or process termination
+happens before the delayed post-op completes.
+
+> Upstream-patch: https://review.gluster.org/c/glusterfs/+/24473
+> Fixes: #1253
+
+BUG: 1838479
+Change-Id: I531940d13269a111c49e0510d49514dc169f4577
+Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202676
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ api/src/glfs-resolve.c                         |  14 ++-
+ tests/basic/afr/durability-off.t               |   2 +
+ tests/basic/gfapi/gfapi-graph-switch-open-fd.t |  44 +++++++++
+ tests/basic/gfapi/gfapi-keep-writing.c         | 129 +++++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-inode-write.c      |  11 ++-
+ xlators/cluster/afr/src/afr-transaction.c      |   9 +-
+ xlators/cluster/afr/src/afr.h                  |   2 +-
+ xlators/cluster/dht/src/dht-rebalance.c        |  15 ++-
+ xlators/mount/fuse/src/fuse-bridge.c           |  23 ++++-
+ 9 files changed, 239 insertions(+), 10 deletions(-)
+ create mode 100644 tests/basic/gfapi/gfapi-graph-switch-open-fd.t
+ create mode 100644 tests/basic/gfapi/gfapi-keep-writing.c
+
+diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
+index a79f490..062b7dc 100644
+--- a/api/src/glfs-resolve.c
++++ b/api/src/glfs-resolve.c
+@@ -722,6 +722,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
+         0,
+     };
+     char uuid1[64];
++    dict_t *xdata = NULL;
+ 
+     oldinode = oldfd->inode;
+     oldsubvol = oldinode->table->xl;
+@@ -730,7 +731,15 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
+         return fd_ref(oldfd);
+ 
+     if (!oldsubvol->switched) {
+-        ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, NULL, NULL);
++        xdata = dict_new();
++        if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
++            gf_msg(fs->volname, GF_LOG_WARNING, ENOMEM, API_MSG_FSYNC_FAILED,
++                   "last-fsync set failed on %s graph %s (%d)",
++                   uuid_utoa_r(oldfd->inode->gfid, uuid1),
++                   graphid_str(oldsubvol), oldsubvol->graph->id);
++        }
++
++        ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, xdata, NULL);
+         DECODE_SYNCOP_ERR(ret);
+         if (ret) {
+             gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
+@@ -809,6 +818,9 @@ out:
+         newfd = NULL;
+     }
+ 
++    if (xdata)
++        dict_unref(xdata);
++
+     return newfd;
+ }
+ 
+diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
+index 155ffa0..6e0f18b 100644
+--- a/tests/basic/afr/durability-off.t
++++ b/tests/basic/afr/durability-off.t
+@@ -26,6 +26,8 @@ TEST $CLI volume heal $V0
+ EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+ EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+ 
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+ #Test that fsyncs happen when durability is on
+ TEST $CLI volume set $V0 cluster.ensure-durability on
+ TEST $CLI volume set $V0 performance.strict-write-ordering on
+diff --git a/tests/basic/gfapi/gfapi-graph-switch-open-fd.t b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
+new file mode 100644
+index 0000000..2e666be
+--- /dev/null
++++ b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
+@@ -0,0 +1,44 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++cleanup;
++
++TEST glusterd
++
++TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{0..2};
++EXPECT 'Created' volinfo_field $V0 'Status';
++
++TEST $CLI volume start $V0;
++EXPECT 'Started' volinfo_field $V0 'Status';
++
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
++TEST touch $M0/sync
++logdir=`gluster --print-logdir`
++
++TEST build_tester $(dirname $0)/gfapi-keep-writing.c -lgfapi
++
++
++#Launch a program to keep doing writes on an fd
++./$(dirname $0)/gfapi-keep-writing ${H0} $V0 $logdir/gfapi-async-calls-test.log sync &
++p=$!
++sleep 1 #Let some writes go through
++#Check if graph switch will lead to any pending markers for ever
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.read-ahead off
++
++
++TEST rm -f $M0/sync #Make sure the glfd is closed
++TEST wait #Wait for background process to die
++#Goal is to check if there is permanent FOOL changelog
++sleep 5
++EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick0/glfs_test.txt trusted.afr.dirty
++EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick1/glfs_test.txt trusted.afr.dirty
++EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick2/glfs_test.txt trusted.afr.dirty
++
++cleanup_tester $(dirname $0)/gfapi-async-calls-test
++
++cleanup;
+diff --git a/tests/basic/gfapi/gfapi-keep-writing.c b/tests/basic/gfapi/gfapi-keep-writing.c
+new file mode 100644
+index 0000000..91b59ce
+--- /dev/null
++++ b/tests/basic/gfapi/gfapi-keep-writing.c
+@@ -0,0 +1,129 @@
++#include <fcntl.h>
++#include <unistd.h>
++#include <time.h>
++#include <limits.h>
++#include <string.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <errno.h>
++#include <glusterfs/api/glfs.h>
++#include <glusterfs/api/glfs-handles.h>
++
++#define LOG_ERR(msg)                                                           \
++    do {                                                                       \
++        fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno));            \
++    } while (0)
++
++glfs_t *
++init_glfs(const char *hostname, const char *volname, const char *logfile)
++{
++    int ret = -1;
++    glfs_t *fs = NULL;
++
++    fs = glfs_new(volname);
++    if (!fs) {
++        LOG_ERR("glfs_new failed");
++        return NULL;
++    }
++
++    ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
++    if (ret < 0) {
++        LOG_ERR("glfs_set_volfile_server failed");
++        goto out;
++    }
++
++    ret = glfs_set_logging(fs, logfile, 7);
++    if (ret < 0) {
++        LOG_ERR("glfs_set_logging failed");
++        goto out;
++    }
++
++    ret = glfs_init(fs);
++    if (ret < 0) {
++        LOG_ERR("glfs_init failed");
++        goto out;
++    }
++
++    ret = 0;
++out:
++    if (ret) {
++        glfs_fini(fs);
++        fs = NULL;
++    }
++
++    return fs;
++}
++
++int
++glfs_test_function(const char *hostname, const char *volname,
++                   const char *logfile, const char *syncfile)
++{
++    int ret = -1;
++    int flags = O_CREAT | O_RDWR;
++    glfs_t *fs = NULL;
++    glfs_fd_t *glfd = NULL;
++    const char *buff = "This is from my prog\n";
++    const char *filename = "glfs_test.txt";
++    struct stat buf = {0};
++
++    fs = init_glfs(hostname, volname, logfile);
++    if (fs == NULL) {
++        LOG_ERR("init_glfs failed");
++        return -1;
++    }
++
++    glfd = glfs_creat(fs, filename, flags, 0644);
++    if (glfd == NULL) {
++        LOG_ERR("glfs_creat failed");
++        goto out;
++    }
++
++    while (glfs_stat(fs, syncfile, &buf) == 0) {
++        ret = glfs_write(glfd, buff, strlen(buff), flags);
++        if (ret < 0) {
++            LOG_ERR("glfs_write failed");
++            goto out;
++        }
++    }
++
++    ret = glfs_close(glfd);
++    if (ret < 0) {
++        LOG_ERR("glfs_write failed");
++        goto out;
++    }
++
++out:
++    ret = glfs_fini(fs);
++    if (ret) {
++        LOG_ERR("glfs_fini failed");
++    }
++
++    return ret;
++}
++
++int
++main(int argc, char *argv[])
++{
++    int ret = 0;
++    char *hostname = NULL;
++    char *volname = NULL;
++    char *logfile = NULL;
++    char *syncfile = NULL;
++
++    if (argc != 5) {
++        fprintf(stderr, "Invalid argument\n");
++        exit(1);
++    }
++
++    hostname = argv[1];
++    volname = argv[2];
++    logfile = argv[3];
++    syncfile = argv[4];
++
++    ret = glfs_test_function(hostname, volname, logfile, syncfile);
++    if (ret) {
++        LOG_ERR("glfs_test_function failed");
++    }
++
++    return ret;
++}
+diff --git a/xlators/cluster/afr/src/afr-inode-write.c b/xlators/cluster/afr/src/afr-inode-write.c
+index 7fcc9d4..df82b6e 100644
+--- a/xlators/cluster/afr/src/afr-inode-write.c
++++ b/xlators/cluster/afr/src/afr-inode-write.c
+@@ -2492,6 +2492,7 @@ afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+     call_frame_t *transaction_frame = NULL;
+     int ret = -1;
+     int32_t op_errno = ENOMEM;
++    int8_t last_fsync = 0;
+ 
+     transaction_frame = copy_frame(frame);
+     if (!transaction_frame)
+@@ -2501,10 +2502,16 @@ afr_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+     if (!local)
+         goto out;
+ 
+-    if (xdata)
++    if (xdata) {
+         local->xdata_req = dict_copy_with_ref(xdata, NULL);
+-    else
++        if (dict_get_int8(xdata, "last-fsync", &last_fsync) == 0) {
++            if (last_fsync) {
++                local->transaction.disable_delayed_post_op = _gf_true;
++            }
++        }
++    } else {
+         local->xdata_req = dict_new();
++    }
+ 
+     if (!local->xdata_req)
+         goto out;
+diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
+index 8e65ae2..ffd0ab8 100644
+--- a/xlators/cluster/afr/src/afr-transaction.c
++++ b/xlators/cluster/afr/src/afr-transaction.c
+@@ -2385,8 +2385,13 @@ afr_is_delayed_changelog_post_op_needed(call_frame_t *frame, xlator_t *this,
+         goto out;
+     }
+ 
+-    if ((local->op != GF_FOP_WRITE) && (local->op != GF_FOP_FXATTROP)) {
+-        /*Only allow writes but shard does [f]xattrops on writes, so
++    if (local->transaction.disable_delayed_post_op) {
++        goto out;
++    }
++
++    if ((local->op != GF_FOP_WRITE) && (local->op != GF_FOP_FXATTROP) &&
++        (local->op != GF_FOP_FSYNC)) {
++        /*Only allow writes/fsyncs but shard does [f]xattrops on writes, so
+          * they are fine too*/
+         goto out;
+     }
+diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
+index 18f1a6a..ff96246 100644
+--- a/xlators/cluster/afr/src/afr.h
++++ b/xlators/cluster/afr/src/afr.h
+@@ -854,7 +854,7 @@ typedef struct _afr_local {
+ 
+         int (*unwind)(call_frame_t *frame, xlator_t *this);
+ 
+-        /* post-op hook */
++        gf_boolean_t disable_delayed_post_op;
+     } transaction;
+ 
+     syncbarrier_t barrier;
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index d0c21b4..e9974cd 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -1550,6 +1550,7 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
+     xlator_t *old_target = NULL;
+     xlator_t *hashed_subvol = NULL;
+     fd_t *linkto_fd = NULL;
++    dict_t *xdata = NULL;
+ 
+     if (from == to) {
+         gf_msg_debug(this->name, 0,
+@@ -1868,7 +1869,15 @@ dht_migrate_file(xlator_t *this, loc_t *loc, xlator_t *from, xlator_t *to,
+ 
+     /* TODO: Sync the locks */
+ 
+-    ret = syncop_fsync(to, dst_fd, 0, NULL, NULL, NULL, NULL);
++    xdata = dict_new();
++    if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
++        gf_log(this->name, GF_LOG_ERROR,
++               "%s: failed to set last-fsync flag on "
++               "%s (%s)",
++               loc->path, to->name, strerror(ENOMEM));
++    }
++
++    ret = syncop_fsync(to, dst_fd, 0, NULL, NULL, xdata, NULL);
+     if (ret) {
+         gf_log(this->name, GF_LOG_WARNING, "%s: failed to fsync on %s (%s)",
+                loc->path, to->name, strerror(-ret));
+@@ -2342,11 +2351,15 @@ out:
+ 
+     if (dst_fd)
+         syncop_close(dst_fd);
++
+     if (src_fd)
+         syncop_close(src_fd);
+     if (linkto_fd)
+         syncop_close(linkto_fd);
+ 
++    if (xdata)
++        dict_unref(xdata);
++
+     loc_wipe(&tmp_loc);
+     loc_wipe(&parent_loc);
+ 
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index fdeec49..4264fad 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -5559,6 +5559,7 @@ fuse_migrate_fd(xlator_t *this, fd_t *basefd, xlator_t *old_subvol,
+     char create_in_progress = 0;
+     fuse_fd_ctx_t *basefd_ctx = NULL;
+     fd_t *oldfd = NULL;
++    dict_t *xdata = NULL;
+ 
+     basefd_ctx = fuse_fd_ctx_get(this, basefd);
+     GF_VALIDATE_OR_GOTO("glusterfs-fuse", basefd_ctx, out);
+@@ -5595,10 +5596,23 @@ fuse_migrate_fd(xlator_t *this, fd_t *basefd, xlator_t *old_subvol,
+     }
+ 
+     if (oldfd->inode->table->xl == old_subvol) {
+-        if (IA_ISDIR(oldfd->inode->ia_type))
++        if (IA_ISDIR(oldfd->inode->ia_type)) {
+             ret = syncop_fsyncdir(old_subvol, oldfd, 0, NULL, NULL);
+-        else
+-            ret = syncop_fsync(old_subvol, oldfd, 0, NULL, NULL, NULL, NULL);
++        } else {
++            xdata = dict_new();
++            if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
++                gf_log("glusterfs-fuse", GF_LOG_WARNING,
++                       "last-fsync set failed (%s) on fd (%p)"
++                       "(basefd:%p basefd-inode.gfid:%s) "
++                       "(old-subvolume:%s-%d new-subvolume:%s-%d)",
++                       strerror(ENOMEM), oldfd, basefd,
++                       uuid_utoa(basefd->inode->gfid), old_subvol->name,
++                       old_subvol->graph->id, new_subvol->name,
++                       new_subvol->graph->id);
++            }
++
++            ret = syncop_fsync(old_subvol, oldfd, 0, NULL, NULL, xdata, NULL);
++        }
+ 
+         if (ret < 0) {
+             gf_log("glusterfs-fuse", GF_LOG_WARNING,
+@@ -5653,6 +5667,9 @@ out:
+ 
+     fd_unref(oldfd);
+ 
++    if (xdata)
++        dict_unref(xdata);
++
+     return ret;
+ }
+ 
+-- 
+1.8.3.1
+
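
The patch above threads a `last-fsync` key through xdata so AFR flushes the deferred post-op when a graph switch or process termination is imminent. A toy model of that deferral logic, deliberately outside glusterfs and with illustrative state names, just to show why the final fsync must not stay deferred:

    #include <stdio.h>

    struct txn_state {
        int dirty;     /* stands in for the pending changelog post-op */
        int delayed;   /* post-op currently deferred? */
    };

    static void write_op(struct txn_state *t)
    {
        t->dirty = 1;
        t->delayed = 1;   /* defer the post-op, hoping to coalesce */
    }

    static void fsync_op(struct txn_state *t, int last_fsync)
    {
        if (last_fsync && t->delayed) {
            t->dirty = 0;     /* run the post-op before returning */
            t->delayed = 0;
        }
        /* a normal fsync leaves the post-op deferred so later writes
         * can piggyback on the same changelog update */
    }

    int main(void)
    {
        struct txn_state t = {0};
        write_op(&t);
        fsync_op(&t, 0);   /* ordinary fsync: post-op stays deferred */
        fsync_op(&t, 1);   /* "last-fsync": must leave no dirty marks */
        printf("dirty=%d\n", t.dirty);
        return 0;
    }
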
diff --git a/SOURCES/0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch b/SOURCES/0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
new file mode 100644
index 0000000..a7c1869
--- /dev/null
+++ b/SOURCES/0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
@@ -0,0 +1,62 @@
+From 9cbab9110523cfafe23d6c6b3080d0d744062b85 Mon Sep 17 00:00:00 2001
+From: Mohammed Rafi KC <rkavunga@redhat.com>
+Date: Thu, 21 May 2020 16:04:33 +0530
+Subject: [PATCH 440/449] glusterd/snapshot: Improve log message during
+ snapshot clone
+
+While taking a snapshot clone, if the snapshot is not activated,
+the CLI was reporting that the bricks are down.
+This patch makes the error message state clearly that the failure
+is due to the snapshot state.
+
+>Change-Id: Ia840e6e071342e061ad38bf15e2e2ff2b0dacdfa
+>Fixes: #1255
+>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24478/
+
+BUG: 1837926
+Change-Id: Ia840e6e071342e061ad38bf15e2e2ff2b0dacdfa
+Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202707
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-snapshot.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+index c56be91..5b8ae97 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+@@ -2238,7 +2238,6 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
+     char *clonename = NULL;
+     char *snapname = NULL;
+     char device_name[64] = "";
+-    char key[PATH_MAX] = "";
+     glusterd_snap_t *snap = NULL;
+     char err_str[PATH_MAX] = "";
+     int ret = -1;
+@@ -2299,8 +2298,18 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
+         goto out;
+     }
+ 
+-    snprintf(key, sizeof(key) - 1, "vol1_volid");
+-    ret = dict_get_bin(dict, key, (void **)&snap_volid);
++
++    if (!glusterd_is_volume_started(snap_vol)) {
++        snprintf(err_str, sizeof(err_str),
++                 "Snapshot %s is "
++                 "not activated",
++                 snap->snapname);
++        loglevel = GF_LOG_WARNING;
++        *op_errno = EG_VOLSTP;
++        goto out;
++    }
++
++    ret = dict_get_bin(dict, "vol1_volid", (void **)&snap_volid);
+     if (ret) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+                "Unable to fetch snap_volid");
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch b/SOURCES/0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
new file mode 100644
index 0000000..1e49684
--- /dev/null
+++ b/SOURCES/0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
@@ -0,0 +1,195 @@
+From 1bde083cbd1e06be66d00e4ca52075687cee0d60 Mon Sep 17 00:00:00 2001
+From: Csaba Henk <csaba@redhat.com>
+Date: Fri, 8 May 2020 23:01:04 +0200
+Subject: [PATCH 441/449] fuse: occasional logging for fuse device 'weird'
+ write errors
+
+This change is a followup to
+I510158843e4b1d482bdc496c2e97b1860dc1ba93.
+
+In the referred change we pushed log messages about 'weird'
+write errors to the fuse device out of sight by reporting
+them at Debug loglevel instead of Error (where
+'weird' means the errno is not POSIX compliant but has
+meaningful semantics for the FUSE protocol).
+
+This solved the issue of spurious error reporting.
+And so far so good: these messages don't indicate
+an error condition by themselves. However, when they
+come in high repetitions, that indicates a suboptimal
+condition which should be reported.[1]
+
+Therefore now we shall emit a Warning if a certain
+errno occurs a certain number of times[2] as the
+outcome of a write to the fuse device.
+
+___
+[1] typically ENOENTs and ENOTDIRs accumulate
+when glusterfs' inode invalidation lags behind
+the kernel's internal inode garbage collection
+(in this case the above errnos mean that the inode
+which we requested to be invalidated is not found
+in the kernel). This can be mitigated with the
+invalidate-limit command line / mount option,
+cf. bz#1732717.
+
+[2] 256, as of the current implementation.
+
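+As a minimal sketch (illustrative, not the actual glusterfs code) of
+the wraparound-based accounting used here: an 8-bit counter wraps every
+256 increments, and the wrap back to zero is the cue to emit a single
+aggregated warning instead of 256 individual messages.
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* one byte per errno, as in the fusedev_errno_cnt array */
+    static uint8_t enoent_cnt;
+
+    static void account_enoent(void)
+    {
+        /* !++cnt is true exactly once per 256 calls, on wraparound */
+        if (!++enoent_cnt)
+            fprintf(stderr,
+                    "writing to fuse device yielded ENOENT %d times\n",
+                    UINT8_MAX + 1);
+    }
+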
+Upstream on https://review.gluster.org/24415
+> Change-Id: I8cc7fe104da43a88875f93b0db49d5677cc16045
+> Updates: #1000
+> Signed-off-by: Csaba Henk <csaba@redhat.com>
+
+BUG: 1839137
+Change-Id: I8448d6d328d47cb01d560cd99a2f43cd8dab312d
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202646
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mount/fuse/src/fuse-bridge.c | 36 +++++++++++++++++++++++++++++++++++-
+ xlators/mount/fuse/src/fuse-bridge.h | 18 ++++++++++++++++++
+ 2 files changed, 53 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index 4264fad..2e7584c 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -218,14 +218,30 @@ check_and_dump_fuse_W(fuse_private_t *priv, struct iovec *iov_out, int count,
+     if (res == -1) {
+         const char *errdesc = NULL;
+         gf_loglevel_t loglevel = GF_LOG_ERROR;
++        gf_boolean_t errno_degraded = _gf_false;
++        gf_boolean_t errno_promoted = _gf_false;
++
++#define ACCOUNT_ERRNO(eno)                                                     \
++    do {                                                                       \
++        if (errno_degraded) {                                                  \
++            pthread_mutex_lock(&priv->fusedev_errno_cnt_mutex);                \
++            {                                                                  \
++                if (!++priv->fusedev_errno_cnt[FUSEDEV_##eno])                 \
++                    errno_promoted = _gf_true;                                 \
++            }                                                                  \
++            pthread_mutex_unlock(&priv->fusedev_errno_cnt_mutex);              \
++        }                                                                      \
++    } while (0)
+ 
+         /* If caller masked the errno, then it
+          * does not indicate an error at the application
+          * level, so we degrade the log severity to DEBUG.
+          */
+         if (errnomask && errno < ERRNOMASK_MAX &&
+-            GET_ERRNO_MASK(errnomask, errno))
++            GET_ERRNO_MASK(errnomask, errno)) {
+             loglevel = GF_LOG_DEBUG;
++            errno_degraded = _gf_true;
++        }
+ 
+         switch (errno) {
+             /* The listed errnos are FUSE status indicators,
+@@ -235,33 +251,43 @@ check_and_dump_fuse_W(fuse_private_t *priv, struct iovec *iov_out, int count,
+              */
+             case ENOENT:
+                 errdesc = "ENOENT";
++                ACCOUNT_ERRNO(ENOENT);
+                 break;
+             case ENOTDIR:
+                 errdesc = "ENOTDIR";
++                ACCOUNT_ERRNO(ENOTDIR);
+                 break;
+             case ENODEV:
+                 errdesc = "ENODEV";
++                ACCOUNT_ERRNO(ENODEV);
+                 break;
+             case EPERM:
+                 errdesc = "EPERM";
++                ACCOUNT_ERRNO(EPERM);
+                 break;
+             case ENOMEM:
+                 errdesc = "ENOMEM";
++                ACCOUNT_ERRNO(ENOMEM);
+                 break;
+             case ENOTCONN:
+                 errdesc = "ENOTCONN";
++                ACCOUNT_ERRNO(ENOTCONN);
+                 break;
+             case ECONNREFUSED:
+                 errdesc = "ECONNREFUSED";
++                ACCOUNT_ERRNO(ECONNREFUSED);
+                 break;
+             case EOVERFLOW:
+                 errdesc = "EOVERFLOW";
++                ACCOUNT_ERRNO(EOVERFLOW);
+                 break;
+             case EBUSY:
+                 errdesc = "EBUSY";
++                ACCOUNT_ERRNO(EBUSY);
+                 break;
+             case ENOTEMPTY:
+                 errdesc = "ENOTEMPTY";
++                ACCOUNT_ERRNO(ENOTEMPTY);
+                 break;
+             default:
+                 errdesc = strerror(errno);
+@@ -269,7 +295,13 @@ check_and_dump_fuse_W(fuse_private_t *priv, struct iovec *iov_out, int count,
+ 
+         gf_log_callingfn("glusterfs-fuse", loglevel,
+                          "writing to fuse device failed: %s", errdesc);
++        if (errno_promoted)
++            gf_log("glusterfs-fuse", GF_LOG_WARNING,
++                   "writing to fuse device yielded %s %d times", errdesc,
++                   UINT8_MAX + 1);
+         return errno;
++
++#undef ACCOUNT_ERRNO
+     }
+ 
+     fouh = iov_out[0].iov_base;
+@@ -6584,6 +6616,8 @@ init(xlator_t *this_xl)
+     INIT_LIST_HEAD(&priv->interrupt_list);
+     pthread_mutex_init(&priv->interrupt_mutex, NULL);
+ 
++    pthread_mutex_init(&priv->fusedev_errno_cnt_mutex, NULL);
++
+     /* get options from option dictionary */
+     ret = dict_get_str(options, ZR_MOUNTPOINT_OPT, &value_string);
+     if (ret == -1 || value_string == NULL) {
+diff --git a/xlators/mount/fuse/src/fuse-bridge.h b/xlators/mount/fuse/src/fuse-bridge.h
+index d2d462c..2fb15a6 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.h
++++ b/xlators/mount/fuse/src/fuse-bridge.h
+@@ -78,6 +78,20 @@ typedef struct fuse_in_header fuse_in_header_t;
+ typedef void(fuse_handler_t)(xlator_t *this, fuse_in_header_t *finh, void *msg,
+                              struct iobuf *iobuf);
+ 
++enum fusedev_errno {
++    FUSEDEV_ENOENT,
++    FUSEDEV_ENOTDIR,
++    FUSEDEV_ENODEV,
++    FUSEDEV_EPERM,
++    FUSEDEV_ENOMEM,
++    FUSEDEV_ENOTCONN,
++    FUSEDEV_ECONNREFUSED,
++    FUSEDEV_EOVERFLOW,
++    FUSEDEV_EBUSY,
++    FUSEDEV_ENOTEMPTY,
++    FUSEDEV_EMAXPLUS
++};
++
+ struct fuse_private {
+     int fd;
+     uint32_t proto_minor;
+@@ -192,6 +206,10 @@ struct fuse_private {
+     /* LRU Limit, if not set, default is 64k for now */
+     uint32_t lru_limit;
+     uint32_t invalidate_limit;
++
++    /* counters for fusdev errnos */
++    uint8_t fusedev_errno_cnt[FUSEDEV_EMAXPLUS];
++    pthread_mutex_t fusedev_errno_cnt_mutex;
+ };
+ typedef struct fuse_private fuse_private_t;
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0442-fuse-correctly-handle-setxattr-values.patch b/SOURCES/0442-fuse-correctly-handle-setxattr-values.patch
new file mode 100644
index 0000000..4be3b85
--- /dev/null
+++ b/SOURCES/0442-fuse-correctly-handle-setxattr-values.patch
@@ -0,0 +1,139 @@
+From 56c8ef4a64506c64aeb95d5a2c38d7107f90ac3a Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Tue, 5 Feb 2019 16:57:52 +0100
+Subject: [PATCH 442/449] fuse: correctly handle setxattr values
+
+The setxattr function receives a pointer to raw data, which may not be
+null-terminated. When this data needs to be interpreted as a string, an
+explicit null termination needs to be added before using the value.
+
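+As a sketch, the helper pattern this change introduces copies the
+length-delimited value into a bounded buffer and NUL-terminates it
+before any string interpretation (the name below is illustrative; the
+patch adds gf_bin_to_string with this shape):
+
+    #include <errno.h>
+    #include <string.h>
+
+    static int bin_to_string(char *dst, size_t size, const void *src,
+                             size_t len)
+    {
+        if (len >= size)
+            return EINVAL; /* no room for the terminating NUL */
+
+        memcpy(dst, src, len);
+        dst[len] = '\0';
+        return 0;
+    }
+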
+Upstream patch https://review.gluster.org/#/c/glusterfs/+/22157
+> Change-Id: Id110f9b215b22786da5782adec9449ce38d0d563
+> updates: bz#1193929
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Note: this change does not address the issue of bz 1787310;
+rather, it is a prerequisite for other changes that do.
+
+BUG: 1787310
+Change-Id: I56417b130eb2a1f388108456c905a577eb658793
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202758
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/xlator.h  |  2 +-
+ libglusterfs/src/xlator.c            | 28 +++++++++++++++++++++++++---
+ xlators/mount/fuse/src/fuse-bridge.c | 20 ++++++++++++++++----
+ 3 files changed, 42 insertions(+), 8 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
+index db04c4d..8650ccc 100644
+--- a/libglusterfs/src/glusterfs/xlator.h
++++ b/libglusterfs/src/glusterfs/xlator.h
+@@ -1043,7 +1043,7 @@ xlator_mem_acct_init(xlator_t *xl, int num_types);
+ void
+ xlator_mem_acct_unref(struct mem_acct *mem_acct);
+ int
+-is_gf_log_command(xlator_t *trans, const char *name, char *value);
++is_gf_log_command(xlator_t *trans, const char *name, char *value, size_t size);
+ int
+ glusterd_check_log_level(const char *value);
+ int
+diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
+index 6bd4f09..108b96a 100644
+--- a/libglusterfs/src/xlator.c
++++ b/libglusterfs/src/xlator.c
+@@ -1278,8 +1278,21 @@ xlator_destroy(xlator_t *xl)
+     return 0;
+ }
+ 
++static int32_t
++gf_bin_to_string(char *dst, size_t size, void *src, size_t len)
++{
++    if (len >= size) {
++        return EINVAL;
++    }
++
++    memcpy(dst, src, len);
++    dst[len] = 0;
++
++    return 0;
++}
++
+ int
+-is_gf_log_command(xlator_t *this, const char *name, char *value)
++is_gf_log_command(xlator_t *this, const char *name, char *value, size_t size)
+ {
+     xlator_t *trav = NULL;
+     char key[1024] = {
+@@ -1291,7 +1304,11 @@ is_gf_log_command(xlator_t *this, const char *name, char *value)
+     glusterfs_ctx_t *ctx = NULL;
+ 
+     if (!strcmp("trusted.glusterfs.syslog", name)) {
+-        ret = gf_string2boolean(value, &syslog_flag);
++        ret = gf_bin_to_string(key, sizeof(key), value, size);
++        if (ret != 0) {
++            goto out;
++        }
++        ret = gf_string2boolean(key, &syslog_flag);
+         if (ret) {
+             ret = EOPNOTSUPP;
+             goto out;
+@@ -1307,7 +1324,12 @@ is_gf_log_command(xlator_t *this, const char *name, char *value)
+     if (fnmatch("trusted.glusterfs*set-log-level", name, FNM_NOESCAPE))
+         goto out;
+ 
+-    log_level = glusterd_check_log_level(value);
++    ret = gf_bin_to_string(key, sizeof(key), value, size);
++    if (ret != 0) {
++        goto out;
++    }
++
++    log_level = glusterd_check_log_level(key);
+     if (log_level == -1) {
+         ret = EOPNOTSUPP;
+         goto out;
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index 2e7584c..cfad2b4 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -4112,7 +4112,7 @@ fuse_setxattr(xlator_t *this, fuse_in_header_t *finh, void *msg,
+ 
+     /* Check if the command is for changing the log
+        level of process or specific xlator */
+-    ret = is_gf_log_command(this, name, value);
++    ret = is_gf_log_command(this, name, value, fsi->size);
+     if (ret >= 0) {
+         op_errno = ret;
+         goto done;
+@@ -4159,11 +4159,23 @@ fuse_setxattr(xlator_t *this, fuse_in_header_t *finh, void *msg,
+          * fixups to make sure that's the case.  To avoid nasty
+          * surprises, allocate an extra byte and add a NUL here.
+          */
+-        dict_value = memdup(value, fsi->size + 1);
++        dict_value = GF_MALLOC(fsi->size + 1, gf_common_mt_char);
++        if (dict_value == NULL) {
++            gf_log("glusterfs-fuse", GF_LOG_ERROR,
++                   "%" PRIu64 ": SETXATTR value allocation failed",
++                   finh->unique);
++            op_errno = ENOMEM;
++            goto done;
++        }
++        memcpy(dict_value, value, fsi->size);
+         dict_value[fsi->size] = '\0';
+     }
+-    dict_set(state->xattr, newkey,
+-             data_from_dynptr((void *)dict_value, fsi->size));
++    ret = dict_set_dynptr(state->xattr, newkey, dict_value, fsi->size);
++    if (ret < 0) {
++        op_errno = -ret;
++        GF_FREE(dict_value);
++        goto done;
++    }
+ 
+     state->flags = fsi->flags;
+     state->name = newkey;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0443-fuse-fix-high-sev-coverity-issue.patch b/SOURCES/0443-fuse-fix-high-sev-coverity-issue.patch
new file mode 100644
index 0000000..7c5e9c0
--- /dev/null
+++ b/SOURCES/0443-fuse-fix-high-sev-coverity-issue.patch
@@ -0,0 +1,55 @@
+From 3ac3312d63b9dc3c15cd8765ab8b7c601b007500 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Tue, 19 Mar 2019 22:51:14 +0530
+Subject: [PATCH 443/449] fuse : fix high sev coverity issue
+
+This patch fixes two coverity issues in fuse-bridge.c.
+
+CID : 1398630 : Resource leak
+CID : 1399757 : Uninitialized pointer read
+
+Upstream patch https://review.gluster.org/c/glusterfs/+/22382
+> updates: bz#789278
+>
+> Change-Id: I69f8591400ee56a5d215eeac443a8e3d7777db27
+> Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+
+BUG: 1787310
+Change-Id: Ib2c9af25019ee57131b3d384fc4b557437e75d3e
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202759
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mount/fuse/src/fuse-bridge.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index cfad2b4..d17320b 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -4174,6 +4174,7 @@ fuse_setxattr(xlator_t *this, fuse_in_header_t *finh, void *msg,
+     if (ret < 0) {
+         op_errno = -ret;
+         GF_FREE(dict_value);
++        GF_FREE(newkey);
+         goto done;
+     }
+ 
+@@ -5963,7 +5964,12 @@ fuse_thread_proc(void *data)
+     ssize_t res = 0;
+     struct iobuf *iobuf = NULL;
+     fuse_in_header_t *finh = NULL;
+-    struct iovec iov_in[2];
++    struct iovec iov_in[2] = {
++        {
++            0,
++        },
++    };
++
+     void *msg = NULL;
+     /* we need 512 extra buffer size for BATCH_FORGET fop. By tests, it is
+        found to be reduces 'REALLOC()' in the loop */
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0444-mount-fuse-Fixing-a-coverity-issue.patch b/SOURCES/0444-mount-fuse-Fixing-a-coverity-issue.patch
new file mode 100644
index 0000000..c8e3e8c
--- /dev/null
+++ b/SOURCES/0444-mount-fuse-Fixing-a-coverity-issue.patch
@@ -0,0 +1,40 @@
+From 53a6aed98aad73ff51f884bf815bccfa337eb524 Mon Sep 17 00:00:00 2001
+From: Barak Sason <bsasonro@redhat.com>
+Date: Sun, 18 Aug 2019 17:38:09 +0300
+Subject: [PATCH 444/449] mount/fuse - Fixing a coverity issue
+
+Fixed a resource leak of the dict_value and newkey variables
+
+CID: 1398630
+
+Upstream patch https://review.gluster.org/c/glusterfs/+/23260
+> Updates: bz#789278
+>
+> Change-Id: I589fdc0aecaeb4f446cd29f95bad36ccd7a35beb
+> Signed-off-by: Barak Sason <bsasonro@redhat.com>
+
+BUG: 1787310
+Change-Id: Id191face7b082e2e8d6e62f60b56248688d396f6
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202760
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mount/fuse/src/fuse-bridge.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index d17320b..f61fa39 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -4165,6 +4165,7 @@ fuse_setxattr(xlator_t *this, fuse_in_header_t *finh, void *msg,
+                    "%" PRIu64 ": SETXATTR value allocation failed",
+                    finh->unique);
+             op_errno = ENOMEM;
++            GF_FREE(newkey);
+             goto done;
+         }
+         memcpy(dict_value, value, fsi->size);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch b/SOURCES/0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
new file mode 100644
index 0000000..dea23f2
--- /dev/null
+++ b/SOURCES/0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
@@ -0,0 +1,481 @@
+From dc03340654d921916ac3890d713fc84ef4bb1e28 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Sat, 29 Sep 2018 13:15:35 +0530
+Subject: [PATCH 445/449] feature/changelog: Avoid thread creation if xlator is
+ not enabled
+
+Problem:
+Changelog creates threads even if the changelog is not enabled
+
+Background:
+Changelog xlator broadly does two things
+  1. Journalling - Cosumers are geo-rep and glusterfind
+  2. Event Notification for registered events like (open, release etc) -
+     Consumers are bitrot, geo-rep
+
+The existing option "changelog.changelog" controls journalling and
+there is no option to control event notification and is enabled by
+default. So when bitrot/geo-rep is not enabled on the volume, threads
+and resources(rpc and rbuf) related to event notifications consumes
+resources and cpu cycle which is unnecessary.
+
+Solution:
+The solution is to have two different options as below.
+ 1. changelog-notification : Event notifications
+ 2. changelog : Journalling
+
+This patch introduces the option "changelog-notification", which is
+not exposed to the user. When either bitrot or changelog (journalling)
+is enabled, it internally enables 'changelog-notification'. But
+once 'changelog-notification' is enabled, it will not be disabled
+for the lifetime of the brick process, even after bitrot and changelog
+are disabled. As of now, rpc resource cleanup has a lot of races and
+is difficult to do cleanly. If allowed, it leads to memory leaks
+and crashes on enable/disable of bitrot or changelog (journal) in a
+loop. Hence, to be safe, event notification is not disabled within
+the lifetime of the process once enabled.
+
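+A rough sketch of the resulting one-way latch (names are illustrative,
+not the exact glusterfs code): the notification RPC machinery is
+started when either consumer needs it, and the flag is never reset
+afterwards.
+
+    #include <stdbool.h>
+
+    struct chlog_state {
+        bool journal_active; /* "changelog" (journalling) option */
+        bool rpc_active;     /* "changelog-notification" latch   */
+    };
+
+    static void reconf(struct chlog_state *s, bool want_journal,
+                       bool want_notify)
+    {
+        s->journal_active = want_journal;
+        if ((want_journal || want_notify) && !s->rpc_active) {
+            /* start the rpc server, rbuf and poller thread here ... */
+            s->rpc_active = true; /* latched for the process lifetime */
+        }
+        /* no teardown path: disabling notification is not supported */
+    }
+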
+> Change-Id: Ifd00286e0966049e8eb9f21567fe407cf11bb02a
+> Updates: #475
+> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+> (Cherry pick from commit 6de80bcd6366778ac34ce58ec496fa08cc02bd0b)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21896/)
+
+BUG: 1790336
+Change-Id: Ifd00286e0966049e8eb9f21567fe407cf11bb02a
+Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202778
+Tested-by: Mohit Agrawal <moagrawa@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ rpc/rpc-lib/src/rpcsvc.c                           |  26 ++--
+ tests/basic/changelog/changelog-history.t          |  12 +-
+ tests/bugs/bitrot/bug-1227996.t                    |   1 -
+ tests/bugs/bitrot/bug-1245981.t                    |   4 +-
+ xlators/features/changelog/src/changelog-helpers.h |   4 +
+ .../features/changelog/src/changelog-rpc-common.c  |   3 +
+ xlators/features/changelog/src/changelog.c         | 149 +++++++++++++++------
+ xlators/mgmt/glusterd/src/glusterd-volgen.c        |  13 ++
+ 8 files changed, 154 insertions(+), 58 deletions(-)
+
+diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
+index b058932..3f184bf 100644
+--- a/rpc/rpc-lib/src/rpcsvc.c
++++ b/rpc/rpc-lib/src/rpcsvc.c
+@@ -1865,6 +1865,18 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
+         goto out;
+     }
+ 
++    pthread_rwlock_rdlock(&svc->rpclock);
++    {
++        list_for_each_entry(prog, &svc->programs, program)
++        {
++            if ((prog->prognum == program->prognum) &&
++                (prog->progver == program->progver)) {
++                break;
++            }
++        }
++    }
++    pthread_rwlock_unlock(&svc->rpclock);
++
+     ret = rpcsvc_program_unregister_portmap(program);
+     if (ret == -1) {
+         gf_log(GF_RPCSVC, GF_LOG_ERROR,
+@@ -1881,17 +1893,6 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
+         goto out;
+     }
+ #endif
+-    pthread_rwlock_rdlock(&svc->rpclock);
+-    {
+-        list_for_each_entry(prog, &svc->programs, program)
+-        {
+-            if ((prog->prognum == program->prognum) &&
+-                (prog->progver == program->progver)) {
+-                break;
+-            }
+-        }
+-    }
+-    pthread_rwlock_unlock(&svc->rpclock);
+ 
+     gf_log(GF_RPCSVC, GF_LOG_DEBUG,
+            "Program unregistered: %s, Num: %d,"
+@@ -1912,6 +1913,9 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
+ 
+     ret = 0;
+ out:
++    if (prog)
++        GF_FREE(prog);
++
+     if (ret == -1) {
+         if (program) {
+             gf_log(GF_RPCSVC, GF_LOG_ERROR,
+diff --git a/tests/basic/changelog/changelog-history.t b/tests/basic/changelog/changelog-history.t
+index 3ce4098..b56e247 100644
+--- a/tests/basic/changelog/changelog-history.t
++++ b/tests/basic/changelog/changelog-history.t
+@@ -5,6 +5,7 @@
+ 
+ cleanup;
+ 
++SCRIPT_TIMEOUT=300
+ HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
+ build_tester $HISTORY_BIN_PATH/get-history.c -lgfchangelog
+ 
+@@ -68,18 +69,21 @@ TEST $CLI volume set $V0 changelog.changelog off
+ sleep 3
+ time_after_disable=$(date '+%s')
+ 
++TEST $CLI volume set $V0 changelog.changelog on
++sleep 5
++
+ #Passes, gives the changelogs till continuous changelogs are available
+ # but returns 1
+-EXPECT "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
++EXPECT_WITHIN 10 "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
+ 
+ #Fails as start falls between htime files
+-EXPECT "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
++EXPECT_WITHIN 10 "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
+ 
+ #Passes as start and end falls in same htime file
+-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
++EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
+ 
+ #Passes, gives the changelogs till continuous changelogs are available
+-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
++EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
+ 
+ TEST rm $HISTORY_BIN_PATH/get-history
+ 
+diff --git a/tests/bugs/bitrot/bug-1227996.t b/tests/bugs/bitrot/bug-1227996.t
+index 47ebc42..121c7b5 100644
+--- a/tests/bugs/bitrot/bug-1227996.t
++++ b/tests/bugs/bitrot/bug-1227996.t
+@@ -17,7 +17,6 @@ TEST pidof glusterd;
+ ## Lets create and start the volume
+ TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+ TEST $CLI volume start $V0
+-
+ ## Enable bitrot on volume $V0
+ TEST $CLI volume bitrot $V0 enable
+ 
+diff --git a/tests/bugs/bitrot/bug-1245981.t b/tests/bugs/bitrot/bug-1245981.t
+index 2bed4d9..f395525 100644
+--- a/tests/bugs/bitrot/bug-1245981.t
++++ b/tests/bugs/bitrot/bug-1245981.t
+@@ -47,9 +47,9 @@ touch $M0/5
+ sleep `expr $SLEEP_TIME \* 2`
+ 
+ backpath=$(get_backend_paths $fname)
+-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
+ 
+ backpath=$(get_backend_paths $M0/new_file)
+-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
+ 
+ cleanup;
+diff --git a/xlators/features/changelog/src/changelog-helpers.h b/xlators/features/changelog/src/changelog-helpers.h
+index 517c4dc..3afacc9 100644
+--- a/xlators/features/changelog/src/changelog-helpers.h
++++ b/xlators/features/changelog/src/changelog-helpers.h
+@@ -190,8 +190,12 @@ typedef struct changelog_ev_selector {
+ 
+ /* changelog's private structure */
+ struct changelog_priv {
++    /* changelog journalling */
+     gf_boolean_t active;
+ 
++    /* changelog live notifications */
++    gf_boolean_t rpc_active;
++
+     /* to generate unique socket file per brick */
+     char *changelog_brick;
+ 
+diff --git a/xlators/features/changelog/src/changelog-rpc-common.c b/xlators/features/changelog/src/changelog-rpc-common.c
+index dcdcfb1..f2d1853 100644
+--- a/xlators/features/changelog/src/changelog-rpc-common.c
++++ b/xlators/features/changelog/src/changelog-rpc-common.c
+@@ -263,6 +263,9 @@ changelog_rpc_server_destroy(xlator_t *this, rpcsvc_t *rpc, char *sockfile,
+     struct rpcsvc_program *prog = NULL;
+     rpc_transport_t *trans = NULL;
+ 
++    if (!rpc)
++        return;
++
+     while (*progs) {
+         prog = *progs;
+         (void)rpcsvc_program_unregister(rpc, prog);
+diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c
+index d9025f3..ff06c09 100644
+--- a/xlators/features/changelog/src/changelog.c
++++ b/xlators/features/changelog/src/changelog.c
+@@ -34,6 +34,12 @@ static struct changelog_bootstrap cb_bootstrap[] = {
+     },
+ };
+ 
++static int
++changelog_init_rpc(xlator_t *this, changelog_priv_t *priv);
++
++static int
++changelog_init(xlator_t *this, changelog_priv_t *priv);
++
+ /* Entry operations - TYPE III */
+ 
+ /**
+@@ -2008,6 +2014,11 @@ notify(xlator_t *this, int event, void *data, ...)
+     uint64_t clntcnt = 0;
+     changelog_clnt_t *conn = NULL;
+     gf_boolean_t cleanup_notify = _gf_false;
++    char sockfile[UNIX_PATH_MAX] = {
++        0,
++    };
++    rpcsvc_listener_t *listener = NULL;
++    rpcsvc_listener_t *next = NULL;
+ 
+     INIT_LIST_HEAD(&queue);
+ 
+@@ -2021,23 +2032,40 @@ notify(xlator_t *this, int event, void *data, ...)
+                "cleanup changelog rpc connection of brick %s",
+                priv->victim->name);
+ 
+-        this->cleanup_starting = 1;
+-        changelog_destroy_rpc_listner(this, priv);
+-        conn = &priv->connections;
+-        if (conn)
+-            changelog_ev_cleanup_connections(this, conn);
+-        xprtcnt = GF_ATOMIC_GET(priv->xprtcnt);
+-        clntcnt = GF_ATOMIC_GET(priv->clntcnt);
+-
+-        if (!xprtcnt && !clntcnt) {
+-            LOCK(&priv->lock);
+-            {
+-                cleanup_notify = priv->notify_down;
+-                priv->notify_down = _gf_true;
++        if (priv->rpc_active) {
++            this->cleanup_starting = 1;
++            changelog_destroy_rpc_listner(this, priv);
++            conn = &priv->connections;
++            if (conn)
++                changelog_ev_cleanup_connections(this, conn);
++            xprtcnt = GF_ATOMIC_GET(priv->xprtcnt);
++            clntcnt = GF_ATOMIC_GET(priv->clntcnt);
++            if (!xprtcnt && !clntcnt) {
++                LOCK(&priv->lock);
++                {
++                    cleanup_notify = priv->notify_down;
++                    priv->notify_down = _gf_true;
++                }
++                UNLOCK(&priv->lock);
++                list_for_each_entry_safe(listener, next, &priv->rpc->listeners,
++                                         list)
++                {
++                    if (listener->trans) {
++                        rpc_transport_unref(listener->trans);
++                    }
++                }
++                CHANGELOG_MAKE_SOCKET_PATH(priv->changelog_brick, sockfile,
++                                           UNIX_PATH_MAX);
++                sys_unlink(sockfile);
++                if (priv->rpc) {
++                    rpcsvc_destroy(priv->rpc);
++                    priv->rpc = NULL;
++                }
++                if (!cleanup_notify)
++                    default_notify(this, GF_EVENT_PARENT_DOWN, data);
+             }
+-            UNLOCK(&priv->lock);
+-            if (!cleanup_notify)
+-                default_notify(this, GF_EVENT_PARENT_DOWN, data);
++        } else {
++            default_notify(this, GF_EVENT_PARENT_DOWN, data);
+         }
+         goto out;
+     }
+@@ -2425,6 +2453,22 @@ changelog_barrier_pthread_destroy(changelog_priv_t *priv)
+     LOCK_DESTROY(&priv->bflags.lock);
+ }
+ 
++static void
++changelog_cleanup_rpc(xlator_t *this, changelog_priv_t *priv)
++{
++    /* terminate rpc server */
++    if (!this->cleanup_starting)
++        changelog_destroy_rpc_listner(this, priv);
++
++    (void)changelog_cleanup_rpc_threads(this, priv);
++    /* cleanup rot buffs */
++    rbuf_dtor(priv->rbuf);
++
++    /* cleanup poller thread */
++    if (priv->poller)
++        (void)changelog_thread_cleanup(this, priv->poller);
++}
++
+ int
+ reconfigure(xlator_t *this, dict_t *options)
+ {
+@@ -2433,6 +2477,9 @@ reconfigure(xlator_t *this, dict_t *options)
+     changelog_priv_t *priv = NULL;
+     gf_boolean_t active_earlier = _gf_true;
+     gf_boolean_t active_now = _gf_true;
++    gf_boolean_t rpc_active_earlier = _gf_true;
++    gf_boolean_t rpc_active_now = _gf_true;
++    gf_boolean_t iniate_rpc = _gf_false;
+     changelog_time_slice_t *slice = NULL;
+     changelog_log_data_t cld = {
+         0,
+@@ -2454,6 +2501,7 @@ reconfigure(xlator_t *this, dict_t *options)
+ 
+     ret = -1;
+     active_earlier = priv->active;
++    rpc_active_earlier = priv->rpc_active;
+ 
+     /* first stop the rollover and the fsync thread */
+     changelog_cleanup_helper_threads(this, priv);
+@@ -2487,6 +2535,29 @@ reconfigure(xlator_t *this, dict_t *options)
+         goto out;
+ 
+     GF_OPTION_RECONF("changelog", active_now, options, bool, out);
++    GF_OPTION_RECONF("changelog-notification", rpc_active_now, options, bool,
++                     out);
++
++    /* If journalling is enabled, enable rpc notifications */
++    if (active_now && !active_earlier) {
++        if (!rpc_active_earlier)
++            iniate_rpc = _gf_true;
++    }
++
++    if (rpc_active_now && !rpc_active_earlier) {
++        iniate_rpc = _gf_true;
++    }
++
++    /* TODO: Disable of changelog-notifications is not supported for now
++     * as there is no clean way of cleaning up of rpc resources
++     */
++
++    if (iniate_rpc) {
++        ret = changelog_init_rpc(this, priv);
++        if (ret)
++            goto out;
++        priv->rpc_active = _gf_true;
++    }
+ 
+     /**
+      * changelog_handle_change() handles changes that could possibly
+@@ -2618,6 +2689,7 @@ changelog_init_options(xlator_t *this, changelog_priv_t *priv)
+         goto dealloc_2;
+ 
+     GF_OPTION_INIT("changelog", priv->active, bool, dealloc_2);
++    GF_OPTION_INIT("changelog-notification", priv->rpc_active, bool, dealloc_2);
+     GF_OPTION_INIT("capture-del-path", priv->capture_del_path, bool, dealloc_2);
+ 
+     GF_OPTION_INIT("op-mode", tmp, str, dealloc_2);
+@@ -2656,22 +2728,6 @@ error_return:
+     return -1;
+ }
+ 
+-static void
+-changelog_cleanup_rpc(xlator_t *this, changelog_priv_t *priv)
+-{
+-    /* terminate rpc server */
+-    if (!this->cleanup_starting)
+-        changelog_destroy_rpc_listner(this, priv);
+-
+-    (void)changelog_cleanup_rpc_threads(this, priv);
+-    /* cleanup rot buffs */
+-    rbuf_dtor(priv->rbuf);
+-
+-    /* cleanup poller thread */
+-    if (priv->poller)
+-        (void)changelog_thread_cleanup(this, priv->poller);
+-}
+-
+ static int
+ changelog_init_rpc(xlator_t *this, changelog_priv_t *priv)
+ {
+@@ -2768,10 +2824,13 @@ init(xlator_t *this)
+     INIT_LIST_HEAD(&priv->queue);
+     priv->barrier_enabled = _gf_false;
+ 
+-    /* RPC ball rolling.. */
+-    ret = changelog_init_rpc(this, priv);
+-    if (ret)
+-        goto cleanup_barrier;
++    if (priv->rpc_active || priv->active) {
++        /* RPC ball rolling.. */
++        ret = changelog_init_rpc(this, priv);
++        if (ret)
++            goto cleanup_barrier;
++        priv->rpc_active = _gf_true;
++    }
+ 
+     ret = changelog_init(this, priv);
+     if (ret)
+@@ -2783,7 +2842,9 @@ init(xlator_t *this)
+     return 0;
+ 
+ cleanup_rpc:
+-    changelog_cleanup_rpc(this, priv);
++    if (priv->rpc_active) {
++        changelog_cleanup_rpc(this, priv);
++    }
+ cleanup_barrier:
+     changelog_barrier_pthread_destroy(priv);
+ cleanup_options:
+@@ -2808,9 +2869,10 @@ fini(xlator_t *this)
+     priv = this->private;
+ 
+     if (priv) {
+-        /* terminate RPC server/threads */
+-        changelog_cleanup_rpc(this, priv);
+-
++        if (priv->active || priv->rpc_active) {
++            /* terminate RPC server/threads */
++            changelog_cleanup_rpc(this, priv);
++        }
+         /* call barrier_disable to cancel timer */
+         if (priv->barrier_enabled)
+             __chlog_barrier_disable(this, &queue);
+@@ -2879,6 +2941,13 @@ struct volume_options options[] = {
+      .flags = OPT_FLAG_SETTABLE,
+      .level = OPT_STATUS_BASIC,
+      .tags = {"journal", "georep", "glusterfind"}},
++    {.key = {"changelog-notification"},
++     .type = GF_OPTION_TYPE_BOOL,
++     .default_value = "off",
++     .description = "enable/disable changelog live notification",
++     .op_version = {3},
++     .level = OPT_STATUS_BASIC,
++     .tags = {"bitrot", "georep"}},
+     {.key = {"changelog-brick"},
+      .type = GF_OPTION_TYPE_PATH,
+      .description = "brick path to generate unique socket file name."
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 16346e7..13f84ea 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -1876,6 +1876,19 @@ brick_graph_add_changelog(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+     ret = xlator_set_fixed_option(xl, "changelog-dir", changelog_basepath);
+     if (ret)
+         goto out;
++
++    ret = glusterd_is_bitrot_enabled(volinfo);
++    if (ret == -1) {
++        goto out;
++    } else if (ret) {
++        ret = xlator_set_fixed_option(xl, "changelog-notification", "on");
++        if (ret)
++            goto out;
++    } else {
++        ret = xlator_set_fixed_option(xl, "changelog-notification", "off");
++        if (ret)
++            goto out;
++    }
+ out:
+     return ret;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0446-bitrot-Make-number-of-signer-threads-configurable.patch b/SOURCES/0446-bitrot-Make-number-of-signer-threads-configurable.patch
new file mode 100644
index 0000000..8eb2089
--- /dev/null
+++ b/SOURCES/0446-bitrot-Make-number-of-signer-threads-configurable.patch
@@ -0,0 +1,594 @@
+From 866a4c49ad9c5a9125814a9f843d4c7fd967ab2b Mon Sep 17 00:00:00 2001
+From: Kotresh HR <khiremat@redhat.com>
+Date: Mon, 3 Feb 2020 18:10:17 +0530
+Subject: [PATCH 446/449] bitrot: Make number of signer threads configurable
+
+The number of signing process threads (glfs_brpobj)
+is set to 4 by default. The recommendation is to set
+it to the number of available cores. This patch makes it
+configurable as follows:
+
+gluster vol bitrot <volname> signer-threads <count>
+
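+In outline (an illustrative sketch, not the patch's exact code), the
+fixed pthread_t workers[BR_WORKERS] array becomes a pool allocated to
+the configured size:
+
+    #include <pthread.h>
+    #include <stdint.h>
+    #include <stdlib.h>
+
+    static pthread_t *spawn_signers(uint32_t count,
+                                    void *(*worker)(void *), void *arg)
+    {
+        pthread_t *ths = calloc(count, sizeof(*ths));
+        if (!ths)
+            return NULL;
+
+        for (uint32_t i = 0; i < count; i++) {
+            if (pthread_create(&ths[i], NULL, worker, arg) != 0) {
+                while (i--) /* unwind workers started so far */
+                    pthread_cancel(ths[i]);
+                free(ths);
+                return NULL;
+            }
+        }
+        return ths;
+    }
+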
+> fixes: bz#1797869
+> Change-Id: Ia883b3e5e34e0bc8d095243508d320c9c9c58adc
+> Signed-off-by: Kotresh HR <khiremat@redhat.com>
+> (Cherry pick from commit 8fad76650bd85463708f59d2518f5b764ae4c702)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24091/)
+
+BUG: 1790336
+Change-Id: Ia883b3e5e34e0bc8d095243508d320c9c9c58adc
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202780
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c                           | 29 +++++++-
+ cli/src/cli-cmd-volume.c                           | 12 +++
+ doc/gluster.8                                      |  6 ++
+ libglusterfs/src/glusterfs/common-utils.h          |  1 +
+ rpc/xdr/src/cli1-xdr.x                             |  1 +
+ tests/bitrot/br-signer-threads-config-1797869.t    | 73 +++++++++++++++++++
+ xlators/features/bit-rot/src/bitd/bit-rot.c        | 45 +++++++++---
+ xlators/features/bit-rot/src/bitd/bit-rot.h        | 20 ++---
+ .../bit-rot/src/stub/bit-rot-stub-mem-types.h      |  1 +
+ xlators/mgmt/glusterd/src/glusterd-bitrot.c        | 85 ++++++++++++++++++++++
+ xlators/mgmt/glusterd/src/glusterd-volgen.c        | 16 ++--
+ xlators/mgmt/glusterd/src/glusterd-volume-set.c    |  9 +++
+ 12 files changed, 270 insertions(+), 28 deletions(-)
+ create mode 100644 tests/bitrot/br-signer-threads-config-1797869.t
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index 7446b95..5fd05f4 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -5661,7 +5661,7 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
+     char *volname = NULL;
+     char *opwords[] = {
+         "enable",       "disable", "scrub-throttle", "scrub-frequency", "scrub",
+-        "signing-time", NULL};
++        "signing-time", "signer-threads", NULL};
+     char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
+     char *scrub_freq_values[] = {"hourly",  "daily",  "weekly", "biweekly",
+                                  "monthly", "minute", NULL};
+@@ -5669,6 +5669,7 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
+     dict_t *dict = NULL;
+     gf_bitrot_type type = GF_BITROT_OPTION_TYPE_NONE;
+     int32_t expiry_time = 0;
++    int32_t signer_th_count = 0;
+ 
+     GF_ASSERT(words);
+     GF_ASSERT(options);
+@@ -5849,6 +5850,31 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
+             }
+             goto set_type;
+         }
++    } else if (!strcmp(words[3], "signer-threads")) {
++        if (!words[4]) {
++            cli_err(
++                "Missing signer-thread value for bitrot "
++                "option");
++            ret = -1;
++            goto out;
++        } else {
++            type = GF_BITROT_OPTION_TYPE_SIGNER_THREADS;
++
++            signer_th_count = strtol(words[4], NULL, 0);
++            if (signer_th_count < 1) {
++                cli_err("signer-thread count should not be less than 1");
++                ret = -1;
++                goto out;
++            }
++
++            ret = dict_set_uint32(dict, "signer-threads",
++                                  (unsigned int)signer_th_count);
++            if (ret) {
++                cli_out("Failed to set dict for bitrot");
++                goto out;
++            }
++            goto set_type;
++        }
+     } else {
+         cli_err(
+             "Invalid option %s for bitrot. Please enter valid "
+@@ -5857,7 +5883,6 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
+         ret = -1;
+         goto out;
+     }
+-
+ set_type:
+     ret = dict_set_int32(dict, "type", type);
+     if (ret < 0)
+diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
+index f33fc99..72504ca 100644
+--- a/cli/src/cli-cmd-volume.c
++++ b/cli/src/cli-cmd-volume.c
+@@ -3236,6 +3236,16 @@ struct cli_cmd bitrot_cmds[] = {
+     {"volume bitrot <VOLNAME> {enable|disable}", NULL, /*cli_cmd_bitrot_cbk,*/
+      "Enable/disable bitrot for volume <VOLNAME>"},
+ 
++    {"volume bitrot <VOLNAME> signing-time <time-in-secs>",
++     NULL, /*cli_cmd_bitrot_cbk,*/
++     "Waiting time for an object after last fd is closed to start signing "
++     "process"},
++
++    {"volume bitrot <VOLNAME> signer-threads <count>",
++     NULL, /*cli_cmd_bitrot_cbk,*/
++     "Number of signing process threads. Usually set to number of available "
++     "cores"},
++
+     {"volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive}",
+      NULL, /*cli_cmd_bitrot_cbk,*/
+      "Set the speed of the scrubber for volume <VOLNAME>"},
+@@ -3251,6 +3261,8 @@ struct cli_cmd bitrot_cmds[] = {
+      "the scrubber. ondemand starts the scrubber immediately."},
+ 
+     {"volume bitrot <VOLNAME> {enable|disable}\n"
++     "volume bitrot <VOLNAME> signing-time <time-in-secs>\n"
++     "volume bitrot <VOLNAME> signer-threads <count>\n"
+      "volume bitrot <volname> scrub-throttle {lazy|normal|aggressive}\n"
+      "volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"
+      "|monthly}\n"
+diff --git a/doc/gluster.8 b/doc/gluster.8
+index 66bdb48..084346d 100644
+--- a/doc/gluster.8
++++ b/doc/gluster.8
+@@ -244,6 +244,12 @@ Use "!<OPTION>" to reset option <OPTION> to default value.
+ \fB\ volume bitrot <VOLNAME> {enable|disable} \fR
+ Enable/disable bitrot for volume <VOLNAME>
+ .TP
++\fB\ volume bitrot <VOLNAME> signing-time <time-in-secs> \fR
++Waiting time for an object after last fd is closed to start signing process.
++.TP
++\fB\ volume bitrot <VOLNAME> signer-threads <count> \fR
++Number of signing process threads. Usually set to number of available cores.
++.TP
+ \fB\ volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive} \fR
+ Scrub-throttle value is a measure of how fast or slow the scrubber scrubs the filesystem for volume <VOLNAME>
+ .TP
+diff --git a/libglusterfs/src/glusterfs/common-utils.h b/libglusterfs/src/glusterfs/common-utils.h
+index 0e2ecc7..f0a0a41 100644
+--- a/libglusterfs/src/glusterfs/common-utils.h
++++ b/libglusterfs/src/glusterfs/common-utils.h
+@@ -126,6 +126,7 @@ trap(void);
+ 
+ /* Default value of signing waiting time to sign a file for bitrot */
+ #define SIGNING_TIMEOUT "120"
++#define BR_WORKERS "4"
+ 
+ /* xxhash */
+ #define GF_XXH64_DIGEST_LENGTH 8
+diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
+index a32c864..777cb00 100644
+--- a/rpc/xdr/src/cli1-xdr.x
++++ b/rpc/xdr/src/cli1-xdr.x
+@@ -68,6 +68,7 @@ enum gf_bitrot_type {
+         GF_BITROT_OPTION_TYPE_EXPIRY_TIME,
+         GF_BITROT_CMD_SCRUB_STATUS,
+         GF_BITROT_CMD_SCRUB_ONDEMAND,
++        GF_BITROT_OPTION_TYPE_SIGNER_THREADS,
+         GF_BITROT_OPTION_TYPE_MAX
+ };
+ 
+diff --git a/tests/bitrot/br-signer-threads-config-1797869.t b/tests/bitrot/br-signer-threads-config-1797869.t
+new file mode 100644
+index 0000000..657ef3e
+--- /dev/null
++++ b/tests/bitrot/br-signer-threads-config-1797869.t
+@@ -0,0 +1,73 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++. $(dirname $0)/../cluster.rc
++
++function get_bitd_count_1 {
++        ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | wc -l
++}
++
++function get_bitd_count_2 {
++        ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | wc -l
++}
++
++function get_bitd_pid_1 {
++        ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | awk '{print $2}'
++}
++
++function get_bitd_pid_2 {
++        ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | awk '{print $2}'
++}
++
++function get_signer_th_count_1 {
++        ps -eL | grep $(get_bitd_pid_1) | grep glfs_brpobj | wc -l
++}
++
++function get_signer_th_count_2 {
++        ps -eL | grep $(get_bitd_pid_2) | grep glfs_brpobj | wc -l
++}
++
++cleanup;
++
++TEST launch_cluster 2
++
++TEST $CLI_1 peer probe $H2;
++EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
++
++TEST $CLI_1 volume create $V0 $H1:$B1
++TEST $CLI_1 volume create $V1 $H2:$B2
++EXPECT 'Created' volinfo_field_1 $V0 'Status';
++EXPECT 'Created' volinfo_field_1 $V1 'Status';
++
++TEST $CLI_1 volume start $V0
++TEST $CLI_1 volume start $V1
++EXPECT 'Started' volinfo_field_1 $V0 'Status';
++EXPECT 'Started' volinfo_field_1 $V1 'Status';
++
++#Enable bitrot
++TEST $CLI_1 volume bitrot $V0 enable
++TEST $CLI_1 volume bitrot $V1 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_2
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
++
++old_bitd_pid_1=$(get_bitd_pid_1)
++old_bitd_pid_2=$(get_bitd_pid_2)
++TEST $CLI_1 volume bitrot $V0 signer-threads 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
++EXPECT_NOT "$old_bitd_pid_1" get_bitd_pid_1;
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
++EXPECT "$old_bitd_pid_2" get_bitd_pid_2;
++
++old_bitd_pid_1=$(get_bitd_pid_1)
++old_bitd_pid_2=$(get_bitd_pid_2)
++TEST $CLI_1 volume bitrot $V1 signer-threads 2
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_signer_th_count_2
++EXPECT_NOT "$old_bitd_pid_2" get_bitd_pid_2;
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
++EXPECT "$old_bitd_pid_1" get_bitd_pid_1;
++
++cleanup;
+diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
+index 7b1c5dc..b8feef7 100644
+--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
++++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
+@@ -1734,22 +1734,26 @@ out:
+     return 0;
+ }
+ 
+-/**
+- * Initialize signer specific structures, spawn worker threads.
+- */
+-
+ static void
+ br_fini_signer(xlator_t *this, br_private_t *priv)
+ {
+     int i = 0;
+ 
+-    for (; i < BR_WORKERS; i++) {
++    if (priv == NULL)
++        return;
++
++    for (; i < priv->signer_th_count; i++) {
+         (void)gf_thread_cleanup_xint(priv->obj_queue->workers[i]);
+     }
++    GF_FREE(priv->obj_queue->workers);
+ 
+     pthread_cond_destroy(&priv->object_cond);
+ }
+ 
++/**
++ * Initialize signer specific structures, spawn worker threads.
++ */
++
+ static int32_t
+ br_init_signer(xlator_t *this, br_private_t *priv)
+ {
+@@ -1769,7 +1773,12 @@ br_init_signer(xlator_t *this, br_private_t *priv)
+         goto cleanup_cond;
+     INIT_LIST_HEAD(&priv->obj_queue->objects);
+ 
+-    for (i = 0; i < BR_WORKERS; i++) {
++    priv->obj_queue->workers = GF_CALLOC(
++        priv->signer_th_count, sizeof(pthread_t), gf_br_mt_br_worker_t);
++    if (!priv->obj_queue->workers)
++        goto cleanup_obj_queue;
++
++    for (i = 0; i < priv->signer_th_count; i++) {
+         ret = gf_thread_create(&priv->obj_queue->workers[i], NULL,
+                                br_process_object, this, "brpobj");
+         if (ret != 0) {
+@@ -1787,7 +1796,9 @@ cleanup_threads:
+     for (i--; i >= 0; i--) {
+         (void)gf_thread_cleanup_xint(priv->obj_queue->workers[i]);
+     }
++    GF_FREE(priv->obj_queue->workers);
+ 
++cleanup_obj_queue:
+     GF_FREE(priv->obj_queue);
+ 
+ cleanup_cond:
+@@ -1840,7 +1851,7 @@ br_rate_limit_signer(xlator_t *this, int child_count, int numbricks)
+     if (contribution == 0)
+         contribution = 1;
+     spec.rate = BR_HASH_CALC_READ_SIZE * contribution;
+-    spec.maxlimit = BR_WORKERS * BR_HASH_CALC_READ_SIZE;
++    spec.maxlimit = priv->signer_th_count * BR_HASH_CALC_READ_SIZE;
+ 
+ #endif
+ 
+@@ -1860,11 +1871,16 @@ br_rate_limit_signer(xlator_t *this, int child_count, int numbricks)
+ static int32_t
+ br_signer_handle_options(xlator_t *this, br_private_t *priv, dict_t *options)
+ {
+-    if (options)
++    if (options) {
+         GF_OPTION_RECONF("expiry-time", priv->expiry_time, options, uint32,
+                          error_return);
+-    else
++        GF_OPTION_RECONF("signer-threads", priv->signer_th_count, options,
++                         uint32, error_return);
++    } else {
+         GF_OPTION_INIT("expiry-time", priv->expiry_time, uint32, error_return);
++        GF_OPTION_INIT("signer-threads", priv->signer_th_count, uint32,
++                       error_return);
++    }
+ 
+     return 0;
+ 
+@@ -1880,6 +1896,8 @@ br_signer_init(xlator_t *this, br_private_t *priv)
+ 
+     GF_OPTION_INIT("expiry-time", priv->expiry_time, uint32, error_return);
+     GF_OPTION_INIT("brick-count", numbricks, int32, error_return);
++    GF_OPTION_INIT("signer-threads", priv->signer_th_count, uint32,
++                   error_return);
+ 
+     ret = br_rate_limit_signer(this, priv->child_count, numbricks);
+     if (ret)
+@@ -2210,6 +2228,15 @@ struct volume_options options[] = {
+         .description = "Pause/Resume scrub. Upon resume, scrubber "
+                        "continues from where it left off.",
+     },
++    {
++        .key = {"signer-threads"},
++        .type = GF_OPTION_TYPE_INT,
++        .default_value = BR_WORKERS,
++        .op_version = {GD_OP_VERSION_7_0},
++        .flags = OPT_FLAG_SETTABLE,
++        .description = "Number of signing process threads. As a best "
++                       "practice, set this to the number of processor cores",
++    },
+     {.key = {NULL}},
+ };
+ 
+diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.h b/xlators/features/bit-rot/src/bitd/bit-rot.h
+index a4d4fd7..8ac7dcd 100644
+--- a/xlators/features/bit-rot/src/bitd/bit-rot.h
++++ b/xlators/features/bit-rot/src/bitd/bit-rot.h
+@@ -30,12 +30,6 @@
+ 
+ #include <openssl/sha.h>
+ 
+-/**
+- * TODO: make this configurable. As a best practice, set this to the
+- * number of processor cores.
+- */
+-#define BR_WORKERS 4
+-
+ typedef enum scrub_throttle {
+     BR_SCRUB_THROTTLE_VOID = -1,
+     BR_SCRUB_THROTTLE_LAZY = 0,
+@@ -108,12 +102,12 @@ struct br_child {
+ typedef struct br_child br_child_t;
+ 
+ struct br_obj_n_workers {
+-    struct list_head objects;      /* queue of objects expired from the
+-                                      timer wheel and ready to be picked
+-                                      up for signing */
+-    pthread_t workers[BR_WORKERS]; /* Threads which pick up the objects
+-                                      from the above queue and start
+-                                      signing each object */
++    struct list_head objects; /* queue of objects expired from the
++                                 timer wheel and ready to be picked
++                                 up for signing */
++    pthread_t *workers;       /* Threads which pick up the objects
++                                 from the above queue and start
++                                 signing each object */
+ };
+ 
+ struct br_scrubber {
+@@ -209,6 +203,8 @@ struct br_private {
+ 
+     uint32_t expiry_time; /* objects "wait" time */
+ 
++    uint32_t signer_th_count; /* Number of signing process threads */
++
+     tbf_t *tbf; /* token bucket filter */
+ 
+     gf_boolean_t iamscrubber; /* function as a fs scrubber */
+diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h b/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h
+index 40bcda1..9d93caf 100644
+--- a/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h
++++ b/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h
+@@ -29,6 +29,7 @@ enum br_mem_types {
+     gf_br_stub_mt_sigstub_t,
+     gf_br_mt_br_child_event_t,
+     gf_br_stub_mt_misc,
++    gf_br_mt_br_worker_t,
+     gf_br_stub_mt_end,
+ };
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+index c653249..f79af2d 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
++++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+@@ -34,6 +34,7 @@ const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {
+     [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency",
+     [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub",
+     [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time",
++    [GF_BITROT_OPTION_TYPE_SIGNER_THREADS] = "signer-threads",
+ };
+ 
+ int
+@@ -354,6 +355,81 @@ out:
+     return ret;
+ }
+ 
++static gf_boolean_t
++is_bitd_configure_noop(xlator_t *this, glusterd_volinfo_t *volinfo)
++{
++    gf_boolean_t noop = _gf_true;
++    glusterd_brickinfo_t *brickinfo = NULL;
++
++    if (!glusterd_is_bitrot_enabled(volinfo))
++        goto out;
++    else if (volinfo->status != GLUSTERD_STATUS_STARTED)
++        goto out;
++    else {
++        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
++        {
++            if (!glusterd_is_local_brick(this, volinfo, brickinfo))
++                continue;
++            noop = _gf_false;
++            return noop;
++        }
++    }
++out:
++    return noop;
++}
++
++static int
++glusterd_bitrot_signer_threads(glusterd_volinfo_t *volinfo, dict_t *dict,
++                               char *key, char **op_errstr)
++{
++    int32_t ret = -1;
++    uint32_t signer_th_count = 0;
++    uint32_t existing_th_count = 0;
++    xlator_t *this = NULL;
++    glusterd_conf_t *priv = NULL;
++    char dkey[32] = {
++        0,
++    };
++
++    this = THIS;
++    GF_ASSERT(this);
++
++    priv = this->private;
++    GF_VALIDATE_OR_GOTO(this->name, priv, out);
++
++    ret = dict_get_uint32(dict, "signer-threads", &signer_th_count);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
++               "Unable to get bitrot signer thread count.");
++        goto out;
++    }
++
++    ret = dict_get_uint32(volinfo->dict, key, &existing_th_count);
++    if (ret == 0 && signer_th_count == existing_th_count) {
++        goto out;
++    }
++
++    snprintf(dkey, sizeof(dkey), "%d", signer_th_count);
++    ret = dict_set_dynstr_with_alloc(volinfo->dict, key, dkey);
++    if (ret) {
++        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
++               "Failed to set option %s", key);
++        goto out;
++    }
++
++    if (!is_bitd_configure_noop(this, volinfo)) {
++        ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL,
++                                     PROC_START_NO_WAIT);
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITDSVC_RECONF_FAIL,
++                   "Failed to reconfigure bitrot services");
++            goto out;
++        }
++    }
++out:
++    return ret;
++}
++
+ static int
+ glusterd_bitrot_enable(glusterd_volinfo_t *volinfo, char **op_errstr)
+ {
+@@ -594,6 +670,15 @@ glusterd_op_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+                 volinfo, dict, "features.expiry-time", op_errstr);
+             if (ret)
+                 goto out;
++            break;
++
++        case GF_BITROT_OPTION_TYPE_SIGNER_THREADS:
++            ret = glusterd_bitrot_signer_threads(
++                volinfo, dict, "features.signer-threads", op_errstr);
++            if (ret)
++                goto out;
++            break;
++
+         case GF_BITROT_CMD_SCRUB_STATUS:
+         case GF_BITROT_CMD_SCRUB_ONDEMAND:
+             break;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 13f84ea..094a71f 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -4658,6 +4658,12 @@ bitrot_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
+             return -1;
+     }
+ 
++    if (!strcmp(vme->option, "signer-threads")) {
++        ret = xlator_set_fixed_option(xl, "signer-threads", vme->value);
++        if (ret)
++            return -1;
++    }
++
+     return ret;
+ }
+ 
+@@ -4940,18 +4946,18 @@ glusterd_prepare_shd_volume_options_for_tier(glusterd_volinfo_t *volinfo,
+                                              dict_t *set_dict)
+ {
+     int ret = -1;
+-    char           *key             = NULL;
++    char *key = NULL;
+ 
+-    key = volgen_get_shd_key (volinfo->tier_info.cold_type);
++    key = volgen_get_shd_key(volinfo->tier_info.cold_type);
+     if (key) {
+-        ret = dict_set_str (set_dict, key, "enable");
++        ret = dict_set_str(set_dict, key, "enable");
+         if (ret)
+             goto out;
+     }
+ 
+-    key = volgen_get_shd_key (volinfo->tier_info.hot_type);
++    key = volgen_get_shd_key(volinfo->tier_info.hot_type);
+     if (key) {
+-        ret = dict_set_str (set_dict, key, "enable");
++        ret = dict_set_str(set_dict, key, "enable");
+         if (ret)
+             goto out;
+     }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+index 9001b88..62acadf 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+@@ -3379,6 +3379,15 @@ struct volopt_map_entry glusterd_volopt_map[] = {
+         .op_version = GD_OP_VERSION_3_7_0,
+         .type = NO_DOC,
+     },
++    {
++        .key = "features.signer-threads",
++        .voltype = "features/bit-rot",
++        .value = BR_WORKERS,
++        .option = "signer-threads",
++        .op_version = GD_OP_VERSION_7_0,
++        .type = NO_DOC,
++    },
++    /* Upcall translator options */
+     /* Upcall translator options */
+     {
+         .key = "features.cache-invalidation",
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch b/SOURCES/0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
new file mode 100644
index 0000000..a39b61b
--- /dev/null
+++ b/SOURCES/0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
@@ -0,0 +1,359 @@
+From 51090a4b3cb000d601083f12d1875547819fc03f Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Wed, 4 Mar 2020 09:17:26 +0530
+Subject: [PATCH 447/449] core[brick_mux]: brick crashed when creating and
+ deleting volumes over time
+
+Problem: In a brick_mux environment, when volumes are created/stopped in a
+         loop over a long period of time, the main brick process eventually
+         crashes. It crashes because the process does not free the memory of
+         all objects at the time a volume is detached.
+         The following objects are missed at the time of detaching a volume:
+         1) xlator object for a brick graph
+         2) local_pool for the posix_lock xlator
+         3) rpc object cleanup at the quota xlator
+         4) inode leak at the brick xlator
+
+Solution: Resolve all of these leaks at the time of detaching a brick to
+          avoid the crash.
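+
+As a condensed sketch of the cleanup order this patch establishes in
+xlator_mem_cleanup() (variable names mirror the diff below; the helpers
+are assumed to behave as used there):
+
+    /* Sketch: free what a detached brick leaves behind. */
+    if (this->itable) {
+        inode_table_destroy(this->itable);      /* inode leak (4) */
+        this->itable = NULL;
+    }
+    if (this->fini)
+        this->fini(this);   /* the xlators' fini release local_pool (2)
+                             * and the quota rpc object (3) */
+    if (graph_cleanup) {    /* victim was unlinked from the graph */
+        pthread_mutex_lock(&graph->mutex);
+        for (prev = this; prev != NULL; prev = trav) {
+            trav = prev->next;
+            GF_FREE(prev);  /* xlator objects of the brick graph (1) */
+        }
+        pthread_mutex_unlock(&graph->mutex);
+    }
+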
+> Change-Id: Ibb6e46c5fba22b9441a88cbaf6b3278823235913
+> updates: #977
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit e589d8de66d3325da8fbbbe44d1a5bd6335e08ab)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24209/)
+
+BUG: 1790336
+Change-Id: Ibb6e46c5fba22b9441a88cbaf6b3278823235913
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202782
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ libglusterfs/src/glusterfs/glusterfs.h             |  1 +
+ libglusterfs/src/graph.c                           |  1 +
+ libglusterfs/src/graph.y                           |  2 +-
+ libglusterfs/src/xlator.c                          | 29 ++++++++----
+ xlators/features/changelog/src/changelog.c         |  1 +
+ xlators/features/locks/src/posix.c                 |  4 ++
+ xlators/features/quota/src/quota-enforcer-client.c | 14 +++++-
+ xlators/features/quota/src/quota.c                 | 54 ++++++++++++++++++++--
+ xlators/features/quota/src/quota.h                 |  3 ++
+ xlators/protocol/server/src/server.c               | 12 +++--
+ 10 files changed, 103 insertions(+), 18 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
+index 177a020..584846e 100644
+--- a/libglusterfs/src/glusterfs/glusterfs.h
++++ b/libglusterfs/src/glusterfs/glusterfs.h
+@@ -603,6 +603,7 @@ struct _glusterfs_graph {
+     int used; /* Should be set when fuse gets
+                         first CHILD_UP */
+     uint32_t volfile_checksum;
++    pthread_mutex_t mutex;
+ };
+ typedef struct _glusterfs_graph glusterfs_graph_t;
+ 
+diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
+index bb5e67a..1cd92db 100644
+--- a/libglusterfs/src/graph.c
++++ b/libglusterfs/src/graph.c
+@@ -1092,6 +1092,7 @@ glusterfs_graph_destroy_residual(glusterfs_graph_t *graph)
+     ret = xlator_tree_free_memacct(graph->first);
+ 
+     list_del_init(&graph->list);
++    pthread_mutex_destroy(&graph->mutex);
+     GF_FREE(graph);
+ 
+     return ret;
+diff --git a/libglusterfs/src/graph.y b/libglusterfs/src/graph.y
+index 5b92985..5733515 100644
+--- a/libglusterfs/src/graph.y
++++ b/libglusterfs/src/graph.y
+@@ -541,7 +541,7 @@ glusterfs_graph_new ()
+                 return NULL;
+ 
+         INIT_LIST_HEAD (&graph->list);
+-
++        pthread_mutex_init(&graph->mutex, NULL);
+         gettimeofday (&graph->dob, NULL);
+ 
+         return graph;
+diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
+index 108b96a..36cc32c 100644
+--- a/libglusterfs/src/xlator.c
++++ b/libglusterfs/src/xlator.c
+@@ -938,6 +938,8 @@ xlator_mem_cleanup(xlator_t *this)
+     xlator_list_t **trav_p = NULL;
+     xlator_t *top = NULL;
+     xlator_t *victim = NULL;
++    glusterfs_graph_t *graph = NULL;
++    gf_boolean_t graph_cleanup = _gf_false;
+ 
+     if (this->call_cleanup || !this->ctx)
+         return;
+@@ -945,6 +947,12 @@ xlator_mem_cleanup(xlator_t *this)
+     this->call_cleanup = 1;
+     ctx = this->ctx;
+ 
++    inode_table = this->itable;
++    if (inode_table) {
++        inode_table_destroy(inode_table);
++        this->itable = NULL;
++    }
++
+     xlator_call_fini(trav);
+ 
+     while (prev) {
+@@ -953,12 +961,6 @@ xlator_mem_cleanup(xlator_t *this)
+         prev = trav;
+     }
+ 
+-    inode_table = this->itable;
+-    if (inode_table) {
+-        inode_table_destroy(inode_table);
+-        this->itable = NULL;
+-    }
+-
+     if (this->fini) {
+         this->fini(this);
+     }
+@@ -968,17 +970,28 @@ xlator_mem_cleanup(xlator_t *this)
+     if (ctx->active) {
+         top = ctx->active->first;
+         LOCK(&ctx->volfile_lock);
+-        /* TODO here we have leak for xlator node in a graph */
+-        /* Need to move only top xlator from a graph */
+         for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+             victim = (*trav_p)->xlator;
+             if (victim->call_cleanup && !strcmp(victim->name, this->name)) {
++                graph_cleanup = _gf_true;
+                 (*trav_p) = (*trav_p)->next;
+                 break;
+             }
+         }
+         UNLOCK(&ctx->volfile_lock);
+     }
++
++    if (graph_cleanup) {
++        prev = this;
++        graph = ctx->active;
++        pthread_mutex_lock(&graph->mutex);
++        while (prev) {
++            trav = prev->next;
++            GF_FREE(prev);
++            prev = trav;
++        }
++        pthread_mutex_unlock(&graph->mutex);
++    }
+ }
+ 
+ void
+diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c
+index ff06c09..b54112c 100644
+--- a/xlators/features/changelog/src/changelog.c
++++ b/xlators/features/changelog/src/changelog.c
+@@ -2872,6 +2872,7 @@ fini(xlator_t *this)
+         if (priv->active || priv->rpc_active) {
+             /* terminate RPC server/threads */
+             changelog_cleanup_rpc(this, priv);
++            GF_FREE(priv->ev_dispatcher);
+         }
+         /* call barrier_disable to cancel timer */
+         if (priv->barrier_enabled)
+diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
+index 9a14c64..50f1265 100644
+--- a/xlators/features/locks/src/posix.c
++++ b/xlators/features/locks/src/posix.c
+@@ -4102,6 +4102,10 @@ fini(xlator_t *this)
+     if (!priv)
+         return;
+     this->private = NULL;
++    if (this->local_pool) {
++        mem_pool_destroy(this->local_pool);
++        this->local_pool = NULL;
++    }
+     GF_FREE(priv->brickname);
+     GF_FREE(priv);
+ 
+diff --git a/xlators/features/quota/src/quota-enforcer-client.c b/xlators/features/quota/src/quota-enforcer-client.c
+index 1a4c2e3..097439d 100644
+--- a/xlators/features/quota/src/quota-enforcer-client.c
++++ b/xlators/features/quota/src/quota-enforcer-client.c
+@@ -362,16 +362,28 @@ quota_enforcer_notify(struct rpc_clnt *rpc, void *mydata,
+ {
+     xlator_t *this = NULL;
+     int ret = 0;
++    quota_priv_t *priv = NULL;
+ 
+     this = mydata;
+-
++    priv = this->private;
+     switch (event) {
+         case RPC_CLNT_CONNECT: {
++            pthread_mutex_lock(&priv->conn_mutex);
++            {
++                priv->conn_status = _gf_true;
++            }
++            pthread_mutex_unlock(&priv->conn_mutex);
+             gf_msg_trace(this->name, 0, "got RPC_CLNT_CONNECT");
+             break;
+         }
+ 
+         case RPC_CLNT_DISCONNECT: {
++            pthread_mutex_lock(&priv->conn_mutex);
++            {
++                priv->conn_status = _gf_false;
++                pthread_cond_signal(&priv->conn_cond);
++            }
++            pthread_mutex_unlock(&priv->conn_mutex);
+             gf_msg_trace(this->name, 0, "got RPC_CLNT_DISCONNECT");
+             break;
+         }
+diff --git a/xlators/features/quota/src/quota.c b/xlators/features/quota/src/quota.c
+index a0c236d..d1123ce 100644
+--- a/xlators/features/quota/src/quota.c
++++ b/xlators/features/quota/src/quota.c
+@@ -5014,6 +5014,43 @@ quota_forget(xlator_t *this, inode_t *inode)
+     return 0;
+ }
+ 
++int
++notify(xlator_t *this, int event, void *data, ...)
++{
++    quota_priv_t *priv = NULL;
++    int ret = 0;
++    rpc_clnt_t *rpc = NULL;
++    gf_boolean_t conn_status = _gf_true;
++    xlator_t *victim = data;
++
++    priv = this->private;
++    if (!priv || !priv->is_quota_on)
++        goto out;
++
++    if (event == GF_EVENT_PARENT_DOWN) {
++        rpc = priv->rpc_clnt;
++        if (rpc) {
++            rpc_clnt_disable(rpc);
++            pthread_mutex_lock(&priv->conn_mutex);
++            {
++                conn_status = priv->conn_status;
++                while (conn_status) {
++                    (void)pthread_cond_wait(&priv->conn_cond,
++                                            &priv->conn_mutex);
++                    conn_status = priv->conn_status;
++                }
++            }
++            pthread_mutex_unlock(&priv->conn_mutex);
++            gf_log(this->name, GF_LOG_INFO,
++                   "Notify GF_EVENT_PARENT_DOWN for brick %s", victim->name);
++        }
++    }
++
++out:
++    ret = default_notify(this, event, data);
++    return ret;
++}
++
+ int32_t
+ init(xlator_t *this)
+ {
+@@ -5056,6 +5093,10 @@ init(xlator_t *this)
+         goto err;
+     }
+ 
++    pthread_mutex_init(&priv->conn_mutex, NULL);
++    pthread_cond_init(&priv->conn_cond, NULL);
++    priv->conn_status = _gf_false;
++
+     if (priv->is_quota_on) {
+         rpc = quota_enforcer_init(this, this->options);
+         if (rpc == NULL) {
+@@ -5169,20 +5210,22 @@ fini(xlator_t *this)
+ {
+     quota_priv_t *priv = NULL;
+     rpc_clnt_t *rpc = NULL;
+-    int i = 0, cnt = 0;
+ 
+     priv = this->private;
+     if (!priv)
+         return;
+     rpc = priv->rpc_clnt;
+     priv->rpc_clnt = NULL;
+-    this->private = NULL;
+     if (rpc) {
+-        cnt = GF_ATOMIC_GET(rpc->refcount);
+-        for (i = 0; i < cnt; i++)
+-            rpc_clnt_unref(rpc);
++        rpc_clnt_connection_cleanup(&rpc->conn);
++        rpc_clnt_unref(rpc);
+     }
++
++    this->private = NULL;
+     LOCK_DESTROY(&priv->lock);
++    pthread_mutex_destroy(&priv->conn_mutex);
++    pthread_cond_destroy(&priv->conn_cond);
++
+     GF_FREE(priv);
+     if (this->local_pool) {
+         mem_pool_destroy(this->local_pool);
+@@ -5314,6 +5357,7 @@ struct volume_options options[] = {
+ xlator_api_t xlator_api = {
+     .init = init,
+     .fini = fini,
++    .notify = notify,
+     .reconfigure = reconfigure,
+     .mem_acct_init = mem_acct_init,
+     .op_version = {1}, /* Present from the initial version */
+diff --git a/xlators/features/quota/src/quota.h b/xlators/features/quota/src/quota.h
+index a5a99ca..e51ffd4 100644
+--- a/xlators/features/quota/src/quota.h
++++ b/xlators/features/quota/src/quota.h
+@@ -217,6 +217,9 @@ struct quota_priv {
+     char *volume_uuid;
+     uint64_t validation_count;
+     int32_t quotad_conn_status;
++    pthread_mutex_t conn_mutex;
++    pthread_cond_t conn_cond;
++    gf_boolean_t conn_status;
+ };
+ typedef struct quota_priv quota_priv_t;
+ 
+diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
+index a5f09fe..54d9c0f 100644
+--- a/xlators/protocol/server/src/server.c
++++ b/xlators/protocol/server/src/server.c
+@@ -409,7 +409,13 @@ server_call_xlator_mem_cleanup(xlator_t *this, char *victim_name)
+ 
+     arg = calloc(1, sizeof(*arg));
+     arg->this = this;
+-    arg->victim_name = gf_strdup(victim_name);
++    arg->victim_name = strdup(victim_name);
++    if (!arg->victim_name) {
++        gf_smsg(this->name, GF_LOG_CRITICAL, ENOMEM, LG_MSG_NO_MEMORY,
++                "Memory allocation is failed");
++        return;
++    }
++
+     th_ret = gf_thread_create_detached(&th_id, server_graph_janitor_threads,
+                                        arg, "graphjanitor");
+     if (th_ret) {
+@@ -417,7 +423,7 @@ server_call_xlator_mem_cleanup(xlator_t *this, char *victim_name)
+                "graph janitor Thread"
+                " creation is failed for brick %s",
+                victim_name);
+-        GF_FREE(arg->victim_name);
++        free(arg->victim_name);
+         free(arg);
+     }
+ }
+@@ -628,7 +634,7 @@ server_graph_janitor_threads(void *data)
+     }
+ 
+ out:
+-    GF_FREE(arg->victim_name);
++    free(arg->victim_name);
+     free(arg);
+     return NULL;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0448-Posix-Use-simple-approach-to-close-fd.patch b/SOURCES/0448-Posix-Use-simple-approach-to-close-fd.patch
new file mode 100644
index 0000000..f030358
--- /dev/null
+++ b/SOURCES/0448-Posix-Use-simple-approach-to-close-fd.patch
@@ -0,0 +1,341 @@
+From 175c99dccc47d2b4267a8819404e5cbeb8cfba11 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Thu, 12 Mar 2020 21:12:13 +0530
+Subject: [PATCH 448/449] Posix: Use simple approach to close fd
+
+Problem: The posix_release(dir) functions add fds to ctx->janitor_fds,
+         and a janitor thread closes them. In a brick_mux environment it
+         is difficult to handle race conditions in the janitor thread,
+         because the brick process spawns a single janitor thread for
+         all bricks.
+
+Solution: Use a synctask to execute the posix_release(dir) functions
+          instead of using a background thread to close fds.
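+
+A minimal sketch of how the reroute works in rpcsvc (mirroring the
+rpcsvc.c hunk below; fields and constants are assumed as used there):
+
+    /* Sketch: run release(dir) fops in a synctask instead of the
+     * own-thread pool, so posix can close the fd inline and the
+     * per-ctx janitor machinery can be removed. */
+    if ((req->procnum == GFS3_OP_RELEASE ||
+         req->procnum == GFS3_OP_RELEASEDIR) &&
+        (program->prognum == GLUSTER_FOP_PROGRAM)) {
+        req->ownthread = _gf_false;
+        req->synctask = _gf_true;
+    }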
+
+> Credits: Pranith Karampuri <pkarampu@redhat.com>
+> Change-Id: Iffb031f0695a7da83d5a2f6bac8863dad225317e
+> Fixes: bz#1811631
+> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+> (Cherry pick from commit fb20713b380e1df8d7f9e9df96563be2f9144fd6)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24221/)
+
+BUG: 1790336
+Change-Id: Iffb031f0695a7da83d5a2f6bac8863dad225317e
+Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202791
+Tested-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/glusterfs.h         |  6 +-
+ libglusterfs/src/glusterfs/syncop.h            |  7 +-
+ rpc/rpc-lib/src/rpcsvc.c                       |  6 ++
+ run-tests.sh                                   |  2 +-
+ tests/features/ssl-authz.t                     |  7 +-
+ xlators/storage/posix/src/posix-common.c       |  4 --
+ xlators/storage/posix/src/posix-helpers.c      | 98 --------------------------
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 28 ++------
+ xlators/storage/posix/src/posix.h              |  3 -
+ 9 files changed, 20 insertions(+), 141 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
+index 584846e..495a4d7 100644
+--- a/libglusterfs/src/glusterfs/glusterfs.h
++++ b/libglusterfs/src/glusterfs/glusterfs.h
+@@ -734,11 +734,7 @@ struct _glusterfs_ctx {
+ 
+     struct list_head volfile_list;
+ 
+-    /* Add members to manage janitor threads for cleanup fd */
+-    struct list_head janitor_fds;
+-    pthread_cond_t janitor_cond;
+-    pthread_mutex_t janitor_lock;
+-    pthread_t janitor;
++    char volume_id[GF_UUID_BUF_SIZE]; /* Used only in protocol/client */
+ };
+ typedef struct _glusterfs_ctx glusterfs_ctx_t;
+ 
+diff --git a/libglusterfs/src/glusterfs/syncop.h b/libglusterfs/src/glusterfs/syncop.h
+index 3011b4c..1e4c73b 100644
+--- a/libglusterfs/src/glusterfs/syncop.h
++++ b/libglusterfs/src/glusterfs/syncop.h
+@@ -254,7 +254,7 @@ struct syncopctx {
+         task = synctask_get();                                                 \
+         stb->task = task;                                                      \
+         if (task)                                                              \
+-            frame = task->opframe;                                             \
++            frame = copy_frame(task->opframe);                                 \
+         else                                                                   \
+             frame = syncop_create_frame(THIS);                                 \
+                                                                                \
+@@ -269,10 +269,7 @@ struct syncopctx {
+         STACK_WIND_COOKIE(frame, cbk, (void *)stb, subvol, fn_op, params);     \
+                                                                                \
+         __yield(stb);                                                          \
+-        if (task)                                                              \
+-            STACK_RESET(frame->root);                                          \
+-        else                                                                   \
+-            STACK_DESTROY(frame->root);                                        \
++        STACK_DESTROY(frame->root);                                            \
+     } while (0)
+ 
+ /*
+diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
+index 3f184bf..23ca1fd 100644
+--- a/rpc/rpc-lib/src/rpcsvc.c
++++ b/rpc/rpc-lib/src/rpcsvc.c
+@@ -375,6 +375,12 @@ rpcsvc_program_actor(rpcsvc_request_t *req)
+ 
+     req->ownthread = program->ownthread;
+     req->synctask = program->synctask;
++    if (((req->procnum == GFS3_OP_RELEASE) ||
++         (req->procnum == GFS3_OP_RELEASEDIR)) &&
++        (program->prognum == GLUSTER_FOP_PROGRAM)) {
++        req->ownthread = _gf_false;
++        req->synctask = _gf_true;
++    }
+ 
+     err = SUCCESS;
+     gf_log(GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s",
+diff --git a/run-tests.sh b/run-tests.sh
+index 5683b21..c835d93 100755
+--- a/run-tests.sh
++++ b/run-tests.sh
+@@ -356,7 +356,7 @@ function run_tests()
+             selected_tests=$((selected_tests+1))
+             echo
+             echo $section_separator$section_separator
+-            if [[ $(get_test_status $t) == "BAD_TEST" ]] && \
++            if [[ $(get_test_status $t) =~ "BAD_TEST" ]] && \
+                [[ $skip_bad_tests == "yes" ]]
+             then
+                 skipped_bad_tests=$((skipped_bad_tests+1))
+diff --git a/tests/features/ssl-authz.t b/tests/features/ssl-authz.t
+index 132b598..497083e 100755
+--- a/tests/features/ssl-authz.t
++++ b/tests/features/ssl-authz.t
+@@ -67,13 +67,14 @@ echo "Memory consumption for glusterfsd process"
+ for i in $(seq 1 100); do
+         gluster v heal $V0 info >/dev/null
+ done
+-
++#Wait to cleanup memory
++sleep 10
+ end=`pmap -x $glusterfsd_pid | grep total | awk -F " " '{print $4}'`
+ diff=$((end-start))
+ 
+-# If memory consumption is more than 5M some leak in SSL code path
++# If memory consumption is more than 15M some leak in SSL code path
+ 
+-TEST [ $diff -lt 5000 ]
++TEST [ $diff -lt 15000 ]
+ 
+ 
+ # Set ssl-allow to a wildcard that includes our identity.
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index 2cb58ba..ac53796 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -1041,10 +1041,6 @@ posix_init(xlator_t *this)
+     pthread_mutex_init(&_private->janitor_mutex, NULL);
+     pthread_cond_init(&_private->janitor_cond, NULL);
+     INIT_LIST_HEAD(&_private->fsyncs);
+-    ret = posix_spawn_ctx_janitor_thread(this);
+-    if (ret)
+-        goto out;
+-
+     ret = gf_thread_create(&_private->fsyncer, NULL, posix_fsyncer, this,
+                            "posixfsy");
+     if (ret) {
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 2336add..39dbcce 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -1582,104 +1582,6 @@ unlock:
+     return;
+ }
+ 
+-static struct posix_fd *
+-janitor_get_next_fd(glusterfs_ctx_t *ctx, int32_t janitor_sleep)
+-{
+-    struct posix_fd *pfd = NULL;
+-
+-    struct timespec timeout;
+-
+-    pthread_mutex_lock(&ctx->janitor_lock);
+-    {
+-        if (list_empty(&ctx->janitor_fds)) {
+-            time(&timeout.tv_sec);
+-            timeout.tv_sec += janitor_sleep;
+-            timeout.tv_nsec = 0;
+-
+-            pthread_cond_timedwait(&ctx->janitor_cond, &ctx->janitor_lock,
+-                                   &timeout);
+-            goto unlock;
+-        }
+-
+-        pfd = list_entry(ctx->janitor_fds.next, struct posix_fd, list);
+-
+-        list_del(ctx->janitor_fds.next);
+-    }
+-unlock:
+-    pthread_mutex_unlock(&ctx->janitor_lock);
+-
+-    return pfd;
+-}
+-
+-static void *
+-posix_ctx_janitor_thread_proc(void *data)
+-{
+-    xlator_t *this = NULL;
+-    struct posix_fd *pfd;
+-    glusterfs_ctx_t *ctx = NULL;
+-    struct posix_private *priv = NULL;
+-    int32_t sleep_duration = 0;
+-
+-    this = data;
+-    ctx = THIS->ctx;
+-    THIS = this;
+-
+-    priv = this->private;
+-    sleep_duration = priv->janitor_sleep_duration;
+-    while (1) {
+-        pfd = janitor_get_next_fd(ctx, sleep_duration);
+-        if (pfd) {
+-            if (pfd->dir == NULL) {
+-                gf_msg_trace(this->name, 0, "janitor: closing file fd=%d",
+-                             pfd->fd);
+-                sys_close(pfd->fd);
+-            } else {
+-                gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p",
+-                             pfd->dir);
+-                sys_closedir(pfd->dir);
+-            }
+-
+-            GF_FREE(pfd);
+-        }
+-    }
+-
+-    return NULL;
+-}
+-
+-int
+-posix_spawn_ctx_janitor_thread(xlator_t *this)
+-{
+-    struct posix_private *priv = NULL;
+-    int ret = 0;
+-    glusterfs_ctx_t *ctx = NULL;
+-
+-    priv = this->private;
+-    ctx = THIS->ctx;
+-
+-    LOCK(&priv->lock);
+-    {
+-        if (!ctx->janitor) {
+-            pthread_mutex_init(&ctx->janitor_lock, NULL);
+-            pthread_cond_init(&ctx->janitor_cond, NULL);
+-            INIT_LIST_HEAD(&ctx->janitor_fds);
+-
+-            ret = gf_thread_create(&ctx->janitor, NULL,
+-                                   posix_ctx_janitor_thread_proc, this,
+-                                   "posixctxjan");
+-
+-            if (ret) {
+-                gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_THREAD_FAILED,
+-                       "spawning janitor "
+-                       "thread failed");
+-                goto unlock;
+-            }
+-        }
+-    }
+-unlock:
+-    UNLOCK(&priv->lock);
+-    return ret;
+-}
+-
+ static int
+ is_fresh_file(int64_t ctime_sec)
+ {
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index 5748b9f..d135d8b 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -1358,7 +1358,6 @@ posix_releasedir(xlator_t *this, fd_t *fd)
+     struct posix_fd *pfd = NULL;
+     uint64_t tmp_pfd = 0;
+     int ret = 0;
+-    glusterfs_ctx_t *ctx = NULL;
+ 
+     VALIDATE_OR_GOTO(this, out);
+     VALIDATE_OR_GOTO(fd, out);
+@@ -1376,21 +1375,11 @@ posix_releasedir(xlator_t *this, fd_t *fd)
+         goto out;
+     }
+ 
+-    ctx = THIS->ctx;
+-
+-    pthread_mutex_lock(&ctx->janitor_lock);
+-    {
+-        INIT_LIST_HEAD(&pfd->list);
+-        list_add_tail(&pfd->list, &ctx->janitor_fds);
+-        pthread_cond_signal(&ctx->janitor_cond);
+-    }
+-    pthread_mutex_unlock(&ctx->janitor_lock);
+-
+-    /*gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p", pfd->dir);
++    gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p", pfd->dir);
+ 
+     sys_closedir(pfd->dir);
+     GF_FREE(pfd);
+-    */
++
+ out:
+     return 0;
+ }
+@@ -2510,13 +2499,11 @@ posix_release(xlator_t *this, fd_t *fd)
+     struct posix_fd *pfd = NULL;
+     int ret = -1;
+     uint64_t tmp_pfd = 0;
+-    glusterfs_ctx_t *ctx = NULL;
+ 
+     VALIDATE_OR_GOTO(this, out);
+     VALIDATE_OR_GOTO(fd, out);
+ 
+     priv = this->private;
+-    ctx = THIS->ctx;
+ 
+     ret = fd_ctx_del(fd, this, &tmp_pfd);
+     if (ret < 0) {
+@@ -2531,13 +2518,10 @@ posix_release(xlator_t *this, fd_t *fd)
+                "pfd->dir is %p (not NULL) for file fd=%p", pfd->dir, fd);
+     }
+ 
+-    pthread_mutex_lock(&ctx->janitor_lock);
+-    {
+-        INIT_LIST_HEAD(&pfd->list);
+-        list_add_tail(&pfd->list, &ctx->janitor_fds);
+-        pthread_cond_signal(&ctx->janitor_cond);
+-    }
+-    pthread_mutex_unlock(&ctx->janitor_lock);
++    gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p", pfd->dir);
++
++    sys_close(pfd->fd);
++    GF_FREE(pfd);
+ 
+     if (!priv)
+         goto out;
+diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
+index ac9d83c..61495a7 100644
+--- a/xlators/storage/posix/src/posix.h
++++ b/xlators/storage/posix/src/posix.h
+@@ -666,9 +666,6 @@ posix_cs_maintenance(xlator_t *this, fd_t *fd, loc_t *loc, int *pfd,
+ int
+ posix_check_dev_file(xlator_t *this, inode_t *inode, char *fop, int *op_errno);
+ 
+-int
+-posix_spawn_ctx_janitor_thread(xlator_t *this);
+-
+ void
+ posix_update_iatt_buf(struct iatt *buf, int fd, char *loc, dict_t *xdata);
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch b/SOURCES/0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
new file mode 100644
index 0000000..6a161bf
--- /dev/null
+++ b/SOURCES/0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
@@ -0,0 +1,107 @@
+From 6e15fca1621b06270983f57ac146f0f8e52f0797 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Tue, 9 Jun 2020 15:38:12 +0530
+Subject: [PATCH 449/449] test: Test case brick-mux-validation-in-cluster.t is
+ failing on RHEL-8
+
+Problem: Brick processes are not properly attached on a cluster node when
+some volume options are changed on a peer node while glusterd is down on
+that specific node.
+
+Solution: When glusterd restarts, it receives a friend update request
+from a peer node if that peer has changes for a volume. If the brick
+process is started before the friend update request is received,
+brick_mux behavior does not work properly: all bricks are attached to
+the same process even though the volume options are not the same. To
+avoid the issue, introduce an atomic flag volpeerupdate and set it when
+glusterd receives a friend update request from a peer for a specific
+volume. If the volpeerupdate flag is 1, the volume is started by the
+glusterd_import_friend_volume synctask.
+
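+A condensed sketch of the flag's life cycle (names are taken from the
+diff below):
+
+    /* Sketch: a peer reports a differing volume version -> mark it. */
+    GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
+
+    /* Sketch: in glusterd_brick_start(), skip the premature local
+     * start; the import synctask will start the brick with the
+     * updated options. */
+    if (brickinfo->status == GF_BRICK_STARTING ||
+        brickinfo->start_triggered ||
+        GF_ATOMIC_GET(volinfo->volpeerupdate))
+        goto out;
+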
+> Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
+> Credit: Sanju Rakaonde <srakonde@redhat.com>
+> fixes: #1290
+> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24540/)
+> (Cherry pick from commit 955bfd567329cf7fe63e9c3b89d333a55e5e9a20)
+
+BUG: 1844359
+Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
+Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202812
+Tested-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 4 +---
+ xlators/mgmt/glusterd/src/glusterd-utils.c            | 7 +++++--
+ xlators/mgmt/glusterd/src/glusterd.h                  | 4 ++++
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+index f088dbb..b6af487 100644
+--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
++++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+@@ -100,10 +100,8 @@ $CLI_2 volume set $V0 performance.readdir-ahead on
+ $CLI_2 volume set $V1 performance.readdir-ahead on
+ 
+ TEST $glusterd_1;
++EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+ 
+-sleep 10
+-
+-EXPECT 4 count_brick_processes
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0  count_N/A_brick_pids
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 2eb2a76..6f904ae 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -3758,6 +3758,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+                "Version of volume %s differ. local version = %d, "
+                "remote version = %d on peer %s",
+                volinfo->volname, volinfo->version, version, hostname);
++        GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
+         *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
+         goto out;
+     } else if (version < volinfo->version) {
+@@ -4784,7 +4785,8 @@ glusterd_volinfo_stop_stale_bricks(glusterd_volinfo_t *new_volinfo,
+          * or if it's part of the new volume and is pending a snap,
+          * then stop the brick process
+          */
+-        if (ret || (new_brickinfo->snap_status == -1)) {
++        if (ret || (new_brickinfo->snap_status == -1) ||
++            GF_ATOMIC_GET(old_volinfo->volpeerupdate)) {
+             /*TODO: may need to switch to 'atomic' flavour of
+              * brick_stop, once we make peer rpc program also
+              * synctask enabled*/
+@@ -6490,7 +6492,8 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
+      * three different triggers for an attempt to start the brick process
+      * due to the quorum handling code in glusterd_friend_sm.
+      */
+-    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered) {
++    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered ||
++        GF_ATOMIC_GET(volinfo->volpeerupdate)) {
+         gf_msg_debug(this->name, 0,
+                      "brick %s is already in starting "
+                      "phase",
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index 1c6c3b1..f739b5d 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -523,6 +523,10 @@ struct glusterd_volinfo_ {
+     pthread_mutex_t store_volinfo_lock; /* acquire lock for
+                                          * updating the volinfo
+                                          */
++    gf_atomic_t volpeerupdate;
++    /* Flag to check about volume has received updates
++       from peer
++    */
+ };
+ 
+ typedef enum gd_snap_status_ {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0450-tests-basic-ctime-enable-ctime-before-testing.patch b/SOURCES/0450-tests-basic-ctime-enable-ctime-before-testing.patch
new file mode 100644
index 0000000..96de5a1
--- /dev/null
+++ b/SOURCES/0450-tests-basic-ctime-enable-ctime-before-testing.patch
@@ -0,0 +1,35 @@
+From 09dce9ce8e946a86209b6f057bf14323036fa12a Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Wed, 10 Jun 2020 11:44:56 +0530
+Subject: [PATCH 450/451] tests/basic/ctime: enable ctime before testing
+
+This is to ensure that this test runs successfully even if
+ctime is disabled by default (which is the case downstream).
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1844359
+Change-Id: I91e80b3d8a56fc089aeb58b0254812111d394842
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/202874
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/basic/ctime/ctime-utimesat.t | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tests/basic/ctime/ctime-utimesat.t b/tests/basic/ctime/ctime-utimesat.t
+index 540e57a..da12fbe 100644
+--- a/tests/basic/ctime/ctime-utimesat.t
++++ b/tests/basic/ctime/ctime-utimesat.t
+@@ -14,6 +14,7 @@ TEST $CLI volume set $V0 performance.read-after-open off
+ TEST $CLI volume set $V0 performance.open-behind off
+ TEST $CLI volume set $V0 performance.write-behind off
+ TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 ctime on
+ 
+ TEST $CLI volume start $V0
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0451-extras-Modify-group-virt-to-include-network-related-.patch b/SOURCES/0451-extras-Modify-group-virt-to-include-network-related-.patch
new file mode 100644
index 0000000..bba69e1
--- /dev/null
+++ b/SOURCES/0451-extras-Modify-group-virt-to-include-network-related-.patch
@@ -0,0 +1,44 @@
+From 96d9b659fd0367abe1666a5ac6203208e0dc056d Mon Sep 17 00:00:00 2001
+From: Krutika Dhananjay <kdhananj@redhat.com>
+Date: Mon, 4 May 2020 14:30:57 +0530
+Subject: [PATCH 451/451] extras: Modify group 'virt' to include
+ network-related options
+
+This is needed to work around an issue seen where vms running on
+online hosts are getting killed when a different host is rebooted
+in ovirt-gluster hyperconverged environments. Actual RCA is quite
+lengthy and documented in the github issue. Please refer to it
+for more details.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24400
+> Change-Id: Ic25b5f50144ad42458e5c847e1e7e191032396c1
+> Fixes: #1217
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+Change-Id: Ic25b5f50144ad42458e5c847e1e7e191032396c1
+BUG: 1845064
+Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/203291
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/group-virt.example | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/extras/group-virt.example b/extras/group-virt.example
+index c2ce89d..3a441eb 100644
+--- a/extras/group-virt.example
++++ b/extras/group-virt.example
+@@ -16,3 +16,8 @@ cluster.choose-local=off
+ client.event-threads=4
+ server.event-threads=4
+ performance.client-io-threads=on
++network.ping-timeout=20
++server.tcp-user-timeout=20
++server.keepalive-time=10
++server.keepalive-interval=2
++server.keepalive-count=5
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0452-Tier-DHT-Handle-the-pause-case-missed-out.patch b/SOURCES/0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
new file mode 100644
index 0000000..0b115bb
--- /dev/null
+++ b/SOURCES/0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
@@ -0,0 +1,48 @@
+From c184943bdf38de5b4cbf165fd1cd98ce7bd9e976 Mon Sep 17 00:00:00 2001
+From: hari gowtham <hgowtham@redhat.com>
+Date: Tue, 16 Jun 2020 14:47:53 +0530
+Subject: [PATCH 452/456] Tier/DHT: Handle the pause case missed out
+
+Problem: While backporting a change from master, the changes related to
+tier were removed. This broke the tier pause functionality. Since tier
+is still supported downstream, the backport left this use case broken.
+Patch that caused this: https://code.engineering.redhat.com/gerrit/#/c/202647/2
+
+Fix: Add the condition back so that tier pause works.
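+
+The restored check, condensed (names as in the diff below):
+
+    /* Sketch: inside the data copy loop of a tier migration, abort
+     * the current file as soon as the tier daemon is paused. */
+    if ((defrag && defrag->cmd == GF_DEFRAG_CMD_START_TIER) &&
+        (gf_defrag_get_pause_state(&defrag->tier_conf) != TIER_RUNNING)) {
+        ret = -1;
+        break;
+    }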
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1844359
+Change-Id: I46c6c179b09c7e1a729be9fd257fa4a490f0287e
+Signed-off-by: hari gowtham <hgowtham@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/203560
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index e9974cd..abc10fc 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -1160,6 +1160,15 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
+             break;
+         }
+ 
++        if ((defrag && defrag->cmd == GF_DEFRAG_CMD_START_TIER) &&
++            (gf_defrag_get_pause_state(&defrag->tier_conf) != TIER_RUNNING)) {
++            gf_msg("tier", GF_LOG_INFO, 0, DHT_MSG_TIER_PAUSED,
++                   "Migrate file paused");
++            ret = -1;
++            break;
++        }
++
++
+         offset += ret;
+         total += ret;
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0453-glusterd-add-brick-command-failure.patch b/SOURCES/0453-glusterd-add-brick-command-failure.patch
new file mode 100644
index 0000000..dd21350
--- /dev/null
+++ b/SOURCES/0453-glusterd-add-brick-command-failure.patch
@@ -0,0 +1,300 @@
+From a04592cce9aaa6ccb8a038bc3b4e31bc125d1d10 Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Tue, 16 Jun 2020 18:03:21 +0530
+Subject: [PATCH 453/456] glusterd: add-brick command failure
+
+Problem: The add-brick operation fails when the replica or disperse
+count is not mentioned in the add-brick command.
+
+Reason: Since commit a113d93 we check the brick order while doing an
+add-brick operation for replica and disperse volumes. If the replica
+count or disperse count is not mentioned in the command, the dict get
+fails, resulting in add-brick operation failure.
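+
+The fix, condensed: the callers now resolve the subvolume count
+themselves and pass it down, so glusterd_check_brick_order() no longer
+reads "replica-count"/"disperse-count" from the dict (which is absent
+when the count is not mentioned in the command). A sketch with names
+from the diff below:
+
+    /* Sketch: prefer the count from the command, else from volinfo. */
+    if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE)
+        ret = glusterd_check_brick_order(
+            dict, msg, volinfo->type,
+            replica_count ? replica_count : volinfo->replica_count);
+    else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)
+        ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+                                         volinfo->disperse_count);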
+
+> upstream patch: https://review.gluster.org/#/c/glusterfs/+/24581/
+> fixes: #1306
+> Change-Id: Ie957540e303bfb5f2d69015661a60d7e72557353
+> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+
+BUG: 1847081
+Change-Id: Ie957540e303bfb5f2d69015661a60d7e72557353
+Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/203867
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/glusterd/brick-order-check-add-brick.t | 40 ++++++++++++++++++++++
+ tests/cluster.rc                                  | 11 ++++--
+ xlators/mgmt/glusterd/src/glusterd-brick-ops.c    | 39 ++++++++++++++-------
+ xlators/mgmt/glusterd/src/glusterd-utils.c        | 30 ++---------------
+ xlators/mgmt/glusterd/src/glusterd-utils.h        |  3 +-
+ xlators/mgmt/glusterd/src/glusterd-volume-ops.c   | 41 +++++++++++++++++++----
+ 6 files changed, 115 insertions(+), 49 deletions(-)
+ create mode 100644 tests/bugs/glusterd/brick-order-check-add-brick.t
+
+diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
+new file mode 100644
+index 0000000..29f0ed1
+--- /dev/null
++++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
+@@ -0,0 +1,40 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../cluster.rc
++. $(dirname $0)/../../snapshot.rc
++
++cleanup;
++
++TEST verify_lvm_version;
++#Create cluster with 3 nodes
++TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
++TEST setup_lvm 3
++
++TEST $CLI_1 peer probe $H2
++TEST $CLI_1 peer probe $H3
++EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
++
++TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
++EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
++EXPECT 'Created' volinfo_field $V0 'Status'
++
++TEST $CLI_1 volume start $V0
++EXPECT 'Started' volinfo_field $V0 'Status'
++
++#add-brick with or without mentioning the replica count should not fail
++TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
++EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
++
++TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
++EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
++
++#adding bricks from same host should fail the brick order check
++TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
++EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
++
++#adding bricks from same host with force should succeed
++TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
++EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
++
++cleanup
+diff --git a/tests/cluster.rc b/tests/cluster.rc
+index 99be8e7..8b73153 100644
+--- a/tests/cluster.rc
++++ b/tests/cluster.rc
+@@ -11,7 +11,7 @@ function launch_cluster() {
+     define_backends $count;
+     define_hosts $count;
+     define_glusterds $count $2;
+-    define_clis $count;
++    define_clis $count $3;
+ 
+     start_glusterds;
+ }
+@@ -133,8 +133,13 @@ function define_clis() {
+         lopt1="--log-file=$logdir/$logfile1"
+ 
+ 
+-        eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
+-        eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
++        if [ "$2" == "-NO_FORCE" ]; then
++                eval "CLI_$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
++                eval "CLI$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
++        else
++                eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
++                eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
++        fi
+     done
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+index 121346c..5ae577a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+@@ -1576,20 +1576,35 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+ 
+     /* Check brick order if the volume type is replicate or disperse. If
+      * force at the end of command not given then check brick order.
++     * doing this check at the originator node is sufficient.
+      */
+ 
+-    if (!is_force) {
+-        if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) ||
+-            (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+-            ret = glusterd_check_brick_order(dict, msg, volinfo->type);
+-            if (ret) {
+-                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+-                       "Not adding brick because of "
+-                       "bad brick order. %s",
+-                       msg);
+-                *op_errstr = gf_strdup(msg);
+-                goto out;
+-            }
++    if (is_origin_glusterd(dict) && !is_force) {
++        ret = 0;
++        if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
++            gf_msg_debug(this->name, 0,
++                         "Replicate cluster type "
++                         "found. Checking brick order.");
++            if (replica_count)
++                ret = glusterd_check_brick_order(dict, msg, volinfo->type,
++                                                 replica_count);
++            else
++                ret = glusterd_check_brick_order(dict, msg, volinfo->type,
++                                                 volinfo->replica_count);
++        } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
++            gf_msg_debug(this->name, 0,
++                         "Disperse cluster type"
++                         " found. Checking brick order.");
++            ret = glusterd_check_brick_order(dict, msg, volinfo->type,
++                                             volinfo->disperse_count);
++        }
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
++                   "Not adding brick because of "
++                   "bad brick order. %s",
++                   msg);
++            *op_errstr = gf_strdup(msg);
++            goto out;
+         }
+     }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 6f904ae..545e688 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -14802,7 +14802,8 @@ glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
+  * volume are present on the same server
+  */
+ int32_t
+-glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type)
++glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
++                           int32_t sub_count)
+ {
+     int ret = -1;
+     int i = 0;
+@@ -14819,7 +14820,6 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type)
+     char *tmpptr = NULL;
+     char *volname = NULL;
+     int32_t brick_count = 0;
+-    int32_t sub_count = 0;
+     struct addrinfo *ai_info = NULL;
+     char brick_addr[128] = {
+         0,
+@@ -14870,31 +14870,6 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type)
+         goto out;
+     }
+ 
+-    if (type != GF_CLUSTER_TYPE_DISPERSE) {
+-        ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+-                              &sub_count);
+-        if (ret) {
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-                   "Bricks check : Could"
+-                   " not retrieve replica count");
+-            goto out;
+-        }
+-        gf_msg_debug(this->name, 0,
+-                     "Replicate cluster type "
+-                     "found. Checking brick order.");
+-    } else {
+-        ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"),
+-                              &sub_count);
+-        if (ret) {
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+-                   "Bricks check : Could"
+-                   " not retrieve disperse count");
+-            goto out;
+-        }
+-        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND,
+-               "Disperse cluster type"
+-               " found. Checking brick order.");
+-    }
+     brick_list_dup = brick_list_ptr = gf_strdup(brick_list);
+     /* Resolve hostnames and get addrinfo */
+     while (i < brick_count) {
+@@ -14989,5 +14964,6 @@ out:
+         ai_list_tmp2 = ai_list_tmp1;
+     }
+     free(ai_list_tmp2);
++    gf_msg_debug("glusterd", 0, "Returning %d", ret);
+     return ret;
+ }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
+index e2e2454..5f5de82 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
+@@ -883,6 +883,7 @@ char *
+ search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
+ 
+ int32_t
+-glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type);
++glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
++                           int32_t sub_count);
+ 
+ #endif
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+index 8da2ff3..134b04c 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+@@ -1024,6 +1024,8 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
+     int32_t local_brick_count = 0;
+     int32_t i = 0;
+     int32_t type = 0;
++    int32_t replica_count = 0;
++    int32_t disperse_count = 0;
+     char *brick = NULL;
+     char *tmpptr = NULL;
+     xlator_t *this = NULL;
+@@ -1119,15 +1121,42 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
+         }
+ 
+         if (!is_force) {
+-            if ((type == GF_CLUSTER_TYPE_REPLICATE) ||
+-                (type == GF_CLUSTER_TYPE_DISPERSE)) {
+-                ret = glusterd_check_brick_order(dict, msg, type);
++            if (type == GF_CLUSTER_TYPE_REPLICATE) {
++                ret = dict_get_int32n(dict, "replica-count",
++                                      SLEN("replica-count"), &replica_count);
+                 if (ret) {
+-                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+-                           "Not creating volume because of "
+-                           "bad brick order");
++                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++                           "Bricks check : Could"
++                           " not retrieve replica count");
++                    goto out;
++                }
++                gf_msg_debug(this->name, 0,
++                             "Replicate cluster type "
++                             "found. Checking brick order.");
++                ret = glusterd_check_brick_order(dict, msg, type,
++                                                 replica_count);
++            } else if (type == GF_CLUSTER_TYPE_DISPERSE) {
++                ret = dict_get_int32n(dict, "disperse-count",
++                                      SLEN("disperse-count"), &disperse_count);
++                if (ret) {
++                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
++                           "Bricks check : Could"
++                           " not retrieve disperse count");
+                     goto out;
+                 }
++                gf_msg_debug(this->name, 0,
++                             "Disperse cluster type"
++                             " found. Checking brick order.");
++                ret = glusterd_check_brick_order(dict, msg, type,
++                                                 disperse_count);
++            }
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
++                       "Not creating the volume because of "
++                       "bad brick order. %s",
++                       msg);
++                *op_errstr = gf_strdup(msg);
++                goto out;
+             }
+         }
+     }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch b/SOURCES/0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
new file mode 100644
index 0000000..6ad460d
--- /dev/null
+++ b/SOURCES/0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
@@ -0,0 +1,152 @@
+From cddd253c5e3f0a7c3b91c35cea8ad1921cb43b98 Mon Sep 17 00:00:00 2001
+From: Kinglong Mee <kinglongmee@gmail.com>
+Date: Thu, 18 Jul 2019 11:43:01 +0800
+Subject: [PATCH 454/456] features/locks: avoid use-after-free of frame for
+ blocked lock
+
+A fop that holds a blocked lock may use freed frame info after another
+unlock fop has unwound the blocked lock.
+
+Because the blocked lock is added to the blocked list while inside the
+inode lock (or other lock), the fop holding the blocked lock must not
+use it once it is outside that lock.
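+
+The shape of the fix, condensed (names from the diff below): record the
+trace while still under the lock, and capture the lock type in a local
+before the blocked lock may be granted and freed by another thread:
+
+    /* Sketch: capture what is needed before pl_setlk() may block. */
+    short lock_type = flock->l_type;
+    ret = pl_setlk(this, pl_inode, reqlock, can_block);
+    if (ret == -1) {
+        if (can_block && (F_UNLCK != lock_type))
+            goto out;   /* must not touch reqlock or its frame here */
+    }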
+
+Upstream Patch - https://review.gluster.org/#/c/glusterfs/+/23155/
+
+>Change-Id: Icb309a1cc78380dc982b26d50c18d67e4f2c8915
+>fixes: bz#1737291
+>Signed-off-by: Kinglong Mee <mijinlong@horiscale.com>
+
+Change-Id: Icb309a1cc78380dc982b26d50c18d67e4f2c8915
+BUG: 1812789
+Reviewed-on: https://code.engineering.redhat.com/gerrit/206465
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ xlators/features/locks/src/common.c    | 4 ++++
+ xlators/features/locks/src/entrylk.c   | 4 ++--
+ xlators/features/locks/src/inodelk.c   | 7 +++++--
+ xlators/features/locks/src/posix.c     | 5 +++--
+ xlators/features/locks/src/reservelk.c | 2 --
+ 5 files changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
+index 6e7fb4b..1406e70 100644
+--- a/xlators/features/locks/src/common.c
++++ b/xlators/features/locks/src/common.c
+@@ -1080,6 +1080,10 @@ pl_setlk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+                    lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+                    lock->client_pid, lkowner_utoa(&lock->owner),
+                    lock->user_flock.l_start, lock->user_flock.l_len);
++
++            pl_trace_block(this, lock->frame, NULL, NULL, F_SETLKW,
++                           &lock->user_flock, NULL);
++
+             lock->blocked = 1;
+             __insert_lock(pl_inode, lock);
+             ret = -1;
+diff --git a/xlators/features/locks/src/entrylk.c b/xlators/features/locks/src/entrylk.c
+index ced5eca..93c649c 100644
+--- a/xlators/features/locks/src/entrylk.c
++++ b/xlators/features/locks/src/entrylk.c
+@@ -552,6 +552,8 @@ __lock_blocked_add(xlator_t *this, pl_inode_t *pinode, pl_dom_list_t *dom,
+     gf_msg_trace(this->name, 0, "Blocking lock: {pinode=%p, basename=%s}",
+                  pinode, lock->basename);
+ 
++    entrylk_trace_block(this, lock->frame, NULL, NULL, NULL, lock->basename,
++                        ENTRYLK_LOCK, lock->type);
+ out:
+     return -EAGAIN;
+ }
+@@ -932,8 +934,6 @@ out:
+                           op_ret, op_errno);
+     unwind:
+         STACK_UNWIND_STRICT(entrylk, frame, op_ret, op_errno, NULL);
+-    } else {
+-        entrylk_trace_block(this, frame, volume, fd, loc, basename, cmd, type);
+     }
+ 
+     if (pcontend != NULL) {
+diff --git a/xlators/features/locks/src/inodelk.c b/xlators/features/locks/src/inodelk.c
+index a9c42f1..24dee49 100644
+--- a/xlators/features/locks/src/inodelk.c
++++ b/xlators/features/locks/src/inodelk.c
+@@ -420,6 +420,8 @@ __lock_blocked_add(xlator_t *this, pl_dom_list_t *dom, pl_inode_lock_t *lock,
+                  lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+                  lock->user_flock.l_len);
+ 
++    pl_trace_block(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
++                   lock->volume);
+ out:
+     return -EAGAIN;
+ }
+@@ -959,6 +961,7 @@ pl_common_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+     int ret = -1;
+     GF_UNUSED int dict_ret = -1;
+     int can_block = 0;
++    short lock_type = 0;
+     pl_inode_t *pinode = NULL;
+     pl_inode_lock_t *reqlock = NULL;
+     pl_dom_list_t *dom = NULL;
+@@ -1024,13 +1027,13 @@ pl_common_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+             /* fall through */
+ 
+         case F_SETLK:
++            lock_type = flock->l_type;
+             memcpy(&reqlock->user_flock, flock, sizeof(struct gf_flock));
+             ret = pl_inode_setlk(this, ctx, pinode, reqlock, can_block, dom,
+                                  inode);
+ 
+             if (ret < 0) {
+-                if ((can_block) && (F_UNLCK != flock->l_type)) {
+-                    pl_trace_block(this, frame, fd, loc, cmd, flock, volume);
++                if ((can_block) && (F_UNLCK != lock_type)) {
+                     goto out;
+                 }
+                 gf_log(this->name, GF_LOG_TRACE, "returning EAGAIN");
+diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
+index 50f1265..7887b82 100644
+--- a/xlators/features/locks/src/posix.c
++++ b/xlators/features/locks/src/posix.c
+@@ -2557,6 +2557,7 @@ pl_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+     uint32_t lk_flags = 0;
+     posix_locks_private_t *priv = this->private;
+     pl_local_t *local = NULL;
++    short lock_type = 0;
+ 
+     int ret = dict_get_uint32(xdata, GF_LOCK_MODE, &lk_flags);
+     if (ret == 0) {
+@@ -2701,6 +2702,7 @@ pl_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+         case F_SETLK:
+             reqlock->frame = frame;
+             reqlock->this = this;
++            lock_type = flock->l_type;
+ 
+             pthread_mutex_lock(&pl_inode->mutex);
+             {
+@@ -2738,8 +2740,7 @@ pl_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ 
+             ret = pl_setlk(this, pl_inode, reqlock, can_block);
+             if (ret == -1) {
+-                if ((can_block) && (F_UNLCK != flock->l_type)) {
+-                    pl_trace_block(this, frame, fd, NULL, cmd, flock, NULL);
++                if ((can_block) && (F_UNLCK != lock_type)) {
+                     goto out;
+                 }
+                 gf_log(this->name, GF_LOG_DEBUG, "returning EAGAIN");
+diff --git a/xlators/features/locks/src/reservelk.c b/xlators/features/locks/src/reservelk.c
+index 51076d7..604691f 100644
+--- a/xlators/features/locks/src/reservelk.c
++++ b/xlators/features/locks/src/reservelk.c
+@@ -312,8 +312,6 @@ grant_blocked_lock_calls(xlator_t *this, pl_inode_t *pl_inode)
+         ret = pl_setlk(this, pl_inode, lock, can_block);
+         if (ret == -1) {
+             if (can_block) {
+-                pl_trace_block(this, lock->frame, fd, NULL, cmd,
+-                               &lock->user_flock, NULL);
+                 continue;
+             } else {
+                 gf_log(this->name, GF_LOG_DEBUG, "returning EAGAIN");
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0455-locks-prevent-deletion-of-locked-entries.patch b/SOURCES/0455-locks-prevent-deletion-of-locked-entries.patch
new file mode 100644
index 0000000..5960690
--- /dev/null
+++ b/SOURCES/0455-locks-prevent-deletion-of-locked-entries.patch
@@ -0,0 +1,1253 @@
+From 3f6ff474db3934f43d9963dfe4dda7d201211e75 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Fri, 12 Jun 2020 00:06:36 +0200
+Subject: [PATCH 455/456] locks: prevent deletion of locked entries
+
+To keep consistency inside transactions started by locking an entry or
+an inode, this change delays the removal of entries that are currently
+locked by one or more clients. Once all locks are released, the removal
+is processed.
+
+The detection of stale inodes in the locking code of EC has also been
+improved.
+
+>Upstream patch - https://review.gluster.org/#/c/glusterfs/+/20025/
+>Fixes: #990
+
+Change-Id: Ic8ba23d9480f80c7f74e7a310bf8a15922320fd5
+BUG: 1812789
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/206442
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+---
+ xlators/cluster/ec/src/ec-locks.c    |  69 ++++++--
+ xlators/features/locks/src/common.c  | 316 ++++++++++++++++++++++++++++++++++-
+ xlators/features/locks/src/common.h  |  43 +++++
+ xlators/features/locks/src/entrylk.c |  19 +--
+ xlators/features/locks/src/inodelk.c | 150 ++++++++++-------
+ xlators/features/locks/src/locks.h   |  23 ++-
+ xlators/features/locks/src/posix.c   | 183 ++++++++++++++++++--
+ 7 files changed, 689 insertions(+), 114 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-locks.c b/xlators/cluster/ec/src/ec-locks.c
+index ffcac07..db86296 100644
+--- a/xlators/cluster/ec/src/ec-locks.c
++++ b/xlators/cluster/ec/src/ec-locks.c
+@@ -28,9 +28,36 @@ ec_lock_check(ec_fop_data_t *fop, uintptr_t *mask)
+     ec_t *ec = fop->xl->private;
+     ec_cbk_data_t *ans = NULL;
+     ec_cbk_data_t *cbk = NULL;
+-    uintptr_t locked = 0, notlocked = 0;
++    uintptr_t locked = 0;
++    int32_t good = 0;
++    int32_t eagain = 0;
++    int32_t estale = 0;
+     int32_t error = -1;
+ 
++    /* There are some errors that we'll handle in a special way while trying
++     * to acquire a lock.
++     *
++     *   EAGAIN:  If it's found during a parallel non-blocking lock request, we
++     *            assume that there's contention on the inode, so we treat the
++     *            acquisition as a failure and try again with a sequential
++     *            blocking lock request. This will ensure that we get a lock on
++     *            as many bricks as possible (ignoring EAGAIN here would cause
++     *            unnecessary triggers of self-healing).
++     *
++     *            If it's found during a sequential blocking lock request, it's
++     *            considered an error. Lock will only succeed if there are
++     *            enough other bricks locked.
++     *
++     *   ESTALE:  This can appear during a parallel or sequential lock request
++     *            if the inode has just been unlinked. We consider this error
++     *            unrecoverable, but we also don't consider it fatal. So, if
++     *            it happens during a parallel lock, we won't attempt a
++     *            sequential one unless there are EAGAIN errors on other
++     *            bricks (and there are enough to form a quorum), but if we
++     *            reach quorum counting the ESTALE bricks, we consider the
++     *            whole result of the operation ESTALE instead of EIO.
++     */
++
+     list_for_each_entry(ans, &fop->cbk_list, list)
+     {
+         if (ans->op_ret >= 0) {
+@@ -38,24 +65,23 @@ ec_lock_check(ec_fop_data_t *fop, uintptr_t *mask)
+                 error = EIO;
+             }
+             locked |= ans->mask;
++            good = ans->count;
+             cbk = ans;
+-        } else {
+-            if (ans->op_errno == EAGAIN) {
+-                switch (fop->uint32) {
+-                    case EC_LOCK_MODE_NONE:
+-                    case EC_LOCK_MODE_ALL:
+-                        /* Goal is to treat non-blocking lock as failure
+-                         * even if there is a single EAGAIN*/
+-                        notlocked |= ans->mask;
+-                        break;
+-                }
+-            }
++        } else if (ans->op_errno == ESTALE) {
++            estale += ans->count;
++        } else if ((ans->op_errno == EAGAIN) &&
++                   (fop->uint32 != EC_LOCK_MODE_INC)) {
++            eagain += ans->count;
+         }
+     }
+ 
+     if (error == -1) {
+-        if (gf_bits_count(locked | notlocked) >= ec->fragments) {
+-            if (notlocked == 0) {
++        /* If we reach quorum with successful and EAGAIN answers, we
++         * ignore for now any ESTALE answer. If there are EAGAIN answers,
++         * we retry with a sequential blocking lock request if needed.
++         * Otherwise we succeed. */
++        if ((good + eagain) >= ec->fragments) {
++            if (eagain == 0) {
+                 if (fop->answer == NULL) {
+                     fop->answer = cbk;
+                 }
+@@ -68,21 +94,28 @@ ec_lock_check(ec_fop_data_t *fop, uintptr_t *mask)
+                     case EC_LOCK_MODE_NONE:
+                         error = EAGAIN;
+                         break;
+-
+                     case EC_LOCK_MODE_ALL:
+                         fop->uint32 = EC_LOCK_MODE_INC;
+                         break;
+-
+                     default:
++                        /* This shouldn't happen because eagain cannot be > 0
++                         * when fop->uint32 is EC_LOCK_MODE_INC. */
+                         error = EIO;
+                         break;
+                 }
+             }
+         } else {
+-            if (fop->answer && fop->answer->op_ret < 0)
++            /* We have been unable to find enough candidates that will be able
++             * to take the lock. If we have quorum on some answer, we return
++             * it. Otherwise we check if ESTALE answers allow us to reach
++             * quorum. If so, we return ESTALE. */
++            if (fop->answer && fop->answer->op_ret < 0) {
+                 error = fop->answer->op_errno;
+-            else
++            } else if ((good + eagain + estale) >= ec->fragments) {
++                error = ESTALE;
++            } else {
+                 error = EIO;
++            }
+         }
+     }
+ 
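A standalone sketch of the quorum decision implemented above, assuming a
simplified answer summary in place of the real cbk list (the names and the
'fragments' threshold are illustrative, not the EC API):

    #include <errno.h>

    typedef struct {
        int good;   /* bricks that granted the lock */
        int eagain; /* bricks reporting contention (parallel request only) */
        int estale; /* bricks where the inode no longer exists */
    } lock_answers_t;

    /* Returns 0 on success, EAGAIN to retry with a sequential blocking
     * request, ESTALE when the inode is gone on enough bricks, and EIO if
     * no quorum can be formed at all. */
    static int lock_check_sketch(const lock_answers_t *a, int fragments)
    {
        if (a->good + a->eagain >= fragments)
            return (a->eagain == 0) ? 0 : EAGAIN;
        if (a->good + a->eagain + a->estale >= fragments)
            return ESTALE;
        return EIO;
    }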
+diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
+index 1406e70..0c52853 100644
+--- a/xlators/features/locks/src/common.c
++++ b/xlators/features/locks/src/common.c
+@@ -462,11 +462,16 @@ pl_inode_get(xlator_t *this, inode_t *inode, pl_local_t *local)
+         INIT_LIST_HEAD(&pl_inode->blocked_calls);
+         INIT_LIST_HEAD(&pl_inode->metalk_list);
+         INIT_LIST_HEAD(&pl_inode->queued_locks);
++        INIT_LIST_HEAD(&pl_inode->waiting);
+         gf_uuid_copy(pl_inode->gfid, inode->gfid);
+ 
+         pl_inode->check_mlock_info = _gf_true;
+         pl_inode->mlock_enforced = _gf_false;
+ 
++        /* -2 means never looked up. -1 means something went wrong and link
++         * tracking is disabled. */
++        pl_inode->links = -2;
++
+         ret = __inode_ctx_put(inode, this, (uint64_t)(long)(pl_inode));
+         if (ret) {
+             pthread_mutex_destroy(&pl_inode->mutex);
+@@ -1276,4 +1281,313 @@ pl_local_init(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd)
+     }
+ 
+     return 0;
+-}
+\ No newline at end of file
++}
++
++gf_boolean_t
++pl_is_lk_owner_valid(gf_lkowner_t *owner, client_t *client)
++{
++    if (client && (client->opversion < GD_OP_VERSION_7_0)) {
++        return _gf_true;
++    }
++
++    if (is_lk_owner_null(owner)) {
++        return _gf_false;
++    }
++    return _gf_true;
++}
++
++static int32_t
++pl_inode_from_loc(loc_t *loc, inode_t **pinode)
++{
++    inode_t *inode = NULL;
++    int32_t error = 0;
++
++    if (loc->inode != NULL) {
++        inode = inode_ref(loc->inode);
++        goto done;
++    }
++
++    if (loc->parent == NULL) {
++        error = EINVAL;
++        goto done;
++    }
++
++    if (!gf_uuid_is_null(loc->gfid)) {
++        inode = inode_find(loc->parent->table, loc->gfid);
++        if (inode != NULL) {
++            goto done;
++        }
++    }
++
++    if (loc->name == NULL) {
++        error = EINVAL;
++        goto done;
++    }
++
++    inode = inode_grep(loc->parent->table, loc->parent, loc->name);
++    if (inode == NULL) {
++        /* We haven't found any inode. This means that the file doesn't exist
++         * or that even if it exists, we don't have any knowledge about it, so
++         * we don't have locks on it either, which is fine for our purposes. */
++        goto done;
++    }
++
++done:
++    *pinode = inode;
++
++    return error;
++}
++
++static gf_boolean_t
++pl_inode_has_owners(xlator_t *xl, client_t *client, pl_inode_t *pl_inode,
++                    struct timespec *now, struct list_head *contend)
++{
++    pl_dom_list_t *dom;
++    pl_inode_lock_t *lock;
++    gf_boolean_t has_owners = _gf_false;
++
++    list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
++    {
++        list_for_each_entry(lock, &dom->inodelk_list, list)
++        {
++            /* If the lock belongs to the same client, we assume it's related
++             * to the same operation, so we allow the removal to continue. */
++            if (lock->client == client) {
++                continue;
++            }
++            /* If the lock belongs to an internal process, we don't block the
++             * removal. */
++            if (lock->client_pid < 0) {
++                continue;
++            }
++            if (contend == NULL) {
++                return _gf_true;
++            }
++            has_owners = _gf_true;
++            inodelk_contention_notify_check(xl, lock, now, contend);
++        }
++    }
++
++    return has_owners;
++}
++
++int32_t
++pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
++                        pl_inode_t **ppl_inode, struct list_head *contend)
++{
++    struct timespec now;
++    inode_t *inode;
++    pl_inode_t *pl_inode;
++    int32_t error;
++
++    pl_inode = NULL;
++
++    error = pl_inode_from_loc(loc, &inode);
++    if ((error != 0) || (inode == NULL)) {
++        goto done;
++    }
++
++    pl_inode = pl_inode_get(xl, inode, NULL);
++    if (pl_inode == NULL) {
++        inode_unref(inode);
++        error = ENOMEM;
++        goto done;
++    }
++
++    /* pl_inode_from_loc() already increments the inode's ref count, so
++     * we simply assign our reference here. */
++    pl_inode->inode = inode;
++
++    timespec_now(&now);
++
++    pthread_mutex_lock(&pl_inode->mutex);
++
++    if (pl_inode->removed) {
++        error = ESTALE;
++        goto unlock;
++    }
++
++    if (pl_inode_has_owners(xl, frame->root->client, pl_inode, &now, contend)) {
++        error = -1;
++        /* We skip the unlock here because the caller must create a stub when
++         * we return -1 and do a call to pl_inode_remove_complete(), which
++         * assumes the lock is still acquired and will release it once
++         * everything else is prepared. */
++        goto done;
++    }
++
++    pl_inode->is_locked = _gf_true;
++    pl_inode->remove_running++;
++
++unlock:
++    pthread_mutex_unlock(&pl_inode->mutex);
++
++done:
++    *ppl_inode = pl_inode;
++
++    return error;
++}
++
++int32_t
++pl_inode_remove_complete(xlator_t *xl, pl_inode_t *pl_inode, call_stub_t *stub,
++                         struct list_head *contend)
++{
++    pl_inode_lock_t *lock;
++    int32_t error = -1;
++
++    if (stub != NULL) {
++        list_add_tail(&stub->list, &pl_inode->waiting);
++        pl_inode->is_locked = _gf_true;
++    } else {
++        error = ENOMEM;
++
++        while (!list_empty(contend)) {
++            lock = list_first_entry(contend, pl_inode_lock_t, list);
++            list_del_init(&lock->list);
++            __pl_inodelk_unref(lock);
++        }
++    }
++
++    pthread_mutex_unlock(&pl_inode->mutex);
++
++    if (error < 0) {
++        inodelk_contention_notify(xl, contend);
++    }
++
++    inode_unref(pl_inode->inode);
++
++    return error;
++}
++
++void
++pl_inode_remove_wake(struct list_head *list)
++{
++    call_stub_t *stub;
++
++    while (!list_empty(list)) {
++        stub = list_first_entry(list, call_stub_t, list);
++        list_del_init(&stub->list);
++
++        call_resume(stub);
++    }
++}
++
++void
++pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error)
++{
++    struct list_head contend, granted;
++    struct timespec now;
++    pl_dom_list_t *dom;
++
++    if (pl_inode == NULL) {
++        return;
++    }
++
++    INIT_LIST_HEAD(&contend);
++    INIT_LIST_HEAD(&granted);
++    timespec_now(&now);
++
++    pthread_mutex_lock(&pl_inode->mutex);
++
++    if (error == 0) {
++        if (pl_inode->links >= 0) {
++            pl_inode->links--;
++        }
++        if (pl_inode->links == 0) {
++            pl_inode->removed = _gf_true;
++        }
++    }
++
++    pl_inode->remove_running--;
++
++    if ((pl_inode->remove_running == 0) && list_empty(&pl_inode->waiting)) {
++        pl_inode->is_locked = _gf_false;
++
++        list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
++        {
++            __grant_blocked_inode_locks(xl, pl_inode, &granted, dom, &now,
++                                        &contend);
++        }
++    }
++
++    pthread_mutex_unlock(&pl_inode->mutex);
++
++    unwind_granted_inodes(xl, pl_inode, &granted);
++
++    inodelk_contention_notify(xl, &contend);
++
++    inode_unref(pl_inode->inode);
++}
++
++void
++pl_inode_remove_unlocked(xlator_t *xl, pl_inode_t *pl_inode,
++                         struct list_head *list)
++{
++    call_stub_t *stub, *tmp;
++
++    if (!pl_inode->is_locked) {
++        return;
++    }
++
++    list_for_each_entry_safe(stub, tmp, &pl_inode->waiting, list)
++    {
++        if (!pl_inode_has_owners(xl, stub->frame->root->client, pl_inode, NULL,
++                                 NULL)) {
++            list_move_tail(&stub->list, list);
++        }
++    }
++}
++
++/* This function determines if an inodelk attempt can be done now or it needs
++ * to wait.
++ *
++ * Possible return values:
++ *   < 0: An error occurred. Currently only -ESTALE can be returned if the
++ *        inode has been deleted previously by unlink/rmdir/rename
++ *   = 0: The lock can be attempted.
++ *   > 0: The lock needs to wait because a conflicting remove operation is
++ *        ongoing.
++ */
++int32_t
++pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock)
++{
++    pl_dom_list_t *dom;
++    pl_inode_lock_t *ilock;
++
++    /* If the inode has been deleted, we won't allow any lock. */
++    if (pl_inode->removed) {
++        return -ESTALE;
++    }
++
++    /* We only synchronize with locks made for regular operations coming from
++     * the user. Locks done for internal purposes are hard to control and could
++     * lead to long delays or deadlocks quite easily. */
++    if (lock->client_pid < 0) {
++        return 0;
++    }
++    if (!pl_inode->is_locked) {
++        return 0;
++    }
++    if (pl_inode->remove_running > 0) {
++        return 1;
++    }
++
++    list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
++    {
++        list_for_each_entry(ilock, &dom->inodelk_list, list)
++        {
++            /* If a lock from the same client is already granted, we allow this
++             * one to continue. This is necessary to prevent deadlocks when
++             * multiple locks are taken for the same operation.
++             *
++             * On the other hand, it's unlikely that the same client sends
++             * completely unrelated locks for the same inode.
++             */
++            if (ilock->client == lock->client) {
++                return 0;
++            }
++        }
++    }
++
++    return 1;
++}
+diff --git a/xlators/features/locks/src/common.h b/xlators/features/locks/src/common.h
+index ea86b96..6c81ac3 100644
+--- a/xlators/features/locks/src/common.h
++++ b/xlators/features/locks/src/common.h
+@@ -105,6 +105,15 @@ void
+ __pl_inodelk_unref(pl_inode_lock_t *lock);
+ 
+ void
++__grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
++                            struct list_head *granted, pl_dom_list_t *dom,
++                            struct timespec *now, struct list_head *contend);
++
++void
++unwind_granted_inodes(xlator_t *this, pl_inode_t *pl_inode,
++                      struct list_head *granted);
++
++void
+ grant_blocked_entry_locks(xlator_t *this, pl_inode_t *pl_inode,
+                           pl_dom_list_t *dom, struct timespec *now,
+                           struct list_head *contend);
+@@ -204,6 +213,16 @@ pl_metalock_is_active(pl_inode_t *pl_inode);
+ void
+ __pl_queue_lock(pl_inode_t *pl_inode, posix_lock_t *reqlock);
+ 
++void
++inodelk_contention_notify_check(xlator_t *xl, pl_inode_lock_t *lock,
++                                struct timespec *now,
++                                struct list_head *contend);
++
++void
++entrylk_contention_notify_check(xlator_t *xl, pl_entry_lock_t *lock,
++                                struct timespec *now,
++                                struct list_head *contend);
++
+ gf_boolean_t
+ pl_does_monkey_want_stuck_lock();
+ 
+@@ -216,4 +235,28 @@ pl_clean_local(pl_local_t *local);
+ int
+ pl_local_init(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd);
+ 
++gf_boolean_t
++pl_is_lk_owner_valid(gf_lkowner_t *owner, client_t *client);
++
++int32_t
++pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
++                        pl_inode_t **ppl_inode, struct list_head *contend);
++
++int32_t
++pl_inode_remove_complete(xlator_t *xl, pl_inode_t *pl_inode, call_stub_t *stub,
++                         struct list_head *contend);
++
++void
++pl_inode_remove_wake(struct list_head *list);
++
++void
++pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error);
++
++void
++pl_inode_remove_unlocked(xlator_t *xl, pl_inode_t *pl_inode,
++                         struct list_head *list);
++
++int32_t
++pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock);
++
+ #endif /* __COMMON_H__ */
+diff --git a/xlators/features/locks/src/entrylk.c b/xlators/features/locks/src/entrylk.c
+index 93c649c..b97836f 100644
+--- a/xlators/features/locks/src/entrylk.c
++++ b/xlators/features/locks/src/entrylk.c
+@@ -197,9 +197,9 @@ out:
+     return revoke_lock;
+ }
+ 
+-static gf_boolean_t
+-__entrylk_needs_contention_notify(xlator_t *this, pl_entry_lock_t *lock,
+-                                  struct timespec *now)
++void
++entrylk_contention_notify_check(xlator_t *this, pl_entry_lock_t *lock,
++                                struct timespec *now, struct list_head *contend)
+ {
+     posix_locks_private_t *priv;
+     int64_t elapsed;
+@@ -209,7 +209,7 @@ __entrylk_needs_contention_notify(xlator_t *this, pl_entry_lock_t *lock,
+     /* If this lock is in a list, it means that we are about to send a
+      * notification for it, so no need to do anything else. */
+     if (!list_empty(&lock->contend)) {
+-        return _gf_false;
++        return;
+     }
+ 
+     elapsed = now->tv_sec;
+@@ -218,7 +218,7 @@ __entrylk_needs_contention_notify(xlator_t *this, pl_entry_lock_t *lock,
+         elapsed--;
+     }
+     if (elapsed < priv->notify_contention_delay) {
+-        return _gf_false;
++        return;
+     }
+ 
+     /* All contention notifications will be sent outside of the locked
+@@ -231,7 +231,7 @@ __entrylk_needs_contention_notify(xlator_t *this, pl_entry_lock_t *lock,
+ 
+     lock->contention_time = *now;
+ 
+-    return _gf_true;
++    list_add_tail(&lock->contend, contend);
+ }
+ 
+ void
+@@ -325,9 +325,7 @@ __entrylk_grantable(xlator_t *this, pl_dom_list_t *dom, pl_entry_lock_t *lock,
+                     break;
+                 }
+             }
+-            if (__entrylk_needs_contention_notify(this, tmp, now)) {
+-                list_add_tail(&tmp->contend, contend);
+-            }
++            entrylk_contention_notify_check(this, tmp, now, contend);
+         }
+     }
+ 
+@@ -690,10 +688,9 @@ __grant_blocked_entry_locks(xlator_t *this, pl_inode_t *pl_inode,
+         bl_ret = __lock_entrylk(bl->this, pl_inode, bl, 0, dom, now, contend);
+ 
+         if (bl_ret == 0) {
+-            list_add(&bl->blocked_locks, granted);
++            list_add_tail(&bl->blocked_locks, granted);
+         }
+     }
+-    return;
+ }
+ 
+ /* Grants locks if possible which are blocked on a lock */
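The refactor above turns the boolean __entrylk_needs_contention_notify()
helper into entrylk_contention_notify_check(), which appends the lock to
the pending-notification list itself so that pl_inode_has_owners() can
reuse it. A standalone sketch of the underlying rate-limited check
(simplified types; the real code uses timespec and intrusive lists):

    #include <stdbool.h>
    #include <time.h>

    typedef struct {
        bool   queued;        /* already on the notification list */
        time_t last_notified; /* when the last notification was sent */
    } contender_t;

    /* Marks the contender for notification at most once per 'delay'
     * seconds; the actual notification is sent outside any mutex. */
    static bool notify_check_sketch(contender_t *c, time_t now, int delay)
    {
        if (c->queued)
            return false; /* a notification is already on its way */
        if (now - c->last_notified < delay)
            return false; /* rate limit not yet expired */
        c->last_notified = now;
        c->queued = true; /* caller links it into the pending list */
        return true;
    }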
+diff --git a/xlators/features/locks/src/inodelk.c b/xlators/features/locks/src/inodelk.c
+index 24dee49..1a07243 100644
+--- a/xlators/features/locks/src/inodelk.c
++++ b/xlators/features/locks/src/inodelk.c
+@@ -231,9 +231,9 @@ out:
+     return revoke_lock;
+ }
+ 
+-static gf_boolean_t
+-__inodelk_needs_contention_notify(xlator_t *this, pl_inode_lock_t *lock,
+-                                  struct timespec *now)
++void
++inodelk_contention_notify_check(xlator_t *this, pl_inode_lock_t *lock,
++                                struct timespec *now, struct list_head *contend)
+ {
+     posix_locks_private_t *priv;
+     int64_t elapsed;
+@@ -243,7 +243,7 @@ __inodelk_needs_contention_notify(xlator_t *this, pl_inode_lock_t *lock,
+     /* If this lock is in a list, it means that we are about to send a
+      * notification for it, so no need to do anything else. */
+     if (!list_empty(&lock->contend)) {
+-        return _gf_false;
++        return;
+     }
+ 
+     elapsed = now->tv_sec;
+@@ -252,7 +252,7 @@ __inodelk_needs_contention_notify(xlator_t *this, pl_inode_lock_t *lock,
+         elapsed--;
+     }
+     if (elapsed < priv->notify_contention_delay) {
+-        return _gf_false;
++        return;
+     }
+ 
+     /* All contention notifications will be sent outside of the locked
+@@ -265,7 +265,7 @@ __inodelk_needs_contention_notify(xlator_t *this, pl_inode_lock_t *lock,
+ 
+     lock->contention_time = *now;
+ 
+-    return _gf_true;
++    list_add_tail(&lock->contend, contend);
+ }
+ 
+ void
+@@ -353,9 +353,7 @@ __inodelk_grantable(xlator_t *this, pl_dom_list_t *dom, pl_inode_lock_t *lock,
+                     break;
+                 }
+             }
+-            if (__inodelk_needs_contention_notify(this, l, now)) {
+-                list_add_tail(&l->contend, contend);
+-            }
++            inodelk_contention_notify_check(this, l, now, contend);
+         }
+     }
+ 
+@@ -435,12 +433,17 @@ __lock_inodelk(xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
+                struct list_head *contend)
+ {
+     pl_inode_lock_t *conf = NULL;
+-    int ret = -EINVAL;
++    int ret;
+ 
+-    conf = __inodelk_grantable(this, dom, lock, now, contend);
+-    if (conf) {
+-        ret = __lock_blocked_add(this, dom, lock, can_block);
+-        goto out;
++    ret = pl_inode_remove_inodelk(pl_inode, lock);
++    if (ret < 0) {
++        return ret;
++    }
++    if (ret == 0) {
++        conf = __inodelk_grantable(this, dom, lock, now, contend);
++    }
++    if ((ret > 0) || (conf != NULL)) {
++        return __lock_blocked_add(this, dom, lock, can_block);
+     }
+ 
+     /* To prevent blocked locks starvation, check if there are any blocked
+@@ -462,17 +465,13 @@ __lock_inodelk(xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
+                    "starvation");
+         }
+ 
+-        ret = __lock_blocked_add(this, dom, lock, can_block);
+-        goto out;
++        return __lock_blocked_add(this, dom, lock, can_block);
+     }
+     __pl_inodelk_ref(lock);
+     gettimeofday(&lock->granted_time, NULL);
+     list_add(&lock->list, &dom->inodelk_list);
+ 
+-    ret = 0;
+-
+-out:
+-    return ret;
++    return 0;
+ }
+ 
+ /* Return true if the two inodelks have exactly same lock boundaries */
+@@ -529,12 +528,11 @@ out:
+     return conf;
+ }
+ 
+-static void
++void
+ __grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+                             struct list_head *granted, pl_dom_list_t *dom,
+                             struct timespec *now, struct list_head *contend)
+ {
+-    int bl_ret = 0;
+     pl_inode_lock_t *bl = NULL;
+     pl_inode_lock_t *tmp = NULL;
+ 
+@@ -547,52 +545,48 @@ __grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+     {
+         list_del_init(&bl->blocked_locks);
+ 
+-        bl_ret = __lock_inodelk(this, pl_inode, bl, 1, dom, now, contend);
++        bl->status = __lock_inodelk(this, pl_inode, bl, 1, dom, now, contend);
+ 
+-        if (bl_ret == 0) {
+-            list_add(&bl->blocked_locks, granted);
++        if (bl->status != -EAGAIN) {
++            list_add_tail(&bl->blocked_locks, granted);
+         }
+     }
+-    return;
+ }
+ 
+-/* Grant all inodelks blocked on a lock */
+ void
+-grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+-                          pl_dom_list_t *dom, struct timespec *now,
+-                          struct list_head *contend)
++unwind_granted_inodes(xlator_t *this, pl_inode_t *pl_inode,
++                      struct list_head *granted)
+ {
+-    struct list_head granted;
+     pl_inode_lock_t *lock;
+     pl_inode_lock_t *tmp;
++    int32_t op_ret;
++    int32_t op_errno;
+ 
+-    INIT_LIST_HEAD(&granted);
+-
+-    pthread_mutex_lock(&pl_inode->mutex);
+-    {
+-        __grant_blocked_inode_locks(this, pl_inode, &granted, dom, now,
+-                                    contend);
+-    }
+-    pthread_mutex_unlock(&pl_inode->mutex);
+-
+-    list_for_each_entry_safe(lock, tmp, &granted, blocked_locks)
++    list_for_each_entry_safe(lock, tmp, granted, blocked_locks)
+     {
+-        gf_log(this->name, GF_LOG_TRACE,
+-               "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64 " => Granted",
+-               lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+-               lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+-               lock->user_flock.l_len);
+-
++        if (lock->status == 0) {
++            op_ret = 0;
++            op_errno = 0;
++            gf_log(this->name, GF_LOG_TRACE,
++                   "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64
++                   " => Granted",
++                   lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
++                   lock->client_pid, lkowner_utoa(&lock->owner),
++                   lock->user_flock.l_start, lock->user_flock.l_len);
++        } else {
++            op_ret = -1;
++            op_errno = -lock->status;
++        }
+         pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+-                     0, 0, lock->volume);
++                     op_ret, op_errno, lock->volume);
+ 
+-        STACK_UNWIND_STRICT(inodelk, lock->frame, 0, 0, NULL);
++        STACK_UNWIND_STRICT(inodelk, lock->frame, op_ret, op_errno, NULL);
+         lock->frame = NULL;
+     }
+ 
+     pthread_mutex_lock(&pl_inode->mutex);
+     {
+-        list_for_each_entry_safe(lock, tmp, &granted, blocked_locks)
++        list_for_each_entry_safe(lock, tmp, granted, blocked_locks)
+         {
+             list_del_init(&lock->blocked_locks);
+             __pl_inodelk_unref(lock);
+@@ -601,6 +595,26 @@ grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+     pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ 
++/* Grant all inodelks blocked on a lock */
++void
++grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
++                          pl_dom_list_t *dom, struct timespec *now,
++                          struct list_head *contend)
++{
++    struct list_head granted;
++
++    INIT_LIST_HEAD(&granted);
++
++    pthread_mutex_lock(&pl_inode->mutex);
++    {
++        __grant_blocked_inode_locks(this, pl_inode, &granted, dom, now,
++                                    contend);
++    }
++    pthread_mutex_unlock(&pl_inode->mutex);
++
++    unwind_granted_inodes(this, pl_inode, &granted);
++}
++
+ static void
+ pl_inodelk_log_cleanup(pl_inode_lock_t *lock)
+ {
+@@ -662,7 +676,7 @@ pl_inodelk_client_cleanup(xlator_t *this, pl_ctx_t *ctx)
+                  * and blocked lists, then this means that a parallel
+                  * unlock on another inodelk (L2 say) may have 'granted'
+                  * L1 and added it to 'granted' list in
+-                 * __grant_blocked_node_locks() (although using the
++                 * __grant_blocked_inode_locks() (although using the
+                  * 'blocked_locks' member). In that case, the cleanup
+                  * codepath must try and grant other overlapping
+                  * blocked inodelks from other clients, now that L1 is
+@@ -747,6 +761,7 @@ pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+     gf_boolean_t need_inode_unref = _gf_false;
+     struct list_head *pcontend = NULL;
+     struct list_head contend;
++    struct list_head wake;
+     struct timespec now = {};
+     short fl_type;
+ 
+@@ -798,6 +813,8 @@ pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+         timespec_now(&now);
+     }
+ 
++    INIT_LIST_HEAD(&wake);
++
+     if (ctx)
+         pthread_mutex_lock(&ctx->lock);
+     pthread_mutex_lock(&pl_inode->mutex);
+@@ -820,18 +837,17 @@ pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+                        lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+                        lock->client_pid, lkowner_utoa(&lock->owner),
+                        lock->user_flock.l_start, lock->user_flock.l_len);
+-                if (can_block)
++                if (can_block) {
+                     unref = _gf_false;
+-                /* For all but the case where a non-blocking
+-                 * lock attempt fails, the extra ref taken at
+-                 * the start of this function must be negated.
+-                 */
+-                else
+-                    need_inode_unref = _gf_true;
++                }
+             }
+-
+-            if (ctx && (!ret || can_block))
++            /* For all but the case where a non-blocking lock attempt fails
++             * with -EAGAIN, the extra ref taken at the start of this function
++             * must be negated. */
++            need_inode_unref = (ret != 0) && ((ret != -EAGAIN) || !can_block);
++            if (ctx && !need_inode_unref) {
+                 list_add_tail(&lock->client_list, &ctx->inodelk_lockers);
++            }
+         } else {
+             /* Irrespective of whether unlock succeeds or not,
+              * the extra inode ref that was done at the start of
+@@ -849,6 +865,8 @@ pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+             list_del_init(&retlock->client_list);
+             __pl_inodelk_unref(retlock);
+ 
++            pl_inode_remove_unlocked(this, pl_inode, &wake);
++
+             ret = 0;
+         }
+     out:
+@@ -859,6 +877,8 @@ pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+     if (ctx)
+         pthread_mutex_unlock(&ctx->lock);
+ 
++    pl_inode_remove_wake(&wake);
++
+     /* The following (extra) unref corresponds to the ref that
+      * was done at the time the lock was granted.
+      */
+@@ -1033,10 +1053,14 @@ pl_common_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+                                  inode);
+ 
+             if (ret < 0) {
+-                if ((can_block) && (F_UNLCK != lock_type)) {
+-                    goto out;
++                if (ret == -EAGAIN) {
++                    if (can_block && (F_UNLCK != lock_type)) {
++                        goto out;
++                    }
++                    gf_log(this->name, GF_LOG_TRACE, "returning EAGAIN");
++                } else {
++                    gf_log(this->name, GF_LOG_TRACE, "returning %d", ret);
+                 }
+-                gf_log(this->name, GF_LOG_TRACE, "returning EAGAIN");
+                 op_errno = -ret;
+                 goto unwind;
+             }
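With blocked inodelks now carrying a 'status', the unwind path above must
distinguish -EAGAIN (which may legitimately turn into a blocked wait) from
other failures such as -ESTALE (which must be returned immediately). A
small sketch of that dispatch, with illustrative names:

    #include <errno.h>
    #include <stdbool.h>

    /* Returns 1 if the request was parked (reply comes later), 0 if it
     * was granted, -1 if it must be unwound now with *op_errno set. */
    static int setlk_dispatch_sketch(int ret, bool can_block,
                                     bool is_unlock, int *op_errno)
    {
        if (ret >= 0)
            return 0;                 /* granted or unlocked */
        if ((ret == -EAGAIN) && can_block && !is_unlock)
            return 1;                 /* queued behind the conflict */
        *op_errno = -ret;             /* EAGAIN or ESTALE to the client */
        return -1;
    }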
+diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
+index aa267de..6666feb 100644
+--- a/xlators/features/locks/src/locks.h
++++ b/xlators/features/locks/src/locks.h
+@@ -102,6 +102,9 @@ struct __pl_inode_lock {
+ 
+     struct list_head client_list; /* list of all locks from a client */
+     short fl_type;
++
++    int32_t status; /* Error code when we try to grant a lock in blocked
++                       state */
+ };
+ typedef struct __pl_inode_lock pl_inode_lock_t;
+ 
+@@ -164,13 +167,14 @@ struct __pl_inode {
+     struct list_head rw_list;            /* list of waiting r/w requests */
+     struct list_head reservelk_list;     /* list of reservelks */
+     struct list_head blocked_reservelks; /* list of blocked reservelks */
+-    struct list_head
+-        blocked_calls; /* List of blocked lock calls while a reserve is held*/
+-    struct list_head metalk_list; /* Meta lock list */
+-                                  /* This is to store the incoming lock
+-                                     requests while meta lock is enabled */
+-    struct list_head queued_locks;
+-    int mandatory; /* if mandatory locking is enabled */
++    struct list_head blocked_calls;      /* List of blocked lock calls while a
++                                            reserve is held*/
++    struct list_head metalk_list;        /* Meta lock list */
++    struct list_head queued_locks;       /* This is to store the incoming lock
++                                            requests while meta lock is enabled */
++    struct list_head waiting; /* List of pending fops waiting to unlink/rmdir
++                                 the inode. */
++    int mandatory;            /* if mandatory locking is enabled */
+ 
+     inode_t *refkeeper; /* hold refs on an inode while locks are
+                            held to prevent pruning */
+@@ -197,6 +201,11 @@ struct __pl_inode {
+     */
+     int fop_wind_count;
+     pthread_cond_t check_fop_wind_count;
++
++    int32_t links;           /* Number of hard links the inode has. */
++    uint32_t remove_running; /* Number of remove operations running. */
++    gf_boolean_t is_locked;  /* Regular locks will be blocked. */
++    gf_boolean_t removed;    /* The inode has been deleted. */
+ };
+ typedef struct __pl_inode pl_inode_t;
+ 
+diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
+index 7887b82..5ae0125 100644
+--- a/xlators/features/locks/src/posix.c
++++ b/xlators/features/locks/src/posix.c
+@@ -147,6 +147,29 @@ fetch_pathinfo(xlator_t *, inode_t *, int32_t *, char **);
+         }                                                                      \
+     } while (0)
+ 
++#define PL_INODE_REMOVE(_fop, _frame, _xl, _loc1, _loc2, _cont, _cbk,          \
++                        _args...)                                              \
++    ({                                                                         \
++        struct list_head contend;                                              \
++        pl_inode_t *__pl_inode;                                                \
++        call_stub_t *__stub;                                                   \
++        int32_t __error;                                                       \
++        INIT_LIST_HEAD(&contend);                                              \
++        __error = pl_inode_remove_prepare(_xl, _frame, _loc2 ? _loc2 : _loc1,  \
++                                          &__pl_inode, &contend);              \
++        if (__error < 0) {                                                     \
++            __stub = fop_##_fop##_stub(_frame, _cont, ##_args);                \
++            __error = pl_inode_remove_complete(_xl, __pl_inode, __stub,        \
++                                               &contend);                      \
++        } else if (__error == 0) {                                             \
++            PL_LOCAL_GET_REQUESTS(_frame, _xl, xdata, ((fd_t *)NULL), _loc1,   \
++                                  _loc2);                                      \
++            STACK_WIND_COOKIE(_frame, _cbk, __pl_inode, FIRST_CHILD(_xl),      \
++                              FIRST_CHILD(_xl)->fops->_fop, ##_args);          \
++        }                                                                      \
++        __error;                                                               \
++    })
++
+ gf_boolean_t
+ pl_has_xdata_requests(dict_t *xdata)
+ {
+@@ -2969,11 +2992,85 @@ out:
+     return ret;
+ }
+ 
++static int32_t
++pl_request_link_count(dict_t **pxdata)
++{
++    dict_t *xdata;
++
++    xdata = *pxdata;
++    if (xdata == NULL) {
++        xdata = dict_new();
++        if (xdata == NULL) {
++            return ENOMEM;
++        }
++    } else {
++        dict_ref(xdata);
++    }
++
++    if (dict_set_uint32(xdata, GET_LINK_COUNT, 0) != 0) {
++        dict_unref(xdata);
++        return ENOMEM;
++    }
++
++    *pxdata = xdata;
++
++    return 0;
++}
++
++static int32_t
++pl_check_link_count(dict_t *xdata)
++{
++    int32_t count;
++
++    /* In case we are unable to read the link count from xdata, we take a
++     * conservative approach and return -2, which will prevent the inode from
++     * being considered deleted. In fact it will cause link tracking for this
++     * inode to be disabled completely to avoid races. */
++
++    if (xdata == NULL) {
++        return -2;
++    }
++
++    if (dict_get_int32(xdata, GET_LINK_COUNT, &count) != 0) {
++        return -2;
++    }
++
++    return count;
++}
++
+ int32_t
+ pl_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+               int32_t op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+               struct iatt *postparent)
+ {
++    pl_inode_t *pl_inode;
++
++    if (op_ret >= 0) {
++        pl_inode = pl_inode_get(this, inode, NULL);
++        if (pl_inode == NULL) {
++            PL_STACK_UNWIND(lookup, xdata, frame, -1, ENOMEM, NULL, NULL, NULL,
++                            NULL);
++            return 0;
++        }
++
++        pthread_mutex_lock(&pl_inode->mutex);
++
++        /* We only update the link count if we didn't know it previously.
++         * Always doing it can lead to races since lookup is not executed
++         * atomically most of the time. */
++        if (pl_inode->links == -2) {
++            pl_inode->links = pl_check_link_count(xdata);
++            if (buf->ia_type == IA_IFDIR) {
++                /* Directories have at least 2 links. To avoid special handling
++                 * for directories, we simply decrement the value here to make
++                 * them equivalent to regular files. */
++                pl_inode->links--;
++            }
++        }
++
++        pthread_mutex_unlock(&pl_inode->mutex);
++    }
++
+     PL_STACK_UNWIND(lookup, xdata, frame, op_ret, op_errno, inode, buf, xdata,
+                     postparent);
+     return 0;
+@@ -2982,9 +3079,17 @@ pl_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t
+ pl_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+ {
+-    PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+-    STACK_WIND(frame, pl_lookup_cbk, FIRST_CHILD(this),
+-               FIRST_CHILD(this)->fops->lookup, loc, xdata);
++    int32_t error;
++
++    error = pl_request_link_count(&xdata);
++    if (error == 0) {
++        PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
++        STACK_WIND(frame, pl_lookup_cbk, FIRST_CHILD(this),
++                   FIRST_CHILD(this)->fops->lookup, loc, xdata);
++        dict_unref(xdata);
++    } else {
++        STACK_UNWIND_STRICT(lookup, frame, -1, error, NULL, NULL, NULL, NULL);
++    }
+     return 0;
+ }
+ 
+@@ -3792,6 +3897,10 @@ unlock:
+             gf_proc_dump_write("posixlk-count", "%d", count);
+             __dump_posixlks(pl_inode);
+         }
++
++        gf_proc_dump_write("links", "%d", pl_inode->links);
++        gf_proc_dump_write("removes_pending", "%u", pl_inode->remove_running);
++        gf_proc_dump_write("removed", "%u", pl_inode->removed);
+     }
+     pthread_mutex_unlock(&pl_inode->mutex);
+ 
+@@ -4137,8 +4246,11 @@ pl_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+               struct iatt *postoldparent, struct iatt *prenewparent,
+               struct iatt *postnewparent, dict_t *xdata)
+ {
++    pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
++
+     PL_STACK_UNWIND(rename, xdata, frame, op_ret, op_errno, buf, preoldparent,
+                     postoldparent, prenewparent, postnewparent, xdata);
++
+     return 0;
+ }
+ 
+@@ -4146,10 +4258,15 @@ int32_t
+ pl_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+           dict_t *xdata)
+ {
+-    PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), oldloc, newloc);
++    int32_t error;
++
++    error = PL_INODE_REMOVE(rename, frame, this, oldloc, newloc, pl_rename,
++                            pl_rename_cbk, oldloc, newloc, xdata);
++    if (error > 0) {
++        STACK_UNWIND_STRICT(rename, frame, -1, error, NULL, NULL, NULL, NULL,
++                            NULL, NULL);
++    }
+ 
+-    STACK_WIND(frame, pl_rename_cbk, FIRST_CHILD(this),
+-               FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+     return 0;
+ }
+ 
+@@ -4273,8 +4390,11 @@ pl_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+               int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+               dict_t *xdata)
+ {
++    pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
++
+     PL_STACK_UNWIND(unlink, xdata, frame, op_ret, op_errno, preparent,
+                     postparent, xdata);
++
+     return 0;
+ }
+ 
+@@ -4282,9 +4402,14 @@ int32_t
+ pl_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+           dict_t *xdata)
+ {
+-    PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+-    STACK_WIND(frame, pl_unlink_cbk, FIRST_CHILD(this),
+-               FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
++    int32_t error;
++
++    error = PL_INODE_REMOVE(unlink, frame, this, loc, NULL, pl_unlink,
++                            pl_unlink_cbk, loc, xflag, xdata);
++    if (error > 0) {
++        STACK_UNWIND_STRICT(unlink, frame, -1, error, NULL, NULL, NULL);
++    }
++
+     return 0;
+ }
+ 
+@@ -4351,8 +4476,11 @@ pl_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+              int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+              dict_t *xdata)
+ {
++    pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
++
+     PL_STACK_UNWIND_FOR_CLIENT(rmdir, xdata, frame, op_ret, op_errno, preparent,
+                                postparent, xdata);
++
+     return 0;
+ }
+ 
+@@ -4360,9 +4488,14 @@ int
+ pl_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+          dict_t *xdata)
+ {
+-    PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+-    STACK_WIND(frame, pl_rmdir_cbk, FIRST_CHILD(this),
+-               FIRST_CHILD(this)->fops->rmdir, loc, xflags, xdata);
++    int32_t error;
++
++    error = PL_INODE_REMOVE(rmdir, frame, this, loc, NULL, pl_rmdir,
++                            pl_rmdir_cbk, loc, xflags, xdata);
++    if (error > 0) {
++        STACK_UNWIND_STRICT(rmdir, frame, -1, error, NULL, NULL, NULL);
++    }
++
+     return 0;
+ }
+ 
+@@ -4392,6 +4525,19 @@ pl_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+             int32_t op_errno, inode_t *inode, struct iatt *buf,
+             struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
+ {
++    pl_inode_t *pl_inode = (pl_inode_t *)cookie;
++
++    if (op_ret >= 0) {
++        pthread_mutex_lock(&pl_inode->mutex);
++
++        /* TODO: can pl_inode->links == 0 happen here? */
++        if (pl_inode->links >= 0) {
++            pl_inode->links++;
++        }
++
++        pthread_mutex_unlock(&pl_inode->mutex);
++    }
++
+     PL_STACK_UNWIND_FOR_CLIENT(link, xdata, frame, op_ret, op_errno, inode, buf,
+                                preparent, postparent, xdata);
+     return 0;
+@@ -4401,9 +4547,18 @@ int
+ pl_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+         dict_t *xdata)
+ {
++    pl_inode_t *pl_inode;
++
++    pl_inode = pl_inode_get(this, oldloc->inode, NULL);
++    if (pl_inode == NULL) {
++        STACK_UNWIND_STRICT(link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
++                            NULL);
++        return 0;
++    }
++
+     PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), oldloc, newloc);
+-    STACK_WIND(frame, pl_link_cbk, FIRST_CHILD(this),
+-               FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
++    STACK_WIND_COOKIE(frame, pl_link_cbk, pl_inode, FIRST_CHILD(this),
++                      FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+     return 0;
+ }
+ 
+-- 
+1.8.3.1
+
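Within the patch above, pl_request_link_count() reuses the caller's xdata
dictionary when present (taking an extra reference) and creates one
otherwise, so pl_lookup() can always balance it with a single dict_unref().
A toy refcounted sketch of that borrow-or-create pattern (the type here is
a stand-in, not the GlusterFS dict API):

    #include <stdlib.h>

    typedef struct {
        int      refs;
        unsigned want_link_count; /* stands in for the GET_LINK_COUNT key */
    } xdata_sketch_t;

    /* Returns NULL on allocation failure (mapped to ENOMEM by callers). */
    static xdata_sketch_t *request_link_count_sketch(xdata_sketch_t **pd)
    {
        xdata_sketch_t *d = *pd;

        if (d == NULL) {
            d = calloc(1, sizeof(*d));
            if (d == NULL)
                return NULL;
            d->refs = 1;      /* newly created: owned by this request */
        } else {
            d->refs++;        /* borrowed: balanced by one unref later */
        }
        d->want_link_count = 1;
        *pd = d;
        return d;
    }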
diff --git a/SOURCES/0456-add-clean-local-after-grant-lock.patch b/SOURCES/0456-add-clean-local-after-grant-lock.patch
new file mode 100644
index 0000000..6b8210b
--- /dev/null
+++ b/SOURCES/0456-add-clean-local-after-grant-lock.patch
@@ -0,0 +1,74 @@
+From c38b38249fdf951565f6501ce8e9a4d01142d43e Mon Sep 17 00:00:00 2001
+From: l17zhou <cynthia.zhou@nokia-sbell.com>
+Date: Tue, 3 Dec 2019 07:43:35 +0200
+Subject: [PATCH 456/456] add clean local after grant lock
+
+Found by a flock test: without the correct ref count on the fd,
+the lock will not be released correctly.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/23794
+> Fixes: bz#1779089
+> Change-Id: I3e466b17c852eb219c8778e43af8ad670a8449cc
+> Signed-off-by: l17zhou <cynthia.zhou@nokia-sbell.com>
+
+BUG: 1854165
+Change-Id: I3e466b17c852eb219c8778e43af8ad670a8449cc
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/206673
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/features/locks/src/common.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
+index 0c52853..cddbfa6 100644
+--- a/xlators/features/locks/src/common.c
++++ b/xlators/features/locks/src/common.c
+@@ -961,7 +961,7 @@ grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode)
+     struct list_head granted_list;
+     posix_lock_t *tmp = NULL;
+     posix_lock_t *lock = NULL;
+-
++    pl_local_t *local = NULL;
+     INIT_LIST_HEAD(&granted_list);
+ 
+     pthread_mutex_lock(&pl_inode->mutex);
+@@ -976,9 +976,9 @@ grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode)
+ 
+         pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+                      0, 0, NULL);
+-
+-        STACK_UNWIND_STRICT(lk, lock->frame, 0, 0, &lock->user_flock, NULL);
+-
++        local = lock->frame->local;
++        PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0,
++                                 &lock->user_flock, NULL);
+         __destroy_lock(lock);
+     }
+ 
+@@ -997,6 +997,7 @@ pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode,
+     struct list_head granted_list;
+     posix_lock_t *tmp = NULL;
+     posix_lock_t *lock = NULL;
++    pl_local_t *local = NULL;
+ 
+     int ret = -1;
+ 
+@@ -1024,9 +1025,9 @@ pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode,
+ 
+         pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+                      0, 0, NULL);
+-
+-        STACK_UNWIND_STRICT(lk, lock->frame, 0, 0, &lock->user_flock, NULL);
+-
++        local = lock->frame->local;
++        PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0,
++                                 &lock->user_flock, NULL);
+         __destroy_lock(lock);
+     }
+ 
+-- 
+1.8.3.1
+
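Patch 0456 replaces the bare STACK_UNWIND_STRICT() with
PL_STACK_UNWIND_AND_FREE() when granting blocked lk fops, so the
per-request 'local' state (which pins an fd reference) is released
together with the reply. A minimal sketch of the leak being fixed, with
illustrative types:

    #include <stdlib.h>

    typedef struct { int refs; } fd_sketch_t;
    typedef struct { fd_sketch_t *fd; } local_sketch_t;

    static void reply_and_free_sketch(local_sketch_t **plocal)
    {
        local_sketch_t *l = *plocal;

        /* ... send the reply to the client first ... */

        if (l != NULL) {
            l->fd->refs--;  /* drop the ref taken when the lk fop arrived;
                             * without this the fd (and its locks) leak */
            free(l);
            *plocal = NULL; /* detach before the frame is reused */
        }
    }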
diff --git a/SOURCES/0457-cluster-ec-Improve-detection-of-new-heals.patch b/SOURCES/0457-cluster-ec-Improve-detection-of-new-heals.patch
new file mode 100644
index 0000000..be9202a
--- /dev/null
+++ b/SOURCES/0457-cluster-ec-Improve-detection-of-new-heals.patch
@@ -0,0 +1,409 @@
+From 3e8b3a2c2c6f83635486035fc8040c87d89813d2 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Thu, 2 Jul 2020 18:08:52 +0200
+Subject: [PATCH 457/465] cluster/ec: Improve detection of new heals
+
+When EC successfully healed a directory it assumed that other entries
+inside that directory could have been created, which could require
+additional heal cycles. For this reason, when the heal happened as part
+of one index heal iteration, it triggered a new iteration.
+
+The problem happened when the directory was healthy, so no new entries
+were added, but its index entry was not removed for some reason. In
+this case self-heal entered an endless loop healing the same directory
+continuously, causing high CPU utilization.
+
+This patch improves detection of new files added to the heal index so
+that a new index heal iteration is only triggered if there is new work
+to do.
+
+>Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24665/
+>Fixes: #1354
+
+Change-Id: I2355742b85fbfa6de758bccc5d2e1a283c82b53f
+BUG: 1852736
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208041
+Tested-by: Ashish Pandey <aspandey@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ashish Pandey <aspandey@redhat.com>
+---
+ xlators/cluster/ec/src/ec-common.c     |  2 +-
+ xlators/cluster/ec/src/ec-heal.c       | 58 +++++++++++++++++++++++-----------
+ xlators/cluster/ec/src/ec-heald.c      | 24 ++++++++++----
+ xlators/cluster/ec/src/ec-inode-read.c | 27 ++++++++++++++--
+ xlators/cluster/ec/src/ec-types.h      |  4 +--
+ xlators/cluster/ec/src/ec.h            |  1 +
+ 6 files changed, 86 insertions(+), 30 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
+index e580bfb..e3f8769 100644
+--- a/xlators/cluster/ec/src/ec-common.c
++++ b/xlators/cluster/ec/src/ec-common.c
+@@ -230,7 +230,7 @@ ec_child_next(ec_t *ec, ec_fop_data_t *fop, uint32_t idx)
+ int32_t
+ ec_heal_report(call_frame_t *frame, void *cookie, xlator_t *this,
+                int32_t op_ret, int32_t op_errno, uintptr_t mask, uintptr_t good,
+-               uintptr_t bad, dict_t *xdata)
++               uintptr_t bad, uint32_t pending, dict_t *xdata)
+ {
+     if (op_ret < 0) {
+         gf_msg(this->name, GF_LOG_DEBUG, op_errno, EC_MSG_HEAL_FAIL,
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 06a7016..e2de879 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -72,6 +72,7 @@ struct ec_name_data {
+     char *name;
+     inode_t *parent;
+     default_args_cbk_t *replies;
++    uint32_t heal_pending;
+ };
+ 
+ static char *ec_ignore_xattrs[] = {GF_SELINUX_XATTR_KEY, QUOTA_SIZE_KEY, NULL};
+@@ -996,6 +997,7 @@ ec_set_new_entry_dirty(ec_t *ec, loc_t *loc, struct iatt *ia,
+         ret = -ENOTCONN;
+         goto out;
+     }
++
+ out:
+     if (xattr)
+         dict_unref(xattr);
+@@ -1164,6 +1166,7 @@ ec_create_name(call_frame_t *frame, ec_t *ec, inode_t *parent, char *name,
+     dict_t *xdata = NULL;
+     char *linkname = NULL;
+     ec_config_t config;
++
+     /* There should be just one gfid key */
+     EC_REPLIES_ALLOC(replies, ec->nodes);
+     if (gfid_db->count != 1) {
+@@ -1408,6 +1411,11 @@ __ec_heal_name(call_frame_t *frame, ec_t *ec, inode_t *parent, char *name,
+ 
+     ret = ec_create_name(frame, ec, parent, name, replies, gfid_db, enoent,
+                          participants);
++    if (ret >= 0) {
++        /* If ec_create_name() succeeded we return 1 to indicate that a new
++         * file has been created and it will need to be healed. */
++        ret = 1;
++    }
+ out:
+     cluster_replies_wipe(replies, ec->nodes);
+     loc_wipe(&loc);
+@@ -1485,18 +1493,22 @@ ec_name_heal_handler(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+     ret = ec_heal_name(name_data->frame, ec, parent->inode, entry->d_name,
+                        name_on);
+ 
+-    if (ret < 0)
++    if (ret < 0) {
+         memset(name_on, 0, ec->nodes);
++    } else {
++        name_data->heal_pending += ret;
++    }
+ 
+     for (i = 0; i < ec->nodes; i++)
+         if (name_data->participants[i] && !name_on[i])
+             name_data->failed_on[i] = 1;
++
+     return 0;
+ }
+ 
+ int
+ ec_heal_names(call_frame_t *frame, ec_t *ec, inode_t *inode,
+-              unsigned char *participants)
++              unsigned char *participants, uint32_t *pending)
+ {
+     int i = 0;
+     int j = 0;
+@@ -1509,7 +1521,7 @@ ec_heal_names(call_frame_t *frame, ec_t *ec, inode_t *inode,
+     name_data.frame = frame;
+     name_data.participants = participants;
+     name_data.failed_on = alloca0(ec->nodes);
+-    ;
++    name_data.heal_pending = 0;
+ 
+     for (i = 0; i < ec->nodes; i++) {
+         if (!participants[i])
+@@ -1528,6 +1540,8 @@ ec_heal_names(call_frame_t *frame, ec_t *ec, inode_t *inode,
+             break;
+         }
+     }
++    *pending += name_data.heal_pending;
++
+     loc_wipe(&loc);
+     return ret;
+ }
+@@ -1535,7 +1549,7 @@ ec_heal_names(call_frame_t *frame, ec_t *ec, inode_t *inode,
+ int
+ __ec_heal_entry(call_frame_t *frame, ec_t *ec, inode_t *inode,
+                 unsigned char *heal_on, unsigned char *sources,
+-                unsigned char *healed_sinks)
++                unsigned char *healed_sinks, uint32_t *pending)
+ {
+     unsigned char *locked_on = NULL;
+     unsigned char *output = NULL;
+@@ -1580,7 +1594,7 @@ unlock:
+         if (sources[i] || healed_sinks[i])
+             participants[i] = 1;
+     }
+-    ret = ec_heal_names(frame, ec, inode, participants);
++    ret = ec_heal_names(frame, ec, inode, participants, pending);
+ 
+     if (EC_COUNT(participants, ec->nodes) <= ec->fragments)
+         goto out;
+@@ -1601,7 +1615,8 @@ out:
+ 
+ int
+ ec_heal_entry(call_frame_t *frame, ec_t *ec, inode_t *inode,
+-              unsigned char *sources, unsigned char *healed_sinks)
++              unsigned char *sources, unsigned char *healed_sinks,
++              uint32_t *pending)
+ {
+     unsigned char *locked_on = NULL;
+     unsigned char *up_subvols = NULL;
+@@ -1632,7 +1647,7 @@ ec_heal_entry(call_frame_t *frame, ec_t *ec, inode_t *inode,
+             goto unlock;
+         }
+         ret = __ec_heal_entry(frame, ec, inode, locked_on, sources,
+-                              healed_sinks);
++                              healed_sinks, pending);
+     }
+ unlock:
+     cluster_uninodelk(ec->xl_list, locked_on, ec->nodes, replies, output, frame,
+@@ -1953,14 +1968,14 @@ ec_manager_heal_block(ec_fop_data_t *fop, int32_t state)
+             if (fop->cbks.heal) {
+                 fop->cbks.heal(fop->req_frame, fop, fop->xl, 0, 0,
+                                (heal->good | heal->bad), heal->good, heal->bad,
+-                               NULL);
++                               0, NULL);
+             }
+ 
+             return EC_STATE_END;
+         case -EC_STATE_REPORT:
+             if (fop->cbks.heal) {
+-                fop->cbks.heal(fop->req_frame, fop, fop->xl, -1, fop->error, 0,
+-                               0, 0, NULL);
++                fop->cbks.heal(fop->req_frame, fop->data, fop->xl, -1,
++                               fop->error, 0, 0, 0, 0, NULL);
+             }
+ 
+             return EC_STATE_END;
+@@ -1997,14 +2012,15 @@ out:
+     if (fop != NULL) {
+         ec_manager(fop, error);
+     } else {
+-        func(frame, NULL, this, -1, error, 0, 0, 0, NULL);
++        func(frame, heal, this, -1, error, 0, 0, 0, 0, NULL);
+     }
+ }
+ 
+ int32_t
+ ec_heal_block_done(call_frame_t *frame, void *cookie, xlator_t *this,
+                    int32_t op_ret, int32_t op_errno, uintptr_t mask,
+-                   uintptr_t good, uintptr_t bad, dict_t *xdata)
++                   uintptr_t good, uintptr_t bad, uint32_t pending,
++                   dict_t *xdata)
+ {
+     ec_fop_data_t *fop = cookie;
+     ec_heal_t *heal = fop->data;
+@@ -2489,6 +2505,7 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+     intptr_t mbad = 0;
+     intptr_t good = 0;
+     intptr_t bad = 0;
++    uint32_t pending = 0;
+     ec_fop_data_t *fop = data;
+     gf_boolean_t blocking = _gf_false;
+     ec_heal_need_t need_heal = EC_HEAL_NONEED;
+@@ -2524,7 +2541,7 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+     if (loc->name && strlen(loc->name)) {
+         ret = ec_heal_name(frame, ec, loc->parent, (char *)loc->name,
+                            participants);
+-        if (ret == 0) {
++        if (ret >= 0) {
+             gf_msg_debug(this->name, 0,
+                          "%s: name heal "
+                          "successful on %" PRIXPTR,
+@@ -2542,7 +2559,7 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+ 
+     /* Mount triggers heal only when it detects that it must need heal, shd
+      * triggers heals periodically which need not be thorough*/
+-    if (ec->shd.iamshd) {
++    if (ec->shd.iamshd && (ret <= 0)) {
+         ec_heal_inspect(frame, ec, loc->inode, up_subvols, _gf_false, _gf_false,
+                         &need_heal);
+ 
+@@ -2552,13 +2569,15 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+             goto out;
+         }
+     }
++
+     sources = alloca0(ec->nodes);
+     healed_sinks = alloca0(ec->nodes);
+     if (IA_ISREG(loc->inode->ia_type)) {
+         ret = ec_heal_data(frame, ec, blocking, loc->inode, sources,
+                            healed_sinks);
+     } else if (IA_ISDIR(loc->inode->ia_type) && !partial) {
+-        ret = ec_heal_entry(frame, ec, loc->inode, sources, healed_sinks);
++        ret = ec_heal_entry(frame, ec, loc->inode, sources, healed_sinks,
++                            &pending);
+     } else {
+         ret = 0;
+         memcpy(sources, participants, ec->nodes);
+@@ -2588,10 +2607,11 @@ out:
+     if (fop->cbks.heal) {
+         fop->cbks.heal(fop->req_frame, fop, fop->xl, op_ret, op_errno,
+                        ec_char_array_to_mask(participants, ec->nodes),
+-                       mgood & good, mbad & bad, NULL);
++                       mgood & good, mbad & bad, pending, NULL);
+     }
+     if (frame)
+         STACK_DESTROY(frame->root);
++
+     return;
+ }
+ 
+@@ -2638,8 +2658,8 @@ void
+ ec_heal_fail(ec_t *ec, ec_fop_data_t *fop)
+ {
+     if (fop->cbks.heal) {
+-        fop->cbks.heal(fop->req_frame, NULL, ec->xl, -1, fop->error, 0, 0, 0,
+-                       NULL);
++        fop->cbks.heal(fop->req_frame, fop->data, ec->xl, -1, fop->error, 0, 0,
++                       0, 0, NULL);
+     }
+     ec_fop_data_release(fop);
+ }
+@@ -2826,7 +2846,7 @@ fail:
+     if (fop)
+         ec_fop_data_release(fop);
+     if (func)
+-        func(frame, NULL, this, -1, err, 0, 0, 0, NULL);
++        func(frame, data, this, -1, err, 0, 0, 0, 0, NULL);
+ }
+ 
+ int
+diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
+index cba111a..4f4b6aa 100644
+--- a/xlators/cluster/ec/src/ec-heald.c
++++ b/xlators/cluster/ec/src/ec-heald.c
+@@ -156,15 +156,27 @@ int
+ ec_shd_selfheal(struct subvol_healer *healer, int child, loc_t *loc,
+                 gf_boolean_t full)
+ {
++    dict_t *xdata = NULL;
++    uint32_t count;
+     int32_t ret;
+ 
+-    ret = syncop_getxattr(healer->this, loc, NULL, EC_XATTR_HEAL, NULL, NULL);
+-    if (!full && (ret >= 0) && (loc->inode->ia_type == IA_IFDIR)) {
++    ret = syncop_getxattr(healer->this, loc, NULL, EC_XATTR_HEAL, NULL, &xdata);
++    if (!full && (loc->inode->ia_type == IA_IFDIR)) {
+         /* If we have just healed a directory, it's possible that
+-         * other index entries have appeared to be healed. We put a
+-         * mark so that we can check it later and restart a scan
+-         * without delay. */
+-        healer->rerun = _gf_true;
++         * other index entries needing heal have appeared. */
++        if ((xdata != NULL) &&
++            (dict_get_uint32(xdata, EC_XATTR_HEAL_NEW, &count) == 0) &&
++            (count > 0)) {
++            /* Force a rerun of the index healer. */
++            gf_msg_debug(healer->this->name, 0, "%d more entries to heal",
++                         count);
++
++            healer->rerun = _gf_true;
++        }
++    }
++
++    if (xdata != NULL) {
++        dict_unref(xdata);
+     }
+ 
+     return ret;
+diff --git a/xlators/cluster/ec/src/ec-inode-read.c b/xlators/cluster/ec/src/ec-inode-read.c
+index f87a94a..e82e8f6 100644
+--- a/xlators/cluster/ec/src/ec-inode-read.c
++++ b/xlators/cluster/ec/src/ec-inode-read.c
+@@ -393,7 +393,8 @@ ec_manager_getxattr(ec_fop_data_t *fop, int32_t state)
+ int32_t
+ ec_getxattr_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *xl,
+                      int32_t op_ret, int32_t op_errno, uintptr_t mask,
+-                     uintptr_t good, uintptr_t bad, dict_t *xdata)
++                     uintptr_t good, uintptr_t bad, uint32_t pending,
++                     dict_t *xdata)
+ {
+     ec_fop_data_t *fop = cookie;
+     fop_getxattr_cbk_t func = fop->data;
+@@ -402,6 +403,25 @@ ec_getxattr_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *xl,
+     char *str;
+     char bin1[65], bin2[65];
+ 
++    /* We try to return the 'pending' information in xdata, but if this cannot
++     * be set, we will ignore it silently. We prefer to report the success or
++     * failure of the heal itself. */
++    if (xdata == NULL) {
++        xdata = dict_new();
++    } else {
++        dict_ref(xdata);
++    }
++    if (xdata != NULL) {
++        if (dict_set_uint32(xdata, EC_XATTR_HEAL_NEW, pending) != 0) {
++            /* dict_set_uint32() is marked as 'warn_unused_result' and gcc
++             * enforces checking the result in this case. However, we don't
++             * really care whether it succeeded or not.
++             *
++             * This empty 'if' avoids the warning, and it will be removed by
++             * the optimizer. */
++        }
++    }
++
+     if (op_ret >= 0) {
+         dict = dict_new();
+         if (dict == NULL) {
+@@ -435,11 +455,14 @@ ec_getxattr_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *xl,
+     }
+ 
+ out:
+-    func(frame, NULL, xl, op_ret, op_errno, dict, NULL);
++    func(frame, NULL, xl, op_ret, op_errno, dict, xdata);
+ 
+     if (dict != NULL) {
+         dict_unref(dict);
+     }
++    if (xdata != NULL) {
++        dict_unref(xdata);
++    }
+ 
+     return 0;
+ }
+diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
+index 34a9768..f15429d 100644
+--- a/xlators/cluster/ec/src/ec-types.h
++++ b/xlators/cluster/ec/src/ec-types.h
+@@ -186,10 +186,10 @@ struct _ec_inode {
+ 
+ typedef int32_t (*fop_heal_cbk_t)(call_frame_t *, void *, xlator_t *, int32_t,
+                                   int32_t, uintptr_t, uintptr_t, uintptr_t,
+-                                  dict_t *);
++                                  uint32_t, dict_t *);
+ typedef int32_t (*fop_fheal_cbk_t)(call_frame_t *, void *, xlator_t *, int32_t,
+                                    int32_t, uintptr_t, uintptr_t, uintptr_t,
+-                                   dict_t *);
++                                   uint32_t, dict_t *);
+ 
+ union _ec_cbk {
+     fop_access_cbk_t access;
+diff --git a/xlators/cluster/ec/src/ec.h b/xlators/cluster/ec/src/ec.h
+index 1b210d9..6f6de6d 100644
+--- a/xlators/cluster/ec/src/ec.h
++++ b/xlators/cluster/ec/src/ec.h
+@@ -18,6 +18,7 @@
+ #define EC_XATTR_SIZE EC_XATTR_PREFIX "size"
+ #define EC_XATTR_VERSION EC_XATTR_PREFIX "version"
+ #define EC_XATTR_HEAL EC_XATTR_PREFIX "heal"
++#define EC_XATTR_HEAL_NEW EC_XATTR_PREFIX "heal-new"
+ #define EC_XATTR_DIRTY EC_XATTR_PREFIX "dirty"
+ #define EC_STRIPE_CACHE_MAX_SIZE 10
+ #define EC_VERSION_SIZE 2
+-- 
+1.8.3.1
+
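The patch above threads a new 'pending' counter through the heal callback
chain (see the fop_heal_cbk_t typedef change in ec-types.h). A minimal
sketch of a consumer of the extended signature — the function name is
hypothetical and the glusterfs types are assumed to be in scope:

    /* Hypothetical consumer of the extended heal callback. The only new
     * argument is 'pending': the number of entries that still need heal
     * after a directory heal, as counted by ec_heal_names(). */
    static int32_t
    example_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int32_t op_ret, int32_t op_errno, uintptr_t mask,
                     uintptr_t good, uintptr_t bad, uint32_t pending,
                     dict_t *xdata)
    {
        if ((op_ret >= 0) && (pending > 0)) {
            /* More index entries remain: trigger another heal pass
             * instead of waiting for the next periodic scan. */
        }
        return 0;
    }

This is how ec-heald.c uses the count above: a non-zero value simply sets
healer->rerun instead of unconditionally rescanning.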
diff --git a/SOURCES/0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch b/SOURCES/0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
new file mode 100644
index 0000000..b7b9f04
--- /dev/null
+++ b/SOURCES/0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
@@ -0,0 +1,182 @@
+From ed73f2046dd3fbb22341bf9fc004087d90dfbe6d Mon Sep 17 00:00:00 2001
+From: Raghavendra Bhat <raghavendra@redhat.com>
+Date: Mon, 15 Apr 2019 14:09:34 -0400
+Subject: [PATCH 458/465] features/bit-rot-stub: clean the mutex after
+ cancelling the signer thread
+
+When the bit-rot feature is disabled, the signer thread of the bit-rot-stub
+xlator (the thread which performs the setxattr of the signature on to the
+disk) is cancelled. But if the cancelled signer thread had already held
+the mutex (&priv->lock) which it uses to monitor the queue of files to
+be signed, then the mutex is never released. This creates problems in
+the future when the feature is enabled again: both the new instance of
+the signer thread and the regular thread which enqueues the files to be
+signed will be blocked on this mutex.
+
+So, as part of cancelling the signer thread, unlock the mutex associated
+with it as well using pthread_cleanup_push and pthread_cleanup_pop.
+
+Upstream patch:
+	> patch: https://review.gluster.org/22572
+	> fixes: #bz1700078
+	> Change-Id: Ib761910caed90b268e69794ddeb108165487af40
+
+Change-Id: Ib761910caed90b268e69794ddeb108165487af40
+BUG: 1851424
+Signed-off-by: Raghavendra M <raghavendra@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208304
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../bit-rot/src/stub/bit-rot-stub-messages.h       |  4 +-
+ xlators/features/bit-rot/src/stub/bit-rot-stub.c   | 62 +++++++++++++++++++---
+ 2 files changed, 59 insertions(+), 7 deletions(-)
+
+diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h b/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h
+index 7f07f29..155802b 100644
+--- a/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h
++++ b/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h
+@@ -39,6 +39,8 @@ GLFS_MSGID(BITROT_STUB, BRS_MSG_NO_MEMORY, BRS_MSG_SET_EVENT_FAILED,
+            BRS_MSG_BAD_HANDLE_DIR_NULL, BRS_MSG_BAD_OBJ_THREAD_FAIL,
+            BRS_MSG_BAD_OBJ_DIR_CLOSE_FAIL, BRS_MSG_LINK_FAIL,
+            BRS_MSG_BAD_OBJ_UNLINK_FAIL, BRS_MSG_DICT_SET_FAILED,
+-           BRS_MSG_PATH_GET_FAILED, BRS_MSG_NULL_LOCAL);
++           BRS_MSG_PATH_GET_FAILED, BRS_MSG_NULL_LOCAL,
++           BRS_MSG_SPAWN_SIGN_THRD_FAILED, BRS_MSG_KILL_SIGN_THREAD,
++           BRS_MSG_NON_BITD_PID, BRS_MSG_SIGN_PREPARE_FAIL);
+ 
+ #endif /* !_BITROT_STUB_MESSAGES_H_ */
+diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
+index 3f48a4b..c3f81bc 100644
+--- a/xlators/features/bit-rot/src/stub/bit-rot-stub.c
++++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
+@@ -26,6 +26,15 @@
+ 
+ #define BR_STUB_REQUEST_COOKIE 0x1
+ 
++void
++br_stub_lock_cleaner(void *arg)
++{
++    pthread_mutex_t *clean_mutex = arg;
++
++    pthread_mutex_unlock(clean_mutex);
++    return;
++}
++
+ void *
+ br_stub_signth(void *);
+ 
+@@ -166,8 +175,11 @@ init(xlator_t *this)
+ 
+     ret = gf_thread_create(&priv->signth, NULL, br_stub_signth, this,
+                            "brssign");
+-    if (ret != 0)
++    if (ret != 0) {
++        gf_msg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SPAWN_SIGN_THRD_FAILED,
++               "failed to create the new thread for signer");
+         goto cleanup_lock;
++    }
+ 
+     ret = br_stub_bad_object_container_init(this, priv);
+     if (ret) {
+@@ -214,11 +226,15 @@ reconfigure(xlator_t *this, dict_t *options)
+     priv = this->private;
+ 
+     GF_OPTION_RECONF("bitrot", priv->do_versioning, options, bool, err);
+-    if (priv->do_versioning) {
++    if (priv->do_versioning && !priv->signth) {
+         ret = gf_thread_create(&priv->signth, NULL, br_stub_signth, this,
+                                "brssign");
+-        if (ret != 0)
++        if (ret != 0) {
++            gf_msg(this->name, GF_LOG_WARNING, 0,
++                   BRS_MSG_SPAWN_SIGN_THRD_FAILED,
++                   "failed to create the new thread for signer");
+             goto err;
++        }
+ 
+         ret = br_stub_bad_object_container_init(this, priv);
+         if (ret) {
+@@ -232,8 +248,11 @@ reconfigure(xlator_t *this, dict_t *options)
+                 gf_msg(this->name, GF_LOG_ERROR, 0,
+                        BRS_MSG_CANCEL_SIGN_THREAD_FAILED,
+                        "Could not cancel sign serializer thread");
++            } else {
++                gf_msg(this->name, GF_LOG_INFO, 0, BRS_MSG_KILL_SIGN_THREAD,
++                       "killed the signer thread");
++                priv->signth = 0;
+             }
+-            priv->signth = 0;
+         }
+ 
+         if (priv->container.thread) {
+@@ -902,6 +921,24 @@ br_stub_signth(void *arg)
+ 
+     THIS = this;
+     while (1) {
++        /*
++         * Disabling the bit-rot feature leads to this thread getting
++         * cleaned up by reconfigure via a call to the function
++         * gf_thread_cleanup_xint (which in turn calls pthread_cancel
++         * and pthread_join). But if this thread held the mutex
++         * &priv->lock at the time of cancellation, it leads to a
++         * deadlock in the future when bit-rot is enabled again (which
++         * spawns a new thread that can't take the lock because the
++         * mutex is still held by the previous instance of the thread
++         * that got killed). The br_stub_handle_object_signature
++         * function, which is called whenever a file has to be signed,
++         * also gets blocked as it too attempts to acquire &priv->lock.
++         *
++         * So, arrange for the lock to be unlocked as part of the
++         * cleanup of this thread using pthread_cleanup_push and
++         * pthread_cleanup_pop.
++         */
++        pthread_cleanup_push(br_stub_lock_cleaner, &priv->lock);
+         pthread_mutex_lock(&priv->lock);
+         {
+             while (list_empty(&priv->squeue))
+@@ -912,6 +949,7 @@ br_stub_signth(void *arg)
+             list_del_init(&sigstub->list);
+         }
+         pthread_mutex_unlock(&priv->lock);
++        pthread_cleanup_pop(0);
+ 
+         call_resume(sigstub->stub);
+ 
+@@ -1042,12 +1080,22 @@ br_stub_handle_object_signature(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ 
+     priv = this->private;
+ 
+-    if (frame->root->pid != GF_CLIENT_PID_BITD)
++    if (frame->root->pid != GF_CLIENT_PID_BITD) {
++        gf_msg(this->name, GF_LOG_WARNING, op_errno, BRS_MSG_NON_BITD_PID,
++               "PID %d from where signature request"
++               "came, does not belong to bit-rot daemon."
++               "Unwinding the fop",
++               frame->root->pid);
+         goto dofop;
++    }
+ 
+     ret = br_stub_prepare_signature(this, dict, fd->inode, sign, &fakesuccess);
+-    if (ret)
++    if (ret) {
++        gf_msg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SIGN_PREPARE_FAIL,
++               "failed to prepare the signature for %s. Unwinding the fop",
++               uuid_utoa(fd->inode->gfid));
+         goto dofop;
++    }
+     if (fakesuccess) {
+         op_ret = op_errno = 0;
+         goto dofop;
+@@ -1387,6 +1435,8 @@ br_stub_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+     /* object signature request */
+     ret = dict_get_bin(dict, GLUSTERFS_SET_OBJECT_SIGNATURE, (void **)&sign);
+     if (!ret) {
++        gf_msg_debug(this->name, 0, "got SIGNATURE request on %s",
++                     uuid_utoa(fd->inode->gfid));
+         br_stub_handle_object_signature(frame, this, fd, dict, sign, xdata);
+         goto done;
+     }
+-- 
+1.8.3.1
+
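For reference, a self-contained sketch of the pthread_cleanup_push /
pthread_cleanup_pop pattern this patch applies, reduced to a generic
queue consumer. The names and the integer "queue" are illustrative,
not glusterfs code; only the push/lock/wait/unlock/pop shape mirrors
br_stub_signth():

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int pending_items = 0;

    static void
    unlock_on_cancel(void *arg)
    {
        /* Runs only if the thread is cancelled while the handler is armed. */
        pthread_mutex_unlock((pthread_mutex_t *)arg);
    }

    static void *
    consumer(void *arg)
    {
        for (;;) {
            /* Arm the handler before taking the lock, as the patch does,
             * so a cancellation inside pthread_cond_wait() (a standard
             * cancellation point) still releases the mutex. */
            pthread_cleanup_push(unlock_on_cancel, &lock);
            pthread_mutex_lock(&lock);
            while (pending_items == 0)
                pthread_cond_wait(&cond, &lock);
            pending_items--;
            pthread_mutex_unlock(&lock);
            pthread_cleanup_pop(0); /* 0: don't run the handler normally */
            /* ... process the dequeued item outside the lock ... */
        }
        return NULL;
    }

pthread_cleanup_push/pop are macros that must be balanced in the same
lexical scope, which is why the patch places both inside the while loop.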
diff --git a/SOURCES/0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch b/SOURCES/0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
new file mode 100644
index 0000000..2c9b66e
--- /dev/null
+++ b/SOURCES/0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
@@ -0,0 +1,181 @@
+From 80eef2f52bb92ed740ac00eeb11ee7a3e7fffff2 Mon Sep 17 00:00:00 2001
+From: Raghavendra Bhat <raghavendra@redhat.com>
+Date: Mon, 11 Mar 2019 12:16:50 -0400
+Subject: [PATCH 459/465] features/bit-rot: Unconditionally sign the files
+ during oneshot crawl
+
+Currently the bit-rot feature has an issue with disabling and re-enabling
+it on the same volume. Consider enabling bit-rot detection, which goes on
+to crawl and sign all the files present in the volume. Then some files are
+modified and the bit-rot daemon signs the modified files with the correct
+signature. Now, disable the bit-rot feature. While signing and scrubbing
+are not happening, the previous checksums of the files continue to exist
+as extended attributes. Now, if some files with checksum xattrs get
+modified, they are not signed with a new signature as the feature is off.
+
+At this point, if the feature is enabled again, the bit-rot daemon will
+sign those files which do not have any bit-rot-specific xattrs (i.e.
+those files which were created after bit-rot was disabled), whereas the
+files with bit-rot xattrs won't get signed with a proper new checksum.
+If the scrubber then runs, it finds the on-disk checksum and the actual
+checksum of the file to be different (because the file got modified) and
+marks the file as corrupted.
+
+FIX:
+
+The fix is to unconditionally sign the files when the bit-rot daemon
+comes up (instead of skipping the files with bit-rot xattrs).
+
+upstream fix:
+	> patch: https://review.gluster.org/#/c/glusterfs/+/22360/
+	> fixes: #bz1700078
+	> Change-ID: Iadfb47dd39f7e2e77f22d549a4a07a385284f4f5
+
+Change-Id: Iadfb47dd39f7e2e77f22d549a4a07a385284f4f5
+BUG: 1851424
+Signed-off-by: Raghavendra M <raghavendra@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208305
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bitrot/bug-1700078.t                  | 87 +++++++++++++++++++++++++++++
+ xlators/features/bit-rot/src/bitd/bit-rot.c | 15 ++++-
+ 2 files changed, 101 insertions(+), 1 deletion(-)
+ create mode 100644 tests/bitrot/bug-1700078.t
+
+diff --git a/tests/bitrot/bug-1700078.t b/tests/bitrot/bug-1700078.t
+new file mode 100644
+index 0000000..f273742
+--- /dev/null
++++ b/tests/bitrot/bug-1700078.t
+@@ -0,0 +1,87 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++
++cleanup;
++
++## Start glusterd
++TEST glusterd;
++TEST pidof glusterd;
++
+## Let's create and start the volume
++TEST $CLI volume create $V0 $H0:$B0/${V0}1
++TEST $CLI volume start $V0
++
++## Enable bitrot for volume $V0
++TEST $CLI volume bitrot $V0 enable
++
+## Turn off quick-read so that it won't cache the contents
+# of the file in lookup. For corrupted files, reads might
+# otherwise be served from the cache instead of returning
+# an error.
++TEST $CLI volume set $V0 performance.quick-read off
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Active' scrub_status $V0 'State of scrub'
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/bitd.log' scrub_status $V0 'Bitrot error log location'
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/scrub.log' scrub_status $V0 'Scrubber error log location'
++
++## Set expiry-timeout to 1 sec
++TEST $CLI volume set $V0 features.expiry-time 1
++
++##Mount $V0
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
++
+## Turn off the quick-read xlator so that the contents are not served from
+# the quick-read cache.
++TEST $CLI volume set $V0 performance.quick-read off
++
++#Create sample file
++TEST `echo "1234" > $M0/FILE1`
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' "/$B0/${V0}1/FILE1"
++
++##disable bitrot
++TEST $CLI volume bitrot $V0 disable
++
++## modify the file
++TEST `echo "write" >> $M0/FILE1`
++
++# unmount and remount when the file has to be accessed.
++# This is to ensure that, when the remount happens,
++# and the file is read, its contents are served from the
++# brick instead of cache.
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++
++##enable bitrot
++TEST $CLI volume bitrot $V0 enable
++
++# expiry time is set to 1 second. Hence sleep for 2 seconds for the
++# oneshot crawler to finish its crawling and sign the file properly.
++sleep 2
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Active' scrub_status $V0 'State of scrub'
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/bitd.log' scrub_status $V0 'Bitrot error log location'
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/scrub.log' scrub_status $V0 'Scrubber error log location'
++
++## Ondemand scrub
++TEST $CLI volume bitrot $V0 scrub ondemand
++
+# The scrub ondemand CLI command just ensures that
+# the scrubber has received the ondemand scrub directive
+# and started. Sleep for 2 seconds for the scrubber to
+# finish crawling and marking file(s) as bad (if it finds
+# that corruption has happened), which are filesystem operations.
++sleep 2
++
++TEST ! getfattr -n 'trusted.bit-rot.bad-file' $B0/${V0}1/FILE1
++
++##Mount $V0
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
++
++TEST cat $M0/FILE1
++
++cleanup;
+diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
+index b8feef7..424c0d5 100644
+--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
++++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
+@@ -973,6 +973,7 @@ bitd_oneshot_crawl(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+     int32_t ret = -1;
+     inode_t *linked_inode = NULL;
+     gf_boolean_t need_signing = _gf_false;
++    gf_boolean_t need_reopen = _gf_true;
+ 
+     GF_VALIDATE_OR_GOTO("bit-rot", subvol, out);
+     GF_VALIDATE_OR_GOTO("bit-rot", data, out);
+@@ -1046,6 +1047,18 @@ bitd_oneshot_crawl(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+                    uuid_utoa(linked_inode->gfid));
+     } else {
+         need_signing = br_check_object_need_sign(this, xattr, child);
++
++        /*
++         * If we are here, it means the bitrot daemon has started.
++         * Whether it is a simple restart of the daemon or it was
++         * started because the feature was enabled is hard to
++         * determine. Hence, even if need_signing is false (because
++         * the bit-rot version and signature are present), still go
++         * ahead and sign it.
++         */
++        if (!need_signing) {
++            need_signing = _gf_true;
++            need_reopen = _gf_true;
++        }
+     }
+ 
+     if (!need_signing)
+@@ -1054,7 +1067,7 @@ bitd_oneshot_crawl(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+     gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_TRIGGER_SIGN,
+            "Triggering signing for %s [GFID: %s | Brick: %s]", loc.path,
+            uuid_utoa(linked_inode->gfid), child->brick_path);
+-    br_trigger_sign(this, child, linked_inode, &loc, _gf_true);
++    br_trigger_sign(this, child, linked_inode, &loc, need_reopen);
+ 
+     ret = 0;
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch b/SOURCES/0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
new file mode 100644
index 0000000..e31349a
--- /dev/null
+++ b/SOURCES/0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
@@ -0,0 +1,152 @@
+From b166826b283d9071532174ebbec857dea600064b Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Thu, 23 Jul 2020 11:07:32 +0530
+Subject: [PATCH 460/465] cluster/ec: Remove stale entries from indices/xattrop
+ folder
+
+Problem:
+If a gfid is present in the indices/xattrop folder while
+the file/dir is actually healthy and all the xattrs are healthy,
+it causes a lot of lookups by shd on an entry which does not need
+to be healed.
+This whole process eats up a lot of CPU without doing meaningful
+work.
+
+Solution:
+Set the trusted.ec.dirty xattr of the entry so that the actual heal
+process happens and, at the end of it during the unset of dirty, the
+gfid entry from indices/xattrop is removed.
+
+>Upstream patch : https://review.gluster.org/#/c/glusterfs/+/24765/
+>Fixes: #1385
+
+Change-Id: Ib1b9377d8dda384bba49523e9ff6ba9f0699cc1b
+BUG: 1785714
+Signed-off-by: Ashish Pandey <aspandey@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208591
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/ec/src/ec-heal.c  | 73 ++++++++++++++++++++++++++++++++++++++-
+ xlators/cluster/ec/src/ec-types.h |  7 +++-
+ 2 files changed, 78 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index e2de879..7d25853 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -2488,6 +2488,59 @@ out:
+     return ret;
+ }
+ 
++int
++ec_heal_set_dirty_without_lock(call_frame_t *frame, ec_t *ec, inode_t *inode)
++{
++    int i = 0;
++    int ret = 0;
++    dict_t **xattr = NULL;
++    loc_t loc = {0};
++    uint64_t dirty_xattr[EC_VERSION_SIZE] = {0};
++    unsigned char *on = NULL;
++    default_args_cbk_t *replies = NULL;
++    dict_t *dict = NULL;
++
++    /* Allocate the required memory */
++    loc.inode = inode_ref(inode);
++    gf_uuid_copy(loc.gfid, inode->gfid);
++    on = alloca0(ec->nodes);
++    EC_REPLIES_ALLOC(replies, ec->nodes);
++    xattr = GF_CALLOC(ec->nodes, sizeof(*xattr), gf_common_mt_pointer);
++    if (!xattr) {
++        ret = -ENOMEM;
++        goto out;
++    }
++    dict = dict_new();
++    if (!dict) {
++        ret = -ENOMEM;
++        goto out;
++    }
++    for (i = 0; i < ec->nodes; i++) {
++        xattr[i] = dict;
++        on[i] = 1;
++    }
++    dirty_xattr[EC_METADATA_TXN] = hton64(1);
++    ret = dict_set_static_bin(dict, EC_XATTR_DIRTY, dirty_xattr,
++                              (sizeof(*dirty_xattr) * EC_VERSION_SIZE));
++    if (ret < 0) {
++        ret = -ENOMEM;
++        goto out;
++    }
++    PARALLEL_FOP_ONLIST(ec->xl_list, on, ec->nodes, replies, frame,
++                        ec_wind_xattrop_parallel, &loc, GF_XATTROP_ADD_ARRAY64,
++                        xattr, NULL);
++out:
++    if (dict) {
++        dict_unref(dict);
++    }
++    if (xattr) {
++        GF_FREE(xattr);
++    }
++    cluster_replies_wipe(replies, ec->nodes);
++    loc_wipe(&loc);
++    return ret;
++}
++
+ void
+ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+ {
+@@ -2563,7 +2616,18 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+         ec_heal_inspect(frame, ec, loc->inode, up_subvols, _gf_false, _gf_false,
+                         &need_heal);
+ 
+-        if (need_heal == EC_HEAL_NONEED) {
++        if (need_heal == EC_HEAL_PURGE_INDEX) {
++            gf_msg(ec->xl->name, GF_LOG_INFO, 0, EC_MSG_HEAL_FAIL,
++                   "Index entry needs to be purged for: %s ",
++                   uuid_utoa(loc->gfid));
++            /* We need to send an xattrop to set the dirty flag so that the
++             * entry can be healed and the index entry removed. We need not
++             * take a lock on this entry to do so, as we are just setting the
++             * dirty flag, which increments the trusted.ec.dirty count rather
++             * than setting a new value.
++             * This makes sure that it does not interfere with other fops. */
++            ec_heal_set_dirty_without_lock(frame, ec, loc->inode);
++        } else if (need_heal == EC_HEAL_NONEED) {
+             gf_msg(ec->xl->name, GF_LOG_DEBUG, 0, EC_MSG_HEAL_FAIL,
+                    "Heal is not required for : %s ", uuid_utoa(loc->gfid));
+             goto out;
+@@ -2958,6 +3022,13 @@ _need_heal_calculate(ec_t *ec, uint64_t *dirty, unsigned char *sources,
+                     goto out;
+                 }
+             }
++            /* If the lock count is 0, all dirty flags are 0 and all the
++             * versions are matching, there is nothing to heal. It looks
++             * like something went wrong while removing the index entries
++             * after completing a successful heal or fop. In this case
++             * we need to remove this index entry to avoid triggering heal
++             * in a loop and causing lookups again and again. */
++            *need_heal = EC_HEAL_PURGE_INDEX;
+         } else {
+             for (i = 0; i < ec->nodes; i++) {
+                 /* Since each lock can only increment the dirty
+diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
+index f15429d..700dc39 100644
+--- a/xlators/cluster/ec/src/ec-types.h
++++ b/xlators/cluster/ec/src/ec-types.h
+@@ -130,7 +130,12 @@ typedef void (*ec_resume_f)(ec_fop_data_t *, int32_t);
+ 
+ enum _ec_read_policy { EC_ROUND_ROBIN, EC_GFID_HASH, EC_READ_POLICY_MAX };
+ 
+-enum _ec_heal_need { EC_HEAL_NONEED, EC_HEAL_MAYBE, EC_HEAL_MUST };
++enum _ec_heal_need {
++    EC_HEAL_NONEED,
++    EC_HEAL_MAYBE,
++    EC_HEAL_MUST,
++    EC_HEAL_PURGE_INDEX
++};
+ 
+ enum _ec_stripe_part { EC_STRIPE_HEAD, EC_STRIPE_TAIL };
+ 
+-- 
+1.8.3.1
+
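Condensed, the new decision in ec_heal_do() has the following shape. This
is a paraphrase of the hunk above for readability, not verbatim code (the
real code uses an if/else-if chain and continues with the heal after the
purge):

    /* Paraphrased dispatch on the extended _ec_heal_need enum. */
    switch (need_heal) {
        case EC_HEAL_PURGE_INDEX:
            /* Entry is healthy but a stale index entry exists: mark it
             * dirty without a lock so a real heal runs once and, while
             * unsetting dirty, removes the gfid from indices/xattrop. */
            ec_heal_set_dirty_without_lock(frame, ec, loc->inode);
            break;
        case EC_HEAL_NONEED:
            /* Nothing to do for this entry. */
            break;
        default:
            /* EC_HEAL_MAYBE / EC_HEAL_MUST: continue with the heal. */
            break;
    }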
diff --git a/SOURCES/0461-geo-replication-Fix-IPv6-parsing.patch b/SOURCES/0461-geo-replication-Fix-IPv6-parsing.patch
new file mode 100644
index 0000000..098be5f
--- /dev/null
+++ b/SOURCES/0461-geo-replication-Fix-IPv6-parsing.patch
@@ -0,0 +1,127 @@
+From d425ed54261d5bc19aa853854cc3b64647e3c897 Mon Sep 17 00:00:00 2001
+From: Aravinda Vishwanathapura <aravinda@kadalu.io>
+Date: Sun, 12 Jul 2020 12:42:36 +0530
+Subject: [PATCH 461/465] geo-replication: Fix IPv6 parsing
+
+Brick paths in Volinfo use `:` as a delimiter, and Geo-rep splits on
+the `:` char; this goes wrong with IPv6 hosts, which contain `:`.
+
+This patch handles the IPv6 case and performs the split properly.
+Backport of:
+   >Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/24706
+   >Fixes: #1366
+   >Change-Id: I25e88d693744381c0ccf3c1dbf1541b84be2499d
+   >Signed-off-by: Aravinda Vishwanathapura <aravinda@kadalu.io>
+
+BUG: 1855966
+Change-Id: I25e88d693744381c0ccf3c1dbf1541b84be2499d
+Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208610
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ geo-replication/syncdaemon/master.py     |  5 ++--
+ geo-replication/syncdaemon/syncdutils.py | 43 +++++++++++++++++++++++++++++---
+ 2 files changed, 43 insertions(+), 5 deletions(-)
+
+diff --git a/geo-replication/syncdaemon/master.py b/geo-replication/syncdaemon/master.py
+index 3f98337..08e98f8 100644
+--- a/geo-replication/syncdaemon/master.py
++++ b/geo-replication/syncdaemon/master.py
+@@ -26,7 +26,8 @@ from rconf import rconf
+ from syncdutils import Thread, GsyncdError, escape_space_newline
+ from syncdutils import unescape_space_newline, gauxpfx, escape
+ from syncdutils import lstat, errno_wrap, FreeObject, lf, matching_disk_gfid
+-from syncdutils import NoStimeAvailable, PartialHistoryAvailable
++from syncdutils import NoStimeAvailable, PartialHistoryAvailable, host_brick_split
++
+ 
+ URXTIME = (-1, 0)
+ 
+@@ -1466,7 +1467,7 @@ class GMasterChangelogMixin(GMasterCommon):
+         node = rconf.args.resource_remote
+         node_data = node.split("@")
+         node = node_data[-1]
+-        remote_node_ip = node.split(":")[0]
++        remote_node_ip, _ = host_brick_split(node)
+         self.status.set_slave_node(remote_node_ip)
+ 
+     def changelogs_batch_process(self, changes):
+diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
+index 7560fa1..f43e13b 100644
+--- a/geo-replication/syncdaemon/syncdutils.py
++++ b/geo-replication/syncdaemon/syncdutils.py
+@@ -883,6 +883,19 @@ class Popen(subprocess.Popen):
+             self.errfail()
+ 
+ 
++def host_brick_split(value):
++    """
++    IPv6 compatible way to split and get the host
++    and brick information. Example inputs:
++    node1.example.com:/exports/bricks/brick1/brick
++    fe80::af0f:df82:844f:ef66%utun0:/exports/bricks/brick1/brick
++    """
++    parts = value.split(":")
++    brick = parts[-1]
++    hostparts = parts[0:-1]
++    return (":".join(hostparts), brick)
++
++
+ class Volinfo(object):
+ 
+     def __init__(self, vol, host='localhost', prelude=[], master=True):
+@@ -925,7 +938,7 @@ class Volinfo(object):
+     @memoize
+     def bricks(self):
+         def bparse(b):
+-            host, dirp = b.find("name").text.split(':', 2)
++            host, dirp = host_brick_split(b.find("name").text)
+             return {'host': host, 'dir': dirp, 'uuid': b.find("hostUuid").text}
+         return [bparse(b) for b in self.get('brick')]
+ 
+@@ -1001,6 +1014,16 @@ class VolinfoFromGconf(object):
+     def is_hot(self, brickpath):
+         return False
+ 
++    def is_uuid(self, value):
++        try:
++            uuid.UUID(value)
++            return True
++        except ValueError:
++            return False
++
++    def possible_path(self, value):
++        return "/" in value
++
+     @property
+     @memoize
+     def bricks(self):
+@@ -1014,8 +1037,22 @@ class VolinfoFromGconf(object):
+         out = []
+         for b in bricks_data:
+             parts = b.split(":")
+-            bpath = parts[2] if len(parts) == 3 else ""
+-            out.append({"host": parts[1], "dir": bpath, "uuid": parts[0]})
++            b_uuid = None
++            if self.is_uuid(parts[0]):
++                b_uuid = parts[0]
++                # Set all parts except first
++                parts = parts[1:]
++
++            if self.possible_path(parts[-1]):
++                bpath = parts[-1]
++                # Set all parts except last
++                parts = parts[0:-1]
++
++            out.append({
++                "host": ":".join(parts),   # if remaining parts are IPv6 name
++                "dir": bpath,
++                "uuid": b_uuid
++            })
+ 
+         return out
+ 
+-- 
+1.8.3.1
+
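The splitting rule host_brick_split() introduces is simply "split at the
last colon; everything before it is the host". The repository's helper is
the Python one above; the following standalone C sketch only demonstrates
the same rule, and all names in it are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Split "host:brick" at the *last* ':' so IPv6 hosts, which contain
     * ':' themselves, stay intact. Illustrative only. */
    static int
    host_brick_split(const char *value, char *host, size_t hlen,
                     const char **brick)
    {
        const char *sep = strrchr(value, ':');
        if (sep == NULL)
            return -1;                       /* no brick part present */
        size_t n = (size_t)(sep - value);
        if (n >= hlen)
            n = hlen - 1;                    /* truncate defensively */
        memcpy(host, value, n);
        host[n] = '\0';
        *brick = sep + 1;
        return 0;
    }

    int
    main(void)
    {
        char host[256];
        const char *brick;
        if (host_brick_split("fe80::844f:ef66:/bricks/brick1/brick", host,
                             sizeof(host), &brick) == 0)
            printf("host=%s brick=%s\n", host, brick);
        return 0;
    }

With the IPv6 input above, the host keeps its internal colons and only
the trailing "/bricks/brick1/brick" becomes the brick path.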
diff --git a/SOURCES/0462-Issue-with-gf_fill_iatt_for_dirent.patch b/SOURCES/0462-Issue-with-gf_fill_iatt_for_dirent.patch
new file mode 100644
index 0000000..aa5fd21
--- /dev/null
+++ b/SOURCES/0462-Issue-with-gf_fill_iatt_for_dirent.patch
@@ -0,0 +1,43 @@
+From f027734165374979bd0bff8ea059dfaadca85e07 Mon Sep 17 00:00:00 2001
+From: Soumya Koduri <skoduri@redhat.com>
+Date: Thu, 2 Jul 2020 02:07:56 +0530
+Subject: [PATCH 462/465] Issue with gf_fill_iatt_for_dirent
+
+In "gf_fill_iatt_for_dirent()", while calculating inode_path for loc,
+the inode should be of parent's. Instead it is loc.inode which results in error
+ and eventually lookup/readdirp fails.
+
+This patch fixes the same.
+
+This is backport of below mainstream fix :
+
+> Change-Id: Ied086234a4634e8cb13520521ac547c87b3c76b5
+> Fixes: #1351
+> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24661/
+
+Change-Id: Ied086234a4634e8cb13520521ac547c87b3c76b5
+BUG: 1853189
+Signed-off-by: Soumya Koduri <skoduri@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/208691
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/gf-dirent.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libglusterfs/src/gf-dirent.c b/libglusterfs/src/gf-dirent.c
+index f289723..3fa67f2 100644
+--- a/libglusterfs/src/gf-dirent.c
++++ b/libglusterfs/src/gf-dirent.c
+@@ -277,7 +277,7 @@ gf_fill_iatt_for_dirent(gf_dirent_t *entry, inode_t *parent, xlator_t *subvol)
+     gf_uuid_copy(loc.pargfid, parent->gfid);
+     loc.name = entry->d_name;
+     loc.parent = inode_ref(parent);
+-    ret = inode_path(loc.inode, entry->d_name, &path);
++    ret = inode_path(loc.parent, entry->d_name, &path);
+     loc.path = path;
+     if (ret < 0)
+         goto out;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch b/SOURCES/0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
new file mode 100644
index 0000000..b47cdd1
--- /dev/null
+++ b/SOURCES/0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
@@ -0,0 +1,87 @@
+From 7d87933f648092ae55d57a96fd06e3df975d764c Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Tue, 18 Aug 2020 10:33:48 +0530
+Subject: [PATCH 463/465] cluster/ec: Change handling of heal failure to avoid
+ crash
+
+Problem:
+ec_getxattr_heal_cbk was called with NULL as its second argument
+when the heal was failing.
+The function dereferenced the "cookie" argument, which caused a crash.
+
+Solution:
+The cookie is changed to carry the value that was supposed to be
+stored in fop->data, so even when fop is NULL on the error path
+there is no NULL dereference.
+
+Thanks to Xavi for the suggestion about the fix.
+
+>Upstream patch: https://review.gluster.org/#/c/glusterfs/+/23050/
+>fixes: bz#1729085
+
+Change-Id: I0798000d5cadb17c3c2fbfa1baf77033ffc2bb8c
+BUG: 1852736
+Reviewed-on: https://code.engineering.redhat.com/gerrit/209012
+Tested-by: Ashish Pandey <aspandey@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ xlators/cluster/ec/src/ec-heal.c       | 11 ++++++-----
+ xlators/cluster/ec/src/ec-inode-read.c |  4 ++--
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 7d25853..6e6948b 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -1966,7 +1966,7 @@ ec_manager_heal_block(ec_fop_data_t *fop, int32_t state)
+ 
+         case EC_STATE_REPORT:
+             if (fop->cbks.heal) {
+-                fop->cbks.heal(fop->req_frame, fop, fop->xl, 0, 0,
++                fop->cbks.heal(fop->req_frame, fop->data, fop->xl, 0, 0,
+                                (heal->good | heal->bad), heal->good, heal->bad,
+                                0, NULL);
+             }
+@@ -2022,10 +2022,11 @@ ec_heal_block_done(call_frame_t *frame, void *cookie, xlator_t *this,
+                    uintptr_t good, uintptr_t bad, uint32_t pending,
+                    dict_t *xdata)
+ {
+-    ec_fop_data_t *fop = cookie;
+-    ec_heal_t *heal = fop->data;
++    ec_heal_t *heal = cookie;
+ 
+-    fop->heal = NULL;
++    if (heal->fop) {
++        heal->fop->heal = NULL;
++    }
+     heal->fop = NULL;
+     heal->error = op_ret < 0 ? op_errno : 0;
+     syncbarrier_wake(heal->data);
+@@ -2669,7 +2670,7 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+ out:
+     ec_reset_entry_healing(fop);
+     if (fop->cbks.heal) {
+-        fop->cbks.heal(fop->req_frame, fop, fop->xl, op_ret, op_errno,
++        fop->cbks.heal(fop->req_frame, fop->data, fop->xl, op_ret, op_errno,
+                        ec_char_array_to_mask(participants, ec->nodes),
+                        mgood & good, mbad & bad, pending, NULL);
+     }
+diff --git a/xlators/cluster/ec/src/ec-inode-read.c b/xlators/cluster/ec/src/ec-inode-read.c
+index e82e8f6..c50d0ad 100644
+--- a/xlators/cluster/ec/src/ec-inode-read.c
++++ b/xlators/cluster/ec/src/ec-inode-read.c
+@@ -396,8 +396,8 @@ ec_getxattr_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *xl,
+                      uintptr_t good, uintptr_t bad, uint32_t pending,
+                      dict_t *xdata)
+ {
+-    ec_fop_data_t *fop = cookie;
+-    fop_getxattr_cbk_t func = fop->data;
++    fop_getxattr_cbk_t func = cookie;
++
+     ec_t *ec = xl->private;
+     dict_t *dict = NULL;
+     char *str;
+-- 
+1.8.3.1
+
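The underlying idea — carry what the callback needs in the cookie rather
than digging it out of an object that may be NULL on the error path — can
be shown with a tiny standalone sketch. All names here are made up for
illustration; none of them are glusterfs APIs:

    #include <stdio.h>
    #include <stdlib.h>

    /* The callback gets its state via the cookie, so it never
     * dereferences an op object that is NULL on the error path. */
    struct heal_state {
        int error;
    };

    static void
    heal_done(void *cookie, int op_ret)
    {
        struct heal_state *state = cookie;  /* valid on both paths */

        state->error = (op_ret < 0) ? op_ret : 0;
    }

    static void
    start_heal(struct heal_state *state, int fail)
    {
        void *op = fail ? NULL : malloc(1); /* stand-in for fop creation */

        if (op == NULL) {
            /* Error path: there is no op to dig the state out of, but
             * the cookie still carries it. */
            heal_done(state, -1);
            return;
        }
        heal_done(state, 0);
        free(op);
    }

    int
    main(void)
    {
        struct heal_state s = {0};

        start_heal(&s, 1);
        printf("error=%d\n", s.error);
        return 0;
    }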
diff --git a/SOURCES/0464-storage-posix-Remove-nr_files-usage.patch b/SOURCES/0464-storage-posix-Remove-nr_files-usage.patch
new file mode 100644
index 0000000..d98e33d
--- /dev/null
+++ b/SOURCES/0464-storage-posix-Remove-nr_files-usage.patch
@@ -0,0 +1,102 @@
+From 7c51addf7912a94320e6b148bd66f2dbf274c533 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Mon, 11 Mar 2019 14:04:39 +0530
+Subject: [PATCH 464/465] storage/posix: Remove nr_files usage
+
+nr_files is supposed to represent the number of files opened in posix.
+The present logic doesn't handle anon-fds, because of which the
+counts would always be wrong.
+
+I don't remember anyone using this value to debug any problem, probably
+because we always have 'ls -l /proc/<pid>/fd', which not only prints the
+fds that are active but also prints their paths. It also shows directories
+and anon-fds that actually opened the file. So this code is removed
+instead of fixing the buggy logic behind nr_files.
+
+> fixes bz#1688106
+> Change-Id: Ibf8713fdfdc1ef094e08e6818152637206a54040
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+> (Cherry pick from commit f5987d38f216a3142dfe45f03bf66ff4827d9b55)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22333/)
+
+Change-Id: Ibf8713fdfdc1ef094e08e6818152637206a54040
+BUG: 1851989
+Signed-off-by: Mohit Agrawal<moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/209468
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/storage/posix/src/posix-common.c       | 2 --
+ xlators/storage/posix/src/posix-entry-ops.c    | 2 --
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 2 --
+ xlators/storage/posix/src/posix.h              | 1 -
+ 4 files changed, 7 deletions(-)
+
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index ac53796..b317627 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -128,7 +128,6 @@ posix_priv(xlator_t *this)
+     gf_proc_dump_write("max_read", "%" PRId64, GF_ATOMIC_GET(priv->read_value));
+     gf_proc_dump_write("max_write", "%" PRId64,
+                        GF_ATOMIC_GET(priv->write_value));
+-    gf_proc_dump_write("nr_files", "%" PRId64, GF_ATOMIC_GET(priv->nr_files));
+ 
+     return 0;
+ }
+@@ -815,7 +814,6 @@ posix_init(xlator_t *this)
+     }
+ 
+     LOCK_INIT(&_private->lock);
+-    GF_ATOMIC_INIT(_private->nr_files, 0);
+     GF_ATOMIC_INIT(_private->read_value, 0);
+     GF_ATOMIC_INIT(_private->write_value, 0);
+ 
+diff --git a/xlators/storage/posix/src/posix-entry-ops.c b/xlators/storage/posix/src/posix-entry-ops.c
+index 65650b3..b3a5381 100644
+--- a/xlators/storage/posix/src/posix-entry-ops.c
++++ b/xlators/storage/posix/src/posix-entry-ops.c
+@@ -2243,8 +2243,6 @@ fill_stat:
+         gf_msg(this->name, GF_LOG_WARNING, 0, P_MSG_FD_PATH_SETTING_FAILED,
+                "failed to set the fd context path=%s fd=%p", real_path, fd);
+ 
+-    GF_ATOMIC_INC(priv->nr_files);
+-
+     op_ret = 0;
+ 
+ out:
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index d135d8b..81f4a6b 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -1605,7 +1605,6 @@ posix_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+         gf_msg(this->name, GF_LOG_WARNING, 0, P_MSG_FD_PATH_SETTING_FAILED,
+                "failed to set the fd context path=%s fd=%p", real_path, fd);
+ 
+-    GF_ATOMIC_INC(priv->nr_files);
+     op_ret = 0;
+ 
+ out:
+@@ -2526,7 +2525,6 @@ posix_release(xlator_t *this, fd_t *fd)
+     if (!priv)
+         goto out;
+ 
+-    GF_ATOMIC_DEC(priv->nr_files);
+ out:
+     return 0;
+ }
+diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
+index 61495a7..124dbb4 100644
+--- a/xlators/storage/posix/src/posix.h
++++ b/xlators/storage/posix/src/posix.h
+@@ -154,7 +154,6 @@ struct posix_private {
+ 
+     gf_atomic_t read_value;  /* Total read, from init */
+     gf_atomic_t write_value; /* Total write, from init */
+-    gf_atomic_t nr_files;
+     /*
+        In some cases, two exported volumes may reside on the same
+        partition on the server. Sending statvfs info for both
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0465-posix-Implement-a-janitor-thread-to-close-fd.patch b/SOURCES/0465-posix-Implement-a-janitor-thread-to-close-fd.patch
new file mode 100644
index 0000000..fc22456
--- /dev/null
+++ b/SOURCES/0465-posix-Implement-a-janitor-thread-to-close-fd.patch
@@ -0,0 +1,384 @@
+From 143b93b230b429cc712353243ed794b68494c040 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Mon, 27 Jul 2020 18:08:00 +0530
+Subject: [PATCH 465/465] posix: Implement a janitor thread to close fd
+
+Problem: In commit fb20713b380e1df8d7f9e9df96563be2f9144fd6 we used a
+         synctask to close fds, but the patch was found to reduce
+         performance.
+
+Solution: Use a janitor thread to close fds: save the pfd ctx on the
+          ctx janitor list, and also save the posix xlator in the pfd
+          object to avoid a race during cleanup in a brick-mux environment.
+
+> Change-Id: Ifb3d18a854b267333a3a9e39845bfefb83fbc092
+> Fixes: #1396
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24755/)
+> (Cherry pick from commit 41b9616435cbdf671805856e487e373060c9455b
+
+Change-Id: Ifb3d18a854b267333a3a9e39845bfefb83fbc092
+BUG: 1851989
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/209448
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfsd/src/glusterfsd.c                    |  4 ++
+ libglusterfs/src/glusterfs/glusterfs.h         |  7 ++
+ rpc/rpc-lib/src/rpcsvc.c                       |  6 --
+ xlators/storage/posix/src/posix-common.c       | 34 +++++++++-
+ xlators/storage/posix/src/posix-helpers.c      | 93 ++++++++++++++++++++++++++
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 33 ++++-----
+ xlators/storage/posix/src/posix.h              |  7 ++
+ 7 files changed, 161 insertions(+), 23 deletions(-)
+
+diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
+index 9821180..955bf1d 100644
+--- a/glusterfsd/src/glusterfsd.c
++++ b/glusterfsd/src/glusterfsd.c
+@@ -1839,6 +1839,10 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
+ 
+     INIT_LIST_HEAD(&cmd_args->xlator_options);
+     INIT_LIST_HEAD(&cmd_args->volfile_servers);
++    ctx->pxl_count = 0;
++    pthread_mutex_init(&ctx->fd_lock, NULL);
++    pthread_cond_init(&ctx->fd_cond, NULL);
++    INIT_LIST_HEAD(&ctx->janitor_fds);
+ 
+     lim.rlim_cur = RLIM_INFINITY;
+     lim.rlim_max = RLIM_INFINITY;
+diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
+index 495a4d7..bf6a987 100644
+--- a/libglusterfs/src/glusterfs/glusterfs.h
++++ b/libglusterfs/src/glusterfs/glusterfs.h
+@@ -733,6 +733,13 @@ struct _glusterfs_ctx {
+     } stats;
+ 
+     struct list_head volfile_list;
++    /* Members to manage the janitor thread that cleans up fds */
++    struct list_head janitor_fds;
++    pthread_cond_t fd_cond;
++    pthread_mutex_t fd_lock;
++    pthread_t janitor;
++    /* This variable is used to save the total posix xlator count */
++    uint32_t pxl_count;
+ 
+     char volume_id[GF_UUID_BUF_SIZE]; /* Used only in protocol/client */
+ };
+diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
+index 23ca1fd..3f184bf 100644
+--- a/rpc/rpc-lib/src/rpcsvc.c
++++ b/rpc/rpc-lib/src/rpcsvc.c
+@@ -375,12 +375,6 @@ rpcsvc_program_actor(rpcsvc_request_t *req)
+ 
+     req->ownthread = program->ownthread;
+     req->synctask = program->synctask;
+-    if (((req->procnum == GFS3_OP_RELEASE) ||
+-         (req->procnum == GFS3_OP_RELEASEDIR)) &&
+-        (program->prognum == GLUSTER_FOP_PROGRAM)) {
+-        req->ownthread = _gf_false;
+-        req->synctask = _gf_true;
+-    }
+ 
+     err = SUCCESS;
+     gf_log(GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s",
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index b317627..c5a43a1 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -150,6 +150,7 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...)
+     struct timespec sleep_till = {
+         0,
+     };
++    glusterfs_ctx_t *ctx = this->ctx;
+ 
+     switch (event) {
+         case GF_EVENT_PARENT_UP: {
+@@ -160,8 +161,6 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...)
+         case GF_EVENT_PARENT_DOWN: {
+             if (!victim->cleanup_starting)
+                 break;
+-            gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s",
+-                   victim->name);
+ 
+             if (priv->janitor) {
+                 pthread_mutex_lock(&priv->janitor_mutex);
+@@ -187,6 +186,16 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...)
+                 GF_FREE(priv->janitor);
+             }
+             priv->janitor = NULL;
++            pthread_mutex_lock(&ctx->fd_lock);
++            {
++                while (priv->rel_fdcount > 0) {
++                    pthread_cond_wait(&priv->fd_cond, &ctx->fd_lock);
++                }
++            }
++            pthread_mutex_unlock(&ctx->fd_lock);
++
++            gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s",
++                   victim->name);
+             default_notify(this->parents->xlator, GF_EVENT_CHILD_DOWN, data);
+         } break;
+         default:
+@@ -1038,7 +1047,13 @@ posix_init(xlator_t *this)
+     pthread_cond_init(&_private->fsync_cond, NULL);
+     pthread_mutex_init(&_private->janitor_mutex, NULL);
+     pthread_cond_init(&_private->janitor_cond, NULL);
++    pthread_cond_init(&_private->fd_cond, NULL);
+     INIT_LIST_HEAD(&_private->fsyncs);
++    _private->rel_fdcount = 0;
++    ret = posix_spawn_ctx_janitor_thread(this);
++    if (ret)
++        goto out;
++
+     ret = gf_thread_create(&_private->fsyncer, NULL, posix_fsyncer, this,
+                            "posixfsy");
+     if (ret) {
+@@ -1133,6 +1148,8 @@ posix_fini(xlator_t *this)
+ {
+     struct posix_private *priv = this->private;
+     gf_boolean_t health_check = _gf_false;
++    glusterfs_ctx_t *ctx = this->ctx;
++    uint32_t count;
+     int ret = 0;
+ 
+     if (!priv)
+@@ -1166,6 +1183,19 @@ posix_fini(xlator_t *this)
+         priv->janitor = NULL;
+     }
+ 
++    pthread_mutex_lock(&ctx->fd_lock);
++    {
++        count = --ctx->pxl_count;
++        if (count == 0) {
++            pthread_cond_signal(&ctx->fd_cond);
++        }
++    }
++    pthread_mutex_unlock(&ctx->fd_lock);
++
++    if (count == 0) {
++        pthread_join(ctx->janitor, NULL);
++    }
++
+     if (priv->fsyncer) {
+         (void)gf_thread_cleanup_xint(priv->fsyncer);
+         priv->fsyncer = 0;
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 39dbcce..73a44be 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -1582,6 +1582,99 @@ unlock:
+     return;
+ }
+ 
++static struct posix_fd *
++janitor_get_next_fd(glusterfs_ctx_t *ctx)
++{
++    struct posix_fd *pfd = NULL;
++
++    while (list_empty(&ctx->janitor_fds)) {
++        if (ctx->pxl_count == 0) {
++            return NULL;
++        }
++
++        pthread_cond_wait(&ctx->fd_cond, &ctx->fd_lock);
++    }
++
++    pfd = list_first_entry(&ctx->janitor_fds, struct posix_fd, list);
++    list_del_init(&pfd->list);
++
++    return pfd;
++}
++
++static void
++posix_close_pfd(xlator_t *xl, struct posix_fd *pfd)
++{
++    THIS = xl;
++
++    if (pfd->dir == NULL) {
++        gf_msg_trace(xl->name, 0, "janitor: closing file fd=%d", pfd->fd);
++        sys_close(pfd->fd);
++    } else {
++        gf_msg_debug(xl->name, 0, "janitor: closing dir fd=%p", pfd->dir);
++        sys_closedir(pfd->dir);
++    }
++
++    GF_FREE(pfd);
++}
++
++static void *
++posix_ctx_janitor_thread_proc(void *data)
++{
++    xlator_t *xl;
++    struct posix_fd *pfd;
++    glusterfs_ctx_t *ctx = NULL;
++    struct posix_private *priv_fd;
++
++    ctx = data;
++
++    pthread_mutex_lock(&ctx->fd_lock);
++
++    while ((pfd = janitor_get_next_fd(ctx)) != NULL) {
++        pthread_mutex_unlock(&ctx->fd_lock);
++
++        xl = pfd->xl;
++        posix_close_pfd(xl, pfd);
++
++        pthread_mutex_lock(&ctx->fd_lock);
++
++        priv_fd = xl->private;
++        priv_fd->rel_fdcount--;
++        if (!priv_fd->rel_fdcount)
++            pthread_cond_signal(&priv_fd->fd_cond);
++    }
++
++    pthread_mutex_unlock(&ctx->fd_lock);
++
++    return NULL;
++}
++
++int
++posix_spawn_ctx_janitor_thread(xlator_t *this)
++{
++    int ret = 0;
++    glusterfs_ctx_t *ctx = NULL;
++
++    ctx = this->ctx;
++
++    pthread_mutex_lock(&ctx->fd_lock);
++    {
++        if (ctx->pxl_count++ == 0) {
++            ret = gf_thread_create(&ctx->janitor, NULL,
++                                   posix_ctx_janitor_thread_proc, ctx,
++                                   "posixctxjan");
++
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_THREAD_FAILED,
++                       "spawning janitor thread failed");
++                ctx->pxl_count--;
++            }
++        }
++    }
++    pthread_mutex_unlock(&ctx->fd_lock);
++
++    return ret;
++}
++
+ static int
+ is_fresh_file(int64_t ctime_sec)
+ {
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index 81f4a6b..21119ea 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -1352,6 +1352,22 @@ out:
+     return 0;
+ }
+ 
++static void
++posix_add_fd_to_cleanup(xlator_t *this, struct posix_fd *pfd)
++{
++    glusterfs_ctx_t *ctx = this->ctx;
++    struct posix_private *priv = this->private;
++
++    pfd->xl = this;
++    pthread_mutex_lock(&ctx->fd_lock);
++    {
++        list_add_tail(&pfd->list, &ctx->janitor_fds);
++        priv->rel_fdcount++;
++        pthread_cond_signal(&ctx->fd_cond);
++    }
++    pthread_mutex_unlock(&ctx->fd_lock);
++}
++
+ int32_t
+ posix_releasedir(xlator_t *this, fd_t *fd)
+ {
+@@ -1374,11 +1390,7 @@ posix_releasedir(xlator_t *this, fd_t *fd)
+                "pfd->dir is NULL for fd=%p", fd);
+         goto out;
+     }
+-
+-    gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p", pfd->dir);
+-
+-    sys_closedir(pfd->dir);
+-    GF_FREE(pfd);
++    posix_add_fd_to_cleanup(this, pfd);
+ 
+ out:
+     return 0;
+@@ -2494,7 +2506,6 @@ out:
+ int32_t
+ posix_release(xlator_t *this, fd_t *fd)
+ {
+-    struct posix_private *priv = NULL;
+     struct posix_fd *pfd = NULL;
+     int ret = -1;
+     uint64_t tmp_pfd = 0;
+@@ -2502,8 +2513,6 @@ posix_release(xlator_t *this, fd_t *fd)
+     VALIDATE_OR_GOTO(this, out);
+     VALIDATE_OR_GOTO(fd, out);
+ 
+-    priv = this->private;
+-
+     ret = fd_ctx_del(fd, this, &tmp_pfd);
+     if (ret < 0) {
+         gf_msg(this->name, GF_LOG_WARNING, 0, P_MSG_PFD_NULL,
+@@ -2517,13 +2526,7 @@ posix_release(xlator_t *this, fd_t *fd)
+                "pfd->dir is %p (not NULL) for file fd=%p", pfd->dir, fd);
+     }
+ 
+-    gf_msg_debug(this->name, 0, "janitor: closing dir fd=%p", pfd->dir);
+-
+-    sys_close(pfd->fd);
+-    GF_FREE(pfd);
+-
+-    if (!priv)
+-        goto out;
++    posix_add_fd_to_cleanup(this, pfd);
+ 
+ out:
+     return 0;
+diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
+index 124dbb4..07f367b 100644
+--- a/xlators/storage/posix/src/posix.h
++++ b/xlators/storage/posix/src/posix.h
+@@ -134,6 +134,8 @@ struct posix_fd {
+     off_t dir_eof; /* offset at dir EOF */
+     int odirect;
+     struct list_head list; /* to add to the janitor list */
++    xlator_t *xl;
++    char _pad[4]; /* manual padding */
+ };
+ 
+ struct posix_private {
+@@ -204,6 +206,7 @@ struct posix_private {
+     pthread_cond_t fsync_cond;
+     pthread_mutex_t janitor_mutex;
+     pthread_cond_t janitor_cond;
++    pthread_cond_t fd_cond;
+     int fsync_queue_count;
+ 
+     enum {
+@@ -259,6 +262,7 @@ struct posix_private {
+     gf_boolean_t fips_mode_rchecksum;
+     gf_boolean_t ctime;
+     gf_boolean_t janitor_task_stop;
++    uint32_t rel_fdcount;
+ };
+ 
+ typedef struct {
+@@ -665,6 +669,9 @@ posix_cs_maintenance(xlator_t *this, fd_t *fd, loc_t *loc, int *pfd,
+ int
+ posix_check_dev_file(xlator_t *this, inode_t *inode, char *fop, int *op_errno);
+ 
++int
++posix_spawn_ctx_janitor_thread(xlator_t *this);
++
+ void
+ posix_update_iatt_buf(struct iatt *buf, int fd, char *loc, dict_t *xdata);
+ 
+-- 
+1.8.3.1
+
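The fd-cleanup scheme added here is a classic single-consumer queue:
release paths enqueue the pfd under ctx->fd_lock and signal ctx->fd_cond,
and one janitor thread drains and closes. A reduced, self-contained
sketch of the same shape — an array stands in for the kernel-style
list_head queue, and none of this is the actual glusterfs code:

    #include <pthread.h>
    #include <unistd.h>

    /* Producers queue fds under a shared lock; one background thread
     * drains and closes them. */
    #define QMAX 64
    static int queue[QMAX];
    static int qlen = 0, done = 0;
    static pthread_mutex_t fd_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t fd_cond = PTHREAD_COND_INITIALIZER;

    static void
    enqueue_fd(int fd)                      /* the release path (producer) */
    {
        pthread_mutex_lock(&fd_lock);
        if (qlen < QMAX)
            queue[qlen++] = fd;
        pthread_cond_signal(&fd_cond);      /* wake the janitor */
        pthread_mutex_unlock(&fd_lock);
    }

    static void *
    janitor(void *arg)                      /* the consumer thread */
    {
        pthread_mutex_lock(&fd_lock);
        while (qlen > 0 || !done) {
            if (qlen == 0) {
                pthread_cond_wait(&fd_cond, &fd_lock);
                continue;
            }
            int fd = queue[--qlen];
            pthread_mutex_unlock(&fd_lock); /* close outside the lock */
            close(fd);
            pthread_mutex_lock(&fd_lock);
        }
        pthread_mutex_unlock(&fd_lock);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, janitor, NULL);
        enqueue_fd(dup(1));                 /* queue a throwaway fd */
        pthread_mutex_lock(&fd_lock);
        done = 1;                           /* mirrors pxl_count hitting 0 */
        pthread_cond_signal(&fd_cond);
        pthread_mutex_unlock(&fd_lock);
        pthread_join(t, NULL);
        return 0;
    }

Closing outside the lock is the point of the design: a slow close()
never blocks the release paths that are feeding the queue.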
diff --git a/SOURCES/0466-cluster-ec-Change-stale-index-handling.patch b/SOURCES/0466-cluster-ec-Change-stale-index-handling.patch
new file mode 100644
index 0000000..1dc9f57
--- /dev/null
+++ b/SOURCES/0466-cluster-ec-Change-stale-index-handling.patch
@@ -0,0 +1,68 @@
+From b603170ae5f583037b8177a9d19e56c7821edf0b Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Tue, 25 Aug 2020 04:19:54 +0530
+Subject: [PATCH 466/466] cluster/ec: Change stale index handling
+
+Problem:
+The earlier approach set the dirty bit, which required an extra heal.
+
+Fix:
+Send a zero-xattrop, which deletes the stale index without any need
+for an extra heal.
+
+ > Fixes: #1385
+ > Upstream-patch: https://review.gluster.org/c/glusterfs/+/24911/
+
+BUG: 1785714
+Change-Id: I7e97a1d8b5516f7be47cae55d0e56b14332b6cae
+Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/209904
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Tested-by: Ashish Pandey <aspandey@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/ec/src/ec-heal.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 6e6948b..06bafa5 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -2490,7 +2490,7 @@ out:
+ }
+ 
+ int
+-ec_heal_set_dirty_without_lock(call_frame_t *frame, ec_t *ec, inode_t *inode)
++ec_heal_purge_stale_index(call_frame_t *frame, ec_t *ec, inode_t *inode)
+ {
+     int i = 0;
+     int ret = 0;
+@@ -2520,7 +2520,6 @@ ec_heal_set_dirty_without_lock(call_frame_t *frame, ec_t *ec, inode_t *inode)
+         xattr[i] = dict;
+         on[i] = 1;
+     }
+-    dirty_xattr[EC_METADATA_TXN] = hton64(1);
+     ret = dict_set_static_bin(dict, EC_XATTR_DIRTY, dirty_xattr,
+                               (sizeof(*dirty_xattr) * EC_VERSION_SIZE));
+     if (ret < 0) {
+@@ -2621,13 +2620,10 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+             gf_msg(ec->xl->name, GF_LOG_INFO, 0, EC_MSG_HEAL_FAIL,
+                    "Index entry needs to be purged for: %s ",
+                    uuid_utoa(loc->gfid));
+-            /* We need to send xattrop to set dirty flag so that it can be
+-             * healed and index entry could be removed. We need not to take lock
+-             * on this entry to do so as we are just setting dirty flag which
+-             * actually increases the trusted.ec.dirty count and does not set
+-             * the new value.
+-             * This will make sure that it is not interfering in other fops.*/
+-            ec_heal_set_dirty_without_lock(frame, ec, loc->inode);
++            /* We need to send zero-xattrop so that stale index entry could be
++             * removed. We need not take lock on this entry to do so as
++             * xattrop on a brick is atomic. */
++            ec_heal_purge_stale_index(frame, ec, loc->inode);
+         } else if (need_heal == EC_HEAL_NONEED) {
+             gf_msg(ec->xl->name, GF_LOG_DEBUG, 0, EC_MSG_HEAL_FAIL,
+                    "Heal is not required for : %s ", uuid_utoa(loc->gfid));
+-- 
+1.8.3.1
+
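For background on the zero-xattrop trick: as the old comment in the hunk notes, an xattrop additively updates the on-brick counters, so an all-zero delta leaves trusted.ec.dirty untouched while the request still lets the brick drop the stale index entry. A standalone sketch of that additive semantics (EC_VERSION_SIZE = 2 and the "stored += delta" model are assumptions drawn from the hunk, not real glusterfs code):

    #include <inttypes.h>
    #include <stdio.h>

    #define EC_VERSION_SIZE 2   /* data and metadata counters, as in ec */

    /* Model of the brick-side xattrop: stored += delta.  An all-zero
     * delta leaves the dirty counters unchanged. */
    static void apply_xattrop(uint64_t stored[], const uint64_t delta[])
    {
        for (int i = 0; i < EC_VERSION_SIZE; i++)
            stored[i] += delta[i];
    }

    int main(void)
    {
        uint64_t dirty[EC_VERSION_SIZE] = { 0, 0 };
        const uint64_t zero[EC_VERSION_SIZE] = { 0, 0 };  /* new scheme */
        const uint64_t old[EC_VERSION_SIZE] = { 0, 1 };   /* old: metadata +1 */

        apply_xattrop(dirty, zero);
        printf("zero-xattrop: dirty = %" PRIu64 ",%" PRIu64 " (no heal)\n",
               dirty[0], dirty[1]);

        apply_xattrop(dirty, old);
        printf("old scheme:   dirty = %" PRIu64 ",%" PRIu64 " (extra heal)\n",
               dirty[0], dirty[1]);
        return 0;
    }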
diff --git a/SOURCES/0467-build-Added-dependency-for-glusterfs-selinux.patch b/SOURCES/0467-build-Added-dependency-for-glusterfs-selinux.patch
new file mode 100644
index 0000000..93bb140
--- /dev/null
+++ b/SOURCES/0467-build-Added-dependency-for-glusterfs-selinux.patch
@@ -0,0 +1,38 @@
+From 9176ee8f10c3c33f31d00261995ed27e8680934a Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Thu, 3 Sep 2020 11:46:38 +0000
+Subject: [PATCH 467/467] build: Added dependency for glusterfs-selinux
+
+> Fixes: #1442
+> Upstream-patch: https://review.gluster.org/#/c/glusterfs/+/24876/
+> Change-Id: I7d79bceff329db4d525bc8a77ba7ffe41bf53c97
+> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+
+BUG: 1460657
+
+Change-Id: I7d79bceff329db4d525bc8a77ba7ffe41bf53c97
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/210637
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 9def416..ed6bdf3 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -650,6 +650,9 @@ Summary:          Clustered file-system server
+ Requires:         %{name}%{?_isa} = %{version}-%{release}
+ Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
+ Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
++%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
++Requires:         glusterfs-selinux >= 0.1.0-2
++%endif
+ # some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
+ Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
+ # self-heal daemon, rebalance, nfs-server etc. are actually clients
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0468-build-Update-the-glusterfs-selinux-version.patch b/SOURCES/0468-build-Update-the-glusterfs-selinux-version.patch
new file mode 100644
index 0000000..b4b5ead
--- /dev/null
+++ b/SOURCES/0468-build-Update-the-glusterfs-selinux-version.patch
@@ -0,0 +1,36 @@
+From 4b72f5e7704d480bac869f7a32ac891898bb994f Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Thu, 3 Sep 2020 14:56:27 +0000
+Subject: [PATCH 468/468] build: Update the glusterfs-selinux version
+
+Updated the glusterfs-selinux version according to
+the downstream official version.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1460657
+
+Change-Id: I7b8bbf53f71f6f56103042950d8910f0cb63a685
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/210685
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+---
+ glusterfs.spec.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index ed6bdf3..30d7162 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -651,7 +651,7 @@ Requires:         %{name}%{?_isa} = %{version}-%{release}
+ Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
+ Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
+ %if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
+-Requires:         glusterfs-selinux >= 0.1.0-2
++Requires:         glusterfs-selinux >= 1.0-1
+ %endif
+ # some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
+ Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch b/SOURCES/0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
new file mode 100644
index 0000000..0fadfc9
--- /dev/null
+++ b/SOURCES/0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
@@ -0,0 +1,33 @@
+From 6fed6cfcb26e6ed3c9640c5f889629315bbd83c2 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Mon, 31 Aug 2020 12:22:05 +0530
+Subject: [PATCH 469/469] cluster/ec: Don't trigger heal for stale index
+
+     > Fixes: #1385
+     > Upstream-patch: https://review.gluster.org/c/glusterfs/+/24930
+
+BUG: 1785714
+Change-Id: I3609dd2e1f63c4bd6a19d528b935bf5b05443824
+Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/210731
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/ec/src/ec-heal.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 06bafa5..f6376cd 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -2624,6 +2624,7 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
+              * removed. We need not take lock on this entry to do so as
+              * xattrop on a brick is atomic. */
+             ec_heal_purge_stale_index(frame, ec, loc->inode);
++            goto out;
+         } else if (need_heal == EC_HEAL_NONEED) {
+             gf_msg(ec->xl->name, GF_LOG_DEBUG, 0, EC_MSG_HEAL_FAIL,
+                    "Heal is not required for : %s ", uuid_utoa(loc->gfid));
+-- 
+1.8.3.1
+
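The single added goto is easy to misread, so it is worth stating what it prevents: without it, ec_heal_do() purges the stale index and then continues into the heal machinery anyway, which is exactly the work the purge was meant to avoid. Distilled into a self-contained example of the bug class (the names are illustrative, not the real ec-heal symbols):

    #include <stdio.h>

    enum action { PURGE_STALE, NO_HEAL_NEEDED, FULL_HEAL };

    static void purge_stale_index(void) { puts("purged stale index"); }
    static void run_full_heal(void)     { puts("running full heal"); }

    static void heal_do(enum action need)
    {
        if (need == PURGE_STALE) {
            purge_stale_index();
            goto out;   /* the fix: without this, control falls through
                           into run_full_heal() below */
        } else if (need == NO_HEAL_NEEDED) {
            goto out;
        }
        run_full_heal();
    out:
        return;
    }

    int main(void)
    {
        heal_do(PURGE_STALE);   /* prints only "purged stale index" */
        return 0;
    }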
diff --git a/SOURCES/0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch b/SOURCES/0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
new file mode 100644
index 0000000..e26d46a
--- /dev/null
+++ b/SOURCES/0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
@@ -0,0 +1,63 @@
+From 8e427716f4e2855093b1a1a0e3a9ec79ebac7faf Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Thu, 10 Sep 2020 13:49:09 +0530
+Subject: [PATCH 470/473] extras/snap_scheduler: changes in
+ gluster-shared-storage mount path
+
+The patch https://review.gluster.org/#/c/glusterfs/+/24934/ changes the mount
+point of gluster_shared_storage from /var/run to /run to address the issue of
+a symlink at the mount path in fstab.
+NOTE: the mount point /var/run is a symlink to /run
+
+This patch introduces the corresponding gluster_shared_storage mount path
+changes in snap_scheduler.
+
+>Fixes: #1476
+>Change-Id: I9ce88c2f624c6aa5982de04edfee2d0a9f160d62
+>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24971/
+BUG: 1873469
+Change-Id: I9ce88c2f624c6aa5982de04edfee2d0a9f160d62
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/211391
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/snap_scheduler/gcron.py          | 4 ++--
+ extras/snap_scheduler/snap_scheduler.py | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
+index cc16310..0e4df77 100755
+--- a/extras/snap_scheduler/gcron.py
++++ b/extras/snap_scheduler/gcron.py
+@@ -19,10 +19,10 @@ import logging.handlers
+ import fcntl
+ 
+ 
+-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
++GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+ GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
+ GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
+-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
++LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
+ log = logging.getLogger("gcron-logger")
+ start_time = 0.0
+ 
+diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
+index 5a29d41..e8fcc44 100755
+--- a/extras/snap_scheduler/snap_scheduler.py
++++ b/extras/snap_scheduler/snap_scheduler.py
+@@ -67,7 +67,7 @@ except ImportError:
+ SCRIPT_NAME = "snap_scheduler"
+ scheduler_enabled = False
+ log = logging.getLogger(SCRIPT_NAME)
+-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
++SHARED_STORAGE_DIR="/run/gluster/shared_storage"
+ GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
+ GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
+ GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch b/SOURCES/0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
new file mode 100644
index 0000000..0ebba37
--- /dev/null
+++ b/SOURCES/0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
@@ -0,0 +1,73 @@
+From d23ad767281af85cf07f5c3f63de482d40ee1953 Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Thu, 10 Sep 2020 13:16:12 +0530
+Subject: [PATCH 471/473] nfs-ganesha: gluster_shared_storage fails to
+ automount on node reboot on rhel 8
+
+The patch https://review.gluster.org/#/c/glusterfs/+/24934/ changes the mount
+point of gluster_shared_storage from /var/run to /run to address the issue of
+a symlink at the mount path in fstab.
+NOTE: the mount point /var/run is a symlink to /run
+
+This patch introduces the corresponding gluster_shared_storage mount path
+changes in nfs-ganesha.
+
+>Fixes: #1475
+>Change-Id: I9c7677a053e1291f71476d47ba6fa2e729f59625
+>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24970/
+BUG: 1873469
+Change-Id: I9c7677a053e1291f71476d47ba6fa2e729f59625
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/211392
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/ganesha/ocf/ganesha_nfsd                    | 2 +-
+ extras/ganesha/scripts/ganesha-ha.sh               | 2 +-
+ extras/hook-scripts/start/post/S31ganesha-start.sh | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
+index 93fc8be..f91e8b6 100644
+--- a/extras/ganesha/ocf/ganesha_nfsd
++++ b/extras/ganesha/ocf/ganesha_nfsd
+@@ -36,7 +36,7 @@ else
+ 	. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ fi
+ 
+-OCF_RESKEY_ha_vol_mnt_default="/var/run/gluster/shared_storage"
++OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
+ : ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
+ 
+ ganesha_meta_data() {
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index a6814b1..9790a71 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -24,7 +24,7 @@ GANESHA_HA_SH=$(realpath $0)
+ HA_NUM_SERVERS=0
+ HA_SERVERS=""
+ HA_VOL_NAME="gluster_shared_storage"
+-HA_VOL_MNT="/var/run/gluster/shared_storage"
++HA_VOL_MNT="/run/gluster/shared_storage"
+ HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
+ SERVICE_MAN="DISTRO_NOT_FOUND"
+ 
+diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
+index 90ba6bc..7ad6f23 100755
+--- a/extras/hook-scripts/start/post/S31ganesha-start.sh
++++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
+@@ -4,7 +4,7 @@ OPTSPEC="volname:,gd-workdir:"
+ VOL=
+ declare -i EXPORT_ID
+ ganesha_key="ganesha.enable"
+-GANESHA_DIR="/var/run/gluster/shared_storage/nfs-ganesha"
++GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
+ CONF1="$GANESHA_DIR/ganesha.conf"
+ GLUSTERD_WORKDIR=
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch b/SOURCES/0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
new file mode 100644
index 0000000..79d4d0e
--- /dev/null
+++ b/SOURCES/0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
@@ -0,0 +1,98 @@
+From ccd45222c46b91b4d0cd57db9ea8b1515c97ada0 Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Mon, 31 Aug 2020 20:08:39 +0530
+Subject: [PATCH 472/473] geo-rep: gluster_shared_storage fails to automount on
+ node reboot on rhel 8.
+
+Issue: On reboot, all the mounts get wiped out.
+       Only the mounts mentioned in /etc/fstab automatically get mounted
+       during boot/reboot.
+
+       But /etc/fstab complains about not getting a canonical path
+       (it gets a path containing a symlink).
+       This is because gluster_shared_storage is mounted on
+       /var/run, which is a symlink to /run. This is a general practice
+       followed by most operating systems.
+
+       [root@ ~]# ls -lsah /var/run
+       0 lrwxrwxrwx. 1 root root 6 Jul 22 19:39 /var/run -> ../run
+
+Fix:   Mount gluster_shared_storage on /run.
+       (It is also seen that /var/run is mostly kept for old or legacy
+       software, so it is good practice to update /var/run to /run.)
+
+>fixes: #1459
+>Change-Id: I8c16653be8cd746c84f01abf0eea19284fb97c77
+>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+
+backport of https://review.gluster.org/#/c/glusterfs/+/24934/
+BUG: 1873469
+Change-Id: I8c16653be8cd746c84f01abf0eea19284fb97c77
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/211387
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../set/post/S32gluster_enable_shared_storage.sh       | 18 +++++++++---------
+ geo-replication/gsyncd.conf.in                         |  2 +-
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+index 885ed03..3bae37c 100755
+--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
++++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+@@ -79,9 +79,9 @@ done
+ 
+ if [ "$option" == "disable" ]; then
+     # Unmount the volume on all the nodes
+-    umount /var/run/gluster/shared_storage
+-    cat /etc/fstab  | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
+-    mv /var/run/gluster/fstab.tmp /etc/fstab
++    umount /run/gluster/shared_storage
++    cat /etc/fstab  | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
++    mv /run/gluster/fstab.tmp /etc/fstab
+ fi
+ 
+ if [ "$is_originator" == 1 ]; then
+@@ -105,7 +105,7 @@ function check_volume_status()
+ }
+ 
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+-           /var/run/gluster/shared_storage"
++           /run/gluster/shared_storage"
+ 
+ if [ "$option" == "enable" ]; then
+     retry=0;
+@@ -120,10 +120,10 @@ if [ "$option" == "enable" ]; then
+         status=$(check_volume_status)
+     done
+     # Mount the volume on all the nodes
+-    umount /var/run/gluster/shared_storage
+-    mkdir -p /var/run/gluster/shared_storage
++    umount /run/gluster/shared_storage
++    mkdir -p /run/gluster/shared_storage
+     $mount_cmd
+-    cp /etc/fstab /var/run/gluster/fstab.tmp
+-    echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults        0 0" >> /var/run/gluster/fstab.tmp
+-    mv /var/run/gluster/fstab.tmp /etc/fstab
++    cp /etc/fstab /run/gluster/fstab.tmp
++    echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults        0 0" >> /run/gluster/fstab.tmp
++    mv /run/gluster/fstab.tmp /etc/fstab
+ fi
+diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
+index 11e57fd..9688c79 100644
+--- a/geo-replication/gsyncd.conf.in
++++ b/geo-replication/gsyncd.conf.in
+@@ -123,7 +123,7 @@ type=bool
+ help=Use this to set Active Passive mode to meta-volume.
+ 
+ [meta-volume-mnt]
+-value=/var/run/gluster/shared_storage
++value=/run/gluster/shared_storage
+ help=Meta Volume or Shared Volume mount path
+ 
+ [allow-network]
+-- 
+1.8.3.1
+
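Patches 0470-0472 are one logical change applied across snap_scheduler, nfs-ganesha, and geo-replication: every persisted reference to the shared-storage mount must use the canonical /run form, since fstab rejects the symlinked /var/run spelling. The canonicalization itself can be observed with plain libc (a sketch; the path only resolves on a host where it actually exists):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char buf[PATH_MAX];

        /* On systemd-based distros /var/run is a symlink to /run, so the
         * canonical form of the old mount point is the new one. */
        if (realpath("/var/run/gluster/shared_storage", buf))
            printf("canonical path: %s\n", buf); /* /run/gluster/shared_storage */
        else
            perror("realpath");
        return 0;
    }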
diff --git a/SOURCES/0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch b/SOURCES/0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
new file mode 100644
index 0000000..0629fa7
--- /dev/null
+++ b/SOURCES/0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
@@ -0,0 +1,75 @@
+From 80f1b3aedcde02ae25b341519857ba9a5b2fa722 Mon Sep 17 00:00:00 2001
+From: Sheetal Pamecha <spamecha@redhat.com>
+Date: Thu, 24 Sep 2020 19:43:29 +0530
+Subject: [PATCH 473/473] glusterd: Fix Add-brick with increasing replica count
+ failure
+
+Problem: the add-brick operation fails with a "multiple bricks on the
+same server" error when the replica count is increased.
+
+This was happening because of extra iterations in the loop that
+compares hostnames: if fewer bricks than the "replica" count were
+supplied, a brick would get compared against itself, producing the
+error above.
+
+>Upstream-patch: https://review.gluster.org/#/c/glusterfs/+/25029
+>Fixes: #1508
+
+BUG: 1881823
+Change-Id: I8668e964340b7bf59728bb838525d2db062197ed
+Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/213064
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/glusterd/brick-order-check-add-brick.t | 21 +++++++++++++++++++++
+ xlators/mgmt/glusterd/src/glusterd-utils.c        |  4 ++++
+ 2 files changed, 25 insertions(+)
+
+diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
+index 29f0ed1..0be31da 100644
+--- a/tests/bugs/glusterd/brick-order-check-add-brick.t
++++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
+@@ -37,4 +37,25 @@ EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+ TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+ EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+ 
++TEST $CLI_1 volume stop $V0
++TEST $CLI_1 volume delete $V0
++
++TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
++EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
++EXPECT 'Created' volinfo_field $V0 'Status'
++
++TEST $CLI_1 volume start $V0
++EXPECT 'Started' volinfo_field $V0 'Status'
++
++#Add-brick with Increasing replica count
++TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
++EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
++
++#Add-brick with Increasing replica count from same host should fail
++TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
++
++#adding multiple bricks from same host should fail the brick order check
++TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
++EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
++
+ cleanup
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 545e688..d25fc8a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -14908,6 +14908,10 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
+     i = 0;
+     ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
+ 
++    if (brick_count < sub_count) {
++        sub_count = brick_count;
++    }
++
+     /* Check for bad brick order */
+     while (i < brick_count) {
+         ++i;
+-- 
+1.8.3.1
+
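The essence of the glusterd fix is the clamp sub_count = brick_count: when a replica-count increase supplies fewer bricks than one full replica set, the comparison must not run past the supplied list. The real code walks a circular list of resolved addresses, so without the clamp the traversal wraps and a brick is compared against itself. A self-contained sketch, simplified to a single replica set with raw hostname strings and modular indexing standing in for the wrap-around:

    #include <stdio.h>
    #include <string.h>

    /* Return -1 if two bricks of one replica set share a host. */
    static int check_brick_order(const char **hosts, int brick_count,
                                 int sub_count)
    {
        if (brick_count < sub_count)   /* the back-ported fix */
            sub_count = brick_count;

        for (int i = 0; i + 1 < sub_count; i++)
            if (strcmp(hosts[i % brick_count],
                       hosts[(i + 1) % brick_count]) == 0)
                return -1;
        return 0;
    }

    int main(void)
    {
        const char *one[] = { "host3" };   /* replica 2 -> 3: one new brick */
        const char *dup[] = { "host1", "host1", "host2" };

        /* Without the clamp, the single brick would wrap onto itself
         * and wrongly return -1; with it, the check passes. */
        printf("new brick, clamped: %d\n", check_brick_order(one, 1, 3)); /* 0 */
        printf("same-host bricks:   %d\n", check_brick_order(dup, 3, 3)); /* -1 */
        return 0;
    }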
diff --git a/SOURCES/0474-features-locks-posixlk-clear-lock-should-set-error-a.patch b/SOURCES/0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
new file mode 100644
index 0000000..034a2a2
--- /dev/null
+++ b/SOURCES/0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
@@ -0,0 +1,49 @@
+From 3612b3a46c33d19bb7d4aee6eb6625d8d903d459 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Wed, 17 Jun 2020 10:44:37 +0530
+Subject: [PATCH 474/478] features/locks: posixlk-clear-lock should set error
+ as EINTR
+
+Problem:
+On receiving an interrupt for setlk, fuse sends a clear-lock "fop"
+using a virtual getxattr. At the moment, blocked locks that are
+cleared return errno EAGAIN instead of errno EINTR.
+
+Fix:
+Return errno EINTR.
+
+Upstream:
+> Reviewed-on: https://review.gluster.org/24587
+> Updates: #1310
+> Change-Id: I47de0fcaec370b267f2f5f89deeb37e1b9c0ee9b
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1821743
+Change-Id: Id8301ce6e21c009949e88db5904d8b6ecc278f66
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/216157
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/features/locks/src/clear.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/features/locks/src/clear.c b/xlators/features/locks/src/clear.c
+index 116aed6..ab1eac6 100644
+--- a/xlators/features/locks/src/clear.c
++++ b/xlators/features/locks/src/clear.c
+@@ -181,9 +181,9 @@ clrlk_clear_posixlk(xlator_t *this, pl_inode_t *pl_inode, clrlk_args *args,
+             if (plock->blocked) {
+                 bcount++;
+                 pl_trace_out(this, plock->frame, NULL, NULL, F_SETLKW,
+-                             &plock->user_flock, -1, EAGAIN, NULL);
++                             &plock->user_flock, -1, EINTR, NULL);
+ 
+-                STACK_UNWIND_STRICT(lk, plock->frame, -1, EAGAIN,
++                STACK_UNWIND_STRICT(lk, plock->frame, -1, EINTR,
+                                     &plock->user_flock, NULL);
+ 
+             } else {
+-- 
+1.8.3.1
+
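The errno choice is observable from any POSIX client: a blocking fcntl(F_SETLKW) interrupted by a signal is specified to fail with EINTR, while EAGAIN is the would-block answer for the non-blocking F_SETLK. A minimal standalone demonstration (nothing gluster-specific; per signal(7), F_SETLKW is never auto-restarted after a signal, so no SA_RESTART handling is needed):

    #include <errno.h>
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; /* just interrupt the wait */ }

    int main(void)
    {
        struct sigaction sa = { 0 };
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        int fd = open("/tmp/lockdemo", O_CREAT | O_RDWR, 0600);

        if (fd < 0)
            return 1;
        sa.sa_handler = on_alarm;            /* note: no SA_RESTART */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);
        alarm(2);  /* fires during the wait if another process holds the lock */

        if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EINTR)
            puts("wait interrupted: EINTR, as the cleared lock now reports");
        else
            puts("lock acquired (no contention on this run)");
        close(fd);
        return 0;
    }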
diff --git a/SOURCES/0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch b/SOURCES/0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
new file mode 100644
index 0000000..24a62b3
--- /dev/null
+++ b/SOURCES/0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
@@ -0,0 +1,46 @@
+From 47d8c316f622850d060af90d1d939528ace5607a Mon Sep 17 00:00:00 2001
+From: Csaba Henk <csaba@redhat.com>
+Date: Thu, 14 Feb 2019 02:01:38 +0100
+Subject: [PATCH 475/478] fuse lock interrupt: fix flock_interrupt.t
+
+Upstream:
+> Reviewed-on: https://review.gluster.org/22213
+> updates: bz#1193929
+> Change-Id: I347de62755100cd69e3cf341434767ae23fd1ba4
+> Signed-off-by: Csaba Henk <csaba@redhat.com>
+
+BUG: 1821743
+Change-Id: I0088f804bca215152e7ca2c490402c11f7b5333a
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/216158
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/features/flock_interrupt.t | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/tests/features/flock_interrupt.t b/tests/features/flock_interrupt.t
+index 8603b65..964a4bc 100644
+--- a/tests/features/flock_interrupt.t
++++ b/tests/features/flock_interrupt.t
+@@ -22,12 +22,12 @@ EXPECT 'Started' volinfo_field $V0 'Status';
+ TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+ TEST touch $M0/testfile;
+ 
+-function flock_interrupt {
+-        flock $MO/testfile sleep 3 & flock -w 1 $M0/testfile true;
+-        echo ok;
+-}
++echo > got_lock
++flock $M0/testfile sleep 6 & { sleep 0.3; flock -w 2 $M0/testfile true; echo ok > got_lock; } &
+ 
+-EXPECT_WITHIN 2 ok flock_interrupt;
++EXPECT_WITHIN 4 ok cat got_lock;
+ 
+ ## Finish up
++sleep 7;
++rm -f got_lock;
+ cleanup;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch b/SOURCES/0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
new file mode 100644
index 0000000..6c9d736
--- /dev/null
+++ b/SOURCES/0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
@@ -0,0 +1,114 @@
+From 40519185067d891f06818c574301ea1af4b36479 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Wed, 17 Jun 2020 10:45:19 +0530
+Subject: [PATCH 476/478] mount/fuse: use cookies to get fuse-interrupt-record
+ instead of xdata
+
+Problem:
+On executing tests/features/flock_interrupt.t the following error log
+appears:
+[2020-06-16 11:51:54.631072 +0000] E
+[fuse-bridge.c:4791:fuse_setlk_interrupt_handler_cbk] 0-glusterfs-fuse:
+interrupt record not found
+
+This happens because the fuse-interrupt-record is never sent on the wire
+by the getxattr fop, so there is no guarantee that it will be available
+in the cbk in case of failures.
+
+Fix:
+Wind the getxattr fop with the fuse-interrupt-record as the cookie and
+recover it in the cbk.
+
+Upstream:
+> Reviewed-on: https://review.gluster.org/24588
+> Fixes: #1310
+> Change-Id: I4cfff154321a449114fc26e9440db0f08e5c7daa
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1821743
+Change-Id: If9576801654d4d743bd66ae90ca259c4d34746a7
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/216159
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/features/flock_interrupt.t     |  1 -
+ xlators/mount/fuse/src/fuse-bridge.c | 28 +++++++---------------------
+ 2 files changed, 7 insertions(+), 22 deletions(-)
+
+diff --git a/tests/features/flock_interrupt.t b/tests/features/flock_interrupt.t
+index 964a4bc..b8717e3 100644
+--- a/tests/features/flock_interrupt.t
++++ b/tests/features/flock_interrupt.t
+@@ -28,6 +28,5 @@ flock $M0/testfile sleep 6 & { sleep 0.3; flock -w 2 $M0/testfile true; echo ok
+ EXPECT_WITHIN 4 ok cat got_lock;
+ 
+ ## Finish up
+-sleep 7;
+ rm -f got_lock;
+ cleanup;
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index f61fa39..1bddac2 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -4768,16 +4768,8 @@ fuse_setlk_interrupt_handler_cbk(call_frame_t *frame, void *cookie,
+                                  int32_t op_errno, dict_t *dict, dict_t *xdata)
+ {
+     fuse_interrupt_state_t intstat = INTERRUPT_NONE;
+-    fuse_interrupt_record_t *fir;
++    fuse_interrupt_record_t *fir = cookie;
+     fuse_state_t *state = NULL;
+-    int ret = 0;
+-
+-    ret = dict_get_bin(xdata, "fuse-interrupt-record", (void **)&fir);
+-    if (ret < 0) {
+-        gf_log("glusterfs-fuse", GF_LOG_ERROR, "interrupt record not found");
+-
+-        goto out;
+-    }
+ 
+     intstat = op_ret >= 0 ? INTERRUPT_HANDLED : INTERRUPT_SQUELCHED;
+ 
+@@ -4789,7 +4781,6 @@ fuse_setlk_interrupt_handler_cbk(call_frame_t *frame, void *cookie,
+         GF_FREE(state);
+     }
+ 
+-out:
+     STACK_DESTROY(frame->root);
+ 
+     return 0;
+@@ -4827,9 +4818,10 @@ fuse_setlk_interrupt_handler(xlator_t *this, fuse_interrupt_record_t *fir)
+     frame->op = GF_FOP_GETXATTR;
+     state->name = xattr_name;
+ 
+-    STACK_WIND(frame, fuse_setlk_interrupt_handler_cbk, state->active_subvol,
+-               state->active_subvol->fops->fgetxattr, state->fd, xattr_name,
+-               state->xdata);
++    STACK_WIND_COOKIE(frame, fuse_setlk_interrupt_handler_cbk, fir,
++                      state->active_subvol,
++                      state->active_subvol->fops->fgetxattr, state->fd,
++                      xattr_name, state->xdata);
+ 
+     return;
+ 
+@@ -4852,15 +4844,9 @@ fuse_setlk_resume(fuse_state_t *state)
+     fir = fuse_interrupt_record_new(state->finh, fuse_setlk_interrupt_handler);
+     state_clone = gf_memdup(state, sizeof(*state));
+     if (state_clone) {
+-        /*
+-         * Calling this allocator with fir casted to (char *) seems like
+-         * an abuse of this API, but in fact the API is stupid to assume
+-         * a (char *) argument (in the funcion it's casted to (void *)
+-         * anyway).
+-         */
+-        state_clone->xdata = dict_for_key_value(
+-            "fuse-interrupt-record", (char *)fir, sizeof(*fir), _gf_true);
++        state_clone->xdata = dict_new();
+     }
++
+     if (!fir || !state_clone || !state_clone->xdata) {
+         if (fir) {
+             GF_FREE(fir);
+-- 
+1.8.3.1
+
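The STACK_WIND_COOKIE change is an instance of a general pattern: thread the per-request state through the call as an opaque cookie instead of smuggling it inside a payload (here, the xdata dict) that is not guaranteed to reach the callback on failure paths. The same idea in plain C, with invented names:

    #include <stdio.h>

    /* A callback-with-cookie API: the opaque pointer passed at wind time
     * comes back verbatim at callback time, even if the operation fails. */
    typedef void (*op_cbk_t)(void *cookie, int op_ret);

    static void async_op(op_cbk_t cbk, void *cookie)
    {
        /* ... the operation runs and here happens to fail ... */
        cbk(cookie, -1);          /* the cookie is delivered regardless */
    }

    struct interrupt_record { int id; };

    static void on_done(void *cookie, int op_ret)
    {
        struct interrupt_record *rec = cookie;   /* always recoverable */
        printf("op_ret=%d, record id=%d\n", op_ret, rec->id);
    }

    int main(void)
    {
        struct interrupt_record rec = { .id = 42 };
        async_op(on_done, &rec);
        return 0;
    }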
diff --git a/SOURCES/0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch b/SOURCES/0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
new file mode 100644
index 0000000..c604ccd
--- /dev/null
+++ b/SOURCES/0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
@@ -0,0 +1,51 @@
+From 3d50207b346cb5d95af94aa010ebd1ec3e795554 Mon Sep 17 00:00:00 2001
+From: srijan-sivakumar <ssivakum@redhat.com>
+Date: Wed, 4 Nov 2020 11:44:51 +0530
+Subject: [PATCH 477/478] glusterd/snapshot: Snapshot prevalidation failure not
+ failing.
+
+The value of `ret` must be set to `-1` to indicate failure;
+otherwise the prevalidation, which is supposed to fail because
+the snapshot is not even activated for cloning, will move on
+to the next stage.
+
+Label: DOWNSTREAM ONLY
+BUG: 1837926
+
+Change-Id: I95122c3a261332630efa00033a1892a8f95fc00b
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/216920
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Shwetha Acharya <sacharya@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-snapshot.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+index 5b8ae97..ee3cea0 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+@@ -2298,8 +2298,8 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
+         goto out;
+     }
+ 
+-
+     if (!glusterd_is_volume_started(snap_vol)) {
++        ret = -1;
+         snprintf(err_str, sizeof(err_str),
+                  "Snapshot %s is "
+                  "not activated",
+@@ -9361,7 +9361,8 @@ glusterd_handle_snapshot_fn(rpcsvc_request_t *req)
+                  "for a snapshot");
+         op_errno = EG_OPNOTSUP;
+         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
+-               "%s (%d < %d)", err_str, conf->op_version, GD_OP_VERSION_RHS_3_0);
++               "%s (%d < %d)", err_str, conf->op_version,
++               GD_OP_VERSION_RHS_3_0);
+         ret = -1;
+         goto out;
+     }
+-- 
+1.8.3.1
+
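The first hunk is a textbook shared-ret bug: in a function where every path funnels through a single out label, a failure branch that forgets to assign ret returns whatever the last successful call left there. Reduced to a standalone example:

    #include <stdio.h>

    static int earlier_check(void) { return 0; }   /* succeeded */

    static int prevalidate(int snapshot_activated)
    {
        int ret = earlier_check();

        if (!snapshot_activated) {
            ret = -1;  /* the fix: without this, 'out' returns the stale 0 */
            goto out;
        }
        /* ... further validation ... */
    out:
        return ret;
    }

    int main(void)
    {
        printf("prevalidate: %d\n", prevalidate(0));   /* prints -1 */
        return 0;
    }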
diff --git a/SOURCES/0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch b/SOURCES/0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
new file mode 100644
index 0000000..596fe2b
--- /dev/null
+++ b/SOURCES/0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
@@ -0,0 +1,119 @@
+From e772bef5631017145cd0270d72a9ada1378e022a Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Fri, 30 Oct 2020 08:27:47 +0200
+Subject: [PATCH 478/478] DHT - Fixing rebalance failure on issuing stop
+ command
+
+Issuing a stop command for an ongoing rebalance process results in an error.
+This issue was brought up in https://bugzilla.redhat.com/1286171 and a patch
+(https://review.gluster.org/24103/) was submitted to resolve the issue.
+
+However, the submitted patch resolved only part of the
+problem by reducing the number of log messages that were printed (since
+rebalance is currently a recursive process, an error message was printed
+for every directory), but it didn't fully resolve the root cause of the
+failure.
+
+This patch fixes the issue by modifying the code path that handles the
+termination of the rebalance process upon a stop command.
+
+Upstream:
+> Reviewed-on: https://github.com/gluster/glusterfs/pull/1628
+> fixes: #1627
+> Change-Id: I604f2b0f8b1ccb1026b8425a14200bbd1dc5bd03
+> Signed-off-by: Barak Sason Rofman bsasonro@redhat.com
+
+BUG: 1286171
+Change-Id: I604f2b0f8b1ccb1026b8425a14200bbd1dc5bd03
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/216896
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-rebalance.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index abc10fc..d49a719 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -3113,12 +3113,10 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+     struct dht_container *tmp_container = NULL;
+ 
+     if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+-        ret = -1;
+         goto out;
+     }
+ 
+     if (dir_dfmeta->offset_var[i].readdir_done == 1) {
+-        ret = 0;
+         goto out;
+     }
+ 
+@@ -3135,7 +3133,6 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+                               &(dir_dfmeta->equeue[i]), xattr_req, NULL);
+         if (ret == 0) {
+             dir_dfmeta->offset_var[i].readdir_done = 1;
+-            ret = 0;
+             goto out;
+         }
+ 
+@@ -3161,7 +3158,6 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+ 
+     while (1) {
+         if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+-            ret = -1;
+             goto out;
+         }
+ 
+@@ -3273,12 +3269,14 @@ int static gf_defrag_get_entry(xlator_t *this, int i,
+     }
+ 
+ out:
+-    if (ret == 0) {
+-        *container = tmp_container;
+-    } else {
+-        if (tmp_container) {
++    if (defrag->defrag_status == GF_DEFRAG_STATUS_STARTED) {
++        if (ret == 0) {
++            *container = tmp_container;
++        } else {
+             gf_defrag_free_container(tmp_container);
+         }
++    } else {
++        gf_defrag_free_container(tmp_container);
+     }
+ 
+     return ret;
+@@ -3487,7 +3485,7 @@ gf_defrag_process_dir(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+                                       migrate_data, dir_dfmeta, xattr_req,
+                                       &should_commit_hash, perrno);
+ 
+-            if (defrag->defrag_status == GF_DEFRAG_STATUS_STOPPED) {
++            if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+                 goto out;
+             }
+ 
+@@ -3947,7 +3945,7 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+             ret = gf_defrag_fix_layout(this, defrag, &entry_loc, fix_layout,
+                                        migrate_data);
+ 
+-            if (defrag->defrag_status == GF_DEFRAG_STATUS_STOPPED) {
++            if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+                 goto out;
+             }
+ 
+@@ -4015,6 +4013,10 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+         (defrag->cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX)) {
+         ret = gf_defrag_process_dir(this, defrag, loc, migrate_data, &perrno);
+ 
++        if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
++            goto out;
++        }
++
+         if (ret && (ret != 2)) {
+             if (perrno == ENOENT || perrno == ESTALE) {
+                 ret = 0;
+-- 
+1.8.3.1
+
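Two things change in the control flow above: a user-requested stop no longer travels through the error path (the dropped ret = -1 assignments), and the termination checks ask whether the process is still in the started state rather than matching the stopped state alone, so a failure recorded concurrently also ends the directory walk. The inverted condition in miniature (the enum values are illustrative stand-ins for the GF_DEFRAG_STATUS_* constants):

    #include <stdio.h>

    enum defrag_status { STARTED, STOPPED, FAILED, COMPLETE };

    /* Returns 1 when the directory walk should terminate.  The patched
     * form bails on any non-running state instead of matching STOPPED
     * alone, so a concurrently recorded failure also ends the walk. */
    static int should_terminate(enum defrag_status s)
    {
        return s != STARTED;
    }

    int main(void)
    {
        printf("%d %d %d\n", should_terminate(STARTED),
               should_terminate(STOPPED), should_terminate(FAILED));
        /* prints: 0 1 1 */
        return 0;
    }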
diff --git a/SOURCES/0479-ganesha-ha-revised-regex-exprs-for-status.patch b/SOURCES/0479-ganesha-ha-revised-regex-exprs-for-status.patch
new file mode 100644
index 0000000..8bbdf9d
--- /dev/null
+++ b/SOURCES/0479-ganesha-ha-revised-regex-exprs-for-status.patch
@@ -0,0 +1,53 @@
+From 9036c9f0fd081c83c5c4fcd1ecba858421442777 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
+Date: Tue, 10 Nov 2020 07:39:14 -0500
+Subject: [PATCH 479/479] ganesha-ha: revised regex exprs for --status
+
+Use better whitespace handling in the regexes.
+
+These expressions have worked for years, but somehow no longer match on RHEL 8.
+
+> Updates: #1000
+> Change-Id: I2c1a3537573d125608334772ba1a263c55407dd4
+> Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+> https://github.com/gluster/glusterfs/commit/4026fe9a956238d8e4785cf39c3b7290eae90f03
+
+BUG: 1895301
+Change-Id: I2c1a3537573d125608334772ba1a263c55407dd4
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/217480
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 9790a71..491c61d 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -948,18 +948,18 @@ status()
+     # check if the VIP and port block/unblock RAs are on the expected nodes
+     for n in ${nodes[*]}; do
+ 
+-        grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
++        grep -E -x "${n}-nfs_block +\(ocf::heartbeat:portblock\): +Started ${n}" > /dev/null 2>&1 ${scratch}
+         result=$?
+         ((healthy+=${result}))
+-        grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
++        grep -E -x "${n}-cluster_ip-1 +\(ocf::heartbeat:IPaddr\): +Started ${n}" > /dev/null 2>&1 ${scratch}
+         result=$?
+         ((healthy+=${result}))
+-        grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
++        grep -E -x "${n}-nfs_unblock +\(ocf::heartbeat:portblock\): +Started ${n}" > /dev/null 2>&1 ${scratch}
+         result=$?
+         ((healthy+=${result}))
+     done
+ 
+-    grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
++    grep -E "\): +Stopped|FAILED" > /dev/null 2>&1 ${scratch}
+     result=$?
+ 
+     if [ ${result} -eq 0 ]; then
+-- 
+1.8.3.1
+
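The fix widens every literal single space between the pcs status columns to " +" (one or more spaces); evidently the column padding changed on RHEL 8, so the exact-match (-x) expressions stopped matching there. The revised POSIX extended regexes can be exercised from C as well (a standalone check with made-up node names and spacing; not part of the patch):

    #include <regex.h>
    #include <stdio.h>

    int main(void)
    {
        regex_t re;
        /* ^...$ mirrors grep -E -x; " +" tolerates variable padding. */
        const char *pat =
            "^node1-nfs_block +\\(ocf::heartbeat:portblock\\): +Started node1$";
        const char *narrow =
            "node1-nfs_block (ocf::heartbeat:portblock): Started node1";
        const char *wide =
            "node1-nfs_block      (ocf::heartbeat:portblock):    Started node1";

        if (regcomp(&re, pat, REG_EXTENDED | REG_NOSUB) != 0)
            return 1;
        printf("narrow padding: %s\n",
               regexec(&re, narrow, 0, NULL, 0) ? "no match" : "match");
        printf("wide padding:   %s\n",
               regexec(&re, wide, 0, NULL, 0) ? "no match" : "match");
        regfree(&re);
        return 0;
    }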
diff --git a/SOURCES/0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch b/SOURCES/0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
new file mode 100644
index 0000000..31c404f
--- /dev/null
+++ b/SOURCES/0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
@@ -0,0 +1,255 @@
+From 759c12fc016a6399bb179aa0f930602c87d1e0f8 Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Tue, 24 Nov 2020 12:56:10 +0200
+Subject: [PATCH 480/480] DHT/Rebalance - Ensure Rebalance reports status only
+ once upon stopping
+
+Upon issuing a rebalance stop command, the status of the rebalance is
+logged twice to the log file, which can sometimes result in
+inconsistent reports (one report states the status as stopped, while
+the other may report something else).
+
+This fix ensures rebalance reports its status only once and that the
+correct status is reported.
+
+Upstream:
+> Reviewed-on: https://github.com/gluster/glusterfs/pull/1783
+> fixes: #1782
+> Change-Id: Id3206edfad33b3db60e9df8e95a519928dc7cb37
+> Signed-off-by: Barak Sason Rofman bsasonro@redhat.com
+
+BUG: 1286171
+Change-Id: Id3206edfad33b3db60e9df8e95a519928dc7cb37
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/218953
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Csaba Henk <chenk@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/distribute/bug-1286171.t     | 75 +++++++++++++++++++++++++++++++++
+ xlators/cluster/dht/src/dht-common.c    |  2 +-
+ xlators/cluster/dht/src/dht-common.h    |  2 +-
+ xlators/cluster/dht/src/dht-rebalance.c | 63 ++++++++++++++-------------
+ 4 files changed, 108 insertions(+), 34 deletions(-)
+ create mode 100644 tests/bugs/distribute/bug-1286171.t
+
+diff --git a/tests/bugs/distribute/bug-1286171.t b/tests/bugs/distribute/bug-1286171.t
+new file mode 100644
+index 0000000..a2ca36f
+--- /dev/null
++++ b/tests/bugs/distribute/bug-1286171.t
+@@ -0,0 +1,75 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../cluster.rc
++. $(dirname $0)/../../volume.rc
++
++# Initialize
++#------------------------------------------------------------
++cleanup;
++
++volname=bug-1286171
++
++# Start glusterd
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume info;
++
++# Create a volume
++TEST $CLI volume create $volname $H0:$B0/${volname}{1,2}
++
++# Verify volume creation
++EXPECT "$volname" volinfo_field $volname 'Volume Name';
++EXPECT 'Created' volinfo_field $volname 'Status';
++
++# Start volume and verify successful start
++TEST $CLI volume start $volname;
++EXPECT 'Started' volinfo_field $volname 'Status';
++TEST glusterfs --volfile-id=$volname --volfile-server=$H0 --entry-timeout=0 $M0;
++#------------------------------------------------------------
++
++# Create a nested dir structure and some file under MP
++cd $M0;
++for i in {1..5}
++do
++	mkdir dir$i
++	cd dir$i
++	for j in {1..5}
++	do
++		mkdir dir$i$j
++		cd dir$i$j
++		for k in {1..5}
++		do
++			mkdir dir$i$j$k
++			cd dir$i$j$k
++			touch {1..300}
++			cd ..
++		done
++		touch {1..300}
++		cd ..
++	done
++	touch {1..300}
++	cd ..
++done
++touch {1..300}
++
++# Add-brick and start rebalance
++TEST $CLI volume add-brick $volname $H0:$B0/${volname}4;
++TEST $CLI volume rebalance $volname start;
++
++# Let rebalance run for a while
++sleep 5
++
++# Stop rebalance
++TEST $CLI volume rebalance $volname stop;
++
++# Allow rebalance to stop
++sleep 5
++
++# Examine the logfile for errors
++cd /var/log/glusterfs;
++failures=`grep "failures:" ${volname}-rebalance.log | tail -1 | sed 's/.*failures: //; s/,.*//'`;
++
++TEST [ $failures == 0 ];
++
++cleanup;
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index 23cc80c..4db89df 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -10969,7 +10969,7 @@ dht_notify(xlator_t *this, int event, void *data, ...)
+                 if ((cmd == GF_DEFRAG_CMD_STATUS) ||
+                     (cmd == GF_DEFRAG_CMD_STATUS_TIER) ||
+                     (cmd == GF_DEFRAG_CMD_DETACH_STATUS))
+-                    gf_defrag_status_get(conf, output);
++                	gf_defrag_status_get(conf, output, _gf_false);
+                 else if (cmd == GF_DEFRAG_CMD_START_DETACH_TIER)
+                     gf_defrag_start_detach_tier(defrag);
+                 else if (cmd == GF_DEFRAG_CMD_DETACH_START)
+diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
+index 9ec5b51..92f1b89 100644
+--- a/xlators/cluster/dht/src/dht-common.h
++++ b/xlators/cluster/dht/src/dht-common.h
+@@ -1252,7 +1252,7 @@ dht_fxattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+                  int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata);
+ 
+ int
+-gf_defrag_status_get(dht_conf_t *conf, dict_t *dict);
++gf_defrag_status_get(dht_conf_t *conf, dict_t *dict, gf_boolean_t log_status);
+ 
+ void
+ gf_defrag_set_pause_state(gf_tier_conf_t *tier_conf, tier_pause_state_t state);
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index d49a719..16ac16c 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -2720,7 +2720,6 @@ gf_defrag_migrate_single_file(void *opaque)
+     iatt_ptr = &entry->d_stat;
+ 
+     if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+-        ret = -1;
+         goto out;
+     }
+ 
+@@ -3833,7 +3832,6 @@ gf_defrag_fix_layout(xlator_t *this, gf_defrag_info_t *defrag, loc_t *loc,
+         list_for_each_entry_safe(entry, tmp, &entries.list, list)
+         {
+             if (defrag->defrag_status != GF_DEFRAG_STATUS_STARTED) {
+-                ret = 1;
+                 goto out;
+             }
+ 
+@@ -4863,7 +4861,7 @@ out:
+     LOCK(&defrag->lock);
+     {
+         status = dict_new();
+-        gf_defrag_status_get(conf, status);
++        gf_defrag_status_get(conf, status, _gf_true);
+         if (ctx && ctx->notify)
+             ctx->notify(GF_EN_DEFRAG_STATUS, status);
+         if (status)
+@@ -4998,7 +4996,7 @@ out:
+ }
+ 
+ int
+-gf_defrag_status_get(dht_conf_t *conf, dict_t *dict)
++gf_defrag_status_get(dht_conf_t *conf, dict_t *dict, gf_boolean_t log_status)
+ {
+     int ret = 0;
+     uint64_t files = 0;
+@@ -5095,34 +5093,35 @@ gf_defrag_status_get(dht_conf_t *conf, dict_t *dict)
+         gf_log(THIS->name, GF_LOG_WARNING, "failed to set time-left");
+ 
+ log:
+-    switch (defrag->defrag_status) {
+-        case GF_DEFRAG_STATUS_NOT_STARTED:
+-            status = "not started";
+-            break;
+-        case GF_DEFRAG_STATUS_STARTED:
+-            status = "in progress";
+-            break;
+-        case GF_DEFRAG_STATUS_STOPPED:
+-            status = "stopped";
+-            break;
+-        case GF_DEFRAG_STATUS_COMPLETE:
+-            status = "completed";
+-            break;
+-        case GF_DEFRAG_STATUS_FAILED:
+-            status = "failed";
+-            break;
+-        default:
+-            break;
+-    }
++    if (log_status) {
++        switch (defrag->defrag_status) {
++            case GF_DEFRAG_STATUS_NOT_STARTED:
++                status = "not started";
++                break;
++            case GF_DEFRAG_STATUS_STARTED:
++                status = "in progress";
++                break;
++            case GF_DEFRAG_STATUS_STOPPED:
++                status = "stopped";
++                break;
++            case GF_DEFRAG_STATUS_COMPLETE:
++                status = "completed";
++                break;
++            case GF_DEFRAG_STATUS_FAILED:
++                status = "failed";
++                break;
++            default:
++                break;
++        }
+ 
+-    gf_msg(THIS->name, GF_LOG_INFO, 0, DHT_MSG_REBALANCE_STATUS,
+-           "Rebalance is %s. Time taken is %.2f secs", status, elapsed);
+-    gf_msg(THIS->name, GF_LOG_INFO, 0, DHT_MSG_REBALANCE_STATUS,
+-           "Files migrated: %" PRIu64 ", size: %" PRIu64 ", lookups: %" PRIu64
+-           ", failures: %" PRIu64
+-           ", skipped: "
+-           "%" PRIu64,
+-           files, size, lookup, failures, skipped);
++        gf_msg("DHT", GF_LOG_INFO, 0, DHT_MSG_REBALANCE_STATUS,
++               "Rebalance is %s. Time taken is %.2f secs "
++               "Files migrated: %" PRIu64 ", size: %" PRIu64
++               ", lookups: %" PRIu64 ", failures: %" PRIu64
++               ", skipped: "
++               "%" PRIu64,
++               status, elapsed, files, size, lookup, failures, skipped);
++    }
+ out:
+     return 0;
+ }
+@@ -5299,7 +5298,7 @@ gf_defrag_stop(dht_conf_t *conf, gf_defrag_status_t status, dict_t *output)
+     defrag->defrag_status = status;
+ 
+     if (output)
+-        gf_defrag_status_get(conf, output);
++        gf_defrag_status_get(conf, output, _gf_false);
+     ret = 0;
+ out:
+     gf_msg_debug("", 0, "Returning %d", ret);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0481-Update-rfc.sh-to-rhgs-3.5.4.patch b/SOURCES/0481-Update-rfc.sh-to-rhgs-3.5.4.patch
new file mode 100644
index 0000000..0ba12d2
--- /dev/null
+++ b/SOURCES/0481-Update-rfc.sh-to-rhgs-3.5.4.patch
@@ -0,0 +1,26 @@
+From 828be8e789db3c77587c708f930d7fe8c9456e3b Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Fri, 4 Dec 2020 05:18:45 +0530
+Subject: [PATCH 481/511] Update rfc.sh to rhgs-3.5.4
+
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+---
+ rfc.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rfc.sh b/rfc.sh
+index 1dca29f..c0559b9 100755
+--- a/rfc.sh
++++ b/rfc.sh
+@@ -18,7 +18,7 @@ done
+ shift $((OPTIND-1))
+ 
+ 
+-branch="rhgs-3.5.3";
++branch="rhgs-3.5.4";
+ 
+ set_hooks_commit_msg()
+ {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0482-logger-Always-print-errors-in-english.patch b/SOURCES/0482-logger-Always-print-errors-in-english.patch
new file mode 100644
index 0000000..e454bec
--- /dev/null
+++ b/SOURCES/0482-logger-Always-print-errors-in-english.patch
@@ -0,0 +1,49 @@
+From e43af5b15d14e43c3201fd0fb7bf02663e3e0127 Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Sat, 7 Nov 2020 12:09:36 +0530
+Subject: [PATCH 482/511] logger: Always print errors in english
+
+Upstream:
+> Reviewed-on: https://github.com/gluster/glusterfs/pull/1657
+> fixes: #1302
+> Change-Id: If0e21f016155276a953c64a8dd13ff3eb281d09d
+> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+
+BUG: 1896425
+
+Change-Id: If0e21f016155276a953c64a8dd13ff3eb281d09d
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/219999
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/logging.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/libglusterfs/src/logging.c b/libglusterfs/src/logging.c
+index 7f0eff6..5874c34 100644
+--- a/libglusterfs/src/logging.c
++++ b/libglusterfs/src/logging.c
+@@ -513,6 +513,7 @@ gf_openlog(const char *ident, int option, int facility)
+ {
+     int _option = option;
+     int _facility = facility;
++    char *language = NULL;
+ 
+     if (-1 == _option) {
+         _option = LOG_PID | LOG_NDELAY;
+@@ -522,7 +523,10 @@ gf_openlog(const char *ident, int option, int facility)
+     }
+ 
+     /* TODO: Should check for errors here and return appropriately */
+-    setlocale(LC_ALL, "");
++    language = setlocale(LC_ALL, "en_US.UTF-8");
++    if (!language)
++        setlocale(LC_ALL, "");
++
+     setlocale(LC_NUMERIC, "C"); /* C-locale for strtod, ... */
+     /* close the previous syslog if open as we are changing settings */
+     closelog();
+-- 
+1.8.3.1
+
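The logic is a try-then-fall-back on setlocale(), which returns NULL when the requested locale is not installed; pinning LC_NUMERIC to "C" afterwards protects strtod() and friends from locale-dependent decimal separators. A standalone version of the same sequence:

    #include <locale.h>
    #include <stdio.h>

    int main(void)
    {
        /* Prefer English messages so log lines stay grep-able... */
        char *language = setlocale(LC_ALL, "en_US.UTF-8");
        if (!language)
            setlocale(LC_ALL, "");  /* ...else fall back to the environment */

        /* Keep numeric formatting stable for strtod() and friends. */
        setlocale(LC_NUMERIC, "C");

        printf("active locale: %s\n", setlocale(LC_ALL, NULL));
        return 0;
    }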
diff --git a/SOURCES/0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch b/SOURCES/0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
new file mode 100644
index 0000000..c0f2118
--- /dev/null
+++ b/SOURCES/0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
@@ -0,0 +1,150 @@
+From 8c366f34a279a5ab2a6301bfd93534fe746a23e8 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Mon, 7 Dec 2020 09:53:27 +0530
+Subject: [PATCH 483/511] afr: more quorum checks in lookup and new entry
+ marking
+
+Problem: See upstream github issue for details.
+
+Fix:
+-In lookup if the entry exists in 2 out of 3 bricks, don't fail the
+lookup with ENOENT just because there is an entrylk on the parent.
+Consider quorum before deciding.
+
+-If entry FOP does not succeed on quorum no. of bricks, do not perform
+new entry mark.
+
+Upstream patch details:
+> Reviewed-on: https://review.gluster.org/#/c/glusterfs/+/24499/
+> Fixes: #1303
+> Change-Id: I56df8c89ad53b29fa450c7930a7b7ccec9f4a6c5
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+
+BUG: 1821599
+Change-Id: If513e8a7d6088a676288927630d8e616269bf5d5
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220363
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ ...20-mark-dirty-for-entry-txn-on-quorum-failure.t |  2 --
+ xlators/cluster/afr/src/afr-common.c               | 24 ++++++++++++----------
+ xlators/cluster/afr/src/afr-dir-write.c            |  8 ++++++++
+ xlators/cluster/afr/src/afr.h                      |  4 ++++
+ 4 files changed, 25 insertions(+), 13 deletions(-)
+
+diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
+index 26f9049..49c4dea 100644
+--- a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
++++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
+@@ -53,8 +53,6 @@ TEST ! ls $B0/${V0}1/file$i
+ TEST ls $B0/${V0}2/file$i
+ dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
+ TEST [ "$dirty" != "000000000000000000000000" ]
+-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file$i
+-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file$i
+ 
+ TEST $CLI volume set $V0 self-heal-daemon on
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 89e2483..851ccad 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -1236,7 +1236,7 @@ refresh_done:
+     return 0;
+ }
+ 
+-static void
++void
+ afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
+                          unsigned char *replies)
+ {
+@@ -2290,6 +2290,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         0,
+     };
+     gf_boolean_t locked_entry = _gf_false;
++    gf_boolean_t in_flight_create = _gf_false;
+     gf_boolean_t can_interpret = _gf_true;
+     inode_t *parent = NULL;
+     ia_type_t ia_type = IA_INVAL;
+@@ -2333,17 +2334,12 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         if (!replies[i].valid)
+             continue;
+ 
+-        if (locked_entry && replies[i].op_ret == -1 &&
+-            replies[i].op_errno == ENOENT) {
+-            /* Second, check entry is still
+-               "underway" in creation */
+-            local->op_ret = -1;
+-            local->op_errno = ENOENT;
+-            goto error;
+-        }
+-
+-        if (replies[i].op_ret == -1)
++        if (replies[i].op_ret == -1) {
++            if (locked_entry && replies[i].op_errno == ENOENT) {
++                in_flight_create = _gf_true;
++            }
+             continue;
++        }
+ 
+         if (read_subvol == -1 || !readable[read_subvol]) {
+             read_subvol = i;
+@@ -2353,6 +2349,12 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         }
+     }
+ 
++    if (in_flight_create && !afr_has_quorum(success_replies, this, NULL)) {
++        local->op_ret = -1;
++        local->op_errno = ENOENT;
++        goto error;
++    }
++
+     if (read_subvol == -1)
+         goto error;
+     /* We now have a read_subvol, which is readable[] (if there
+diff --git a/xlators/cluster/afr/src/afr-dir-write.c b/xlators/cluster/afr/src/afr-dir-write.c
+index 84e2a34..416c19d 100644
+--- a/xlators/cluster/afr/src/afr-dir-write.c
++++ b/xlators/cluster/afr/src/afr-dir-write.c
+@@ -349,6 +349,7 @@ afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this)
+     afr_private_t *priv = NULL;
+     int pre_op_count = 0;
+     int failed_count = 0;
++    unsigned char *success_replies = NULL;
+ 
+     local = frame->local;
+     priv = this->private;
+@@ -364,9 +365,16 @@ afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this)
+     failed_count = AFR_COUNT(local->transaction.failed_subvols,
+                              priv->child_count);
+ 
++    /* FOP succeeded on all bricks. */
+     if (pre_op_count == priv->child_count && !failed_count)
+         return;
+ 
++    /* FOP did not succeed on a quorum of bricks. */
++    success_replies = alloca0(priv->child_count);
++    afr_fill_success_replies(local, priv, success_replies);
++    if (!afr_has_quorum(success_replies, this, NULL))
++        return;
++
+     if (priv->thin_arbiter_count) {
+         /*Mark new entry using ta file*/
+         local->is_new_entry = _gf_true;
+diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
+index ff96246..ed5096e 100644
+--- a/xlators/cluster/afr/src/afr.h
++++ b/xlators/cluster/afr/src/afr.h
+@@ -1334,4 +1334,8 @@ afr_mark_new_entry_changelog(call_frame_t *frame, xlator_t *this);
+ 
+ void
+ afr_selfheal_childup(xlator_t *this, afr_private_t *priv);
++
++void
++afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
++                         unsigned char *replies);
+ #endif /* __AFR_H__ */
+-- 
+1.8.3.1
+
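+A minimal Python sketch (illustrative names only, not the actual C
+implementation) of the reworked lookup decision in the patch above: an
+in-flight create, i.e. ENOENT on a locked entry, fails the lookup only
+when the successful replies do not form quorum:
+
+    from collections import namedtuple
+    from errno import EIO, ENOENT
+
+    Reply = namedtuple("Reply", "valid op_ret op_errno")
+
+    def lookup_done(replies, locked_entry, quorum_count):
+        successes = [r for r in replies if r.valid and r.op_ret == 0]
+        in_flight = locked_entry and any(
+            r.valid and r.op_ret == -1 and r.op_errno == ENOENT
+            for r in replies)
+        # Before the patch, any in-flight create failed the lookup
+        # outright; now it only fails when the successes lack quorum.
+        if in_flight and len(successes) < quorum_count:
+            return (-1, ENOENT)
+        if not successes:
+            return (-1, EIO)  # no readable subvolume
+        return (0, None)
+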
diff --git a/SOURCES/0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch b/SOURCES/0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
new file mode 100644
index 0000000..56d4feb
--- /dev/null
+++ b/SOURCES/0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
@@ -0,0 +1,90 @@
+From 6c3b21ce5bb76b35856a6c270eb65d11f869061f Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Fri, 26 Jun 2020 12:10:31 +0530
+Subject: [PATCH 484/511] glusterd: rebalance status displays stats as 0 after
+ reboot
+
+Problem: While a rebalance is in progress, if a node is rebooted,
+'rebalance <volname> status' shows the stats of this node as 0 once
+the node is back.
+
+Reason: When the node comes back up after the reboot,
+glusterd_volume_defrag_restart() starts the rebalance and creates the
+rpc. But due to a race, the rebalance process sends a disconnect
+event, so the rpc object gets destroyed. As the rpc object is null,
+the request for fetching the latest stats is never sent to the
+rebalance process, and the stats are shown with their default
+values, which are 0.
+
+Solution: When the rpc object is null, create the rpc if the
+rebalance process is up, so that the request can be sent to the
+rebalance process over that rpc.
+
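+The control flow, as a minimal Python sketch (illustrative names; the
+real change is in C, in gd_brick_op_phase()):
+
+    from dataclasses import dataclass
+    from typing import Callable, Optional
+
+    @dataclass
+    class PendingNode:                  # illustrative stand-in for the C struct
+        node_type: str
+        create_rpc: Callable[[], int]   # returns 0 on success, -1 if daemon down
+        rpc: Optional[object] = None
+
+    def get_brick_op_rpc(node: PendingNode) -> Optional[object]:
+        if node.rpc is None and node.node_type == "REBALANCE":
+            if node.create_rpc() != 0:
+                # Daemon is down: the caller fills in default (zero) stats.
+                return None
+            node.rpc = object()         # stands in for defrag_rpc_get()
+        return node.rpc
+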
+>fixes: #1339
+>Change-Id: I1c7533fedd17dcaffc0f7a5a918c87356133a81c
+>Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+Upstream Patch : https://review.gluster.org/c/glusterfs/+/24641
+
+BUG: 1832306
+Change-Id: I1c7533fedd17dcaffc0f7a5a918c87356133a81c
+Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220369
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-syncop.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+index c78983a..df78fef 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+@@ -1693,6 +1693,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+     rpc_clnt_t *rpc = NULL;
+     dict_t *rsp_dict = NULL;
+     int32_t cmd = GF_OP_CMD_NONE;
++    glusterd_volinfo_t *volinfo = NULL;
+ 
+     this = THIS;
+     rsp_dict = dict_new();
+@@ -1724,18 +1725,28 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+     cds_list_for_each_entry_safe(pending_node, tmp, &selected, list)
+     {
+         rpc = glusterd_pending_node_get_rpc(pending_node);
++        /* In the case of rebalance, if the rpc object is null, try to
++         * create it. If the rebalance daemon is down, creation fails
++         * with -1; otherwise the rpc object is created and referenced.
++         */
+         if (!rpc) {
+-            if (pending_node->type == GD_NODE_REBALANCE) {
+-                ret = 0;
+-                glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
++            if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
++                volinfo = pending_node->node;
++                ret = glusterd_rebalance_rpc_create(volinfo);
++                if (ret) {
++                    ret = 0;
++                    glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
++                    goto out;
++                } else {
++                    rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag);
++                }
++            } else {
++                ret = -1;
++                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
++                       "Brick Op failed "
++                       "due to rpc failure.");
+                 goto out;
+             }
+-
+-            ret = -1;
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
+-                   "Brick Op failed "
+-                   "due to rpc failure.");
+-            goto out;
+         }
+ 
+         /* Redirect operation to be detach tier via rebalance flow. */
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch b/SOURCES/0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
new file mode 100644
index 0000000..6ed4f1c
--- /dev/null
+++ b/SOURCES/0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
@@ -0,0 +1,87 @@
+From 2e6a5e504e66bc95208420e4882e453a53ac9ea2 Mon Sep 17 00:00:00 2001
+From: schaffung <ssivakum@redhat.com>
+Date: Mon, 2 Nov 2020 11:18:01 +0530
+Subject: [PATCH 485/511] cli-rpc: conditional init of global quota rpc (#1578)
+
+Issue: The rpc used to connect with quotad is initialized in every
+glusterfs cli command, irrespective of whether the quota feature is
+enabled or disabled. This is overkill.
+
+Code change: The presence of the quotad pid file (QUOTAD_PID_PATH,
+i.e. /var/run/gluster/quotad/quotad.pid) signals that quotad is
+enabled. Hence we can check whether this file exists and, if it
+doesn't, skip the initialization of the global quotad rpc.
+
+This also reduces the extra rpc calls and operations performed in
+kernel space.
+
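+As a minimal Python sketch of the same conditional-init pattern (the
+actual change is in C, in cli.c, using sys_access()):
+
+    import os
+
+    QUOTAD_PID_PATH = "/var/run/gluster/quotad/quotad.pid"  # as in cli.h
+
+    def init_quotad_rpc_if_enabled(init_rpc):
+        # Pay the rpc setup cost only when quotad is actually running,
+        # signalled by the presence of its pid file.
+        if os.access(QUOTAD_PID_PATH, os.F_OK):
+            return init_rpc()
+        return None
+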
+>Fixes: #1577
+>Change-Id: Icb69d35330f76ce95626f59af75a12726eb620ff
+>Signed-off-by: srijan-sivakumar <ssivakumar@redhat.com>
+Upstream Patch : https://github.com/gluster/glusterfs/pull/1578
+
+BUG: 1885966
+Change-Id: Icb69d35330f76ce95626f59af75a12726eb620ff
+Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220371
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli.c | 18 +++++++++++++-----
+ cli/src/cli.h |  3 +++
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/cli/src/cli.c b/cli/src/cli.c
+index 99a16a0..a76c5a2 100644
+--- a/cli/src/cli.c
++++ b/cli/src/cli.c
+@@ -64,8 +64,7 @@
+ extern int connected;
+ /* using argp for command line parsing */
+ 
+-const char *argp_program_version =
+-    PACKAGE_NAME" "PACKAGE_VERSION;
++const char *argp_program_version = PACKAGE_NAME " " PACKAGE_VERSION;
+ const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
+ 
+ struct rpc_clnt *global_quotad_rpc;
+@@ -840,9 +839,18 @@ main(int argc, char *argv[])
+     if (!global_rpc)
+         goto out;
+ 
+-    global_quotad_rpc = cli_quotad_clnt_rpc_init();
+-    if (!global_quotad_rpc)
+-        goto out;
++    /*
++     * The global quota rpc need not be initialized unless
++     * quota is enabled. Check for the quotad pid file and
++     * skip the rpc initialization when quotad is not
++     * running.
++     */
++    ret = sys_access(QUOTAD_PID_PATH, F_OK);
++    if (!ret) {
++        global_quotad_rpc = cli_quotad_clnt_rpc_init();
++        if (!global_quotad_rpc)
++            goto out;
++    }
+ 
+     ret = cli_cmds_register(&state);
+     if (ret)
+diff --git a/cli/src/cli.h b/cli/src/cli.h
+index 37e4d9d..c30ae9c 100644
+--- a/cli/src/cli.h
++++ b/cli/src/cli.h
+@@ -30,6 +30,9 @@
+ #define CLI_TAB_LENGTH 8
+ #define CLI_BRICK_STATUS_LINE_LEN 78
+ 
++// Quotad pid path.
++#define QUOTAD_PID_PATH "/var/run/gluster/quotad/quotad.pid"
++
+ /* Geo-rep command positional arguments' index  */
+ #define GEO_REP_CMD_INDEX 1
+ #define GEO_REP_CMD_CONFIG_INDEX 4
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0486-glusterd-brick-sock-file-deleted-log-error-1560.patch b/SOURCES/0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
new file mode 100644
index 0000000..60750db
--- /dev/null
+++ b/SOURCES/0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
@@ -0,0 +1,87 @@
+From 9b19d4841fc3002d30ec3e44c85ec37682c11bfb Mon Sep 17 00:00:00 2001
+From: schaffung <ssivakum@redhat.com>
+Date: Thu, 22 Oct 2020 13:07:09 +0530
+Subject: [PATCH 486/511] glusterd: brick sock file deleted, log error (#1560)
+
+Issue: The status of a brick as tracked by glusterd is
+'stopped' if the socket file corresponding to the running
+brick process is absent in /var/run/gluster. glusterd keeps
+trying to reconnect (at the rpc layer) but it always fails.
+
+Code change: Rather than registering the rpc connection
+with a socket file path that does not even exist, which
+leads to endless reconnect attempts, log this as an error
+and do not try to reconnect using the non-existent sock
+file path.
+
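+The idea, as a minimal Python sketch (the actual change is in C, in
+glusterd-utils.c, using sys_access() and gf_smsg()):
+
+    import logging
+    import os
+
+    def usable_sock_path(sockpath):
+        # Refuse a socket path that does not exist: log an error instead
+        # of letting the rpc layer attempt to reconnect forever.
+        if not os.access(sockpath, os.F_OK):
+            logging.error("%s not found", sockpath)
+            return None
+        return sockpath
+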
+>Fixes: #1526
+>Change-Id: I6c81691ab1624c66dec74f5ffcc6c383201ac757
+>Signed-off-by: srijan-sivakumar <ssivakumar@redhat.com>
+Upstream Patch : https://github.com/gluster/glusterfs/pull/1560
+
+BUG: 1882923
+Change-Id: I6c81691ab1624c66dec74f5ffcc6c383201ac757
+Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220376
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index d25fc8a..a72c494 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -6310,7 +6310,7 @@ find_compatible_brick(glusterd_conf_t *conf, glusterd_volinfo_t *volinfo,
+    check if passed pid is match with running  glusterfs process
+ */
+ 
+-int
++static int
+ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
+ {
+     char fname[128] = "";
+@@ -6383,7 +6383,17 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
+ 
+     if (tmpsockpath[0]) {
+         strncpy(sockpath, tmpsockpath, i);
+-        ret = 0;
++        /*
++         * Check whether the brick socket file is present at the
++         * stated path. This prevents the constant re-connects
++         * triggered in the RPC layer, and the log message helps
++         * the user diagnose the problem.
++         */
++        ret = sys_access(sockpath, F_OK);
++        if (ret) {
++            gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_NOT_FOUND,
++                    "%s not found", sockpath, NULL);
++        }
+     }
+ 
+     return ret;
+@@ -6581,7 +6591,20 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
+             if (!is_brick_mx_enabled()) {
+                 glusterd_set_brick_socket_filepath(
+                     volinfo, brickinfo, socketpath, sizeof(socketpath));
++                /*
++                 * Check whether the brick socket file is present at
++                 * the stated path. This prevents the constant
++                 * re-connects triggered in the RPC layer, and the
++                 * log message helps the user diagnose the problem.
++                 */
++                ret = sys_access(socketpath, F_OK);
++                if (ret) {
++                    gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_NOT_FOUND,
++                            "%s not found", socketpath, NULL);
++                    goto out;
++                }
+             }
++
+             gf_log(this->name, GF_LOG_DEBUG,
+                    "Using %s as sockfile for brick %s of volume %s ",
+                    socketpath, brickinfo->path, volinfo->volname);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0487-Events-Log-file-not-re-opened-after-logrotate.patch b/SOURCES/0487-Events-Log-file-not-re-opened-after-logrotate.patch
new file mode 100644
index 0000000..ac0d1cc
--- /dev/null
+++ b/SOURCES/0487-Events-Log-file-not-re-opened-after-logrotate.patch
@@ -0,0 +1,56 @@
+From c961ee1d7c1abb2552b79ed39ed7fd1bd1b3962f Mon Sep 17 00:00:00 2001
+From: srijan-sivakumar <ssivakum@redhat.com>
+Date: Fri, 7 Aug 2020 15:02:07 +0530
+Subject: [PATCH 487/511] Events: Log file not re-opened after logrotate.
+
+Issue: Logging continues into the same file even after the
+logrotate utility has rotated it. This causes the rotated
+logfile to grow indefinitely.
+
+Code Changes: Use the WatchedFileHandler class instead of the
+FileHandler class. It watches the file it is logging into and,
+if the file changes, closes it and reopens it using the file
+name. Hence, after a log rotation, a new file is used for
+logging instead of the daemon continuing to write to the old
+file.
+
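+A minimal standalone Python sketch of the fix (the log file path,
+logger name, and format string here are illustrative):
+
+    import logging
+    import logging.handlers
+
+    LOG_FILE = "events.log"   # illustrative; the daemon uses its own path
+
+    logger = logging.getLogger("gluster-events")
+    logger.setLevel(logging.INFO)
+
+    # WatchedFileHandler stats the file before each emit; if the inode
+    # has changed (e.g. logrotate moved the file), it reopens it by name.
+    fh = logging.handlers.WatchedFileHandler(LOG_FILE)
+    fh.setFormatter(logging.Formatter(
+        "[%(asctime)s] %(levelname)s - %(message)s"))
+    logger.addHandler(fh)
+    logger.info("this line lands in the new file after a rotation")
+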
+>Fixes: #1289
+>Change-Id: I773d04f17613a03709cb682692efb39fd8e664e2
+>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Upstream Patch : https://review.gluster.org/c/glusterfs/+/24820
+
+BUG: 1814744
+Change-Id: I773d04f17613a03709cb682692efb39fd8e664e2
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220370
+Reviewed-by: Shwetha Acharya <sacharya@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ events/src/utils.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/events/src/utils.py b/events/src/utils.py
+index 38b707a..6d4e079 100644
+--- a/events/src/utils.py
++++ b/events/src/utils.py
+@@ -13,6 +13,7 @@ import sys
+ import json
+ import os
+ import logging
++import logging.handlers
+ import fcntl
+ from errno import EBADF
+ from threading import Thread
+@@ -98,7 +99,7 @@ def setup_logger():
+     logger.setLevel(logging.INFO)
+ 
+     # create the logging file handler
+-    fh = logging.FileHandler(LOG_FILE)
++    fh = logging.handlers.WatchedFileHandler(LOG_FILE)
+ 
+     formatter = logging.Formatter("[%(asctime)s] %(levelname)s "
+                                   "[%(module)s - %(lineno)s:%(funcName)s] "
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0488-glusterd-afr-enable-granular-entry-heal-by-default.patch b/SOURCES/0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
new file mode 100644
index 0000000..310bc53
--- /dev/null
+++ b/SOURCES/0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
@@ -0,0 +1,864 @@
+From 0502383024cbf7e4776816e0a992dccc484a3cf2 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Tue, 8 Dec 2020 17:23:22 +0530
+Subject: [PATCH 488/511] glusterd/afr: enable granular-entry-heal by default
+
+XXXXXXXXXXXXXXXXXXX
+    IMPORTANT:
+XXXXXXXXXXXXXXXXXXXX
+I see that for rhgs-3.5.3, GD_OP_VERSION_MAX is GD_OP_VERSION_7_0. Since
+this patch should only act on new volumes in rhgs-3.5.4, I am bumping
+the op-version to GD_OP_VERSION_7_1. In glusterfs upstream, the patch
+acts only if op-version >= GD_OP_VERSION_9_0 as seen in the commit
+message below.
+
+Upstream patch details:
+/------------------------------------------------------------------------------/
+1. The option has been enabled and tested for quite some time now in RHHI-V
+downstream and I think it is safe to make it 'on' by default. Since it
+is not possible to simply change it from 'off' to 'on' without breaking
+rolling upgrades, old clients etc., I have made it default only for new volumes
+starting from op-version GD_OP_VERSION_9_0.
+
+Note: If you do a volume reset, the option will be turned back off.
+This is okay as the directory's gfid will be captured in the 'xattrop'
+folder and heals will proceed. There might be stale entries inside the
+'entry-changes' folder,
+which will be removed when we enable the option again.
+
+2. I encountered a customer issue where entry heal was pending on a
+directory with 236436 files in it; the glustershd.log output was just stuck at
+"performing entry selfheal", so I have added logs to give us
+more info in DEBUG level about whether entry heal and data heal are
+progressing (metadata heal doesn't take much time). That way, we have a
+quick visual indication to say things are not 'stuck' if we briefly
+enable debug logs, instead of taking statedumps or checking profile info
+etc.
+
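+A minimal Python sketch of the accompanying syncop_dir_scan() change
+(illustrative): the scan now visits every entry and ORs the results
+instead of aborting on the first failure, so one bad dirent no longer
+hides the rest:
+
+    def scan_dir(entries, fn):
+        ret = 0
+        for name in entries:
+            if name in (".", ".."):
+                continue
+            ret |= fn(name)   # accumulate instead of breaking on failure
+        return ret
+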
+>Fixes: #1483
+>Change-Id: I4f116f8c92f8cd33f209b758ff14f3c7e1981422
+>Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Upstream Patch: https://github.com/gluster/glusterfs/pull/1621
+/------------------------------------------------------------------------------/
+
+BUG: 1890506
+Change-Id: If449a1e873633616cfc508d74b5c22eb434b55ae
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220555
+Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/globals.h               |   4 +-
+ libglusterfs/src/syncop-utils.c                    |   4 +-
+ tests/basic/afr/add-brick-self-heal-non-granular.t |  75 +++++++++++++
+ tests/basic/afr/add-brick-self-heal.t              |   4 +-
+ tests/basic/afr/bug-1130892-non-granular.t         |  77 ++++++++++++++
+ .../basic/afr/bug-1493415-gfid-heal-non-granular.t |  79 ++++++++++++++
+ ...507-type-mismatch-error-handling-non-granular.t | 117 +++++++++++++++++++++
+ ...1749322-entry-heal-not-happening-non-granular.t |  90 ++++++++++++++++
+ .../afr/replace-brick-self-heal-non-granular.t     |  65 ++++++++++++
+ tests/basic/afr/replace-brick-self-heal.t          |   2 +-
+ tests/bugs/replicate/bug-1130892.t                 |   2 +-
+ tests/bugs/replicate/bug-1493415-gfid-heal.t       |   2 +-
+ .../bug-1722507-type-mismatch-error-handling.t     |  26 +++--
+ .../bug-1749322-entry-heal-not-happening.t         |   7 +-
+ xlators/cluster/afr/src/afr-self-heal-common.c     |   5 +
+ xlators/cluster/afr/src/afr-self-heal-data.c       |   3 +
+ xlators/cluster/afr/src/afr-self-heal-entry.c      |   7 +-
+ xlators/mgmt/glusterd/src/glusterd-utils.c         |  13 +++
+ 18 files changed, 558 insertions(+), 24 deletions(-)
+ create mode 100644 tests/basic/afr/add-brick-self-heal-non-granular.t
+ create mode 100644 tests/basic/afr/bug-1130892-non-granular.t
+ create mode 100644 tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
+ create mode 100644 tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
+ create mode 100644 tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
+ create mode 100644 tests/basic/afr/replace-brick-self-heal-non-granular.t
+
+diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
+index 31717ed..cc145cd 100644
+--- a/libglusterfs/src/glusterfs/globals.h
++++ b/libglusterfs/src/glusterfs/globals.h
+@@ -50,7 +50,7 @@
+     1 /* MIN is the fresh start op-version, mostly                             \
+          should not change */
+ #define GD_OP_VERSION_MAX                                                      \
+-    GD_OP_VERSION_7_0 /* MAX VERSION is the maximum                            \
++    GD_OP_VERSION_7_1 /* MAX VERSION is the maximum                            \
+                          count in VME table, should                            \
+                          keep changing with                                    \
+                          introduction of newer                                 \
+@@ -138,6 +138,8 @@
+ 
+ #define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */
+ 
++#define GD_OP_VERSION_7_1 70100 /* Op-version for GlusterFS 7.1 */
++
+ #include "glusterfs/xlator.h"
+ #include "glusterfs/options.h"
+ 
+diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
+index be03527..2269c76 100644
+--- a/libglusterfs/src/syncop-utils.c
++++ b/libglusterfs/src/syncop-utils.c
+@@ -495,9 +495,7 @@ syncop_dir_scan(xlator_t *subvol, loc_t *loc, int pid, void *data,
+             if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
+                 continue;
+ 
+-            ret = fn(subvol, entry, loc, data);
+-            if (ret)
+-                break;
++            ret |= fn(subvol, entry, loc, data);
+         }
+         gf_dirent_free(&entries);
+         if (ret)
+diff --git a/tests/basic/afr/add-brick-self-heal-non-granular.t b/tests/basic/afr/add-brick-self-heal-non-granular.t
+new file mode 100644
+index 0000000..19caf24
+--- /dev/null
++++ b/tests/basic/afr/add-brick-self-heal-non-granular.t
+@@ -0,0 +1,75 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
++EXPECT 'Created' volinfo_field $V0 'Status';
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++TEST $CLI volume start $V0
++EXPECT 'Started' volinfo_field $V0 'Status';
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++
++TEST $CLI volume set $V0 cluster.data-self-heal off
++TEST $CLI volume set $V0 cluster.metadata-self-heal off
++TEST $CLI volume set $V0 cluster.entry-self-heal off
++TEST $CLI volume set $V0 cluster.heal-timeout 5
++
++TEST $CLI volume set $V0 self-heal-daemon off
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
++
++# Create files
++for i in {1..5}
++do
++        echo $i > $M0/file$i.txt
++done
++
++# Metadata changes
++TEST setfattr -n user.test -v qwerty $M0/file5.txt
++
++# Add brick1
++TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++
++# New-brick should accuse the old-bricks (simulating a data-loss case)
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}2/
++
++# Check if pending xattr and dirty-xattr are set for newly-added-brick
++EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
++EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
++EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}2
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
++
++TEST $CLI volume set $V0 self-heal-daemon on
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
++TEST $CLI volume heal $V0
++
++# Wait for heal to complete
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++# Check if entry-heal has happened
++TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}2 | sort)
++TEST diff <(ls $B0/${V0}1 | sort) <(ls $B0/${V0}2 | sort)
++
++# Test if data was healed
++TEST diff $B0/${V0}0/file1.txt $B0/${V0}2/file1.txt
++
++# Test if metadata was healed and exists on both the bricks
++EXPECT "qwerty" get_text_xattr user.test $B0/${V0}2/file5.txt
++EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
++
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2
++
++cleanup;
+diff --git a/tests/basic/afr/add-brick-self-heal.t b/tests/basic/afr/add-brick-self-heal.t
+index c847e22..7ebf4f6 100644
+--- a/tests/basic/afr/add-brick-self-heal.t
++++ b/tests/basic/afr/add-brick-self-heal.t
+@@ -38,8 +38,8 @@ TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0
+ TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}2/
+ 
+ # Check if pending xattr and dirty-xattr are set for newly-added-brick
+-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
++EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
++EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+ EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}2
+ 
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+diff --git a/tests/basic/afr/bug-1130892-non-granular.t b/tests/basic/afr/bug-1130892-non-granular.t
+new file mode 100644
+index 0000000..3cdbc7d
+--- /dev/null
++++ b/tests/basic/afr/bug-1130892-non-granular.t
+@@ -0,0 +1,77 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume info;
++
++# Create a 1X2 replica
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
++EXPECT 'Created' volinfo_field $V0 'Status';
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++
++# Disable self-heal daemon
++TEST gluster volume set $V0 self-heal-daemon off
++
++# Disable client-side heals
++TEST $CLI volume set $V0 cluster.data-self-heal off
++TEST $CLI volume set $V0 cluster.metadata-self-heal off
++TEST $CLI volume set $V0 cluster.entry-self-heal off
++
++# Disable all perf-xlators
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.read-ahead off
++
++# Volume start
++TEST $CLI volume start $V0;
++EXPECT 'Started' volinfo_field $V0 'Status';
++
++# FUSE Mount
++TEST ${GFS} -s $H0 --volfile-id $V0 $M0
++
++# Create files and dirs
++TEST mkdir -p $M0/one/two/
++TEST `echo "Carpe diem" > $M0/one/two/three`
++
++# Simulate disk-replacement
++TEST kill_brick $V0 $H0 $B0/${V0}-1
++EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
++TEST rm -rf $B0/${V0}-1/one
++TEST rm -rf $B0/${V0}-1/.glusterfs
++
++#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
++#which will create .glusterfs folder.
++mkdir $B0/${V0}-1/.glusterfs && chmod 600 $B0/${V0}-1/.glusterfs
++
++# Start force
++TEST $CLI volume start $V0 force
++
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++
++TEST stat $M0/one
++
++sleep 1
++
++# Check pending xattrs
++EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
++EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
++EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
++
++TEST gluster volume set $V0 self-heal-daemon on
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
++EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
++EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three
++
++cleanup;
+diff --git a/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t b/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
+new file mode 100644
+index 0000000..aff001c
+--- /dev/null
++++ b/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
+@@ -0,0 +1,79 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++TEST $CLI volume set $V0 self-heal-daemon off
++
++# Create base entry in indices/xattrop
++echo "Data" > $M0/FILE
++
++#------------------------------------------------------------------------------#
++TEST touch $M0/f1
++gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/f1)
++gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
++
++# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
++# brick crash at the point where file got created but no xattrs were set.
++TEST setfattr -x trusted.gfid $B0/${V0}1/f1
++TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
++
++# storage/posix considers that a file without gfid changed less than a second
++# before doesn't exist, so we need to wait for a second to force posix to
++# consider that this is a valid file but without gfid.
++sleep 2
++
++# Assume there were no pending xattrs on parent dir due to 1st brick crashing
++# too. Then name heal from client must heal the gfid.
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++TEST stat $M0/f1
++EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/f1
++TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
++
++#------------------------------------------------------------------------------#
++TEST mkdir $M0/dir
++TEST touch $M0/dir/f2
++gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir/f2)
++gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
++
++# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
++# brick crash at the point where file got created but no xattrs were set.
++TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
++TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
++
++#Now simulate setting of pending entry xattr on parent dir of 1st brick.
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
++create_brick_xattrop_entry $B0/${V0}0 dir
++
++# storage/posix considers that a file without gfid changed less than a second
++# before doesn't exist, so we need to wait for a second to force posix to
++# consider that this is a valid file but without gfid.
++sleep 2
++
++#Trigger entry-heal via shd
++TEST $CLI volume set $V0 self-heal-daemon on
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir/f2
++TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
++
++#------------------------------------------------------------------------------#
++cleanup;
+diff --git a/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t b/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
+new file mode 100644
+index 0000000..9079c93
+--- /dev/null
++++ b/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
+@@ -0,0 +1,117 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++## Start and create a volume
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume info;
++
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++TEST $CLI volume start $V0;
++TEST $CLI volume set $V0 cluster.heal-timeout 5
++TEST $CLI volume heal $V0 disable
++EXPECT 'Started' volinfo_field $V0 'Status';
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
++
++TEST mkdir $M0/dir
++
++##########################################################################################
++# GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
++
++TEST touch $M0/dir/file
++TEST `echo append>> $M0/dir/file`
++
++#B0 and B2 must blame B1
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++
++# Add entry to xattrop dir to trigger index heal.
++xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
++base_entry_b0=`ls $xattrop_dir0`
++gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
++ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
++EXPECT "^1$" get_pending_heal_count $V0
++
++# Remove the gfid xattr and the link file on one brick.
++gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
++gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
++TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
++TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++
++# Launch heal
++TEST $CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
++
++# Wait for 2 second to force posix to consider that this is a valid file but
++# without gfid.
++sleep 2
++TEST $CLI volume heal $V0
++
++# Heal should not fail: the file is only missing the gfid xattr and the
++# .glusterfs link file, which is not an actual gfid or type mismatch.
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
++TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++rm -f $M0/dir/file
++
++
++###########################################################################################
++# GFID link file and the GFID is missing on two bricks and all the bricks are being blamed.
++
++TEST $CLI volume heal $V0 disable
++TEST touch $M0/dir/file
++#TEST kill_brick $V0 $H0 $B0/$V0"1"
++
++#B0 and B2 must blame B1
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++
++# Add entry to xattrop dir to trigger index heal.
++xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
++base_entry_b0=`ls $xattrop_dir0`
++gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
++ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
++EXPECT "^1$" get_pending_heal_count $V0
++
++# Remove the gfid xattr and the link file on two bricks.
++gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
++gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
++TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
++TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
++TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++
++# Launch heal
++TEST $CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
++
++# Wait for 2 second to force posix to consider that this is a valid file but
++# without gfid.
++sleep 2
++TEST $CLI volume heal $V0
++
++# Heal should not fail: the file is only missing the gfid xattr and the
++# .glusterfs link file, which is not an actual gfid or type mismatch.
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
++TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
++TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
++
++cleanup
+diff --git a/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t b/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
+new file mode 100644
+index 0000000..4f27da4
+--- /dev/null
++++ b/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
+@@ -0,0 +1,90 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup
++
++function check_gfid_and_link_count
++{
++        local file=$1
++
++        file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
++        TEST [ ! -z $file_gfid_b0 ]
++        file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
++        file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
++        EXPECT $file_gfid_b0 echo $file_gfid_b1
++        EXPECT $file_gfid_b0 echo $file_gfid_b2
++
++        EXPECT "2" stat -c %h $B0/${V0}0/$file
++        EXPECT "2" stat -c %h $B0/${V0}1/$file
++        EXPECT "2" stat -c %h $B0/${V0}2/$file
++}
++TESTS_EXPECTED_IN_LOOP=18
++
++################################################################################
++## Start and create a volume
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume info;
++
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++TEST $CLI volume start $V0;
++TEST $CLI volume set $V0 cluster.heal-timeout 5
++TEST $CLI volume heal $V0 disable
++EXPECT 'Started' volinfo_field $V0 'Status';
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
++
++TEST mkdir $M0/dir
++TEST `echo "File 1 " > $M0/dir/file1`
++TEST touch $M0/dir/file{2..4}
++
++# Remove file2 from 1st & 3rd bricks
++TEST rm -f $B0/$V0"0"/dir/file2
++TEST rm -f $B0/$V0"2"/dir/file2
++
++# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
++gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
++gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
++TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
++TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
++TEST rm -f $B0/$V0"0"/dir/file3
++TEST rm -f $B0/$V0"1"/dir/file3
++
++# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
++gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
++gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
++TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
++TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
++
++# B0 and B2 blame each other
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++
++# Add entry to xattrop dir on first brick.
++xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
++base_entry_b0=`ls $xattrop_dir0`
++gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
++TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
++
++EXPECT "^1$" get_pending_heal_count $V0
++
++# Launch heal
++TEST $CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++# All the files must be present on all the bricks after conservative merge and
++# should have the gfid xattr and the .glusterfs hardlink.
++check_gfid_and_link_count dir/file1
++check_gfid_and_link_count dir/file2
++check_gfid_and_link_count dir/file3
++check_gfid_and_link_count dir/file4
++
++cleanup
+diff --git a/tests/basic/afr/replace-brick-self-heal-non-granular.t b/tests/basic/afr/replace-brick-self-heal-non-granular.t
+new file mode 100644
+index 0000000..c86bff1
+--- /dev/null
++++ b/tests/basic/afr/replace-brick-self-heal-non-granular.t
+@@ -0,0 +1,65 @@
++#!/bin/bash
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
++TEST $CLI volume set $V0 cluster.granular-entry-heal off
++TEST $CLI volume start $V0
++TEST $CLI volume set $V0 cluster.data-self-heal off
++TEST $CLI volume set $V0 cluster.metadata-self-heal off
++TEST $CLI volume set $V0 cluster.entry-self-heal off
++TEST $CLI volume set $V0 cluster.heal-timeout 5
++TEST $CLI volume set $V0 self-heal-daemon off
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
++
++# Create files
++for i in {1..5}
++do
++        echo $i > $M0/file$i.txt
++done
++
++# Metadata changes
++TEST setfattr -n user.test -v qwerty $M0/file5.txt
++
++# Replace brick1
++TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
++
++# Replaced-brick should accuse the non-replaced-brick (simulating a data-loss case)
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1_new/
++
++# Check if pending xattr and dirty-xattr are set for replaced-brick
++EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
++EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++
++TEST $CLI volume set $V0 self-heal-daemon on
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++TEST $CLI volume heal $V0
++
++# Wait for heal to complete
++EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
++
++# Check if entry-heal has happened
++TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
++
++# To make sure that files were not lost from brick0
++TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
++
++# Test if data was healed
++TEST diff $B0/${V0}0/file1.txt $B0/${V0}1_new/file1.txt
++# To make sure that data was not lost from brick0
++TEST diff $B0/${V0}0/file1.txt $B0/${V0}1/file1.txt
++
++# Test if metadata was healed and exists on both the bricks
++EXPECT "qwerty" get_text_xattr user.test $B0/${V0}1_new/file5.txt
++EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
++
++cleanup;
+diff --git a/tests/basic/afr/replace-brick-self-heal.t b/tests/basic/afr/replace-brick-self-heal.t
+index 0360db7..da31c87 100644
+--- a/tests/basic/afr/replace-brick-self-heal.t
++++ b/tests/basic/afr/replace-brick-self-heal.t
+@@ -30,7 +30,7 @@ TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit forc
+ TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1_new/
+ 
+ # Check if pending xattr and dirty-xattr are set for replaced-brick
+-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
++EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+ EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
+ 
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
+index 0f57d66..e23eb26 100644
+--- a/tests/bugs/replicate/bug-1130892.t
++++ b/tests/bugs/replicate/bug-1130892.t
+@@ -56,7 +56,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+ TEST stat $M0/one
+ 
+ # Check pending xattrs
+-EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
++EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+ EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
+ EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+ 
+diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
+index 125c35a..9714d5e 100644
+--- a/tests/bugs/replicate/bug-1493415-gfid-heal.t
++++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
+@@ -49,7 +49,7 @@ TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
+ TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+ 
+ #Now simulate setting of pending entry xattr on parent dir of 1st brick.
+-TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
++TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/${V0}0/dir
+ create_brick_xattrop_entry $B0/${V0}0 dir
+ 
+ #Trigger entry-heal via shd
+diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
+index 0aeaaaf..1fdf7ea 100644
+--- a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
++++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
+@@ -23,19 +23,21 @@ TEST mkdir $M0/dir
+ ##########################################################################################
+ # GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
+ 
+-TEST touch $M0/dir/file
+-#TEST kill_brick $V0 $H0 $B0/$V0"1"
++TEST `echo append>> $M0/dir/file`
+ 
+ #B0 and B2 must blame B1
+-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+-setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
++# Another way is to create the needed entries inside indices/entry-changes
++# folder.
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/$V0"0"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
+ 
+ # Add entry to xattrop dir to trigger index heal.
+ xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+ base_entry_b0=`ls $xattrop_dir0`
+ gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+-ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
++ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+ EXPECT "^1$" get_pending_heal_count $V0
+ 
+ # Remove the gfid xattr and the link file on one brick.
+@@ -70,18 +72,20 @@ rm -f $M0/dir/file
+ 
+ TEST $CLI volume heal $V0 disable
+ TEST touch $M0/dir/file
+-#TEST kill_brick $V0 $H0 $B0/$V0"1"
+ 
+ #B0 and B2 must blame B1
+-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+-setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
++# Another way is to create the needed entries inside indices/entry-changes
++# folder.
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/$V0"0"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
+ 
+ # Add entry to xattrop dir to trigger index heal.
+ xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+ base_entry_b0=`ls $xattrop_dir0`
+ gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+-ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
++ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+ EXPECT "^1$" get_pending_heal_count $V0
+ 
+ # Remove the gfid xattr and the link file on two bricks.
+diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
+index 9627908..3da873a 100644
+--- a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
++++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
+@@ -59,8 +59,11 @@ TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_
+ TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+ 
+ # B0 and B2 blame each other
+-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
++# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
++# Another way is to create the needed entries inside indices/entry-changes
++# folder.
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
++setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
+ 
+ # Add entry to xattrop dir on first brick.
+ xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index 1608f75..36fd3a9 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -2549,6 +2549,11 @@ afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid)
+         }
+     }
+ 
++    gf_msg_debug(
++        this->name, 0,
++        "heals needed for %s: [entry-heal=%d, metadata-heal=%d, data-heal=%d]",
++        uuid_utoa(gfid), entry_selfheal, metadata_selfheal, data_selfheal);
++
+     if (data_selfheal && priv->data_self_heal)
+         data_ret = afr_selfheal_data(frame, this, fd);
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
+index cdff4a5..b97c66b 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-data.c
++++ b/xlators/cluster/afr/src/afr-self-heal-data.c
+@@ -239,6 +239,9 @@ afr_selfheal_data_block(call_frame_t *frame, xlator_t *this, fd_t *fd,
+     sink_count = AFR_COUNT(healed_sinks, priv->child_count);
+     data_lock = alloca0(priv->child_count);
+ 
++    gf_msg_debug(this->name, 0, "gfid:%s, offset=%jd, size=%zu",
++                 uuid_utoa(fd->inode->gfid), offset, size);
++
+     ret = afr_selfheal_inodelk(frame, this, fd->inode, this->name, offset, size,
+                                data_lock);
+     {
+diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
+index 40be898..00b5b2d 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
+@@ -206,8 +206,11 @@ __afr_selfheal_heal_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+                                             replies);
+         } else {
+             if (!gf_uuid_compare(replies[i].poststat.ia_gfid,
+-                                 replies[source].poststat.ia_gfid))
++                                 replies[source].poststat.ia_gfid)) {
++                gf_msg_debug(this->name, 0, "skipping %s, no heal needed.",
++                             name);
+                 continue;
++            }
+ 
+             ret = afr_selfheal_recreate_entry(frame, i, source, sources,
+                                               fd->inode, name, inode, replies);
+@@ -839,7 +842,7 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
+ 
+ out:
+     loc_wipe(&loc);
+-    return 0;
++    return ret;
+ }
+ 
+ static int
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index a72c494..bd17a82 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -13181,6 +13181,19 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
+             goto out;
+         }
+     }
++
++    if ((conf->op_version >= GD_OP_VERSION_7_1) &&
++        (volinfo->status == GLUSTERD_STATUS_NONE)) {
++        ret = dict_set_dynstr_with_alloc(volinfo->dict,
++                                         "cluster.granular-entry-heal", "on");
++        if (ret) {
++            gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
++                   "Failed to set option 'cluster.granular-entry-heal' "
++                   "on volume %s",
++                   volinfo->volname);
++            goto out;
++        }
++    }
+ out:
+     return ret;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch b/SOURCES/0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
new file mode 100644
index 0000000..dde2156
--- /dev/null
+++ b/SOURCES/0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
@@ -0,0 +1,141 @@
+From 2d172144810956225eac3599c943416c4a7e25d0 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Tue, 8 Dec 2020 20:30:23 +0530
+Subject: [PATCH 489/511] glusterd: fix bug in enabling granular-entry-heal
+
+Upstream patch details:
+/------------------------------------------------------------------------------/
+commit f5e1eb87d4af44be3b317b7f99ab88f89c2f0b1a meant to enable the
+volume option only for replica volumes but inadvertently enabled
+it for all volume types. Fixing it now.
+
+Also found a bug in glusterd where disabling the option on a plain
+distribute volume was succeeding even though setting it in the
+first place fails. Fixed that too.
+
+>Fixes: #1483
+>Change-Id: Icb6c169a8eec44cc4fb4dd636405d3b3485e91b4
+>Reported-by: Sheetal Pamecha <spamecha@redhat.com>
+>Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Upstream Patch: https://github.com/gluster/glusterfs/pull/1752
+/------------------------------------------------------------------------------/
+
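+The corrected gate, as a minimal Python sketch (constants and checks
+mirror the C code in glusterd_enable_default_options()):
+
+    GD_OP_VERSION_7_1 = 70100   # op-version constant introduced by this series
+
+    def default_granular_entry_heal(op_version, is_new_volume, is_replicate):
+        # Enable cluster.granular-entry-heal by default only for brand-new
+        # replicate volumes, and only once the cluster op-version allows it.
+        return (op_version >= GD_OP_VERSION_7_1
+                and is_new_volume   # volinfo->status == GLUSTERD_STATUS_NONE
+                and is_replicate)   # volinfo->type == GF_CLUSTER_TYPE_REPLICATE
+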
+BUG: 1890506
+Change-Id: Id63655dac08d2cfda4899d7ee0efe96e72cd6986
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220556
+Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/basic/afr/granular-esh/cli.t              | 30 ++++++++++++++++++++-----
+ xlators/mgmt/glusterd/src/glusterd-utils.c      |  3 ++-
+ xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 12 +++++-----
+ 3 files changed, 34 insertions(+), 11 deletions(-)
+
+diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
+index 995d93e..5ab2e39 100644
+--- a/tests/basic/afr/granular-esh/cli.t
++++ b/tests/basic/afr/granular-esh/cli.t
+@@ -11,25 +11,38 @@ TESTS_EXPECTED_IN_LOOP=4
+ TEST glusterd
+ TEST pidof glusterd
+ 
+-TEST   $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+-# Test that enabling the option should work on a newly created volume
+-TEST   $CLI volume set $V0 cluster.granular-entry-heal on
+-TEST   $CLI volume set $V0 cluster.granular-entry-heal off
+-
+ #########################
+ ##### DISPERSE TEST #####
+ #########################
+ # Execute the same command on a disperse volume and make sure it fails.
+ TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
++EXPECT "no" volume_get_field $V1 cluster.granular-entry-heal
++TEST $CLI volume start $V1
++TEST ! $CLI volume heal $V1 granular-entry-heal enable
++TEST ! $CLI volume heal $V1 granular-entry-heal disable
++
++TEST $CLI volume stop $V1
++TEST $CLI volume delete $V1
++
++#################################
++##### PLAIN DISTRIBUTE TEST #####
++#################################
++# Execute the same command on a distribute volume and make sure it fails.
++TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1,2}
++EXPECT "no" volume_get_field $V1 cluster.granular-entry-heal
+ TEST $CLI volume start $V1
+ TEST ! $CLI volume heal $V1 granular-entry-heal enable
+ TEST ! $CLI volume heal $V1 granular-entry-heal disable
++TEST $CLI volume stop $V1
++TEST $CLI volume delete $V1
+ 
+ #######################
+ ###### TIER TEST ######
+ #######################
+ # Execute the same command on a disperse + replicate tiered volume and make
+ # sure the option is set on the replicate leg of the volume
++TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
++TEST $CLI volume start $V1
+ TEST $CLI volume tier $V1 attach replica 2 $H0:$B0/${V1}{3,4}
+ TEST $CLI volume heal $V1 granular-entry-heal enable
+ EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+@@ -52,10 +65,17 @@ TEST kill_brick $V1 $H0 $B0/${V1}3
+ # failed.
+ TEST ! $CLI volume heal $V1 granular-entry-heal enable
+ EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
++TEST $CLI volume stop $V1
++TEST $CLI volume delete $V1
+ 
+ ######################
+ ### REPLICATE TEST ###
+ ######################
++TEST   $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
++EXPECT "on" volume_get_field $V0 cluster.granular-entry-heal
++# Test that setting the option works on a newly created volume
++TEST   $CLI volume set $V0 cluster.granular-entry-heal on
++TEST   $CLI volume set $V0 cluster.granular-entry-heal off
+ TEST   $CLI volume start $V0
+ TEST   $CLI volume set $V0 cluster.data-self-heal off
+ TEST   $CLI volume set $V0 cluster.metadata-self-heal off
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index bd17a82..ad3750e 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -13183,7 +13183,8 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
+     }
+ 
+     if ((conf->op_version >= GD_OP_VERSION_7_1) &&
+-        (volinfo->status == GLUSTERD_STATUS_NONE)) {
++        (volinfo->status == GLUSTERD_STATUS_NONE) &&
++        (volinfo->type == GF_CLUSTER_TYPE_REPLICATE)) {
+         ret = dict_set_dynstr_with_alloc(volinfo->dict,
+                                          "cluster.granular-entry-heal", "on");
+         if (ret) {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+index 134b04c..09e6ead 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+@@ -621,11 +621,13 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
+         goto out;
+     }
+ 
+-    if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+-         (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+-        (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+-        ret = -1;
+-        goto out;
++    if ((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
++        (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
++        if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
++            (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
++            ret = -1;
++            goto out;
++        }
+     }
+ 
+     if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+-- 
+1.8.3.1
+
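The two code hunks in this patch cooperate: glusterd_enable_default_options() now applies the default only to replicate volumes, and the heal-option handler rejects granular-entry-heal enable and disable alike for anything that is not replicate or tier, so disable can no longer succeed where enable would have failed. A hedged sketch of that validation, with illustrative enum names rather than glusterd's real types:

#include <stdio.h>

enum vol_type { VOL_DISTRIBUTE, VOL_REPLICATE, VOL_DISPERSE, VOL_TIER };
enum heal_op { HEAL_GRANULAR_ENABLE, HEAL_GRANULAR_DISABLE, HEAL_FULL };

/* Both enable and disable are rejected for volume types that cannot
 * use granular entry heal, matching the reworked check in
 * glusterd_handle_heal_options_enable_disable(). */
static int
validate_granular_heal_op(enum heal_op op, enum vol_type type)
{
    if (op == HEAL_GRANULAR_ENABLE || op == HEAL_GRANULAR_DISABLE) {
        if (type != VOL_REPLICATE && type != VOL_TIER)
            return -1; /* unsupported volume type */
    }
    return 0;
}

int main(void)
{
    /* disable on plain distribute now fails, just like enable */
    printf("distribute disable: %d\n",
           validate_granular_heal_op(HEAL_GRANULAR_DISABLE, VOL_DISTRIBUTE));
    printf("replicate enable:   %d\n",
           validate_granular_heal_op(HEAL_GRANULAR_ENABLE, VOL_REPLICATE));
    return 0;
}
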
diff --git a/SOURCES/0490-Segmentation-fault-occurs-during-truncate.patch b/SOURCES/0490-Segmentation-fault-occurs-during-truncate.patch
new file mode 100644
index 0000000..bd3c777
--- /dev/null
+++ b/SOURCES/0490-Segmentation-fault-occurs-during-truncate.patch
@@ -0,0 +1,57 @@
+From 5a110946b41619577b365cdceddc4da551ff49f0 Mon Sep 17 00:00:00 2001
+From: kinsu <vpolakis@gmail.com>
+Date: Thu, 19 Sep 2019 08:34:32 +0000
+Subject: [PATCH 490/511] Segmentation fault occurs during truncate
+
+Problem:
+A segmentation fault occurs when bricks are nearly 100% full and a
+truncate of a file is attempted in parallel (No space left on device).
+A prerequisite is that performance xlators are activated
+(read-ahead, write-behind, etc.).
+During stack unwind of the frames following an error response
+from the brick (No space left on device), frame->local contains a memory
+location that was allocated via calloc, not via mem_get.
+The destroyed frame is always ra_truncate_cbk wound from ra_ftruncate,
+and the inode ptr is copied to the frame local in wb_ftruncate.
+
+Fix:
+An extra check is added for the pool_list pointer.
+
+>Change-Id: Ic5d3bd0ab7011e40b2811c6dece063b256e4d9d1
+>Fixes: bz#1797882
+>Signed-off-by: kinsu <vpolakis@gmail.com>
+
+Upstream-patch: https://review.gluster.org/c/glusterfs/+/23445
+
+BUG: 1842449
+Change-Id: Ic5d3bd0ab7011e40b2811c6dece063b256e4d9d1
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220540
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/mem-pool.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
+index 73503e0..1390747 100644
+--- a/libglusterfs/src/mem-pool.c
++++ b/libglusterfs/src/mem-pool.c
+@@ -857,6 +857,14 @@ mem_put(void *ptr)
+         /* Not one of ours; don't touch it. */
+         return;
+     }
++
++    if (!hdr->pool_list) {
++        gf_msg_callingfn("mem-pool", GF_LOG_CRITICAL, EINVAL,
++                         LG_MSG_INVALID_ARG,
++                         "invalid argument hdr->pool_list NULL");
++        return;
++    }
++
+     pool_list = hdr->pool_list;
+     pt_pool = &pool_list->pools[hdr->power_of_two - POOL_SMALLEST];
+ 
+-- 
+1.8.3.1
+
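The guard added to mem_put() refuses objects whose header lacks a pool_list pointer, since such memory was obtained from calloc rather than mem_get() and pushing it into a pool would corrupt the pool. A compact sketch of the idea, with a simplified header standing in for libglusterfs' mem-pool internals:

#include <stdio.h>

#define POOL_MAGIC 0x706f6f6cU

struct obj_hdr {
    unsigned magic;   /* identifies pool-managed memory */
    void *pool_list;  /* NULL when the memory came from calloc/malloc */
};

static void
sketch_mem_put(struct obj_hdr *hdr)
{
    if (hdr->magic != POOL_MAGIC)
        return;                 /* not one of ours; don't touch it */
    if (!hdr->pool_list) {
        fprintf(stderr, "invalid argument: hdr->pool_list is NULL\n");
        return;                 /* refuse instead of dereferencing junk */
    }
    /* ...the real code pushes the object back onto its thread pool... */
}

int main(void)
{
    struct obj_hdr bad = { POOL_MAGIC, NULL }; /* calloc'd, not mem_get'd */
    sketch_mem_put(&bad);                      /* logs and returns safely */
    return 0;
}
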
diff --git a/SOURCES/0491-glusterd-mount-directory-getting-truncated-on-mounti.patch b/SOURCES/0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
new file mode 100644
index 0000000..375cfd2
--- /dev/null
+++ b/SOURCES/0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
@@ -0,0 +1,56 @@
+From 0fed8ca9c6c9e3a9041951bc748c7936d0abc8cf Mon Sep 17 00:00:00 2001
+From: nik-redhat <nladha@redhat.com>
+Date: Tue, 15 Sep 2020 16:20:19 +0530
+Subject: [PATCH 491/511] glusterd: mount directory getting truncated on
+ mounting shared_storage
+
+Issue:
+In the case of a user-created volume the mount point is the
+brick path (e.g. /data/brick), but in the case of
+shared_storage the mount point is '/'. The code increments the
+pointer by one to get the exact brick path without the leading
+'/', which works fine for other volumes because the brick_dir
+pointer is then at '/', but for shared_storage it is at 'v'
+(the first letter of the 'var' directory). So after
+incrementing, the path for shared_storage wrongly starts from
+'ar/lib/glusterd/...'.
+
+Fix:
+Only increment the pointer if the current character is '/';
+otherwise the resulting path is wrong.
+
+>Fixes: #1480
+
+>Change-Id: Id31bb13f58134ae2099884fbc5984c4e055fb357
+>Signed-off-by: nik-redhat <nladha@redhat.com>
+
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24989
+
+BUG: 1878077
+Change-Id: Id31bb13f58134ae2099884fbc5984c4e055fb357
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220536
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index ad3750e..b343eee 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -1221,7 +1221,8 @@ glusterd_get_brick_mount_dir(char *brickpath, char *hostname, char *mount_dir)
+         }
+ 
+         brick_dir = &brickpath[strlen(mnt_pt)];
+-        brick_dir++;
++        if (brick_dir[0] == '/')
++            brick_dir++;
+ 
+         snprintf(mount_dir, VALID_GLUSTERD_PATHMAX, "/%s", brick_dir);
+     }
+-- 
+1.8.3.1
+
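A small self-contained sketch of the corrected prefix-stripping logic; the helper name and signature here are invented for illustration, while the real code lives in glusterd_get_brick_mount_dir():

#include <stdio.h>
#include <string.h>

static void
brick_relative_path(const char *brickpath, const char *mnt_pt,
                    char *out, size_t outlen)
{
    const char *brick_dir = brickpath + strlen(mnt_pt);

    if (brick_dir[0] == '/')   /* only skip an actual separator */
        brick_dir++;
    snprintf(out, outlen, "/%s", brick_dir);
}

int main(void)
{
    char buf[256];

    /* user-created volume: brick lives under a dedicated mount */
    brick_relative_path("/data/brick", "/data", buf, sizeof(buf));
    printf("%s\n", buf);   /* /brick */

    /* shared_storage: the mount point is "/", nothing to skip */
    brick_relative_path("/var/lib/glusterd/ss_brick", "/", buf, sizeof(buf));
    printf("%s\n", buf);   /* /var/lib/glusterd/ss_brick */
    return 0;
}
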
diff --git a/SOURCES/0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch b/SOURCES/0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
new file mode 100644
index 0000000..a983baa
--- /dev/null
+++ b/SOURCES/0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
@@ -0,0 +1,188 @@
+From bde1ad97f8739f8370a2bbb92229b1b397ecd82c Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Tue, 8 Dec 2020 19:06:03 +0530
+Subject: [PATCH 492/511] afr/lookup: Pass xattr_req in while doing a selfheal
+ in lookup
+
+We were not passing xattr_req when doing a name self-heal
+or a metadata heal. Because of this, some xdata was
+missing, which caused I/O errors.
+
+Upstream patch details:
+> Change-Id: Ibfb1205a7eb0195632dc3820116ffbbb8043545f
+> Fixes: bz#1728770
+> Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+Upstream Patch : https://review.gluster.org/#/c/glusterfs/+/23024/
+
+BUG: 1726673
+Change-Id: Ibfb1205a7eb0195632dc3820116ffbbb8043545f
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220538
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/replicate/bug-1728770-pass-xattrs.t | 52 ++++++++++++++++++++++++++
+ tests/include.rc                               |  1 +
+ xlators/cluster/afr/src/afr-common.c           |  8 +++-
+ xlators/cluster/afr/src/afr-self-heal-common.c |  9 ++++-
+ xlators/cluster/afr/src/afr-self-heal.h        |  2 +-
+ 5 files changed, 67 insertions(+), 5 deletions(-)
+ create mode 100644 tests/bugs/replicate/bug-1728770-pass-xattrs.t
+
+diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
+new file mode 100644
+index 0000000..159c4fc
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
+@@ -0,0 +1,52 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../snapshot.rc
++
++cleanup;
++
++function fop_on_bad_disk {
++    local path=$1
++    mkdir $path/dir{1..1000} 2>/dev/null
++    mv $path/dir1 $path/newdir
++    touch $path/foo.txt
++    echo $?
++}
++
++function ls_fop_on_bad_disk {
++    local path=$1
++    ls $path
++    echo $?
++}
++
++TEST init_n_bricks 6;
++TEST setup_lvm 6;
++
++TEST glusterd;
++TEST pidof glusterd;
++
++TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
++TEST $CLI volume set $V0 health-check-interval 1000;
++
++TEST $CLI volume start $V0;
++
++TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
++#corrupt last disk
++dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
++
++
++# Test the disk is now returning EIO for touch and ls
++EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
++EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
++
++TEST touch $M0/foo{1..100}
++TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
++EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
++
++#check that remove-brick status should not have any failed or skipped files
++var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
++TEST [ `echo $var | awk '{print $5}'` = "0"  ]
++TEST [ `echo $var | awk '{print $6}'` = "0"  ]
++
++cleanup;
+diff --git a/tests/include.rc b/tests/include.rc
+index 762c5e2..c925941 100644
+--- a/tests/include.rc
++++ b/tests/include.rc
+@@ -89,6 +89,7 @@ GRAPH_SWITCH_TIMEOUT=10
+ UNLINK_TIMEOUT=5
+ MDC_TIMEOUT=5
+ IO_WAIT_TIMEOUT=5
++DISK_FAIL_TIMEOUT=80
+ 
+ LOGDIR=$(gluster --print-logdir)
+ 
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 851ccad..fca2cd5 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -2609,6 +2609,10 @@ afr_lookup_sh_metadata_wrap(void *opaque)
+     dict = dict_new();
+     if (!dict)
+         goto out;
++    if (local->xattr_req) {
++        dict_copy(local->xattr_req, dict);
++    }
++
+     ret = dict_set_sizen_str_sizen(dict, "link-count", GF_XATTROP_INDEX_COUNT);
+     if (ret) {
+         gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
+@@ -2617,7 +2621,7 @@ afr_lookup_sh_metadata_wrap(void *opaque)
+     if (loc_is_nameless(&local->loc)) {
+         ret = afr_selfheal_unlocked_discover_on(frame, local->inode,
+                                                 local->loc.gfid, local->replies,
+-                                                local->child_up);
++                                                local->child_up, dict);
+     } else {
+         inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+                                                 local->loc.name, local->replies,
+@@ -2791,7 +2795,7 @@ afr_lookup_selfheal_wrap(void *opaque)
+ 
+     inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
+                                             local->loc.name, local->replies,
+-                                            local->child_up, NULL);
++                                            local->child_up, local->xattr_req);
+     if (inode)
+         inode_unref(inode);
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index 36fd3a9..9b6575f 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -1861,7 +1861,7 @@ afr_set_multi_dom_lock_count_request(xlator_t *this, dict_t *dict)
+ int
+ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+                                   uuid_t gfid, struct afr_reply *replies,
+-                                  unsigned char *discover_on)
++                                  unsigned char *discover_on, dict_t *dict)
+ {
+     loc_t loc = {
+         0,
+@@ -1876,6 +1876,8 @@ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+     xattr_req = dict_new();
+     if (!xattr_req)
+         return -ENOMEM;
++    if (dict)
++        dict_copy(dict, xattr_req);
+ 
+     if (afr_xattr_req_prepare(frame->this, xattr_req) != 0) {
+         dict_unref(xattr_req);
+@@ -1906,11 +1908,14 @@ afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
+                                struct afr_reply *replies)
+ {
+     afr_local_t *local = NULL;
++    dict_t *dict = NULL;
+ 
+     local = frame->local;
++    if (local && local->xattr_req)
++        dict = local->xattr_req;
+ 
+     return afr_selfheal_unlocked_discover_on(frame, inode, gfid, replies,
+-                                             local->child_up);
++                                             local->child_up, dict);
+ }
+ 
+ unsigned int
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index b39af02..8f6fb00 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -188,7 +188,7 @@ afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
+ int
+ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
+                                   uuid_t gfid, struct afr_reply *replies,
+-                                  unsigned char *discover_on);
++                                  unsigned char *discover_on, dict_t *dict);
+ inode_t *
+ afr_selfheal_unlocked_lookup_on(call_frame_t *frame, inode_t *parent,
+                                 const char *name, struct afr_reply *replies,
+-- 
+1.8.3.1
+
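The core of the fix is copying the caller's xattr_req into the dictionary used for the self-heal lookup before heal-specific keys are added, so no client xdata is dropped. A toy sketch with a minimal key/value list standing in for glusterfs' dict_t and dict_copy() (the key names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct kv { char key[32]; char val[32]; struct kv *next; };
struct dict { struct kv *head; };

static void dict_set(struct dict *d, const char *k, const char *v)
{
    struct kv *e = calloc(1, sizeof(*e));

    if (!e)
        return;
    snprintf(e->key, sizeof(e->key), "%s", k);
    snprintf(e->val, sizeof(e->val), "%s", v);
    e->next = d->head;
    d->head = e;
}

/* Copy every entry of src into dst, as dict_copy() does. */
static void dict_copy_all(const struct dict *src, struct dict *dst)
{
    for (const struct kv *e = src->head; e; e = e->next)
        dict_set(dst, e->key, e->val);
}

int main(void)
{
    struct dict xattr_req = { 0 }, lookup_req = { 0 };

    dict_set(&xattr_req, "client-xdata-key", "1");

    /* Inherit the caller's xdata before adding heal-specific keys,
     * which is what the patched lookup path now does. */
    dict_copy_all(&xattr_req, &lookup_req);
    dict_set(&lookup_req, "link-count", "all");

    for (struct kv *e = lookup_req.head; e; e = e->next)
        printf("%s=%s\n", e->key, e->val);
    return 0;
}
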
diff --git a/SOURCES/0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch b/SOURCES/0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
new file mode 100644
index 0000000..e712886
--- /dev/null
+++ b/SOURCES/0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
@@ -0,0 +1,283 @@
+From 03de45e5fb1c8aa5369848ed9e52abd1365e1d21 Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Wed, 31 Jul 2019 11:34:19 +0530
+Subject: [PATCH 493/511] geo-rep: Note section is required for ignore_deletes
+
+There exists a window of ~15 seconds where deletes are still picked
+up by the history crawl even when ignore_deletes is set to true,
+and files that are not supposed to be deleted eventually get
+deleted from the slave. Though this works as per design, a note
+regarding it is needed.
+
+Added a warning message indicating the same, and logged an info
+message when the worker restarts after the ignore-deletes option
+is set.
+
+>fixes: bz#1708603
+>Change-Id: I103be882fac18b4cef935efa355f5037a396f7c1
+>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Upstream patch: https://review.gluster.org/c/glusterfs/+/22702
+
+BUG: 1224906
+Change-Id: I103be882fac18b4cef935efa355f5037a396f7c1
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220757
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c             | 45 ++++++++++++++++++++------
+ cli/src/cli-cmd-volume.c             | 20 ++++++++----
+ cli/src/cli.h                        |  3 +-
+ geo-replication/syncdaemon/gsyncd.py |  2 +-
+ geo-replication/syncdaemon/master.py |  6 ++++
+ tests/00-geo-rep/bug-1708603.t       | 63 ++++++++++++++++++++++++++++++++++++
+ 6 files changed, 120 insertions(+), 19 deletions(-)
+ create mode 100644 tests/00-geo-rep/bug-1708603.t
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index 5fd05f4..34f17c9 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -2901,7 +2901,8 @@ out:
+ }
+ 
+ int32_t
+-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
++cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
++                        int wordcount, dict_t **options, char **errstr)
+ {
+     int32_t ret = -1;
+     dict_t *dict = NULL;
+@@ -2918,6 +2919,8 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
+     char *save_ptr = NULL;
+     char *slave_temp = NULL;
+     char *token = NULL;
++    gf_answer_t answer = GF_ANSWER_NO;
++    const char *question = NULL;
+ 
+     GF_ASSERT(words);
+     GF_ASSERT(options);
+@@ -2990,8 +2993,10 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
+ 
+     if (masteri && gsyncd_url_check(words[masteri]))
+         goto out;
+-    if (slavei && !glob && !gsyncd_url_check(words[slavei]))
++    if (slavei && !glob && !gsyncd_url_check(words[slavei])) {
++        gf_asprintf(errstr, "Invalid slave url: %s", words[slavei]);
+         goto out;
++    }
+ 
+     w = str_getunamb(words[cmdi], opwords);
+     if (!w)
+@@ -3101,16 +3106,36 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
+     }
+     if (!ret)
+         ret = dict_set_int32(dict, "type", type);
+-    if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG)
++    if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
++        if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
++            !strcmp((char *)words[wordcount - 1], "true")) {
++            question =
++                "There exists ~15 seconds delay for the option to take"
++                " effect from stime of the corresponding brick. Please"
++                " check the log for the time, the option is effective."
++                " Proceed";
++
++            answer = cli_cmd_get_confirmation(state, question);
++
++            if (GF_ANSWER_NO == answer) {
++                gf_log("cli", GF_LOG_INFO,
++                       "Operation "
++                       "cancelled, exiting");
++                *errstr = gf_strdup("Aborted by user.");
++                ret = -1;
++                goto out;
++            }
++        }
++
+         ret = config_parse(words, wordcount, dict, cmdi, glob);
++    }
+ 
+ out:
+     if (slave_temp)
+         GF_FREE(slave_temp);
+-    if (ret) {
+-        if (dict)
+-            dict_unref(dict);
+-    } else
++    if (ret && dict)
++        dict_unref(dict);
++    else
+         *options = dict;
+ 
+     return ret;
+@@ -5659,9 +5684,9 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
+     int32_t ret = -1;
+     char *w = NULL;
+     char *volname = NULL;
+-    char *opwords[] = {
+-        "enable",       "disable", "scrub-throttle", "scrub-frequency", "scrub",
+-        "signing-time", "signer-threads", NULL};
++    char *opwords[] = {"enable",          "disable", "scrub-throttle",
++                       "scrub-frequency", "scrub",   "signing-time",
++                       "signer-threads",  NULL};
+     char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
+     char *scrub_freq_values[] = {"hourly",  "daily",  "weekly", "biweekly",
+                                  "monthly", "minute", NULL};
+diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
+index 72504ca..6f5bf8b 100644
+--- a/cli/src/cli-cmd-volume.c
++++ b/cli/src/cli-cmd-volume.c
+@@ -2457,6 +2457,7 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
+     rpc_clnt_procedure_t *proc = NULL;
+     call_frame_t *frame = NULL;
+     cli_local_t *local = NULL;
++    char *errstr = NULL;
+ #if (USE_EVENTS)
+     int ret1 = -1;
+     int cmd_type = -1;
+@@ -2468,16 +2469,21 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ 
+     proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GSYNC_SET];
+ 
+-    frame = create_frame(THIS, THIS->ctx->pool);
+-    if (frame == NULL) {
+-        ret = -1;
++    ret = cli_cmd_gsync_set_parse(state, words, wordcount, &options, &errstr);
++    if (ret) {
++        if (errstr) {
++            cli_err("%s", errstr);
++            GF_FREE(errstr);
++        } else {
++            cli_usage_out(word->pattern);
++        }
++        parse_err = 1;
+         goto out;
+     }
+ 
+-    ret = cli_cmd_gsync_set_parse(words, wordcount, &options);
+-    if (ret) {
+-        cli_usage_out(word->pattern);
+-        parse_err = 1;
++    frame = create_frame(THIS, THIS->ctx->pool);
++    if (frame == NULL) {
++        ret = -1;
+         goto out;
+     }
+ 
+diff --git a/cli/src/cli.h b/cli/src/cli.h
+index c30ae9c..7b4f446 100644
+--- a/cli/src/cli.h
++++ b/cli/src/cli.h
+@@ -269,7 +269,8 @@ int32_t
+ cli_cmd_volume_reset_parse(const char **words, int wordcount, dict_t **opt);
+ 
+ int32_t
+-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **opt);
++cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
++                        int wordcount, dict_t **opt, char **errstr);
+ 
+ int32_t
+ cli_cmd_quota_parse(const char **words, int wordcount, dict_t **opt);
+diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
+index 8940384..215c62d 100644
+--- a/geo-replication/syncdaemon/gsyncd.py
++++ b/geo-replication/syncdaemon/gsyncd.py
+@@ -315,7 +315,7 @@ def main():
+ 
+     # Log message for loaded config file
+     if config_file is not None:
+-        logging.info(lf("Using session config file", path=config_file))
++        logging.debug(lf("Using session config file", path=config_file))
+ 
+     set_term_handler()
+     excont = FreeObject(exval=0)
+diff --git a/geo-replication/syncdaemon/master.py b/geo-replication/syncdaemon/master.py
+index 08e98f8..98637e7 100644
+--- a/geo-replication/syncdaemon/master.py
++++ b/geo-replication/syncdaemon/master.py
+@@ -1549,6 +1549,12 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
+         data_stime = self.get_data_stime()
+ 
+         end_time = int(time.time())
++
++        #as start of historical crawl marks Geo-rep worker restart
++        if gconf.get("ignore-deletes"):
++            logging.info(lf('ignore-deletes config option is set',
++                         stime=data_stime))
++
+         logging.info(lf('starting history crawl',
+                         turns=self.history_turns,
+                         stime=data_stime,
+diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
+new file mode 100644
+index 0000000..26913f1
+--- /dev/null
++++ b/tests/00-geo-rep/bug-1708603.t
+@@ -0,0 +1,63 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++. $(dirname $0)/../geo-rep.rc
++. $(dirname $0)/../env.rc
++
++SCRIPT_TIMEOUT=300
++
++##Cleanup and start glusterd
++cleanup;
++TEST glusterd;
++TEST pidof glusterd
++
++
++##Variables
++GEOREP_CLI="gluster volume geo-replication"
++master=$GMV0
++SH0="127.0.0.1"
++slave=${SH0}::${GSV0}
++num_active=2
++num_passive=2
++master_mnt=$M0
++slave_mnt=$M1
++
++############################################################
++#SETUP VOLUMES AND GEO-REPLICATION
++############################################################
++
++##create_and_start_master_volume
++TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
++TEST $CLI volume start $GMV0
++
++##create_and_start_slave_volume
++TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
++TEST $CLI volume start $GSV0
++
++##Mount master
++TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
++
++##Mount slave
++TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
++
++#Create geo-rep session
++TEST create_georep_session $master $slave
++
++echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
++EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
++echo y | $GEOREP_CLI $master $slave config ignore-deletes true
++EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
++
++#Stop Geo-rep
++TEST $GEOREP_CLI $master $slave stop
++
++#Delete Geo-rep
++TEST $GEOREP_CLI $master $slave delete
++
++#Cleanup authorized keys
++sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
++sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
++
++cleanup;
++#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+-- 
+1.8.3.1
+
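A sketch of the confirmation gate the CLI parser gains; confirm() is a stand-in for cli_cmd_get_confirmation(), and the hard-coded option strings are purely illustrative:

#include <stdio.h>
#include <string.h>

/* Stand-in for cli_cmd_get_confirmation(): returns 1 only on 'y'/'Y'. */
static int confirm(const char *question)
{
    char buf[8] = "";

    printf("%s (y/n) ", question);
    if (!fgets(buf, sizeof(buf), stdin))
        return 0;
    return buf[0] == 'y' || buf[0] == 'Y';
}

int main(void)
{
    const char *option = "ignore-deletes"; /* illustrative hard-coding */
    const char *value = "true";

    if (!strcmp(option, "ignore-deletes") && !strcmp(value, "true")) {
        if (!confirm("There exists ~15 seconds delay for the option to"
                     " take effect from stime of the corresponding brick."
                     " Proceed?")) {
            fprintf(stderr, "Aborted by user.\n");
            return 1;
        }
    }
    printf("config accepted: %s=%s\n", option, value);
    return 0;
}
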
diff --git a/SOURCES/0494-glusterd-start-the-brick-on-a-different-port.patch b/SOURCES/0494-glusterd-start-the-brick-on-a-different-port.patch
new file mode 100644
index 0000000..d11b138
--- /dev/null
+++ b/SOURCES/0494-glusterd-start-the-brick-on-a-different-port.patch
@@ -0,0 +1,54 @@
+From 1b24bc4319203128a9ff7f97fe14f4b3622c4eec Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde <srakonde@redhat.com>
+Date: Wed, 26 Aug 2020 20:05:35 +0530
+Subject: [PATCH 494/511] glusterd: start the brick on a different port
+
+Problem: brick fails to start when the port provided by
+glusterd is in use by any other process
+
+Solution: glusterd should check errno set by runner_run()
+and if it is set to EADDRINUSE, it should allocate a new
+port to the brick and try to start it again.
+
+Previously ret value is checked instead of errno, so the
+retry part never executed. Now, we initialize errno to 0
+before calling runner framework. and afterwards store the
+errno into ret to avoid modification of errno in subsequent
+function calls.
+
+>fixes: #1101
+
+>Change-Id: I1aa048a77c5f8b035dece36976d60602d9753b1a
+>Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
+>Signed-off-by: nik-redhat <nladha@redhat.com>
+
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24923/
+
+BUG: 1865796
+Change-Id: I1aa048a77c5f8b035dece36976d60602d9753b1a
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220541
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index b343eee..f7030fb 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -2289,7 +2289,10 @@ retry:
+ 
+     if (wait) {
+         synclock_unlock(&priv->big_lock);
++        errno = 0;
+         ret = runner_run(&runner);
++        if (errno != 0)
++            ret = errno;
+         synclock_lock(&priv->big_lock);
+ 
+         if (ret == EADDRINUSE) {
+-- 
+1.8.3.1
+
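The clear-then-latch errno pattern described above is easy to get wrong, so here is a minimal hedged sketch; spawn_brick() is a hypothetical stand-in for runner_run() and the port-retry policy is simplified:

#include <errno.h>
#include <stdio.h>

static int spawn_brick(int port)
{
    if (port == 49152) {        /* simulate a port already in use */
        errno = EADDRINUSE;
        return -1;
    }
    return 0;
}

int main(void)
{
    int port = 49152;
    int ret;

retry:
    errno = 0;                  /* clear before the call */
    ret = spawn_brick(port);
    if (errno != 0)
        ret = errno;            /* latch errno before it can be clobbered */

    if (ret == EADDRINUSE) {
        port++;                 /* allocate a fresh port and try again */
        printf("port in use, retrying on %d\n", port);
        goto retry;
    }
    printf("brick started on port %d (ret=%d)\n", port, ret);
    return 0;
}
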
diff --git a/SOURCES/0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch b/SOURCES/0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
new file mode 100644
index 0000000..6b3f6f5
--- /dev/null
+++ b/SOURCES/0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
@@ -0,0 +1,60 @@
+From 17a2a880290d2038c913c23985df620e3c9741b3 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Mon, 16 Mar 2020 15:17:23 +0000
+Subject: [PATCH 495/511] geo-rep: descriptive message when worker crashes due
+ to EIO
+
+With this patch, the log now makes it clear when the crash is due to EIO:
+
+[2020-03-16 16:24:48.293837] E [syncdutils(worker /bricks/brick1/mbr3):348:log_raise_exception] <top>: Getting "Input/Output error" is most likely due to a. Brick is down or b. Split brain issue.
+[2020-03-16 16:24:48.293915] E [syncdutils(worker /bricks/brick1/mbr3):352:log_raise_exception] <top>: This is expected as per design to keep the consistency of the file system. Once the above issue is resolved geo-rep would automatically proceed further.
+
+>Change-Id: Ie33f2440bc96089731ce12afa8dab91d9550a7ca
+>Fixes: #1104
+>Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+>Upstream Patch : https://review.gluster.org/c/glusterfs/+/24228/
+
+BUG: 1412494
+Change-Id: Ie33f2440bc96089731ce12afa8dab91d9550a7ca
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220874
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ geo-replication/syncdaemon/syncdutils.py | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
+index f43e13b..d5a94d4 100644
+--- a/geo-replication/syncdaemon/syncdutils.py
++++ b/geo-replication/syncdaemon/syncdutils.py
+@@ -22,7 +22,7 @@ import socket
+ from subprocess import PIPE
+ from threading import Lock, Thread as baseThread
+ from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED
+-from errno import EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode
++from errno import EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode, EIO
+ from signal import signal, SIGTERM
+ import select as oselect
+ from os import waitpid as owaitpid
+@@ -346,6 +346,17 @@ def log_raise_exception(excont):
+                                                         ECONNABORTED):
+             logging.error(lf('Gluster Mount process exited',
+                              error=errorcode[exc.errno]))
++        elif isinstance(exc, OSError) and exc.errno == EIO:
++            logging.error("Getting \"Input/Output error\" "
++                          "is most likely due to "
++                          "a. Brick is down or "
++                          "b. Split brain issue.")
++            logging.error("This is expected as per design to "
++                          "keep the consistency of the file system. "
++                          "Once the above issue is resolved "
++                          "geo-replication would automatically "
++                          "proceed further.")
++            logtag = "FAIL"
+         else:
+             logtag = "FAIL"
+         if not logtag and logging.getLogger().isEnabledFor(logging.DEBUG):
+-- 
+1.8.3.1
+
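The actual change is Python code in syncdutils.py; the same errno-classification idea is shown below as a small C sketch, reusing the wording of the patch's log messages:

#include <errno.h>
#include <stdio.h>

static void log_worker_error(int err)
{
    if (err == EIO) {
        fprintf(stderr,
                "Getting \"Input/Output error\" is most likely due to "
                "a. Brick is down or b. Split brain issue.\n");
        fprintf(stderr,
                "This is expected as per design to keep the consistency "
                "of the file system. Once the above issue is resolved "
                "geo-replication would automatically proceed further.\n");
    } else {
        fprintf(stderr, "worker failed with errno %d\n", err);
    }
}

int main(void)
{
    log_worker_error(EIO);
    return 0;
}
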
diff --git a/SOURCES/0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch b/SOURCES/0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
new file mode 100644
index 0000000..590aea3
--- /dev/null
+++ b/SOURCES/0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
@@ -0,0 +1,139 @@
+From 5893e64ca8c147b7acfa12cd9824f254d53ee261 Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Wed, 4 Nov 2020 09:02:03 +0530
+Subject: [PATCH 496/511] posix: Use MALLOC instead of alloca to allocate
+ memory for xattrs list (#1730)
+
+If a file has huge xattrs on the backend, the brick process
+crashes once alloca(size) crosses the 256k limit, because the
+iot_worker stack size is 256k.
+
+> Fixes: #1699
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> Change-Id: I100468234f83329a7d65b43cbe4e10450c1ccecd
+> (Cherry pick from commit fd666caa35ac84dd1cba55399761982011b77112)
+> (Reviewed on upstream link https://github.com/gluster/glusterfs/pull/1828)
+
+Change-Id: I100468234f83329a7d65b43cbe4e10450c1ccecd
+Bug: 1903468
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220872
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/storage/posix/src/posix-gfid-path.c    |  5 ++++-
+ xlators/storage/posix/src/posix-helpers.c      |  3 ++-
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 12 +++++++++---
+ 3 files changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/xlators/storage/posix/src/posix-gfid-path.c b/xlators/storage/posix/src/posix-gfid-path.c
+index 64b5c6c..01315ac 100644
+--- a/xlators/storage/posix/src/posix-gfid-path.c
++++ b/xlators/storage/posix/src/posix-gfid-path.c
+@@ -195,7 +195,8 @@ posix_get_gfid2path(xlator_t *this, inode_t *inode, const char *real_path,
+             if (size == 0)
+                 goto done;
+         }
+-        list = alloca(size);
++
++        list = GF_MALLOC(size, gf_posix_mt_char);
+         if (!list) {
+             *op_errno = errno;
+             goto err;
+@@ -309,6 +310,7 @@ done:
+             GF_FREE(paths[j]);
+     }
+     ret = 0;
++    GF_FREE(list);
+     return ret;
+ err:
+     if (path)
+@@ -317,5 +319,6 @@ err:
+         if (paths[j])
+             GF_FREE(paths[j]);
+     }
++    GF_FREE(list);
+     return ret;
+ }
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index 73a44be..ceac52a 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -349,7 +349,7 @@ _posix_get_marker_all_contributions(posix_xattr_filler_t *filler)
+         goto out;
+     }
+ 
+-    list = alloca(size);
++    list = GF_MALLOC(size, gf_posix_mt_char);
+     if (!list) {
+         goto out;
+     }
+@@ -379,6 +379,7 @@ _posix_get_marker_all_contributions(posix_xattr_filler_t *filler)
+     ret = 0;
+ 
+ out:
++    GF_FREE(list);
+     return ret;
+ }
+ 
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index 21119ea..1d37aed 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -3305,7 +3305,7 @@ posix_get_ancestry_non_directory(xlator_t *this, inode_t *leaf_inode,
+         goto out;
+     }
+ 
+-    list = alloca(size);
++    list = GF_MALLOC(size, gf_posix_mt_char);
+     if (!list) {
+         *op_errno = errno;
+         goto out;
+@@ -3385,6 +3385,7 @@ posix_get_ancestry_non_directory(xlator_t *this, inode_t *leaf_inode,
+     op_ret = 0;
+ 
+ out:
++    GF_FREE(list);
+     return op_ret;
+ }
+ 
+@@ -3810,7 +3811,8 @@ posix_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+         if (size == 0)
+             goto done;
+     }
+-    list = alloca(size);
++
++    list = GF_MALLOC(size, gf_posix_mt_char);
+     if (!list) {
+         op_errno = errno;
+         goto out;
+@@ -3937,6 +3939,7 @@ out:
+         dict_unref(dict);
+     }
+ 
++    GF_FREE(list);
+     return 0;
+ }
+ 
+@@ -4136,7 +4139,8 @@ posix_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+         if (size == 0)
+             goto done;
+     }
+-    list = alloca(size + 1);
++
++    list = GF_MALLOC(size, gf_posix_mt_char);
+     if (!list) {
+         op_ret = -1;
+         op_errno = ENOMEM;
+@@ -4240,6 +4244,8 @@ out:
+     if (dict)
+         dict_unref(dict);
+ 
++    GF_FREE(list);
++
+     return 0;
+ }
+ 
+-- 
+1.8.3.1
+
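A standalone sketch of the probe-then-heap-allocate pattern this patch switches to, shown against the plain listxattr(2) syscall rather than glusterfs' internal wrappers; note the single free on the way out, which the patch has to replicate on every exit path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : ".";
    char *list;
    ssize_t size, off;

    size = listxattr(path, NULL, 0);  /* probe the required size */
    if (size <= 0)
        return 0;                     /* no xattrs, or an error */

    list = malloc(size);              /* heap, never alloca() */
    if (!list)
        return 1;

    size = listxattr(path, list, size);
    for (off = 0; off < size; off += (ssize_t)strlen(list + off) + 1)
        printf("%s\n", list + off);   /* names are NUL-separated */

    free(list);                       /* released on the way out */
    return 0;
}
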
diff --git a/SOURCES/0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch b/SOURCES/0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
new file mode 100644
index 0000000..9d477ae
--- /dev/null
+++ b/SOURCES/0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
@@ -0,0 +1,80 @@
+From 85a5cce40dba0393e636c0eb5af9d8f8746f2315 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawal@redhat.com>
+Date: Thu, 2 Jan 2020 10:23:52 +0530
+Subject: [PATCH 497/511] socket: Use AES128 cipher in SSL if AES is supported
+ by CPU
+
+SSL performance improves with the AES128 cipher, so use AES128
+as the default cipher on CPUs that have the AES bit enabled;
+otherwise SSL uses the AES256 cipher.
+
+> Change-Id: I91c50fe987cbb22ed76f8012094730c592c63506
+> Fixes: #1050
+> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+> (Cherry pick from commit 177cc09d24515596eb51739ce0a276c26e3c52f1)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23952/)
+
+Change-Id: I91c50fe987cbb22ed76f8012094730c592c63506
+Bug: 1612973
+Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220870
+Tested-by: Mohit Agrawal <moagrawa@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ rpc/rpc-transport/socket/src/socket.c | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
+index 54cd5df..1ee7320 100644
+--- a/rpc/rpc-transport/socket/src/socket.c
++++ b/rpc/rpc-transport/socket/src/socket.c
+@@ -4238,6 +4238,34 @@ static void __attribute__((destructor)) fini_openssl_mt(void)
+     ERR_free_strings();
+ }
+ 
++/* The function returns 0 if AES bit is enabled on the CPU */
++static int
++ssl_check_aes_bit(void)
++{
++    FILE *fp = fopen("/proc/cpuinfo", "r");
++    int ret = 1;
++    size_t len = 0;
++    char *line = NULL;
++    char *match = NULL;
++
++    GF_ASSERT(fp != NULL);
++
++    while (getline(&line, &len, fp) > 0) {
++        if (!strncmp(line, "flags", 5)) {
++            match = strstr(line, " aes");
++            if ((match != NULL) && ((match[4] == ' ') || (match[4] == 0))) {
++                ret = 0;
++                break;
++            }
++        }
++    }
++
++    free(line);
++    fclose(fp);
++
++    return ret;
++}
++
+ static int
+ ssl_setup_connection_params(rpc_transport_t *this)
+ {
+@@ -4261,6 +4289,10 @@ ssl_setup_connection_params(rpc_transport_t *this)
+         return 0;
+     }
+ 
++    if (!ssl_check_aes_bit()) {
++        cipher_list = "AES128:" DEFAULT_CIPHER_LIST;
++    }
++
+     priv->ssl_own_cert = DEFAULT_CERT_PATH;
+     if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) {
+         if (!priv->ssl_enabled) {
+-- 
+1.8.3.1
+
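A sketch of how such a CPU probe can drive cipher selection. The /proc/cpuinfo scan below uses a coarser substring match than the patch's exact-token check, and DEFAULT_CIPHERS is an illustrative stand-in for DEFAULT_CIPHER_LIST:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_CIPHERS "HIGH:!SSLv2"

static int cpu_has_aes(void)
{
    FILE *fp = fopen("/proc/cpuinfo", "r");
    char *line = NULL;
    size_t len = 0;
    int found = 0;

    if (!fp)
        return 0;
    while (getline(&line, &len, fp) > 0) {
        /* coarse check of the "flags" line for an aes feature bit */
        if (!strncmp(line, "flags", 5) && strstr(line, " aes")) {
            found = 1;
            break;
        }
    }
    free(line);
    fclose(fp);
    return found;
}

int main(void)
{
    /* prefer AES128 only where the CPU accelerates it */
    const char *cipher_list = cpu_has_aes() ? "AES128:" DEFAULT_CIPHERS
                                            : DEFAULT_CIPHERS;
    printf("cipher list: %s\n", cipher_list);
    return 0;
}
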
diff --git a/SOURCES/0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch b/SOURCES/0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
new file mode 100644
index 0000000..078c390
--- /dev/null
+++ b/SOURCES/0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
@@ -0,0 +1,69 @@
+From 11d648660b8bd246756f87b2f40c72fbabf084d1 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar <sunkumar@redhat.com>
+Date: Tue, 19 May 2020 16:13:01 +0100
+Subject: [PATCH 498/511] geo-rep: Fix corner case in rename on mkdir during
+ hybrid crawl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Problem:
+The issue is hit during hybrid crawl while handling a rename on the slave.
+In this special case the rename is recorded as a mkdir, and geo-rep
+processes it by resolving the path from the backend.
+
+While resolving the backend path during this special handling, one
+corner case was not considered.
+
+<snip>
+Traceback (most recent call last):
+  File "/usr/libexec/glusterfs/python/syncdaemon/repce.py", line 118, in worker
+    res = getattr(self.obj, rmeth)(*in_data[2:])
+  File "/usr/libexec/glusterfs/python/syncdaemon/resource.py", line 588, in entry_ops
+    src_entry = get_slv_dir_path(slv_host, slv_volume, gfid)
+  File "/usr/libexec/glusterfs/python/syncdaemon/syncdutils.py", line 710, in get_slv_dir_path
+    dir_entry = os.path.join(pfx, pargfid, basename)
+  File "/usr/lib64/python2.7/posixpath.py", line 75, in join
+    if b.startswith('/'):
+AttributeError: 'int' object has no attribute 'startswith'
+
+In Python 3:
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+  File "/usr/lib64/python3.8/posixpath.py", line 90, in join
+    genericpath._check_arg_types('join', a, *p)
+  File "/usr/lib64/python3.8/genericpath.py", line 152, in _check_arg_types
+    raise TypeError(f'{funcname}() argument must be str, bytes, or '
+TypeError: join() argument must be str, bytes, or os.PathLike object, not 'int'
+</snip>
+
+>Change-Id: I8b926899c60ad8c4ffc886d57028ba70fd21e332
+>Fixes: #1250
+>Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
+Upstream Patch: https://review.gluster.org/c/glusterfs/+/24468/
+
+BUG: 1835229
+Change-Id: I8b926899c60ad8c4ffc886d57028ba70fd21e332
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220867
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ geo-replication/syncdaemon/syncdutils.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
+index d5a94d4..26c79d0 100644
+--- a/geo-replication/syncdaemon/syncdutils.py
++++ b/geo-replication/syncdaemon/syncdutils.py
+@@ -732,6 +732,8 @@ def get_slv_dir_path(slv_host, slv_volume, gfid):
+                     else:
+                         dirpath = dirpath.strip("/")
+                         pargfid = get_gfid_from_mnt(dirpath)
++                        if isinstance(pargfid, int):
++                            return None
+                     dir_entry = os.path.join(pfx, pargfid, basename)
+                     return dir_entry
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0499-gfapi-give-appropriate-error-when-size-exceeds.patch b/SOURCES/0499-gfapi-give-appropriate-error-when-size-exceeds.patch
new file mode 100644
index 0000000..edeca1a
--- /dev/null
+++ b/SOURCES/0499-gfapi-give-appropriate-error-when-size-exceeds.patch
@@ -0,0 +1,63 @@
+From f78a5d86c55149d80b6efdf60eae7221c238654e Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Thu, 24 Sep 2020 12:43:51 +0000
+Subject: [PATCH 499/511] gfapi: give appropriate error when size exceeds
+
+This patch helps generate an appropriate error message
+when gfapi tries to write data equal to or greater
+than 1 GB, due to the limitation at the
+socket layer.
+
+Upstream:
+> Reviewed-on: https://github.com/gluster/glusterfs/pull/1557
+> fixes: #1518
+> Change-Id: I1234a0b5a6e675a0b20c6b1afe0f4390fd721f6f
+> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+
+BUG: 1691320
+Change-Id: I1234a0b5a6e675a0b20c6b1afe0f4390fd721f6f
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/219998
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+---
+ api/src/gfapi-messages.h | 4 +++-
+ api/src/glfs-fops.c      | 8 ++++++++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/api/src/gfapi-messages.h b/api/src/gfapi-messages.h
+index 68d1242..2ffd5ac 100644
+--- a/api/src/gfapi-messages.h
++++ b/api/src/gfapi-messages.h
+@@ -49,6 +49,8 @@ GLFS_MSGID(API, API_MSG_MEM_ACCT_INIT_FAILED, API_MSG_MASTER_XLATOR_INIT_FAILED,
+            API_MSG_INODE_LINK_FAILED, API_MSG_STATEDUMP_FAILED,
+            API_MSG_XREADDIRP_R_FAILED, API_MSG_LOCK_INSERT_MERGE_FAILED,
+            API_MSG_SETTING_LOCK_TYPE_FAILED, API_MSG_INODE_FIND_FAILED,
+-           API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED);
++           API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED,
++           API_MSG_INVALID_ARG);
+ 
++#define API_MSG_INVALID_ARG_STR "Invalid"
+ #endif /* !_GFAPI_MESSAGES_H__ */
+diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
+index e6adea5..051541f 100644
+--- a/api/src/glfs-fops.c
++++ b/api/src/glfs-fops.c
+@@ -1525,6 +1525,14 @@ glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
+ 
+     GF_REF_GET(glfd);
+ 
++    if (iovec->iov_len >= GF_UNIT_GB) {
++        ret = -1;
++        errno = EINVAL;
++        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
++                "size >= %llu is not allowed", GF_UNIT_GB, NULL);
++        goto out;
++    }
++
+     subvol = glfs_active_subvol(glfd->fs);
+     if (!subvol) {
+         ret = -1;
+-- 
+1.8.3.1
+
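For applications, the practical consequence is that large buffers must now be split into sub-gigabyte writes. A hedged sketch of such chunking, where do_write() is a hypothetical stand-in for a gfapi write call:

#include <stdio.h>

#define UNIT_GB (1024ULL * 1024 * 1024)

static long do_write(const char *buf, size_t count)
{
    (void)buf;
    if (count >= UNIT_GB)
        return -1;              /* what the gfapi check now returns */
    return (long)count;
}

static int write_all(const char *buf, size_t total)
{
    size_t off = 0;
    const size_t chunk = UNIT_GB / 2;   /* stay well under the limit */

    while (off < total) {
        size_t n = total - off < chunk ? total - off : chunk;
        long w = do_write(buf + off, n);

        if (w < 0)
            return -1;
        off += (size_t)w;
    }
    return 0;
}

int main(void)
{
    static const char data[] = "example payload";

    printf("write_all: %d\n", write_all(data, sizeof(data)));
    return 0;
}
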
diff --git a/SOURCES/0500-features-shard-Convert-shard-block-indices-to-uint64.patch b/SOURCES/0500-features-shard-Convert-shard-block-indices-to-uint64.patch
new file mode 100644
index 0000000..4898422
--- /dev/null
+++ b/SOURCES/0500-features-shard-Convert-shard-block-indices-to-uint64.patch
@@ -0,0 +1,104 @@
+From 60789c658ea22063c26168cb4ce15ac5fd279e58 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Mon, 14 Dec 2020 10:57:03 +0530
+Subject: [PATCH 500/511] features/shard: Convert shard block indices to uint64
+
+This patch fixes a crash in FOPs that operate on really large sharded
+files where the number of participating shards could sometimes exceed
+the signed int32 max.
+
+The patch also adds GF_ASSERTs to ensure that number of participating
+shards is always greater than 0 for files that do have more than one
+shard.
+
+Upstream:
+> https://review.gluster.org/#/c/glusterfs/+/23407/
+> Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
+> Fixes: #1348
+> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
+
+BUG: 1752739
+Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
+Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221061
+Tested-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ xlators/features/shard/src/shard.c | 14 ++++++++------
+ xlators/features/shard/src/shard.h |  6 +++---
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index 16d557b..a967f35 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -1855,10 +1855,9 @@ int shard_truncate_last_shard(call_frame_t *frame, xlator_t *this,
+    */
+   if (!inode) {
+     gf_msg_debug(this->name, 0,
+-                 "Last shard to be truncated absent"
+-                 " in backend: %s. Directly proceeding to update "
+-                 "file size",
+-                 uuid_utoa(inode->gfid));
++                 "Last shard to be truncated absent in backend: %" PRIu64
++                 " of gfid: %s. Directly proceeding to update file size",
++                 local->first_block, uuid_utoa(local->loc.inode->gfid));
+     shard_update_file_size(frame, this, NULL, &local->loc,
+                            shard_post_update_size_truncate_handler);
+     return 0;
+@@ -2389,6 +2388,7 @@ int shard_truncate_begin(call_frame_t *frame, xlator_t *this) {
+       get_highest_block(0, local->prebuf.ia_size, local->block_size);
+ 
+   local->num_blocks = local->last_block - local->first_block + 1;
++  GF_ASSERT(local->num_blocks > 0);
+   local->resolver_base_inode =
+       (local->fop == GF_FOP_TRUNCATE) ? local->loc.inode : local->fd->inode;
+ 
+@@ -4809,6 +4809,7 @@ int shard_post_lookup_readv_handler(call_frame_t *frame, xlator_t *this) {
+       get_highest_block(local->offset, local->total_size, local->block_size);
+ 
+   local->num_blocks = local->last_block - local->first_block + 1;
++  GF_ASSERT(local->num_blocks > 0);
+   local->resolver_base_inode = local->loc.inode;
+ 
+   local->inode_list =
+@@ -5266,6 +5267,7 @@ int shard_common_inode_write_post_lookup_handler(call_frame_t *frame,
+   local->last_block =
+       get_highest_block(local->offset, local->total_size, local->block_size);
+   local->num_blocks = local->last_block - local->first_block + 1;
++  GF_ASSERT(local->num_blocks > 0);
+   local->inode_list =
+       GF_CALLOC(local->num_blocks, sizeof(inode_t *), gf_shard_mt_inode_list);
+   if (!local->inode_list) {
+@@ -5274,8 +5276,8 @@ int shard_common_inode_write_post_lookup_handler(call_frame_t *frame,
+   }
+ 
+   gf_msg_trace(
+-      this->name, 0, "%s: gfid=%s first_block=%" PRIu32 " "
+-                     "last_block=%" PRIu32 " num_blocks=%" PRIu32
++      this->name, 0, "%s: gfid=%s first_block=%" PRIu64 " "
++                     "last_block=%" PRIu64 " num_blocks=%" PRIu64
+                      " offset=%" PRId64 " total_size=%zu flags=%" PRId32 "",
+       gf_fop_list[local->fop], uuid_utoa(local->resolver_base_inode->gfid),
+       local->first_block, local->last_block, local->num_blocks, local->offset,
+diff --git a/xlators/features/shard/src/shard.h b/xlators/features/shard/src/shard.h
+index 1721417..4fe181b 100644
+--- a/xlators/features/shard/src/shard.h
++++ b/xlators/features/shard/src/shard.h
+@@ -254,9 +254,9 @@ typedef int32_t (*shard_post_update_size_fop_handler_t)(call_frame_t *frame,
+ typedef struct shard_local {
+     int op_ret;
+     int op_errno;
+-    int first_block;
+-    int last_block;
+-    int num_blocks;
++    uint64_t first_block;
++    uint64_t last_block;
++    uint64_t num_blocks;
+     int call_count;
+     int eexist_count;
+     int create_count;
+-- 
+1.8.3.1
+
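A short worked example of why the widening matters: with 4 MB shards, offsets in the petabyte range already produce block indices beyond INT32_MAX, which the old signed int fields would overflow:

#include <inttypes.h>
#include <stdio.h>

static uint64_t
highest_block(uint64_t offset, uint64_t size, uint64_t block_size)
{
    return (offset + size - 1) / block_size;
}

int main(void)
{
    const uint64_t block_size = 4ULL * 1024 * 1024;              /* 4 MB */
    const uint64_t offset = 9000ULL * 1024 * 1024 * 1024 * 1024; /* ~9 PB */
    const uint64_t total = 64ULL * 1024 * 1024;

    uint64_t first = offset / block_size;
    uint64_t last = highest_block(offset, total, block_size);
    uint64_t num = last - first + 1;

    /* first is 2,359,296,000 here -- already past INT32_MAX */
    printf("first=%" PRIu64 " last=%" PRIu64 " num=%" PRIu64 "\n",
           first, last, num);
    return 0;
}
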
diff --git a/SOURCES/0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch b/SOURCES/0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
new file mode 100644
index 0000000..5152df8
--- /dev/null
+++ b/SOURCES/0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
@@ -0,0 +1,48 @@
+From 070698ede9c3765c95364e8207c8311dbf895499 Mon Sep 17 00:00:00 2001
+From: kiyer <kiyer@redhat.com>
+Date: Tue, 8 Dec 2020 15:18:49 +0530
+Subject: [PATCH 501/511] Cli: Removing old syntax of tier cmds from help menu
+
+Remove old syntax of attach-tier and detach-tier
+commands from help menu.
+
+Label: DOWNSTREAM ONLY
+BUG: 1813866
+
+Change-Id: If86e4828b475fb593a5105ca8deac96374f9542d
+Signed-off-by: kiyer <kiyer@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220510
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-volume.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
+index 6f5bf8b..b6bef80 100644
+--- a/cli/src/cli-cmd-volume.c
++++ b/cli/src/cli-cmd-volume.c
+@@ -3331,19 +3331,6 @@ struct cli_cmd tier_cmds[] = {
+     {"volume tier <VOLNAME> detach <start|stop|status|commit|[force]>",
+      cli_cmd_volume_tier_cbk, "Detach the hot tier from <VOLNAME>"},
+ 
+-    {"volume attach-tier <VOLNAME> [<replica COUNT>] <NEW-BRICK>...",
+-     cli_cmd_volume_tier_cbk,
+-     "NOTE: this is old syntax, will be deprecated in next release. "
+-     "Please use gluster volume tier <vol> attach "
+-     "[<replica COUNT>] <NEW-BRICK>..."},
+-
+-    {"volume detach-tier <VOLNAME> "
+-     "<start|stop|status|commit|force>",
+-     cli_cmd_volume_tier_cbk,
+-     "NOTE: this is old syntax, will be deprecated in next release. "
+-     "Please use gluster volume tier <vol> detach "
+-     "{start|stop|commit} [force]"},
+-
+     {"volume tier <VOLNAME> status\n"
+      "volume tier <VOLNAME> start [force]\n"
+      "volume tier <VOLNAME> stop\n"
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0502-dht-fixing-a-permission-update-issue.patch b/SOURCES/0502-dht-fixing-a-permission-update-issue.patch
new file mode 100644
index 0000000..7c136d0
--- /dev/null
+++ b/SOURCES/0502-dht-fixing-a-permission-update-issue.patch
@@ -0,0 +1,225 @@
+From 3f1eee125a35c33ecb078e5d3bfd80d80e63881d Mon Sep 17 00:00:00 2001
+From: Barak Sason Rofman <bsasonro@redhat.com>
+Date: Wed, 15 Jan 2020 12:02:05 +0200
+Subject: [PATCH 502/511] dht - fixing a permission update issue
+
+When bringing back a downed brick and performing lookup from the client
+side, the permissions on said brick aren't updated on the first lookup,
+but only on the second.
+
+This patch modifies permission update logic so the first lookup will
+trigger a permission update on the downed brick.
+
+LIMITATIONS OF THE PATCH:
+The choice of source depends on whether the directory has a layout or not.
+Even directories on a newly added brick will have a (zeroed) layout xattr, but the same is not true for the root directory.
+Hence, if in the entire cluster only the newly added bricks are up (and the others are down), any permission change made during this time will be overwritten by the older permissions when the cluster is restarted.
+
+Upstream:
+> Reviewed-on: https://review.gluster.org/#/c/glusterfs/+/24020/
+> fixes: #999
+> Change-Id: Ieb70246d41e59f9cae9f70bc203627a433dfbd33
+> Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+
+BUG: 1663821
+Change-Id: Ieb70246d41e59f9cae9f70bc203627a433dfbd33
+Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221116
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/bug-1064147.t                 | 71 ++++++++++++++++++++++++++++++++
+ xlators/cluster/dht/src/dht-common.c     | 28 ++++++++++---
+ xlators/cluster/dht/src/dht-selfheal.c   | 15 +++++--
+ xlators/storage/posix/src/posix-common.c | 16 +++----
+ 4 files changed, 111 insertions(+), 19 deletions(-)
+ create mode 100755 tests/bugs/bug-1064147.t
+
+diff --git a/tests/bugs/bug-1064147.t b/tests/bugs/bug-1064147.t
+new file mode 100755
+index 0000000..617a1aa
+--- /dev/null
++++ b/tests/bugs/bug-1064147.t
+@@ -0,0 +1,71 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++
++# Initialize
++#------------------------------------------------------------
++cleanup;
++
++# Start glusterd
++TEST glusterd;
++TEST pidof glusterd;
++TEST $CLI volume info;
++
++# Create a volume
++TEST $CLI volume create $V0 $H0:/${V0}{1,2};
++
++# Verify volume creation
++ EXPECT "$V0" volinfo_field $V0 'Volume Name';
++ EXPECT 'Created' volinfo_field $V0 'Status';
++
++# Start volume and verify successful start
++ TEST $CLI volume start $V0;
++ EXPECT 'Started' volinfo_field $V0 'Status';
++ TEST glusterfs -s $H0 --volfile-id=$V0 $M0
++#------------------------------------------------------------
++
++# Test case 1 - Subvolume down + Healing
++#------------------------------------------------------------
++# Kill 2nd brick process
++TEST kill -9  `ps aux | grep glusterfsd | grep ${V0}2 | grep -v grep | awk '{print $2}'`;
++
++# Change root permissions
++TEST chmod 444 $M0
++
++# Store permission for comparison
++TEST permission_new=`stat -c "%A" $M0`
++
++# Bring up the killed brick process
++TEST $CLI volume start $V0 force
++
++# Perform lookup
++sleep 5
++TEST ls $M0
++
++# Check brick permissions
++TEST brick_perm=`stat -c "%A" /${V0}2`
++TEST [ ${brick_perm} = ${permission_new} ]
++#------------------------------------------------------------
++
++# Test case 2 - Add-brick + Healing
++#------------------------------------------------------------
++# Change root permissions
++TEST chmod 777 $M0
++
++# Store permission for comparison
++TEST permission_new_2=`stat -c "%A" $M0`
++
++# Add a 3rd brick
++TEST $CLI volume add-brick $V0 $H0:/${V0}3
++
++# Perform lookup
++sleep 5
++TEST ls $M0
++
++# Check permissions on the new brick
++TEST brick_perm2=`stat -c "%A" /${V0}3`
++
++TEST [ ${brick_perm2} = ${permission_new_2} ]
++
++cleanup;
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index 4db89df..fe1d0ee 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -1363,13 +1363,29 @@ dht_lookup_dir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+             dht_aggregate_xattr(local->xattr, xattr);
+         }
+ 
++        if (__is_root_gfid(stbuf->ia_gfid)) {
++            ret = dht_dir_has_layout(xattr, conf->xattr_name);
++            if (ret >= 0) {
++                if (is_greater_time(local->prebuf.ia_ctime,
++                                    local->prebuf.ia_ctime_nsec,
++                                    stbuf->ia_ctime, stbuf->ia_ctime_nsec)) {
++                    /* Choose source */
++                    local->prebuf.ia_gid = stbuf->ia_gid;
++                    local->prebuf.ia_uid = stbuf->ia_uid;
++
++                    local->prebuf.ia_ctime = stbuf->ia_ctime;
++                    local->prebuf.ia_ctime_nsec = stbuf->ia_ctime_nsec;
++                    local->prebuf.ia_prot = stbuf->ia_prot;
++                }
++            }
++        }
++
+         if (local->stbuf.ia_type != IA_INVAL) {
+             /* This is not the first subvol to respond */
+-            if (!__is_root_gfid(stbuf->ia_gfid) &&
+-                ((local->stbuf.ia_gid != stbuf->ia_gid) ||
+-                 (local->stbuf.ia_uid != stbuf->ia_uid) ||
+-                 (is_permission_different(&local->stbuf.ia_prot,
+-                                          &stbuf->ia_prot)))) {
++            if ((local->stbuf.ia_gid != stbuf->ia_gid) ||
++                (local->stbuf.ia_uid != stbuf->ia_uid) ||
++                (is_permission_different(&local->stbuf.ia_prot,
++                                         &stbuf->ia_prot))) {
+                 local->need_attrheal = 1;
+             }
+         }
+@@ -10969,7 +10985,7 @@ dht_notify(xlator_t *this, int event, void *data, ...)
+                 if ((cmd == GF_DEFRAG_CMD_STATUS) ||
+                     (cmd == GF_DEFRAG_CMD_STATUS_TIER) ||
+                     (cmd == GF_DEFRAG_CMD_DETACH_STATUS))
+-                	gf_defrag_status_get(conf, output, _gf_false);
++                    gf_defrag_status_get(conf, output, _gf_false);
+                 else if (cmd == GF_DEFRAG_CMD_START_DETACH_TIER)
+                     gf_defrag_start_detach_tier(defrag);
+                 else if (cmd == GF_DEFRAG_CMD_DETACH_START)
+diff --git a/xlators/cluster/dht/src/dht-selfheal.c b/xlators/cluster/dht/src/dht-selfheal.c
+index f5dfff9..f4e17d1 100644
+--- a/xlators/cluster/dht/src/dht-selfheal.c
++++ b/xlators/cluster/dht/src/dht-selfheal.c
+@@ -2097,9 +2097,18 @@ dht_selfheal_directory(call_frame_t *frame, dht_selfheal_dir_cbk_t dir_cbk,
+     local->selfheal.dir_cbk = dir_cbk;
+     local->selfheal.layout = dht_layout_ref(this, layout);
+ 
+-    if (local->need_attrheal && !IA_ISINVAL(local->mds_stbuf.ia_type)) {
+-        /*Use the one in the mds_stbuf*/
+-        local->stbuf = local->mds_stbuf;
++    if (local->need_attrheal) {
++        if (__is_root_gfid(local->stbuf.ia_gfid)) {
++            local->stbuf.ia_gid = local->prebuf.ia_gid;
++            local->stbuf.ia_uid = local->prebuf.ia_uid;
++
++            local->stbuf.ia_ctime = local->prebuf.ia_ctime;
++            local->stbuf.ia_ctime_nsec = local->prebuf.ia_ctime_nsec;
++            local->stbuf.ia_prot = local->prebuf.ia_prot;
++
++        } else if (!IA_ISINVAL(local->mds_stbuf.ia_type)) {
++            local->stbuf = local->mds_stbuf;
++        }
+     }
+ 
+     if (!__is_root_gfid(local->stbuf.ia_gfid)) {
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index c5a43a1..e5c6e62 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -598,6 +598,7 @@ posix_init(xlator_t *this)
+     int force_directory = -1;
+     int create_mask = -1;
+     int create_directory_mask = -1;
++    char value;
+ 
+     dir_data = dict_get(this->options, "directory");
+ 
+@@ -654,16 +655,11 @@ posix_init(xlator_t *this)
+     }
+ 
+     /* Check for Extended attribute support, if not present, log it */
+-    op_ret = sys_lsetxattr(dir_data->data, "trusted.glusterfs.test", "working",
+-                           8, 0);
+-    if (op_ret != -1) {
+-        ret = sys_lremovexattr(dir_data->data, "trusted.glusterfs.test");
+-        if (ret) {
+-            gf_msg(this->name, GF_LOG_DEBUG, errno, P_MSG_INVALID_OPTION,
+-                   "failed to remove xattr: "
+-                   "trusted.glusterfs.test");
+-        }
+-    } else {
++    size = sys_lgetxattr(dir_data->data, "user.x", &value, sizeof(value));
++
++    if ((size == -1) && (errno == EOPNOTSUPP)) {
++        gf_msg(this->name, GF_LOG_DEBUG, 0, P_MSG_XDATA_GETXATTR,
++               "getxattr returned %zd", size);
+         tmp_data = dict_get(this->options, "mandate-attribute");
+         if (tmp_data) {
+             if (gf_string2boolean(tmp_data->data, &tmp_bool) == -1) {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch b/SOURCES/0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
new file mode 100644
index 0000000..466bf4e
--- /dev/null
+++ b/SOURCES/0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
@@ -0,0 +1,179 @@
+From 5946a6ec18976c0f52162fe0f47e9b5171af87ec Mon Sep 17 00:00:00 2001
+From: Soumya Koduri <skoduri@redhat.com>
+Date: Mon, 6 Apr 2020 12:36:44 +0530
+Subject: [PATCH 503/511] gfapi: Suspend synctasks instead of blocking them
+
+There are certain conditions which block the current
+execution thread (such as waiting on a mutex, a condition
+variable, or an I/O response). In such cases, if it is a
+synctask thread, we should suspend the task instead
+of blocking the thread (as SYNCOP does using synctask_yield).
+
+This is to avoid deadlocks like the one below -
+
+1) synctaskA sets fs->migration_in_progress to 1 and
+   does I/O (LOOKUP)
+2) Other synctask threads wait for fs->migration_in_progress
+   to be reset to 0 by synctaskA, and hence are blocked
+3) But synctaskA cannot resume, as all synctask threads are blocked
+   on (2).
+
+Note: the same approach is already used by a few other components,
+such as syncbarrier; a sketch of the pattern follows below.
+
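+A condensed sketch of the suspend/wake pattern; task_yield(),
+task_wake() and the queue helpers are hypothetical stand-ins for
+synctask_yield(), synctask_wake() and the gluster list macros:
+
+#include <pthread.h>
+
+struct list_head; /* opaque here */
+struct task { struct list_head *waitq_link; };
+struct fs { pthread_mutex_t mutex; struct list_head *waitq; };
+
+void task_yield(struct task *t);             /* suspend the caller */
+void task_wake(struct task *t);              /* resume a parked task */
+void enqueue(struct fs *fs, struct task *t); /* add to fs->waitq */
+struct task *dequeue(struct fs *fs);         /* pop a waiter, or NULL */
+
+/* Waiter: park the synctask instead of blocking the worker thread. */
+void lock_wait(struct fs *fs, struct task *task)
+{
+    enqueue(fs, task);                /* register as a waiter */
+    pthread_mutex_unlock(&fs->mutex); /* drop the lock first */
+    task_yield(task);                 /* suspend; the thread is freed */
+    pthread_mutex_lock(&fs->mutex);   /* reacquire once woken */
+}
+
+/* Waker: resume every parked task once migration completes. */
+void wake_all(struct fs *fs)
+{
+    struct task *t;
+
+    while ((t = dequeue(fs)) != NULL)
+        task_wake(t);
+}
+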
+>Change-Id: If90f870d663bb242c702a5b86ac52eeda67c6f0d
+>Fixes: #1146
+>Signed-off-by: Soumya Koduri <skoduri@redhat.com>
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24276
+
+BUG: 1779238
+Change-Id: If90f870d663bb242c702a5b86ac52eeda67c6f0d
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221081
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Soumya Koduri <skoduri@redhat.com>
+---
+ api/src/glfs-internal.h | 34 ++++++++++++++++++++++++++++++++--
+ api/src/glfs-resolve.c  |  9 +++++++++
+ api/src/glfs.c          |  9 +++++++++
+ 3 files changed, 50 insertions(+), 2 deletions(-)
+
+diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
+index 55401b2..15cf0ee 100644
+--- a/api/src/glfs-internal.h
++++ b/api/src/glfs-internal.h
+@@ -16,6 +16,7 @@
+ #include <glusterfs/upcall-utils.h>
+ #include "glfs-handles.h"
+ #include <glusterfs/refcount.h>
++#include <glusterfs/syncop.h>
+ 
+ #define GLFS_SYMLINK_MAX_FOLLOW 2048
+ 
+@@ -207,6 +208,7 @@ struct glfs {
+     glfs_upcall_cbk up_cbk; /* upcall cbk function to be registered */
+     void *up_data;          /* Opaque data provided by application
+                              * during upcall registration */
++    struct list_head waitq; /* waiting synctasks */
+ };
+ 
+ /* This enum is used to maintain the state of glfd. In case of async fops
+@@ -442,6 +444,34 @@ glfs_process_upcall_event(struct glfs *fs, void *data)
+         THIS = glfd->fd->inode->table->xl->ctx->master;                        \
+     } while (0)
+ 
++#define __GLFS_LOCK_WAIT(fs)                                                   \
++    do {                                                                       \
++        struct synctask *task = NULL;                                          \
++                                                                               \
++        task = synctask_get();                                                 \
++                                                                               \
++        if (task) {                                                            \
++            list_add_tail(&task->waitq, &fs->waitq);                           \
++            pthread_mutex_unlock(&fs->mutex);                                  \
++            synctask_yield(task, NULL);                                              \
++            pthread_mutex_lock(&fs->mutex);                                    \
++        } else {                                                               \
++            /* non-synctask */                                                 \
++            pthread_cond_wait(&fs->cond, &fs->mutex);                          \
++        }                                                                      \
++    } while (0)
++
++#define __GLFS_SYNCTASK_WAKE(fs)                                               \
++    do {                                                                       \
++        struct synctask *waittask = NULL;                                      \
++                                                                               \
++        while (!list_empty(&fs->waitq)) {                                      \
++            waittask = list_entry(fs->waitq.next, struct synctask, waitq);     \
++            list_del_init(&waittask->waitq);                                   \
++            synctask_wake(waittask);                                           \
++        }                                                                      \
++    } while (0)
++
+ /*
+   By default all lock attempts from user context must
+   use glfs_lock() and glfs_unlock(). This allows
+@@ -466,10 +496,10 @@ glfs_lock(struct glfs *fs, gf_boolean_t wait_for_migration)
+     pthread_mutex_lock(&fs->mutex);
+ 
+     while (!fs->init)
+-        pthread_cond_wait(&fs->cond, &fs->mutex);
++        __GLFS_LOCK_WAIT(fs);
+ 
+     while (wait_for_migration && fs->migration_in_progress)
+-        pthread_cond_wait(&fs->cond, &fs->mutex);
++        __GLFS_LOCK_WAIT(fs);
+ 
+     return 0;
+ }
+diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
+index 062b7dc..58b6ace 100644
+--- a/api/src/glfs-resolve.c
++++ b/api/src/glfs-resolve.c
+@@ -65,6 +65,9 @@ __glfs_first_lookup(struct glfs *fs, xlator_t *subvol)
+     fs->migration_in_progress = 0;
+     pthread_cond_broadcast(&fs->cond);
+ 
++    /* wake up other waiting tasks */
++    __GLFS_SYNCTASK_WAKE(fs);
++
+     return ret;
+ }
+ 
+@@ -154,6 +157,9 @@ __glfs_refresh_inode(struct glfs *fs, xlator_t *subvol, inode_t *inode,
+     fs->migration_in_progress = 0;
+     pthread_cond_broadcast(&fs->cond);
+ 
++    /* wake up other waiting tasks */
++    __GLFS_SYNCTASK_WAKE(fs);
++
+     return newinode;
+ }
+ 
+@@ -841,6 +847,9 @@ __glfs_migrate_fd(struct glfs *fs, xlator_t *newsubvol, struct glfs_fd *glfd)
+     fs->migration_in_progress = 0;
+     pthread_cond_broadcast(&fs->cond);
+ 
++    /* wake up other waiting tasks */
++    __GLFS_SYNCTASK_WAKE(fs);
++
+     return newfd;
+ }
+ 
+diff --git a/api/src/glfs.c b/api/src/glfs.c
+index f36616d..ae994fa 100644
+--- a/api/src/glfs.c
++++ b/api/src/glfs.c
+@@ -740,6 +740,7 @@ glfs_new_fs(const char *volname)
+ 
+     INIT_LIST_HEAD(&fs->openfds);
+     INIT_LIST_HEAD(&fs->upcall_list);
++    INIT_LIST_HEAD(&fs->waitq);
+ 
+     PTHREAD_MUTEX_INIT(&fs->mutex, NULL, fs->pthread_flags, GLFS_INIT_MUTEX,
+                        err);
+@@ -1228,6 +1229,7 @@ pub_glfs_fini(struct glfs *fs)
+     call_pool_t *call_pool = NULL;
+     int fs_init = 0;
+     int err = -1;
++    struct synctask *waittask = NULL;
+ 
+     DECLARE_OLD_THIS;
+ 
+@@ -1249,6 +1251,13 @@ pub_glfs_fini(struct glfs *fs)
+ 
+     call_pool = fs->ctx->pool;
+ 
++    /* Wake up any suspended synctasks */
++    while (!list_empty(&fs->waitq)) {
++        waittask = list_entry(fs->waitq.next, struct synctask, waitq);
++        list_del_init(&waittask->waitq);
++        synctask_wake(waittask);
++    }
++
+     while (countdown--) {
+         /* give some time for background frames to finish */
+         pthread_mutex_lock(&fs->mutex);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch b/SOURCES/0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
new file mode 100644
index 0000000..21d7f7f
--- /dev/null
+++ b/SOURCES/0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
@@ -0,0 +1,109 @@
+From baa566be8832a56fdea7068d84844ec1ec84d8d9 Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Thu, 15 Oct 2020 16:28:58 +0530
+Subject: [PATCH 504/511] io-stats: Configure ios_sample_buf_size based on
+ sample_interval value (#1574)
+
+The io-stats xlator allocates an ios_sample_buf of 64k entries (about
+10M) per xlator, but when sample_interval is 0 this large buffer is not
+needed, so allocate the default-sized buffer only while sample_interval
+is non-zero. The change helps reduce the RSS size of brick and shd
+processes when the number of volumes is large.
+
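+For illustration, a condensed sketch of the sizing rule (the helper
+name is hypothetical; the real code gates GF_OPTION_INIT and
+GF_OPTION_RECONF on conf->ios_sample_interval):
+
+#define IOS_BUF_DEFAULT 65536 /* ~10M of samples per xlator */
+#define IOS_BUF_MINIMAL 1024  /* token allocation while sampling is off */
+
+static int
+choose_sample_buf_size(int sample_interval, int configured_size)
+{
+    /* With sampling disabled the big ring buffer is dead weight,
+       so fall back to the minimal allocation. */
+    if (sample_interval == 0)
+        return IOS_BUF_MINIMAL;
+
+    return (configured_size > 0) ? configured_size : IOS_BUF_DEFAULT;
+}
+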
+> Change-Id: I3e82cca92e40549355edfac32580169f3ce51af8
+> Fixes: #1542
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry picked from commit f71660eb879a9cd5761e5adbf10c783e959a990a)
+> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1542)
+
+Change-Id: I3e82cca92e40549355edfac32580169f3ce51af8
+BUG: 1898778
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221183
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/bugs/glusterd/daemon-log-level-option.t |  8 ++++----
+ xlators/debug/io-stats/src/io-stats.c         | 26 ++++++++++++++++++++++----
+ 2 files changed, 26 insertions(+), 8 deletions(-)
+
+diff --git a/tests/bugs/glusterd/daemon-log-level-option.t b/tests/bugs/glusterd/daemon-log-level-option.t
+index 66e55e3..5352a63 100644
+--- a/tests/bugs/glusterd/daemon-log-level-option.t
++++ b/tests/bugs/glusterd/daemon-log-level-option.t
+@@ -61,8 +61,8 @@ rm -f /var/log/glusterfs/glustershd.log
+ TEST $CLI volume set all cluster.daemon-log-level WARNING
+ TEST $CLI volume start $V0
+ 
+-# log should not have any info messages
+-EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
++# log should have exactly 1 info message, from io-stats configuring ios_sample_buf_size
++EXPECT 1 Info_messages_count "/var/log/glusterfs/glustershd.log"
+ 
+ # log should not have any debug messages
+ EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+@@ -78,8 +78,8 @@ rm -f /var/log/glusterfs/glustershd.log
+ TEST $CLI volume set all cluster.daemon-log-level ERROR
+ TEST $CLI volume start $V0
+ 
+-# log should not have any info messages
+-EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
++# log should have exactly 1 info message, from io-stats configuring ios_sample_buf_size
++EXPECT 1 Info_messages_count "/var/log/glusterfs/glustershd.log"
+ 
+ # log should not have any warning messages
+ EXPECT 0 Warning_messages_count "/var/log/glusterfs/glustershd.log"
+diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c
+index aa91a0a..9b34895 100644
+--- a/xlators/debug/io-stats/src/io-stats.c
++++ b/xlators/debug/io-stats/src/io-stats.c
+@@ -3724,6 +3724,15 @@ xlator_set_loglevel(xlator_t *this, int log_level)
+     }
+ }
+ 
++void
++ios_sample_buf_size_configure(char *name, struct ios_conf *conf)
++{
++    conf->ios_sample_buf_size = 1024;
++    gf_log(name, GF_LOG_INFO,
++           "Configure ios_sample_buf "
++           " size is 1024 because ios_sample_interval is 0");
++}
++
+ int
+ reconfigure(xlator_t *this, dict_t *options)
+ {
+@@ -3779,8 +3788,13 @@ reconfigure(xlator_t *this, dict_t *options)
+                      int32, out);
+     GF_OPTION_RECONF("ios-dump-format", dump_format_str, options, str, out);
+     ios_set_log_format_code(conf, dump_format_str);
+-    GF_OPTION_RECONF("ios-sample-buf-size", conf->ios_sample_buf_size, options,
+-                     int32, out);
++    if (conf->ios_sample_interval) {
++        GF_OPTION_RECONF("ios-sample-buf-size", conf->ios_sample_buf_size,
++                         options, int32, out);
++    } else {
++        ios_sample_buf_size_configure(this->name, conf);
++    }
++
+     GF_OPTION_RECONF("sys-log-level", sys_log_str, options, str, out);
+     if (sys_log_str) {
+         sys_log_level = glusterd_check_log_level(sys_log_str);
+@@ -3947,8 +3961,12 @@ init(xlator_t *this)
+     GF_OPTION_INIT("ios-dump-format", dump_format_str, str, out);
+     ios_set_log_format_code(conf, dump_format_str);
+ 
+-    GF_OPTION_INIT("ios-sample-buf-size", conf->ios_sample_buf_size, int32,
+-                   out);
++    if (conf->ios_sample_interval) {
++        GF_OPTION_INIT("ios-sample-buf-size", conf->ios_sample_buf_size, int32,
++                       out);
++    } else {
++        ios_sample_buf_size_configure(this->name, conf);
++    }
+ 
+     ret = ios_init_sample_buf(conf);
+     if (ret) {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0505-trash-Create-inode_table-only-while-feature-is-enabl.patch b/SOURCES/0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
new file mode 100644
index 0000000..a0f6b62
--- /dev/null
+++ b/SOURCES/0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
@@ -0,0 +1,107 @@
+From 43a8e2c7441b14f5f238cb11d83f32f248b16abb Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Tue, 13 Oct 2020 18:56:20 +0530
+Subject: [PATCH 505/511] trash: Create inode_table only while feature is
+ enabled
+
+Currently the trash xlator creates an inode table (1M) even when the
+feature is not enabled. In a brick_mux environment, where up to 250
+bricks are attached to a single brick process, this increases the RSS
+size of the brick process unnecessarily while the feature is disabled.
+
+Solution: Create the inode_table only while the feature is enabled.
+The patch reduces the RSS size by 250M per brick process
+when the trash feature is not enabled.
+
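+For illustration, a condensed sketch of the lazy-allocation rule (the
+function and struct names are hypothetical; the real code calls
+inode_table_new() from init() and reconfigure() as shown below):
+
+static int
+trash_enable_feature(xlator_t *this, struct trash_priv *priv)
+{
+    if (priv->trash_itable) /* already created by an earlier enable */
+        return 0;
+
+    priv->trash_itable = inode_table_new(0, this);
+    if (!priv->trash_itable)
+        return -ENOMEM; /* leave the feature off; nothing to tear down */
+
+    priv->state = _gf_true;
+    return 0;
+}
+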
+> Change-Id: I11a6fd2b8419fe2988f398be6ec30fb4f3b99a5d
+> Fixes: #1543
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit 32f25e7b1b4b080ab2640e178b407c878e629376)
+> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1543)
+
+Change-Id: I11a6fd2b8419fe2988f398be6ec30fb4f3b99a5d
+BUG: 1898781
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221184
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/features/trash/src/trash.c | 47 +++++++++++++++++++++++++++++++++++---
+ 1 file changed, 44 insertions(+), 3 deletions(-)
+
+diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
+index f96ed73..93f020f 100644
+--- a/xlators/features/trash/src/trash.c
++++ b/xlators/features/trash/src/trash.c
+@@ -2235,16 +2235,47 @@ reconfigure(xlator_t *this, dict_t *options)
+     char trash_dir[PATH_MAX] = {
+         0,
+     };
++    gf_boolean_t active_earlier = _gf_false;
++    gf_boolean_t active_now = _gf_false;
+ 
+     priv = this->private;
+ 
+     GF_VALIDATE_OR_GOTO("trash", priv, out);
+ 
++    active_earlier = priv->state;
++    GF_OPTION_RECONF("trash", active_now, options, bool, out);
++
++    /* Disabling the trash feature is not allowed at this point, until
++       we find an approach to clean up its resources gracefully.
++       Disabling the feature would require destroying the inode
++       table, and currently it is difficult to ensure that no
++       inode is still in use.
++    */
++    if (active_earlier && !active_now) {
++        gf_log(this->name, GF_LOG_INFO,
++               "Disable of trash feature is not allowed "
++               "during graph reconfigure");
++        ret = 0;
++        goto out;
++    }
++
++    if (!active_earlier && active_now) {
++        if (!priv->trash_itable) {
++            priv->trash_itable = inode_table_new(0, this);
++            if (!priv->trash_itable) {
++                ret = -ENOMEM;
++                gf_log(this->name, GF_LOG_ERROR,
++                       "failed to create trash inode_table"
++                       "  during graph reconfigure");
++                goto out;
++            }
++        }
++        priv->state = active_now;
++    }
++
+     GF_OPTION_RECONF("trash-internal-op", priv->internal, options, bool, out);
+     GF_OPTION_RECONF("trash-dir", tmp, options, str, out);
+ 
+-    GF_OPTION_RECONF("trash", priv->state, options, bool, out);
+-
+     if (priv->state) {
+         ret = create_or_rename_trash_directory(this);
+ 
+@@ -2501,7 +2532,17 @@ init(xlator_t *this)
+         goto out;
+     }
+ 
+-    priv->trash_itable = inode_table_new(0, this);
++    if (priv->state) {
++        priv->trash_itable = inode_table_new(0, this);
++        if (!priv->trash_itable) {
++            ret = -ENOMEM;
++            priv->state = _gf_false;
++            gf_log(this->name, GF_LOG_ERROR,
++                   "failed to create trash inode_table disable trash");
++            goto out;
++        }
++    }
++
+     gf_log(this->name, GF_LOG_DEBUG, "brick path is%s", priv->brick_path);
+ 
+     this->private = (void *)priv;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch b/SOURCES/0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
new file mode 100644
index 0000000..cf978f5
--- /dev/null
+++ b/SOURCES/0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
@@ -0,0 +1,499 @@
+From 17a9ce965ef2fec9ee5c8e4b76981bb7cbcf1352 Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Mon, 9 Nov 2020 17:15:42 +0530
+Subject: [PATCH 506/511] posix: Attach a posix_spawn_disk_thread with
+ glusterfs_ctx (#1595)
+
+Currently the posix xlator spawns a posix_disk_space thread per brick.
+In a brick_mux environment, where glusterd can attach bricks up to the
+maximum level (250) to a single brick process, 250 such threads are
+spawned and the memory size of the brick process grows accordingly.
+
+Solution: Attach the posix_disk_space thread to the glusterfs_ctx so
+          that one thread is spawned per process instead of one per brick.
+
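+For illustration, a condensed sketch of the per-process registration
+(simplified: pthread_create stands in for gf_thread_create, and
+register_brick for posix_spawn_disk_space_check_thread below):
+
+static void *disk_thread(void *data); /* the shared 5s poll loop */
+
+static int
+register_brick(glusterfs_ctx_t *ctx, struct posix_diskxl *pxl)
+{
+    int ret = 0;
+
+    pthread_mutex_lock(&ctx->xl_lock);
+    if (ctx->diskxl_count++ == 0) /* first brick: start the one thread */
+        ret = pthread_create(&ctx->disk_space_check, NULL, disk_thread,
+                             ctx);
+    if (ret == 0)
+        list_add_tail(&pxl->list, &ctx->diskth_xl);
+    else
+        ctx->diskxl_count--; /* roll back on spawn failure */
+    pthread_mutex_unlock(&ctx->xl_lock);
+
+    return ret;
+}
+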
+> Fixes: #1482
+> Change-Id: I8dd88f252a950495b71742e2a7588bd5bb019ec7
+> Cherry-picked from commit 3f93be77e1acf5baacafa97a320e91e6879d1c0e
+> Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1482
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+
+Change-Id: I8dd88f252a950495b71742e2a7588bd5bb019ec7
+Bug: 1898776
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220366
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfsd/src/glusterfsd.c                    |   4 +
+ libglusterfs/src/glusterfs/glusterfs.h         |   6 ++
+ xlators/storage/posix/src/posix-common.c       |  68 +++++++++++--
+ xlators/storage/posix/src/posix-handle.h       |   3 +-
+ xlators/storage/posix/src/posix-helpers.c      | 131 ++++++++++++++-----------
+ xlators/storage/posix/src/posix-inode-fd-ops.c |   3 +-
+ xlators/storage/posix/src/posix-mem-types.h    |   1 +
+ xlators/storage/posix/src/posix.h              |  12 ++-
+ 8 files changed, 160 insertions(+), 68 deletions(-)
+
+diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
+index 955bf1d..ac25255 100644
+--- a/glusterfsd/src/glusterfsd.c
++++ b/glusterfsd/src/glusterfsd.c
+@@ -1840,9 +1840,13 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
+     INIT_LIST_HEAD(&cmd_args->xlator_options);
+     INIT_LIST_HEAD(&cmd_args->volfile_servers);
+     ctx->pxl_count = 0;
++    ctx->diskxl_count = 0;
+     pthread_mutex_init(&ctx->fd_lock, NULL);
+     pthread_cond_init(&ctx->fd_cond, NULL);
+     INIT_LIST_HEAD(&ctx->janitor_fds);
++    pthread_mutex_init(&ctx->xl_lock, NULL);
++    pthread_cond_init(&ctx->xl_cond, NULL);
++    INIT_LIST_HEAD(&ctx->diskth_xl);
+ 
+     lim.rlim_cur = RLIM_INFINITY;
+     lim.rlim_max = RLIM_INFINITY;
+diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
+index bf6a987..d3400bf 100644
+--- a/libglusterfs/src/glusterfs/glusterfs.h
++++ b/libglusterfs/src/glusterfs/glusterfs.h
+@@ -740,7 +740,13 @@ struct _glusterfs_ctx {
+     pthread_t janitor;
+     /* The variable is use to save total posix xlator count */
+     uint32_t pxl_count;
++    uint32_t diskxl_count;
+ 
++    /* List of posix xlators used by the disk thread */
++    struct list_head diskth_xl;
++    pthread_mutex_t xl_lock;
++    pthread_cond_t xl_cond;
++    pthread_t disk_space_check;
+     char volume_id[GF_UUID_BUF_SIZE]; /* Used only in protocol/client */
+ };
+ typedef struct _glusterfs_ctx glusterfs_ctx_t;
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index e5c6e62..2c9030b 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -138,6 +138,36 @@ posix_inode(xlator_t *this)
+     return 0;
+ }
+ 
++static void
++delete_posix_diskxl(xlator_t *this)
++{
++    struct posix_private *priv = this->private;
++    struct posix_diskxl *pxl = priv->pxl;
++    glusterfs_ctx_t *ctx = this->ctx;
++    uint32_t count = 1;
++
++    if (pxl) {
++        pthread_mutex_lock(&ctx->xl_lock);
++        {
++            pxl->detach_notify = _gf_true;
++            while (pxl->is_use)
++                pthread_cond_wait(&pxl->cond, &ctx->xl_lock);
++            list_del_init(&pxl->list);
++            priv->pxl = NULL;
++            count = --ctx->diskxl_count;
++            if (count == 0)
++                pthread_cond_signal(&ctx->xl_cond);
++        }
++        pthread_mutex_unlock(&ctx->xl_lock);
++        pthread_cond_destroy(&pxl->cond);
++        GF_FREE(pxl);
++        if (count == 0) {
++            pthread_join(ctx->disk_space_check, NULL);
++            ctx->disk_space_check = 0;
++        }
++    }
++}
++
+ /**
+  * notify - when parent sends PARENT_UP, send CHILD_UP event from here
+  */
+@@ -194,6 +224,8 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...)
+             }
+             pthread_mutex_unlock(&ctx->fd_lock);
+ 
++            delete_posix_diskxl(this);
++
+             gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s",
+                    victim->name);
+             default_notify(this->parents->xlator, GF_EVENT_CHILD_DOWN, data);
+@@ -318,6 +350,7 @@ posix_reconfigure(xlator_t *this, dict_t *options)
+     int32_t force_directory_mode = -1;
+     int32_t create_mask = -1;
+     int32_t create_directory_mask = -1;
++    double old_disk_reserve = 0.0;
+ 
+     priv = this->private;
+ 
+@@ -383,6 +416,7 @@ posix_reconfigure(xlator_t *this, dict_t *options)
+                " fallback to <hostname>:<export>");
+     }
+ 
++    old_disk_reserve = priv->disk_reserve;
+     GF_OPTION_RECONF("reserve", priv->disk_reserve, options, percent_or_size,
+                      out);
+     /* option can be any one of percent or bytes */
+@@ -390,11 +424,19 @@ posix_reconfigure(xlator_t *this, dict_t *options)
+     if (priv->disk_reserve < 100.0)
+         priv->disk_unit = 'p';
+ 
+-    if (priv->disk_reserve) {
++    /* Delete the pxl object from the disk_reserve list whenever the
++       reserve option is changed during graph reconfigure
++    */
++    if (old_disk_reserve != priv->disk_reserve) {
++        delete_posix_diskxl(this);
++        old_disk_reserve = 0;
++    }
++
++    if (!old_disk_reserve && priv->disk_reserve) {
+         ret = posix_spawn_disk_space_check_thread(this);
+         if (ret) {
+             gf_msg(this->name, GF_LOG_INFO, 0, P_MSG_DISK_SPACE_CHECK_FAILED,
+-                   "Getting disk space check from thread failed");
++                   "Getting disk space check from thread failed ");
+             goto out;
+         }
+     }
+@@ -1008,13 +1050,13 @@ posix_init(xlator_t *this)
+                " fallback to <hostname>:<export>");
+     }
+ 
+-    _private->disk_space_check_active = _gf_false;
+     _private->disk_space_full = 0;
+ 
+     GF_OPTION_INIT("reserve", _private->disk_reserve, percent_or_size, out);
+ 
+     /* option can be any one of percent or bytes */
+     _private->disk_unit = 0;
++    pthread_cond_init(&_private->fd_cond, NULL);
+     if (_private->disk_reserve < 100.0)
+         _private->disk_unit = 'p';
+ 
+@@ -1162,12 +1204,6 @@ posix_fini(xlator_t *this)
+         priv->health_check = 0;
+     }
+ 
+-    if (priv->disk_space_check) {
+-        priv->disk_space_check_active = _gf_false;
+-        (void)gf_thread_cleanup_xint(priv->disk_space_check);
+-        priv->disk_space_check = 0;
+-    }
+-
+     if (priv->janitor) {
+         /*TODO: Make sure the synctask is also complete */
+         ret = gf_tw_del_timer(this->ctx->tw->timer_wheel, priv->janitor);
+@@ -1192,10 +1228,24 @@ posix_fini(xlator_t *this)
+         pthread_join(ctx->janitor, NULL);
+     }
+ 
++    pthread_mutex_lock(&ctx->xl_lock);
++    {
++        count = --ctx->diskxl_count;
++        if (count == 0)
++            pthread_cond_signal(&ctx->xl_cond);
++    }
++    pthread_mutex_unlock(&ctx->xl_lock);
++
++    if (count == 0) {
++        pthread_join(ctx->disk_space_check, NULL);
++        ctx->disk_space_check = 0;
++    }
++
+     if (priv->fsyncer) {
+         (void)gf_thread_cleanup_xint(priv->fsyncer);
+         priv->fsyncer = 0;
+     }
++
+     /*unlock brick dir*/
+     if (priv->mount_lock)
+         (void)sys_closedir(priv->mount_lock);
+diff --git a/xlators/storage/posix/src/posix-handle.h b/xlators/storage/posix/src/posix-handle.h
+index c4d7cb1..8e4c719 100644
+--- a/xlators/storage/posix/src/posix-handle.h
++++ b/xlators/storage/posix/src/posix-handle.h
+@@ -206,5 +206,6 @@ int
+ posix_check_internal_writes(xlator_t *this, fd_t *fd, int sysfd, dict_t *xdata);
+ 
+ void
+-posix_disk_space_check(xlator_t *this);
++posix_disk_space_check(struct posix_private* priv);
++
+ #endif /* !_POSIX_HANDLE_H */
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index ceac52a..110d383 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -2284,9 +2284,8 @@ unlock:
+ }
+ 
+ void
+-posix_disk_space_check(xlator_t *this)
++posix_disk_space_check(struct posix_private *priv)
+ {
+-    struct posix_private *priv = NULL;
+     char *subvol_path = NULL;
+     int op_ret = 0;
+     double size = 0;
+@@ -2295,16 +2294,14 @@ posix_disk_space_check(xlator_t *this)
+     double totsz = 0;
+     double freesz = 0;
+ 
+-    GF_VALIDATE_OR_GOTO(this->name, this, out);
+-    priv = this->private;
+-    GF_VALIDATE_OR_GOTO(this->name, priv, out);
++    GF_VALIDATE_OR_GOTO("posix-helpers", priv, out);
+ 
+     subvol_path = priv->base_path;
+ 
+     op_ret = sys_statvfs(subvol_path, &buf);
+ 
+     if (op_ret == -1) {
+-        gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_STATVFS_FAILED,
++        gf_msg("posix-disk", GF_LOG_ERROR, errno, P_MSG_STATVFS_FAILED,
+                "statvfs failed on %s", subvol_path);
+         goto out;
+     }
+@@ -2328,78 +2325,102 @@ out:
+ }
+ 
+ static void *
+-posix_disk_space_check_thread_proc(void *data)
++posix_ctx_disk_thread_proc(void *data)
+ {
+-    xlator_t *this = NULL;
+     struct posix_private *priv = NULL;
++    glusterfs_ctx_t *ctx = NULL;
+     uint32_t interval = 0;
+-    int ret = -1;
+-
+-    this = data;
+-    priv = this->private;
++    struct posix_diskxl *pthis = NULL;
++    xlator_t *this = NULL;
++    struct timespec sleep_till = {
++        0,
++    };
+ 
++    ctx = data;
+     interval = 5;
+-    gf_msg_debug(this->name, 0,
+-                 "disk-space thread started, "
++
++    gf_msg_debug("glusterfs_ctx", 0,
++                 "Ctx disk-space thread started, "
+                  "interval = %d seconds",
+                  interval);
+-    while (1) {
+-        /* aborting sleep() is a request to exit this thread, sleep()
+-         * will normally not return when cancelled */
+-        ret = sleep(interval);
+-        if (ret > 0)
+-            break;
+-        /* prevent thread errors while doing the health-check(s) */
+-        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+-
+-        /* Do the disk-check.*/
+-        posix_disk_space_check(this);
+-        if (!priv->disk_space_check_active)
+-            goto out;
+-        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+-    }
+ 
+-out:
+-    gf_msg_debug(this->name, 0, "disk space check thread exiting");
+-    LOCK(&priv->lock);
++    pthread_mutex_lock(&ctx->xl_lock);
+     {
+-        priv->disk_space_check_active = _gf_false;
++        while (ctx->diskxl_count > 0) {
++            list_for_each_entry(pthis, &ctx->diskth_xl, list)
++            {
++                pthis->is_use = _gf_true;
++                pthread_mutex_unlock(&ctx->xl_lock);
++
++                THIS = this = pthis->xl;
++                priv = this->private;
++
++                posix_disk_space_check(priv);
++
++                pthread_mutex_lock(&ctx->xl_lock);
++                pthis->is_use = _gf_false;
++                /* Send a signal to posix_notify function */
++                if (pthis->detach_notify)
++                    pthread_cond_signal(&pthis->cond);
++            }
++
++            timespec_now_realtime(&sleep_till);
++            sleep_till.tv_sec += 5;
++            (void)pthread_cond_timedwait(&ctx->xl_cond, &ctx->xl_lock,
++                                         &sleep_till);
++        }
+     }
+-    UNLOCK(&priv->lock);
++    pthread_mutex_unlock(&ctx->xl_lock);
+ 
+     return NULL;
+ }
+ 
+ int
+-posix_spawn_disk_space_check_thread(xlator_t *xl)
++posix_spawn_disk_space_check_thread(xlator_t *this)
+ {
+-    struct posix_private *priv = NULL;
+-    int ret = -1;
++    int ret = 0;
++    glusterfs_ctx_t *ctx = this->ctx;
++    struct posix_diskxl *pxl = NULL;
++    struct posix_private *priv = this->private;
+ 
+-    priv = xl->private;
++    pxl = GF_CALLOC(1, sizeof(struct posix_diskxl), gf_posix_mt_diskxl_t);
++    if (!pxl) {
++        ret = -ENOMEM;
++        gf_log(this->name, GF_LOG_ERROR,
++               "Calloc is failed to allocate "
++               "memory for diskxl object");
++        goto out;
++    }
++    pthread_cond_init(&pxl->cond, NULL);
+ 
+-    LOCK(&priv->lock);
++    pthread_mutex_lock(&ctx->xl_lock);
+     {
+-        /* cancel the running thread  */
+-        if (priv->disk_space_check_active == _gf_true) {
+-            pthread_cancel(priv->disk_space_check);
+-            priv->disk_space_check_active = _gf_false;
+-        }
++        if (ctx->diskxl_count++ == 0) {
++            ret = gf_thread_create(&ctx->disk_space_check, NULL,
++                                   posix_ctx_disk_thread_proc, ctx,
++                                   "posixctxres");
+ 
+-        ret = gf_thread_create(&priv->disk_space_check, NULL,
+-                               posix_disk_space_check_thread_proc, xl,
+-                               "posix_reserve");
+-        if (ret) {
+-            priv->disk_space_check_active = _gf_false;
+-            gf_msg(xl->name, GF_LOG_ERROR, errno, P_MSG_DISK_SPACE_CHECK_FAILED,
+-                   "unable to setup disk space check thread");
+-            goto unlock;
++            if (ret) {
++                gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_THREAD_FAILED,
++                       "spawning disk space check thread failed");
++                ctx->diskxl_count--;
++                pthread_mutex_unlock(&ctx->xl_lock);
++                goto out;
++            }
+         }
++        pxl->xl = this;
++        priv->pxl = (void *)pxl;
++        list_add_tail(&pxl->list, &ctx->diskth_xl);
++    }
++    pthread_mutex_unlock(&ctx->xl_lock);
+ 
+-        priv->disk_space_check_active = _gf_true;
++out:
++    if (ret) {
++        if (pxl) {
++            pthread_cond_destroy(&pxl->cond);
++            GF_FREE(pxl);
++        }
+     }
+-unlock:
+-    UNLOCK(&priv->lock);
+     return ret;
+ }
+ 
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index 1d37aed..761e018 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -37,6 +37,7 @@
+ #include <fcntl.h>
+ #endif /* HAVE_LINKAT */
+ 
++#include "posix-handle.h"
+ #include <glusterfs/glusterfs.h>
+ #include <glusterfs/checksum.h>
+ #include <glusterfs/dict.h>
+@@ -713,7 +714,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+        option behaviour
+     */
+     if (priv->disk_reserve)
+-        posix_disk_space_check(this);
++        posix_disk_space_check(priv);
+ 
+     DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, ret, ret, unlock);
+ 
+diff --git a/xlators/storage/posix/src/posix-mem-types.h b/xlators/storage/posix/src/posix-mem-types.h
+index 2253f38..bb4c56d 100644
+--- a/xlators/storage/posix/src/posix-mem-types.h
++++ b/xlators/storage/posix/src/posix-mem-types.h
+@@ -20,6 +20,7 @@ enum gf_posix_mem_types_ {
+     gf_posix_mt_paiocb,
+     gf_posix_mt_inode_ctx_t,
+     gf_posix_mt_mdata_attr,
++    gf_posix_mt_diskxl_t,
+     gf_posix_mt_end
+ };
+ #endif
+diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
+index 07f367b..4be979c 100644
+--- a/xlators/storage/posix/src/posix.h
++++ b/xlators/storage/posix/src/posix.h
+@@ -36,7 +36,6 @@
+ #include <glusterfs/compat.h>
+ #include <glusterfs/timer.h>
+ #include "posix-mem-types.h"
+-#include "posix-handle.h"
+ #include <glusterfs/call-stub.h>
+ 
+ #ifdef HAVE_LIBAIO
+@@ -138,6 +137,14 @@ struct posix_fd {
+     char _pad[4]; /* manual padding */
+ };
+ 
++struct posix_diskxl {
++    pthread_cond_t cond;
++    struct list_head list;
++    xlator_t *xl;
++    gf_boolean_t detach_notify;
++    gf_boolean_t is_use;
++};
++
+ struct posix_private {
+     char *base_path;
+     int32_t base_path_length;
+@@ -207,6 +214,7 @@ struct posix_private {
+     pthread_mutex_t janitor_mutex;
+     pthread_cond_t janitor_cond;
+     pthread_cond_t fd_cond;
++    pthread_cond_t disk_cond;
+     int fsync_queue_count;
+ 
+     enum {
+@@ -233,7 +241,6 @@ struct posix_private {
+     char disk_unit;
+     uint32_t disk_space_full;
+     pthread_t disk_space_check;
+-    gf_boolean_t disk_space_check_active;
+ 
+ #ifdef GF_DARWIN_HOST_OS
+     enum {
+@@ -263,6 +270,7 @@ struct posix_private {
+     gf_boolean_t ctime;
+     gf_boolean_t janitor_task_stop;
+     uint32_t rel_fdcount;
++    void *pxl;
+ };
+ 
+ typedef struct {
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0507-inode-make-critical-section-smaller.patch b/SOURCES/0507-inode-make-critical-section-smaller.patch
new file mode 100644
index 0000000..3b1dac5
--- /dev/null
+++ b/SOURCES/0507-inode-make-critical-section-smaller.patch
@@ -0,0 +1,764 @@
+From b3a17b67a69142eef1b4adde3409d5e54dda1e0b Mon Sep 17 00:00:00 2001
+From: Amar Tumballi <amarts@redhat.com>
+Date: Sat, 9 Feb 2019 13:23:06 +0530
+Subject: [PATCH 507/511] inode: make critical section smaller
+
+Do all the 'static' tasks outside of the locked region.
+
+* hash_dentry() and hash_gfid() are now called outside the locked region.
+* remove the extra __dentry_hash export from libglusterfs.sym
+* avoid checks in locked functions when the check is already done in the
+  calling function.
+* implement dentry_destroy(), which handles freeing of the dentry
+  separately from __dentry_unset() (which takes care of separating the
+  dentry from its inode and table)
+
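+For illustration, a condensed sketch of the pattern (hypothetical
+lookup() wrapper; the real changes are in inode_grep, inode_find,
+inode_link and inode_rename below): compute the hash outside the lock,
+then hand it to the locked helper.
+
+inode_t *
+lookup(inode_table_t *table, inode_t *parent, const char *name)
+{
+    inode_t *inode = NULL;
+    /* pure computation, safe to do before taking table->lock */
+    const int hash = hash_dentry(parent, name, table->hashsize);
+
+    pthread_mutex_lock(&table->lock);
+    {
+        dentry_t *dentry = __dentry_grep(table, parent, name, hash);
+        if (dentry && dentry->inode)
+            inode = __inode_ref(dentry->inode, false);
+    }
+    pthread_mutex_unlock(&table->lock);
+
+    return inode;
+}
+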
+> Updates: bz#1670031
+> Change-Id: I584213e0748464bb427fbdef3c4ab6615d7d5eb0
+> Signed-off-by: Amar Tumballi <amarts@redhat.com>
+> (Cherry pick from commit 8a90d346b9d3f69ff11241feb0011c90a8e57e30)
+> (Review on upstream link https://review.gluster.org/#/c/glusterfs/+/22184/)
+
+Change-Id: I584213e0748464bb427fbdef3c4ab6615d7d5eb0
+BUG: 1898777
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221189
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/inode.h |   3 -
+ libglusterfs/src/inode.c           | 323 +++++++++++++------------------------
+ libglusterfs/src/libglusterfs.sym  |   1 -
+ 3 files changed, 111 insertions(+), 216 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/inode.h b/libglusterfs/src/glusterfs/inode.h
+index 4421c47..c875653 100644
+--- a/libglusterfs/src/glusterfs/inode.h
++++ b/libglusterfs/src/glusterfs/inode.h
+@@ -167,9 +167,6 @@ inode_rename(inode_table_t *table, inode_t *olddir, const char *oldname,
+              inode_t *newdir, const char *newname, inode_t *inode,
+              struct iatt *stbuf);
+ 
+-dentry_t *
+-__dentry_grep(inode_table_t *table, inode_t *parent, const char *name);
+-
+ inode_t *
+ inode_grep(inode_table_t *table, inode_t *parent, const char *name);
+ 
+diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
+index 4c3c546..71b2d2a 100644
+--- a/libglusterfs/src/inode.c
++++ b/libglusterfs/src/inode.c
+@@ -159,27 +159,15 @@ hash_dentry(inode_t *parent, const char *name, int mod)
+ static int
+ hash_gfid(uuid_t uuid, int mod)
+ {
+-    int ret = 0;
+-
+-    ret = uuid[15] + (uuid[14] << 8);
+-
+-    return ret;
++    return ((uuid[15] + (uuid[14] << 8)) % mod);
+ }
+ 
+ static void
+-__dentry_hash(dentry_t *dentry)
++__dentry_hash(dentry_t *dentry, const int hash)
+ {
+     inode_table_t *table = NULL;
+-    int hash = 0;
+-
+-    if (!dentry) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+-                         "dentry not found");
+-        return;
+-    }
+ 
+     table = dentry->inode->table;
+-    hash = hash_dentry(dentry->parent, dentry->name, table->hashsize);
+ 
+     list_del_init(&dentry->hash);
+     list_add(&dentry->hash, &table->name_hash[hash]);
+@@ -188,49 +176,44 @@ __dentry_hash(dentry_t *dentry)
+ static int
+ __is_dentry_hashed(dentry_t *dentry)
+ {
+-    if (!dentry) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+-                         "dentry not found");
+-        return 0;
+-    }
+-
+     return !list_empty(&dentry->hash);
+ }
+ 
+ static void
+ __dentry_unhash(dentry_t *dentry)
+ {
+-    if (!dentry) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+-                         "dentry not found");
+-        return;
+-    }
+-
+     list_del_init(&dentry->hash);
+ }
+ 
+ static void
+-__dentry_unset(dentry_t *dentry)
++dentry_destroy(dentry_t *dentry)
+ {
+-    if (!dentry) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+-                         "dentry not found");
++    if (!dentry)
+         return;
+-    }
++
++    GF_FREE(dentry->name);
++    dentry->name = NULL;
++    mem_put(dentry);
++
++    return;
++}
++
++static dentry_t *
++__dentry_unset(dentry_t *dentry)
++{
++    if (!dentry)
++        return NULL;
+ 
+     __dentry_unhash(dentry);
+ 
+     list_del_init(&dentry->inode_list);
+ 
+-    GF_FREE(dentry->name);
+-    dentry->name = NULL;
+-
+     if (dentry->parent) {
+         __inode_unref(dentry->parent, false);
+         dentry->parent = NULL;
+     }
+ 
+-    mem_put(dentry);
++    return dentry;
+ }
+ 
+ static int
+@@ -289,22 +272,14 @@ static int
+ __is_dentry_cyclic(dentry_t *dentry)
+ {
+     int ret = 0;
+-    inode_t *inode = NULL;
+-    char *name = "<nul>";
+ 
+     ret = __foreach_ancestor_dentry(dentry, __check_cycle, dentry->inode);
+     if (ret) {
+-        inode = dentry->inode;
+-
+-        if (dentry->name)
+-            name = dentry->name;
+-
+         gf_msg(dentry->inode->table->name, GF_LOG_CRITICAL, 0,
+                LG_MSG_DENTRY_CYCLIC_LOOP,
+-               "detected cyclic loop "
+-               "formation during inode linkage. inode (%s) linking "
+-               "under itself as %s",
+-               uuid_utoa(inode->gfid), name);
++               "detected cyclic loop formation during inode linkage. "
++               "inode (%s) linking under itself as %s",
++               uuid_utoa(dentry->inode->gfid), dentry->name);
+     }
+ 
+     return ret;
+@@ -313,41 +288,19 @@ __is_dentry_cyclic(dentry_t *dentry)
+ static void
+ __inode_unhash(inode_t *inode)
+ {
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+     list_del_init(&inode->hash);
+ }
+ 
+ static int
+ __is_inode_hashed(inode_t *inode)
+ {
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return 0;
+-    }
+-
+     return !list_empty(&inode->hash);
+ }
+ 
+ static void
+-__inode_hash(inode_t *inode)
++__inode_hash(inode_t *inode, const int hash)
+ {
+-    inode_table_t *table = NULL;
+-    int hash = 0;
+-
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+-    table = inode->table;
+-    hash = hash_gfid(inode->gfid, 65536);
++    inode_table_t *table = inode->table;
+ 
+     list_del_init(&inode->hash);
+     list_add(&inode->hash, &table->inode_hash[hash]);
+@@ -359,12 +312,6 @@ __dentry_search_for_inode(inode_t *inode, uuid_t pargfid, const char *name)
+     dentry_t *dentry = NULL;
+     dentry_t *tmp = NULL;
+ 
+-    if (!inode || !name) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+-                         "inode || name not found");
+-        return NULL;
+-    }
+-
+     /* earlier, just the ino was sent, which could have been 0, now
+        we deal with gfid, and if sent gfid is null or 0, no need to
+        continue with the check */
+@@ -390,12 +337,6 @@ __inode_ctx_free(inode_t *inode)
+     xlator_t *xl = NULL;
+     xlator_t *old_THIS = NULL;
+ 
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+     if (!inode->_ctx) {
+         gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_CTX_NULL,
+                "_ctx not found");
+@@ -424,12 +365,6 @@ noctx:
+ static void
+ __inode_destroy(inode_t *inode)
+ {
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+     __inode_ctx_free(inode);
+ 
+     LOCK_DESTROY(&inode->lock);
+@@ -472,9 +407,6 @@ inode_ctx_merge(fd_t *fd, inode_t *inode, inode_t *linked_inode)
+ static void
+ __inode_activate(inode_t *inode)
+ {
+-    if (!inode)
+-        return;
+-
+     list_move(&inode->list, &inode->table->active);
+     inode->table->active_size++;
+ }
+@@ -485,19 +417,13 @@ __inode_passivate(inode_t *inode)
+     dentry_t *dentry = NULL;
+     dentry_t *t = NULL;
+ 
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+     list_move_tail(&inode->list, &inode->table->lru);
+     inode->table->lru_size++;
+ 
+     list_for_each_entry_safe(dentry, t, &inode->dentry_list, inode_list)
+     {
+         if (!__is_dentry_hashed(dentry))
+-            __dentry_unset(dentry);
++            dentry_destroy(__dentry_unset(dentry));
+     }
+ }
+ 
+@@ -507,12 +433,6 @@ __inode_retire(inode_t *inode)
+     dentry_t *dentry = NULL;
+     dentry_t *t = NULL;
+ 
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return;
+-    }
+-
+     list_move_tail(&inode->list, &inode->table->purge);
+     inode->table->purge_size++;
+ 
+@@ -520,7 +440,7 @@ __inode_retire(inode_t *inode)
+ 
+     list_for_each_entry_safe(dentry, t, &inode->dentry_list, inode_list)
+     {
+-        __dentry_unset(dentry);
++        dentry_destroy(__dentry_unset(dentry));
+     }
+ }
+ 
+@@ -547,9 +467,6 @@ __inode_unref(inode_t *inode, bool clear)
+     xlator_t *this = NULL;
+     uint64_t nlookup = 0;
+ 
+-    if (!inode)
+-        return NULL;
+-
+     /*
+      * Root inode should always be in active list of inode table. So unrefs
+      * on root inode are no-ops.
+@@ -677,16 +594,10 @@ inode_ref(inode_t *inode)
+ }
+ 
+ static dentry_t *
+-__dentry_create(inode_t *inode, inode_t *parent, const char *name)
++dentry_create(inode_t *inode, inode_t *parent, const char *name)
+ {
+     dentry_t *newd = NULL;
+ 
+-    if (!inode || !parent || !name) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+-                         "inode || parent || name not found");
+-        return NULL;
+-    }
+-
+     newd = mem_get0(parent->table->dentry_pool);
+     if (newd == NULL) {
+         goto out;
+@@ -702,10 +613,6 @@ __dentry_create(inode_t *inode, inode_t *parent, const char *name)
+         goto out;
+     }
+ 
+-    if (parent)
+-        newd->parent = __inode_ref(parent, false);
+-
+-    list_add(&newd->inode_list, &inode->dentry_list);
+     newd->inode = inode;
+ 
+ out:
+@@ -717,14 +624,6 @@ __inode_create(inode_table_t *table)
+ {
+     inode_t *newi = NULL;
+ 
+-    if (!table) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0,
+-                         LG_MSG_INODE_TABLE_NOT_FOUND,
+-                         "table not "
+-                         "found");
+-        return NULL;
+-    }
+-
+     newi = mem_get0(table->inode_pool);
+     if (!newi) {
+         goto out;
+@@ -795,9 +694,6 @@ __inode_ref_reduce_by_n(inode_t *inode, uint64_t nref)
+ {
+     uint64_t nlookup = 0;
+ 
+-    if (!inode)
+-        return NULL;
+-
+     GF_ASSERT(inode->ref >= nref);
+ 
+     inode->ref -= nref;
+@@ -837,17 +733,12 @@ inode_forget_atomic(inode_t *inode, uint64_t nlookup)
+ }
+ 
+ dentry_t *
+-__dentry_grep(inode_table_t *table, inode_t *parent, const char *name)
++__dentry_grep(inode_table_t *table, inode_t *parent, const char *name,
++              const int hash)
+ {
+-    int hash = 0;
+     dentry_t *dentry = NULL;
+     dentry_t *tmp = NULL;
+ 
+-    if (!table || !name || !parent)
+-        return NULL;
+-
+-    hash = hash_dentry(parent, name, table->hashsize);
+-
+     list_for_each_entry(tmp, &table->name_hash[hash], hash)
+     {
+         if (tmp->parent == parent && !strcmp(tmp->name, name)) {
+@@ -872,15 +763,16 @@ inode_grep(inode_table_t *table, inode_t *parent, const char *name)
+         return NULL;
+     }
+ 
++    int hash = hash_dentry(parent, name, table->hashsize);
++
+     pthread_mutex_lock(&table->lock);
+     {
+-        dentry = __dentry_grep(table, parent, name);
+-
+-        if (dentry)
++        dentry = __dentry_grep(table, parent, name, hash);
++        if (dentry) {
+             inode = dentry->inode;
+-
+-        if (inode)
+-            __inode_ref(inode, false);
++            if (inode)
++                __inode_ref(inode, false);
++        }
+     }
+     pthread_mutex_unlock(&table->lock);
+ 
+@@ -947,17 +839,18 @@ inode_grep_for_gfid(inode_table_t *table, inode_t *parent, const char *name,
+         return ret;
+     }
+ 
++    int hash = hash_dentry(parent, name, table->hashsize);
++
+     pthread_mutex_lock(&table->lock);
+     {
+-        dentry = __dentry_grep(table, parent, name);
+-
+-        if (dentry)
++        dentry = __dentry_grep(table, parent, name, hash);
++        if (dentry) {
+             inode = dentry->inode;
+-
+-        if (inode) {
+-            gf_uuid_copy(gfid, inode->gfid);
+-            *type = inode->ia_type;
+-            ret = 0;
++            if (inode) {
++                gf_uuid_copy(gfid, inode->gfid);
++                *type = inode->ia_type;
++                ret = 0;
++            }
+         }
+     }
+     pthread_mutex_unlock(&table->lock);
+@@ -978,25 +871,14 @@ __is_root_gfid(uuid_t gfid)
+ }
+ 
+ inode_t *
+-__inode_find(inode_table_t *table, uuid_t gfid)
++__inode_find(inode_table_t *table, uuid_t gfid, const int hash)
+ {
+     inode_t *inode = NULL;
+     inode_t *tmp = NULL;
+-    int hash = 0;
+-
+-    if (!table) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0,
+-                         LG_MSG_INODE_TABLE_NOT_FOUND,
+-                         "table not "
+-                         "found");
+-        goto out;
+-    }
+ 
+     if (__is_root_gfid(gfid))
+         return table->root;
+ 
+-    hash = hash_gfid(gfid, 65536);
+-
+     list_for_each_entry(tmp, &table->inode_hash[hash], hash)
+     {
+         if (gf_uuid_compare(tmp->gfid, gfid) == 0) {
+@@ -1005,7 +887,6 @@ __inode_find(inode_table_t *table, uuid_t gfid)
+         }
+     }
+ 
+-out:
+     return inode;
+ }
+ 
+@@ -1022,9 +903,11 @@ inode_find(inode_table_t *table, uuid_t gfid)
+         return NULL;
+     }
+ 
++    int hash = hash_gfid(gfid, 65536);
++
+     pthread_mutex_lock(&table->lock);
+     {
+-        inode = __inode_find(table, gfid);
++        inode = __inode_find(table, gfid, hash);
+         if (inode)
+             __inode_ref(inode, false);
+     }
+@@ -1035,7 +918,7 @@ inode_find(inode_table_t *table, uuid_t gfid)
+ 
+ static inode_t *
+ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+-             struct iatt *iatt)
++             struct iatt *iatt, const int dhash)
+ {
+     dentry_t *dentry = NULL;
+     dentry_t *old_dentry = NULL;
+@@ -1043,16 +926,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+     inode_table_t *table = NULL;
+     inode_t *link_inode = NULL;
+ 
+-    if (!inode) {
+-        errno = EINVAL;
+-        return NULL;
+-    }
+-
+     table = inode->table;
+-    if (!table) {
+-        errno = EINVAL;
+-        return NULL;
+-    }
+ 
+     if (parent) {
+         /* We should prevent inode linking between different
+@@ -1090,14 +964,16 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+             return NULL;
+         }
+ 
+-        old_inode = __inode_find(table, iatt->ia_gfid);
++        int ihash = hash_gfid(iatt->ia_gfid, 65536);
++
++        old_inode = __inode_find(table, iatt->ia_gfid, ihash);
+ 
+         if (old_inode) {
+             link_inode = old_inode;
+         } else {
+             gf_uuid_copy(inode->gfid, iatt->ia_gfid);
+             inode->ia_type = iatt->ia_type;
+-            __inode_hash(inode);
++            __inode_hash(inode, ihash);
+         }
+     } else {
+         /* @old_inode serves another important purpose - it indicates
+@@ -1112,22 +988,16 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+         old_inode = inode;
+     }
+ 
+-    if (name) {
+-        if (!strcmp(name, ".") || !strcmp(name, ".."))
+-            return link_inode;
+-
+-        if (strchr(name, '/')) {
+-            GF_ASSERT(!"inode link attempted with '/' in name");
+-            return NULL;
+-        }
++    if (name && (!strcmp(name, ".") || !strcmp(name, ".."))) {
++        return link_inode;
+     }
+ 
+     /* use only link_inode beyond this point */
+     if (parent) {
+-        old_dentry = __dentry_grep(table, parent, name);
++        old_dentry = __dentry_grep(table, parent, name, dhash);
+ 
+         if (!old_dentry || old_dentry->inode != link_inode) {
+-            dentry = __dentry_create(link_inode, parent, name);
++            dentry = dentry_create(link_inode, parent, name);
+             if (!dentry) {
+                 gf_msg_callingfn(
+                     THIS->name, GF_LOG_ERROR, 0, LG_MSG_DENTRY_CREATE_FAILED,
+@@ -1137,15 +1007,20 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+                 errno = ENOMEM;
+                 return NULL;
+             }
++
++            /* dentry linking needs to happen inside lock */
++            dentry->parent = __inode_ref(parent, false);
++            list_add(&dentry->inode_list, &link_inode->dentry_list);
++
+             if (old_inode && __is_dentry_cyclic(dentry)) {
+                 errno = ELOOP;
+-                __dentry_unset(dentry);
++                dentry_destroy(__dentry_unset(dentry));
+                 return NULL;
+             }
+-            __dentry_hash(dentry);
++            __dentry_hash(dentry, dhash);
+ 
+             if (old_dentry)
+-                __dentry_unset(old_dentry);
++                dentry_destroy(__dentry_unset(old_dentry));
+         }
+     }
+ 
+@@ -1155,6 +1030,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+ inode_t *
+ inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
+ {
++    int hash = 0;
+     inode_table_t *table = NULL;
+     inode_t *linked_inode = NULL;
+ 
+@@ -1166,10 +1042,18 @@ inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
+ 
+     table = inode->table;
+ 
++    if (parent && name) {
++        hash = hash_dentry(parent, name, table->hashsize);
++    }
++
++    if (name && strchr(name, '/')) {
++        GF_ASSERT(!"inode link attempted with '/' in name");
++        return NULL;
++    }
++
+     pthread_mutex_lock(&table->lock);
+     {
+-        linked_inode = __inode_link(inode, parent, name, iatt);
+-
++        linked_inode = __inode_link(inode, parent, name, iatt, hash);
+         if (linked_inode)
+             __inode_ref(linked_inode, false);
+     }
+@@ -1312,48 +1196,47 @@ inode_invalidate(inode_t *inode)
+     return ret;
+ }
+ 
+-static void
++static dentry_t *
+ __inode_unlink(inode_t *inode, inode_t *parent, const char *name)
+ {
+     dentry_t *dentry = NULL;
+     char pgfid[64] = {0};
+     char gfid[64] = {0};
+ 
+-    if (!inode || !parent || !name)
+-        return;
+-
+     dentry = __dentry_search_for_inode(inode, parent->gfid, name);
+ 
+     /* dentry NULL for corrupted backend */
+     if (dentry) {
+-        __dentry_unset(dentry);
++        dentry = __dentry_unset(dentry);
+     } else {
+         gf_msg("inode", GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+                "%s/%s: dentry not found in %s",
+                uuid_utoa_r(parent->gfid, pgfid), name,
+                uuid_utoa_r(inode->gfid, gfid));
+     }
++
++    return dentry;
+ }
+ 
+ void
+ inode_unlink(inode_t *inode, inode_t *parent, const char *name)
+ {
+-    inode_table_t *table = NULL;
++    inode_table_t *table;
++    dentry_t *dentry;
+ 
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
++    if (!inode || !parent || !name)
+         return;
+-    }
+ 
+     table = inode->table;
+ 
+     pthread_mutex_lock(&table->lock);
+     {
+-        __inode_unlink(inode, parent, name);
++        dentry = __inode_unlink(inode, parent, name);
+     }
+     pthread_mutex_unlock(&table->lock);
+ 
++    dentry_destroy(dentry);
++
+     inode_table_prune(table);
+ }
+ 
+@@ -1362,6 +1245,9 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
+              inode_t *dstdir, const char *dstname, inode_t *inode,
+              struct iatt *iatt)
+ {
++    int hash = 0;
++    dentry_t *dentry = NULL;
++
+     if (!inode) {
+         gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+                          "inode not found");
+@@ -1370,13 +1256,26 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
+ 
+     table = inode->table;
+ 
++    if (dstname && strchr(dstname, '/')) {
++        GF_ASSERT(!"inode link attempted with '/' in name");
++        return -1;
++    }
++
++    if (dstdir && dstname) {
++        hash = hash_dentry(dstdir, dstname, table->hashsize);
++    }
++
+     pthread_mutex_lock(&table->lock);
+     {
+-        __inode_link(inode, dstdir, dstname, iatt);
+-        __inode_unlink(inode, srcdir, srcname);
++        __inode_link(inode, dstdir, dstname, iatt, hash);
++        /* pick the old dentry */
++        dentry = __inode_unlink(inode, srcdir, srcname);
+     }
+     pthread_mutex_unlock(&table->lock);
+ 
++    /* free the old dentry */
++    dentry_destroy(dentry);
++
+     inode_table_prune(table);
+ 
+     return 0;
+@@ -1447,12 +1346,6 @@ inode_parent(inode_t *inode, uuid_t pargfid, const char *name)
+ static int
+ __inode_has_dentry(inode_t *inode)
+ {
+-    if (!inode) {
+-        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+-                         "inode not found");
+-        return 0;
+-    }
+-
+     return !list_empty(&inode->dentry_list);
+ }
+ 
+@@ -1461,6 +1354,12 @@ inode_has_dentry(inode_t *inode)
+ {
+     int dentry_present = 0;
+ 
++    if (!inode) {
++        gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
++                         "inode not found");
++        return 0;
++    }
++
+     LOCK(&inode->lock);
+     {
+         dentry_present = __inode_has_dentry(inode);
+@@ -1720,7 +1619,7 @@ __inode_table_init_root(inode_table_t *table)
+     iatt.ia_ino = 1;
+     iatt.ia_type = IA_IFDIR;
+ 
+-    __inode_link(root, NULL, NULL, &iatt);
++    __inode_link(root, NULL, NULL, &iatt, 0);
+     table->root = root;
+ }
+ 
+diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
+index 5a721e0..d060292 100644
+--- a/libglusterfs/src/libglusterfs.sym
++++ b/libglusterfs/src/libglusterfs.sym
+@@ -357,7 +357,6 @@ default_copy_file_range
+ default_copy_file_range_cbk
+ default_copy_file_range_failure_cbk
+ default_copy_file_range_resume
+-__dentry_grep
+ dht_is_linkfile
+ dict_add
+ dict_addn
+-- 
+1.8.3.1
+
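A minimal standalone sketch of the pattern the hunks above introduce: the dentry/gfid bucket hash is computed before the inode-table mutex is taken and passed into the lock-protected __-helpers, and __inode_unlink() now returns the detached dentry so dentry_destroy() can free it after the lock is dropped. The names below are illustrative, not GlusterFS APIs:

    #include <pthread.h>
    #include <stdio.h>

    #define BUCKETS 14057 /* prime, like the dentry hash */

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static const char *bucket[BUCKETS];

    /* stands in for hash_dentry(); any string hash would do */
    static unsigned int hash_name(const char *name)
    {
        unsigned int h = 5381;

        while (*name)
            h = h * 33 + (unsigned char)*name++;
        return h % BUCKETS;
    }

    /* the double underscore marks "caller holds table_lock", as in
       inode.c; the hash argument was computed outside the lock */
    static void __table_insert(const char *name, unsigned int hash)
    {
        bucket[hash] = name;
    }

    static void table_insert(const char *name)
    {
        unsigned int hash = hash_name(name); /* before taking the lock */

        pthread_mutex_lock(&table_lock);
        __table_insert(name, hash);
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        table_insert("some-name");
        printf("stored in bucket %u\n", hash_name("some-name"));
        return 0;
    }

Hashing and freeing are the comparatively expensive parts; keeping them outside the critical section shortens the time the per-table mutex is held, which is what the refactor is after.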
diff --git a/SOURCES/0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch b/SOURCES/0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
new file mode 100644
index 0000000..9ccc1b5
--- /dev/null
+++ b/SOURCES/0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
@@ -0,0 +1,232 @@
+From 87b7689f7727a542c5afa22bdebd3781dd650a2f Mon Sep 17 00:00:00 2001
+From: Csaba Henk <csaba@redhat.com>
+Date: Fri, 17 Jul 2020 11:33:36 +0200
+Subject: [PATCH 508/511] fuse: fetch arbitrary number of groups from
+ /proc/[pid]/status
+
+Glusterfs so far constrained itself with an arbitrary limit (32)
+for the number of groups read from /proc/[pid]/status (this was
+the number of groups shown there prior to Linux commit
+v3.7-9553-g8d238027b87e (v3.8-rc1~74^2~59); since this commit, all
+groups are shown).
+
+With this change we'll read groups up to the number Glusterfs
+supports in general (64k).
+
+Note: the actual number of groups that are made use of in a
+regular Glusterfs setup shall still be capped at ~93 due to limitations
+of the RPC transport. To be able to handle more groups than that,
+brick side gid resolution (server.manage-gids option) can be used along
+with NIS, LDAP or other such networked directory service (see
+https://github.com/gluster/glusterdocs/blob/5ba15a2/docs/Administrator%20Guide/Handling-of-users-with-many-groups.md#limit-in-the-glusterfs-protocol
+).
+
+Also adding some diagnostic messages to frame_fill_groups().
+
+Upstream:
+> Reviewed-on: https://review.gluster.org/c/glusterfs/+/24721
+> Change-Id: I271f3dc3e6d3c44d6d989c7a2073ea5f16c26ee0
+> fixes: #1075
+> Signed-off-by: Csaba Henk <csaba@redhat.com>
+
+BUG: 1749304
+Change-Id: I80bf99d34087fb95768bf2259d8c4774d9f5d0c5
+Signed-off-by: Csaba Henk <csaba@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220920
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/glusterfs/stack.h    |  7 ++++
+ tests/bugs/fuse/many-groups-for-acl.t | 13 ++++++-
+ xlators/mount/fuse/src/fuse-helpers.c | 71 +++++++++++++++++++++++------------
+ 3 files changed, 65 insertions(+), 26 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/stack.h b/libglusterfs/src/glusterfs/stack.h
+index 1758550..bd466d8 100644
+--- a/libglusterfs/src/glusterfs/stack.h
++++ b/libglusterfs/src/glusterfs/stack.h
+@@ -429,6 +429,7 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
+     if (ngrps <= SMALL_GROUP_COUNT) {
+         stack->groups = stack->groups_small;
+     } else {
++        GF_FREE(stack->groups_large);
+         stack->groups_large = GF_CALLOC(ngrps, sizeof(gid_t),
+                                         gf_common_mt_groups_t);
+         if (!stack->groups_large)
+@@ -442,6 +443,12 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
+ }
+ 
+ static inline int
++call_stack_groups_capacity(call_stack_t *stack)
++{
++    return max(stack->ngrps, SMALL_GROUP_COUNT);
++}
++
++static inline int
+ call_frames_count(call_stack_t *call_stack)
+ {
+     call_frame_t *pos;
+diff --git a/tests/bugs/fuse/many-groups-for-acl.t b/tests/bugs/fuse/many-groups-for-acl.t
+index d959f75..a51b1bc 100755
+--- a/tests/bugs/fuse/many-groups-for-acl.t
++++ b/tests/bugs/fuse/many-groups-for-acl.t
+@@ -38,6 +38,13 @@ do
+ done
+ TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+ 
++# Linux < 3.8 exports only the first 32 gids of a pid to userspace
++kernel_exports_few_gids=0
++if [ "$OSTYPE" = Linux ] && \
++   su -m ${NEW_USER} -c "grep ^Groups: /proc/self/status | wc -w | xargs -I@ expr @ - 1 '<' $LAST_GID - $NEW_GID + 1" > /dev/null; then
++       kernel_exports_few_gids=1
++fi
++
+ # preparation done, start the tests
+ 
+ TEST glusterd
+@@ -48,6 +55,8 @@ TEST $CLI volume set $V0 nfs.disable off
+ TEST $CLI volume set ${V0} server.manage-gids off
+ TEST $CLI volume start ${V0}
+ 
++# This is just a synchronization hack to make sure the bricks are
++# up before going on.
+ EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
+ 
+ # mount the volume with POSIX ACL support, without --resolve-gids
+@@ -69,8 +78,8 @@ TEST [ $? -eq 0 ]
+ su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-2/success > /dev/null"
+ TEST [ $? -eq 0 ]
+ 
+-su -m ${NEW_USER} -c "touch ${M0}/gid-64/failure > /dev/null"
+-TEST [ $? -ne 0 ]
++su -m ${NEW_USER} -c "touch ${M0}/gid-64/success--if-all-gids-exported > /dev/null"
++TEST [ $? -eq $kernel_exports_few_gids ]
+ 
+ su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
+ TEST [ $? -ne 0 ]
+diff --git a/xlators/mount/fuse/src/fuse-helpers.c b/xlators/mount/fuse/src/fuse-helpers.c
+index 5bfc40c..6e04cd4 100644
+--- a/xlators/mount/fuse/src/fuse-helpers.c
++++ b/xlators/mount/fuse/src/fuse-helpers.c
+@@ -139,8 +139,6 @@ get_fuse_state(xlator_t *this, fuse_in_header_t *finh)
+     return state;
+ }
+ 
+-#define FUSE_MAX_AUX_GROUPS                                                    \
+-    32 /* We can get only up to 32 aux groups from /proc */
+ void
+ frame_fill_groups(call_frame_t *frame)
+ {
+@@ -150,8 +148,6 @@ frame_fill_groups(call_frame_t *frame)
+     char filename[32];
+     char line[4096];
+     char *ptr = NULL;
+-    FILE *fp = NULL;
+-    int idx = 0;
+     long int id = 0;
+     char *saveptr = NULL;
+     char *endptr = NULL;
+@@ -191,45 +187,72 @@ frame_fill_groups(call_frame_t *frame)
+ 
+         call_stack_set_groups(frame->root, ngroups, &mygroups);
+     } else {
++        FILE *fp = NULL;
++
+         ret = snprintf(filename, sizeof filename, "/proc/%d/status",
+                        frame->root->pid);
+-        if (ret >= sizeof filename)
++        if (ret >= sizeof filename) {
++            gf_log(this->name, GF_LOG_ERROR, "procfs path exceeds buffer size");
+             goto out;
++        }
+ 
+         fp = fopen(filename, "r");
+-        if (!fp)
++        if (!fp) {
++            gf_log(this->name, GF_LOG_ERROR, "failed to open %s: %s", filename,
++                   strerror(errno));
+             goto out;
++        }
+ 
+-        if (call_stack_alloc_groups(frame->root, ngroups) != 0)
+-            goto out;
++        for (;;) {
++            gf_boolean_t found_groups = _gf_false;
++            int idx = 0;
+ 
+-        while ((ptr = fgets(line, sizeof line, fp))) {
+-            if (strncmp(ptr, "Groups:", 7) != 0)
+-                continue;
++            if (call_stack_alloc_groups(frame->root, ngroups) != 0) {
++                gf_log(this->name, GF_LOG_ERROR,
++                       "failed to allocate gid buffer");
++                goto out;
++            }
+ 
++            while ((ptr = fgets(line, sizeof line, fp))) {
++                if (strncmp(ptr, "Groups:", 7) == 0) {
++                    found_groups = _gf_true;
++                    break;
++                }
++            }
++            if (!found_groups) {
++                gf_log(this->name, GF_LOG_ERROR, "cannot find gid list in %s",
++                       filename);
++                break;
++            }
+             ptr = line + 8;
+ 
+             for (ptr = strtok_r(ptr, " \t\r\n", &saveptr); ptr;
+                  ptr = strtok_r(NULL, " \t\r\n", &saveptr)) {
+                 errno = 0;
+                 id = strtol(ptr, &endptr, 0);
+-                if (errno == ERANGE)
+-                    break;
+-                if (!endptr || *endptr)
++                if (errno == ERANGE || !endptr || *endptr) {
++                    gf_log(this->name, GF_LOG_ERROR, "failed to parse %s",
++                           filename);
+                     break;
+-                frame->root->groups[idx++] = id;
+-                if (idx == FUSE_MAX_AUX_GROUPS)
++                }
++                if (idx < call_stack_groups_capacity(frame->root))
++                    frame->root->groups[idx] = id;
++                idx++;
++                if (idx == GF_MAX_AUX_GROUPS)
+                     break;
+             }
+-
+-            frame->root->ngrps = idx;
+-            break;
++            if (idx > call_stack_groups_capacity(frame->root)) {
++                ngroups = idx;
++                rewind(fp);
++            } else {
++                frame->root->ngrps = idx;
++                break;
++            }
+         }
++    out:
++        if (fp)
++            fclose(fp);
+     }
+-
+-out:
+-    if (fp)
+-        fclose(fp);
+ #elif defined(GF_SOLARIS_HOST_OS)
+     char filename[32];
+     char scratch[128];
+@@ -245,7 +268,7 @@ out:
+         fp = fopen(filename, "r");
+         if (fp != NULL) {
+             if (fgets(scratch, sizeof scratch, fp) != NULL) {
+-                ngrps = MIN(prcred->pr_ngroups, FUSE_MAX_AUX_GROUPS);
++                ngrps = MIN(prcred->pr_ngroups, GF_MAX_AUX_GROUPS);
+                 if (call_stack_alloc_groups(frame->root, ngrps) != 0) {
+                     fclose(fp);
+                     return;
+-- 
+1.8.3.1
+
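The core of the patched frame_fill_groups() is a grow-and-rewind loop: parse the Groups: line of /proc/[pid]/status counting entries, and if the count exceeds the currently allocated capacity, grow the buffer, rewind the file and parse again. A self-contained sketch under those assumptions (plain C on Linux, illustrative constants, realloc standing in for call_stack_alloc_groups()):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    #define MAX_GROUPS 65536 /* overall cap, like GF_MAX_AUX_GROUPS */

    int main(void)
    {
        FILE *fp = fopen("/proc/self/status", "r");
        gid_t *groups = NULL;
        int capacity = 4; /* deliberately tiny to force a second pass */
        char line[4096];

        if (!fp)
            return 1;

        for (;;) {
            gid_t *tmp = realloc(groups, capacity * sizeof(*groups));
            char *tok, *save, *end;
            int idx = 0;

            if (!tmp)
                break;
            groups = tmp;

            /* find the "Groups:" line (assumed present on Linux) */
            while (fgets(line, sizeof(line), fp))
                if (strncmp(line, "Groups:", 7) == 0)
                    break;

            for (tok = strtok_r(line + 8, " \t\r\n", &save); tok;
                 tok = strtok_r(NULL, " \t\r\n", &save)) {
                long id;

                errno = 0;
                id = strtol(tok, &end, 0);
                if (errno == ERANGE || *end)
                    break;
                if (idx < capacity)
                    groups[idx] = (gid_t)id; /* store only what fits */
                if (++idx == MAX_GROUPS)
                    break;
            }

            if (idx > capacity) {
                capacity = idx; /* grow, rewind and parse again */
                rewind(fp);
            } else {
                printf("read %d supplementary groups\n", idx);
                break;
            }
        }

        free(groups);
        fclose(fp);
        return 0;
    }

In the patch, call_stack_groups_capacity() plays the role of capacity here, and the retry terminates because the second pass always has room for everything the first pass counted.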
diff --git a/SOURCES/0509-core-configure-optimum-inode-table-hash_size-for-shd.patch b/SOURCES/0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
new file mode 100644
index 0000000..fdfc9bb
--- /dev/null
+++ b/SOURCES/0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
@@ -0,0 +1,407 @@
+From a18f03cbf2b5652f8617cb4dd236bb4ca9838d96 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Tue, 6 Oct 2020 16:54:15 +0530
+Subject: [PATCH 509/511] core: configure optimum inode table hash_size for shd
+
+In a brick_mux environment the shd process consumes a lot of memory.
+After printing a statedump I found that it allocates 1M per afr xlator
+for all bricks. With 4k volumes configured it consumes almost 6G of
+RSS in total, of which 4G is consumed by inode tables.
+
+[cluster/replicate.test1-replicate-0 - usage-type gf_common_mt_list_head memusage]
+size=1273488
+num_allocs=2
+max_size=1273488
+max_num_allocs=2
+total_allocs=2
+
+The inode_new_table function allocates memory (1M) for the inode and dentry hash lists.
+For shd the lru_limit is 1, so we don't need a big hash table; to reduce the
+RSS size of the shd process, pass an optimum bucket count when creating the inode_table.
+
+> Change-Id: I039716d42321a232fdee1ee8fd50295e638715bb
+> Fixes: #1538
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit ca6bbc486e76fdb9a8e07119bb10d7fa45b2e93b)
+> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1538)
+
+Change-Id: I039716d42321a232fdee1ee8fd50295e638715bb
+BUG: 1898777
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221191
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ api/src/glfs-master.c                          |  2 +-
+ libglusterfs/src/glusterfs/inode.h             | 17 +++++----
+ libglusterfs/src/inode.c                       | 53 +++++++++++++++++---------
+ xlators/cluster/afr/src/afr.c                  | 10 ++++-
+ xlators/cluster/dht/src/dht-rebalance.c        |  3 +-
+ xlators/cluster/ec/src/ec.c                    |  2 +-
+ xlators/features/bit-rot/src/bitd/bit-rot.c    |  2 +-
+ xlators/features/quota/src/quotad-helpers.c    |  2 +-
+ xlators/features/trash/src/trash.c             |  4 +-
+ xlators/mount/fuse/src/fuse-bridge.c           |  6 +--
+ xlators/nfs/server/src/nfs.c                   |  2 +-
+ xlators/protocol/server/src/server-handshake.c |  3 +-
+ 12 files changed, 66 insertions(+), 40 deletions(-)
+
+diff --git a/api/src/glfs-master.c b/api/src/glfs-master.c
+index b4473b1..9e604d3 100644
+--- a/api/src/glfs-master.c
++++ b/api/src/glfs-master.c
+@@ -45,7 +45,7 @@ graph_setup(struct glfs *fs, glusterfs_graph_t *graph)
+         }
+ 
+         if (!new_subvol->itable) {
+-            itable = inode_table_new(131072, new_subvol);
++            itable = inode_table_new(131072, new_subvol, 0, 0);
+             if (!itable) {
+                 errno = ENOMEM;
+                 ret = -1;
+diff --git a/libglusterfs/src/glusterfs/inode.h b/libglusterfs/src/glusterfs/inode.h
+index c875653..62c093d 100644
+--- a/libglusterfs/src/glusterfs/inode.h
++++ b/libglusterfs/src/glusterfs/inode.h
+@@ -35,11 +35,12 @@ typedef struct _dentry dentry_t;
+ 
+ struct _inode_table {
+     pthread_mutex_t lock;
+-    size_t hashsize;    /* bucket size of inode hash and dentry hash */
+-    char *name;         /* name of the inode table, just for gf_log() */
+-    inode_t *root;      /* root directory inode, with number 1 */
+-    xlator_t *xl;       /* xlator to be called to do purge */
+-    uint32_t lru_limit; /* maximum LRU cache size */
++    size_t dentry_hashsize; /* Number of buckets for dentry hash*/
++    size_t inode_hashsize;  /* Size of inode hash table */
++    char *name;             /* name of the inode table, just for gf_log() */
++    inode_t *root;          /* root directory inode, with number 1 */
++    xlator_t *xl;           /* xlator to be called to do purge */
++    uint32_t lru_limit;     /* maximum LRU cache size */
+     struct list_head *inode_hash; /* buckets for inode hash table */
+     struct list_head *name_hash;  /* buckets for dentry hash table */
+     struct list_head active; /* list of inodes currently active (in an fop) */
+@@ -116,12 +117,14 @@ struct _inode {
+ #define GFID_STR_PFX_LEN (sizeof(GFID_STR_PFX) - 1)
+ 
+ inode_table_t *
+-inode_table_new(uint32_t lru_limit, xlator_t *xl);
++inode_table_new(uint32_t lru_limit, xlator_t *xl, uint32_t dhash_size,
++                uint32_t inodehash_size);
+ 
+ inode_table_t *
+ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
+                              int32_t (*invalidator_fn)(xlator_t *, inode_t *),
+-                             xlator_t *invalidator_xl);
++                             xlator_t *invalidator_xl, uint32_t dentry_hashsize,
++                             uint32_t inode_hashsize);
+ 
+ void
+ inode_table_destroy_all(glusterfs_ctx_t *ctx);
+diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
+index 71b2d2a..98f8ea6 100644
+--- a/libglusterfs/src/inode.c
++++ b/libglusterfs/src/inode.c
+@@ -763,7 +763,7 @@ inode_grep(inode_table_t *table, inode_t *parent, const char *name)
+         return NULL;
+     }
+ 
+-    int hash = hash_dentry(parent, name, table->hashsize);
++    int hash = hash_dentry(parent, name, table->dentry_hashsize);
+ 
+     pthread_mutex_lock(&table->lock);
+     {
+@@ -839,7 +839,7 @@ inode_grep_for_gfid(inode_table_t *table, inode_t *parent, const char *name,
+         return ret;
+     }
+ 
+-    int hash = hash_dentry(parent, name, table->hashsize);
++    int hash = hash_dentry(parent, name, table->dentry_hashsize);
+ 
+     pthread_mutex_lock(&table->lock);
+     {
+@@ -903,7 +903,7 @@ inode_find(inode_table_t *table, uuid_t gfid)
+         return NULL;
+     }
+ 
+-    int hash = hash_gfid(gfid, 65536);
++    int hash = hash_gfid(gfid, table->inode_hashsize);
+ 
+     pthread_mutex_lock(&table->lock);
+     {
+@@ -964,7 +964,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
+             return NULL;
+         }
+ 
+-        int ihash = hash_gfid(iatt->ia_gfid, 65536);
++        int ihash = hash_gfid(iatt->ia_gfid, table->inode_hashsize);
+ 
+         old_inode = __inode_find(table, iatt->ia_gfid, ihash);
+ 
+@@ -1043,7 +1043,7 @@ inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
+     table = inode->table;
+ 
+     if (parent && name) {
+-        hash = hash_dentry(parent, name, table->hashsize);
++        hash = hash_dentry(parent, name, table->dentry_hashsize);
+     }
+ 
+     if (name && strchr(name, '/')) {
+@@ -1262,7 +1262,7 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
+     }
+ 
+     if (dstdir && dstname) {
+-        hash = hash_dentry(dstdir, dstname, table->hashsize);
++        hash = hash_dentry(dstdir, dstname, table->dentry_hashsize);
+     }
+ 
+     pthread_mutex_lock(&table->lock);
+@@ -1626,7 +1626,8 @@ __inode_table_init_root(inode_table_t *table)
+ inode_table_t *
+ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
+                              int32_t (*invalidator_fn)(xlator_t *, inode_t *),
+-                             xlator_t *invalidator_xl)
++                             xlator_t *invalidator_xl, uint32_t dentry_hashsize,
++                             uint32_t inode_hashsize)
+ {
+     inode_table_t *new = NULL;
+     uint32_t mem_pool_size = lru_limit;
+@@ -1644,7 +1645,19 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
+     new->invalidator_fn = invalidator_fn;
+     new->invalidator_xl = invalidator_xl;
+ 
+-    new->hashsize = 14057; /* TODO: Random Number?? */
++    if (dentry_hashsize == 0) {
++        /* Prime number for uniform distribution */
++        new->dentry_hashsize = 14057;
++    } else {
++        new->dentry_hashsize = dentry_hashsize;
++    }
++
++    if (inode_hashsize == 0) {
++        /* The size of the hash table should always be a power of 2 */
++        new->inode_hashsize = 65536;
++    } else {
++        new->inode_hashsize = inode_hashsize;
++    }
+ 
+     /* In case FUSE is initing the inode table. */
+     if (!mem_pool_size || (mem_pool_size > DEFAULT_INODE_MEMPOOL_ENTRIES))
+@@ -1658,13 +1671,13 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
+     if (!new->dentry_pool)
+         goto out;
+ 
+-    new->inode_hash = (void *)GF_CALLOC(65536, sizeof(struct list_head),
+-                                        gf_common_mt_list_head);
++    new->inode_hash = (void *)GF_CALLOC(
++        new->inode_hashsize, sizeof(struct list_head), gf_common_mt_list_head);
+     if (!new->inode_hash)
+         goto out;
+ 
+-    new->name_hash = (void *)GF_CALLOC(new->hashsize, sizeof(struct list_head),
+-                                       gf_common_mt_list_head);
++    new->name_hash = (void *)GF_CALLOC(
++        new->dentry_hashsize, sizeof(struct list_head), gf_common_mt_list_head);
+     if (!new->name_hash)
+         goto out;
+ 
+@@ -1675,11 +1688,11 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
+     if (!new->fd_mem_pool)
+         goto out;
+ 
+-    for (i = 0; i < 65536; i++) {
++    for (i = 0; i < new->inode_hashsize; i++) {
+         INIT_LIST_HEAD(&new->inode_hash[i]);
+     }
+ 
+-    for (i = 0; i < new->hashsize; i++) {
++    for (i = 0; i < new->dentry_hashsize; i++) {
+         INIT_LIST_HEAD(&new->name_hash[i]);
+     }
+ 
+@@ -1717,10 +1730,12 @@ out:
+ }
+ 
+ inode_table_t *
+-inode_table_new(uint32_t lru_limit, xlator_t *xl)
++inode_table_new(uint32_t lru_limit, xlator_t *xl, uint32_t dentry_hashsize,
++                uint32_t inode_hashsize)
+ {
+     /* Only fuse for now requires the inode table with invalidator */
+-    return inode_table_with_invalidator(lru_limit, xl, NULL, NULL);
++    return inode_table_with_invalidator(lru_limit, xl, NULL, NULL,
++                                        dentry_hashsize, inode_hashsize);
+ }
+ 
+ int
+@@ -2439,8 +2454,10 @@ inode_table_dump(inode_table_t *itable, char *prefix)
+         return;
+     }
+ 
+-    gf_proc_dump_build_key(key, prefix, "hashsize");
+-    gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->hashsize);
++    gf_proc_dump_build_key(key, prefix, "dentry_hashsize");
++    gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->dentry_hashsize);
++    gf_proc_dump_build_key(key, prefix, "inode_hashsize");
++    gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->inode_hashsize);
+     gf_proc_dump_build_key(key, prefix, "name");
+     gf_proc_dump_write(key, "%s", itable->name);
+ 
+diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
+index 8f9e71f..bfa464f 100644
+--- a/xlators/cluster/afr/src/afr.c
++++ b/xlators/cluster/afr/src/afr.c
+@@ -594,7 +594,15 @@ init(xlator_t *this)
+         goto out;
+     }
+ 
+-    this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this);
++    if (priv->shd.iamshd) {
++        /* Number of hash bucket should be prime number so declare 131
++           total dentry hash buckets
++        */
++        this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 131, 128);
++    } else {
++        this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 0, 0);
++    }
++
+     if (!this->itable) {
+         ret = -ENOMEM;
+         goto out;
+diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
+index 16ac16c..072896d 100644
+--- a/xlators/cluster/dht/src/dht-rebalance.c
++++ b/xlators/cluster/dht/src/dht-rebalance.c
+@@ -1168,7 +1168,6 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
+             break;
+         }
+ 
+-
+         offset += ret;
+         total += ret;
+ 
+@@ -2467,7 +2466,7 @@ dht_build_root_inode(xlator_t *this, inode_t **inode)
+         0,
+     };
+ 
+-    itable = inode_table_new(0, this);
++    itable = inode_table_new(0, this, 0, 0);
+     if (!itable)
+         return;
+ 
+diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
+index 3f31c74..4118c3b 100644
+--- a/xlators/cluster/ec/src/ec.c
++++ b/xlators/cluster/ec/src/ec.c
+@@ -734,7 +734,7 @@ init(xlator_t *this)
+     GF_OPTION_INIT("stripe-cache", ec->stripe_cache, uint32, failed);
+     GF_OPTION_INIT("quorum-count", ec->quorum_count, uint32, failed);
+ 
+-    this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this);
++    this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this, 0, 0);
+     if (!this->itable)
+         goto failed;
+ 
+diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
+index 424c0d5..4e0e798 100644
+--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
++++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
+@@ -1658,7 +1658,7 @@ notify(xlator_t *this, int32_t event, void *data, ...)
+                 child->child_up = 1;
+                 child->xl = subvol;
+                 if (!child->table)
+-                    child->table = inode_table_new(4096, subvol);
++                    child->table = inode_table_new(4096, subvol, 0, 0);
+ 
+                 _br_qchild_event(this, child, br_brick_connect);
+                 pthread_cond_signal(&priv->cond);
+diff --git a/xlators/features/quota/src/quotad-helpers.c b/xlators/features/quota/src/quotad-helpers.c
+index d9f0351..46ac116 100644
+--- a/xlators/features/quota/src/quotad-helpers.c
++++ b/xlators/features/quota/src/quotad-helpers.c
+@@ -32,7 +32,7 @@ get_quotad_aggregator_state(xlator_t *this, rpcsvc_request_t *req)
+     UNLOCK(&priv->lock);
+ 
+     if (active_subvol->itable == NULL)
+-        active_subvol->itable = inode_table_new(4096, active_subvol);
++        active_subvol->itable = inode_table_new(4096, active_subvol, 0, 0);
+ 
+     state->itable = active_subvol->itable;
+ 
+diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
+index 93f020f..099c887 100644
+--- a/xlators/features/trash/src/trash.c
++++ b/xlators/features/trash/src/trash.c
+@@ -2261,7 +2261,7 @@ reconfigure(xlator_t *this, dict_t *options)
+ 
+     if (!active_earlier && active_now) {
+         if (!priv->trash_itable) {
+-            priv->trash_itable = inode_table_new(0, this);
++            priv->trash_itable = inode_table_new(0, this, 0, 0);
+             if (!priv->trash_itable) {
+                 ret = -ENOMEM;
+                 gf_log(this->name, GF_LOG_ERROR,
+@@ -2533,7 +2533,7 @@ init(xlator_t *this)
+     }
+ 
+     if (priv->state) {
+-        priv->trash_itable = inode_table_new(0, this);
++        priv->trash_itable = inode_table_new(0, this, 0, 0);
+         if (!priv->trash_itable) {
+             ret = -ENOMEM;
+             priv->state = _gf_false;
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index 1bddac2..919eea3 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -6298,10 +6298,10 @@ fuse_graph_setup(xlator_t *this, glusterfs_graph_t *graph)
+         }
+ 
+ #if FUSE_KERNEL_MINOR_VERSION >= 11
+-        itable = inode_table_with_invalidator(priv->lru_limit, graph->top,
+-                                              fuse_inode_invalidate_fn, this);
++        itable = inode_table_with_invalidator(
++            priv->lru_limit, graph->top, fuse_inode_invalidate_fn, this, 0, 0);
+ #else
+-        itable = inode_table_new(0, graph->top);
++        itable = inode_table_new(0, graph->top, 0, 0);
+ #endif
+         if (!itable) {
+             ret = -1;
+diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
+index ebded41..402be30 100644
+--- a/xlators/nfs/server/src/nfs.c
++++ b/xlators/nfs/server/src/nfs.c
+@@ -564,7 +564,7 @@ nfs_init_subvolume(struct nfs_state *nfs, xlator_t *xl)
+         return -1;
+ 
+     lrusize = nfs->memfactor * GF_NFS_INODE_LRU_MULT;
+-    xl->itable = inode_table_new(lrusize, xl);
++    xl->itable = inode_table_new(lrusize, xl, 0, 0);
+     if (!xl->itable) {
+         gf_msg(GF_NFS, GF_LOG_CRITICAL, ENOMEM, NFS_MSG_NO_MEMORY,
+                "Failed to allocate inode table");
+diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c
+index 1d1177d..eeca73c 100644
+--- a/xlators/protocol/server/src/server-handshake.c
++++ b/xlators/protocol/server/src/server-handshake.c
+@@ -36,7 +36,6 @@ gf_compare_client_version(rpcsvc_request_t *req, int fop_prognum,
+     return ret;
+ }
+ 
+-
+ int
+ server_getspec(rpcsvc_request_t *req)
+ {
+@@ -629,7 +628,7 @@ server_setvolume(rpcsvc_request_t *req)
+ 
+             /* TODO: what is this ? */
+             client->bound_xl->itable = inode_table_new(conf->inode_lru_limit,
+-                                                       client->bound_xl);
++                                                       client->bound_xl, 0, 0);
+         }
+     }
+     UNLOCK(&conf->itable_lock);
+-- 
+1.8.3.1
+
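Two different defaults appear above: 14057 buckets for the dentry hash and 65536 for the inode hash. The in-code comments give the reasoning; a prime modulus spreads weak string hashes evenly, while a power-of-two size lets the gfid hash be reduced with a bit mask instead of a division. A small illustrative comparison (not GlusterFS code):

    #include <stdint.h>
    #include <stdio.h>

    #define INODE_BUCKETS 65536u  /* power of two: reduce with a mask */
    #define DENTRY_BUCKETS 14057u /* prime: reduce with a modulus */

    static uint32_t inode_index(uint32_t gfid_hash)
    {
        return gfid_hash & (INODE_BUCKETS - 1); /* no division needed */
    }

    static uint32_t dentry_index(uint32_t name_hash)
    {
        return name_hash % DENTRY_BUCKETS; /* prime modulus mixes bits */
    }

    int main(void)
    {
        uint32_t h = 0xdeadbeef;

        printf("inode bucket:  %u\n", inode_index(h));
        printf("dentry bucket: %u\n", dentry_index(h));
        return 0;
    }

Callers that keep the old behaviour simply pass 0, 0 (e.g. inode_table_new(131072, new_subvol, 0, 0)); only the self-heal daemon, whose lru_limit is 1, overrides them with 131 and 128, shrinking each per-brick table from ~1M to a few kilobytes of list heads.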
diff --git a/SOURCES/0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch b/SOURCES/0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
new file mode 100644
index 0000000..e8a4906
--- /dev/null
+++ b/SOURCES/0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
@@ -0,0 +1,784 @@
+From 5294c82e0528059b10cbaab7805b20e76ffdd66b Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Mon, 30 Nov 2020 17:39:53 +0530
+Subject: [PATCH 510/511] glusterd[brick_mux]: Optimize friend handshake code
+ to avoid call_bail (#1614)
+
+During the glusterd handshake, glusterd receives a volume dictionary
+from the peer end to compare against its own volume dictionary. If the
+options differ, it sets a key to record that volume options have changed
+and calls an import synctask to delete/start the volume. In a brick_mux
+environment with a high number of volumes (5k), the dict API in the function
+glusterd_compare_friend_volume takes time because the function
+glusterd_handle_friend_req saves all peer volume data in a single dictionary.
+Due to the time taken by glusterd_handle_friend_req, RPC requests receive a
+call_bail from the peer end and gluster (CLI) cannot show volume status.
+
+Solution: To optimize the code, the following changes were made
+1) Populate a separate dictionary to save the peer end's version-specific
+   data, so that the function can decide quickly whether the peer end
+   has any volume updates (first sketch below).
+2) If a volume's version differs, set a bit in status_arr instead of
+   saving a key in a dictionary, which is faster (second sketch below).
+
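+A minimal sketch of change 1, with plain C arrays standing in for dict_t
+(illustrative code, not the patch itself): while walking the serialized
+pairs once, any key whose suffix matches one of a small set is also added
+to the second, version-only dictionary:
+
+    #include <stdio.h>
+    #include <string.h>
+
+    static const char *suffixes[] = {".version", ".ckusm", ".quota-version"};
+    #define NSUFFIX (sizeof(suffixes) / sizeof(suffixes[0]))
+
+    /* mirrors the suffix test in dict_unserialize_specific_keys() */
+    static int wanted(const char *key)
+    {
+        size_t klen = strlen(key), j;
+
+        for (j = 0; j < NSUFFIX; j++) {
+            size_t slen = strlen(suffixes[j]);
+            if (klen > slen && !strcmp(key + klen - slen, suffixes[j]))
+                return 1;
+        }
+        return 0;
+    }
+
+    int main(void)
+    {
+        const char *keys[] = {"volume1.version", "volume1.brick0.path"};
+        size_t i;
+
+        for (i = 0; i < 2; i++)
+            if (wanted(keys[i])) /* the full dict gets every key anyway */
+                printf("peer_ver gets %s\n", keys[i]);
+        return 0;
+    }
+
+And a sketch of change 2: flag volumes that need an update in a 64-bit
+bitmap, then walk only the set bits. Indices are 0-based here for clarity
+(the patched import loop uses 1-based counts, hence its ffsll(mask) - 2);
+__builtin_ffsll is the GCC/Clang builtin equivalent of ffsll(3):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define NVOLS 130
+
+    int main(void)
+    {
+        uint64_t status[(NVOLS + 63) / 64] = {0};
+        int i;
+
+        /* compare phase: flag volumes 5, 70 and 129 for update */
+        status[5 / 64] |= 1ULL << (5 % 64);
+        status[70 / 64] |= 1ULL << (70 % 64);
+        status[129 / 64] |= 1ULL << (129 % 64);
+
+        /* import phase: visit only the set bits, one word at a time */
+        for (i = 0; i < NVOLS; i += 64) {
+            uint64_t bm = status[i / 64];
+
+            while (bm != 0) {
+                uint64_t mask = bm & (-bm); /* isolate lowest set bit */
+
+                bm ^= mask; /* clear it */
+                printf("update volume %d\n", i + __builtin_ffsll(mask) - 1);
+            }
+        }
+        return 0;
+    }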
+Note: To validate the changes, the following procedure was used
+1) Setup 5100 distributed volumes 3x1
+2) Enable brick_mux
+3) Start all the volumes
+4) Kill all gluster processes on 3rd node
+5) Run a loop to update volume option on a 1st node
+   for i in {1..5100}; do gluster v set vol$i performance.open-behind off; done
+6) Start the glusterd process on the 3rd node
+7) Wait for the handshake to finish and check that there is no call_bail message
+   in the logs
+
+> Change-Id: Ibad7c23988539cc369ecc39dea2ea6985470bee1
+> Fixes: #1613
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit 12545d91eed27ff9abb0505a12c7d4e75b45a53e)
+> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1613)
+
+Change-Id: Ibad7c23988539cc369ecc39dea2ea6985470bee1
+BUG: 1898784
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221193
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/ctx.c                       |   4 +
+ libglusterfs/src/dict.c                      | 166 ++++++++++++++++++++++++++-
+ libglusterfs/src/globals.c                   |   2 -
+ libglusterfs/src/glusterfs/dict.h            |   5 +
+ libglusterfs/src/glusterfs/globals.h         |   2 +
+ libglusterfs/src/libglusterfs.sym            |   1 +
+ xlators/mgmt/glusterd/src/glusterd-handler.c |  39 ++++---
+ xlators/mgmt/glusterd/src/glusterd-sm.c      |   6 +-
+ xlators/mgmt/glusterd/src/glusterd-sm.h      |   1 +
+ xlators/mgmt/glusterd/src/glusterd-utils.c   | 148 ++++++++++++++----------
+ xlators/mgmt/glusterd/src/glusterd-utils.h   |   2 +-
+ xlators/mgmt/glusterd/src/glusterd.h         |   8 +-
+ 12 files changed, 301 insertions(+), 83 deletions(-)
+
+diff --git a/libglusterfs/src/ctx.c b/libglusterfs/src/ctx.c
+index 4a001c2..ae1a77a 100644
+--- a/libglusterfs/src/ctx.c
++++ b/libglusterfs/src/ctx.c
+@@ -14,6 +14,7 @@
+ #include "glusterfs/glusterfs.h"
+ #include "timer-wheel.h"
+ 
++glusterfs_ctx_t *global_ctx = NULL;
+ glusterfs_ctx_t *
+ glusterfs_ctx_new()
+ {
+@@ -51,6 +52,9 @@ glusterfs_ctx_new()
+     GF_ATOMIC_INIT(ctx->stats.max_dict_pairs, 0);
+     GF_ATOMIC_INIT(ctx->stats.total_pairs_used, 0);
+     GF_ATOMIC_INIT(ctx->stats.total_dicts_used, 0);
++
++    if (!global_ctx)
++        global_ctx = ctx;
+ out:
+     return ctx;
+ }
+diff --git a/libglusterfs/src/dict.c b/libglusterfs/src/dict.c
+index d8cdda4..e5f619c 100644
+--- a/libglusterfs/src/dict.c
++++ b/libglusterfs/src/dict.c
+@@ -56,7 +56,13 @@ struct dict_cmp {
+ static data_t *
+ get_new_data()
+ {
+-    data_t *data = mem_get(THIS->ctx->dict_data_pool);
++    data_t *data = NULL;
++
++    if (global_ctx) {
++        data = mem_get(global_ctx->dict_data_pool);
++    } else {
++        data = mem_get(THIS->ctx->dict_data_pool);
++    }
+ 
+     if (!data)
+         return NULL;
+@@ -3503,3 +3509,161 @@ unlock:
+     UNLOCK(&dict->lock);
+     return 0;
+ }
++
++/* Populate a specific dictionary on the basis of the passed key array
++   while unserializing the buffer
++*/
++int32_t
++dict_unserialize_specific_keys(char *orig_buf, int32_t size, dict_t **fill,
++                               char **suffix_key_arr, dict_t **specific_dict,
++                               int totkeycount)
++{
++    char *buf = orig_buf;
++    int ret = -1;
++    int32_t count = 0;
++    int i = 0;
++    int j = 0;
++
++    data_t *value = NULL;
++    char *key = NULL;
++    int32_t keylen = 0;
++    int32_t vallen = 0;
++    int32_t hostord = 0;
++    xlator_t *this = NULL;
++    int32_t keylenarr[totkeycount];
++
++    this = THIS;
++    GF_ASSERT(this);
++
++    if (!buf) {
++        gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
++                         "buf is null!");
++        goto out;
++    }
++
++    if (size == 0) {
++        gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
++                         "size is 0!");
++        goto out;
++    }
++
++    if (!fill) {
++        gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
++                         "fill is null!");
++        goto out;
++    }
++
++    if (!*fill) {
++        gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
++                         "*fill is null!");
++        goto out;
++    }
++
++    if ((buf + DICT_HDR_LEN) > (orig_buf + size)) {
++        gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                         "undersized buffer "
++                         "passed. available (%lu) < required (%lu)",
++                         (long)(orig_buf + size), (long)(buf + DICT_HDR_LEN));
++        goto out;
++    }
++
++    memcpy(&hostord, buf, sizeof(hostord));
++    count = ntoh32(hostord);
++    buf += DICT_HDR_LEN;
++
++    if (count < 0) {
++        gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
++                "count=%d", count, NULL);
++        goto out;
++    }
++
++    /* Compute specific key length and save in array */
++    for (i = 0; i < totkeycount; i++) {
++        keylenarr[i] = strlen(suffix_key_arr[i]);
++    }
++
++    for (i = 0; i < count; i++) {
++        if ((buf + DICT_DATA_HDR_KEY_LEN) > (orig_buf + size)) {
++            gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                             "undersized "
++                             "buffer passed. available (%lu) < "
++                             "required (%lu)",
++                             (long)(orig_buf + size),
++                             (long)(buf + DICT_DATA_HDR_KEY_LEN));
++            goto out;
++        }
++        memcpy(&hostord, buf, sizeof(hostord));
++        keylen = ntoh32(hostord);
++        buf += DICT_DATA_HDR_KEY_LEN;
++
++        if ((buf + DICT_DATA_HDR_VAL_LEN) > (orig_buf + size)) {
++            gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                             "undersized "
++                             "buffer passed. available (%lu) < "
++                             "required (%lu)",
++                             (long)(orig_buf + size),
++                             (long)(buf + DICT_DATA_HDR_VAL_LEN));
++            goto out;
++        }
++        memcpy(&hostord, buf, sizeof(hostord));
++        vallen = ntoh32(hostord);
++        buf += DICT_DATA_HDR_VAL_LEN;
++
++        if ((keylen < 0) || (vallen < 0)) {
++            gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                             "undersized length passed "
++                             "key:%d val:%d",
++                             keylen, vallen);
++            goto out;
++        }
++        if ((buf + keylen) > (orig_buf + size)) {
++            gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                             "undersized buffer passed. "
++                             "available (%lu) < required (%lu)",
++                             (long)(orig_buf + size), (long)(buf + keylen));
++            goto out;
++        }
++        key = buf;
++        buf += keylen + 1; /* for '\0' */
++
++        if ((buf + vallen) > (orig_buf + size)) {
++            gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
++                             "undersized buffer passed. "
++                             "available (%lu) < required (%lu)",
++                             (long)(orig_buf + size), (long)(buf + vallen));
++            goto out;
++        }
++        value = get_new_data();
++
++        if (!value) {
++            ret = -1;
++            goto out;
++        }
++        value->len = vallen;
++        value->data = gf_memdup(buf, vallen);
++        value->data_type = GF_DATA_TYPE_STR_OLD;
++        value->is_static = _gf_false;
++        buf += vallen;
++
++        ret = dict_addn(*fill, key, keylen, value);
++        if (ret < 0) {
++            data_destroy(value);
++            goto out;
++        }
++        for (j = 0; j < totkeycount; j++) {
++            if (keylen > keylenarr[j]) {
++                if (!strcmp(key + keylen - keylenarr[j], suffix_key_arr[j])) {
++                    ret = dict_addn(*specific_dict, key, keylen, value);
++                    break;
++                }
++            }
++        }
++
++        if (ret < 0)
++            goto out;
++    }
++
++    ret = 0;
++out:
++    return ret;
++}
+diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
+index e433ee8..30c15b6 100644
+--- a/libglusterfs/src/globals.c
++++ b/libglusterfs/src/globals.c
+@@ -96,7 +96,6 @@ const char *gf_upcall_list[GF_UPCALL_FLAGS_MAXVALUE] = {
+ /* This global ctx is a bad hack to prevent some of the libgfapi crashes.
+  * This should be removed once the patch on resource pool is accepted
+  */
+-glusterfs_ctx_t *global_ctx = NULL;
+ pthread_mutex_t global_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+ xlator_t global_xlator;
+ static int gf_global_mem_acct_enable = 1;
+@@ -236,7 +235,6 @@ __glusterfs_this_location()
+     if (*this_location == NULL) {
+         thread_xlator = &global_xlator;
+     }
+-
+     return this_location;
+ }
+ 
+diff --git a/libglusterfs/src/glusterfs/dict.h b/libglusterfs/src/glusterfs/dict.h
+index 8239c7a..6e469c7 100644
+--- a/libglusterfs/src/glusterfs/dict.h
++++ b/libglusterfs/src/glusterfs/dict.h
+@@ -423,4 +423,9 @@ dict_has_key_from_array(dict_t *dict, char **strings, gf_boolean_t *result);
+ 
+ int
+ dict_serialized_length_lk(dict_t *this);
++
++int32_t
++dict_unserialize_specific_keys(char *orig_buf, int32_t size, dict_t **fill,
++                               char **specific_key_arr, dict_t **specific_dict,
++                               int totkeycount);
+ #endif
+diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
+index cc145cd..33fb023 100644
+--- a/libglusterfs/src/glusterfs/globals.h
++++ b/libglusterfs/src/glusterfs/globals.h
+@@ -199,4 +199,6 @@ int
+ gf_global_mem_acct_enable_get(void);
+ int
+ gf_global_mem_acct_enable_set(int val);
++
++extern glusterfs_ctx_t *global_ctx;
+ #endif /* !_GLOBALS_H */
+diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
+index d060292..bc770e2 100644
+--- a/libglusterfs/src/libglusterfs.sym
++++ b/libglusterfs/src/libglusterfs.sym
+@@ -436,6 +436,7 @@ dict_clear_flag
+ dict_check_flag
+ dict_unref
+ dict_unserialize
++dict_unserialize_specific_keys
+ drop_token
+ eh_destroy
+ eh_dump
+diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
+index b8799ab..908361c 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
+@@ -86,6 +86,9 @@ glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
+     return ret;
+ }
+ 
++static char *specific_key_suffix[] = {".quota-cksum", ".ckusm", ".version",
++                                      ".quota-version", ".name"};
++
+ static int
+ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+                            int port, gd1_mgmt_friend_req *friend_req)
+@@ -97,6 +100,8 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+     char rhost[UNIX_PATH_MAX + 1] = {0};
+     uuid_t friend_uuid = {0};
+     dict_t *dict = NULL;
++    dict_t *peer_ver = NULL;
++    int totcount = sizeof(specific_key_suffix) / sizeof(specific_key_suffix[0]);
+ 
+     gf_uuid_parse(uuid_utoa(uuid), friend_uuid);
+     if (!port)
+@@ -104,8 +109,19 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+ 
+     ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
+ 
++    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
++    dict = dict_new();
++    peer_ver = dict_new();
++
+     RCU_READ_LOCK;
+ 
++    if (!ctx || !dict || !peer_ver) {
++        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
++               "Unable to allocate memory");
++        ret = -1;
++        goto out;
++    }
++
+     peerinfo = glusterd_peerinfo_find(uuid, rhost);
+ 
+     if (peerinfo == NULL) {
+@@ -130,28 +146,14 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+     event->peername = gf_strdup(peerinfo->hostname);
+     gf_uuid_copy(event->peerid, peerinfo->uuid);
+ 
+-    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+-
+-    if (!ctx) {
+-        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+-               "Unable to allocate memory");
+-        ret = -1;
+-        goto out;
+-    }
+-
+     gf_uuid_copy(ctx->uuid, uuid);
+     if (hostname)
+         ctx->hostname = gf_strdup(hostname);
+     ctx->req = req;
+ 
+-    dict = dict_new();
+-    if (!dict) {
+-        ret = -1;
+-        goto out;
+-    }
+-
+-    ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len,
+-                           &dict);
++    ret = dict_unserialize_specific_keys(
++        friend_req->vols.vols_val, friend_req->vols.vols_len, &dict,
++        specific_key_suffix, &peer_ver, totcount);
+ 
+     if (ret)
+         goto out;
+@@ -159,6 +161,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+         dict->extra_stdfree = friend_req->vols.vols_val;
+ 
+     ctx->vols = dict;
++    ctx->peer_ver = peer_ver;
+     event->ctx = ctx;
+ 
+     ret = glusterd_friend_sm_inject_event(event);
+@@ -188,6 +191,8 @@ out:
+         } else {
+             free(friend_req->vols.vols_val);
+         }
++        if (peer_ver)
++            dict_unref(peer_ver);
+         if (event)
+             GF_FREE(event->peername);
+         GF_FREE(event);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
+index 044da3d..d10a792 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
+@@ -106,6 +106,8 @@ glusterd_destroy_friend_req_ctx(glusterd_friend_req_ctx_t *ctx)
+ 
+     if (ctx->vols)
+         dict_unref(ctx->vols);
++    if (ctx->peer_ver)
++        dict_unref(ctx->peer_ver);
+     GF_FREE(ctx->hostname);
+     GF_FREE(ctx);
+ }
+@@ -936,8 +938,8 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
+     // Build comparison logic here.
+     pthread_mutex_lock(&conf->import_volumes);
+     {
+-        ret = glusterd_compare_friend_data(ev_ctx->vols, &status,
+-                                           event->peername);
++        ret = glusterd_compare_friend_data(ev_ctx->vols, ev_ctx->peer_ver,
++                                           &status, event->peername);
+         if (ret) {
+             pthread_mutex_unlock(&conf->import_volumes);
+             goto out;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
+index ce008ac..efdf68e 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
++++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
+@@ -174,6 +174,7 @@ typedef struct glusterd_friend_req_ctx_ {
+     rpcsvc_request_t *req;
+     int port;
+     dict_t *vols;
++    dict_t *peer_ver;  // Dictionary to save peer version data
+ } glusterd_friend_req_ctx_t;
+ 
+ typedef struct glusterd_friend_update_ctx_ {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index f7030fb..cf32bd9 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -3709,12 +3709,14 @@ out:
+     return ret;
+ }
+ 
+-int32_t
+-glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+-                               int32_t *status, char *hostname)
++static int32_t
++glusterd_compare_friend_volume(dict_t *peer_data,
++                               glusterd_friend_synctask_args_t *arg,
++                               int32_t count, int32_t *status, char *hostname)
+ {
+     int32_t ret = -1;
+     char key[64] = "";
++    char key_prefix[32];
+     int keylen;
+     glusterd_volinfo_t *volinfo = NULL;
+     char *volname = NULL;
+@@ -3726,15 +3728,20 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+     xlator_t *this = NULL;
+ 
+     GF_ASSERT(peer_data);
++    GF_ASSERT(arg);
+     GF_ASSERT(status);
+ 
+     this = THIS;
+     GF_ASSERT(this);
+ 
+-    keylen = snprintf(key, sizeof(key), "volume%d.name", count);
+-    ret = dict_get_strn(peer_data, key, keylen, &volname);
+-    if (ret)
++    snprintf(key_prefix, sizeof(key_prefix), "volume%d", count);
++    keylen = snprintf(key, sizeof(key), "%s.name", key_prefix);
++    ret = dict_get_strn(arg->peer_ver_data, key, keylen, &volname);
++    if (ret) {
++        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
++                "Key=%s is NULL in peer_ver_data", key, NULL);
+         goto out;
++    }
+ 
+     ret = glusterd_volinfo_find(volname, &volinfo);
+     if (ret) {
+@@ -3750,10 +3757,13 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+         goto out;
+     }
+ 
+-    keylen = snprintf(key, sizeof(key), "volume%d.version", count);
+-    ret = dict_get_int32n(peer_data, key, keylen, &version);
+-    if (ret)
++    keylen = snprintf(key, sizeof(key), "%s.version", key_prefix);
++    ret = dict_get_int32n(arg->peer_ver_data, key, keylen, &version);
++    if (ret) {
++        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
++                "Key=%s is NULL in peer_ver_data", key, NULL);
+         goto out;
++    }
+ 
+     if (version > volinfo->version) {
+         // Mismatch detected
+@@ -3772,10 +3782,13 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+ 
+     // Now, versions are same, compare cksums.
+     //
+-    snprintf(key, sizeof(key), "volume%d.ckusm", count);
+-    ret = dict_get_uint32(peer_data, key, &cksum);
+-    if (ret)
++    snprintf(key, sizeof(key), "%s.ckusm", key_prefix);
++    ret = dict_get_uint32(arg->peer_ver_data, key, &cksum);
++    if (ret) {
++        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
++                "Key=%s is NULL in peer_ver_data", key, NULL);
+         goto out;
++    }
+ 
+     if (cksum != volinfo->cksum) {
+         ret = 0;
+@@ -3790,8 +3803,8 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+     if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
+         goto skip_quota;
+ 
+-    snprintf(key, sizeof(key), "volume%d.quota-version", count);
+-    ret = dict_get_uint32(peer_data, key, &quota_version);
++    snprintf(key, sizeof(key), "%s.quota-version", key_prefix);
++    ret = dict_get_uint32(arg->peer_ver_data, key, &quota_version);
+     if (ret) {
+         gf_msg_debug(this->name, 0,
+                      "quota-version key absent for"
+@@ -3809,6 +3822,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+                    "%d on peer %s",
+                    volinfo->volname, volinfo->quota_conf_version, quota_version,
+                    hostname);
++            GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
+             *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
+             goto out;
+         } else if (quota_version < volinfo->quota_conf_version) {
+@@ -3819,8 +3833,8 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
+ 
+     // Now, versions are same, compare cksums.
+     //
+-    snprintf(key, sizeof(key), "volume%d.quota-cksum", count);
+-    ret = dict_get_uint32(peer_data, key, &quota_cksum);
++    snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix);
++    ret = dict_get_uint32(arg->peer_ver_data, key, &quota_cksum);
+     if (ret) {
+         gf_msg_debug(this->name, 0,
+                      "quota checksum absent for "
+@@ -3846,13 +3860,12 @@ skip_quota:
+     *status = GLUSTERD_VOL_COMP_SCS;
+ 
+ out:
+-    keylen = snprintf(key, sizeof(key), "volume%d.update", count);
+-
+     if (*status == GLUSTERD_VOL_COMP_UPDATE_REQ) {
+-        ret = dict_set_int32n(peer_data, key, keylen, 1);
+-    } else {
+-        ret = dict_set_int32n(peer_data, key, keylen, 0);
++        /* Set the status bit to ensure the volume is updated on the peer
++         */
++        arg->status_arr[(count / 64)] ^= 1UL << (count % 64);
+     }
++
+     if (*status == GLUSTERD_VOL_COMP_RJT) {
+         gf_event(EVENT_COMPARE_FRIEND_VOLUME_FAILED, "volume=%s",
+                  volinfo->volname);
+@@ -4935,8 +4948,9 @@ out:
+     return ret;
+ }
+ 
+-int32_t
+-glusterd_import_friend_volume(dict_t *peer_data, int count)
++static int32_t
++glusterd_import_friend_volume(dict_t *peer_data, int count,
++                              glusterd_friend_synctask_args_t *arg)
+ {
+     int32_t ret = -1;
+     glusterd_conf_t *priv = NULL;
+@@ -4954,10 +4968,27 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
+     priv = this->private;
+     GF_ASSERT(priv);
+ 
+-    ret = snprintf(key, sizeof(key), "volume%d.update", count);
+-    ret = dict_get_int32n(peer_data, key, ret, &update);
+-    if (ret || !update) {
++    if (arg) {
++        /* Check whether the volume options were updated on the other peers
++         */
++        update = (1UL & (arg->status_arr[(count / 64)] >> (count % 64)));
++    } else {
++        ret = snprintf(key, sizeof(key), "volume%d.update", count);
++        ret = dict_get_int32n(peer_data, key, ret, &update);
++        if (ret) {
++            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
++                    "Key=%s", key, NULL);
++            goto out;
++        }
++    }
++
++    if (!update) {
+         /* if update is 0 that means the volume is not imported */
++        gf_log(this->name, GF_LOG_DEBUG,
++               "The volume%d does"
++               " not have any peer change",
++               count);
++        ret = 0;
+         goto out;
+     }
+ 
+@@ -5045,6 +5076,8 @@ glusterd_import_friend_volumes_synctask(void *opaque)
+     glusterd_conf_t *conf = NULL;
+     dict_t *peer_data = NULL;
+     glusterd_friend_synctask_args_t *arg = NULL;
++    uint64_t bm = 0;
++    uint64_t mask = 0;
+ 
+     this = THIS;
+     GF_ASSERT(this);
+@@ -5056,17 +5089,7 @@ glusterd_import_friend_volumes_synctask(void *opaque)
+     if (!arg)
+         goto out;
+ 
+-    peer_data = dict_new();
+-    if (!peer_data) {
+-        goto out;
+-    }
+-
+-    ret = dict_unserialize(arg->dict_buf, arg->dictlen, &peer_data);
+-    if (ret) {
+-        errno = ENOMEM;
+-        goto out;
+-    }
+-
++    peer_data = arg->peer_data;
+     ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count);
+     if (ret)
+         goto out;
+@@ -5083,11 +5106,18 @@ glusterd_import_friend_volumes_synctask(void *opaque)
+     conf->restart_bricks = _gf_true;
+ 
+     while (i <= count) {
+-        ret = glusterd_import_friend_volume(peer_data, i);
+-        if (ret) {
+-            break;
++        bm = arg->status_arr[i / 64];
++        while (bm != 0) {
++            /* mask will contain the lowest bit set from bm. */
++            mask = bm & (-bm);
++            bm ^= mask;
++            ret = glusterd_import_friend_volume(peer_data, i + ffsll(mask) - 2,
++                                                arg);
++            if (ret < 0) {
++                break;
++            }
+         }
+-        i++;
++        i += 64;
+     }
+     if (i > count) {
+         glusterd_svcs_manager(NULL);
+@@ -5095,11 +5125,9 @@ glusterd_import_friend_volumes_synctask(void *opaque)
+     conf->restart_bricks = _gf_false;
+     synccond_broadcast(&conf->cond_restart_bricks);
+ out:
+-    if (peer_data)
+-        dict_unref(peer_data);
+     if (arg) {
+-        if (arg->dict_buf)
+-            GF_FREE(arg->dict_buf);
++        dict_unref(arg->peer_data);
++        dict_unref(arg->peer_ver_data);
+         GF_FREE(arg);
+     }
+ 
+@@ -5121,7 +5149,7 @@ glusterd_import_friend_volumes(dict_t *peer_data)
+         goto out;
+ 
+     while (i <= count) {
+-        ret = glusterd_import_friend_volume(peer_data, i);
++        ret = glusterd_import_friend_volume(peer_data, i, NULL);
+         if (ret)
+             goto out;
+         i++;
+@@ -5260,7 +5288,8 @@ out:
+ }
+ 
+ int32_t
+-glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
++glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
++                             char *hostname)
+ {
+     int32_t ret = -1;
+     int32_t count = 0;
+@@ -5289,8 +5318,19 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
+     if (ret)
+         goto out;
+ 
++    arg = GF_CALLOC(1, sizeof(*arg) + sizeof(uint64_t) * (count / 64),
++                    gf_common_mt_char);
++    if (!arg) {
++        ret = -1;
++        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
++               "Out Of Memory");
++        goto out;
++    }
++    arg->peer_data = dict_ref(peer_data);
++    arg->peer_ver_data = dict_ref(cmp);
+     while (i <= count) {
+-        ret = glusterd_compare_friend_volume(peer_data, i, status, hostname);
++        ret = glusterd_compare_friend_volume(peer_data, arg, i, status,
++                                             hostname);
+         if (ret)
+             goto out;
+ 
+@@ -5310,21 +5350,13 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
+          * first brick to come up before attaching the subsequent bricks
+          * in case brick multiplexing is enabled
+          */
+-        arg = GF_CALLOC(1, sizeof(*arg), gf_common_mt_char);
+-        ret = dict_allocate_and_serialize(peer_data, &arg->dict_buf,
+-                                          &arg->dictlen);
+-        if (ret < 0) {
+-            gf_log(this->name, GF_LOG_ERROR,
+-                   "dict_serialize failed while handling "
+-                   " import friend volume request");
+-            goto out;
+-        }
+-
+         glusterd_launch_synctask(glusterd_import_friend_volumes_synctask, arg);
+     }
+ 
+ out:
+     if (ret && arg) {
++        dict_unref(arg->peer_data);
++        dict_unref(arg->peer_ver_data);
+         GF_FREE(arg);
+     }
+     gf_msg_debug(this->name, 0, "Returning with ret: %d, status: %d", ret,
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
+index 5f5de82..02d85d2 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
+@@ -231,7 +231,7 @@ glusterd_add_volumes_to_export_dict(dict_t *peer_data, char **buf,
+                                     u_int *length);
+ 
+ int32_t
+-glusterd_compare_friend_data(dict_t *peer_data, int32_t *status,
++glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
+                              char *hostname);
+ 
+ int
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index f739b5d..efe4d0e 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -234,8 +234,12 @@ typedef struct glusterd_add_dict_args {
+ } glusterd_add_dict_args_t;
+ 
+ typedef struct glusterd_friend_synctask_args {
+-    char *dict_buf;
+-    u_int dictlen;
++    dict_t *peer_data;
++    dict_t *peer_ver_data;  // Dictionary to save peer version data
++    /* status_arr[1] is not the real size; the real size of the
++       array is allocated dynamically (flexible-array idiom)
++    */
++    uint64_t status_arr[1];
+ } glusterd_friend_synctask_args_t;
+ 
+ typedef enum gf_brick_status {
+-- 
+1.8.3.1
+
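The import loop above walks the per-peer status bitmap with the classic lowest-set-bit trick: bm & (-bm) isolates the least-significant 1-bit and ffsll() converts it to a 1-based position, so only volumes actually flagged as updated get imported. Below is a minimal standalone sketch of the same walk (the two-word bitmap and volume numbers are illustrative; the "- 2" in the patch itself arises from glusterd's 1-based volume counter combined with ffsll()'s 1-based result):

    /* Sketch of the status_arr walk; assumes glibc for ffsll(). */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint64_t status_arr[2] = {0};
        int flagged[] = {1, 5, 64};              /* volumes marked as updated */

        for (size_t k = 0; k < sizeof(flagged) / sizeof(flagged[0]); k++)
            status_arr[flagged[k] / 64] |= 1ULL << (flagged[k] % 64);

        for (int i = 0; i < 2 * 64; i += 64) {   /* one 64-bit word per pass */
            uint64_t bm = status_arr[i / 64];
            while (bm != 0) {
                uint64_t mask = bm & (-bm);      /* lowest set bit of bm */
                bm ^= mask;                      /* clear it */
                printf("import volume %d\n", i + ffsll(mask) - 1);
            }
        }
        return 0;
    }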
diff --git a/SOURCES/0511-features-shard-Missing-format-specifier.patch b/SOURCES/0511-features-shard-Missing-format-specifier.patch
new file mode 100644
index 0000000..baf6cf4
--- /dev/null
+++ b/SOURCES/0511-features-shard-Missing-format-specifier.patch
@@ -0,0 +1,39 @@
+From 868d346cc35c222d19b95bd9c367674c9ea859df Mon Sep 17 00:00:00 2001
+From: Vinayakswami Hariharmath <vharihar@redhat.com>
+Date: Tue, 15 Dec 2020 16:23:49 +0530
+Subject: [PATCH 511/511] features/shard: Missing format specifier
+
+The PRIu64 format specifier explicitly needs a '%' (percent sign)
+prefix, and that was missing in the downstream commit
+below:
+
+https://code.engineering.redhat.com/gerrit/#/c/221061/
+
+BUG: 1752739
+Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
+
+Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
+Change-Id: I4598893e3fcca3a2b3e6e8ef9b64b3e5e98923e6
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221217
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ xlators/features/shard/src/shard.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
+index a967f35..099b062 100644
+--- a/xlators/features/shard/src/shard.c
++++ b/xlators/features/shard/src/shard.c
+@@ -1855,7 +1855,7 @@ int shard_truncate_last_shard(call_frame_t *frame, xlator_t *this,
+    */
+   if (!inode) {
+     gf_msg_debug(this->name, 0,
+-                 "Last shard to be truncated absent in backend: " PRIu64
++                 "Last shard to be truncated absent in backend:%" PRIu64
+                  " of gfid: %s. Directly proceeding to update file size",
+                  local->first_block, uuid_utoa(local->loc.inode->gfid));
+     shard_update_file_size(frame, this, NULL, &local->loc,
+-- 
+1.8.3.1
+
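For reference, this is the standard <inttypes.h> idiom the one-character fix restores: PRIu64 expands only to the length-modifier-and-conversion part of the specifier ("lu" on 64-bit Linux, "llu" on 32-bit), so the '%' must come from the adjacent string literal:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t first_block = 42;

        /* Broken: without '%', PRIu64 is pasted in as literal text ("lu"). */
        printf("absent in backend: " PRIu64 "\n", first_block);

        /* Fixed: "%" PRIu64 concatenates into a real conversion ("%lu"). */
        printf("absent in backend:%" PRIu64 "\n", first_block);
        return 0;
    }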
diff --git a/SOURCES/0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch b/SOURCES/0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
new file mode 100644
index 0000000..37de503
--- /dev/null
+++ b/SOURCES/0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
@@ -0,0 +1,105 @@
+From c963653a89c3f6466af9a3e8f19246a7907f7f8c Mon Sep 17 00:00:00 2001
+From: nik-redhat <nladha@redhat.com>
+Date: Thu, 30 Jul 2020 13:04:52 +0530
+Subject: [PATCH 512/517] glusterd: shared storage mount fails in ipv6
+ environment
+
+Issue:
+In an ipv6 environment, mounting the glusterd_shared_storage
+volume fails because the ipv6 environment is not recognised.
+
+Fix:
+In an ipv6 environment, the address-family is passed
+to the hook script when shared storage is created; depending
+on the address-family, the --xlator-option=transport.address-family=inet6
+option is added to the mount command, and the mount succeeds.
+
+>Fixes: #1406
+>
+>Change-Id: Ib1888c34d85e6c01618b0ba214cbe1f57576908d
+>Signed-off-by: nik-redhat <nladha@redhat.com>
+
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24797
+BUG: 1856574
+
+Change-Id: Ib1888c34d85e6c01618b0ba214cbe1f57576908d
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221844
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Srijan Sivakumar <ssivakum@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ .../set/post/S32gluster_enable_shared_storage.sh      | 11 +++++++++--
+ xlators/mgmt/glusterd/src/glusterd-hooks.c            | 19 +++++++++++++++++++
+ 2 files changed, 28 insertions(+), 2 deletions(-)
+
+diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+index 3bae37c..9597503 100755
+--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
++++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+@@ -104,8 +104,15 @@ function check_volume_status()
+     echo $status
+ }
+ 
+-mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+-           /run/gluster/shared_storage"
++key=`echo $5 | cut -d '=' -f 1`
++val=`echo $5 | cut -d '=' -f 2`
++if [ "$key" == "transport.address-family" ]; then
++    mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
++               $local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage"
++else
++    mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
++           /var/run/gluster/shared_storage"
++fi
+ 
+ if [ "$option" == "enable" ]; then
+     retry=0;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
+index 216cdf7..4f0d775 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
++++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
+@@ -200,11 +200,16 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
+     int i = 0;
+     int count = 0;
+     int ret = -1;
++    int flag = 0;
+     char query[1024] = {
+         0,
+     };
+     char *key = NULL;
+     char *value = NULL;
++    char *inet_family = NULL;
++    xlator_t *this = NULL;
++    this = THIS;
++    GF_ASSERT(this);
+ 
+     ret = dict_get_int32(dict, "count", &count);
+     if (ret)
+@@ -228,9 +233,23 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
+             continue;
+ 
+         runner_argprintf(runner, "%s=%s", key, value);
++        if ((strncmp(key, "cluster.enable-shared-storage",
++                     SLEN("cluster.enable-shared-storage")) == 0 ||
++             strncmp(key, "enable-shared-storage",
++                     SLEN("enable-shared-storage")) == 0) &&
++            strncmp(value, "enable", SLEN("enable")) == 0)
++            flag = 1;
+     }
+ 
+     glusterd_hooks_add_custom_args(dict, runner);
++    if (flag == 1) {
++        ret = dict_get_str_sizen(this->options, "transport.address-family",
++                                 &inet_family);
++        if (!ret) {
++            runner_argprintf(runner, "transport.address-family=%s",
++                             inet_family);
++        }
++    }
+ 
+     ret = 0;
+ out:
+-- 
+1.8.3.1
+
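The hook-script change boils down to splitting the fifth argument as key=value and switching the mount command on the key. A hypothetical standalone C rendering of that decision follows (the option name and mount paths come from the patch; the hostname and the exact-match check are illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *opt = "transport.address-family=inet6"; /* stands in for $5 */
        const char *host = "node1.example.com";  /* $local_node_hostname */
        const char *af_key = "transport.address-family";
        char cmd[512];

        const char *eq = strchr(opt, '=');
        if (eq && (size_t)(eq - opt) == strlen(af_key) &&
            strncmp(opt, af_key, strlen(af_key)) == 0) {
            /* ipv6 peers need the xlator-option, else the client assumes inet. */
            snprintf(cmd, sizeof(cmd),
                     "mount -t glusterfs -o xlator-option=%s=%s "
                     "%s:/gluster_shared_storage /var/run/gluster/shared_storage",
                     af_key, eq + 1, host);
        } else {
            snprintf(cmd, sizeof(cmd),
                     "mount -t glusterfs %s:/gluster_shared_storage "
                     "/var/run/gluster/shared_storage", host);
        }
        printf("%s\n", cmd);
        return 0;
    }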
diff --git a/SOURCES/0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch b/SOURCES/0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
new file mode 100644
index 0000000..ebd5609
--- /dev/null
+++ b/SOURCES/0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
@@ -0,0 +1,191 @@
+From 708c17a8a69b2657f384affaedfcf4ba0a123893 Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Wed, 23 Dec 2020 14:45:07 +0530
+Subject: [PATCH 513/517] afr: mark pending xattrs as a part of metadata heal
+
+...if pending xattrs are zero for all children.
+
+Problem:
+If there are no pending xattrs and a metadata heal needs to be
+performed, we can end up with xattrs inadvertently
+deleted from all bricks, as explained in the BZ.
+
+Fix:
+After picking one among the sources as the good copy, mark pending xattrs on
+all sources to blame the sinks. Now even if this metadata heal fails midway,
+a subsequent heal will still choose one of the valid sources that it
+picked previously.
+
+Upstream patch details:
+> Fixes: #1067
+> Change-Id: If1b050b70b0ad911e162c04db4d89b263e2b8d7b
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Upstream patch: https://review.gluster.org/#/c/glusterfs/+/21922/
+
+BUG: 1640148
+Change-Id: If1b050b70b0ad911e162c04db4d89b263e2b8d7b
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/222073
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ tests/bugs/replicate/mdata-heal-no-xattrs.t      | 59 ++++++++++++++++++++++
+ xlators/cluster/afr/src/afr-self-heal-metadata.c | 62 +++++++++++++++++++++++-
+ 2 files changed, 120 insertions(+), 1 deletion(-)
+ create mode 100644 tests/bugs/replicate/mdata-heal-no-xattrs.t
+
+diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
+new file mode 100644
+index 0000000..d3b0c50
+--- /dev/null
++++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
+@@ -0,0 +1,59 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
++TEST $CLI volume set $V0 cluster.self-heal-daemon off
++TEST $CLI volume start $V0
++
++TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
++echo "Data">$M0/FILE
++ret=$?
++TEST [ $ret -eq 0 ]
++
++# Change permission on brick-0: simulates the case where there is metadata
++# mismatch but no pending xattrs. This brick will become the source for heal.
++TEST chmod +x $B0/$V0"0"/FILE
++
++# Add gfid to xattrop
++xattrop_b0=$(afr_get_index_path $B0/$V0"0")
++base_entry_b0=`ls $xattrop_b0`
++gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
++TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
++EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
++
++TEST $CLI volume set $V0 cluster.self-heal-daemon on
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++
++# Brick-0 should contain xattrs blaming other 2 bricks.
++# The values will be zero because heal is over.
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
++EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
++
++# Brick-1 and Brick-2 must not contain any afr xattrs.
++TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
++TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
++
++# check permission bits.
++EXPECT '755' stat -c %a $B0/${V0}0/FILE
++EXPECT '755' stat -c %a $B0/${V0}1/FILE
++EXPECT '755' stat -c %a $B0/${V0}2/FILE
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++cleanup;
+diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
+index f4e31b6..03f43ba 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
++++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
+@@ -190,6 +190,59 @@ out:
+     return ret;
+ }
+ 
++static int
++__afr_selfheal_metadata_mark_pending_xattrs(call_frame_t *frame, xlator_t *this,
++                                            inode_t *inode,
++                                            struct afr_reply *replies,
++                                            unsigned char *sources)
++{
++    int ret = 0;
++    int i = 0;
++    int m_idx = 0;
++    afr_private_t *priv = NULL;
++    int raw[AFR_NUM_CHANGE_LOGS] = {0};
++    dict_t *xattr = NULL;
++
++    priv = this->private;
++    m_idx = afr_index_for_transaction_type(AFR_METADATA_TRANSACTION);
++    raw[m_idx] = 1;
++
++    xattr = dict_new();
++    if (!xattr)
++        return -ENOMEM;
++
++    for (i = 0; i < priv->child_count; i++) {
++        if (sources[i])
++            continue;
++        ret = dict_set_static_bin(xattr, priv->pending_key[i], raw,
++                                  sizeof(int) * AFR_NUM_CHANGE_LOGS);
++        if (ret) {
++            ret = -1;
++            goto out;
++        }
++    }
++
++    for (i = 0; i < priv->child_count; i++) {
++        if (!sources[i])
++            continue;
++        ret = afr_selfheal_post_op(frame, this, inode, i, xattr, NULL);
++        if (ret < 0) {
++            gf_msg(this->name, GF_LOG_INFO, -ret, AFR_MSG_SELF_HEAL_INFO,
++                   "Failed to set pending metadata xattr on child %d for %s", i,
++                   uuid_utoa(inode->gfid));
++            goto out;
++        }
++    }
++
++    afr_replies_wipe(replies, priv->child_count);
++    ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
++
++out:
++    if (xattr)
++        dict_unref(xattr);
++    return ret;
++}
++
+ /*
+  * Look for mismatching uid/gid or mode or user xattrs even if
+  * AFR xattrs don't say so, and pick one arbitrarily as winner. */
+@@ -210,6 +263,7 @@ __afr_selfheal_metadata_finalize_source(call_frame_t *frame, xlator_t *this,
+     };
+     int source = -1;
+     int sources_count = 0;
++    int ret = 0;
+ 
+     priv = this->private;
+ 
+@@ -300,7 +354,13 @@ __afr_selfheal_metadata_finalize_source(call_frame_t *frame, xlator_t *this,
+             healed_sinks[i] = 1;
+         }
+     }
+-
++    if ((sources_count == priv->child_count) && (source > -1) &&
++        (AFR_COUNT(healed_sinks, priv->child_count) != 0)) {
++        ret = __afr_selfheal_metadata_mark_pending_xattrs(frame, this, inode,
++                                                          replies, sources);
++        if (ret < 0)
++            return ret;
++    }
+ out:
+     afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
+     return source;
+-- 
+1.8.3.1
+
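The pending-xattr marking above is essentially a blame matrix: each source brick gets a trusted.afr.<vol>-client-<sink> entry whose metadata changelog slot is non-zero, and sources never blame one another, so a heal interrupted midway still converges on the same sources. A toy standalone model of who ends up blaming whom (m_idx = 1 is assumed to mirror the metadata slot returned by afr_index_for_transaction_type(); the on-disk big-endian counter encoding is left out):

    #include <stdio.h>

    #define AFR_NUM_CHANGE_LOGS 3   /* data, metadata, entry */

    int main(void)
    {
        int child_count = 3;
        unsigned char sources[] = {1, 0, 0};   /* child 0 picked as source */
        int raw[AFR_NUM_CHANGE_LOGS] = {0};
        int m_idx = 1;                         /* metadata slot (assumed) */

        raw[m_idx] = 1;                        /* blame one metadata op */

        for (int src = 0; src < child_count; src++) {
            if (!sources[src])
                continue;                      /* only sources carry blame */
            for (int sink = 0; sink < child_count; sink++) {
                if (sources[sink])
                    continue;                  /* sources never blame sources */
                printf("child %d: trusted.afr.<vol>-client-%d = [%d %d %d]\n",
                       src, sink, raw[0], raw[1], raw[2]);
            }
        }
        return 0;
    }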
diff --git a/SOURCES/0514-afr-event-gen-changes.patch b/SOURCES/0514-afr-event-gen-changes.patch
new file mode 100644
index 0000000..9f9562e
--- /dev/null
+++ b/SOURCES/0514-afr-event-gen-changes.patch
@@ -0,0 +1,308 @@
+From 4c47d6dd7c5ddcaa2a1e159427c0f6713fd33907 Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Wed, 23 Dec 2020 14:57:51 +0530
+Subject: [PATCH 514/517] afr: event gen changes
+
+The general idea of the changes is to prevent resetting event generation
+to zero in the inode ctx, since event gen is something that should
+follow 'causal order'.
+
+Change #1:
+For a read txn, in the inode refresh cbk, if event_generation is
+found to be zero, we fail the read fop. This is not needed,
+because a change in event gen is only a marker for the next inode refresh to
+happen and should not be taken into account by the current read txn.
+
+Change #2:
+The event gen being zero above can happen if there is a racing lookup,
+which resets event gen (in afr_lookup_done) if there are non-zero afr
+xattrs. The resetting is done only to trigger an inode refresh and a
+possible client-side heal on the next lookup. That can be achieved by
+setting the need_refresh flag in the inode ctx. So all
+occurrences of resetting event gen to zero were replaced with a call to
+afr_inode_need_refresh_set().
+
+Change #3:
+In both the lookup and discover paths, we do an inode refresh which is
+not required, since all 3 essentially do the same thing: update the inode
+ctx with the good/bad copies from the brick replies. Inode refresh also
+triggers background heals, but I think it is okay to do it when we call
+refresh during the read and write txns and not in the lookup path.
+
+The .t tests which relied on inode refresh in the lookup path to trigger heals
+are now changed to do a read txn so that the inode refresh and heal happen.
+
+Upstream patch details:
+> Change-Id: Iebf39a9be6ffd7ffd6e4046c96b0fa78ade6c5ec
+> Fixes: #1179
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+> Reported-by: Erik Jacobson <erik.jacobson at hpe.com>
+Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24316/
+
+BUG: 1640148
+Change-Id: Iebf39a9be6ffd7ffd6e4046c96b0fa78ade6c5ec
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/222074
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ ...fid-mismatch-resolution-with-fav-child-policy.t |  8 +-
+ xlators/cluster/afr/src/afr-common.c               | 92 +++++-----------------
+ xlators/cluster/afr/src/afr-dir-write.c            |  6 +-
+ xlators/cluster/afr/src/afr.h                      |  5 +-
+ 4 files changed, 29 insertions(+), 82 deletions(-)
+
+diff --git a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+index f4aa351..12af0c8 100644
+--- a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
++++ b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+@@ -168,8 +168,8 @@ TEST [ "$gfid_1" != "$gfid_2" ]
+ #We know that second brick has the bigger size file
+ BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/f3 | cut -d\  -f1)
+ 
+-TEST ls $M0/f3
+-TEST cat $M0/f3
++TEST ls $M0 #Trigger entry heal via readdir inode refresh
++TEST cat $M0/f3 #Trigger data heal via readv inode refresh
+ EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+ 
+ #gfid split-brain should be resolved
+@@ -215,8 +215,8 @@ TEST $CLI volume start $V0 force
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+ 
+-TEST ls $M0/f4
+-TEST cat $M0/f4
++TEST ls $M0 #Trigger entry heal via readdir inode refresh
++TEST cat $M0/f4  #Trigger data heal via readv inode refresh
+ EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+ 
+ #gfid split-brain should be resolved
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index fca2cd5..90b4f14 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -284,7 +284,7 @@ __afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
+                 metadatamap |= (1 << index);
+             }
+             if (metadatamap_old != metadatamap) {
+-                event = 0;
++                __afr_inode_need_refresh_set(inode, this);
+             }
+             break;
+ 
+@@ -297,7 +297,7 @@ __afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
+                 datamap |= (1 << index);
+             }
+             if (datamap_old != datamap)
+-                event = 0;
++                __afr_inode_need_refresh_set(inode, this);
+             break;
+ 
+         default:
+@@ -461,34 +461,6 @@ out:
+ }
+ 
+ int
+-__afr_inode_event_gen_reset_small(inode_t *inode, xlator_t *this)
+-{
+-    int ret = -1;
+-    uint16_t datamap = 0;
+-    uint16_t metadatamap = 0;
+-    uint32_t event = 0;
+-    uint64_t val = 0;
+-    afr_inode_ctx_t *ctx = NULL;
+-
+-    ret = __afr_inode_ctx_get(this, inode, &ctx);
+-    if (ret)
+-        return ret;
+-
+-    val = ctx->read_subvol;
+-
+-    metadatamap = (val & 0x000000000000ffff) >> 0;
+-    datamap = (val & 0x00000000ffff0000) >> 16;
+-    event = 0;
+-
+-    val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
+-          (((uint64_t)event) << 32);
+-
+-    ctx->read_subvol = val;
+-
+-    return ret;
+-}
+-
+-int
+ __afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+                             unsigned char *metadata, int *event_p)
+ {
+@@ -559,22 +531,6 @@ out:
+ }
+ 
+ int
+-__afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
+-{
+-    afr_private_t *priv = NULL;
+-    int ret = -1;
+-
+-    priv = this->private;
+-
+-    if (priv->child_count <= 16)
+-        ret = __afr_inode_event_gen_reset_small(inode, this);
+-    else
+-        ret = -1;
+-
+-    return ret;
+-}
+-
+-int
+ afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
+                           unsigned char *metadata, int *event_p)
+ {
+@@ -723,30 +679,22 @@ out:
+     return need_refresh;
+ }
+ 
+-static int
+-afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
++int
++__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
+ {
+     int ret = -1;
+     afr_inode_ctx_t *ctx = NULL;
+ 
+-    GF_VALIDATE_OR_GOTO(this->name, inode, out);
+-
+-    LOCK(&inode->lock);
+-    {
+-        ret = __afr_inode_ctx_get(this, inode, &ctx);
+-        if (ret)
+-            goto unlock;
+-
++    ret = __afr_inode_ctx_get(this, inode, &ctx);
++    if (ret == 0) {
+         ctx->need_refresh = _gf_true;
+     }
+-unlock:
+-    UNLOCK(&inode->lock);
+-out:
++
+     return ret;
+ }
+ 
+ int
+-afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
++afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
+ {
+     int ret = -1;
+ 
+@@ -754,7 +702,7 @@ afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
+ 
+     LOCK(&inode->lock);
+     {
+-        ret = __afr_inode_event_gen_reset(inode, this);
++        ret = __afr_inode_need_refresh_set(inode, this);
+     }
+     UNLOCK(&inode->lock);
+ out:
+@@ -1191,7 +1139,7 @@ afr_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
+     ret = afr_inode_get_readable(frame, inode, this, local->readable,
+                                  &event_generation, local->transaction.type);
+ 
+-    if (ret == -EIO || (local->is_read_txn && !event_generation)) {
++    if (ret == -EIO) {
+         /* No readable subvolume even after refresh ==> splitbrain.*/
+         if (!priv->fav_child_policy) {
+             err = EIO;
+@@ -2413,7 +2361,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         if (read_subvol == -1)
+             goto cant_interpret;
+         if (ret) {
+-            afr_inode_event_gen_reset(local->inode, this);
++            afr_inode_need_refresh_set(local->inode, this);
+             dict_del_sizen(local->replies[read_subvol].xdata, GF_CONTENT_KEY);
+         }
+     } else {
+@@ -2971,6 +2919,7 @@ afr_discover_unwind(call_frame_t *frame, xlator_t *this)
+     afr_private_t *priv = NULL;
+     afr_local_t *local = NULL;
+     int read_subvol = -1;
++    int ret = 0;
+     unsigned char *data_readable = NULL;
+     unsigned char *success_replies = NULL;
+ 
+@@ -2992,7 +2941,10 @@ afr_discover_unwind(call_frame_t *frame, xlator_t *this)
+     if (!afr_has_quorum(success_replies, this, frame))
+         goto unwind;
+ 
+-    afr_replies_interpret(frame, this, local->inode, NULL);
++    ret = afr_replies_interpret(frame, this, local->inode, NULL);
++    if (ret) {
++        afr_inode_need_refresh_set(local->inode, this);
++    }
+ 
+     read_subvol = afr_read_subvol_decide(local->inode, this, NULL,
+                                          data_readable);
+@@ -3248,11 +3200,7 @@ afr_discover(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+     afr_read_subvol_get(loc->inode, this, NULL, NULL, &event,
+                         AFR_DATA_TRANSACTION, NULL);
+ 
+-    if (afr_is_inode_refresh_reqd(loc->inode, this, event,
+-                                  local->event_generation))
+-        afr_inode_refresh(frame, this, loc->inode, NULL, afr_discover_do);
+-    else
+-        afr_discover_do(frame, this, 0);
++    afr_discover_do(frame, this, 0);
+ 
+     return 0;
+ out:
+@@ -3393,11 +3341,7 @@ afr_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+     afr_read_subvol_get(loc->parent, this, NULL, NULL, &event,
+                         AFR_DATA_TRANSACTION, NULL);
+ 
+-    if (afr_is_inode_refresh_reqd(loc->inode, this, event,
+-                                  local->event_generation))
+-        afr_inode_refresh(frame, this, loc->parent, NULL, afr_lookup_do);
+-    else
+-        afr_lookup_do(frame, this, 0);
++    afr_lookup_do(frame, this, 0);
+ 
+     return 0;
+ out:
+diff --git a/xlators/cluster/afr/src/afr-dir-write.c b/xlators/cluster/afr/src/afr-dir-write.c
+index 416c19d..d419bfc 100644
+--- a/xlators/cluster/afr/src/afr-dir-write.c
++++ b/xlators/cluster/afr/src/afr-dir-write.c
+@@ -123,11 +123,11 @@ __afr_dir_write_finalize(call_frame_t *frame, xlator_t *this)
+             continue;
+         if (local->replies[i].op_ret < 0) {
+             if (local->inode)
+-                afr_inode_event_gen_reset(local->inode, this);
++                afr_inode_need_refresh_set(local->inode, this);
+             if (local->parent)
+-                afr_inode_event_gen_reset(local->parent, this);
++                afr_inode_need_refresh_set(local->parent, this);
+             if (local->parent2)
+-                afr_inode_event_gen_reset(local->parent2, this);
++                afr_inode_need_refresh_set(local->parent2, this);
+             continue;
+         }
+ 
+diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
+index ed5096e..3a2b26d 100644
+--- a/xlators/cluster/afr/src/afr.h
++++ b/xlators/cluster/afr/src/afr.h
+@@ -948,7 +948,10 @@ afr_inode_read_subvol_set(inode_t *inode, xlator_t *this,
+                           int event_generation);
+ 
+ int
+-afr_inode_event_gen_reset(inode_t *inode, xlator_t *this);
++__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
++
++int
++afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
+ 
+ int
+ afr_read_subvol_select_by_policy(inode_t *inode, xlator_t *this,
+-- 
+1.8.3.1
+
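For context on the deleted reset helpers: with up to 16 replicas, AFR packs the readable maps and the event generation into the single 64-bit ctx->read_subvol word; bits 0-15 hold the metadata map, bits 16-31 the data map, and bits 32 and up the event generation, which is why the old code could zero the event by rebuilding the word. A minimal pack/unpack sketch of that layout (example values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t metadatamap = 0x0007;  /* children 0-2 metadata-readable */
        uint16_t datamap     = 0x0005;  /* children 0 and 2 data-readable */
        uint32_t event       = 42;      /* event generation */

        /* Pack, matching the layout in the removed reset helper. */
        uint64_t val = ((uint64_t)metadatamap) |
                       (((uint64_t)datamap) << 16) |
                       (((uint64_t)event) << 32);

        /* Unpack the three fields again. */
        printf("metadatamap=0x%04x datamap=0x%04x event=%u\n",
               (unsigned)(val & 0xffffu),
               (unsigned)((val >> 16) & 0xffffu),
               (unsigned)(val >> 32));

        /* The patch keeps 'event' intact and only sets ctx->need_refresh,
           instead of rebuilding 'val' with event forced to zero. */
        return 0;
    }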
diff --git a/SOURCES/0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch b/SOURCES/0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
new file mode 100644
index 0000000..9c7693a
--- /dev/null
+++ b/SOURCES/0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
@@ -0,0 +1,2155 @@
+From aab8a587360214432c4a2ab59134411f1d38c509 Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Wed, 9 Dec 2020 10:46:31 +0530
+Subject: [PATCH 515/517] cluster/afr: Heal directory rename without
+ rmdir/mkdir
+
+Problem 1:
+When a directory is renamed while a brick
+is down, entry-heal always did an 'rm -rf' on that directory on
+the sink in the old location, then did a mkdir and re-created the directory
+hierarchy in the new location. This is inefficient.
+
+Problem 2:
+The rename-dir heal order may lead to a scenario where the directory in
+the new location is created before it is deleted from the old
+location, leaving 2 directories with the same gfid in posix.
+
+Fix:
+As part of heal, if the old location is healed first and is not present on
+the source brick, always rename it into a hidden directory inside the
+sink brick, so that when heal is triggered for the new location shd can
+rename it from this hidden directory to the new location.
+
+If the new-location heal is triggered first and it detects that the
+directory already exists in the brick, then it should skip healing the
+directory until it appears in the hidden directory.
+
+Credits: Ravi for rename-data-loss.t script
+
+Upstream patch details:
+> Fixes: #1211
+> Change-Id: I0cba2006f35cd03d314d18211ce0bd530e254843
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24373/
+
+BUG: 1640148
+Change-Id: I0cba2006f35cd03d314d18211ce0bd530e254843
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/220660
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ tests/afr.rc                                    |  16 +
+ tests/basic/afr/afr-anon-inode-no-quorum.t      |  63 ++++
+ tests/basic/afr/afr-anon-inode.t                | 114 ++++++
+ tests/basic/afr/entry-self-heal-anon-dir-off.t  | 464 ++++++++++++++++++++++++
+ tests/basic/afr/rename-data-loss.t              |  72 ++++
+ tests/bugs/replicate/bug-1744548-heal-timeout.t |   6 +-
+ tests/features/trash.t                          |  74 ++--
+ xlators/cluster/afr/src/afr-common.c            |  46 ++-
+ xlators/cluster/afr/src/afr-dir-read.c          |  12 +-
+ xlators/cluster/afr/src/afr-self-heal-common.c  | 182 ++++++++++
+ xlators/cluster/afr/src/afr-self-heal-entry.c   | 206 +++++++++--
+ xlators/cluster/afr/src/afr-self-heal-name.c    |  33 +-
+ xlators/cluster/afr/src/afr-self-heal.h         |   5 +
+ xlators/cluster/afr/src/afr-self-heald.c        | 178 ++++++++-
+ xlators/cluster/afr/src/afr-self-heald.h        |   2 +-
+ xlators/cluster/afr/src/afr.c                   |  40 +-
+ xlators/cluster/afr/src/afr.h                   |  11 +
+ xlators/mgmt/glusterd/src/glusterd-volgen.c     |  39 ++
+ xlators/mgmt/glusterd/src/glusterd-volume-set.c |   6 +
+ 19 files changed, 1442 insertions(+), 127 deletions(-)
+ create mode 100644 tests/basic/afr/afr-anon-inode-no-quorum.t
+ create mode 100644 tests/basic/afr/afr-anon-inode.t
+ create mode 100644 tests/basic/afr/entry-self-heal-anon-dir-off.t
+ create mode 100644 tests/basic/afr/rename-data-loss.t
+
+diff --git a/tests/afr.rc b/tests/afr.rc
+index 35f352d..2417899 100644
+--- a/tests/afr.rc
++++ b/tests/afr.rc
+@@ -105,3 +105,19 @@ function get_quorum_type()
+         local repl_id="$3"
+         cat $m/.meta/graphs/active/$v-replicate-$repl_id/private|grep quorum-type|awk '{print $3}'
+ }
++
++function afr_private_key_value()
++{
++        local v=$1
++        local m=$2
++        local replica_id=$3
++        local key=$4
++#xargs at the end will strip leading spaces
++        grep -E "^${key} = " $m/.meta/graphs/active/${v}-replicate-${replica_id}/private | cut -f2 -d'=' | xargs
++}
++
++function afr_anon_entry_count()
++{
++    local b=$1
++    ls $b/.glusterfs-anonymous-inode* | wc -l
++}
+diff --git a/tests/basic/afr/afr-anon-inode-no-quorum.t b/tests/basic/afr/afr-anon-inode-no-quorum.t
+new file mode 100644
+index 0000000..896ba0c
+--- /dev/null
++++ b/tests/basic/afr/afr-anon-inode-no-quorum.t
+@@ -0,0 +1,63 @@
++#!/bin/bash
++
++#Test that anon-inode entry is not cleaned up as long as there exists at least
++#one valid entry
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
++TEST $CLI volume heal $V0 disable
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.readdir-ahead off
++TEST $CLI volume set $V0 performance.open-behind off
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 cluster.entry-self-heal off
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
++
++TEST touch $M0/a $M0/b
++
++gfid_a=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/a))
++gfid_b=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/b))
++TEST kill_brick $V0 $H0 $B0/${V0}0
++TEST mv $M0/a $M0/a-new
++TEST mv $M0/b $M0/b-new
++
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++TEST ! ls $M0/a
++TEST ! ls $M0/b
++anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
++TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
++TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
++#Make sure index heal doesn't happen after enabling heal
++TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
++TEST rm -f $B0/${V0}1/.glusterfs/indices/xattrop/*
++TEST $CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++TEST $CLI volume heal $V0
++#Allow time for a scan
++sleep 5
++TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
++TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
++inum_b=$(STAT_INO $B0/${V0}0/$anon_inode_name/$gfid_b)
++TEST rm -f $M0/a-new
++TEST stat $M0/b-new
++
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
++EXPECT "$inum_b" STAT_INO $B0/${V0}0/b-new
++
++cleanup
+diff --git a/tests/basic/afr/afr-anon-inode.t b/tests/basic/afr/afr-anon-inode.t
+new file mode 100644
+index 0000000..f4cf37a
+--- /dev/null
++++ b/tests/basic/afr/afr-anon-inode.t
+@@ -0,0 +1,114 @@
++#!/bin/bash
++#Tests that the afr-anon-inode cases work as expected.
++#These are cases where in entry-heal/name-heal we don't know the entry for an
++#inode, so these inodes are kept in a special directory.
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.open-behind off
++TEST $CLI volume start $V0
++TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
++EXPECT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
++TEST $CLI volume set $V0 cluster.use-anonymous-inode no
++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
++TEST $CLI volume set $V0 cluster.use-anonymous-inode yes
++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
++TEST mkdir -p $M0/d1/b $M0/d2/a
++TEST kill_brick $V0 $H0 $B0/${V0}0
++TEST mv $M0/d2/a $M0/d1
++TEST mv $M0/d1/b $M0/d2
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
++TEST [[ -d $B0/${V0}1/$anon_inode_name ]]
++TEST [[ -d $B0/${V0}2/$anon_inode_name ]]
++anon_gfid=$(gf_get_gfid_xattr $B0/${V0}0/$anon_inode_name)
++EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}1/$anon_inode_name
++EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}2/$anon_inode_name
++
++TEST ! ls $M0/$anon_inode_name
++EXPECT "^4$" echo $(ls -a $M0 | wc -l)
++
++#Test purging code path by shd
++TEST $CLI volume heal $V0 disable
++TEST mkdir $M0/l0 $M0/l1 $M0/l2
++TEST touch $M0/del-file $M0/del-file-nolink $M0/l0/file
++TEST ln $M0/del-file $M0/del-file-link
++TEST ln $M0/l0/file $M0/l1/file-link1
++TEST ln $M0/l0/file $M0/l2/file-link2
++TEST mkdir -p $M0/del-recursive-dir/d1
++
++TEST kill_brick $V0 $H0 $B0/${V0}0
++TEST rm -f $M0/del-file $M0/del-file-nolink
++TEST rm -rf $M0/del-recursive-dir
++TEST mv $M0/d1/a $M0/d2
++TEST mv $M0/l0/file $M0/l0/renamed-file
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 0
++
++nolink_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file-nolink))
++link_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file))
++dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-recursive-dir))
++rename_dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/d1/a))
++rename_file_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/l0/file))
++TEST ! stat $M0/del-file
++TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
++TEST ! stat $M0/del-file-nolink
++TEST ! stat $B0/${V0}0/$anon_inode_name/$nolink_gfid
++TEST ! stat $M0/del-recursive-dir
++TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
++TEST ! stat $M0/d1/a
++TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
++TEST ! stat $M0/l0/file
++TEST stat $B0/${V0}0/$anon_inode_name/$rename_file_gfid
++
++TEST kill_brick $V0 $H0 $B0/${V0}1
++TEST mv $M0/l1/file-link1 $M0/l1/renamed-file-link1
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
++TEST ! stat $M0/l1/file-link1
++TEST stat $B0/${V0}1/$anon_inode_name/$rename_file_gfid
++
++TEST kill_brick $V0 $H0 $B0/${V0}2
++TEST mv $M0/l2/file-link2 $M0/l2/renamed-file-link2
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 2
++TEST ! stat $M0/l2/file-link2
++TEST stat $B0/${V0}2/$anon_inode_name/$rename_file_gfid
++
++#Simulate only anon-inodes present in all bricks
++TEST rm -f $M0/l0/renamed-file $M0/l1/renamed-file-link1 $M0/l2/renamed-file-link2
++
++#Test that shd doesn't cleanup anon-inodes when some bricks are down
++TEST kill_brick $V0 $H0 $B0/${V0}1
++TEST $CLI volume heal $V0 enable
++$CLI volume heal $V0
++sleep 5 #Allow time for completion of one scan
++TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
++TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
++TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
++rename_dir_inum=$(STAT_INO $B0/${V0}0/$anon_inode_name/$rename_dir_gfid)
++
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}2
++
++#Test that rename indeed happened instead of rmdir/mkdir
++renamed_dir_inum=$(STAT_INO $B0/${V0}0/d2/a)
++EXPECT "$rename_dir_inum" echo $renamed_dir_inum
++cleanup;
+diff --git a/tests/basic/afr/entry-self-heal-anon-dir-off.t b/tests/basic/afr/entry-self-heal-anon-dir-off.t
+new file mode 100644
+index 0000000..0803a08
+--- /dev/null
++++ b/tests/basic/afr/entry-self-heal-anon-dir-off.t
+@@ -0,0 +1,464 @@
++#!/bin/bash
++
++#This file checks if missing entry self-heal and entry self-heal are working
++#as expected.
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++function get_file_type {
++        stat -c "%a:%F:%g:%t:%T:%u" $1
++}
++
++function diff_dirs {
++        diff <(ls $1 | sort) <(ls $2 | sort)
++}
++
++function heal_status {
++        local f1_path="${1}/${3}"
++        local f2_path="${2}/${3}"
++        local insync=""
++        diff_dirs $f1_path $f2_path
++        if [ $? -eq 0 ];
++        then
++                insync="Y"
++        else
++                insync="N"
++        fi
++        local xattr11=$(get_hex_xattr trusted.afr.$V0-client-0 $f1_path)
++        local xattr12=$(get_hex_xattr trusted.afr.$V0-client-1 $f1_path)
++        local xattr21=$(get_hex_xattr trusted.afr.$V0-client-0 $f2_path)
++        local xattr22=$(get_hex_xattr trusted.afr.$V0-client-1 $f2_path)
++        local dirty1=$(get_hex_xattr trusted.afr.dirty $f1_path)
++        local dirty2=$(get_hex_xattr trusted.afr.dirty $f2_path)
++        if [ -z $xattr11 ]; then xattr11="000000000000000000000000"; fi
++        if [ -z $xattr12 ]; then xattr12="000000000000000000000000"; fi
++        if [ -z $xattr21 ]; then xattr21="000000000000000000000000"; fi
++        if [ -z $xattr22 ]; then xattr22="000000000000000000000000"; fi
++        if [ -z $dirty1 ]; then dirty1="000000000000000000000000"; fi
++        if [ -z $dirty2 ]; then dirty2="000000000000000000000000"; fi
++        echo ${insync}${xattr11}${xattr12}${xattr21}${xattr22}${dirty1}${dirty2}
++}
++
++function is_heal_done {
++        local zero_xattr="000000000000000000000000"
++        if [ "$(heal_status $@)" == "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" ];
++        then
++                echo "Y"
++        else
++                echo "N"
++        fi
++}
++
++function print_pending_heals {
++        local result=":"
++        for i in "$@";
++        do
++                if [ "N" == $(is_heal_done $B0/${V0}0 $B0/${V0}1 $i) ];
++                then
++                        result="$result:$i"
++                fi
++        done
++#To prevent any match for EXPECT_WITHIN, print a char non-existent in file-names
++        if [ $result == ":" ]; then result="~"; fi
++        echo $result
++}
++
++zero_xattr="000000000000000000000000"
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
++TEST $CLI volume heal $V0 disable
++TEST $CLI volume set $V0 cluster.use-anonymous-inode off
++TEST $CLI volume set $V0 performance.write-behind off
++TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.readdir-ahead off
++TEST $CLI volume set $V0 performance.open-behind off
++TEST $CLI volume set $V0 performance.stat-prefetch off
++TEST $CLI volume set $V0 performance.io-cache off
++TEST $CLI volume set $V0 performance.quick-read off
++TEST $CLI volume set $V0 cluster.data-self-heal on
++TEST $CLI volume set $V0 cluster.metadata-self-heal on
++TEST $CLI volume set $V0 cluster.entry-self-heal on
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --use-readdirp=no $M0
++cd $M0
++#_me_ is dir on which missing entry self-heal happens, _heal is where dir self-heal happens
++#spb is split-brain, fool is all fool
++
++#source_self_accusing means there exists source and a sink which self-accuses.
++#This simulates failures where fops failed on the bricks without the bricks going down.
++#Something like EACCES/EDQUOT etc.
++
++TEST mkdir spb_heal spb spb_me_heal spb_me fool_heal fool_me v1_fool_heal v1_fool_me source_creations_heal source_deletions_heal source_creations_me source_deletions_me v1_dirty_me v1_dirty_heal source_self_accusing
++TEST mkfifo source_deletions_heal/fifo
++TEST mknod  source_deletions_heal/block b 4 5
++TEST mknod  source_deletions_heal/char c 1 5
++TEST touch  source_deletions_heal/file
++TEST ln -s  source_deletions_heal/file source_deletions_heal/slink
++TEST mkdir  source_deletions_heal/dir1
++TEST mkdir  source_deletions_heal/dir1/dir2
++
++TEST mkfifo source_deletions_me/fifo
++TEST mknod  source_deletions_me/block b 4 5
++TEST mknod  source_deletions_me/char c 1 5
++TEST touch  source_deletions_me/file
++TEST ln -s  source_deletions_me/file source_deletions_me/slink
++TEST mkdir  source_deletions_me/dir1
++TEST mkdir  source_deletions_me/dir1/dir2
++
++TEST mkfifo source_self_accusing/fifo
++TEST mknod  source_self_accusing/block b 4 5
++TEST mknod  source_self_accusing/char c 1 5
++TEST touch  source_self_accusing/file
++TEST ln -s  source_self_accusing/file source_self_accusing/slink
++TEST mkdir  source_self_accusing/dir1
++TEST mkdir  source_self_accusing/dir1/dir2
++
++TEST kill_brick $V0 $H0 $B0/${V0}0
++
++TEST touch spb_heal/0 spb/0 spb_me_heal/0 spb_me/0 fool_heal/0 fool_me/0 v1_fool_heal/0 v1_fool_me/0 v1_dirty_heal/0 v1_dirty_me/0
++TEST rm -rf source_deletions_heal/fifo source_deletions_heal/block source_deletions_heal/char source_deletions_heal/file source_deletions_heal/slink source_deletions_heal/dir1
++TEST rm -rf source_deletions_me/fifo source_deletions_me/block source_deletions_me/char source_deletions_me/file source_deletions_me/slink source_deletions_me/dir1
++TEST rm -rf source_self_accusing/fifo source_self_accusing/block source_self_accusing/char source_self_accusing/file source_self_accusing/slink source_self_accusing/dir1
++
++#Test that the files are deleted
++TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
++TEST ! stat $B0/${V0}1/source_deletions_heal/block
++TEST ! stat $B0/${V0}1/source_deletions_heal/char
++TEST ! stat $B0/${V0}1/source_deletions_heal/file
++TEST ! stat $B0/${V0}1/source_deletions_heal/slink
++TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
++TEST ! stat $B0/${V0}1/source_deletions_me/fifo
++TEST ! stat $B0/${V0}1/source_deletions_me/block
++TEST ! stat $B0/${V0}1/source_deletions_me/char
++TEST ! stat $B0/${V0}1/source_deletions_me/file
++TEST ! stat $B0/${V0}1/source_deletions_me/slink
++TEST ! stat $B0/${V0}1/source_deletions_me/dir1
++TEST ! stat $B0/${V0}1/source_self_accusing/fifo
++TEST ! stat $B0/${V0}1/source_self_accusing/block
++TEST ! stat $B0/${V0}1/source_self_accusing/char
++TEST ! stat $B0/${V0}1/source_self_accusing/file
++TEST ! stat $B0/${V0}1/source_self_accusing/slink
++TEST ! stat $B0/${V0}1/source_self_accusing/dir1
++
++
++TEST mkfifo source_creations_heal/fifo
++TEST mknod  source_creations_heal/block b 4 5
++TEST mknod  source_creations_heal/char c 1 5
++TEST touch  source_creations_heal/file
++TEST ln -s  source_creations_heal/file source_creations_heal/slink
++TEST mkdir  source_creations_heal/dir1
++TEST mkdir  source_creations_heal/dir1/dir2
++
++TEST mkfifo source_creations_me/fifo
++TEST mknod  source_creations_me/block b 4 5
++TEST mknod  source_creations_me/char c 1 5
++TEST touch  source_creations_me/file
++TEST ln -s  source_creations_me/file source_creations_me/slink
++TEST mkdir  source_creations_me/dir1
++TEST mkdir  source_creations_me/dir1/dir2
++
++$CLI volume stop $V0
++
++#simulate fool fool scenario for fool_* dirs
++setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/{fool_heal,fool_me}
++setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
++
++#Simulate v1-dirty(self-accusing but no pending ops on others) scenario for v1-dirty
++setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/v1_dirty_{heal,me}
++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/v1_dirty_{heal,me}
++
++$CLI volume start $V0 force
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++TEST kill_brick $V0 $H0 $B0/${V0}1
++
++TEST touch spb_heal/1 spb/0 spb_me_heal/1 spb_me/0 fool_heal/1 fool_me/1 v1_fool_heal/1 v1_fool_me/1
++
++$CLI volume stop $V0
++
++#simulate fool fool scenario for fool_* dirs
++setfattr -x trusted.afr.$V0-client-1 $B0/${V0}0/{fool_heal,fool_me}
++setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
++
++#simulate self-accusing for source_self_accusing
++TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000006 $B0/${V0}0/source_self_accusing
++
++$CLI volume start $V0 force
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++
++# Check if conservative merges happened correctly on _me_ dirs
++TEST stat spb_me_heal/1
++TEST stat $B0/${V0}0/spb_me_heal/1
++TEST stat $B0/${V0}1/spb_me_heal/1
++
++TEST stat spb_me_heal/0
++TEST stat $B0/${V0}0/spb_me_heal/0
++TEST stat $B0/${V0}1/spb_me_heal/0
++
++TEST stat fool_me/1
++TEST stat $B0/${V0}0/fool_me/1
++TEST stat $B0/${V0}1/fool_me/1
++
++TEST stat fool_me/0
++TEST stat $B0/${V0}0/fool_me/0
++TEST stat $B0/${V0}1/fool_me/0
++
++TEST stat v1_fool_me/0
++TEST stat $B0/${V0}0/v1_fool_me/0
++TEST stat $B0/${V0}1/v1_fool_me/0
++
++TEST stat v1_fool_me/1
++TEST stat $B0/${V0}0/v1_fool_me/1
++TEST stat $B0/${V0}1/v1_fool_me/1
++
++TEST stat v1_dirty_me/0
++TEST stat $B0/${V0}0/v1_dirty_me/0
++TEST stat $B0/${V0}1/v1_dirty_me/0
++
++#Check if files that have gfid-mismatches in _me_ are giving EIO
++TEST ! stat spb_me/0
++
++#Check if stale files are deleted on access
++TEST ! stat source_deletions_me/fifo
++TEST ! stat $B0/${V0}0/source_deletions_me/fifo
++TEST ! stat $B0/${V0}1/source_deletions_me/fifo
++TEST ! stat source_deletions_me/block
++TEST ! stat $B0/${V0}0/source_deletions_me/block
++TEST ! stat $B0/${V0}1/source_deletions_me/block
++TEST ! stat source_deletions_me/char
++TEST ! stat $B0/${V0}0/source_deletions_me/char
++TEST ! stat $B0/${V0}1/source_deletions_me/char
++TEST ! stat source_deletions_me/file
++TEST ! stat $B0/${V0}0/source_deletions_me/file
++TEST ! stat $B0/${V0}1/source_deletions_me/file
++TEST ! stat source_deletions_me/file
++TEST ! stat $B0/${V0}0/source_deletions_me/file
++TEST ! stat $B0/${V0}1/source_deletions_me/file
++TEST ! stat source_deletions_me/dir1/dir2
++TEST ! stat $B0/${V0}0/source_deletions_me/dir1/dir2
++TEST ! stat $B0/${V0}1/source_deletions_me/dir1/dir2
++TEST ! stat source_deletions_me/dir1
++TEST ! stat $B0/${V0}0/source_deletions_me/dir1
++TEST ! stat $B0/${V0}1/source_deletions_me/dir1
++
++#Test if the files created as part of access are healed correctly
++r=$(get_file_type source_creations_me/fifo)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/fifo
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/fifo
++TEST [ -p source_creations_me/fifo ]
++
++r=$(get_file_type source_creations_me/block)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/block
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/block
++EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/block
++EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/block
++TEST [ -b source_creations_me/block ]
++
++r=$(get_file_type source_creations_me/char)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/char
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/char
++EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/char
++EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/char
++TEST [ -c source_creations_me/char ]
++
++r=$(get_file_type source_creations_me/file)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/file
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/file
++TEST [ -f source_creations_me/file ]
++
++r=$(get_file_type source_creations_me/slink)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/slink
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/slink
++TEST [ -h source_creations_me/slink ]
++
++r=$(get_file_type source_creations_me/dir1/dir2)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1/dir2
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1/dir2
++TEST [ -d source_creations_me/dir1/dir2 ]
++
++r=$(get_file_type source_creations_me/dir1)
++EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1
++TEST [ -d source_creations_me/dir1 ]
++
++#Trigger heal and check _heal dirs are healed properly
++#Trigger change in event generation number. That way inodes would get refreshed during lookup
++TEST kill_brick $V0 $H0 $B0/${V0}1
++$CLI volume start $V0 force
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++
++TEST stat spb_heal
++TEST stat spb_me_heal
++TEST stat fool_heal
++TEST stat fool_me
++TEST stat v1_fool_heal
++TEST stat v1_fool_me
++TEST stat source_deletions_heal
++TEST stat source_deletions_me
++TEST stat source_self_accusing
++TEST stat source_creations_heal
++TEST stat source_creations_me
++TEST stat v1_dirty_heal
++TEST stat v1_dirty_me
++TEST $CLI volume stop $V0
++TEST rm -rf $B0/${V0}{0,1}/.glusterfs/indices/xattrop/*
++
++$CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++
++#Create base entry in indices/xattrop
++echo "Data" > $M0/FILE
++rm -f $M0/FILE
++EXPECT "1" count_index_entries $B0/${V0}0
++EXPECT "1" count_index_entries $B0/${V0}1
++
++TEST $CLI volume stop $V0;
++
++#Create entries for fool_heal and fool_me to ensure they are fully healed and dirty xattrs erased, before triggering index heal
++create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1
++
++$CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
++
++$CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++
++TEST $CLI volume heal $V0;
++EXPECT_WITHIN $HEAL_TIMEOUT "~" print_pending_heals spb_heal spb_me_heal fool_heal fool_me v1_fool_heal v1_fool_me source_deletions_heal source_deletions_me source_creations_heal source_creations_me v1_dirty_heal v1_dirty_me source_self_accusing
++
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_me_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_me
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_me
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_me
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_self_accusing
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_me
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_heal
++EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_me
++
++#Don't access the files/dirs from mount point as that may cause self-heals
++# Check if conservative merges happened correctly on heal dirs
++TEST stat $B0/${V0}0/spb_heal/1
++TEST stat $B0/${V0}1/spb_heal/1
++
++TEST stat $B0/${V0}0/spb_heal/0
++TEST stat $B0/${V0}1/spb_heal/0
++
++TEST stat $B0/${V0}0/fool_heal/1
++TEST stat $B0/${V0}1/fool_heal/1
++
++TEST stat $B0/${V0}0/fool_heal/0
++TEST stat $B0/${V0}1/fool_heal/0
++
++TEST stat $B0/${V0}0/v1_fool_heal/0
++TEST stat $B0/${V0}1/v1_fool_heal/0
++
++TEST stat $B0/${V0}0/v1_fool_heal/1
++TEST stat $B0/${V0}1/v1_fool_heal/1
++
++TEST stat $B0/${V0}0/v1_dirty_heal/0
++TEST stat $B0/${V0}1/v1_dirty_heal/0
++
++#Check if files that have gfid mismatches in spb return EIO
++TEST ! stat spb/0
++
++#Check if stale files are deleted on access
++TEST ! stat $B0/${V0}0/source_deletions_heal/fifo
++TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
++TEST ! stat $B0/${V0}0/source_deletions_heal/block
++TEST ! stat $B0/${V0}1/source_deletions_heal/block
++TEST ! stat $B0/${V0}0/source_deletions_heal/char
++TEST ! stat $B0/${V0}1/source_deletions_heal/char
++TEST ! stat $B0/${V0}0/source_deletions_heal/file
++TEST ! stat $B0/${V0}1/source_deletions_heal/file
++TEST ! stat $B0/${V0}0/source_deletions_heal/file
++TEST ! stat $B0/${V0}1/source_deletions_heal/file
++TEST ! stat $B0/${V0}0/source_deletions_heal/dir1/dir2
++TEST ! stat $B0/${V0}1/source_deletions_heal/dir1/dir2
++TEST ! stat $B0/${V0}0/source_deletions_heal/dir1
++TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
++
++#Check if stale files are deleted on access
++TEST ! stat $B0/${V0}0/source_self_accusing/fifo
++TEST ! stat $B0/${V0}1/source_self_accusing/fifo
++TEST ! stat $B0/${V0}0/source_self_accusing/block
++TEST ! stat $B0/${V0}1/source_self_accusing/block
++TEST ! stat $B0/${V0}0/source_self_accusing/char
++TEST ! stat $B0/${V0}1/source_self_accusing/char
++TEST ! stat $B0/${V0}0/source_self_accusing/file
++TEST ! stat $B0/${V0}1/source_self_accusing/file
++TEST ! stat $B0/${V0}0/source_self_accusing/file
++TEST ! stat $B0/${V0}1/source_self_accusing/file
++TEST ! stat $B0/${V0}0/source_self_accusing/dir1/dir2
++TEST ! stat $B0/${V0}1/source_self_accusing/dir1/dir2
++TEST ! stat $B0/${V0}0/source_self_accusing/dir1
++TEST ! stat $B0/${V0}1/source_self_accusing/dir1
++
++#Test if the files created as part of full self-heal are correct
++r=$(get_file_type $B0/${V0}0/source_creations_heal/fifo)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/fifo
++TEST [ -p $B0/${V0}0/source_creations_heal/fifo ]
++EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/block
++EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/block
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/block)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/block
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/char)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/char
++EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/char
++EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/char
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/file)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/file
++TEST [ -f $B0/${V0}0/source_creations_heal/file ]
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/slink)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/slink
++TEST [ -h $B0/${V0}0/source_creations_heal/slink ]
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1/dir2)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1/dir2
++TEST [ -d $B0/${V0}0/source_creations_heal/dir1/dir2 ]
++
++r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1)
++EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1
++TEST [ -d $B0/${V0}0/source_creations_heal/dir1 ]
++
++cd -
++
++#Anonymous directory shouldn't be created
++TEST mkdir $M0/rename-dir
++before_rename=$(STAT_INO $B0/${V0}1/rename-dir)
++TEST kill_brick $V0 $H0 $B0/${V0}1
++TEST mv $M0/rename-dir $M0/new-name
++TEST $CLI volume start $V0 force
++#Since features.ctime is not enabled by default in downstream, the test below
++#will fail. If the ctime feature is enabled, a trusted.glusterfs.mdata xattr
++#is set which differs for the parent in the gfid split-brain scenario, and
++#when lookup is triggered the gfid gets added to indices/xattrop, making the
++#test below pass upstream. Hence it is commented out here.
++#'spb' is in split-brain so pending-heal-count will be 2
++#EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
++after_rename=$(STAT_INO $B0/${V0}1/new-name)
++EXPECT "0" echo $(ls -a $B0/${V0}0/ | grep anonymous-inode | wc -l)
++EXPECT "0" echo $(ls -a $B0/${V0}1/ | grep anonymous-inode | wc -l)
++EXPECT_NOT "$before_rename" echo $after_rename
++cleanup
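
The test above finishes by asserting that healing the rename leaves no anonymous-inode directory behind on either brick. As a rough manual check outside the test harness (a sketch only; the brick paths are hypothetical), the same assertion can be made with plain shell:

    # Count anonymous-inode directories on each brick root; after a clean
    # heal of the rename, none should have been created.
    for brick in /bricks/brick0 /bricks/brick1; do
        ls -a "$brick" | grep -c 'anonymous-inode'
    done   # expected output: 0 for every brick
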
+diff --git a/tests/basic/afr/rename-data-loss.t b/tests/basic/afr/rename-data-loss.t
+new file mode 100644
+index 0000000..256ee2a
+--- /dev/null
++++ b/tests/basic/afr/rename-data-loss.t
+@@ -0,0 +1,72 @@
++#!/bin/bash
++#Self-heal tests
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
++TEST $CLI volume set $V0 write-behind off
++TEST $CLI volume set $V0 self-heal-daemon off
++TEST $CLI volume set $V0 data-self-heal off
++TEST $CLI volume set $V0 metadata-self-heal off
++TEST $CLI volume set $V0 entry-self-heal off
++TEST $CLI volume start $V0
++EXPECT 'Started' volinfo_field $V0 'Status'
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
++
++cd $M0
++TEST `echo "line1" >> file1`
++TEST mkdir dir1
++TEST mkdir dir2
++TEST mkdir -p dir1/dira/dirb
++TEST `echo "line1">>dir1/dira/dirb/file1`
++TEST mkdir delete_me
++TEST `echo "line1" >> delete_me/file1`
++
++#brick0 has witnessed the second write while brick1 is down.
++TEST kill_brick $V0 $H0 $B0/brick1
++TEST `echo "line2" >> file1`
++TEST `echo "line2" >> dir1/dira/dirb/file1`
++TEST `echo "line2" >> delete_me/file1`
++
++#Toggle the bricks that are up/down.
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
++TEST kill_brick $V0 $H0 $B0/brick0
++
++#Rename while brick0, the 'source' for data self-heals, is down.
++mv file1 file2
++mv dir1/dira dir2
++
++#Delete a dir when brick0 is down.
++rm -rf delete_me
++cd -
++
++#Bring everything up and trigger heal
++TEST $CLI volume set $V0 self-heal-daemon on
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick1
++
++#Remount to avoid reading from caches
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
++EXPECT "line2" tail -1 $M0/file2
++EXPECT "line2" tail -1 $M0/dir2/dira/dirb/file1
++TEST ! stat $M0/delete_me/file1
++TEST ! stat $M0/delete_me
++
++anon_inode_name=$(ls -a $B0/brick0 | grep glusterfs-anonymous-inode)
++TEST [[ -d $B0/brick0/$anon_inode_name ]]
++TEST [[ -d $B0/brick1/$anon_inode_name ]]
++cleanup
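
rename-data-loss.t ends the other way around: the hidden directory must exist on both bricks, but afr_anon_entry_count must have drained to zero once heal completes. An out-of-harness sketch of the same check, with hypothetical brick paths:

    # The hidden directory should be present on every replica, yet hold
    # no parked entries after the heal has finished.
    anon=$(ls -a /bricks/brick0 | grep glusterfs-anonymous-inode)
    [ -d "/bricks/brick0/$anon" ] && [ -d "/bricks/brick1/$anon" ] && echo "present"
    ls -A "/bricks/brick0/$anon" | wc -l   # expected: 0
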
+diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+index c208112..0115350 100644
+--- a/tests/bugs/replicate/bug-1744548-heal-timeout.t
++++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
+@@ -25,14 +25,14 @@ TEST ! $CLI volume heal $V0
+ TEST $CLI volume profile $V0 start
+ TEST $CLI volume profile $V0 info clear
+ TEST $CLI volume heal $V0 enable
+-# Each brick does 3 opendirs, corresponding to dirty, xattrop and entry-changes
+-EXPECT_WITHIN $HEAL_TIMEOUT "^333$" get_cumulative_opendir_count
++# Each brick does 4 opendirs, corresponding to dirty, xattrop, entry-changes and anonymous-inode
++EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
+ 
+ # Check that a change in heal-timeout is honoured immediately.
+ TEST $CLI volume set $V0 cluster.heal-timeout 5
+ sleep 10
+ # Two crawls must have happened.
+-EXPECT_WITHIN $HEAL_TIMEOUT "^999$" get_cumulative_opendir_count
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
+ 
+ # shd must not heal if it is disabled and heal-timeout is changed.
+ TEST $CLI volume heal $V0 disable
+diff --git a/tests/features/trash.t b/tests/features/trash.t
+index 472e909..da5b50b 100755
+--- a/tests/features/trash.t
++++ b/tests/features/trash.t
+@@ -94,105 +94,105 @@ wildcard_not_exists() {
+         if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
+ }
+ 
+-# testing glusterd [1-3]
++# testing glusterd
+ TEST glusterd
+ TEST pidof glusterd
+ TEST $CLI volume info
+ 
+-# creating distributed volume [4]
++# creating distributed volume
+ TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+ 
+-# checking volume status [5-7]
++# checking volume status
+ EXPECT "$V0" volinfo_field $V0 'Volume Name'
+ EXPECT 'Created' volinfo_field $V0 'Status'
+ EXPECT '2' brick_count $V0
+ 
+-# test without enabling trash translator [8]
++# test without enabling trash translator
+ TEST start_vol $V0 $M0
+ 
+-# test on enabling trash translator [9-10]
++# test on enabling trash translator
+ TEST $CLI volume set $V0 features.trash on
+ EXPECT 'on' volinfo_field $V0 'features.trash'
+ 
+-# files directly under mount point [11]
++# files directly under mount point
+ create_files $M0/file1 $M0/file2
+ TEST file_exists $V0 file1 file2
+ 
+-# perform unlink [12]
++# perform unlink
+ TEST unlink_op file1
+ 
+-# perform truncate [13]
++# perform truncate
+ TEST truncate_op file2 4
+ 
+-# create files directory hierarchy and check [14]
++# create files directory hierarchy and check
+ mkdir -p $M0/1/2/3
+ create_files $M0/1/2/3/foo1 $M0/1/2/3/foo2
+ TEST file_exists $V0 1/2/3/foo1 1/2/3/foo2
+ 
+-# perform unlink [15]
++# perform unlink
+ TEST unlink_op 1/2/3/foo1
+ 
+-# perform truncate [16]
++# perform truncate
+ TEST truncate_op 1/2/3/foo2 4
+ 
+ # create a directory for eliminate pattern
+ mkdir $M0/a
+ 
+-# set the eliminate pattern [17-18]
++# set the eliminate pattern
+ TEST $CLI volume set $V0 features.trash-eliminate-path /a
+ EXPECT '/a' volinfo_field $V0 'features.trash-eliminate-path'
+ 
+-# create two files and check [19]
++# create two files and check
+ create_files $M0/a/test1 $M0/a/test2
+ TEST file_exists $V0 a/test1 a/test2
+ 
+-# remove from eliminate pattern [20]
++# remove from eliminate pattern
+ rm -f $M0/a/test1
+ EXPECT "Y" wildcard_not_exists $M0/.trashcan/a/test1*
+ 
+-# truncate from eliminate path [21-23]
++# truncate from eliminate path
+ truncate -s 2 $M0/a/test2
+ TEST [ -e $M0/a/test2 ]
+ TEST [ `ls -l $M0/a/test2 | awk '{print $5}'` -eq 2 ]
+ EXPECT "Y" wildcard_not_exists $M0/.trashcan/a/test2*
+ 
+-# set internal op on [24-25]
++# set internal op on
+ TEST $CLI volume set $V0 features.trash-internal-op on
+ EXPECT 'on' volinfo_field $V0 'features.trash-internal-op'
+ 
+-# again create two files and check [26]
++# again create two files and check
+ create_files $M0/inop1 $M0/inop2
+ TEST file_exists $V0 inop1 inop2
+ 
+-# perform unlink [27]
++# perform unlink
+ TEST unlink_op inop1
+ 
+-# perform truncate [28]
++# perform truncate
+ TEST truncate_op inop2 4
+ 
+-# remove one brick and restart the volume [28-31]
++# remove one brick and restart the volume
+ TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force
+ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+ TEST $CLI volume stop $V0
+ TEST start_vol $V0 $M0 $M0/.trashcan
+ 
+-# again create two files and check [33]
++# again create two files and check
+ create_files $M0/rebal1 $M0/rebal2
+ TEST file_exists $V0 rebal1 rebal2
+ 
+-# add one brick [34-35]
++# add one brick
+ TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+ TEST [ -d $B0/${V0}3 ]
+ 
+ 
+-# perform rebalance [36]
++# perform rebalance
+ TEST $CLI volume rebalance $V0 start force
+ EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
+ 
+ #Find out which file was migrated to the new brick
+ file_name=$(ls $B0/${V0}3/rebal*| xargs basename)
+ 
+-# check whether rebalance was succesful [37-40]
++# check whether rebalance was successful
+ EXPECT "Y" wildcard_exists $B0/${V0}3/$file_name*
+ EXPECT "Y" wildcard_exists $B0/${V0}1/.trashcan/internal_op/$file_name*
+ 
+@@ -201,52 +201,42 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+ # force required in case rebalance is not over
+ TEST $CLI volume stop $V0 force
+ 
+-# create a replicated volume [41]
++# create a replicated volume
+ TEST $CLI volume create $V1 replica 2 $H0:$B0/${V1}{1,2}
+ 
+-# checking volume status [42-45]
++# checking volume status
+ EXPECT "$V1" volinfo_field $V1 'Volume Name'
+ EXPECT 'Replicate' volinfo_field $V1 'Type'
+ EXPECT 'Created' volinfo_field $V1 'Status'
+ EXPECT '2' brick_count $V1
+ 
+-# enable trash with options and start the replicate volume by disabling automatic self-heal [46-50]
++# enable trash with options and start the replicate volume by disabling automatic self-heal
+ TEST $CLI volume set $V1 features.trash on
+ TEST $CLI volume set $V1 features.trash-internal-op on
+ EXPECT 'on' volinfo_field $V1 'features.trash'
+ EXPECT 'on' volinfo_field $V1 'features.trash-internal-op'
+ TEST start_vol $V1 $M1 $M1/.trashcan
+ 
+-# mount and check for trash directory [51]
++# mount and check for trash directory
+ TEST [ -d $M1/.trashcan/internal_op ]
+ 
+-# create a file and check [52]
++# create a file and check
+ touch $M1/self
+ TEST [ -e $B0/${V1}1/self -a -e $B0/${V1}2/self ]
+ 
+-# kill one brick and delete the file from mount point [53-54]
++# kill one brick and delete the file from mount point
+ kill_brick $V1 $H0 $B0/${V1}1
+ EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+ rm -f $M1/self
+ EXPECT "Y" wildcard_exists $B0/${V1}2/.trashcan/self*
+ 
+-# force start the volume and trigger the self-heal manually [55-57]
+-TEST $CLI volume start $V1 force
+-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
+-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+-# Since we created the file under root of the volume, it will be
+-# healed automatically
+-
+-# check for the removed file in trashcan [58]
+-EXPECT_WITHIN $HEAL_TIMEOUT "Y" wildcard_exists $B0/${V1}1/.trashcan/internal_op/self*
+-
+-# check renaming of trash directory through cli [59-62]
++# check renaming of trash directory through cli
+ TEST $CLI volume set $V0 trash-dir abc
+ TEST start_vol $V0 $M0 $M0/abc
+ TEST [ -e $M0/abc -a ! -e $M0/.trashcan ]
+ EXPECT "Y" wildcard_exists $B0/${V0}1/abc/internal_op/rebal*
+ 
+-# ensure that rename and delete operation on trash directory fails [63-65]
++# ensure that rename and delete operation on trash directory fails
+ rm -rf $M0/abc/internal_op
+ TEST [ -e $M0/abc/internal_op ]
+ rm -rf $M0/abc/
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 90b4f14..6f2da11 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -47,6 +47,41 @@ afr_quorum_errno(afr_private_t *priv)
+     return ENOTCONN;
+ }
+ 
++gf_boolean_t
++afr_is_private_directory(afr_private_t *priv, uuid_t pargfid, const char *name,
++                         pid_t pid)
++{
++    if (!__is_root_gfid(pargfid)) {
++        return _gf_false;
++    }
++
++    if (strcmp(name, GF_REPLICATE_TRASH_DIR) == 0) {
++        /*For backward compatibility /.landfill is private*/
++        return _gf_true;
++    }
++
++    if (pid == GF_CLIENT_PID_GSYNCD) {
++        /*geo-rep needs to create/sync private directory on slave because
++         * it appears in changelog*/
++        return _gf_false;
++    }
++
++    if (pid == GF_CLIENT_PID_GLFS_HEAL || pid == GF_CLIENT_PID_SELF_HEALD) {
++        if (strcmp(name, priv->anon_inode_name) == 0) {
++            /* anonymous-inode dir is private*/
++            return _gf_true;
++        }
++    } else {
++        if (strncmp(name, AFR_ANON_DIR_PREFIX, strlen(AFR_ANON_DIR_PREFIX)) ==
++            0) {
++            /* anonymous-inode dir prefix is private for geo-rep to work*/
++            return _gf_true;
++        }
++    }
++
++    return _gf_false;
++}
++
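
afr_is_private_directory() keeps /.landfill and the per-volume anonymous-inode directory out of reach of ordinary clients while still letting glfsheal, the self-heal daemon and geo-rep see what they need. The client-visible effect, sketched with hypothetical mount and brick paths:

    # On a regular fuse mount the directory is filtered out of readdir,
    # and an explicit lookup on it fails with EPERM:
    ls -a /mnt/vol | grep anonymous-inode      # expected: no output
    # On the brick backend it is an ordinary directory:
    ls -ad /bricks/brick0/.glusterfs-anonymous-inode-*
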
+ int
+ afr_fav_child_reset_sink_xattrs(void *opaque);
+ 
+@@ -3301,11 +3336,10 @@ afr_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+         return 0;
+     }
+ 
+-    if (__is_root_gfid(loc->parent->gfid)) {
+-        if (!strcmp(loc->name, GF_REPLICATE_TRASH_DIR)) {
+-            op_errno = EPERM;
+-            goto out;
+-        }
++    if (afr_is_private_directory(this->private, loc->parent->gfid, loc->name,
++                                 frame->root->pid)) {
++        op_errno = EPERM;
++        goto out;
+     }
+ 
+     local = AFR_FRAME_INIT(frame, op_errno);
+@@ -4832,6 +4866,7 @@ afr_priv_dump(xlator_t *this)
+                        priv->background_self_heal_count);
+     gf_proc_dump_write("healers", "%d", priv->healers);
+     gf_proc_dump_write("read-hash-mode", "%d", priv->hash_mode);
++    gf_proc_dump_write("use-anonymous-inode", "%d", priv->use_anon_inode);
+     if (priv->quorum_count == AFR_QUORUM_AUTO) {
+         gf_proc_dump_write("quorum-type", "auto");
+     } else if (priv->quorum_count == 0) {
+@@ -5792,6 +5827,7 @@ afr_priv_destroy(afr_private_t *priv)
+     GF_FREE(priv->local);
+     GF_FREE(priv->pending_key);
+     GF_FREE(priv->children);
++    GF_FREE(priv->anon_inode);
+     GF_FREE(priv->child_up);
+     GF_FREE(priv->child_latency);
+     LOCK_DESTROY(&priv->lock);
+diff --git a/xlators/cluster/afr/src/afr-dir-read.c b/xlators/cluster/afr/src/afr-dir-read.c
+index 6307b63..d64b6a9 100644
+--- a/xlators/cluster/afr/src/afr-dir-read.c
++++ b/xlators/cluster/afr/src/afr-dir-read.c
+@@ -158,8 +158,8 @@ afr_validate_read_subvol(inode_t *inode, xlator_t *this, int par_read_subvol)
+ }
+ 
+ static void
+-afr_readdir_transform_entries(gf_dirent_t *subvol_entries, int subvol,
+-                              gf_dirent_t *entries, fd_t *fd)
++afr_readdir_transform_entries(call_frame_t *frame, gf_dirent_t *subvol_entries,
++                              int subvol, gf_dirent_t *entries, fd_t *fd)
+ {
+     int ret = -1;
+     gf_dirent_t *entry = NULL;
+@@ -177,8 +177,8 @@ afr_readdir_transform_entries(gf_dirent_t *subvol_entries, int subvol,
+ 
+     list_for_each_entry_safe(entry, tmp, &subvol_entries->list, list)
+     {
+-        if (__is_root_gfid(fd->inode->gfid) &&
+-            !strcmp(entry->d_name, GF_REPLICATE_TRASH_DIR)) {
++        if (afr_is_private_directory(priv, fd->inode->gfid, entry->d_name,
++                                     frame->root->pid)) {
+             continue;
+         }
+ 
+@@ -222,8 +222,8 @@ afr_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+     }
+ 
+     if (op_ret >= 0)
+-        afr_readdir_transform_entries(subvol_entries, (long)cookie, &entries,
+-                                      local->fd);
++        afr_readdir_transform_entries(frame, subvol_entries, (long)cookie,
++                                      &entries, local->fd);
+ 
+     AFR_STACK_UNWIND(readdir, frame, op_ret, op_errno, &entries, xdata);
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index 9b6575f..0a8a7fd 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -2753,3 +2753,185 @@ afr_choose_source_by_policy(afr_private_t *priv, unsigned char *sources,
+ out:
+     return source;
+ }
++
++static int
++afr_anon_inode_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
++                         int32_t op_ret, int32_t op_errno, inode_t *inode,
++                         struct iatt *buf, struct iatt *preparent,
++                         struct iatt *postparent, dict_t *xdata)
++{
++    afr_local_t *local = frame->local;
++    int i = (long)cookie;
++
++    local->replies[i].valid = 1;
++    local->replies[i].op_ret = op_ret;
++    local->replies[i].op_errno = op_errno;
++    if (op_ret == 0) {
++        local->op_ret = 0;
++        local->replies[i].poststat = *buf;
++        local->replies[i].preparent = *preparent;
++        local->replies[i].postparent = *postparent;
++    }
++    if (xdata) {
++        local->replies[i].xdata = dict_ref(xdata);
++    }
++
++    syncbarrier_wake(&local->barrier);
++    return 0;
++}
++
++int
++afr_anon_inode_create(xlator_t *this, int child, inode_t **linked_inode)
++{
++    call_frame_t *frame = NULL;
++    afr_local_t *local = NULL;
++    afr_private_t *priv = this->private;
++    unsigned char *mkdir_on = alloca0(priv->child_count);
++    unsigned char *lookup_on = alloca0(priv->child_count);
++    loc_t loc = {0};
++    int32_t op_errno = 0;
++    int32_t child_op_errno = 0;
++    struct iatt iatt = {0};
++    dict_t *xdata = NULL;
++    uuid_t anon_inode_gfid = {0};
++    int mkdir_count = 0;
++    int i = 0;
++
++    /*Try to mkdir everywhere and return success if the dir exists on 'child'
++     */
++
++    if (!priv->use_anon_inode) {
++        op_errno = EINVAL;
++        goto out;
++    }
++
++    frame = afr_frame_create(this, &op_errno);
++    if (op_errno) {
++        goto out;
++    }
++    local = frame->local;
++    if (!local->child_up[child]) {
++        /*Other bricks may need mkdir so don't error out yet*/
++        child_op_errno = ENOTCONN;
++    }
++    gf_uuid_parse(priv->anon_gfid_str, anon_inode_gfid);
++    for (i = 0; i < priv->child_count; i++) {
++        if (!local->child_up[i])
++            continue;
++
++        if (priv->anon_inode[i]) {
++            mkdir_on[i] = 0;
++        } else {
++            mkdir_on[i] = 1;
++            mkdir_count++;
++        }
++    }
++
++    if (mkdir_count == 0) {
++        *linked_inode = inode_find(this->itable, anon_inode_gfid);
++        if (*linked_inode) {
++            op_errno = 0;
++            goto out;
++        }
++    }
++
++    loc.parent = inode_ref(this->itable->root);
++    loc.name = priv->anon_inode_name;
++    loc.inode = inode_new(this->itable);
++    if (!loc.inode) {
++        op_errno = ENOMEM;
++        goto out;
++    }
++
++    xdata = dict_new();
++    if (!xdata) {
++        op_errno = ENOMEM;
++        goto out;
++    }
++
++    op_errno = -dict_set_gfuuid(xdata, "gfid-req", anon_inode_gfid, _gf_true);
++    if (op_errno) {
++        goto out;
++    }
++
++    if (mkdir_count == 0) {
++        memcpy(lookup_on, local->child_up, priv->child_count);
++        goto lookup;
++    }
++
++    AFR_ONLIST(mkdir_on, frame, afr_anon_inode_mkdir_cbk, mkdir, &loc, 0755, 0,
++               xdata);
++
++    for (i = 0; i < priv->child_count; i++) {
++        if (!mkdir_on[i]) {
++            continue;
++        }
++
++        if (local->replies[i].op_ret == 0) {
++            priv->anon_inode[i] = 1;
++            iatt = local->replies[i].poststat;
++        } else if (local->replies[i].op_ret < 0 &&
++                   local->replies[i].op_errno == EEXIST) {
++            lookup_on[i] = 1;
++        } else if (i == child) {
++            child_op_errno = local->replies[i].op_errno;
++        }
++    }
++
++    if (AFR_COUNT(lookup_on, priv->child_count) == 0) {
++        goto link;
++    }
++
++lookup:
++    AFR_ONLIST(lookup_on, frame, afr_selfheal_discover_cbk, lookup, &loc,
++               xdata);
++    for (i = 0; i < priv->child_count; i++) {
++        if (!lookup_on[i]) {
++            continue;
++        }
++
++        if (local->replies[i].op_ret == 0) {
++            if (gf_uuid_compare(anon_inode_gfid,
++                                local->replies[i].poststat.ia_gfid) == 0) {
++                priv->anon_inode[i] = 1;
++                iatt = local->replies[i].poststat;
++            } else {
++                if (i == child)
++                    child_op_errno = EINVAL;
++                gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_INVALID_DATA,
++                       "%s has gfid: %s", priv->anon_inode_name,
++                       uuid_utoa(local->replies[i].poststat.ia_gfid));
++            }
++        } else if (i == child) {
++            child_op_errno = local->replies[i].op_errno;
++        }
++    }
++link:
++    if (!gf_uuid_is_null(iatt.ia_gfid)) {
++        *linked_inode = inode_link(loc.inode, loc.parent, loc.name, &iatt);
++        if (*linked_inode) {
++            op_errno = 0;
++            inode_lookup(*linked_inode);
++        } else {
++            op_errno = ENOMEM;
++        }
++        goto out;
++    }
++
++out:
++    if (xdata)
++        dict_unref(xdata);
++    loc_wipe(&loc);
++    /*child_op_errno takes precedence*/
++    if (child_op_errno == 0) {
++        child_op_errno = op_errno;
++    }
++
++    if (child_op_errno && *linked_inode) {
++        inode_unref(*linked_inode);
++        *linked_inode = NULL;
++    }
++    if (frame)
++        AFR_STACK_DESTROY(frame);
++    return -child_op_errno;
++}
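
Because afr_anon_inode_create() issues the mkdir with a fixed gfid-req on every up brick and falls back to a lookup on EEXIST, the directory converges to a single gfid cluster-wide; a brick whose copy carries a different gfid is logged as invalid data and never marked in priv->anon_inode. A quick consistency check (brick paths assumed):

    # trusted.gfid must be identical on all replicas of the hidden dir.
    getfattr -n trusted.gfid -e hex \
        /bricks/brick0/.glusterfs-anonymous-inode-* \
        /bricks/brick1/.glusterfs-anonymous-inode-* 2>/dev/null
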
+diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
+index 00b5b2d..20b07dd 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
+@@ -16,54 +16,170 @@
+ #include <glusterfs/syncop-utils.h>
+ #include <glusterfs/events.h>
+ 
+-static int
+-afr_selfheal_entry_delete(xlator_t *this, inode_t *dir, const char *name,
+-                          inode_t *inode, int child, struct afr_reply *replies)
++int
++afr_selfheal_entry_anon_inode(xlator_t *this, inode_t *dir, const char *name,
++                              inode_t *inode, int child,
++                              struct afr_reply *replies,
++                              gf_boolean_t *anon_inode)
+ {
+     afr_private_t *priv = NULL;
++    afr_local_t *local = NULL;
+     xlator_t *subvol = NULL;
+     int ret = 0;
++    int i = 0;
++    char g[64] = {0};
++    unsigned char *lookup_success = NULL;
++    call_frame_t *frame = NULL;
++    loc_t loc2 = {
++        0,
++    };
+     loc_t loc = {
+         0,
+     };
+-    char g[64];
+ 
+     priv = this->private;
+-
+     subvol = priv->children[child];
++    lookup_success = alloca0(priv->child_count);
++    uuid_utoa_r(replies[child].poststat.ia_gfid, g);
++    loc.inode = inode_new(inode->table);
++    if (!loc.inode) {
++        ret = -ENOMEM;
++        goto out;
++    }
++
++    if (replies[child].poststat.ia_type == IA_IFDIR) {
++        /* This directory may have sub-directory hierarchy which may need to
++         * be preserved for subsequent heals. So unconditionally move the
++         * directory to anonymous-inode directory*/
++        *anon_inode = _gf_true;
++        goto anon_inode;
++    }
++
++    frame = afr_frame_create(this, &ret);
++    if (!frame) {
++        ret = -ret;
++        goto out;
++    }
++    local = frame->local;
++    gf_uuid_copy(loc.gfid, replies[child].poststat.ia_gfid);
++    AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup, &loc,
++               NULL);
++    for (i = 0; i < priv->child_count; i++) {
++        if (local->replies[i].op_ret == 0) {
++            lookup_success[i] = 1;
++        } else if (local->replies[i].op_errno != ENOENT &&
++                   local->replies[i].op_errno != ESTALE) {
++            ret = -local->replies[i].op_errno;
++        }
++    }
++
++    if (priv->quorum_count) {
++        if (afr_has_quorum(lookup_success, this, NULL)) {
++            *anon_inode = _gf_true;
++        }
++    } else if (AFR_COUNT(lookup_success, priv->child_count) > 1) {
++        *anon_inode = _gf_true;
++    } else if (ret) {
++        goto out;
++    }
++
++anon_inode:
++    if (!*anon_inode) {
++        ret = 0;
++        goto out;
++    }
+ 
+     loc.parent = inode_ref(dir);
+     gf_uuid_copy(loc.pargfid, dir->gfid);
+     loc.name = name;
+-    loc.inode = inode_ref(inode);
+ 
+-    if (replies[child].valid && replies[child].op_ret == 0) {
+-        switch (replies[child].poststat.ia_type) {
+-            case IA_IFDIR:
+-                gf_msg(this->name, GF_LOG_WARNING, 0,
+-                       AFR_MSG_EXPUNGING_FILE_OR_DIR,
+-                       "expunging dir %s/%s (%s) on %s", uuid_utoa(dir->gfid),
+-                       name, uuid_utoa_r(replies[child].poststat.ia_gfid, g),
+-                       subvol->name);
+-                ret = syncop_rmdir(subvol, &loc, 1, NULL, NULL);
+-                break;
+-            default:
+-                gf_msg(this->name, GF_LOG_WARNING, 0,
+-                       AFR_MSG_EXPUNGING_FILE_OR_DIR,
+-                       "expunging file %s/%s (%s) on %s", uuid_utoa(dir->gfid),
+-                       name, uuid_utoa_r(replies[child].poststat.ia_gfid, g),
+-                       subvol->name);
+-                ret = syncop_unlink(subvol, &loc, NULL, NULL);
+-                break;
+-        }
++    ret = afr_anon_inode_create(this, child, &loc2.parent);
++    if (ret < 0)
++        goto out;
++
++    loc2.name = g;
++    ret = syncop_rename(subvol, &loc, &loc2, NULL, NULL);
++    if (ret < 0) {
++        gf_msg(this->name, GF_LOG_WARNING, -ret, AFR_MSG_EXPUNGING_FILE_OR_DIR,
++               "Rename to %s dir %s/%s (%s) on %s failed",
++               priv->anon_inode_name, uuid_utoa(dir->gfid), name, g,
++               subvol->name);
++    } else {
++        gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
++               "Rename to %s dir %s/%s (%s) on %s successful",
++               priv->anon_inode_name, uuid_utoa(dir->gfid), name, g,
++               subvol->name);
+     }
+ 
++out:
+     loc_wipe(&loc);
++    loc_wipe(&loc2);
++    if (frame) {
++        AFR_STACK_DESTROY(frame);
++    }
+ 
+     return ret;
+ }
+ 
+ int
++afr_selfheal_entry_delete(xlator_t *this, inode_t *dir, const char *name,
++                          inode_t *inode, int child, struct afr_reply *replies)
++{
++    char g[64] = {0};
++    afr_private_t *priv = NULL;
++    xlator_t *subvol = NULL;
++    int ret = 0;
++    loc_t loc = {
++        0,
++    };
++    gf_boolean_t anon_inode = _gf_false;
++
++    priv = this->private;
++    subvol = priv->children[child];
++
++    if ((!replies[child].valid) || (replies[child].op_ret < 0)) {
++        /*Nothing to do*/
++        ret = 0;
++        goto out;
++    }
++
++    if (priv->use_anon_inode) {
++        ret = afr_selfheal_entry_anon_inode(this, dir, name, inode, child,
++                                            replies, &anon_inode);
++        if (ret < 0 || anon_inode)
++            goto out;
++    }
++
++    loc.parent = inode_ref(dir);
++    loc.inode = inode_new(inode->table);
++    if (!loc.inode) {
++        ret = -ENOMEM;
++        goto out;
++    }
++    loc.name = name;
++    switch (replies[child].poststat.ia_type) {
++        case IA_IFDIR:
++            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
++                   "expunging dir %s/%s (%s) on %s", uuid_utoa(dir->gfid), name,
++                   uuid_utoa_r(replies[child].poststat.ia_gfid, g),
++                   subvol->name);
++            ret = syncop_rmdir(subvol, &loc, 1, NULL, NULL);
++            break;
++        default:
++            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_EXPUNGING_FILE_OR_DIR,
++                   "expunging file %s/%s (%s) on %s", uuid_utoa(dir->gfid),
++                   name, uuid_utoa_r(replies[child].poststat.ia_gfid, g),
++                   subvol->name);
++            ret = syncop_unlink(subvol, &loc, NULL, NULL);
++            break;
++    }
++
++out:
++    loc_wipe(&loc);
++    return ret;
++}
++
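
With use-anonymous-inode enabled, afr_selfheal_entry_delete() consults afr_selfheal_entry_anon_inode() first: directories are always parked in the hidden directory (their subtree may matter to later heals), and files are parked when their gfid is still alive on a quorum, or on more than one brick when no quorum is configured; everything else is expunged as before. Both outcomes are logged by the self-heal daemon, so the decision can be observed roughly like this (the log path is an assumption):

    # Parked entries log "Rename to <anon dir> ... successful"; plain
    # deletions keep the old "expunging file/dir" message.
    grep -E 'Rename to .*anonymous-inode|expunging' \
        /var/log/glusterfs/glustershd.log | tail -n 20
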
++int
+ afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+                             unsigned char *sources, inode_t *dir,
+                             const char *name, inode_t *inode,
+@@ -76,6 +192,9 @@ afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+     loc_t srcloc = {
+         0,
+     };
++    loc_t anonloc = {
++        0,
++    };
+     xlator_t *this = frame->this;
+     afr_private_t *priv = NULL;
+     dict_t *xdata = NULL;
+@@ -86,15 +205,18 @@ afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+         0,
+     };
+     unsigned char *newentry = NULL;
++    char iatt_uuid_str[64] = {0};
++    char dir_uuid_str[64] = {0};
+ 
+     priv = this->private;
+     iatt = &replies[source].poststat;
++    uuid_utoa_r(iatt->ia_gfid, iatt_uuid_str);
+     if (iatt->ia_type == IA_INVAL || gf_uuid_is_null(iatt->ia_gfid)) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SELF_HEAL_FAILED,
+                "Invalid ia_type (%d) or gfid(%s). source brick=%d, "
+                "pargfid=%s, name=%s",
+-               iatt->ia_type, uuid_utoa(iatt->ia_gfid), source,
+-               uuid_utoa(dir->gfid), name);
++               iatt->ia_type, iatt_uuid_str, source,
++               uuid_utoa_r(dir->gfid, dir_uuid_str), name);
+         ret = -EINVAL;
+         goto out;
+     }
+@@ -119,14 +241,24 @@ afr_selfheal_recreate_entry(call_frame_t *frame, int dst, int source,
+ 
+     srcloc.inode = inode_ref(inode);
+     gf_uuid_copy(srcloc.gfid, iatt->ia_gfid);
+-    if (iatt->ia_type != IA_IFDIR)
+-        ret = syncop_lookup(priv->children[dst], &srcloc, 0, 0, 0, 0);
+-    if (iatt->ia_type == IA_IFDIR || ret == -ENOENT || ret == -ESTALE) {
++    ret = syncop_lookup(priv->children[dst], &srcloc, 0, 0, 0, 0);
++    if (ret == -ENOENT || ret == -ESTALE) {
+         newentry[dst] = 1;
+         ret = afr_selfheal_newentry_mark(frame, this, inode, source, replies,
+                                          sources, newentry);
+         if (ret)
+             goto out;
++    } else if (ret == 0 && iatt->ia_type == IA_IFDIR && priv->use_anon_inode) {
++        // Try rename from hidden directory
++        ret = afr_anon_inode_create(this, dst, &anonloc.parent);
++        if (ret < 0)
++            goto out;
++        anonloc.inode = inode_ref(inode);
++        anonloc.name = iatt_uuid_str;
++        ret = syncop_rename(priv->children[dst], &anonloc, &loc, NULL, NULL);
++        if (ret == -ENOENT || ret == -ESTALE)
++            ret = -1; /*This sets 'mismatch' to true*/
++        goto out;
+     }
+ 
+     mode = st_mode_from_ia(iatt->ia_prot, iatt->ia_type);
+@@ -165,6 +297,7 @@ out:
+     GF_FREE(linkname);
+     loc_wipe(&loc);
+     loc_wipe(&srcloc);
++    loc_wipe(&anonloc);
+     return ret;
+ }
+ 
+@@ -580,6 +713,11 @@ afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ 
+     priv = this->private;
+ 
++    if (afr_is_private_directory(priv, fd->inode->gfid, name,
++                                 GF_CLIENT_PID_SELF_HEALD)) {
++        return 0;
++    }
++
+     xattr = dict_new();
+     if (!xattr)
+         return -ENOMEM;
+@@ -628,7 +766,7 @@ afr_selfheal_entry_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+                                           replies);
+ 
+         if ((ret == 0) && (priv->esh_granular) && parent_idx_inode) {
+-            ret = afr_shd_index_purge(subvol, parent_idx_inode, name,
++            ret = afr_shd_entry_purge(subvol, parent_idx_inode, name,
+                                       inode->ia_type);
+             /* Why is ret force-set to 0? We do not care about
+              * index purge failing for full heal as it is quite
+@@ -758,10 +896,6 @@ afr_selfheal_entry_do_subvol(call_frame_t *frame, xlator_t *this, fd_t *fd,
+             if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
+                 continue;
+ 
+-            if (__is_root_gfid(fd->inode->gfid) &&
+-                !strcmp(entry->d_name, GF_REPLICATE_TRASH_DIR))
+-                continue;
+-
+             ret = afr_selfheal_entry_dirent(iter_frame, this, fd, entry->d_name,
+                                             loc.inode, subvol,
+                                             local->need_full_crawl);
+@@ -824,7 +958,7 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
+         /* The name indices under the pgfid index dir are guaranteed
+          * to be regular files. Hence the hardcoding.
+          */
+-        afr_shd_index_purge(subvol, parent->inode, entry->d_name, IA_IFREG);
++        afr_shd_entry_purge(subvol, parent->inode, entry->d_name, IA_IFREG);
+         ret = 0;
+         goto out;
+     }
+diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
+index dace071..51e3d8c 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-name.c
++++ b/xlators/cluster/afr/src/afr-self-heal-name.c
+@@ -98,21 +98,12 @@ __afr_selfheal_name_expunge(xlator_t *this, inode_t *parent, uuid_t pargfid,
+                             const char *bname, inode_t *inode,
+                             struct afr_reply *replies)
+ {
+-    loc_t loc = {
+-        0,
+-    };
+     int i = 0;
+     afr_private_t *priv = NULL;
+-    char g[64];
+     int ret = 0;
+ 
+     priv = this->private;
+ 
+-    loc.parent = inode_ref(parent);
+-    gf_uuid_copy(loc.pargfid, pargfid);
+-    loc.name = bname;
+-    loc.inode = inode_ref(inode);
+-
+     for (i = 0; i < priv->child_count; i++) {
+         if (!replies[i].valid)
+             continue;
+@@ -120,30 +111,10 @@ __afr_selfheal_name_expunge(xlator_t *this, inode_t *parent, uuid_t pargfid,
+         if (replies[i].op_ret)
+             continue;
+ 
+-        switch (replies[i].poststat.ia_type) {
+-            case IA_IFDIR:
+-                gf_msg(this->name, GF_LOG_WARNING, 0,
+-                       AFR_MSG_EXPUNGING_FILE_OR_DIR,
+-                       "expunging dir %s/%s (%s) on %s", uuid_utoa(pargfid),
+-                       bname, uuid_utoa_r(replies[i].poststat.ia_gfid, g),
+-                       priv->children[i]->name);
+-
+-                ret |= syncop_rmdir(priv->children[i], &loc, 1, NULL, NULL);
+-                break;
+-            default:
+-                gf_msg(this->name, GF_LOG_WARNING, 0,
+-                       AFR_MSG_EXPUNGING_FILE_OR_DIR,
+-                       "expunging file %s/%s (%s) on %s", uuid_utoa(pargfid),
+-                       bname, uuid_utoa_r(replies[i].poststat.ia_gfid, g),
+-                       priv->children[i]->name);
+-
+-                ret |= syncop_unlink(priv->children[i], &loc, NULL, NULL);
+-                break;
+-        }
++        ret |= afr_selfheal_entry_delete(this, parent, bname, inode, i,
++                                         replies);
+     }
+ 
+-    loc_wipe(&loc);
+-
+     return ret;
+ }
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index 8f6fb00..c8dc384 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -370,4 +370,9 @@ gf_boolean_t
+ afr_is_file_empty_on_all_children(afr_private_t *priv,
+                                   struct afr_reply *replies);
+ 
++int
++afr_selfheal_entry_delete(xlator_t *this, inode_t *dir, const char *name,
++                          inode_t *inode, int child, struct afr_reply *replies);
++int
++afr_anon_inode_create(xlator_t *this, int child, inode_t **linked_inode);
+ #endif /* !_AFR_SELFHEAL_H */
+diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
+index 95ac5f2..939a135 100644
+--- a/xlators/cluster/afr/src/afr-self-heald.c
++++ b/xlators/cluster/afr/src/afr-self-heald.c
+@@ -222,7 +222,7 @@ out:
+ }
+ 
+ int
+-afr_shd_index_purge(xlator_t *subvol, inode_t *inode, char *name,
++afr_shd_entry_purge(xlator_t *subvol, inode_t *inode, char *name,
+                     ia_type_t type)
+ {
+     int ret = 0;
+@@ -422,7 +422,7 @@ afr_shd_index_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+     ret = afr_shd_selfheal(healer, healer->subvol, gfid);
+ 
+     if (ret == -ENOENT || ret == -ESTALE)
+-        afr_shd_index_purge(subvol, parent->inode, entry->d_name, val);
++        afr_shd_entry_purge(subvol, parent->inode, entry->d_name, val);
+ 
+     if (ret == 2)
+         /* If bricks crashed in pre-op after creating indices/xattrop
+@@ -798,6 +798,176 @@ afr_bricks_available_for_heal(afr_private_t *priv)
+     return _gf_true;
+ }
+ 
++static int
++afr_shd_anon_inode_cleaner(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
++                           void *data)
++{
++    struct subvol_healer *healer = data;
++    afr_private_t *priv = healer->this->private;
++    call_frame_t *frame = NULL;
++    afr_local_t *local = NULL;
++    int ret = 0;
++    loc_t loc = {0};
++    int count = 0;
++    int i = 0;
++    int op_errno = 0;
++    struct iatt *iatt = NULL;
++    gf_boolean_t multiple_links = _gf_false;
++    unsigned char *gfid_present = alloca0(priv->child_count);
++    unsigned char *entry_present = alloca0(priv->child_count);
++    char *type = "file";
++
++    frame = afr_frame_create(healer->this, &ret);
++    if (!frame) {
++        ret = -ret;
++        goto out;
++    }
++    local = frame->local;
++    if (AFR_COUNT(local->child_up, priv->child_count) != priv->child_count) {
++        gf_msg_debug(healer->this->name, 0,
++                     "Not all bricks are up. Skipping "
++                     "cleanup of %s on %s",
++                     entry->d_name, subvol->name);
++        ret = 0;
++        goto out;
++    }
++
++    loc.inode = inode_new(parent->inode->table);
++    if (!loc.inode) {
++        ret = -ENOMEM;
++        goto out;
++    }
++    ret = gf_uuid_parse(entry->d_name, loc.gfid);
++    if (ret) {
++        ret = 0;
++        goto out;
++    }
++    AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup, &loc,
++               NULL);
++    for (i = 0; i < priv->child_count; i++) {
++        if (local->replies[i].op_ret == 0) {
++            count++;
++            gfid_present[i] = 1;
++            iatt = &local->replies[i].poststat;
++            if (iatt->ia_type == IA_IFDIR) {
++                type = "dir";
++            }
++
++            if (i == healer->subvol) {
++                if (local->replies[i].poststat.ia_nlink > 1) {
++                    multiple_links = _gf_true;
++                }
++            }
++        } else if (local->replies[i].op_errno != ENOENT &&
++                   local->replies[i].op_errno != ESTALE) {
++            /*We don't have complete view. Skip the entry*/
++            gf_msg_debug(healer->this->name, local->replies[i].op_errno,
++                         "Skipping cleanup of %s on %s", entry->d_name,
++                         subvol->name);
++            ret = 0;
++            goto out;
++        }
++    }
++
++    /*Inode is deleted from subvol*/
++    if (count == 1 || (iatt->ia_type != IA_IFDIR && multiple_links)) {
++        gf_msg(healer->this->name, GF_LOG_WARNING, 0,
++               AFR_MSG_EXPUNGING_FILE_OR_DIR, "expunging %s %s/%s on %s", type,
++               priv->anon_inode_name, entry->d_name, subvol->name);
++        ret = afr_shd_entry_purge(subvol, parent->inode, entry->d_name,
++                                  iatt->ia_type);
++        if (ret == -ENOENT || ret == -ESTALE)
++            ret = 0;
++    } else if (count > 1) {
++        loc_wipe(&loc);
++        loc.parent = inode_ref(parent->inode);
++        loc.name = entry->d_name;
++        loc.inode = inode_new(parent->inode->table);
++        if (!loc.inode) {
++            ret = -ENOMEM;
++            goto out;
++        }
++        AFR_ONLIST(local->child_up, frame, afr_selfheal_discover_cbk, lookup,
++                   &loc, NULL);
++        count = 0;
++        for (i = 0; i < priv->child_count; i++) {
++            if (local->replies[i].op_ret == 0) {
++                count++;
++                entry_present[i] = 1;
++                iatt = &local->replies[i].poststat;
++            } else if (local->replies[i].op_errno != ENOENT &&
++                       local->replies[i].op_errno != ESTALE) {
++                /*We don't have complete view. Skip the entry*/
++                gf_msg_debug(healer->this->name, local->replies[i].op_errno,
++                             "Skipping cleanup of %s on %s", entry->d_name,
++                             subvol->name);
++                ret = 0;
++                goto out;
++            }
++        }
++        for (i = 0; i < priv->child_count; i++) {
++            if (gfid_present[i] && !entry_present[i]) {
++                /*Entry is not anonymous on at least one subvol*/
++                gf_msg_debug(healer->this->name, 0,
++                             "Valid entry present on %s "
++                             "Skipping cleanup of %s on %s",
++                             priv->children[i]->name, entry->d_name,
++                             subvol->name);
++                ret = 0;
++                goto out;
++            }
++        }
++
++        gf_msg(healer->this->name, GF_LOG_WARNING, 0,
++               AFR_MSG_EXPUNGING_FILE_OR_DIR,
++               "expunging %s %s/%s on all subvols", type, priv->anon_inode_name,
++               entry->d_name);
++        ret = 0;
++        for (i = 0; i < priv->child_count; i++) {
++            op_errno = -afr_shd_entry_purge(priv->children[i], loc.parent,
++                                            entry->d_name, iatt->ia_type);
++            if (op_errno != ENOENT && op_errno != ESTALE) {
++                ret |= -op_errno;
++            }
++        }
++    }
++
++out:
++    if (frame)
++        AFR_STACK_DESTROY(frame);
++    loc_wipe(&loc);
++    return ret;
++}
++
++static void
++afr_cleanup_anon_inode_dir(struct subvol_healer *healer)
++{
++    int ret = 0;
++    call_frame_t *frame = NULL;
++    afr_private_t *priv = healer->this->private;
++    loc_t loc = {0};
++
++    ret = afr_anon_inode_create(healer->this, healer->subvol, &loc.inode);
++    if (ret)
++        goto out;
++
++    frame = afr_frame_create(healer->this, &ret);
++    if (!frame) {
++        ret = -ret;
++        goto out;
++    }
++
++    ret = syncop_mt_dir_scan(frame, priv->children[healer->subvol], &loc,
++                             GF_CLIENT_PID_SELF_HEALD, healer,
++                             afr_shd_anon_inode_cleaner, NULL,
++                             priv->shd.max_threads, priv->shd.wait_qlength);
++out:
++    if (frame)
++        AFR_STACK_DESTROY(frame);
++    loc_wipe(&loc);
++    return;
++}
++
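
afr_cleanup_anon_inode_dir() runs from afr_shd_index_healer() only after a crawl finishes cleanly (ret == 0), so triggering a heal is enough to reap entries that are fully healed everywhere. A sketch, assuming a volume named myvol and the usual brick layout:

    gluster volume heal myvol
    # Once the crawl and the cleaner have run, the hidden directory
    # should be empty on each brick:
    find /bricks/brick0/.glusterfs-anonymous-inode-* -mindepth 1 | wc -l
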
+ void *
+ afr_shd_index_healer(void *data)
+ {
+@@ -854,6 +1024,10 @@ afr_shd_index_healer(void *data)
+             sleep(1);
+         } while (ret > 0);
+ 
++        if (ret == 0) {
++            afr_cleanup_anon_inode_dir(healer);
++        }
++
+         if (pre_crawl_xdata && !healer->crawl_event.heal_failed_count) {
+             afr_shd_ta_check_and_unset_xattrs(this, &loc, healer,
+                                               pre_crawl_xdata);
+diff --git a/xlators/cluster/afr/src/afr-self-heald.h b/xlators/cluster/afr/src/afr-self-heald.h
+index 1990539..acd567e 100644
+--- a/xlators/cluster/afr/src/afr-self-heald.h
++++ b/xlators/cluster/afr/src/afr-self-heald.h
+@@ -70,6 +70,6 @@ afr_shd_gfid_to_path(xlator_t *this, xlator_t *subvol, uuid_t gfid,
+                      char **path_p);
+ 
+ int
+-afr_shd_index_purge(xlator_t *subvol, inode_t *inode, char *name,
++afr_shd_entry_purge(xlator_t *subvol, inode_t *inode, char *name,
+                     ia_type_t type);
+ #endif /* !_AFR_SELF_HEALD_H */
+diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
+index bfa464f..33fe4d8 100644
+--- a/xlators/cluster/afr/src/afr.c
++++ b/xlators/cluster/afr/src/afr.c
+@@ -135,6 +135,27 @@ set_data_self_heal_algorithm(afr_private_t *priv, char *algo)
+     }
+ }
+ 
++void
++afr_handle_anon_inode_options(afr_private_t *priv, dict_t *options)
++{
++    char *volfile_id_str = NULL;
++    uuid_t anon_inode_gfid = {0};
++
++    /*If volume id is not present don't enable anything*/
++    if (dict_get_str(options, "volume-id", &volfile_id_str))
++        return;
++    GF_ASSERT(strlen(AFR_ANON_DIR_PREFIX) + strlen(volfile_id_str) <= NAME_MAX);
++    /*anon_inode_name is not supposed to change once assigned*/
++    if (!priv->anon_inode_name[0]) {
++        snprintf(priv->anon_inode_name, sizeof(priv->anon_inode_name), "%s-%s",
++                 AFR_ANON_DIR_PREFIX, volfile_id_str);
++        gf_uuid_parse(volfile_id_str, anon_inode_gfid);
++        /*Flip a bit to make sure volfile-id and anon-gfid are not same*/
++        anon_inode_gfid[0] ^= 1;
++        uuid_utoa_r(anon_inode_gfid, priv->anon_gfid_str);
++    }
++}
++
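
afr_handle_anon_inode_options() derives both the directory name (AFR_ANON_DIR_PREFIX plus the volume id) and its gfid (the volume id with one bit flipped, so the two can never coincide). The name is therefore predictable from the CLI; a sketch with a hypothetical volume name:

    vol_id=$(gluster volume info myvol | awk '/^Volume ID/ {print $3}')
    # Matches the priv->anon_inode_name built above:
    echo ".glusterfs-anonymous-inode-${vol_id}"
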
+ int
+ reconfigure(xlator_t *this, dict_t *options)
+ {
+@@ -287,6 +308,10 @@ reconfigure(xlator_t *this, dict_t *options)
+         consistent_io = _gf_false;
+     priv->consistent_io = consistent_io;
+ 
++    afr_handle_anon_inode_options(priv, options);
++
++    GF_OPTION_RECONF("use-anonymous-inode", priv->use_anon_inode, options, bool,
++                     out);
+     if (priv->shd.enabled) {
+         if ((priv->shd.enabled != enabled_old) ||
+             (timeout_old != priv->shd.timeout))
+@@ -535,7 +560,9 @@ init(xlator_t *this)
+ 
+     GF_OPTION_INIT("consistent-metadata", priv->consistent_metadata, bool, out);
+     GF_OPTION_INIT("consistent-io", priv->consistent_io, bool, out);
++    afr_handle_anon_inode_options(priv, this->options);
+ 
++    GF_OPTION_INIT("use-anonymous-inode", priv->use_anon_inode, bool, out);
+     if (priv->quorum_count != 0)
+         priv->consistent_io = _gf_false;
+ 
+@@ -547,13 +574,16 @@ init(xlator_t *this)
+         goto out;
+     }
+ 
++    priv->anon_inode = GF_CALLOC(sizeof(unsigned char), child_count,
++                                 gf_afr_mt_char);
++
+     priv->child_up = GF_CALLOC(sizeof(unsigned char), child_count,
+                                gf_afr_mt_char);
+ 
+     priv->child_latency = GF_MALLOC(sizeof(*priv->child_latency) * child_count,
+                                     gf_afr_mt_child_latency_t);
+ 
+-    if (!priv->child_up || !priv->child_latency) {
++    if (!priv->child_up || !priv->child_latency || !priv->anon_inode) {
+         ret = -ENOMEM;
+         goto out;
+     }
+@@ -1218,6 +1248,14 @@ struct volume_options options[] = {
+      .tags = {"replicate"},
+      .description = "This option exists only for backward compatibility "
+                     "and configuring it doesn't have any effect"},
++    {.key = {"use-anonymous-inode"},
++     .type = GF_OPTION_TYPE_BOOL,
++     .default_value = "no",
++     .op_version = {GD_OP_VERSION_7_0},
++     .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE,
++     .tags = {"replicate"},
++     .description = "Setting this option heals directory renames efficiently"},
++
+     {.key = {NULL}},
+ };
+ 
+diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
+index 3a2b26d..6a9a763 100644
+--- a/xlators/cluster/afr/src/afr.h
++++ b/xlators/cluster/afr/src/afr.h
+@@ -40,6 +40,8 @@
+ #define AFR_TA_DOM_MODIFY "afr.ta.dom-modify"
+ 
+ #define AFR_HALO_MAX_LATENCY 99999
++#define AFR_ANON_DIR_PREFIX ".glusterfs-anonymous-inode"
++
+ 
+ #define PFLAG_PENDING (1 << 0)
+ #define PFLAG_SBRAIN (1 << 1)
+@@ -155,6 +157,7 @@ typedef struct _afr_private {
+     struct list_head ta_waitq;
+     struct list_head ta_onwireq;
+ 
++    unsigned char *anon_inode;
+     unsigned char *child_up;
+     int64_t *child_latency;
+     unsigned char *local;
+@@ -240,6 +243,11 @@ typedef struct _afr_private {
+     gf_boolean_t esh_granular;
+     gf_boolean_t consistent_io;
+     gf_boolean_t data_self_heal; /* on/off */
++    gf_boolean_t use_anon_inode;
++
++    /*For anon-inode handling */
++    char anon_inode_name[NAME_MAX + 1];
++    char anon_gfid_str[UUID_SIZE + 1];
+ } afr_private_t;
+ 
+ typedef enum {
+@@ -1341,4 +1349,7 @@ afr_selfheal_childup(xlator_t *this, afr_private_t *priv);
+ void
+ afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
+                          unsigned char *replies);
++gf_boolean_t
++afr_is_private_directory(afr_private_t *priv, uuid_t pargfid, const char *name,
++                         pid_t pid);
+ #endif /* __AFR_H__ */
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 094a71f..1920284 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -3867,6 +3867,38 @@ out:
+ }
+ 
+ static int
++set_volfile_id_option(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
++                      int clusters)
++{
++    xlator_t *xlator = NULL;
++    int i = 0;
++    int ret = -1;
++    glusterd_conf_t *conf = NULL;
++    xlator_t *this = NULL;
++
++    this = THIS;
++    GF_VALIDATE_OR_GOTO("glusterd", this, out);
++    conf = this->private;
++    GF_VALIDATE_OR_GOTO(this->name, conf, out);
++
++    if (conf->op_version < GD_OP_VERSION_7_1)
++        return 0;
++    xlator = first_of(graph);
++
++    for (i = 0; i < clusters; i++) {
++        ret = xlator_set_fixed_option(xlator, "volume-id",
++                                      uuid_utoa(volinfo->volume_id));
++        if (ret)
++            goto out;
++
++        xlator = xlator->next;
++    }
++
++out:
++    return ret;
++}
++
++static int
+ volgen_graph_build_afr_clusters(volgen_graph_t *graph,
+                                 glusterd_volinfo_t *volinfo)
+ {
+@@ -3906,6 +3938,13 @@ volgen_graph_build_afr_clusters(volgen_graph_t *graph,
+         clusters = -1;
+         goto out;
+     }
++
++    ret = set_volfile_id_option(graph, volinfo, clusters);
++    if (ret) {
++        clusters = -1;
++        goto out;
++    }
++
+     if (!volinfo->arbiter_count)
+         goto out;
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+index 62acadf..c1ca190 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+@@ -3789,4 +3789,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
+      .voltype = "features/cloudsync",
+      .op_version = GD_OP_VERSION_7_0,
+      .flags = VOLOPT_FLAG_CLIENT_OPT},
++
++    {.key = "cluster.use-anonymous-inode",
++     .voltype = "cluster/replicate",
++     .op_version = GD_OP_VERSION_7_1,
++     .value = "yes",
++     .flags = VOLOPT_FLAG_CLIENT_OPT},
+     {.key = NULL}};
+-- 
+1.8.3.1
+
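Taken together, the patch introduces the cluster.use-anonymous-inode volume option: the xlator default is "no", but glusterd's volopt map sets it to "yes" for volumes at op-version GD_OP_VERSION_7_1 and above, and volgen passes volume-id down so AFR can name the hidden directory. A usage sketch with a hypothetical volume:

    gluster volume set myvol cluster.use-anonymous-inode yes
    gluster volume get myvol cluster.use-anonymous-inode
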
diff --git a/SOURCES/0516-afr-return-EIO-for-gfid-split-brains.patch b/SOURCES/0516-afr-return-EIO-for-gfid-split-brains.patch
new file mode 100644
index 0000000..0f6249e
--- /dev/null
+++ b/SOURCES/0516-afr-return-EIO-for-gfid-split-brains.patch
@@ -0,0 +1,338 @@
+From 8d24d891aade910b0bb86b27c25a8d2382e19ba0 Mon Sep 17 00:00:00 2001
+From: karthik-us <ksubrahm@redhat.com>
+Date: Tue, 15 Dec 2020 15:04:19 +0530
+Subject: [PATCH 516/517] afr: return -EIO for gfid split-brains.
+
+Problem:
+entry-self-heal-anon-dir-off.t was failing occasionally because
+afr_gfid_split_brain_source() returned -1 instead of -EIO for
+split-brains, causing the code to proceed to afr_lookup_done(), which
+in turn succeeded the lookup if there was a parallel client side heal
+going on.
+
+Fix:
+Return -EIO instead of -1 so that lookup fails.
+
+Also, afr_selfheal_name() was using the same dict to get and set values. This
+could be problematic if the caller passed local->xdata_req, since
+setting a response in a request dict can lead to bugs. So it was changed to use
+separate request and response dicts.
+
+Upstream patch details:
+> Fixes: #1739
+> Credits Pranith Karampuri <pranith.karampuri@phonepe.com>
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+> Change-Id: I5cb4c547fb25e6bfc8bec1740f7eb64e1a5ad443
+Upstream patch: https://github.com/gluster/glusterfs/pull/1819/
+
+BUG: 1640148
+Signed-off-by: karthik-us <ksubrahm@redhat.com>
+Change-Id: I5cb4c547fb25e6bfc8bec1740f7eb64e1a5ad443
+Reviewed-on: https://code.engineering.redhat.com/gerrit/221209
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ xlators/cluster/afr/src/afr-common.c           | 12 ++++++++----
+ xlators/cluster/afr/src/afr-self-heal-common.c | 27 +++++++++++++-------------
+ xlators/cluster/afr/src/afr-self-heal-entry.c  |  8 ++++----
+ xlators/cluster/afr/src/afr-self-heal-name.c   | 23 +++++++++++-----------
+ xlators/cluster/afr/src/afr-self-heal.h        |  5 +++--
+ xlators/cluster/afr/src/afr-self-heald.c       |  2 +-
+ 6 files changed, 42 insertions(+), 35 deletions(-)
+
+diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
+index 6f2da11..416012c 100644
+--- a/xlators/cluster/afr/src/afr-common.c
++++ b/xlators/cluster/afr/src/afr-common.c
+@@ -2366,7 +2366,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         /* If we were called from glfsheal and there is still a gfid
+          * mismatch, succeed the lookup and let glfsheal print the
+          * response via gfid-heal-msg.*/
+-        if (!dict_get_str_sizen(local->xattr_req, "gfid-heal-msg",
++        if (!dict_get_str_sizen(local->xattr_rsp, "gfid-heal-msg",
+                                 &gfid_heal_msg))
+             goto cant_interpret;
+ 
+@@ -2421,7 +2421,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
+         goto error;
+     }
+ 
+-    ret = dict_get_str_sizen(local->xattr_req, "gfid-heal-msg", &gfid_heal_msg);
++    ret = dict_get_str_sizen(local->xattr_rsp, "gfid-heal-msg", &gfid_heal_msg);
+     if (!ret) {
+         ret = dict_set_str_sizen(local->replies[read_subvol].xdata,
+                                  "gfid-heal-msg", gfid_heal_msg);
+@@ -2768,9 +2768,12 @@ afr_lookup_selfheal_wrap(void *opaque)
+     local = frame->local;
+     this = frame->this;
+     loc_pargfid(&local->loc, pargfid);
++    if (!local->xattr_rsp)
++        local->xattr_rsp = dict_new();
+ 
+     ret = afr_selfheal_name(frame->this, pargfid, local->loc.name,
+-                            &local->cont.lookup.gfid_req, local->xattr_req);
++                            &local->cont.lookup.gfid_req, local->xattr_req,
++                            local->xattr_rsp);
+     if (ret == -EIO)
+         goto unwind;
+ 
+@@ -2786,7 +2789,8 @@ afr_lookup_selfheal_wrap(void *opaque)
+     return 0;
+ 
+ unwind:
+-    AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
++    AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, local->xattr_rsp,
++                     NULL);
+     return 0;
+ }
+ 
+diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
+index 0a8a7fd..0954d2c 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-common.c
++++ b/xlators/cluster/afr/src/afr-self-heal-common.c
+@@ -245,7 +245,8 @@ int
+ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+                             inode_t *inode, uuid_t pargfid, const char *bname,
+                             int src_idx, int child_idx,
+-                            unsigned char *locked_on, int *src, dict_t *xdata)
++                            unsigned char *locked_on, int *src, dict_t *req,
++                            dict_t *rsp)
+ {
+     afr_private_t *priv = NULL;
+     char g1[64] = {
+@@ -266,8 +267,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+         gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                "All the bricks should be up to resolve the gfid split "
+                "barin");
+-        if (xdata) {
+-            ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
++        if (rsp) {
++            ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
+                                            SALL_BRICKS_UP_TO_RESOLVE);
+             if (ret)
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
+@@ -277,8 +278,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+         goto out;
+     }
+ 
+-    if (xdata) {
+-        ret = dict_get_int32_sizen(xdata, "heal-op", &heal_op);
++    if (req) {
++        ret = dict_get_int32_sizen(req, "heal-op", &heal_op);
+         if (ret)
+             goto fav_child;
+     } else {
+@@ -292,8 +293,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+             if (*src == -1) {
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                        SNO_BIGGER_FILE);
+-                if (xdata) {
+-                    ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
++                if (rsp) {
++                    ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
+                                                    SNO_BIGGER_FILE);
+                     if (ret)
+                         gf_msg(this->name, GF_LOG_ERROR, 0,
+@@ -310,8 +311,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+             if (*src == -1) {
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                        SNO_DIFF_IN_MTIME);
+-                if (xdata) {
+-                    ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
++                if (rsp) {
++                    ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
+                                                    SNO_DIFF_IN_MTIME);
+                     if (ret)
+                         gf_msg(this->name, GF_LOG_ERROR, 0,
+@@ -323,7 +324,7 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+             break;
+ 
+         case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
+-            ret = dict_get_str_sizen(xdata, "child-name", &src_brick);
++            ret = dict_get_str_sizen(req, "child-name", &src_brick);
+             if (ret) {
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                        "Error getting the source "
+@@ -335,8 +336,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+             if (*src == -1) {
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                        SERROR_GETTING_SRC_BRICK);
+-                if (xdata) {
+-                    ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
++                if (rsp) {
++                    ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
+                                                    SERROR_GETTING_SRC_BRICK);
+                     if (ret)
+                         gf_msg(this->name, GF_LOG_ERROR, 0,
+@@ -400,7 +401,7 @@ out:
+                  uuid_utoa_r(replies[child_idx].poststat.ia_gfid, g1), src_idx,
+                  priv->children[src_idx]->name, src_idx,
+                  uuid_utoa_r(replies[src_idx].poststat.ia_gfid, g2));
+-        return -1;
++        return -EIO;
+     }
+     return 0;
+ }
+diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
+index 20b07dd..a17dd93 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
+@@ -399,7 +399,7 @@ afr_selfheal_detect_gfid_and_type_mismatch(xlator_t *this,
+             (ia_type == replies[i].poststat.ia_type)) {
+             ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
+                                               bname, src_idx, i, locked_on, src,
+-                                              NULL);
++                                              NULL, NULL);
+             if (ret)
+                 gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+                        "Skipping conservative merge on the "
+@@ -474,7 +474,7 @@ __afr_selfheal_merge_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
+         return ret;
+ 
+     /* In case of type mismatch / unable to resolve gfid mismatch on the
+-     * entry, return -1.*/
++     * entry, return -EIO.*/
+     ret = afr_selfheal_detect_gfid_and_type_mismatch(
+         this, replies, inode, fd->inode->gfid, name, source, locked_on, &src);
+ 
+@@ -905,7 +905,7 @@ afr_selfheal_entry_do_subvol(call_frame_t *frame, xlator_t *this, fd_t *fd,
+                 break;
+             }
+ 
+-            if (ret == -1) {
++            if (ret == -EIO) {
+                 /* gfid or type mismatch. */
+                 mismatch = _gf_true;
+                 ret = 0;
+@@ -1072,7 +1072,7 @@ afr_selfheal_entry_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
+         else
+             ret = afr_selfheal_entry_do_subvol(frame, this, fd, i);
+ 
+-        if (ret == -1) {
++        if (ret == -EIO) {
+             /* gfid or type mismatch. */
+             mismatch = _gf_true;
+             ret = 0;
+diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
+index 51e3d8c..9ec2066 100644
+--- a/xlators/cluster/afr/src/afr-self-heal-name.c
++++ b/xlators/cluster/afr/src/afr-self-heal-name.c
+@@ -217,7 +217,8 @@ afr_selfheal_name_gfid_mismatch_check(xlator_t *this, struct afr_reply *replies,
+                                       int source, unsigned char *sources,
+                                       int *gfid_idx, uuid_t pargfid,
+                                       const char *bname, inode_t *inode,
+-                                      unsigned char *locked_on, dict_t *xdata)
++                                      unsigned char *locked_on, dict_t *req,
++                                      dict_t *rsp)
+ {
+     int i = 0;
+     int gfid_idx_iter = -1;
+@@ -245,11 +246,11 @@ afr_selfheal_name_gfid_mismatch_check(xlator_t *this, struct afr_reply *replies,
+         if (sources[i] || source == -1) {
+             if ((sources[gfid_idx_iter] || source == -1) &&
+                 gf_uuid_compare(gfid, gfid1)) {
+-                ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
+-                                                  bname, gfid_idx_iter, i,
+-                                                  locked_on, gfid_idx, xdata);
++                ret = afr_gfid_split_brain_source(
++                    this, replies, inode, pargfid, bname, gfid_idx_iter, i,
++                    locked_on, gfid_idx, req, rsp);
+                 if (!ret && *gfid_idx >= 0) {
+-                    ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
++                    ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
+                                                    "GFID split-brain resolved");
+                     if (ret)
+                         gf_msg(this->name, GF_LOG_ERROR, 0,
+@@ -303,7 +304,7 @@ __afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+                        unsigned char *sources, unsigned char *sinks,
+                        unsigned char *healed_sinks, int source,
+                        unsigned char *locked_on, struct afr_reply *replies,
+-                       void *gfid_req, dict_t *xdata)
++                       void *gfid_req, dict_t *req, dict_t *rsp)
+ {
+     int gfid_idx = -1;
+     int ret = -1;
+@@ -333,7 +334,7 @@ __afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+ 
+     ret = afr_selfheal_name_gfid_mismatch_check(this, replies, source, sources,
+                                                 &gfid_idx, pargfid, bname,
+-                                                inode, locked_on, xdata);
++                                                inode, locked_on, req, rsp);
+     if (ret)
+         return ret;
+ 
+@@ -450,7 +451,7 @@ out:
+ int
+ afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+                      uuid_t pargfid, const char *bname, void *gfid_req,
+-                     dict_t *xdata)
++                     dict_t *req, dict_t *rsp)
+ {
+     afr_private_t *priv = NULL;
+     unsigned char *sources = NULL;
+@@ -505,7 +506,7 @@ afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
+ 
+         ret = __afr_selfheal_name_do(frame, this, parent, pargfid, bname, inode,
+                                      sources, sinks, healed_sinks, source,
+-                                     locked_on, replies, gfid_req, xdata);
++                                     locked_on, replies, gfid_req, req, rsp);
+     }
+ unlock:
+     afr_selfheal_unentrylk(frame, this, parent, this->name, bname, locked_on,
+@@ -578,7 +579,7 @@ afr_selfheal_name_unlocked_inspect(call_frame_t *frame, xlator_t *this,
+ 
+ int
+ afr_selfheal_name(xlator_t *this, uuid_t pargfid, const char *bname,
+-                  void *gfid_req, dict_t *xdata)
++                  void *gfid_req, dict_t *req, dict_t *rsp)
+ {
+     inode_t *parent = NULL;
+     call_frame_t *frame = NULL;
+@@ -600,7 +601,7 @@ afr_selfheal_name(xlator_t *this, uuid_t pargfid, const char *bname,
+ 
+     if (need_heal) {
+         ret = afr_selfheal_name_do(frame, this, parent, pargfid, bname,
+-                                   gfid_req, xdata);
++                                   gfid_req, req, rsp);
+         if (ret)
+             goto out;
+     }
+diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
+index c8dc384..6b0bf69 100644
+--- a/xlators/cluster/afr/src/afr-self-heal.h
++++ b/xlators/cluster/afr/src/afr-self-heal.h
+@@ -127,7 +127,7 @@ afr_throttled_selfheal(call_frame_t *frame, xlator_t *this);
+ 
+ int
+ afr_selfheal_name(xlator_t *this, uuid_t gfid, const char *name, void *gfid_req,
+-                  dict_t *xdata);
++                  dict_t *req, dict_t *rsp);
+ 
+ int
+ afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd);
+@@ -357,7 +357,8 @@ int
+ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
+                             inode_t *inode, uuid_t pargfid, const char *bname,
+                             int src_idx, int child_idx,
+-                            unsigned char *locked_on, int *src, dict_t *xdata);
++                            unsigned char *locked_on, int *src, dict_t *req,
++                            dict_t *rsp);
+ int
+ afr_mark_source_sinks_if_file_empty(xlator_t *this, unsigned char *sources,
+                                     unsigned char *sinks,
+diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
+index 939a135..18aed93 100644
+--- a/xlators/cluster/afr/src/afr-self-heald.c
++++ b/xlators/cluster/afr/src/afr-self-heald.c
+@@ -295,7 +295,7 @@ afr_shd_selfheal_name(struct subvol_healer *healer, int child, uuid_t parent,
+ {
+     int ret = -1;
+ 
+-    ret = afr_selfheal_name(THIS, parent, bname, NULL, NULL);
++    ret = afr_selfheal_name(THIS, parent, bname, NULL, NULL, NULL);
+ 
+     return ret;
+ }
+-- 
+1.8.3.1
+
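The reason the patch above insists on -EIO rather than a bare -1 is that the callers key off the specific value: afr_selfheal_entry_do() and afr_selfheal_entry_do_subvol() now test ret == -EIO to detect a gfid/type mismatch, and afr_lookup_selfheal_wrap() fails the lookup only on -EIO. A minimal sketch of that dispatch, with an illustrative function name standing in for the AFR symbols:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for afr_gfid_split_brain_source(): 0 on success, -EIO when
     * the gfid split-brain cannot be resolved. */
    static int
    resolve_gfid_mismatch(int resolvable)
    {
        return resolvable ? 0 : -EIO; /* a distinguishable error, not -1 */
    }

    int
    main(void)
    {
        int ret = resolve_gfid_mismatch(0);

        if (ret == -EIO) {
            /* Mirrors the callers in the patch: treat this specific value
             * as "split-brain, fail the lookup", not as a generic error. */
            fprintf(stderr, "gfid split-brain: failing lookup with EIO\n");
            return 1;
        }
        return 0;
    }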
diff --git a/SOURCES/0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch b/SOURCES/0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
new file mode 100644
index 0000000..bc1b263
--- /dev/null
+++ b/SOURCES/0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
@@ -0,0 +1,388 @@
+From da75c2857fd8b173d47fb7fc3b925ffd14105f64 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY" <kkeithle@rhel7x.kkeithle.usersys.redhat.com>
+Date: Wed, 23 Dec 2020 07:39:13 -0500
+Subject: [PATCH 517/517] gfapi: 'glfs_h_creat_open' - new API to create handle
+ and open fd
+
+Right now we have two separate APIs:
+- 'glfs_h_creat_handle' to create a handle, and
+- 'glfs_h_open' to create a glfd to return to the application.
+
+Having two separate routines can result in access errors
+when trying to create and then write into a read-only file.
+
+Since an fd is opened even during file/directory creation,
+introduce a new API that makes these two operations atomic, i.e. one
+that creates both the handle and the fd and passes them to the application.
+
+This is backport of below mainline patch -
+- https://review.gluster.org/#/c/glusterfs/+/23448/
+- bz#1753569
+
+> Signed-off-by: Soumya Koduri <skoduri@redhat.com>
+> Change-Id: Ibf513fcfcdad175f4d7eb6fa7a61b8feec6d33b5
+> release-6: commit 5a2af2fd06356f6fc79d591c352caffd4c511c9e
+> master:    commit 41a0f2aa755ec7162facd30209f2fa3f40308766
+
+BUG: 1910119
+Change-Id: Ib397dbe82a6928d8f24251809d30febddd007bfc
+Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/222083
+Reviewed-by: Soumya Koduri <skoduri@redhat.com>
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ api/src/gfapi.aliases                 |   1 +
+ api/src/gfapi.map                     |   5 ++
+ api/src/glfs-handleops.c              | 135 ++++++++++++++++++++++++++++++++++
+ api/src/glfs-handles.h                |   5 ++
+ tests/basic/gfapi/glfs_h_creat_open.c | 118 +++++++++++++++++++++++++++++
+ tests/basic/gfapi/glfs_h_creat_open.t |  27 +++++++
+ 6 files changed, 291 insertions(+)
+ create mode 100644 tests/basic/gfapi/glfs_h_creat_open.c
+ create mode 100755 tests/basic/gfapi/glfs_h_creat_open.t
+
+diff --git a/api/src/gfapi.aliases b/api/src/gfapi.aliases
+index 692ae13..3d3415c 100644
+--- a/api/src/gfapi.aliases
++++ b/api/src/gfapi.aliases
+@@ -197,3 +197,4 @@ _pub_glfs_fsetattr _glfs_fsetattr$GFAPI_6.0
+ _pub_glfs_setattr _glfs_setattr$GFAPI_6.0
+ 
+ _pub_glfs_set_statedump_path _glfs_set_statedump_path@GFAPI_6.4
++_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
+diff --git a/api/src/gfapi.map b/api/src/gfapi.map
+index df65837..614f3f6 100644
+--- a/api/src/gfapi.map
++++ b/api/src/gfapi.map
+@@ -276,3 +276,8 @@ GFAPI_6.4 {
+ 	global:
+ 		glfs_set_statedump_path;
+ } GFAPI_PRIVATE_6.1;
++
++GFAPI_6.6 {
++	global:
++		glfs_h_creat_open;
++} GFAPI_6.4;
+diff --git a/api/src/glfs-handleops.c b/api/src/glfs-handleops.c
+index d4e1545..7b8ff14 100644
+--- a/api/src/glfs-handleops.c
++++ b/api/src/glfs-handleops.c
+@@ -843,6 +843,141 @@ invalid_fs:
+ GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2);
+ 
+ struct glfs_object *
++pub_glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent,
++                      const char *path, int flags, mode_t mode,
++                      struct stat *stat, struct glfs_fd **out_fd)
++{
++    int ret = -1;
++    struct glfs_fd *glfd = NULL;
++    xlator_t *subvol = NULL;
++    inode_t *inode = NULL;
++    loc_t loc = {
++        0,
++    };
++    struct iatt iatt = {
++        0,
++    };
++    uuid_t gfid;
++    dict_t *xattr_req = NULL;
++    struct glfs_object *object = NULL;
++    dict_t *fop_attr = NULL;
++
++    /* validate in args */
++    if ((fs == NULL) || (parent == NULL) || (path == NULL) ||
++        (out_fd == NULL)) {
++        errno = EINVAL;
++        return NULL;
++    }
++
++    DECLARE_OLD_THIS;
++    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
++
++    /* get the active volume */
++    subvol = glfs_active_subvol(fs);
++    if (!subvol) {
++        ret = -1;
++        goto out;
++    }
++
++    /* get/refresh the in arg objects inode in correlation to the xlator */
++    inode = glfs_resolve_inode(fs, subvol, parent);
++    if (!inode) {
++        ret = -1;
++        goto out;
++    }
++
++    xattr_req = dict_new();
++    if (!xattr_req) {
++        ret = -1;
++        errno = ENOMEM;
++        goto out;
++    }
++
++    gf_uuid_generate(gfid);
++    ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
++    if (ret) {
++        ret = -1;
++        errno = ENOMEM;
++        goto out;
++    }
++
++    GLFS_LOC_FILL_PINODE(inode, loc, ret, errno, out, path);
++
++    glfd = glfs_fd_new(fs);
++    if (!glfd) {
++        ret = -1;
++        errno = ENOMEM;
++        goto out;
++    }
++
++    glfd->fd = fd_create(loc.inode, getpid());
++    if (!glfd->fd) {
++        ret = -1;
++        errno = ENOMEM;
++        goto out;
++    }
++    glfd->fd->flags = flags;
++
++    ret = get_fop_attr_thrd_key(&fop_attr);
++    if (ret)
++        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
++
++    /* fop/op */
++    ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt, xattr_req,
++                        NULL);
++    DECODE_SYNCOP_ERR(ret);
++
++    /* populate out args */
++    if (ret == 0) {
++        glfd->fd->flags = flags;
++
++        ret = glfs_loc_link(&loc, &iatt);
++        if (ret != 0) {
++            goto out;
++        }
++
++        if (stat)
++            glfs_iatt_to_stat(fs, &iatt, stat);
++
++        ret = glfs_create_object(&loc, &object);
++    }
++
++out:
++    if (ret && object != NULL) {
++        /* Release the held reference */
++        glfs_h_close(object);
++        object = NULL;
++    }
++
++    loc_wipe(&loc);
++
++    if (inode)
++        inode_unref(inode);
++
++    if (fop_attr)
++        dict_unref(fop_attr);
++
++    if (xattr_req)
++        dict_unref(xattr_req);
++
++    if (ret && glfd) {
++        GF_REF_PUT(glfd);
++    } else if (glfd) {
++        glfd_set_state_bind(glfd);
++        *out_fd = glfd;
++    }
++
++    glfs_subvol_done(fs, subvol);
++
++    __GLFS_EXIT_FS;
++
++invalid_fs:
++    return object;
++}
++
++GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6);
++
++struct glfs_object *
+ pub_glfs_h_mkdir(struct glfs *fs, struct glfs_object *parent, const char *path,
+                  mode_t mode, struct stat *stat)
+ {
+diff --git a/api/src/glfs-handles.h b/api/src/glfs-handles.h
+index f7e6a06..4d039b9 100644
+--- a/api/src/glfs-handles.h
++++ b/api/src/glfs-handles.h
+@@ -250,6 +250,11 @@ int
+ glfs_h_access(glfs_t *fs, glfs_object_t *object, int mask) __THROW
+     GFAPI_PUBLIC(glfs_h_access, 3.6.0);
+ 
++struct glfs_object *
++glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent, const char *path,
++                  int flags, mode_t mode, struct stat *stat,
++                  struct glfs_fd **out_fd) __THROW
++    GFAPI_PUBLIC(glfs_h_creat_open, 6.6);
+ /*
+   SYNOPSIS
+ 
+diff --git a/tests/basic/gfapi/glfs_h_creat_open.c b/tests/basic/gfapi/glfs_h_creat_open.c
+new file mode 100644
+index 0000000..7672561
+--- /dev/null
++++ b/tests/basic/gfapi/glfs_h_creat_open.c
+@@ -0,0 +1,118 @@
++#include <fcntl.h>
++#include <unistd.h>
++#include <time.h>
++#include <limits.h>
++#include <string.h>
++#include <stdio.h>
++#include <errno.h>
++#include <stdlib.h>
++#include <glusterfs/api/glfs.h>
++#include <glusterfs/api/glfs-handles.h>
++
++#define LOG_ERR(func, ret)                                                     \
++    do {                                                                       \
++        if (ret != 0) {                                                        \
++            fprintf(stderr, "%s : returned error ret(%d), errno(%d)\n", func,  \
++                    ret, errno);                                               \
++            exit(1);                                                           \
++        } else {                                                               \
++            fprintf(stderr, "%s : returned %d\n", func, ret);                  \
++        }                                                                      \
++    } while (0)
++#define LOG_IF_NO_ERR(func, ret)                                               \
++    do {                                                                       \
++        if (ret == 0) {                                                        \
++            fprintf(stderr, "%s : hasn't returned error %d\n", func, ret);     \
++            exit(1);                                                           \
++        } else {                                                               \
++            fprintf(stderr, "%s : returned %d\n", func, ret);                  \
++        }                                                                      \
++    } while (0)
++int
++main(int argc, char *argv[])
++{
++    glfs_t *fs = NULL;
++    int ret = 0;
++    struct glfs_object *root = NULL, *leaf = NULL;
++    glfs_fd_t *fd = NULL;
++    char *filename = "/ro-file";
++    struct stat sb = {
++        0,
++    };
++    char *logfile = NULL;
++    char *volname = NULL;
++    char *hostname = NULL;
++    char buf[32] = "abcdefghijklmnopqrstuvwxyz012345";
++
++    fprintf(stderr, "Starting glfs_h_creat_open\n");
++
++    if (argc != 4) {
++        fprintf(stderr, "Invalid argument\n");
++        exit(1);
++    }
++
++    hostname = argv[1];
++    volname = argv[2];
++    logfile = argv[3];
++
++    fs = glfs_new(volname);
++    if (!fs) {
++        fprintf(stderr, "glfs_new: returned NULL\n");
++        return 1;
++    }
++
++    ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
++    LOG_ERR("glfs_set_volfile_server", ret);
++
++    ret = glfs_set_logging(fs, logfile, 7);
++    LOG_ERR("glfs_set_logging", ret);
++
++    ret = glfs_init(fs);
++    LOG_ERR("glfs_init", ret);
++
++    sleep(2);
++    root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
++    if (!root) {
++        ret = -1;
++        LOG_ERR("glfs_h_lookupat root", ret);
++    }
++    leaf = glfs_h_lookupat(fs, root, filename, &sb, 0);
++    if (!leaf) {
++        ret = -1;
++        LOG_IF_NO_ERR("glfs_h_lookupat leaf", ret);
++    }
++
++    leaf = glfs_h_creat_open(fs, root, filename, O_RDONLY, 00444, &sb, &fd);
++    if (!leaf || !fd) {
++        ret = -1;
++        LOG_ERR("glfs_h_creat leaf", ret);
++    }
++    fprintf(stderr, "glfs_h_create_open leaf - %p\n", leaf);
++
++    ret = glfs_write(fd, buf, 32, 0);
++    if (ret < 0) {
++        fprintf(stderr, "glfs_write: error writing to file %s, %s\n", filename,
++                strerror(errno));
++        goto out;
++    }
++
++    ret = glfs_h_getattrs(fs, leaf, &sb);
++    LOG_ERR("glfs_h_getattrs", ret);
++
++    if (sb.st_size != 32) {
++        fprintf(stderr, "glfs_write: post size mismatch\n");
++        goto out;
++    }
++
++    fprintf(stderr, "Successfully opened and written to a read-only file \n");
++out:
++    if (fd)
++        glfs_close(fd);
++
++    ret = glfs_fini(fs);
++    LOG_ERR("glfs_fini", ret);
++
++    fprintf(stderr, "End of libgfapi_fini\n");
++
++    exit(0);
++}
+diff --git a/tests/basic/gfapi/glfs_h_creat_open.t b/tests/basic/gfapi/glfs_h_creat_open.t
+new file mode 100755
+index 0000000..f24ae73
+--- /dev/null
++++ b/tests/basic/gfapi/glfs_h_creat_open.t
+@@ -0,0 +1,27 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++cleanup;
++
++TEST glusterd
++
++TEST $CLI volume create $V0 $H0:$B0/brick1;
++EXPECT 'Created' volinfo_field $V0 'Status';
++
++TEST $CLI volume start $V0;
++EXPECT 'Started' volinfo_field $V0 'Status';
++
++logdir=`gluster --print-logdir`
++
++TEST build_tester $(dirname $0)/glfs_h_creat_open.c -lgfapi
++
++TEST ./$(dirname $0)/glfs_h_creat_open $H0 $V0  $logdir/glfs.log
++
++cleanup_tester $(dirname $0)/glfs_h_creat_open
++
++TEST $CLI volume stop $V0
++TEST $CLI volume delete $V0
++
++cleanup;
+-- 
+1.8.3.1
+
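A minimal caller of the new API, condensed from the glfs_h_creat_open.c test added above; the volume name, server, and log path are placeholders. The point of the atomic call is visible in the last step: the fd returned at creation time remains usable for writing even though the file was created read-only (mode 0444, O_RDONLY), which the two-step glfs_h_creat + glfs_h_open sequence could not guarantee:

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <glusterfs/api/glfs.h>
    #include <glusterfs/api/glfs-handles.h>

    int
    main(void)
    {
        struct glfs_object *root = NULL, *leaf = NULL;
        glfs_fd_t *fd = NULL;
        struct stat sb = {
            0,
        };
        glfs_t *fs = glfs_new("myvol"); /* placeholder volume name */

        if (!fs)
            return 1;
        glfs_set_volfile_server(fs, "tcp", "server1", 24007); /* placeholder */
        glfs_set_logging(fs, "/tmp/glfs.log", 7);
        if (glfs_init(fs))
            return 1;

        root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
        if (root) {
            /* One atomic call: creates the handle and opens an fd together. */
            leaf = glfs_h_creat_open(fs, root, "/ro-file", O_RDONLY, 00444,
                                     &sb, &fd);
            if (leaf && fd)
                glfs_write(fd, "payload", 7, 0); /* works despite mode 0444 */
        }

        if (fd)
            glfs_close(fd);
        glfs_fini(fs);
        return 0;
    }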
diff --git a/SOURCES/0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch b/SOURCES/0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
new file mode 100644
index 0000000..00d29b9
--- /dev/null
+++ b/SOURCES/0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
@@ -0,0 +1,41 @@
+From 818025e467ea98b32a855c92ba6aef6e172e029f Mon Sep 17 00:00:00 2001
+From: Nikhil Ladha <nladha@redhat.com>
+Date: Fri, 8 Jan 2021 13:12:46 +0530
+Subject: [PATCH 518/526] glusterd: Fix for shared storage in ipv6 env
+
+Issue:
+Mounting the shared storage volume was failing in an IPv6 environment if the
+hostnames were FQDNs: the brick name was cut off, so volume creation failed.
+
+>Change-Id: Ib38993724c709b35b603f9ac666630c50c932c3e
+>Fixes: #1406
+>Signed-off-by: nik-redhat <nladha@redhat.com>
+Upstream patch: https://github.com/gluster/glusterfs/pull/1972
+
+BUG: 1856574
+
+Change-Id: Ib38993724c709b35b603f9ac666630c50c932c3e
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/223248
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+index 9597503..e9261af 100755
+--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
++++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+@@ -46,7 +46,7 @@ do
+ 
+     key=`echo $line | cut -d ':' -f 1`
+     if [ "$key" == "Hostname" ]; then
+-        hostname=`echo $line | cut -d ':' -f 2 | xargs`
++        hostname=`echo $line | cut -d ' ' -f 2 | xargs`
+     fi
+ 
+     if [ "$key" == "State" ]; then
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch b/SOURCES/0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
new file mode 100644
index 0000000..f37acfd
--- /dev/null
+++ b/SOURCES/0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
@@ -0,0 +1,58 @@
+From 6ed227367b6eb7d6d7afde3859ad0a711a3adf36 Mon Sep 17 00:00:00 2001
+From: Leela Venkaiah G <gleelavenkaiah@gmail.com>
+Date: Wed, 13 Jan 2021 16:02:25 +0530
+Subject: [PATCH 519/526] glusterfs-events: Fix incorrect attribute access
+ (#2002)
+
+Issue: When GlusterCmdException is raised, the current code tries to access
+the `message` attribute, which doesn't exist, resulting in a malformed
+error string on failed operations.
+
+Code Change: Replace `message` with `args[0]`
+
+>Fixes: #2001
+>Change-Id: I65c9f0ee79310937a384025b8d454acda154e4bb
+>Signed-off-by: Leela Venkaiah G <lgangava@redhat.com>
+Upstream patch: https://github.com/gluster/glusterfs/pull/2002
+
+BUG: 1600459
+Change-Id: I65c9f0ee79310937a384025b8d454acda154e4bb
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/223584
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ events/src/peer_eventsapi.py | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/events/src/peer_eventsapi.py b/events/src/peer_eventsapi.py
+index 26b77a0..c388da4 100644
+--- a/events/src/peer_eventsapi.py
++++ b/events/src/peer_eventsapi.py
+@@ -174,9 +174,9 @@ def sync_to_peers(args):
+             sync_file_to_peers(WEBHOOKS_FILE_TO_SYNC)
+         except GlusterCmdException as e:
+             # Print stdout if stderr is empty
+-            errmsg = e.message[2] if e.message[2] else e.message[1]
++            errmsg = e.args[0][2] if e.args[0][2] else e.args[0][1]
+             handle_output_error("Failed to sync Webhooks file: [Error: {0}]"
+-                                "{1}".format(e.message[0], errmsg),
++                                "{1}".format(e.args[0][0], errmsg),
+                                 errcode=ERROR_WEBHOOK_SYNC_FAILED,
+                                 json_output=args.json)
+ 
+@@ -185,9 +185,9 @@ def sync_to_peers(args):
+             sync_file_to_peers(CUSTOM_CONFIG_FILE_TO_SYNC)
+         except GlusterCmdException as e:
+             # Print stdout if stderr is empty
+-            errmsg = e.message[2] if e.message[2] else e.message[1]
++            errmsg = e.args[0][2] if e.args[0][2] else e.args[0][1]
+             handle_output_error("Failed to sync Config file: [Error: {0}]"
+-                                "{1}".format(e.message[0], errmsg),
++                                "{1}".format(e.args[0][0], errmsg),
+                                 errcode=ERROR_CONFIG_SYNC_FAILED,
+                                 json_output=args.json)
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0520-performance-open-behind-seek-fop-should-open_and_res.patch b/SOURCES/0520-performance-open-behind-seek-fop-should-open_and_res.patch
new file mode 100644
index 0000000..c46a9ca
--- /dev/null
+++ b/SOURCES/0520-performance-open-behind-seek-fop-should-open_and_res.patch
@@ -0,0 +1,70 @@
+From a3fd2c9d85bbd23131c985599d9c9d74f66f32d2 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K <pkarampu@redhat.com>
+Date: Thu, 10 Oct 2019 10:50:59 +0530
+Subject: [PATCH 520/526] performance/open-behind: seek fop should
+ open_and_resume
+
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/23530
+> fixes: bz#1760187
+> Change-Id: I4c6ad13194d4fc5c7705e35bf9a27fce504b51f9
+> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+
+BUG: 1830713
+Change-Id: I4c6ad13194d4fc5c7705e35bf9a27fce504b51f9
+Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224484
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 27 +++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index 268c717..3ee3c40 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -711,6 +711,32 @@ err:
+ }
+ 
+ int
++ob_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
++        gf_seek_what_t what, dict_t *xdata)
++{
++    call_stub_t *stub = NULL;
++    fd_t *wind_fd = NULL;
++
++    wind_fd = ob_get_wind_fd(this, fd, NULL);
++
++    stub = fop_seek_stub(frame, default_seek_resume, wind_fd, offset, what,
++                         xdata);
++
++    fd_unref(wind_fd);
++
++    if (!stub)
++        goto err;
++
++    open_and_resume(this, wind_fd, stub);
++
++    return 0;
++err:
++    STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, 0, 0);
++
++    return 0;
++}
++
++int
+ ob_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+ {
+     call_stub_t *stub = NULL;
+@@ -1276,6 +1302,7 @@ struct xlator_fops fops = {
+     .flush = ob_flush,
+     .fsync = ob_fsync,
+     .fstat = ob_fstat,
++    .seek = ob_seek,
+     .ftruncate = ob_ftruncate,
+     .fsetxattr = ob_fsetxattr,
+     .setxattr = ob_setxattr,
+-- 
+1.8.3.1
+
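open-behind defers the real open on the brick, so every fd-based fop has to be parked in a call stub and replayed through open_and_resume() once the deferred open completes; ob_seek() above was added because seek was still winding directly. A reduced sketch of that stub-and-resume idiom, with gluster's stub machinery replaced by a plain function pointer:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for gluster's call_stub_t: a parked operation. */
    typedef struct stub {
        void (*resume)(void);
    } stub_t;

    static int fd_is_open = 0;

    static void
    do_seek(void)
    {
        printf("seek runs only after the fd was really opened\n");
    }

    /* Mirrors open_and_resume(): finish the deferred open first, then
     * replay the parked operation. */
    static void
    open_and_resume(stub_t *stub)
    {
        if (!fd_is_open)
            fd_is_open = 1; /* the background open would happen here */
        stub->resume();
        free(stub);
    }

    int
    main(void)
    {
        /* Like ob_seek(): park the fop in a stub instead of winding it. */
        stub_t *stub = malloc(sizeof(*stub));

        if (!stub)
            return 1; /* the real code unwinds with ENOMEM here */
        stub->resume = do_seek;
        open_and_resume(stub);
        return 0;
    }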
diff --git a/SOURCES/0521-open-behind-fix-missing-fd-reference.patch b/SOURCES/0521-open-behind-fix-missing-fd-reference.patch
new file mode 100644
index 0000000..8e18af8
--- /dev/null
+++ b/SOURCES/0521-open-behind-fix-missing-fd-reference.patch
@@ -0,0 +1,121 @@
+From 211d0f7dbb4991b2191925973222ebc79f010e84 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Sun, 8 Mar 2020 18:36:45 +0100
+Subject: [PATCH 521/526] open-behind: fix missing fd reference
+
+Open-behind was not keeping any reference on fds pending to be
+opened. This made it possible for a concurrent close and an entry
+fop (unlink, rename, ...) to destroy the fd while it
+was still being used.
+
+Upstream patch:
+> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24204
+> Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
+> Fixes: bz#1810934
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
+BUG: 1830713
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224485
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 27 ++++++++++++++---------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index 3ee3c40..dd2f2fd 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -206,8 +206,13 @@ ob_fd_free(ob_fd_t *ob_fd)
+     if (ob_fd->xdata)
+         dict_unref(ob_fd->xdata);
+ 
+-    if (ob_fd->open_frame)
++    if (ob_fd->open_frame) {
++        /* If we still have a frame it means that background open has never
++         * been triggered. We need to release the pending reference. */
++        fd_unref(ob_fd->fd);
++
+         STACK_DESTROY(ob_fd->open_frame->root);
++    }
+ 
+     GF_FREE(ob_fd);
+ }
+@@ -297,6 +302,7 @@ ob_wake_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+             call_resume(stub);
+     }
+ 
++    /* The background open is completed. We can release the 'fd' reference. */
+     fd_unref(fd);
+ 
+     STACK_DESTROY(frame->root);
+@@ -331,7 +337,9 @@ ob_fd_wake(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
+     }
+ 
+     if (frame) {
+-        frame->local = fd_ref(fd);
++        /* We don't need to take a reference here. We already have a reference
++         * while the open is pending. */
++        frame->local = fd;
+ 
+         STACK_WIND(frame, ob_wake_cbk, FIRST_CHILD(this),
+                    FIRST_CHILD(this)->fops->open, &ob_fd->loc, ob_fd->flags, fd,
+@@ -345,15 +353,12 @@ void
+ ob_inode_wake(xlator_t *this, struct list_head *ob_fds)
+ {
+     ob_fd_t *ob_fd = NULL, *tmp = NULL;
+-    fd_t *fd = NULL;
+ 
+     if (!list_empty(ob_fds)) {
+         list_for_each_entry_safe(ob_fd, tmp, ob_fds, ob_fds_on_inode)
+         {
+             ob_fd_wake(this, ob_fd->fd, ob_fd);
+-            fd = ob_fd->fd;
+             ob_fd_free(ob_fd);
+-            fd_unref(fd);
+         }
+     }
+ }
+@@ -365,7 +370,7 @@ ob_fd_copy(ob_fd_t *src, ob_fd_t *dst)
+     if (!src || !dst)
+         goto out;
+ 
+-    dst->fd = __fd_ref(src->fd);
++    dst->fd = src->fd;
+     dst->loc.inode = inode_ref(src->loc.inode);
+     gf_uuid_copy(dst->loc.gfid, src->loc.gfid);
+     dst->flags = src->flags;
+@@ -509,7 +514,6 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ 
+     ob_fd->ob_inode = ob_inode;
+ 
+-    /* don't do fd_ref, it'll cause leaks */
+     ob_fd->fd = fd;
+ 
+     ob_fd->open_frame = copy_frame(frame);
+@@ -539,15 +543,16 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+     }
+     UNLOCK(&fd->inode->lock);
+ 
+-    if (!open_in_progress && !unlinked) {
+-        fd_ref(fd);
++    /* We take a reference while the background open is pending or being
++     * processed. If we finally wind the request in the foreground, then
++     * ob_fd_free() will take care of this additional reference. */
++    fd_ref(fd);
+ 
++    if (!open_in_progress && !unlinked) {
+         STACK_UNWIND_STRICT(open, frame, 0, 0, fd, xdata);
+ 
+         if (!conf->lazy_open)
+             ob_fd_wake(this, fd, NULL);
+-
+-        fd_unref(fd);
+     } else {
+         ob_fd_free(ob_fd);
+         STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+-- 
+1.8.3.1
+
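The fix above boils down to one ownership rule: take a single fd reference when the background open is queued, and drop it on exactly one of two exit paths, either when the background open completes (ob_wake_cbk) or when the pending open is torn down without ever running (ob_fd_free with open_frame still set). A toy refcount model of that rule, with gluster's fd_t reduced to a counter:

    #include <assert.h>
    #include <stdio.h>

    typedef struct toy_fd {
        int refs;
    } toy_fd_t;

    static void
    fd_ref(toy_fd_t *fd)
    {
        fd->refs++;
    }

    static void
    fd_unref(toy_fd_t *fd)
    {
        assert(--fd->refs >= 0);
    }

    /* Queueing a background open takes one reference (ob_open_behind). */
    static void
    queue_open(toy_fd_t *fd)
    {
        fd_ref(fd);
    }

    /* Exit path 1: the background open completed (ob_wake_cbk). */
    static void
    open_completed(toy_fd_t *fd)
    {
        fd_unref(fd);
    }

    /* Exit path 2: the pending open was freed before it ever ran
     * (ob_fd_free while open_frame is still set). */
    static void
    freed_before_open(toy_fd_t *fd)
    {
        fd_unref(fd);
    }

    int
    main(void)
    {
        toy_fd_t a = {.refs = 1}, b = {.refs = 1}; /* callers' own refs */

        queue_open(&a);
        open_completed(&a); /* path 1 */

        queue_open(&b);
        freed_before_open(&b); /* path 2 -- never both paths for one fd */

        assert(a.refs == 1 && b.refs == 1); /* callers' refs untouched */
        printf("references balanced: %d %d\n", a.refs, b.refs);
        return 0;
    }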
diff --git a/SOURCES/0522-lcov-improve-line-coverage.patch b/SOURCES/0522-lcov-improve-line-coverage.patch
new file mode 100644
index 0000000..13ece12
--- /dev/null
+++ b/SOURCES/0522-lcov-improve-line-coverage.patch
@@ -0,0 +1,746 @@
+From 46e2bbd52d4427c1348fa38dcb5d2b5f125555f1 Mon Sep 17 00:00:00 2001
+From: Amar Tumballi <amarts@redhat.com>
+Date: Thu, 30 May 2019 15:25:01 +0530
+Subject: [PATCH 522/526] lcov: improve line coverage
+
+upcall: remove extra variable assignment and use just one
+        initialization.
+open-behind: reduce the overall number of lines in functions
+             not frequently called
+selinux: reduce some lines in init failure cases
+
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/22789
+> updates: bz#1693692
+> Change-Id: I7c1de94f2ec76a5bfe1f48a9632879b18e5fbb95
+> Signed-off-by: Amar Tumballi <amarts@redhat.com>
+
+BUG: 1830713
+Change-Id: I7c1de94f2ec76a5bfe1f48a9632879b18e5fbb95
+Signed-off-by: Amar Tumballi <amarts@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224486
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/features/selinux/src/selinux.c            |   6 +-
+ xlators/features/upcall/src/upcall.c              | 108 +++++++---------------
+ xlators/performance/open-behind/src/open-behind.c |  58 ++++--------
+ 3 files changed, 55 insertions(+), 117 deletions(-)
+
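The upcall portion of this patch is one mechanical transformation repeated across every fop: initialize op_errno to ENOMEM at declaration so the allocation-failure branch no longer needs its own assignment, removing one rarely-executed (and therefore uncovered) line per function. In miniature, with a hypothetical fop as the example:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Before the patch: op_errno = -1 at declaration, and every failure
     * branch assigned ENOMEM itself. After: start from the common value. */
    static int
    fop_example(size_t n)
    {
        int op_errno = ENOMEM; /* one initialization ... */
        void *local = malloc(n);

        if (!local)
            goto err; /* ... so no per-branch assignment is needed */

        free(local);
        return 0;
    err:
        fprintf(stderr, "fop failed: errno %d\n", op_errno);
        return -1;
    }

    int
    main(void)
    {
        return fop_example(16) ? 1 : 0;
    }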
+diff --git a/xlators/features/selinux/src/selinux.c b/xlators/features/selinux/src/selinux.c
+index 58b4c5d..e8e16cd 100644
+--- a/xlators/features/selinux/src/selinux.c
++++ b/xlators/features/selinux/src/selinux.c
+@@ -234,7 +234,6 @@ init(xlator_t *this)
+     priv = GF_CALLOC(1, sizeof(*priv), gf_selinux_mt_selinux_priv_t);
+     if (!priv) {
+         gf_log(this->name, GF_LOG_ERROR, "out of memory");
+-        ret = ENOMEM;
+         goto out;
+     }
+ 
+@@ -242,7 +241,6 @@ init(xlator_t *this)
+ 
+     this->local_pool = mem_pool_new(selinux_priv_t, 64);
+     if (!this->local_pool) {
+-        ret = -1;
+         gf_msg(this->name, GF_LOG_ERROR, ENOMEM, SL_MSG_ENOMEM,
+                "Failed to create local_t's memory pool");
+         goto out;
+@@ -252,9 +250,7 @@ init(xlator_t *this)
+     ret = 0;
+ out:
+     if (ret) {
+-        if (priv) {
+-            GF_FREE(priv);
+-        }
++        GF_FREE(priv);
+         mem_pool_destroy(this->local_pool);
+     }
+     return ret;
+diff --git a/xlators/features/upcall/src/upcall.c b/xlators/features/upcall/src/upcall.c
+index 2583c50..0795f58 100644
+--- a/xlators/features/upcall/src/upcall.c
++++ b/xlators/features/upcall/src/upcall.c
+@@ -57,14 +57,13 @@ static int32_t
+ up_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+         fd_t *fd, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -111,14 +110,13 @@ up_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+           int count, off_t off, uint32_t flags, struct iobref *iobref,
+           dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -167,14 +165,13 @@ static int32_t
+ up_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+          off_t offset, uint32_t flags, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -220,14 +217,13 @@ static int32_t
+ up_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+       struct gf_flock *flock, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -274,14 +270,13 @@ static int32_t
+ up_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+             dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -343,14 +338,13 @@ static int32_t
+ up_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
+            int32_t valid, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -410,14 +404,13 @@ static int32_t
+ up_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+           dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, newloc, NULL, oldloc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -472,14 +465,13 @@ static int32_t
+ up_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+           dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -531,14 +523,13 @@ static int32_t
+ up_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+         dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, newloc, NULL, oldloc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -592,14 +583,13 @@ static int32_t
+ up_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+          dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -653,14 +643,13 @@ static int32_t
+ up_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+          mode_t umask, dict_t *params)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -717,15 +706,13 @@ static int32_t
+ up_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+           mode_t mode, mode_t umask, fd_t *fd, dict_t *params)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
+-
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -774,14 +761,13 @@ out:
+ static int32_t
+ up_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -826,14 +812,13 @@ out:
+ static int32_t
+ up_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -852,14 +837,13 @@ err:
+ static int32_t
+ up_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -879,14 +863,13 @@ static int32_t
+ up_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+              dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -932,14 +915,13 @@ static int32_t
+ up_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
+           dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -986,14 +968,13 @@ static int32_t
+ up_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+             dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1047,14 +1028,13 @@ static int32_t
+ up_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+          dev_t rdev, mode_t umask, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1110,14 +1090,13 @@ static int32_t
+ up_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+            loc_t *loc, mode_t umask, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1164,14 +1143,13 @@ static int32_t
+ up_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+            dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1216,14 +1194,13 @@ out:
+ static int32_t
+ up_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1270,14 +1247,13 @@ static int32_t
+ up_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+            off_t off, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1334,14 +1310,13 @@ static int32_t
+ up_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+             off_t off, dict_t *dict)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1361,14 +1336,13 @@ static int32_t
+ up_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *stbuf,
+             int32_t valid, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1415,14 +1389,13 @@ static int32_t
+ up_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+              off_t offset, size_t len, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1470,14 +1443,13 @@ static int32_t
+ up_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+            size_t len, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1524,14 +1496,13 @@ static int
+ up_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+             off_t len, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1577,14 +1548,13 @@ static int32_t
+ up_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+         gf_seek_what_t what, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1652,14 +1622,13 @@ static int32_t
+ up_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+             int32_t flags, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->inode, dict);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1727,14 +1696,13 @@ static int32_t
+ up_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+              int32_t flags, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, fd, fd->inode, dict);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1800,7 +1768,7 @@ static int32_t
+ up_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+                 dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+     dict_t *xattr = NULL;
+ 
+@@ -1808,13 +1776,11 @@ up_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ 
+     xattr = dict_for_key_value(name, "", 1, _gf_true);
+     if (!xattr) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+     local = upcall_local_init(frame, this, NULL, fd, fd->inode, xattr);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1885,7 +1851,7 @@ static int32_t
+ up_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+                const char *name, dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+     dict_t *xattr = NULL;
+ 
+@@ -1893,13 +1859,11 @@ up_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ 
+     xattr = dict_for_key_value(name, "", 1, _gf_true);
+     if (!xattr) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+     local = upcall_local_init(frame, this, loc, NULL, loc->inode, xattr);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -1950,14 +1914,13 @@ static int32_t
+ up_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+              dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -2000,14 +1963,13 @@ static int32_t
+ up_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+             dict_t *xdata)
+ {
+-    int32_t op_errno = -1;
++    int32_t op_errno = ENOMEM;
+     upcall_local_t *local = NULL;
+ 
+     EXIT_IF_UPCALL_OFF(this, out);
+ 
+     local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
+     if (!local) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index dd2f2fd..cbe89ec 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -581,7 +581,7 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
+ {
+     fd_t *old_fd = NULL;
+     int ret = -1;
+-    int op_errno = 0;
++    int op_errno = ENOMEM;
+     call_stub_t *stub = NULL;
+ 
+     old_fd = fd_lookup(fd->inode, 0);
+@@ -589,7 +589,6 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
+         /* open-behind only when this is the first FD */
+         stub = fop_open_stub(frame, default_open_resume, loc, flags, fd, xdata);
+         if (!stub) {
+-            op_errno = ENOMEM;
+             fd_unref(old_fd);
+             goto err;
+         }
+@@ -603,7 +602,6 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
+ 
+     ret = ob_open_behind(frame, this, loc, flags, fd, xdata);
+     if (ret) {
+-        op_errno = ENOMEM;
+         goto err;
+     }
+ 
+@@ -900,18 +898,12 @@ int
+ ob_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+             int cmd, struct gf_flock *flock, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_finodelk_stub(frame, default_finodelk_resume, volume, fd, cmd,
+-                             flock, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(finodelk, frame, -1, ENOMEM, 0);
++    call_stub_t *stub = fop_finodelk_stub(frame, default_finodelk_resume,
++                                          volume, fd, cmd, flock, xdata);
++    if (stub)
++        open_and_resume(this, fd, stub);
++    else
++        STACK_UNWIND_STRICT(finodelk, frame, -1, ENOMEM, 0);
+ 
+     return 0;
+ }
+@@ -921,18 +913,12 @@ ob_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+             const char *basename, entrylk_cmd cmd, entrylk_type type,
+             dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fentrylk_stub(frame, default_fentrylk_resume, volume, fd,
+-                             basename, cmd, type, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fentrylk, frame, -1, ENOMEM, 0);
++    call_stub_t *stub = fop_fentrylk_stub(
++        frame, default_fentrylk_resume, volume, fd, basename, cmd, type, xdata);
++    if (stub)
++        open_and_resume(this, fd, stub);
++    else
++        STACK_UNWIND_STRICT(fentrylk, frame, -1, ENOMEM, 0);
+ 
+     return 0;
+ }
+@@ -941,18 +927,12 @@ int
+ ob_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+             gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd, optype, xattr,
+-                             xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, 0, 0);
++    call_stub_t *stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd,
++                                          optype, xattr, xdata);
++    if (stub)
++        open_and_resume(this, fd, stub);
++    else
++        STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, 0, 0);
+ 
+     return 0;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0523-open-behind-rewrite-of-internal-logic.patch b/SOURCES/0523-open-behind-rewrite-of-internal-logic.patch
new file mode 100644
index 0000000..621d5ae
--- /dev/null
+++ b/SOURCES/0523-open-behind-rewrite-of-internal-logic.patch
@@ -0,0 +1,2720 @@
+From b924c8ca8a133fc9413c8ed1407e63f1658c7e79 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Tue, 12 May 2020 23:54:54 +0200
+Subject: [PATCH 523/526] open-behind: rewrite of internal logic
+
+There was a critical flaw in the previous implementation of open-behind.
+
+When an open is done in the background, it's necessary to take a
+reference on the fd_t object because once we "fake" the open answer,
+the fd could be destroyed. However, as long as there's a reference,
+the release function won't be called. So, if the application closes
+the file descriptor without having actually opened it, there will
+always remain at least 1 reference, causing a leak.
+
+To avoid this problem, the previous implementation didn't take a
+reference on the fd_t, so there were races where the fd could be
+destroyed while it was still in use.
+
+To fix this, I've implemented a new xlator cbk that gets called from
+fuse when the application closes a file descriptor.
+
+The whole logic of handling background opens has been simplified and
+is more efficient now. A stub is created only if the fop needs to be
+delayed until an open completes; otherwise no memory allocations are
+needed.
+
+Correctly handling the close request while the open is still pending
+has added a bit of complexity, but overall normal operation is simpler.
+
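+A minimal sketch of how an xlator hooks the new cbk (illustrative only:
+ob_fdclose and ob_release are hypothetical handler names here; the
+callback signature and the fdclose/fdclosedir fields are the ones added
+by this patch):
+
+    /* Invoked through fd_close() from the fuse RELEASE handler, before
+     * the final fd_unref(), so a pending background open can be
+     * cancelled without leaking the extra reference. */
+    static void
+    ob_fdclose(xlator_t *this, fd_t *fd)
+    {
+        /* drop any state that keeps the background open alive */
+    }
+
+    struct xlator_cbks cbks = {
+        .release = ob_release,
+        .fdclose = ob_fdclose,
+    };
+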
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/24451
+> Change-Id: I6376a5491368e0e1c283cc452849032636261592
+> Fixes: #1225
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1830713
+Change-Id: I6376a5491368e0e1c283cc452849032636261592
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224487
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ libglusterfs/src/fd.c                              |   26 +
+ libglusterfs/src/glusterfs/fd.h                    |    3 +
+ libglusterfs/src/glusterfs/xlator.h                |    4 +
+ libglusterfs/src/libglusterfs.sym                  |    1 +
+ tests/basic/open-behind/open-behind.t              |  183 +++
+ tests/basic/open-behind/tester-fd.c                |   99 ++
+ tests/basic/open-behind/tester.c                   |  444 +++++++
+ tests/basic/open-behind/tester.h                   |  145 +++
+ tests/bugs/glusterfs/bug-873962-spb.t              |    1 +
+ xlators/mount/fuse/src/fuse-bridge.c               |    2 +
+ .../open-behind/src/open-behind-messages.h         |    6 +-
+ xlators/performance/open-behind/src/open-behind.c  | 1302 ++++++++------------
+ 12 files changed, 1393 insertions(+), 823 deletions(-)
+ create mode 100644 tests/basic/open-behind/open-behind.t
+ create mode 100644 tests/basic/open-behind/tester-fd.c
+ create mode 100644 tests/basic/open-behind/tester.c
+ create mode 100644 tests/basic/open-behind/tester.h
+
+diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c
+index 314546a..e4ec401 100644
+--- a/libglusterfs/src/fd.c
++++ b/libglusterfs/src/fd.c
+@@ -501,6 +501,32 @@ out:
+ }
+ 
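++/* Notify every xlator in the graph that the application has closed this
++ * fd. Called from the fuse release path before the final unref, giving
++ * xlators (e.g. open-behind) a chance to cancel a pending background
++ * open. */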
+ void
++fd_close(fd_t *fd)
++{
++    xlator_t *xl, *old_THIS;
++
++    old_THIS = THIS;
++
++    for (xl = fd->inode->table->xl->graph->first; xl != NULL; xl = xl->next) {
++        if (!xl->call_cleanup) {
++            THIS = xl;
++
++            if (IA_ISDIR(fd->inode->ia_type)) {
++                if (xl->cbks->fdclosedir != NULL) {
++                    xl->cbks->fdclosedir(xl, fd);
++                }
++            } else {
++                if (xl->cbks->fdclose != NULL) {
++                    xl->cbks->fdclose(xl, fd);
++                }
++            }
++        }
++    }
++
++    THIS = old_THIS;
++}
++
++void
+ fd_unref(fd_t *fd)
+ {
+     int32_t refcount = 0;
+diff --git a/libglusterfs/src/glusterfs/fd.h b/libglusterfs/src/glusterfs/fd.h
+index cdbe289..4d157c4 100644
+--- a/libglusterfs/src/glusterfs/fd.h
++++ b/libglusterfs/src/glusterfs/fd.h
+@@ -107,6 +107,9 @@ fd_ref(fd_t *fd);
+ void
+ fd_unref(fd_t *fd);
+ 
++void
++fd_close(fd_t *fd);
++
+ fd_t *
+ fd_create(struct _inode *inode, pid_t pid);
+ 
+diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
+index 8650ccc..273039a 100644
+--- a/libglusterfs/src/glusterfs/xlator.h
++++ b/libglusterfs/src/glusterfs/xlator.h
+@@ -705,6 +705,8 @@ typedef size_t (*cbk_inodectx_size_t)(xlator_t *this, inode_t *inode);
+ 
+ typedef size_t (*cbk_fdctx_size_t)(xlator_t *this, fd_t *fd);
+ 
++typedef void (*cbk_fdclose_t)(xlator_t *this, fd_t *fd);
++
+ struct xlator_cbks {
+     cbk_forget_t forget;
+     cbk_release_t release;
+@@ -715,6 +717,8 @@ struct xlator_cbks {
+     cbk_ictxmerge_t ictxmerge;
+     cbk_inodectx_size_t ictxsize;
+     cbk_fdctx_size_t fdctxsize;
++    cbk_fdclose_t fdclose;
++    cbk_fdclose_t fdclosedir;
+ };
+ 
+ typedef int32_t (*dumpop_priv_t)(xlator_t *this);
+diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
+index bc770e2..0a0862e 100644
+--- a/libglusterfs/src/libglusterfs.sym
++++ b/libglusterfs/src/libglusterfs.sym
+@@ -456,6 +456,7 @@ event_unregister_close
+ fd_anonymous
+ fd_anonymous_with_flags
+ fd_bind
++fd_close
+ fd_create
+ fd_create_uint64
+ __fd_ctx_del
+diff --git a/tests/basic/open-behind/open-behind.t b/tests/basic/open-behind/open-behind.t
+new file mode 100644
+index 0000000..5e865d6
+--- /dev/null
++++ b/tests/basic/open-behind/open-behind.t
+@@ -0,0 +1,183 @@
++#!/bin/bash
++
++WD="$(dirname "${0}")"
++
++. ${WD}/../../include.rc
++. ${WD}/../../volume.rc
++
++function assign() {
++    local _assign_var="${1}"
++    local _assign_value="${2}"
++
++    printf -v "${_assign_var}" "%s" "${_assign_value}"
++}
++
++function pipe_create() {
++    local _pipe_create_var="${1}"
++    local _pipe_create_name
++    local _pipe_create_fd
++
++    _pipe_create_name="$(mktemp -u)"
++    mkfifo "${_pipe_create_name}"
++    exec {_pipe_create_fd}<>"${_pipe_create_name}"
++    rm "${_pipe_create_name}"
++
++    assign "${_pipe_create_var}" "${_pipe_create_fd}"
++}
++
++function pipe_close() {
++    local _pipe_close_fd="${!1}"
++
++    exec {_pipe_close_fd}>&-
++}
++
++function tester_start() {
++    declare -ag tester
++    local tester_in
++    local tester_out
++
++    pipe_create tester_in
++    pipe_create tester_out
++
++    ${WD}/tester <&${tester_in} >&${tester_out} &
++
++    tester=("$!" "${tester_in}" "${tester_out}")
++}
++
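++# Send one command line to the tester process and echo its reply. Succeeds
++# only if the reply starts with "OK".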
++function tester_send() {
++    declare -ag tester
++    local tester_res
++    local tester_extra
++
++    echo "${*}" >&${tester[1]}
++
++    read -t 3 -u ${tester[2]} tester_res tester_extra
++    echo "${tester_res} ${tester_extra}"
++    if [[ "${tester_res}" == "OK" ]]; then
++        return 0
++    fi
++
++    return 1
++}
++
++function tester_stop() {
++    declare -ag tester
++    local tester_res
++
++    tester_send "quit"
++
++    tester_res=0
++    if ! wait ${tester[0]}; then
++        tester_res=$?
++    fi
++
++    unset tester
++
++    return ${tester_res}
++}
++
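++# Count how many fds of the brick process currently point at the given
++# file, matching by inode number under /proc/${BRICK_PID}/fd.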
++function count_open() {
++    local file="$(realpath "${B0}/${V0}/${1}")"
++    local count="0"
++    local inode
++    local ref
++
++    inode="$(stat -c %i "${file}")"
++
++    for fd in /proc/${BRICK_PID}/fd/*; do
++        ref="$(readlink "${fd}")"
++        if [[ "${ref}" == "${B0}/${V0}/"* ]]; then
++            if [[ "$(stat -c %i "${ref}")" == "${inode}" ]]; then
++                count="$((${count} + 1))"
++            fi
++        fi
++    done
++
++    echo "${count}"
++}
++
++cleanup
++
++TEST build_tester ${WD}/tester.c ${WD}/tester-fd.c
++
++TEST glusterd
++TEST pidof glusterd
++TEST ${CLI} volume create ${V0} ${H0}:${B0}/${V0}
++TEST ${CLI} volume set ${V0} flush-behind off
++TEST ${CLI} volume set ${V0} write-behind off
++TEST ${CLI} volume set ${V0} quick-read off
++TEST ${CLI} volume set ${V0} stat-prefetch on
++TEST ${CLI} volume set ${V0} io-cache off
++TEST ${CLI} volume set ${V0} open-behind on
++TEST ${CLI} volume set ${V0} lazy-open off
++TEST ${CLI} volume set ${V0} read-after-open off
++TEST ${CLI} volume start ${V0}
++
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++BRICK_PID="$(get_brick_pid ${V0} ${H0} ${B0}/${V0})"
++
++TEST touch "${M0}/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++TEST tester_start
++
++TEST tester_send fd open 0 "${M0}/test"
++EXPECT_WITHIN 5 "1" count_open "/test"
++TEST tester_send fd close 0
++EXPECT_WITHIN 5 "0" count_open "/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST ${CLI} volume set ${V0} lazy-open on
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++TEST tester_send fd open 0 "${M0}/test"
++sleep 2
++EXPECT "0" count_open "/test"
++TEST tester_send fd write 0 "test"
++EXPECT "1" count_open "/test"
++TEST tester_send fd close 0
++EXPECT_WITHIN 5 "0" count_open "/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++TEST tester_send fd open 0 "${M0}/test"
++EXPECT "0" count_open "/test"
++EXPECT "test" tester_send fd read 0 64
++# Even though read-after-open is disabled, use-anonymous-fd is also disabled,
++# so reads need to open the file first.
++EXPECT "1" count_open "/test"
++TEST tester_send fd close 0
++EXPECT "0" count_open "/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++TEST tester_send fd open 0 "${M0}/test"
++EXPECT "0" count_open "/test"
++TEST tester_send fd open 1 "${M0}/test"
++EXPECT "2" count_open "/test"
++TEST tester_send fd close 0
++EXPECT_WITHIN 5 "1" count_open "/test"
++TEST tester_send fd close 1
++EXPECT_WITHIN 5 "0" count_open "/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++TEST ${CLI} volume set ${V0} read-after-open on
++TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
++
++TEST tester_send fd open 0 "${M0}/test"
++EXPECT "0" count_open "/test"
++EXPECT "test" tester_send fd read 0 64
++EXPECT "1" count_open "/test"
++TEST tester_send fd close 0
++EXPECT_WITHIN 5 "0" count_open "/test"
++
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
++
++TEST tester_stop
++
++cleanup
+diff --git a/tests/basic/open-behind/tester-fd.c b/tests/basic/open-behind/tester-fd.c
+new file mode 100644
+index 0000000..00f02bc
+--- /dev/null
++++ b/tests/basic/open-behind/tester-fd.c
+@@ -0,0 +1,99 @@
++/*
++  Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
++  This file is part of GlusterFS.
++
++  This file is licensed to you under your choice of the GNU Lesser
++  General Public License, version 3 or any later version (LGPLv3 or
++  later), or the GNU General Public License, version 2 (GPLv2), in all
++  cases as published by the Free Software Foundation.
++*/
++
++#include "tester.h"
++
++#include <stdlib.h>
++#include <unistd.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <string.h>
++#include <ctype.h>
++#include <errno.h>
++
++static int32_t
++fd_open(context_t *ctx, command_t *cmd)
++{
++    obj_t *obj;
++    int32_t fd;
++
++    obj = cmd->args[0].obj.ref;
++
++    fd = open(cmd->args[1].str.data, O_RDWR);
++    if (fd < 0) {
++        return error(errno, "open() failed");
++    }
++
++    obj->type = OBJ_TYPE_FD;
++    obj->fd = fd;
++
++    out_ok("%d", fd);
++
++    return 0;
++}
++
++static int32_t
++fd_close(context_t *ctx, command_t *cmd)
++{
++    obj_t *obj;
++
++    obj = cmd->args[0].obj.ref;
++    obj->type = OBJ_TYPE_NONE;
++
++    if (close(obj->fd) != 0) {
++        return error(errno, "close() failed");
++    }
++
++    out_ok();
++
++    return 0;
++}
++
++static int32_t
++fd_write(context_t *ctx, command_t *cmd)
++{
++    ssize_t len, ret;
++
++    len = strlen(cmd->args[1].str.data);
++    ret = write(cmd->args[0].obj.ref->fd, cmd->args[1].str.data, len);
++    if (ret < 0) {
++        return error(errno, "write() failed");
++    }
++
++    out_ok("%zd", ret);
++
++    return 0;
++}
++
++static int32_t
++fd_read(context_t *ctx, command_t *cmd)
++{
++    char data[cmd->args[1].num.value + 1];
++    ssize_t ret;
++
++    ret = read(cmd->args[0].obj.ref->fd, data, cmd->args[1].num.value);
++    if (ret < 0) {
++        return error(errno, "read() failed");
++    }
++
++    data[ret] = 0;
++
++    out_ok("%zd %s", ret, data);
++
++    return 0;
++}
++
++command_t fd_commands[] = {
++    {"open", fd_open, CMD_ARGS(ARG_VAL(OBJ_TYPE_NONE), ARG_STR(1024))},
++    {"close", fd_close, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD))},
++    {"write", fd_write, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_STR(1024))},
++    {"read", fd_read, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_NUM(0, 1024))},
++    CMD_END};
+diff --git a/tests/basic/open-behind/tester.c b/tests/basic/open-behind/tester.c
+new file mode 100644
+index 0000000..b2da71c
+--- /dev/null
++++ b/tests/basic/open-behind/tester.c
+@@ -0,0 +1,444 @@
++/*
++  Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
++  This file is part of GlusterFS.
++
++  This file is licensed to you under your choice of the GNU Lesser
++  General Public License, version 3 or any later version (LGPLv3 or
++  later), or the GNU General Public License, version 2 (GPLv2), in all
++  cases as published by the Free Software Foundation.
++*/
++
++#include "tester.h"
++
++#include <stdlib.h>
++#include <unistd.h>
++#include <string.h>
++#include <ctype.h>
++#include <errno.h>
++
++static void *
++mem_alloc(size_t size)
++{
++    void *ptr;
++
++    ptr = malloc(size);
++    if (ptr == NULL) {
++        error(ENOMEM, "Failed to allocate memory (%zu bytes)", size);
++    }
++
++    return ptr;
++}
++
++static void
++mem_free(void *ptr)
++{
++    free(ptr);
++}
++
++static bool
++buffer_create(context_t *ctx, size_t size)
++{
++    ctx->buffer.base = mem_alloc(size);
++    if (ctx->buffer.base == NULL) {
++        return false;
++    }
++
++    ctx->buffer.size = size;
++    ctx->buffer.len = 0;
++    ctx->buffer.pos = 0;
++
++    return true;
++}
++
++static void
++buffer_destroy(context_t *ctx)
++{
++    mem_free(ctx->buffer.base);
++    ctx->buffer.size = 0;
++    ctx->buffer.len = 0;
++}
++
++static int32_t
++buffer_get(context_t *ctx)
++{
++    ssize_t len;
++
++    if (ctx->buffer.pos >= ctx->buffer.len) {
++        len = read(0, ctx->buffer.base, ctx->buffer.size);
++        if (len < 0) {
++            return error(errno, "read() failed");
++        }
++        if (len == 0) {
++            return 0;
++        }
++
++        ctx->buffer.len = len;
++        ctx->buffer.pos = 0;
++    }
++
++    return ctx->buffer.base[ctx->buffer.pos++];
++}
++
++static int32_t
++str_skip_spaces(context_t *ctx, int32_t current)
++{
++    while ((current > 0) && (current != '\n') && isspace(current)) {
++        current = buffer_get(ctx);
++    }
++
++    return current;
++}
++
++static int32_t
++str_token(context_t *ctx, char *buffer, uint32_t size, int32_t current)
++{
++    uint32_t len;
++
++    current = str_skip_spaces(ctx, current);
++
++    len = 0;
++    while ((size > 0) && (current > 0) && (current != '\n') &&
++           !isspace(current)) {
++        len++;
++        *buffer++ = current;
++        size--;
++        current = buffer_get(ctx);
++    }
++
++    if (len == 0) {
++        return error(ENODATA, "Expecting a token");
++    }
++
++    if (size == 0) {
++        return error(ENOBUFS, "Token too long");
++    }
++
++    *buffer = 0;
++
++    return current;
++}
++
++static int32_t
++str_number(context_t *ctx, uint64_t min, uint64_t max, uint64_t *value,
++           int32_t current)
++{
++    char text[32], *ptr;
++    uint64_t num;
++
++    current = str_token(ctx, text, sizeof(text), current);
++    if (current > 0) {
++        num = strtoul(text, &ptr, 0);
++        if ((*ptr != 0) || (num < min) || (num > max)) {
++            return error(ERANGE, "Invalid number");
++        }
++        *value = num;
++    }
++
++    return current;
++}
++
++static int32_t
++str_eol(context_t *ctx, int32_t current)
++{
++    current = str_skip_spaces(ctx, current);
++    if (current != '\n') {
++        return error(EINVAL, "Expecting end of command");
++    }
++
++    return current;
++}
++
++static void
++str_skip(context_t *ctx, int32_t current)
++{
++    while ((current > 0) && (current != '\n')) {
++        current = buffer_get(ctx);
++    }
++}
++
++static int32_t
++cmd_parse_obj(context_t *ctx, arg_t *arg, int32_t current)
++{
++    obj_t *obj;
++    uint64_t id;
++
++    current = str_number(ctx, 0, ctx->obj_count, &id, current);
++    if (current <= 0) {
++        return current;
++    }
++
++    obj = &ctx->objs[id];
++    if (obj->type != arg->obj.type) {
++        if (obj->type != OBJ_TYPE_NONE) {
++            return error(EBUSY, "Object is in use");
++        }
++        return error(ENOENT, "Object is not defined");
++    }
++
++    arg->obj.ref = obj;
++
++    return current;
++}
++
++static int32_t
++cmd_parse_num(context_t *ctx, arg_t *arg, int32_t current)
++{
++    return str_number(ctx, arg->num.min, arg->num.max, &arg->num.value,
++                      current);
++}
++
++static int32_t
++cmd_parse_str(context_t *ctx, arg_t *arg, int32_t current)
++{
++    return str_token(ctx, arg->str.data, arg->str.size, current);
++}
++
++static int32_t
++cmd_parse_args(context_t *ctx, command_t *cmd, int32_t current)
++{
++    arg_t *arg;
++
++    for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
++        switch (arg->type) {
++            case ARG_TYPE_OBJ:
++                current = cmd_parse_obj(ctx, arg, current);
++                break;
++            case ARG_TYPE_NUM:
++                current = cmd_parse_num(ctx, arg, current);
++                break;
++            case ARG_TYPE_STR:
++                current = cmd_parse_str(ctx, arg, current);
++                break;
++            default:
++                return error(EINVAL, "Unknown argument type");
++        }
++    }
++
++    if (current < 0) {
++        return current;
++    }
++
++    current = str_eol(ctx, current);
++    if (current <= 0) {
++        return error(EINVAL, "Syntax error");
++    }
++
++    return cmd->handler(ctx, cmd);
++}
++
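++/* Read the next command from the input buffer, resolve it against the
++ * (possibly nested) command table and execute the matching handler. */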
++static int32_t
++cmd_parse(context_t *ctx, command_t *cmds)
++{
++    char text[32];
++    command_t *cmd;
++    int32_t current;
++
++    cmd = cmds;
++    do {
++        current = str_token(ctx, text, sizeof(text), buffer_get(ctx));
++        if (current <= 0) {
++            return current;
++        }
++
++        while (cmd->name != NULL) {
++            if (strcmp(cmd->name, text) == 0) {
++                if (cmd->handler != NULL) {
++                    return cmd_parse_args(ctx, cmd, current);
++                }
++                cmd = cmd->cmds;
++                break;
++            }
++            cmd++;
++        }
++    } while (cmd->name != NULL);
++
++    str_skip(ctx, current);
++
++    return error(ENOTSUP, "Unknown command");
++}
++
++static void
++cmd_fini(context_t *ctx, command_t *cmds)
++{
++    command_t *cmd;
++    arg_t *arg;
++
++    for (cmd = cmds; cmd->name != NULL; cmd++) {
++        if (cmd->handler == NULL) {
++            cmd_fini(ctx, cmd->cmds);
++        } else {
++            for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
++                switch (arg->type) {
++                    case ARG_TYPE_STR:
++                        mem_free(arg->str.data);
++                        arg->str.data = NULL;
++                        break;
++                    default:
++                        break;
++                }
++            }
++        }
++    }
++}
++
++static bool
++cmd_init(context_t *ctx, command_t *cmds)
++{
++    command_t *cmd;
++    arg_t *arg;
++
++    for (cmd = cmds; cmd->name != NULL; cmd++) {
++        if (cmd->handler == NULL) {
++            if (!cmd_init(ctx, cmd->cmds)) {
++                return false;
++            }
++        } else {
++            for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
++                switch (arg->type) {
++                    case ARG_TYPE_STR:
++                        arg->str.data = mem_alloc(arg->str.size);
++                        if (arg->str.data == NULL) {
++                            return false;
++                        }
++                        break;
++                    default:
++                        break;
++                }
++            }
++        }
++    }
++
++    return true;
++}
++
++static bool
++objs_create(context_t *ctx, uint32_t count)
++{
++    uint32_t i;
++
++    ctx->objs = mem_alloc(sizeof(obj_t) * count);
++    if (ctx->objs == NULL) {
++        return false;
++    }
++    ctx->obj_count = count;
++
++    for (i = 0; i < count; i++) {
++        ctx->objs[i].type = OBJ_TYPE_NONE;
++    }
++
++    return true;
++}
++
++static int32_t
++objs_destroy(context_t *ctx)
++{
++    uint32_t i;
++    int32_t err;
++
++    err = 0;
++    for (i = 0; i < ctx->obj_count; i++) {
++        if (ctx->objs[i].type != OBJ_TYPE_NONE) {
++            err = error(ENOTEMPTY, "Objects not destroyed");
++            break;
++        }
++    }
++
++    mem_free(ctx->objs);
++    ctx->objs = NULL;
++    ctx->obj_count = 0;
++
++    return err;
++}
++
++static context_t *
++init(size_t size, uint32_t objs, command_t *cmds)
++{
++    context_t *ctx;
++
++    ctx = mem_alloc(sizeof(context_t));
++    if (ctx == NULL) {
++        goto failed;
++    }
++
++    if (!buffer_create(ctx, size)) {
++        goto failed_ctx;
++    }
++
++    if (!objs_create(ctx, objs)) {
++        goto failed_buffer;
++    }
++
++    if (!cmd_init(ctx, cmds)) {
++        goto failed_objs;
++    }
++
++    ctx->active = true;
++
++    return ctx;
++
++failed_objs:
++    cmd_fini(ctx, cmds);
++    objs_destroy(ctx);
++failed_buffer:
++    buffer_destroy(ctx);
++failed_ctx:
++    mem_free(ctx);
++failed:
++    return NULL;
++}
++
++static int32_t
++fini(context_t *ctx, command_t *cmds)
++{
++    int32_t ret;
++
++    cmd_fini(ctx, cmds);
++    buffer_destroy(ctx);
++
++    ret = objs_destroy(ctx);
++
++    ctx->active = false;
++
++    return ret;
++}
++
++static int32_t
++exec_quit(context_t *ctx, command_t *cmd)
++{
++    ctx->active = false;
++
++    return 0;
++}
++
++static command_t commands[] = {{"fd", NULL, CMD_SUB(fd_commands)},
++                               {"quit", exec_quit, CMD_ARGS()},
++                               CMD_END};
++
++int32_t
++main(int32_t argc, char *argv[])
++{
++    context_t *ctx;
++    int32_t res;
++
++    ctx = init(1024, 16, commands);
++    if (ctx == NULL) {
++        return 1;
++    }
++
++    do {
++        res = cmd_parse(ctx, commands);
++        if (res < 0) {
++            out_err(-res);
++        }
++    } while (ctx->active);
++
++    res = fini(ctx, commands);
++    if (res >= 0) {
++        out_ok();
++        return 0;
++    }
++
++    out_err(-res);
++
++    return 1;
++}
+diff --git a/tests/basic/open-behind/tester.h b/tests/basic/open-behind/tester.h
+new file mode 100644
+index 0000000..64e940c
+--- /dev/null
++++ b/tests/basic/open-behind/tester.h
+@@ -0,0 +1,145 @@
++/*
++  Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
++  This file is part of GlusterFS.
++
++  This file is licensed to you under your choice of the GNU Lesser
++  General Public License, version 3 or any later version (LGPLv3 or
++  later), or the GNU General Public License, version 2 (GPLv2), in all
++  cases as published by the Free Software Foundation.
++*/
++
++#ifndef __TESTER_H__
++#define __TESTER_H__
++
++#include <stdio.h>
++#include <inttypes.h>
++#include <stdbool.h>
++
++enum _obj_type;
++typedef enum _obj_type obj_type_t;
++
++enum _arg_type;
++typedef enum _arg_type arg_type_t;
++
++struct _buffer;
++typedef struct _buffer buffer_t;
++
++struct _obj;
++typedef struct _obj obj_t;
++
++struct _context;
++typedef struct _context context_t;
++
++struct _arg;
++typedef struct _arg arg_t;
++
++struct _command;
++typedef struct _command command_t;
++
++enum _obj_type { OBJ_TYPE_NONE, OBJ_TYPE_FD };
++
++enum _arg_type { ARG_TYPE_NONE, ARG_TYPE_OBJ, ARG_TYPE_NUM, ARG_TYPE_STR };
++
++struct _buffer {
++    char *base;
++    uint32_t size;
++    uint32_t len;
++    uint32_t pos;
++};
++
++struct _obj {
++    obj_type_t type;
++    union {
++        int32_t fd;
++    };
++};
++
++struct _context {
++    obj_t *objs;
++    buffer_t buffer;
++    uint32_t obj_count;
++    bool active;
++};
++
++struct _arg {
++    arg_type_t type;
++    union {
++        struct {
++            obj_type_t type;
++            obj_t *ref;
++        } obj;
++        struct {
++            uint64_t value;
++            uint64_t min;
++            uint64_t max;
++        } num;
++        struct {
++            uint32_t size;
++            char *data;
++        } str;
++    };
++};
++
++struct _command {
++    const char *name;
++    int32_t (*handler)(context_t *ctx, command_t *cmd);
++    union {
++        arg_t *args;
++        command_t *cmds;
++    };
++};
++
++#define msg(_stream, _fmt, _args...)                                           \
++    do {                                                                       \
++        fprintf(_stream, _fmt "\n", ##_args);                                  \
++        fflush(_stream);                                                       \
++    } while (0)
++
++#define msg_out(_fmt, _args...) msg(stdout, _fmt, ##_args)
++#define msg_err(_err, _fmt, _args...)                                          \
++    ({                                                                         \
++        int32_t __msg_err = (_err);                                            \
++        msg(stderr, "[%4u:%-15s] " _fmt, __LINE__, __FUNCTION__, __msg_err,    \
++            ##_args);                                                          \
++        -__msg_err;                                                            \
++    })
++
++#define error(_err, _fmt, _args...) msg_err(_err, "E(%4d) " _fmt, ##_args)
++#define warn(_err, _fmt, _args...) msg_err(_err, "W(%4d) " _fmt, ##_args)
++#define info(_err, _fmt, _args...) msg_err(_err, "I(%4d) " _fmt, ##_args)
++
++#define out_ok(_args...) msg_out("OK " _args)
++#define out_err(_err) msg_out("ERR %d", _err)
++
++#define ARG_END                                                                \
++    {                                                                          \
++        ARG_TYPE_NONE                                                          \
++    }
++
++#define CMD_ARGS1(_x, _args...)                                                \
++    .args = (arg_t[]) { _args }
++#define CMD_ARGS(_args...) CMD_ARGS1(, ##_args, ARG_END)
++
++#define CMD_SUB(_cmds) .cmds = _cmds
++
++#define CMD_END                                                                \
++    {                                                                          \
++        NULL, NULL, CMD_SUB(NULL)                                              \
++    }
++
++#define ARG_VAL(_type)                                                         \
++    {                                                                          \
++        ARG_TYPE_OBJ, .obj = {.type = _type }                                  \
++    }
++#define ARG_NUM(_min, _max)                                                    \
++    {                                                                          \
++        ARG_TYPE_NUM, .num = {.min = _min, .max = _max }                       \
++    }
++#define ARG_STR(_size)                                                         \
++    {                                                                          \
++        ARG_TYPE_STR, .str = {.size = _size }                                  \
++    }
++
++extern command_t fd_commands[];
++
++#endif /* __TESTER_H__ */
+\ No newline at end of file
+diff --git a/tests/bugs/glusterfs/bug-873962-spb.t b/tests/bugs/glusterfs/bug-873962-spb.t
+index db84a22..db71cc0 100644
+--- a/tests/bugs/glusterfs/bug-873962-spb.t
++++ b/tests/bugs/glusterfs/bug-873962-spb.t
+@@ -14,6 +14,7 @@ TEST $CLI volume set $V0 performance.io-cache off
+ TEST $CLI volume set $V0 performance.write-behind off
+ TEST $CLI volume set $V0 performance.stat-prefetch off
+ TEST $CLI volume set $V0 performance.read-ahead off
++TEST $CLI volume set $V0 performance.open-behind off
+ TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+ TEST $CLI volume start $V0
+ TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index 919eea3..76b5809 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -3398,6 +3398,8 @@ fuse_release(xlator_t *this, fuse_in_header_t *finh, void *msg,
+     gf_log("glusterfs-fuse", GF_LOG_TRACE,
+            "finh->unique: %" PRIu64 ": RELEASE %p", finh->unique, state->fd);
+ 
++    fd_close(state->fd);
++
+     fuse_fd_ctx_destroy(this, state->fd);
+     fd_unref(fd);
+ 
+diff --git a/xlators/performance/open-behind/src/open-behind-messages.h b/xlators/performance/open-behind/src/open-behind-messages.h
+index f250824..0e78917 100644
+--- a/xlators/performance/open-behind/src/open-behind-messages.h
++++ b/xlators/performance/open-behind/src/open-behind-messages.h
+@@ -23,6 +23,10 @@
+  */
+ 
+ GLFS_MSGID(OPEN_BEHIND, OPEN_BEHIND_MSG_XLATOR_CHILD_MISCONFIGURED,
+-           OPEN_BEHIND_MSG_VOL_MISCONFIGURED, OPEN_BEHIND_MSG_NO_MEMORY);
++           OPEN_BEHIND_MSG_VOL_MISCONFIGURED, OPEN_BEHIND_MSG_NO_MEMORY,
++           OPEN_BEHIND_MSG_FAILED, OPEN_BEHIND_MSG_BAD_STATE);
++
++#define OPEN_BEHIND_MSG_FAILED_STR "Failed to submit fop"
++#define OPEN_BEHIND_MSG_BAD_STATE_STR "Unexpected state"
+ 
+ #endif /* _OPEN_BEHIND_MESSAGES_H_ */
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index cbe89ec..e43fe73 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -16,6 +16,18 @@
+ #include "open-behind-messages.h"
+ #include <glusterfs/glusterfs-acl.h>
+ 
++/* Note: The initial design of open-behind was made to cover the simple case
++ *       of open, read, close for small files. This pattern combined with
++ *       quick-read can do the whole operation without a single request to the
++ *       bricks (except the initial lookup).
++ *
++ *       The way to do this has been improved, but the logic remains the same.
++ *       Basically, this means that any operation sent to the fd or the inode
++ *       that is not a read causes the open request to be sent to the
++ *       bricks, and all future operations will be executed synchronously,
++ *       including opens (this is reset once all fd's are closed).
++ */
++
+ typedef struct ob_conf {
+     gf_boolean_t use_anonymous_fd; /* use anonymous FDs wherever safe
+                                       e.g - fstat() readv()
+@@ -32,1096 +44,754 @@ typedef struct ob_conf {
+                                         */
+ } ob_conf_t;
+ 
+-typedef struct ob_inode {
+-    inode_t *inode;
+-    struct list_head resume_fops;
+-    struct list_head ob_fds;
+-    int count;
+-    int op_ret;
+-    int op_errno;
+-    gf_boolean_t open_in_progress;
+-    int unlinked;
+-} ob_inode_t;
++/* A negative state represents an errno value negated. In this case the
++ * current operation cannot be processed. */
++typedef enum _ob_state {
++    /* There are no opens on the inode or the first open is already
++     * completed. The current operation can be sent directly. */
++    OB_STATE_READY = 0,
+ 
+-typedef struct ob_fd {
+-    call_frame_t *open_frame;
+-    loc_t loc;
+-    dict_t *xdata;
+-    int flags;
+-    int op_errno;
+-    ob_inode_t *ob_inode;
+-    fd_t *fd;
+-    gf_boolean_t opened;
+-    gf_boolean_t ob_inode_fops_waiting;
+-    struct list_head list;
+-    struct list_head ob_fds_on_inode;
+-} ob_fd_t;
++    /* There's an open pending and it has been triggered. The current
++     * operation should be "stubbified" and processed with
++     * ob_stub_dispatch(). */
++    OB_STATE_OPEN_TRIGGERED,
+ 
+-ob_inode_t *
+-ob_inode_alloc(inode_t *inode)
+-{
+-    ob_inode_t *ob_inode = NULL;
++    /* There's an open pending but it has not been triggered. The current
++     * operation can be processed directly but using an anonymous fd. */
++    OB_STATE_OPEN_PENDING,
+ 
+-    ob_inode = GF_CALLOC(1, sizeof(*ob_inode), gf_ob_mt_inode_t);
+-    if (ob_inode == NULL)
+-        goto out;
++    /* The current operation is the first open on the inode. */
++    OB_STATE_FIRST_OPEN
++} ob_state_t;
+ 
+-    ob_inode->inode = inode;
+-    INIT_LIST_HEAD(&ob_inode->resume_fops);
+-    INIT_LIST_HEAD(&ob_inode->ob_fds);
+-out:
+-    return ob_inode;
+-}
+-
+-void
+-ob_inode_free(ob_inode_t *ob_inode)
+-{
+-    if (ob_inode == NULL)
+-        goto out;
++typedef struct ob_inode {
++    /* List of stubs pending on the first open. Once the first open is
++     * complete, all these stubs will be resubmitted, and dependencies
++     * will be checked again. */
++    struct list_head resume_fops;
+ 
+-    list_del_init(&ob_inode->resume_fops);
+-    list_del_init(&ob_inode->ob_fds);
++    /* The inode this object references. */
++    inode_t *inode;
+ 
+-    GF_FREE(ob_inode);
+-out:
+-    return;
+-}
++    /* The fd from the first open sent to this inode. It is set from
++     * the moment the open starts being processed until the open is
++     * fully executed or the fd is closed before actually being opened.
++     * It's NULL in all other cases. */
++    fd_t *first_fd;
++
++    /* The stub from the first open operation. When the open fop starts
++     * being processed, it's assigned the OB_OPEN_PREPARING value
++     * until the actual stub is created. This is necessary to avoid
++     * creating the stub inside a locked region. Once the stub is
++     * successfully created, it's assigned here. This value is set
++     * to NULL once the stub is resumed. */
++    call_stub_t *first_open;
++
++    /* The total number of currently open fd's on this inode. */
++    int32_t open_count;
++
++    /* This flag is set as soon as we know that the open will be
++     * sent to the bricks, even before the stub is ready. */
++    bool triggered;
++} ob_inode_t;
+ 
+-ob_inode_t *
+-ob_inode_get(xlator_t *this, inode_t *inode)
++/* Dummy pointer used temporarily while the actual open stub is being created */
++#define OB_OPEN_PREPARING ((call_stub_t *)-1)
++
++#define OB_POST_COMMON(_fop, _xl, _frame, _fd, _args...)                       \
++    case OB_STATE_FIRST_OPEN:                                                  \
++        gf_smsg((_xl)->name, GF_LOG_ERROR, EINVAL, OPEN_BEHIND_MSG_BAD_STATE,  \
++                "fop=%s", #_fop, "state=%d", __ob_state, NULL);                \
++        default_##_fop##_failure_cbk(_frame, EINVAL);                          \
++        break;                                                                 \
++    case OB_STATE_READY:                                                       \
++        default_##_fop(_frame, _xl, ##_args);                                  \
++        break;                                                                 \
++    case OB_STATE_OPEN_TRIGGERED: {                                            \
++        call_stub_t *__ob_stub = fop_##_fop##_stub(_frame, ob_##_fop,          \
++                                                   ##_args);                   \
++        if (__ob_stub != NULL) {                                               \
++            ob_stub_dispatch(_xl, __ob_inode, _fd, __ob_stub);                 \
++            break;                                                             \
++        }                                                                      \
++        __ob_state = -ENOMEM;                                                  \
++    }                                                                          \
++    default:                                                                   \
++        gf_smsg((_xl)->name, GF_LOG_ERROR, -__ob_state,                        \
++                OPEN_BEHIND_MSG_FAILED, "fop=%s", #_fop, NULL);                \
++        default_##_fop##_failure_cbk(_frame, -__ob_state)
++
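++/* Dispatch an fd-based fop according to the open-behind state: directly
++ * when the inode is ready, using an anonymous fd while the first open is
++ * pending but not triggered, or queued as a stub until the triggered
++ * open completes (see OB_POST_COMMON above). */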
++#define OB_POST_FD(_fop, _xl, _frame, _fd, _trigger, _args...)                 \
++    do {                                                                       \
++        ob_inode_t *__ob_inode;                                                \
++        fd_t *__first_fd;                                                      \
++        ob_state_t __ob_state = ob_open_and_resume_fd(                         \
++            _xl, _fd, 0, true, _trigger, &__ob_inode, &__first_fd);            \
++        switch (__ob_state) {                                                  \
++            case OB_STATE_OPEN_PENDING:                                        \
++                if (!(_trigger)) {                                             \
++                    fd_t *__ob_fd = fd_anonymous_with_flags((_fd)->inode,      \
++                                                            (_fd)->flags);     \
++                    if (__ob_fd != NULL) {                                     \
++                        default_##_fop(_frame, _xl, ##_args);                  \
++                        fd_unref(__ob_fd);                                     \
++                        break;                                                 \
++                    }                                                          \
++                    __ob_state = -ENOMEM;                                      \
++                }                                                              \
++                OB_POST_COMMON(_fop, _xl, _frame, __first_fd, ##_args);        \
++        }                                                                      \
++    } while (0)
++
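++/* A flush received while the open is still pending can be answered
++ * successfully right away: nothing has been sent to the bricks yet. */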
++#define OB_POST_FLUSH(_xl, _frame, _fd, _args...)                              \
++    do {                                                                       \
++        ob_inode_t *__ob_inode;                                                \
++        fd_t *__first_fd;                                                      \
++        ob_state_t __ob_state = ob_open_and_resume_fd(                         \
++            _xl, _fd, 0, true, false, &__ob_inode, &__first_fd);               \
++        switch (__ob_state) {                                                  \
++            case OB_STATE_OPEN_PENDING:                                        \
++                default_flush_cbk(_frame, NULL, _xl, 0, 0, NULL);              \
++                break;                                                         \
++                OB_POST_COMMON(flush, _xl, _frame, __first_fd, ##_args);       \
++        }                                                                      \
++    } while (0)
++
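++/* Variant of OB_POST_FD for fops addressed to an inode rather than an
++ * fd. There is no anonymous-fd shortcut here; a pending open falls
++ * through to OB_POST_COMMON. */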
++#define OB_POST_INODE(_fop, _xl, _frame, _inode, _trigger, _args...)           \
++    do {                                                                       \
++        ob_inode_t *__ob_inode;                                                \
++        fd_t *__first_fd;                                                      \
++        ob_state_t __ob_state = ob_open_and_resume_inode(                      \
++            _xl, _inode, NULL, 0, true, _trigger, &__ob_inode, &__first_fd);   \
++        switch (__ob_state) {                                                  \
++            case OB_STATE_OPEN_PENDING:                                        \
++                OB_POST_COMMON(_fop, _xl, _frame, __first_fd, ##_args);        \
++        }                                                                      \
++    } while (0)
++
++static ob_inode_t *
++ob_inode_get_locked(xlator_t *this, inode_t *inode)
+ {
+     ob_inode_t *ob_inode = NULL;
+     uint64_t value = 0;
+-    int ret = 0;
+ 
+-    if (!inode)
+-        goto out;
++    if ((__inode_ctx_get(inode, this, &value) == 0) && (value != 0)) {
++        return (ob_inode_t *)(uintptr_t)value;
++    }
+ 
+-    LOCK(&inode->lock);
+-    {
+-        __inode_ctx_get(inode, this, &value);
+-        if (value == 0) {
+-            ob_inode = ob_inode_alloc(inode);
+-            if (ob_inode == NULL)
+-                goto unlock;
+-
+-            value = (uint64_t)(uintptr_t)ob_inode;
+-            ret = __inode_ctx_set(inode, this, &value);
+-            if (ret < 0) {
+-                ob_inode_free(ob_inode);
+-                ob_inode = NULL;
+-            }
+-        } else {
+-            ob_inode = (ob_inode_t *)(uintptr_t)value;
++    ob_inode = GF_CALLOC(1, sizeof(*ob_inode), gf_ob_mt_inode_t);
++    if (ob_inode != NULL) {
++        ob_inode->inode = inode;
++        INIT_LIST_HEAD(&ob_inode->resume_fops);
++
++        value = (uint64_t)(uintptr_t)ob_inode;
++        if (__inode_ctx_set(inode, this, &value) < 0) {
++            GF_FREE(ob_inode);
++            ob_inode = NULL;
+         }
+     }
+-unlock:
+-    UNLOCK(&inode->lock);
+ 
+-out:
+     return ob_inode;
+ }
+ 
+-ob_fd_t *
+-__ob_fd_ctx_get(xlator_t *this, fd_t *fd)
++static ob_state_t
++ob_open_and_resume_inode(xlator_t *xl, inode_t *inode, fd_t *fd,
++                         int32_t open_count, bool synchronous, bool trigger,
++                         ob_inode_t **pob_inode, fd_t **pfd)
+ {
+-    uint64_t value = 0;
+-    int ret = -1;
+-    ob_fd_t *ob_fd = NULL;
++    ob_conf_t *conf;
++    ob_inode_t *ob_inode;
++    call_stub_t *open_stub;
+ 
+-    ret = __fd_ctx_get(fd, this, &value);
+-    if (ret)
+-        return NULL;
++    if (inode == NULL) {
++        return OB_STATE_READY;
++    }
+ 
+-    ob_fd = (void *)((long)value);
++    conf = xl->private;
+ 
+-    return ob_fd;
+-}
++    *pfd = NULL;
+ 
+-ob_fd_t *
+-ob_fd_ctx_get(xlator_t *this, fd_t *fd)
+-{
+-    ob_fd_t *ob_fd = NULL;
+-
+-    LOCK(&fd->lock);
++    LOCK(&inode->lock);
+     {
+-        ob_fd = __ob_fd_ctx_get(this, fd);
+-    }
+-    UNLOCK(&fd->lock);
+-
+-    return ob_fd;
+-}
++        ob_inode = ob_inode_get_locked(xl, inode);
++        if (ob_inode == NULL) {
++            UNLOCK(&inode->lock);
+ 
+-int
+-__ob_fd_ctx_set(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
+-{
+-    uint64_t value = 0;
+-    int ret = -1;
++            return -ENOMEM;
++        }
++        *pob_inode = ob_inode;
++
++        ob_inode->open_count += open_count;
++
++        /* If first_fd is not NULL, it means that there's a previous open not
++         * yet completed. */
++        if (ob_inode->first_fd != NULL) {
++            *pfd = ob_inode->first_fd;
++            /* If the current request doesn't trigger the open and it hasn't
++             * been triggered yet, we can continue without issuing the open
++             * only if the current request belongs to the same fd as the
++             * first one. */
++            if (!trigger && !ob_inode->triggered &&
++                (ob_inode->first_fd == fd)) {
++                UNLOCK(&inode->lock);
++
++                return OB_STATE_OPEN_PENDING;
++            }
+ 
+-    value = (long)((void *)ob_fd);
++            /* We need to issue the open. It could have already been triggered
++             * before. In this case open_stub will be NULL. Or the initial open
++             * may not be completely ready yet. In this case open_stub will be
++             * OB_OPEN_PREPARING. */
++            open_stub = ob_inode->first_open;
++            ob_inode->first_open = NULL;
++            ob_inode->triggered = true;
+ 
+-    ret = __fd_ctx_set(fd, this, value);
++            UNLOCK(&inode->lock);
+ 
+-    return ret;
+-}
++            if ((open_stub != NULL) && (open_stub != OB_OPEN_PREPARING)) {
++                call_resume(open_stub);
++            }
+ 
+-int
+-ob_fd_ctx_set(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
+-{
+-    int ret = -1;
++            return OB_STATE_OPEN_TRIGGERED;
++        }
+ 
+-    LOCK(&fd->lock);
+-    {
+-        ret = __ob_fd_ctx_set(this, fd, ob_fd);
+-    }
+-    UNLOCK(&fd->lock);
++        /* There's no pending open. Only opens can be non-synchronous, so all
++         * regular fops will be processed directly. For non-synchronous opens,
++         * we'll still process them normally (i.e. synchronously) if there are
++         * more file descriptors open. */
++        if (synchronous || (ob_inode->open_count > open_count)) {
++            UNLOCK(&inode->lock);
+ 
+-    return ret;
+-}
++            return OB_STATE_READY;
++        }
+ 
+-ob_fd_t *
+-ob_fd_new(void)
+-{
+-    ob_fd_t *ob_fd = NULL;
++        *pfd = fd;
+ 
+-    ob_fd = GF_CALLOC(1, sizeof(*ob_fd), gf_ob_mt_fd_t);
++        /* This is the first open. We keep a reference on the fd and set
++         * first_open stub to OB_OPEN_PREPARING until the actual stub can
++         * be assigned (we don't create the stub here to avoid doing memory
++         * allocations inside the mutex). */
++        ob_inode->first_fd = __fd_ref(fd);
++        ob_inode->first_open = OB_OPEN_PREPARING;
+ 
+-    INIT_LIST_HEAD(&ob_fd->list);
+-    INIT_LIST_HEAD(&ob_fd->ob_fds_on_inode);
++        /* If lazy_open is not set, we'll need to immediately send the open,
++         * so we set triggered right now. */
++        ob_inode->triggered = !conf->lazy_open;
++    }
++    UNLOCK(&inode->lock);
+ 
+-    return ob_fd;
++    return OB_STATE_FIRST_OPEN;
+ }
+ 
+-void
+-ob_fd_free(ob_fd_t *ob_fd)
++static ob_state_t
++ob_open_and_resume_fd(xlator_t *xl, fd_t *fd, int32_t open_count,
++                      bool synchronous, bool trigger, ob_inode_t **pob_inode,
++                      fd_t **pfd)
+ {
+-    LOCK(&ob_fd->fd->inode->lock);
+-    {
+-        list_del_init(&ob_fd->ob_fds_on_inode);
+-    }
+-    UNLOCK(&ob_fd->fd->inode->lock);
+-
+-    loc_wipe(&ob_fd->loc);
+-
+-    if (ob_fd->xdata)
+-        dict_unref(ob_fd->xdata);
++    uint64_t err;
+ 
+-    if (ob_fd->open_frame) {
+-        /* If we sill have a frame it means that background open has never
+-         * been triggered. We need to release the pending reference. */
+-        fd_unref(ob_fd->fd);
+-
+-        STACK_DESTROY(ob_fd->open_frame->root);
++    if ((fd_ctx_get(fd, xl, &err) == 0) && (err != 0)) {
++        return (ob_state_t)-err;
+     }
+ 
+-    GF_FREE(ob_fd);
++    return ob_open_and_resume_inode(xl, fd->inode, fd, open_count, synchronous,
++                                    trigger, pob_inode, pfd);
+ }
+ 
+-int
+-ob_wake_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+-            int op_errno, fd_t *fd_ret, dict_t *xdata)
++static ob_state_t
++ob_open_behind(xlator_t *xl, fd_t *fd, int32_t flags, ob_inode_t **pob_inode,
++               fd_t **pfd)
+ {
+-    fd_t *fd = NULL;
+-    int count = 0;
+-    int ob_inode_op_ret = 0;
+-    int ob_inode_op_errno = 0;
+-    ob_fd_t *ob_fd = NULL;
+-    call_stub_t *stub = NULL, *tmp = NULL;
+-    ob_inode_t *ob_inode = NULL;
+-    gf_boolean_t ob_inode_fops_waiting = _gf_false;
+-    struct list_head fops_waiting_on_fd, fops_waiting_on_inode;
++    bool synchronous;
+ 
+-    fd = frame->local;
+-    frame->local = NULL;
+-
+-    INIT_LIST_HEAD(&fops_waiting_on_fd);
+-    INIT_LIST_HEAD(&fops_waiting_on_inode);
++    /* TODO: If O_CREAT, O_APPEND, O_WRONLY or O_DIRECT are specified, shouldn't
++     *       we also execute this open synchronously? */
++    synchronous = (flags & O_TRUNC) != 0;
+ 
+-    ob_inode = ob_inode_get(this, fd->inode);
++    return ob_open_and_resume_fd(xl, fd, 1, synchronous, true, pob_inode, pfd);
++}
+ 
+-    LOCK(&fd->lock);
++static int32_t
++ob_stub_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
++                 call_stub_t *stub)
++{
++    LOCK(&ob_inode->inode->lock);
+     {
+-        ob_fd = __ob_fd_ctx_get(this, fd);
+-        ob_fd->opened = _gf_true;
+-
+-        ob_inode_fops_waiting = ob_fd->ob_inode_fops_waiting;
+-
+-        list_splice_init(&ob_fd->list, &fops_waiting_on_fd);
+-
+-        if (op_ret < 0) {
+-            /* mark fd BAD for ever */
+-            ob_fd->op_errno = op_errno;
+-            ob_fd = NULL; /*shouldn't be freed*/
+-        } else {
+-            __fd_ctx_del(fd, this, NULL);
+-        }
+-    }
+-    UNLOCK(&fd->lock);
+-
+-    if (ob_inode_fops_waiting) {
+-        LOCK(&fd->inode->lock);
+-        {
+-            count = --ob_inode->count;
+-            if (op_ret < 0) {
+-                /* TODO: when to reset the error? */
+-                ob_inode->op_ret = -1;
+-                ob_inode->op_errno = op_errno;
+-            }
+-
+-            if (count == 0) {
+-                ob_inode->open_in_progress = _gf_false;
+-                ob_inode_op_ret = ob_inode->op_ret;
+-                ob_inode_op_errno = ob_inode->op_errno;
+-                list_splice_init(&ob_inode->resume_fops,
+-                                 &fops_waiting_on_inode);
+-            }
++        /* We only queue a stub if the open has not been completed or
++         * cancelled. */
++        if (ob_inode->first_fd == fd) {
++            list_add_tail(&stub->list, &ob_inode->resume_fops);
++            stub = NULL;
+         }
+-        UNLOCK(&fd->inode->lock);
+-    }
+-
+-    if (ob_fd)
+-        ob_fd_free(ob_fd);
+-
+-    list_for_each_entry_safe(stub, tmp, &fops_waiting_on_fd, list)
+-    {
+-        list_del_init(&stub->list);
+-
+-        if (op_ret < 0)
+-            call_unwind_error(stub, -1, op_errno);
+-        else
+-            call_resume(stub);
+     }
++    UNLOCK(&ob_inode->inode->lock);
+ 
+-    list_for_each_entry_safe(stub, tmp, &fops_waiting_on_inode, list)
+-    {
+-        list_del_init(&stub->list);
+-
+-        if (ob_inode_op_ret < 0)
+-            call_unwind_error(stub, -1, ob_inode_op_errno);
+-        else
+-            call_resume(stub);
++    if (stub != NULL) {
++        call_resume(stub);
+     }
+ 
+-    /* The background open is completed. We can release the 'fd' reference. */
+-    fd_unref(fd);
+-
+-    STACK_DESTROY(frame->root);
+-
+     return 0;
+ }
+ 
+-int
+-ob_fd_wake(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
++static int32_t
++ob_open_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
++                 call_stub_t *stub)
+ {
+-    call_frame_t *frame = NULL;
+-
+-    if (ob_fd == NULL) {
+-        LOCK(&fd->lock);
+-        {
+-            ob_fd = __ob_fd_ctx_get(this, fd);
+-            if (!ob_fd)
+-                goto unlock;
++    bool closed;
+ 
+-            frame = ob_fd->open_frame;
+-            ob_fd->open_frame = NULL;
+-        }
+-    unlock:
+-        UNLOCK(&fd->lock);
+-    } else {
+-        LOCK(&fd->lock);
+-        {
+-            frame = ob_fd->open_frame;
+-            ob_fd->open_frame = NULL;
++    LOCK(&ob_inode->inode->lock);
++    {
++        closed = ob_inode->first_fd != fd;
++        if (!closed) {
++            if (ob_inode->triggered) {
++                ob_inode->first_open = NULL;
++            } else {
++                ob_inode->first_open = stub;
++                stub = NULL;
++            }
+         }
+-        UNLOCK(&fd->lock);
+     }
++    UNLOCK(&ob_inode->inode->lock);
+ 
+-    if (frame) {
+-        /* We don't need to take a reference here. We already have a reference
+-         * while the open is pending. */
+-        frame->local = fd;
+-
+-        STACK_WIND(frame, ob_wake_cbk, FIRST_CHILD(this),
+-                   FIRST_CHILD(this)->fops->open, &ob_fd->loc, ob_fd->flags, fd,
+-                   ob_fd->xdata);
++    if (stub != NULL) {
++        if (closed) {
++            call_stub_destroy(stub);
++            fd_unref(fd);
++        } else {
++            call_resume(stub);
++        }
+     }
+ 
+     return 0;
+ }
+ 
+-void
+-ob_inode_wake(xlator_t *this, struct list_head *ob_fds)
++static void
++ob_resume_pending(struct list_head *list)
+ {
+-    ob_fd_t *ob_fd = NULL, *tmp = NULL;
++    call_stub_t *stub;
+ 
+-    if (!list_empty(ob_fds)) {
+-        list_for_each_entry_safe(ob_fd, tmp, ob_fds, ob_fds_on_inode)
+-        {
+-            ob_fd_wake(this, ob_fd->fd, ob_fd);
+-            ob_fd_free(ob_fd);
+-        }
+-    }
+-}
++    while (!list_empty(list)) {
++        stub = list_first_entry(list, call_stub_t, list);
++        list_del_init(&stub->list);
+ 
+-/* called holding inode->lock and fd->lock */
+-void
+-ob_fd_copy(ob_fd_t *src, ob_fd_t *dst)
+-{
+-    if (!src || !dst)
+-        goto out;
+-
+-    dst->fd = src->fd;
+-    dst->loc.inode = inode_ref(src->loc.inode);
+-    gf_uuid_copy(dst->loc.gfid, src->loc.gfid);
+-    dst->flags = src->flags;
+-    dst->xdata = dict_ref(src->xdata);
+-    dst->ob_inode = src->ob_inode;
+-out:
+-    return;
++        call_resume(stub);
++    }
+ }
+ 
+-int
+-open_all_pending_fds_and_resume(xlator_t *this, inode_t *inode,
+-                                call_stub_t *stub)
++static void
++ob_open_completed(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd, int32_t op_ret,
++                  int32_t op_errno)
+ {
+-    ob_inode_t *ob_inode = NULL;
+-    ob_fd_t *ob_fd = NULL, *tmp = NULL;
+-    gf_boolean_t was_open_in_progress = _gf_false;
+-    gf_boolean_t wait_for_open = _gf_false;
+-    struct list_head ob_fds;
++    struct list_head list;
+ 
+-    ob_inode = ob_inode_get(this, inode);
+-    if (ob_inode == NULL)
+-        goto out;
++    INIT_LIST_HEAD(&list);
+ 
+-    INIT_LIST_HEAD(&ob_fds);
++    if (op_ret < 0) {
++        fd_ctx_set(fd, xl, op_errno <= 0 ? EIO : op_errno);
++    }
+ 
+-    LOCK(&inode->lock);
++    LOCK(&ob_inode->inode->lock);
+     {
+-        was_open_in_progress = ob_inode->open_in_progress;
+-        ob_inode->unlinked = 1;
+-
+-        if (was_open_in_progress) {
+-            list_add_tail(&stub->list, &ob_inode->resume_fops);
+-            goto inode_unlock;
+-        }
+-
+-        list_for_each_entry(ob_fd, &ob_inode->ob_fds, ob_fds_on_inode)
+-        {
+-            LOCK(&ob_fd->fd->lock);
+-            {
+-                if (ob_fd->opened)
+-                    goto fd_unlock;
+-
+-                ob_inode->count++;
+-                ob_fd->ob_inode_fops_waiting = _gf_true;
+-
+-                if (ob_fd->open_frame == NULL) {
+-                    /* open in progress no need of wake */
+-                } else {
+-                    tmp = ob_fd_new();
+-                    tmp->open_frame = ob_fd->open_frame;
+-                    ob_fd->open_frame = NULL;
+-
+-                    ob_fd_copy(ob_fd, tmp);
+-                    list_add_tail(&tmp->ob_fds_on_inode, &ob_fds);
+-                }
+-            }
+-        fd_unlock:
+-            UNLOCK(&ob_fd->fd->lock);
+-        }
+-
+-        if (ob_inode->count) {
+-            wait_for_open = ob_inode->open_in_progress = _gf_true;
+-            list_add_tail(&stub->list, &ob_inode->resume_fops);
++        /* Only update the fields if the file has not been closed before
++         * getting here. */
++        if (ob_inode->first_fd == fd) {
++            list_splice_init(&ob_inode->resume_fops, &list);
++            ob_inode->first_fd = NULL;
++            ob_inode->first_open = NULL;
++            ob_inode->triggered = false;
+         }
+     }
+-inode_unlock:
+-    UNLOCK(&inode->lock);
++    UNLOCK(&ob_inode->inode->lock);
+ 
+-out:
+-    if (!was_open_in_progress) {
+-        if (!wait_for_open) {
+-            call_resume(stub);
+-        } else {
+-            ob_inode_wake(this, &ob_fds);
+-        }
+-    }
++    ob_resume_pending(&list);
+ 
+-    return 0;
++    fd_unref(fd);
+ }
+ 
+-int
+-open_and_resume(xlator_t *this, fd_t *fd, call_stub_t *stub)
++static int32_t
++ob_open_cbk(call_frame_t *frame, void *cookie, xlator_t *xl, int32_t op_ret,
++            int32_t op_errno, fd_t *fd, dict_t *xdata)
+ {
+-    ob_fd_t *ob_fd = NULL;
+-    int op_errno = 0;
+-
+-    if (!fd)
+-        goto nofd;
+-
+-    LOCK(&fd->lock);
+-    {
+-        ob_fd = __ob_fd_ctx_get(this, fd);
+-        if (!ob_fd)
+-            goto unlock;
++    ob_inode_t *ob_inode;
+ 
+-        if (ob_fd->op_errno) {
+-            op_errno = ob_fd->op_errno;
+-            goto unlock;
+-        }
++    ob_inode = frame->local;
++    frame->local = NULL;
+ 
+-        list_add_tail(&stub->list, &ob_fd->list);
+-    }
+-unlock:
+-    UNLOCK(&fd->lock);
++    ob_open_completed(xl, ob_inode, cookie, op_ret, op_errno);
+ 
+-nofd:
+-    if (op_errno)
+-        call_unwind_error(stub, -1, op_errno);
+-    else if (ob_fd)
+-        ob_fd_wake(this, fd, NULL);
+-    else
+-        call_resume(stub);
++    STACK_DESTROY(frame->root);
+ 
+     return 0;
+ }
+ 
+-int
+-ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
++static int32_t
++ob_open_resume(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+                fd_t *fd, dict_t *xdata)
+ {
+-    ob_fd_t *ob_fd = NULL;
+-    int ret = -1;
+-    ob_conf_t *conf = NULL;
+-    ob_inode_t *ob_inode = NULL;
+-    gf_boolean_t open_in_progress = _gf_false;
+-    int unlinked = 0;
+-
+-    conf = this->private;
+-
+-    if (flags & O_TRUNC) {
+-        STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+-                   FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+-        return 0;
+-    }
+-
+-    ob_inode = ob_inode_get(this, fd->inode);
+-
+-    ob_fd = ob_fd_new();
+-    if (!ob_fd)
+-        goto enomem;
+-
+-    ob_fd->ob_inode = ob_inode;
+-
+-    ob_fd->fd = fd;
+-
+-    ob_fd->open_frame = copy_frame(frame);
+-    if (!ob_fd->open_frame)
+-        goto enomem;
+-    ret = loc_copy(&ob_fd->loc, loc);
+-    if (ret)
+-        goto enomem;
+-
+-    ob_fd->flags = flags;
+-    if (xdata)
+-        ob_fd->xdata = dict_ref(xdata);
+-
+-    LOCK(&fd->inode->lock);
+-    {
+-        open_in_progress = ob_inode->open_in_progress;
+-        unlinked = ob_inode->unlinked;
+-        if (!open_in_progress && !unlinked) {
+-            ret = ob_fd_ctx_set(this, fd, ob_fd);
+-            if (ret) {
+-                UNLOCK(&fd->inode->lock);
+-                goto enomem;
+-            }
+-
+-            list_add(&ob_fd->ob_fds_on_inode, &ob_inode->ob_fds);
+-        }
+-    }
+-    UNLOCK(&fd->inode->lock);
+-
+-    /* We take a reference while the background open is pending or being
+-     * processed. If we finally wind the request in the foreground, then
+-     * ob_fd_free() will take care of this additional reference. */
+-    fd_ref(fd);
+-
+-    if (!open_in_progress && !unlinked) {
+-        STACK_UNWIND_STRICT(open, frame, 0, 0, fd, xdata);
+-
+-        if (!conf->lazy_open)
+-            ob_fd_wake(this, fd, NULL);
+-    } else {
+-        ob_fd_free(ob_fd);
+-        STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+-                   FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+-    }
++    STACK_WIND_COOKIE(frame, ob_open_cbk, fd, FIRST_CHILD(this),
++                      FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ 
+     return 0;
+-enomem:
+-    if (ob_fd) {
+-        if (ob_fd->open_frame)
+-            STACK_DESTROY(ob_fd->open_frame->root);
+-
+-        loc_wipe(&ob_fd->loc);
+-        if (ob_fd->xdata)
+-            dict_unref(ob_fd->xdata);
+-
+-        GF_FREE(ob_fd);
+-    }
+-
+-    return -1;
+ }
+ 
+-int
++static int32_t
+ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
+         dict_t *xdata)
+ {
+-    fd_t *old_fd = NULL;
+-    int ret = -1;
+-    int op_errno = ENOMEM;
+-    call_stub_t *stub = NULL;
+-
+-    old_fd = fd_lookup(fd->inode, 0);
+-    if (old_fd) {
+-        /* open-behind only when this is the first FD */
+-        stub = fop_open_stub(frame, default_open_resume, loc, flags, fd, xdata);
+-        if (!stub) {
+-            fd_unref(old_fd);
+-            goto err;
+-        }
+-
+-        open_and_resume(this, old_fd, stub);
++    ob_inode_t *ob_inode;
++    call_frame_t *open_frame;
++    call_stub_t *stub;
++    fd_t *first_fd;
++    ob_state_t state;
++
++    state = ob_open_behind(this, fd, flags, &ob_inode, &first_fd);
++    if (state == OB_STATE_READY) {
++        /* There's no pending open, but there are other file descriptors opened
++         * or the current flags require a synchronous open. */
++        return default_open(frame, this, loc, flags, fd, xdata);
++    }
+ 
+-        fd_unref(old_fd);
++    if (state == OB_STATE_OPEN_TRIGGERED) {
++        /* The first open is in progress (either because it was already issued
++         * or because this request triggered it). We try to create a new stub
++         * to retry the operation once the initial open completes. */
++        stub = fop_open_stub(frame, ob_open, loc, flags, fd, xdata);
++        if (stub != NULL) {
++            return ob_stub_dispatch(this, ob_inode, first_fd, stub);
++        }
+ 
+-        return 0;
++        state = -ENOMEM;
+     }
+ 
+-    ret = ob_open_behind(frame, this, loc, flags, fd, xdata);
+-    if (ret) {
+-        goto err;
+-    }
++    if (state == OB_STATE_FIRST_OPEN) {
++        /* We try to create a stub for the new open. A new frame needs to be
++         * used because the current one may be destroyed soon after sending
++         * the open's reply. */
++        open_frame = copy_frame(frame);
++        if (open_frame != NULL) {
++            stub = fop_open_stub(open_frame, ob_open_resume, loc, flags, fd,
++                                 xdata);
++            if (stub != NULL) {
++                open_frame->local = ob_inode;
+ 
+-    return 0;
+-err:
+-    gf_msg(this->name, GF_LOG_ERROR, op_errno, OPEN_BEHIND_MSG_NO_MEMORY, "%s",
+-           loc->path);
++                /* TODO: Previous version passed xdata back to the caller, but
++                 *       probably this doesn't make sense since it won't contain
++                 *       any requested data. I think it would be better to pass
++                 *       NULL for xdata. */
++                default_open_cbk(frame, NULL, this, 0, 0, fd, xdata);
+ 
+-    STACK_UNWIND_STRICT(open, frame, -1, op_errno, 0, 0);
++                return ob_open_dispatch(this, ob_inode, first_fd, stub);
++            }
+ 
+-    return 0;
+-}
++            STACK_DESTROY(open_frame->root);
++        }
+ 
+-fd_t *
+-ob_get_wind_fd(xlator_t *this, fd_t *fd, uint32_t *flag)
+-{
+-    fd_t *wind_fd = NULL;
+-    ob_fd_t *ob_fd = NULL;
+-    ob_conf_t *conf = NULL;
++        /* In case of error, simulate a regular completion but with an error
++         * code. */
++        ob_open_completed(this, ob_inode, first_fd, -1, ENOMEM);
+ 
+-    conf = this->private;
++        state = -ENOMEM;
++    }
+ 
+-    ob_fd = ob_fd_ctx_get(this, fd);
++    /* In case of failure we need to decrement the number of open files because
++     * ob_fdclose() won't be called. */
+ 
+-    if (ob_fd && ob_fd->open_frame && conf->use_anonymous_fd) {
+-        wind_fd = fd_anonymous(fd->inode);
+-        if ((ob_fd->flags & O_DIRECT) && (flag))
+-            *flag = *flag | O_DIRECT;
+-    } else {
+-        wind_fd = fd_ref(fd);
++    LOCK(&fd->inode->lock);
++    {
++        ob_inode->open_count--;
+     }
++    UNLOCK(&fd->inode->lock);
+ 
+-    return wind_fd;
++    gf_smsg(this->name, GF_LOG_ERROR, -state, OPEN_BEHIND_MSG_FAILED, "fop=%s",
++            "open", "path=%s", loc->path, NULL);
++
++    return default_open_failure_cbk(frame, -state);
+ }
+ 
+-int
++static int32_t
+ ob_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+          off_t offset, uint32_t flags, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-    fd_t *wind_fd = NULL;
+-    ob_conf_t *conf = NULL;
++    ob_conf_t *conf = this->private;
++    bool trigger = conf->read_after_open || !conf->use_anonymous_fd;
+ 
+-    conf = this->private;
+-
+-    if (!conf->read_after_open)
+-        wind_fd = ob_get_wind_fd(this, fd, &flags);
+-    else
+-        wind_fd = fd_ref(fd);
+-
+-    stub = fop_readv_stub(frame, default_readv_resume, wind_fd, size, offset,
+-                          flags, xdata);
+-    fd_unref(wind_fd);
+-
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, wind_fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(readv, frame, -1, ENOMEM, 0, 0, 0, 0, 0);
++    OB_POST_FD(readv, this, frame, fd, trigger, fd, size, offset, flags, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *iov,
+           int count, off_t offset, uint32_t flags, struct iobref *iobref,
+           dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_writev_stub(frame, default_writev_resume, fd, iov, count, offset,
+-                           flags, iobref, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(writev, frame, -1, ENOMEM, 0, 0, 0);
++    OB_POST_FD(writev, this, frame, fd, true, fd, iov, count, offset, flags,
++               iobref, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-    fd_t *wind_fd = NULL;
+-
+-    wind_fd = ob_get_wind_fd(this, fd, NULL);
+-
+-    stub = fop_fstat_stub(frame, default_fstat_resume, wind_fd, xdata);
++    ob_conf_t *conf = this->private;
++    bool trigger = !conf->use_anonymous_fd;
+ 
+-    fd_unref(wind_fd);
+-
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, wind_fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, 0, 0);
++    OB_POST_FD(fstat, this, frame, fd, trigger, fd, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+         gf_seek_what_t what, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-    fd_t *wind_fd = NULL;
+-
+-    wind_fd = ob_get_wind_fd(this, fd, NULL);
++    ob_conf_t *conf = this->private;
++    bool trigger = !conf->use_anonymous_fd;
+ 
+-    stub = fop_seek_stub(frame, default_seek_resume, wind_fd, offset, what,
+-                         xdata);
+-
+-    fd_unref(wind_fd);
+-
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, wind_fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, 0, 0);
++    OB_POST_FD(seek, this, frame, fd, trigger, fd, offset, what, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-    ob_fd_t *ob_fd = NULL;
+-    gf_boolean_t unwind = _gf_false;
+-
+-    LOCK(&fd->lock);
+-    {
+-        ob_fd = __ob_fd_ctx_get(this, fd);
+-        if (ob_fd && ob_fd->open_frame)
+-            /* if open() was never wound to backend,
+-               no need to wind flush() either.
+-            */
+-            unwind = _gf_true;
+-    }
+-    UNLOCK(&fd->lock);
+-
+-    if (unwind)
+-        goto unwind;
+-
+-    stub = fop_flush_stub(frame, default_flush_resume, fd, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(flush, frame, -1, ENOMEM, 0);
+-
+-    return 0;
+-
+-unwind:
+-    STACK_UNWIND_STRICT(flush, frame, 0, 0, 0);
++    OB_POST_FLUSH(this, frame, fd, fd, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int flag, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fsync_stub(frame, default_fsync_resume, fd, flag, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fsync, frame, -1, ENOMEM, 0, 0, 0);
++    OB_POST_FD(fsync, this, frame, fd, true, fd, flag, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int cmd,
+       struct gf_flock *flock, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_lk_stub(frame, default_lk_resume, fd, cmd, flock, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(lk, frame, -1, ENOMEM, 0, 0);
++    OB_POST_FD(lk, this, frame, fd, true, fd, cmd, flock, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+              dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_ftruncate_stub(frame, default_ftruncate_resume, fd, offset,
+-                              xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(ftruncate, frame, -1, ENOMEM, 0, 0, 0);
++    OB_POST_FD(ftruncate, this, frame, fd, true, fd, offset, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xattr,
+              int flags, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fsetxattr_stub(frame, default_fsetxattr_resume, fd, xattr, flags,
+-                              xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fsetxattr, frame, -1, ENOMEM, 0);
++    OB_POST_FD(fsetxattr, this, frame, fd, true, fd, xattr, flags, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+              dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fgetxattr_stub(frame, default_fgetxattr_resume, fd, name, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fgetxattr, frame, -1, ENOMEM, 0, 0);
++    OB_POST_FD(fgetxattr, this, frame, fd, true, fd, name, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+                 dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fremovexattr_stub(frame, default_fremovexattr_resume, fd, name,
+-                                 xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fremovexattr, frame, -1, ENOMEM, 0);
++    OB_POST_FD(fremovexattr, this, frame, fd, true, fd, name, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+             int cmd, struct gf_flock *flock, dict_t *xdata)
+ {
+-    call_stub_t *stub = fop_finodelk_stub(frame, default_finodelk_resume,
+-                                          volume, fd, cmd, flock, xdata);
+-    if (stub)
+-        open_and_resume(this, fd, stub);
+-    else
+-        STACK_UNWIND_STRICT(finodelk, frame, -1, ENOMEM, 0);
++    OB_POST_FD(finodelk, this, frame, fd, true, volume, fd, cmd, flock, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+             const char *basename, entrylk_cmd cmd, entrylk_type type,
+             dict_t *xdata)
+ {
+-    call_stub_t *stub = fop_fentrylk_stub(
+-        frame, default_fentrylk_resume, volume, fd, basename, cmd, type, xdata);
+-    if (stub)
+-        open_and_resume(this, fd, stub);
+-    else
+-        STACK_UNWIND_STRICT(fentrylk, frame, -1, ENOMEM, 0);
++    OB_POST_FD(fentrylk, this, frame, fd, true, volume, fd, basename, cmd, type,
++               xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+             gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+ {
+-    call_stub_t *stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd,
+-                                          optype, xattr, xdata);
+-    if (stub)
+-        open_and_resume(this, fd, stub);
+-    else
+-        STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, 0, 0);
++    OB_POST_FD(fxattrop, this, frame, fd, true, fd, optype, xattr, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *iatt,
+             int valid, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_fsetattr_stub(frame, default_fsetattr_resume, fd, iatt, valid,
+-                             xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(fsetattr, frame, -1, ENOMEM, 0, 0, 0);
++    OB_POST_FD(fsetattr, this, frame, fd, true, fd, iatt, valid, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+              off_t offset, size_t len, dict_t *xdata)
+ {
+-    call_stub_t *stub;
+-
+-    stub = fop_fallocate_stub(frame, default_fallocate_resume, fd, mode, offset,
+-                              len, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
++    OB_POST_FD(fallocate, this, frame, fd, true, fd, mode, offset, len, xdata);
+ 
+     return 0;
+-err:
+-    STACK_UNWIND_STRICT(fallocate, frame, -1, ENOMEM, NULL, NULL, NULL);
+-    return 0;
+ }
+ 
+-int
++static int32_t
+ ob_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+            size_t len, dict_t *xdata)
+ {
+-    call_stub_t *stub;
+-
+-    stub = fop_discard_stub(frame, default_discard_resume, fd, offset, len,
+-                            xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_and_resume(this, fd, stub);
++    OB_POST_FD(discard, this, frame, fd, true, fd, offset, len, xdata);
+ 
+     return 0;
+-err:
+-    STACK_UNWIND_STRICT(discard, frame, -1, ENOMEM, NULL, NULL, NULL);
+-    return 0;
+ }
+ 
+-int
++static int32_t
+ ob_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+             off_t len, dict_t *xdata)
+ {
+-    call_stub_t *stub;
+-
+-    stub = fop_zerofill_stub(frame, default_zerofill_resume, fd, offset, len,
+-                             xdata);
+-    if (!stub)
+-        goto err;
++    OB_POST_FD(zerofill, this, frame, fd, true, fd, offset, len, xdata);
+ 
+-    open_and_resume(this, fd, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(zerofill, frame, -1, ENOMEM, NULL, NULL, NULL);
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+           dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_unlink_stub(frame, default_unlink_resume, loc, xflags, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_all_pending_fds_and_resume(this, loc->inode, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(unlink, frame, -1, ENOMEM, 0, 0, 0);
++    OB_POST_INODE(unlink, this, frame, loc->inode, true, loc, xflags, xdata);
+ 
+     return 0;
+ }
+ 
+-int
++static int32_t
+ ob_rename(call_frame_t *frame, xlator_t *this, loc_t *src, loc_t *dst,
+           dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_rename_stub(frame, default_rename_resume, src, dst, xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_all_pending_fds_and_resume(this, dst->inode, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(rename, frame, -1, ENOMEM, 0, 0, 0, 0, 0, 0);
++    OB_POST_INODE(rename, this, frame, dst->inode, true, src, dst, xdata);
+ 
+     return 0;
+ }
+ 
+-int32_t
++static int32_t
+ ob_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
+            int32_t valid, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-
+-    stub = fop_setattr_stub(frame, default_setattr_resume, loc, stbuf, valid,
+-                            xdata);
+-    if (!stub)
+-        goto err;
++    OB_POST_INODE(setattr, this, frame, loc->inode, true, loc, stbuf, valid,
++                  xdata);
+ 
+-    open_all_pending_fds_and_resume(this, loc->inode, stub);
+-
+-    return 0;
+-err:
+-    STACK_UNWIND_STRICT(setattr, frame, -1, ENOMEM, NULL, NULL, NULL);
+     return 0;
+ }
+ 
+-int32_t
++static int32_t
+ ob_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+             int32_t flags, dict_t *xdata)
+ {
+-    call_stub_t *stub = NULL;
+-    gf_boolean_t access_xattr = _gf_false;
+-
+     if (dict_get(dict, POSIX_ACL_DEFAULT_XATTR) ||
+         dict_get(dict, POSIX_ACL_ACCESS_XATTR) ||
+-        dict_get(dict, GF_SELINUX_XATTR_KEY))
+-        access_xattr = _gf_true;
+-
+-    if (!access_xattr)
++        dict_get(dict, GF_SELINUX_XATTR_KEY)) {
+         return default_setxattr(frame, this, loc, dict, flags, xdata);
++    }
+ 
+-    stub = fop_setxattr_stub(frame, default_setxattr_resume, loc, dict, flags,
+-                             xdata);
+-    if (!stub)
+-        goto err;
+-
+-    open_all_pending_fds_and_resume(this, loc->inode, stub);
++    OB_POST_INODE(setxattr, this, frame, loc->inode, true, loc, dict, flags,
++                  xdata);
+ 
+     return 0;
+-err:
+-    STACK_UNWIND_STRICT(setxattr, frame, -1, ENOMEM, NULL);
+-    return 0;
+ }
+ 
+-int
+-ob_release(xlator_t *this, fd_t *fd)
++static void
++ob_fdclose(xlator_t *this, fd_t *fd)
+ {
+-    ob_fd_t *ob_fd = NULL;
++    struct list_head list;
++    ob_inode_t *ob_inode;
++    call_stub_t *stub;
++
++    INIT_LIST_HEAD(&list);
++    stub = NULL;
+ 
+-    ob_fd = ob_fd_ctx_get(this, fd);
++    LOCK(&fd->inode->lock);
++    {
++        ob_inode = ob_inode_get_locked(this, fd->inode);
++        if (ob_inode != NULL) {
++            ob_inode->open_count--;
++
++            /* If this fd is the same as ob_inode->first_fd, it means that
++             * the initial open has not fully completed. We'll try to cancel
++             * it. */
++            if (ob_inode->first_fd == fd) {
++                if (ob_inode->first_open == OB_OPEN_PREPARING) {
++                    /* In this case ob_open_dispatch() has not been called yet.
++                     * We clear first_fd and first_open to allow that function
++                     * to know that the open is not really needed. This also
++                     * allows other requests to work as expected if they
++                     * arrive before the dispatch function is called. If there
++                     * are pending fops, we can directly process them here.
++                     * (note that there shouldn't be any fd related fops, but
++                     * if there are, it's fine if they fail). */
++                    ob_inode->first_fd = NULL;
++                    ob_inode->first_open = NULL;
++                    ob_inode->triggered = false;
++                    list_splice_init(&ob_inode->resume_fops, &list);
++                } else if (!ob_inode->triggered) {
++                    /* If the open has already been dispatched, we can only
++                     * cancel it if it has not been triggered. Otherwise we
++                     * simply wait until it completes. While it's not triggered,
++                     * first_open must be a valid stub and there can't be any
++                     * pending fops. */
++                    GF_ASSERT((ob_inode->first_open != NULL) &&
++                              list_empty(&ob_inode->resume_fops));
++
++                    ob_inode->first_fd = NULL;
++                    stub = ob_inode->first_open;
++                    ob_inode->first_open = NULL;
++                }
++            }
++        }
++    }
++    UNLOCK(&fd->inode->lock);
+ 
+-    ob_fd_free(ob_fd);
++    if (stub != NULL) {
++        call_stub_destroy(stub);
++        fd_unref(fd);
++    }
+ 
+-    return 0;
++    ob_resume_pending(&list);
+ }
+ 
+ int
+ ob_forget(xlator_t *this, inode_t *inode)
+ {
+-    ob_inode_t *ob_inode = NULL;
++    ob_inode_t *ob_inode;
+     uint64_t value = 0;
+ 
+-    inode_ctx_del(inode, this, &value);
+-
+-    if (value) {
++    if ((inode_ctx_del(inode, this, &value) == 0) && (value != 0)) {
+         ob_inode = (ob_inode_t *)(uintptr_t)value;
+-        ob_inode_free(ob_inode);
++        GF_FREE(ob_inode);
+     }
+ 
+     return 0;
+@@ -1153,20 +823,18 @@ ob_priv_dump(xlator_t *this)
+ int
+ ob_fdctx_dump(xlator_t *this, fd_t *fd)
+ {
+-    ob_fd_t *ob_fd = NULL;
+     char key_prefix[GF_DUMP_MAX_BUF_LEN] = {
+         0,
+     };
+-    int ret = 0;
++    uint64_t value = 0;
++    int ret = 0, error = 0;
+ 
+     ret = TRY_LOCK(&fd->lock);
+     if (ret)
+         return 0;
+ 
+-    ob_fd = __ob_fd_ctx_get(this, fd);
+-    if (!ob_fd) {
+-        UNLOCK(&fd->lock);
+-        return 0;
++    if ((__fd_ctx_get(fd, this, &value) == 0) && (value != 0)) {
++        error = (int32_t)value;
+     }
+ 
+     gf_proc_dump_build_key(key_prefix, "xlator.performance.open-behind",
+@@ -1175,17 +843,7 @@ ob_fdctx_dump(xlator_t *this, fd_t *fd)
+ 
+     gf_proc_dump_write("fd", "%p", fd);
+ 
+-    gf_proc_dump_write("open_frame", "%p", ob_fd->open_frame);
+-
+-    if (ob_fd->open_frame)
+-        gf_proc_dump_write("open_frame.root.unique", "%" PRIu64,
+-                           ob_fd->open_frame->root->unique);
+-
+-    gf_proc_dump_write("loc.path", "%s", ob_fd->loc.path);
+-
+-    gf_proc_dump_write("loc.ino", "%s", uuid_utoa(ob_fd->loc.gfid));
+-
+-    gf_proc_dump_write("flags", "%d", ob_fd->flags);
++    gf_proc_dump_write("error", "%d", error);
+ 
+     UNLOCK(&fd->lock);
+ 
+@@ -1307,7 +965,7 @@ struct xlator_fops fops = {
+ };
+ 
+ struct xlator_cbks cbks = {
+-    .release = ob_release,
++    .fdclose = ob_fdclose,
+     .forget = ob_forget,
+ };
+ 
+-- 
+1.8.3.1
+
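The hunks above replace the old per-fd tracking (ob_fd_t records holding a
saved open frame) with a single per-inode record: the first fd whose open is
deferred, a stub that will issue that open, a "triggered" flag, and a queue
of fops waiting for the open to complete. The following Python sketch models
that state machine under invented, illustrative names (OBInode, post_fd_fop);
it is a reading aid for the hunks, not the xlator's real C API:

    class OBInode:
        """Illustrative model of the per-inode state in ob_inode_t."""

        def __init__(self):
            self.first_fd = None     # fd whose real open is still pending
            self.first_open = None   # deferred action that performs the open
            self.triggered = False   # a fop already requested the open
            self.resume_fops = []    # fops queued until the open completes

        def post_fd_fop(self, fd, fop, trigger):
            # Mirrors OB_POST_FD: queue behind the pending open, optionally
            # forcing the deferred open to actually be sent.
            if self.first_fd is not fd:
                fop()                # open already completed (or another fd)
                return
            self.resume_fops.append(fop)
            if trigger and not self.triggered:
                self.triggered = True
                deferred, self.first_open = self.first_open, None
                if deferred is not None:
                    deferred()       # issue the real open now

        def open_completed(self, fd):
            # Mirrors ob_open_completed: ignore stale completions, drain rest.
            if self.first_fd is not fd:
                return
            self.first_fd = self.first_open = None
            self.triggered = False
            pending, self.resume_fops = self.resume_fops, []
            for fop in pending:
                fop()
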
diff --git a/SOURCES/0524-open-behind-fix-call_frame-leak.patch b/SOURCES/0524-open-behind-fix-call_frame-leak.patch
new file mode 100644
index 0000000..75a243d
--- /dev/null
+++ b/SOURCES/0524-open-behind-fix-call_frame-leak.patch
@@ -0,0 +1,70 @@
+From 36dddf59a02d91d3db5b124be626ab6bc235ed5a Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Wed, 19 Aug 2020 23:27:38 +0200
+Subject: [PATCH 524/526] open-behind: fix call_frame leak
+
+When an open was delayed, a copy of the frame was created because the
+current frame was used to unwind the "fake" open. When the open was
+actually sent, the frame was correctly destroyed. However, if the file
+was closed before the open needed to be sent, the frame was never destroyed.
+
+This patch correctly destroys the frame in all cases.
+
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/24892
+> Change-Id: I8c00fc7f15545c240e8151305d9e4cf06d653926
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+> Fixes: #1440
+
+BUG: 1830713
+Change-Id: I8c00fc7f15545c240e8151305d9e4cf06d653926
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224488
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index e43fe73..1ab635e 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -333,6 +333,14 @@ ob_stub_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
+     return 0;
+ }
+ 
++static void
++ob_open_destroy(call_stub_t *stub, fd_t *fd)
++{
++    STACK_DESTROY(stub->frame->root);
++    call_stub_destroy(stub);
++    fd_unref(fd);
++}
++
+ static int32_t
+ ob_open_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
+                  call_stub_t *stub)
+@@ -355,8 +363,7 @@ ob_open_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
+ 
+     if (stub != NULL) {
+         if (closed) {
+-            call_stub_destroy(stub);
+-            fd_unref(fd);
++            ob_open_destroy(stub, fd);
+         } else {
+             call_resume(stub);
+         }
+@@ -776,8 +783,7 @@ ob_fdclose(xlator_t *this, fd_t *fd)
+     UNLOCK(&fd->inode->lock);
+ 
+     if (stub != NULL) {
+-        call_stub_destroy(stub);
+-        fd_unref(fd);
++        ob_open_destroy(stub, fd);
+     }
+ 
+     ob_resume_pending(&list);
+-- 
+1.8.3.1
+
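The essence of the fix is that both places that can drop a pending open (the
dispatch path and fdclose) now share one destroy helper, so no path can
forget the copied frame. A minimal Python sketch of the pattern, with an
invented Resource type standing in for the real gluster frame/fd objects:

    class Resource:
        def __init__(self):
            self.released = False

        def release(self):
            self.released = True

    def ob_open_destroy(stub):
        # One helper releases everything a deferred open holds (frame and
        # fd reference); every cancel path calls it, so nothing leaks.
        stub["frame"].release()
        stub["fd"].release()

    stub = {"frame": Resource(), "fd": Resource()}
    ob_open_destroy(stub)   # same call from the dispatch and fdclose paths
    assert stub["frame"].released and stub["fd"].released
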
diff --git a/SOURCES/0525-open-behind-implement-create-fop.patch b/SOURCES/0525-open-behind-implement-create-fop.patch
new file mode 100644
index 0000000..c7a5329
--- /dev/null
+++ b/SOURCES/0525-open-behind-implement-create-fop.patch
@@ -0,0 +1,109 @@
+From 41aae052b5e3afe64d3e0668643726bab0e77265 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Fri, 4 Sep 2020 14:49:50 +0200
+Subject: [PATCH 525/526] open-behind: implement create fop
+
+Open-behind didn't implement the create fop. As a result, created files
+were not accounted for in the number of open fds, which could cause
+future opens to be delayed when they shouldn't be.
+
+This patch implements the create fop. It also fixes a problem when
+destroying the stack: when frame->local was not NULL, STACK_DESTROY()
+tried to mem_put() it, which is not correct.
+
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/24953
+> Fixes: #1440
+> Change-Id: Ic982bad07d4af30b915d7eb1fbcef7a847a45869
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1830713
+Change-Id: Ic982bad07d4af30b915d7eb1fbcef7a847a45869
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224489
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/performance/open-behind/src/open-behind.c | 52 +++++++++++++++++++++++
+ 1 file changed, 52 insertions(+)
+
+diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
+index 1ab635e..600c3b6 100644
+--- a/xlators/performance/open-behind/src/open-behind.c
++++ b/xlators/performance/open-behind/src/open-behind.c
+@@ -336,6 +336,7 @@ ob_stub_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
+ static void
+ ob_open_destroy(call_stub_t *stub, fd_t *fd)
+ {
++    stub->frame->local = NULL;
+     STACK_DESTROY(stub->frame->root);
+     call_stub_destroy(stub);
+     fd_unref(fd);
+@@ -516,6 +517,56 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
+ }
+ 
+ static int32_t
++ob_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
++          mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
++{
++    ob_inode_t *ob_inode;
++    call_stub_t *stub;
++    fd_t *first_fd;
++    ob_state_t state;
++
++    /* Create requests are never delayed. We always send them synchronously. */
++    state = ob_open_and_resume_fd(this, fd, 1, true, true, &ob_inode,
++                                  &first_fd);
++    if (state == OB_STATE_READY) {
++        /* There's no pending open, but there are other file descriptors opened
++         * so we simply forward the request synchronously. */
++        return default_create(frame, this, loc, flags, mode, umask, fd, xdata);
++    }
++
++    if (state == OB_STATE_OPEN_TRIGGERED) {
++        /* The first open is in progress (either because it was already issued
++         * or because this request triggered it). We try to create a new stub
++         * to retry the operation once the initial open completes. */
++        stub = fop_create_stub(frame, ob_create, loc, flags, mode, umask, fd,
++                               xdata);
++        if (stub != NULL) {
++            return ob_stub_dispatch(this, ob_inode, first_fd, stub);
++        }
++
++        state = -ENOMEM;
++    }
++
++    /* Since we forced a synchronous request, OB_STATE_FIRST_OPEN will never
++     * be returned by ob_open_and_resume_fd(). If we are here it can only be
++     * because there has been a problem. */
++
++    /* In case of failure we need to decrement the number of open files because
++     * ob_fdclose() won't be called. */
++
++    LOCK(&fd->inode->lock);
++    {
++        ob_inode->open_count--;
++    }
++    UNLOCK(&fd->inode->lock);
++
++    gf_smsg(this->name, GF_LOG_ERROR, -state, OPEN_BEHIND_MSG_FAILED, "fop=%s",
++            "create", "path=%s", loc->path, NULL);
++
++    return default_create_failure_cbk(frame, -state);
++}
++
++static int32_t
+ ob_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+          off_t offset, uint32_t flags, dict_t *xdata)
+ {
+@@ -946,6 +997,7 @@ fini(xlator_t *this)
+ 
+ struct xlator_fops fops = {
+     .open = ob_open,
++    .create = ob_create,
+     .readv = ob_readv,
+     .writev = ob_writev,
+     .flush = ob_flush,
+-- 
+1.8.3.1
+
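The accounting invariant behind this patch: every path that hands out an fd
must increment the per-inode open count, because ob_fdclose() will decrement
it and the "is this the first fd?" decision depends on it. A Python sketch
with hypothetical names:

    class OpenAccounting:
        """Sketch of the invariant ob_create must preserve (names invented)."""

        def __init__(self):
            self.open_count = 0

        def on_open(self):
            self.open_count += 1

        def on_create(self):
            # Before this patch, create skipped this step, so a freshly
            # created (and still open) file looked like it had no fds, and
            # a later open could wrongly be treated as the "first" one.
            self.open_count += 1

        def on_fdclose(self):
            self.open_count -= 1

    acct = OpenAccounting()
    acct.on_create()
    assert acct.open_count == 1   # a subsequent open is not the first fd
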
diff --git a/SOURCES/0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch b/SOURCES/0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
new file mode 100644
index 0000000..fb74fd8
--- /dev/null
+++ b/SOURCES/0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
@@ -0,0 +1,44 @@
+From baeca3c9b70548463ceea0ae27e6f98cf06e96b7 Mon Sep 17 00:00:00 2001
+From: srijan-sivakumar <ssivakum@redhat.com>
+Date: Tue, 28 Jul 2020 22:27:34 +0530
+Subject: [PATCH 526/526] Quota quota_fsck.py, converting byte string to string
+
+Issue: The quota_fsck.py script throws a TypeError
+because the data is read as bytes and string
+operations are then applied to it. In Python 3,
+strings are unicode, hence the type error.
+
+Code Changes:
+Decode the bytes value into a string using utf-8.
+
+>Change-Id: Ia1ff52a821d664a371c8166692ff506ae39f6e40
+>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+>Fixes: #1401
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24785
+
+BUG: 1719171
+Change-Id: Ia1ff52a821d664a371c8166692ff506ae39f6e40
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/224780
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Kshithij Iyer <kiyer@redhat.com>
+Reviewed-by: Rinku Kothiya <rkothiya@redhat.com>
+---
+ extras/quota/quota_fsck.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
+index 174f2a2..ea8d638 100755
+--- a/extras/quota/quota_fsck.py
++++ b/extras/quota/quota_fsck.py
+@@ -157,6 +157,7 @@ def get_quota_xattr_brick(dpath):
+     xattr_dict['parents'] = {}
+ 
+     for xattr in pairs:
++        xattr = xattr.decode("utf-8")
+         xattr_key = xattr.split("=")[0]
+         if re.search("# file:", xattr_key):
+             # skip the file comment
+-- 
+1.8.3.1
+
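The failure mode is easy to reproduce in plain Python 3: bytes objects
reject str arguments to their methods, so the script died on the first
split. A minimal sketch of the bug and the one-line fix (the sample xattr
value is illustrative):

    raw = b"trusted.glusterfs.quota.size=0x0000000000000400"

    try:
        raw.split("=")            # bytes.split() with a str separator
    except TypeError as err:
        print("Python 3 failure:", err)

    xattr = raw.decode("utf-8")   # the fix: turn bytes into str first
    print(xattr.split("=")[0])    # trusted.glusterfs.quota.size
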
diff --git a/SOURCES/0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch b/SOURCES/0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
new file mode 100644
index 0000000..133a24e
--- /dev/null
+++ b/SOURCES/0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
@@ -0,0 +1,200 @@
+From 4152c77defac24ace3b1b6b9cc81a4f614254e4f Mon Sep 17 00:00:00 2001
+From: srijan-sivakumar <ssivakum@redhat.com>
+Date: Sat, 18 Jul 2020 05:59:09 +0530
+Subject: [PATCH 527/532] Events: Socket creation after getaddrinfo and IPv4
+ and IPv6 packet capture
+
+Issue: Currently, the socket creation is done
+prior to the getaddrinfo function being invoked. This
+can cause a mismatch between the protocol and address
+families of the created socket and the result
+of the getaddrinfo API. Also, the glustereventsd
+UDP server by default only captures IPv4 packets,
+so IPv6 packets are not captured at all.
+
+Code Changes:
+1. Modified the socket creation in such a way that
+the parameters taken in are dependent upon the
+result of the getaddrinfo function.
+2. Created a subclass for adding address family
+in glustereventsd.py for both AF_INET and AF_INET6.
+3. Modified addresses in the eventsapiconf.py.in
+
+Reasoning behind the approach:
+1. If we are using the getaddrinfo function, then
+socket creation should happen only after we have
+checked that valid addresses were returned.
+Hence socket creation should come after the call
+to getaddrinfo.
+2. The listening server which pushes the events
+to the webhook has to listen for both IPv4
+and IPv6 messages, as we cannot be sure which
+address family is picked in _gf_event.
+
+>Fixes: #1377
+>Change-Id: I568dcd1a977c8832f0fef981e1f81cac7043c760
+>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Upstream patch: https://review.gluster.org/c/glusterfs/+/24722
+
+BUG: 1814744
+Change-Id: I568dcd1a977c8832f0fef981e1f81cac7043c760
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/225567
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+---
+ events/src/eventsapiconf.py.in |  2 ++
+ events/src/glustereventsd.py   | 37 ++++++++++++++++++++++++++++++-------
+ libglusterfs/src/events.c      | 27 +++++++++++++++++++--------
+ 3 files changed, 51 insertions(+), 15 deletions(-)
+
+diff --git a/events/src/eventsapiconf.py.in b/events/src/eventsapiconf.py.in
+index 76b5954..700093b 100644
+--- a/events/src/eventsapiconf.py.in
++++ b/events/src/eventsapiconf.py.in
+@@ -28,6 +28,8 @@ def get_glusterd_workdir():
+     return glusterd_workdir
+ 
+ SERVER_ADDRESS = "0.0.0.0"
++SERVER_ADDRESSv4 = "0.0.0.0"
++SERVER_ADDRESSv6 = "::1"
+ DEFAULT_CONFIG_FILE = "@SYSCONF_DIR@/glusterfs/eventsconfig.json"
+ CUSTOM_CONFIG_FILE_TO_SYNC = "/events/config.json"
+ CUSTOM_CONFIG_FILE = get_glusterd_workdir() + CUSTOM_CONFIG_FILE_TO_SYNC
+diff --git a/events/src/glustereventsd.py b/events/src/glustereventsd.py
+index c4c7b65..341a3b6 100644
+--- a/events/src/glustereventsd.py
++++ b/events/src/glustereventsd.py
+@@ -13,6 +13,7 @@
+ from __future__ import print_function
+ import sys
+ import signal
++import threading
+ try:
+     import socketserver
+ except ImportError:
+@@ -23,10 +24,17 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter
+ from eventtypes import all_events
+ import handlers
+ import utils
+-from eventsapiconf import SERVER_ADDRESS, PID_FILE
++from eventsapiconf import SERVER_ADDRESSv4, SERVER_ADDRESSv6, PID_FILE
+ from eventsapiconf import AUTO_BOOL_ATTRIBUTES, AUTO_INT_ATTRIBUTES
+ from utils import logger, PidFile, PidFileLockFailed, boolify
+ 
++# Subclass so that specifically IPv4 packets are captured
++class UDPServerv4(socketserver.ThreadingUDPServer):
++    address_family = socket.AF_INET
++
++# Subclass so that specifically IPv6 packets are captured
++class UDPServerv6(socketserver.ThreadingUDPServer):
++    address_family = socket.AF_INET6
+ 
+ class GlusterEventsRequestHandler(socketserver.BaseRequestHandler):
+ 
+@@ -89,6 +97,10 @@ def signal_handler_sigusr2(sig, frame):
+     utils.restart_webhook_pool()
+ 
+ 
++def UDP_server_thread(sock):
++    sock.serve_forever()
++
++
+ def init_event_server():
+     utils.setup_logger()
+     utils.load_all()
+@@ -99,15 +111,26 @@ def init_event_server():
+         sys.stderr.write("Unable to get Port details from Config\n")
+         sys.exit(1)
+ 
+-    # Start the Eventing Server, UDP Server
++    # Creating the Eventing Server, UDP Server for IPv4 packets
++    try:
++        serverv4 = UDPServerv4((SERVER_ADDRESSv4, port),
++                   GlusterEventsRequestHandler)
++    except socket.error as e:
++        sys.stderr.write("Failed to start Eventsd for IPv4: {0}\n".format(e))
++        sys.exit(1)
++    # Creating the Eventing Server, UDP Server for IPv6 packets
+     try:
+-        server = socketserver.ThreadingUDPServer(
+-            (SERVER_ADDRESS, port),
+-            GlusterEventsRequestHandler)
++        serverv6 = UDPServerv6((SERVER_ADDRESSv6, port),
++                   GlusterEventsRequestHandler)
+     except socket.error as e:
+-        sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
++        sys.stderr.write("Failed to start Eventsd for IPv6: {0}\n".format(e))
+         sys.exit(1)
+-    server.serve_forever()
++    server_thread1 = threading.Thread(target=UDP_server_thread,
++                     args=(serverv4,))
++    server_thread2 = threading.Thread(target=UDP_server_thread,
++                     args=(serverv6,))
++    server_thread1.start()
++    server_thread2.start()
+ 
+ 
+ def get_args():
+diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
+index 6d1e383..4d720ca 100644
+--- a/libglusterfs/src/events.c
++++ b/libglusterfs/src/events.c
+@@ -40,6 +40,7 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     char *host = NULL;
+     struct addrinfo hints;
+     struct addrinfo *result = NULL;
++    struct addrinfo *iter_result_ptr = NULL;
+     xlator_t *this = THIS;
+     char *volfile_server_transport = NULL;
+ 
+@@ -51,13 +52,6 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+         goto out;
+     }
+ 
+-    /* Initialize UDP socket */
+-    sock = socket(AF_INET, SOCK_DGRAM, 0);
+-    if (sock < 0) {
+-        ret = EVENT_ERROR_SOCKET;
+-        goto out;
+-    }
+-
+     if (ctx) {
+         volfile_server_transport = ctx->cmd_args.volfile_server_transport;
+     }
+@@ -66,7 +60,6 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     }
+ 
+     /* host = NULL returns localhost */
+-    host = NULL;
+     if (ctx && ctx->cmd_args.volfile_server &&
+         (strcmp(volfile_server_transport, "unix"))) {
+         /* If it is client code then volfile_server is set
+@@ -84,6 +77,24 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+         goto out;
+     }
+ 
++    // iterate over the result and break when socket creation is success.
++    for (iter_result_ptr = result; iter_result_ptr != NULL;
++         iter_result_ptr = iter_result_ptr->ai_next) {
++        sock = socket(iter_result_ptr->ai_family, iter_result_ptr->ai_socktype,
++                      iter_result_ptr->ai_protocol);
++        if (sock != -1) {
++            break;
++        }
++    }
++    /*
++     * If none of the addrinfo structures lead to a successful socket
++     * creation, socket creation has failed.
++     */
++    if (sock < 0) {
++        ret = EVENT_ERROR_SOCKET;
++        goto out;
++    }
++
+     va_start(arguments, fmt);
+     ret = gf_vasprintf(&msg, fmt, arguments);
+     va_end(arguments);
+-- 
+1.8.3.1
+
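The corrected ordering on the C side (resolve first, then create a socket
from whatever family came back) has a direct Python equivalent. A sketch,
where open_event_socket is an invented helper name and 24009 is simply the
event port in effect at the time of this patch:

    import socket

    def open_event_socket(host, port):
        last_err = None
        for family, socktype, proto, _canon, addr in socket.getaddrinfo(
                host, port, type=socket.SOCK_DGRAM):
            try:
                # Socket parameters come from getaddrinfo's result, so
                # IPv4 and IPv6 answers both produce a matching socket.
                return socket.socket(family, socktype, proto), addr
            except OSError as err:
                last_err = err
        raise last_err or OSError("no usable address returned")

    sock, addr = open_event_socket("localhost", 24009)
    sock.close()
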
diff --git a/SOURCES/0528-Extras-Removing-xattr_analysis-script.patch b/SOURCES/0528-Extras-Removing-xattr_analysis-script.patch
new file mode 100644
index 0000000..d04068d
--- /dev/null
+++ b/SOURCES/0528-Extras-Removing-xattr_analysis-script.patch
@@ -0,0 +1,134 @@
+From 3fc74ce6c282f0f43fdcfeda47b71a1b19945b6d Mon Sep 17 00:00:00 2001
+From: srijan-sivakumar <ssivakum@redhat.com>
+Date: Wed, 3 Feb 2021 10:11:04 +0530
+Subject: [PATCH 528/532] Extras: Removing xattr_analysis script
+
+The xattr_analysis.py script is rarely used for
+debugging, and since it has some dependencies,
+it is being removed from the release.
+
+If need be, it can be shared directly with the customer.
+
+Label: DOWNSTREAM ONLY
+BUG: 1719171
+
+Change-Id: I4bb0df3ebfa7e43e13858b4b6e3efbb02ea79d5f
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/226301
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/quota/Makefile.am       |  4 +--
+ extras/quota/xattr_analysis.py | 73 ------------------------------------------
+ glusterfs.spec.in              |  1 -
+ 3 files changed, 2 insertions(+), 76 deletions(-)
+ delete mode 100755 extras/quota/xattr_analysis.py
+
+diff --git a/extras/quota/Makefile.am b/extras/quota/Makefile.am
+index cdb6be1..e4d9322 100644
+--- a/extras/quota/Makefile.am
++++ b/extras/quota/Makefile.am
+@@ -2,7 +2,7 @@ scriptsdir = $(datadir)/glusterfs/scripts
+ scripts_SCRIPTS =  log_accounting.sh
+ 
+ if WITH_SERVER
+-scripts_SCRIPTS += xattr_analysis.py quota_fsck.py
++scripts_SCRIPTS += quota_fsck.py
+ endif
+ 
+-EXTRA_DIST = log_accounting.sh xattr_analysis.py quota_fsck.py
++EXTRA_DIST = log_accounting.sh quota_fsck.py
+diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py
+deleted file mode 100755
+index 7bd7d96..0000000
+--- a/extras/quota/xattr_analysis.py
++++ /dev/null
+@@ -1,73 +0,0 @@
+-#!/usr/bin/python3
+-# Below script has two purposes
+-#  1. Display xattr of entire FS tree in a human readable form
+-#  2. Display all the directory where contri and size mismatch.
+-#      (If there are any directory with contri and size mismatch that are not dirty
+-#       then that highlights a propagation issue)
+-#  The script takes only one input LOG _FILE generated from the command,
+-#  find <brick_path> | xargs  getfattr -d -m. -e hex  > log_gluster_xattr
+-
+-from __future__ import print_function
+-import re
+-import subprocess
+-import sys
+-from hurry.filesize import size
+-
+-if len(sys.argv) < 2:
+-    sys.exit('Usage: %s log_gluster_xattr \n'
+-              'to generate log_gluster_xattr use: \n'
+-              'find <brick_path> | xargs  getfattr -d -m. -e hex  > log_gluster_xattr'
+-              % sys.argv[0])
+-LOG_FILE=sys.argv[1]
+-
+-def get_quota_xattr_brick():
+-    out = subprocess.check_output (["/usr/bin/cat", LOG_FILE])
+-    pairs = out.splitlines()
+-
+-    xdict = {}
+-    mismatch_size = [('====contri_size===', '====size====')]
+-    for xattr in pairs:
+-        k = xattr.split("=")[0]
+-        if re.search("# file:", k):
+-            print(xdict)
+-            filename=k
+-            print("=====" + filename + "=======")
+-            xdict = {}
+-        elif k is "":
+-            pass
+-        else:
+-            print(xattr)
+-            v = xattr.split("=")[1]
+-            if re.search("contri", k):
+-                if len(v) == 34:
+-                    # for files size is obtained in iatt, file count should be 1, dir count=0
+-                    xdict['contri_file_count'] = int(v[18:34], 16)
+-                    xdict['contri_dir_count'] = 0
+-                else:
+-                    xdict['contri_size'] = size(int(v[2:18], 16))
+-                    xdict['contri_file_count'] = int(v[18:34], 16)
+-                    xdict['contri_dir_count'] = int(v[34:], 16)
+-            elif re.search("size", k):
+-                xdict['size'] = size(int(v[2:18], 16))
+-                xdict['file_count'] = int(v[18:34], 16)
+-                xdict['dir_count'] = int(v[34:], 16)
+-            elif re.search("dirty", k):
+-                if v == '0x3000':
+-                    xdict['dirty'] = False
+-                elif v == '0x3100':
+-                    xdict['dirty'] = True
+-            elif re.search("limit_objects", k):
+-                xdict['limit_objects'] = int(v[2:18], 16)
+-            elif re.search("limit_set", k):
+-                xdict['limit_set'] = size(int(v[2:18], 16))
+-
+-            if 'size' in xdict and 'contri_size' in xdict and xdict['size'] != xdict['contri_size']:
+-                mismatch_size.append((xdict['contri_size'], xdict['size'], filename))
+-
+-    for values in mismatch_size:
+-        print(values)
+-
+-
+-if __name__ == '__main__':
+-    get_quota_xattr_brick()
+-
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 30d7162..2be7677 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -1380,7 +1380,6 @@ exit 0
+ %if ( 0%{!?_without_server:1} )
+ %files server
+ %doc extras/clear_xattrs.sh
+-%{_datadir}/glusterfs/scripts/xattr_analysis.py*
+ %{_datadir}/glusterfs/scripts/quota_fsck.py*
+ # sysconf
+ %config(noreplace) %{_sysconfdir}/glusterfs
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0529-geo-rep-prompt-should-work-for-ignore_deletes.patch b/SOURCES/0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
new file mode 100644
index 0000000..671451d
--- /dev/null
+++ b/SOURCES/0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
@@ -0,0 +1,75 @@
+From 1c7e96e73273b7891ea6ef0d768c2bf7ff5de7b0 Mon Sep 17 00:00:00 2001
+From: Shwetha K Acharya <sacharya@redhat.com>
+Date: Thu, 4 Feb 2021 16:29:39 +0530
+Subject: [PATCH 529/532] geo-rep: prompt should work for ignore_deletes
+
+The Python CLI is intelligent enough to parse both "-" and "_" alike:
+
+Example:
+geo-replication config updated successfully
+sync_job 4
+geo-replication config updated successfully
+gluster volume geo-replication primary 127.0.0.1::secondary config | grep sync_jobs
+sync_jobs:5
+
+Thus the prompt which appears after setting ignore-deletes to true
+should work for both ignore-deletes and ignore_deletes.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1224906
+Change-Id: I89f854200a604d07d3ac6c374fe6d445ce9f22ca
+Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/226599
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ cli/src/cli-cmd-parser.c       |  5 +++--
+ tests/00-geo-rep/bug-1708603.t | 12 ++++++++++--
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index 34f17c9..dda8979 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -3107,8 +3107,9 @@ cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+     if (!ret)
+         ret = dict_set_int32(dict, "type", type);
+     if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
+-        if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
+-            !strcmp((char *)words[wordcount - 1], "true")) {
++        if ((((!strcmp((char *)words[wordcount - 2], "ignore_deletes")) ||
++              (!strcmp((char *)words[wordcount - 2], "ignore-deletes")))) &&
++            ((!strcmp((char *)words[wordcount - 1], "true")))) {
+             question =
+                 "There exists ~15 seconds delay for the option to take"
+                 " effect from stime of the corresponding brick. Please"
+diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
+index 26913f1..edafb48 100644
+--- a/tests/00-geo-rep/bug-1708603.t
++++ b/tests/00-geo-rep/bug-1708603.t
+@@ -44,11 +44,19 @@ TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+ #Create geo-rep session
+ TEST create_georep_session $master $slave
+ 
+-echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
+-EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
++echo n | $GEOREP_CLI $master $slave config ignore_deletes true >/dev/null 2>&1
++EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore_deletes)
++
++echo y | $GEOREP_CLI $master $slave config ignore_deletes true
++EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore_deletes)
++
++$GEOREP_CLI $master $slave config ignore_deletes false
+ echo y | $GEOREP_CLI $master $slave config ignore-deletes true
+ EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+ 
++echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
++EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
++
+ #Stop Geo-rep
+ TEST $GEOREP_CLI $master $slave stop
+ 
+-- 
+1.8.3.1
+
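The equivalence the updated test exercises boils down to a single
normalization rule. A tiny Python sketch (normalize is an invented name;
the real parsing lives in cli-cmd-parser.c):

    def normalize(option):
        # geo-rep config option names treat "-" and "_" as interchangeable
        return option.replace("-", "_")

    # Both spellings must reach the same confirmation prompt:
    assert normalize("ignore-deletes") == normalize("ignore_deletes")
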
diff --git a/SOURCES/0530-gfapi-avoid-crash-while-logging-message.patch b/SOURCES/0530-gfapi-avoid-crash-while-logging-message.patch
new file mode 100644
index 0000000..aec73b7
--- /dev/null
+++ b/SOURCES/0530-gfapi-avoid-crash-while-logging-message.patch
@@ -0,0 +1,41 @@
+From 5a7348a266587704dae4f1ddda16b7c95f547251 Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya <rkothiya@redhat.com>
+Date: Sun, 7 Feb 2021 13:40:24 +0000
+Subject: [PATCH 530/532] gfapi: avoid crash while logging message.
+
+Break the format parameter into two separate parameters
+to avoid a crash.
+
+Upstream:
+> Reviewed-on: https://github.com/gluster/glusterfs/pull/2139
+> fixes: #2138
+> Change-Id: Idd5f3631488c1d892748f83e6847fb6fd2d0802a
+> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+
+BUG: 1691320
+
+Change-Id: Ifd6a96982ffd4e5334f8be2297de2ad826f3145b
+Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/226851
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ api/src/glfs-fops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
+index 051541f..6dc3b66 100644
+--- a/api/src/glfs-fops.c
++++ b/api/src/glfs-fops.c
+@@ -1529,7 +1529,7 @@ glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
+         ret = -1;
+         errno = EINVAL;
+         gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+-                "size >= %llu is not allowed", GF_UNIT_GB, NULL);
++                "Data size too large", "size=%llu", GF_UNIT_GB, NULL);
+         goto out;
+     }
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0531-Glustereventsd-Default-port-change-2091.patch b/SOURCES/0531-Glustereventsd-Default-port-change-2091.patch
new file mode 100644
index 0000000..8c2ecbf
--- /dev/null
+++ b/SOURCES/0531-Glustereventsd-Default-port-change-2091.patch
@@ -0,0 +1,69 @@
+From 058a853a1438b2a62586c545f71150ade3de23b7 Mon Sep 17 00:00:00 2001
+From: schaffung <ssivakum@redhat.com>
+Date: Wed, 10 Feb 2021 13:43:48 +0530
+Subject: [PATCH 531/532] Glustereventsd Default port change (#2091)
+
+Issue: The default port of glustereventsd is currently 24009,
+which is preventing glustereventsd from binding to the UDP port
+due to selinux policies.
+
+Fix: Change the default port to one in the ephemeral range
+so that glustereventsd can bind to it.
+
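+For reference, a minimal POSIX sketch of the bind that has to succeed
+at startup (generic sockets, not the glustereventsd source): if policy
+denies the port, bind() fails and the daemon cannot receive events.
+
+    #include <arpa/inet.h>
+    #include <netinet/in.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        struct sockaddr_in addr;
+        int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+        if (fd < 0) {
+            perror("socket");
+            return 1;
+        }
+        memset(&addr, 0, sizeof(addr));
+        addr.sin_family = AF_INET;
+        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+        addr.sin_port = htons(55555); /* new default eventsd port */
+
+        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+            perror("bind"); /* denied ports typically show EACCES */
+        close(fd);
+        return 0;
+    }
+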
+>Fixes: #2080
+>Change-Id: Ibdc87f83f82f69660dca95d6d14b226e10d8bd33
+>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Upstream Patch : https://github.com/gluster/glusterfs/pull/2091
+
+BUG: 1814744
+Change-Id: Ibdc87f83f82f69660dca95d6d14b226e10d8bd33
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/227249
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ events/src/eventsconfig.json   | 2 +-
+ extras/firewalld/glusterfs.xml | 2 +-
+ libglusterfs/src/events.c      | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/events/src/eventsconfig.json b/events/src/eventsconfig.json
+index 89e5b9c..14d8f84 100644
+--- a/events/src/eventsconfig.json
++++ b/events/src/eventsconfig.json
+@@ -1,5 +1,5 @@
+ {
+     "log-level": "INFO",
+-    "port": 24009,
++    "port": 55555,
+     "disable-events-log": false
+ }
+diff --git a/extras/firewalld/glusterfs.xml b/extras/firewalld/glusterfs.xml
+index 7e17644..dc74b2e 100644
+--- a/extras/firewalld/glusterfs.xml
++++ b/extras/firewalld/glusterfs.xml
+@@ -4,7 +4,7 @@
+ <description>Default ports for gluster-distributed storage</description>
+ <port protocol="tcp" port="24007"/>    <!--For glusterd -->
+ <port protocol="tcp" port="24008"/>    <!--For glusterd RDMA port management -->
+-<port protocol="tcp" port="24009"/>    <!--For glustereventsd -->
++<port protocol="tcp" port="55555"/>    <!--For glustereventsd -->
+ <port protocol="tcp" port="38465"/>    <!--Gluster NFS service -->
+ <port protocol="tcp" port="38466"/>    <!--Gluster NFS service -->
+ <port protocol="tcp" port="38467"/>    <!--Gluster NFS service -->
+diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
+index 4d720ca..3659606 100644
+--- a/libglusterfs/src/events.c
++++ b/libglusterfs/src/events.c
+@@ -26,7 +26,7 @@
+ #include "glusterfs/events.h"
+ 
+ #define EVENT_HOST "127.0.0.1"
+-#define EVENT_PORT 24009
++#define EVENT_PORT 55555
+ 
+ int
+ _gf_event(eventtypes_t event, const char *fmt, ...)
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0532-glusterd-fix-for-starting-brick-on-new-port.patch b/SOURCES/0532-glusterd-fix-for-starting-brick-on-new-port.patch
new file mode 100644
index 0000000..97e5aa7
--- /dev/null
+++ b/SOURCES/0532-glusterd-fix-for-starting-brick-on-new-port.patch
@@ -0,0 +1,79 @@
+From 2dad17fdbaab2ab2cda6a05dec9dcd2d37ea32ff Mon Sep 17 00:00:00 2001
+From: Nikhil Ladha <nladha@redhat.com>
+Date: Wed, 10 Feb 2021 15:07:32 +0530
+Subject: [PATCH 532/532] glusterd: fix for starting brick on new port
+
+The errno set by the runner code was not correct when bind() failed
+to claim an already occupied port in __socket_server_bind().
+
+Fix:
+Updated the code to return the correct errno from
+__socket_server_bind() if bind() fails with EADDRINUSE, and to use
+the errno returned from runner_run() to retry allocating a new port
+for the brick process.
+
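+The retry pattern this enables looks roughly like the following
+self-contained sketch (generic POSIX code with assumed names, not the
+glusterd or runner sources): bind failures are reported as -errno, and
+only -EADDRINUSE triggers a move to the next port.
+
+    #include <arpa/inet.h>
+    #include <errno.h>
+    #include <netinet/in.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+
+    /* Bind "fd" to "port"; return 0 on success or -errno on failure,
+       so the caller can tell EADDRINUSE apart from other errors. */
+    static int try_bind(int fd, unsigned short port)
+    {
+        struct sockaddr_in addr;
+
+        memset(&addr, 0, sizeof(addr));
+        addr.sin_family = AF_INET;
+        addr.sin_addr.s_addr = htonl(INADDR_ANY);
+        addr.sin_port = htons(port);
+        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+            return -errno; /* e.g. -EADDRINUSE, as the patch returns */
+        return 0;
+    }
+
+    int main(void)
+    {
+        unsigned short port;
+        int ret = -1;
+
+        for (port = 49152; port < 49162; port++) {
+            int fd = socket(AF_INET, SOCK_STREAM, 0);
+
+            if (fd < 0)
+                return 1;
+            ret = try_bind(fd, port);
+            if (ret == -EADDRINUSE) { /* only this means "try next" */
+                close(fd);
+                continue;
+            }
+            if (ret == 0)
+                printf("bound to port %hu\n", port);
+            close(fd);
+            break;
+        }
+        return ret == 0 ? 0 : 1;
+    }
+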
+>Fixes: #1101
+
+>Change-Id: If124337f41344a04f050754e402490529ef4ecdc
+>Signed-off-by: nik-redhat nladha@redhat.com
+
+Upstream patch: https://github.com/gluster/glusterfs/pull/2090
+
+BUG: 1865796
+
+Change-Id: If124337f41344a04f050754e402490529ef4ecdc
+Signed-off-by: nik-redhat <nladha@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/227261
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ rpc/rpc-transport/socket/src/socket.c      | 3 +++
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 6 ++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
+index 1ee7320..96ed9f1 100644
+--- a/rpc/rpc-transport/socket/src/socket.c
++++ b/rpc/rpc-transport/socket/src/socket.c
+@@ -973,8 +973,11 @@ __socket_server_bind(rpc_transport_t *this)
+                this->myinfo.identifier, strerror(errno));
+         if (errno == EADDRINUSE) {
+             gf_log(this->name, GF_LOG_ERROR, "Port is already in use");
++            ret = -EADDRINUSE;
++            goto out;
+         }
+     }
++
+     if (AF_UNIX != SA(&this->myinfo.sockaddr)->sa_family) {
+         if (getsockname(priv->sock, SA(&this->myinfo.sockaddr),
+                         &this->myinfo.sockaddr_len) != 0) {
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index cf32bd9..bc188a2 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -2151,6 +2151,7 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo,
+         ret = -1;
+         goto out;
+     }
++
+     /* Build the exp_path, before starting the glusterfsd even in
+        valgrind mode. Otherwise all the glusterfsd processes start
+        writing the valgrind log to the same file.
+@@ -2289,13 +2290,10 @@ retry:
+ 
+     if (wait) {
+         synclock_unlock(&priv->big_lock);
+-        errno = 0;
+         ret = runner_run(&runner);
+-        if (errno != 0)
+-            ret = errno;
+         synclock_lock(&priv->big_lock);
+ 
+-        if (ret == EADDRINUSE) {
++        if (ret == -EADDRINUSE) {
+             /* retry after getting a new port */
+             gf_msg(this->name, GF_LOG_WARNING, -ret,
+                    GD_MSG_SRC_BRICK_PORT_UNAVAIL,
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch b/SOURCES/0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
new file mode 100644
index 0000000..158b4b7
--- /dev/null
+++ b/SOURCES/0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
@@ -0,0 +1,250 @@
+From 854ab79dbef449c39adf66e3faebb4681359fce4 Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Thu, 18 Feb 2021 09:40:44 +0530
+Subject: [PATCH 533/538] glusterd: Rebalance cli is not showing correct status
+ after reboot (#2172)
+
+Rebalance cli is not showing correct status after reboot.
+
+The CLI does not show the correct status because the defrag object
+is not valid at the time an rpc connection is created to fetch the
+status. The defrag object is not valid because, while glusterd is
+starting, glusterd_restart_rebalance can be called almost at the same
+time by two different synctasks; glusterd then gets a disconnect on
+the rpc object and cleans up the defrag object.
+
+Solution: To keep the defrag object valid, take a reference on it
+          before creating the defrag rpc object.
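+
+The rpc-creation part of the fix follows a publish-once-under-lock
+pattern. A compact sketch with hypothetical names (publish_once stands
+in for the defrag->rpc assignment, free() for rpc_clnt_unref()):
+
+    #include <pthread.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+    static void *published; /* stands in for defrag->rpc */
+
+    /* Publish "candidate" unless another thread already won the race;
+       the loser's candidate is released, mirroring the
+       rpc_clnt_unref() branch in the hunk below. */
+    static void *publish_once(void *candidate)
+    {
+        void *winner;
+
+        pthread_mutex_lock(&lock);
+        if (published == NULL)
+            published = candidate;
+        else
+            free(candidate); /* lost the race: drop our copy */
+        winner = published;
+        pthread_mutex_unlock(&lock);
+        return winner;
+    }
+
+    int main(void)
+    {
+        void *a = malloc(8);
+        void *b = malloc(8);
+
+        printf("first publish wins:  %d\n", publish_once(a) == a);
+        printf("second is discarded: %d\n", publish_once(b) == a);
+        free(published);
+        return 0;
+    }
+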
+>Fixes: #1339
+>Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+>Change-Id: Ia284015d79beaa3d703ebabb92f26870a5aaafba
+Upstream Patch : https://github.com/gluster/glusterfs/pull/2172
+
+BUG: 1832306
+Change-Id: Ia284015d79beaa3d703ebabb92f26870a5aaafba
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/228249
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-rebalance.c | 35 ++++++++++-----
+ xlators/mgmt/glusterd/src/glusterd-syncop.c    |  1 +
+ xlators/mgmt/glusterd/src/glusterd-utils.c     | 59 +++++++++++++++++++++++++-
+ xlators/mgmt/glusterd/src/glusterd-utils.h     |  5 +++
+ xlators/mgmt/glusterd/src/glusterd.h           |  1 +
+ 5 files changed, 90 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+index b419a89..fcd5318 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
++++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+@@ -86,6 +86,7 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
+     glusterd_conf_t *priv = NULL;
+     xlator_t *this = NULL;
+     int pid = -1;
++    int refcnt = 0;
+ 
+     this = THIS;
+     if (!this)
+@@ -125,11 +126,12 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
+         }
+ 
+         case RPC_CLNT_DISCONNECT: {
+-            if (!defrag->connected)
+-                return 0;
+-
+             LOCK(&defrag->lock);
+             {
++                if (!defrag->connected) {
++                    UNLOCK(&defrag->lock);
++                    return 0;
++                }
+                 defrag->connected = 0;
+             }
+             UNLOCK(&defrag->lock);
+@@ -146,11 +148,11 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
+             glusterd_defrag_rpc_put(defrag);
+             if (defrag->cbk_fn)
+                 defrag->cbk_fn(volinfo, volinfo->rebal.defrag_status);
+-
+-            GF_FREE(defrag);
++            refcnt = glusterd_defrag_unref(defrag);
+             gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_REBALANCE_DISCONNECTED,
+-                   "Rebalance process for volume %s has disconnected.",
+-                   volinfo->volname);
++                   "Rebalance process for volume %s has disconnected"
++                   " and defrag refcnt is %d.",
++                   volinfo->volname, refcnt);
+             break;
+         }
+         case RPC_CLNT_DESTROY:
+@@ -309,7 +311,11 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
+         gf_msg_debug("glusterd", 0, "rebalance command failed");
+         goto out;
+     }
+-
++    /* Take a reference before the sleep to protect the defrag object
++       from cleanup while glusterd_restart_rebalance is called for
++       other bricks by a synctask during a glusterd restart.
++    */
++    glusterd_defrag_ref(defrag);
+     sleep(5);
+ 
+     ret = glusterd_rebalance_rpc_create(volinfo);
+@@ -372,6 +378,7 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
+     GF_ASSERT(this);
+     priv = this->private;
+     GF_ASSERT(priv);
++    struct rpc_clnt *rpc = NULL;
+ 
+     // rebalance process is not started
+     if (!defrag)
+@@ -396,13 +403,21 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
+     }
+ 
+     glusterd_volinfo_ref(volinfo);
+-    ret = glusterd_rpc_create(&defrag->rpc, options, glusterd_defrag_notify,
+-                              volinfo, _gf_true);
++    ret = glusterd_rpc_create(&rpc, options, glusterd_defrag_notify, volinfo,
++                              _gf_false);
+     if (ret) {
+         gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_RPC_CREATE_FAIL,
+                "Glusterd RPC creation failed");
+         goto out;
+     }
++    LOCK(&defrag->lock);
++    {
++        if (!defrag->rpc)
++            defrag->rpc = rpc;
++        else
++            rpc_clnt_unref(rpc);
++    }
++    UNLOCK(&defrag->lock);
+     ret = 0;
+ out:
+     if (options)
+diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+index df78fef..05c9e11 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
+@@ -1732,6 +1732,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+         if (!rpc) {
+             if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
+                 volinfo = pending_node->node;
++                glusterd_defrag_ref(volinfo->rebal.defrag);
+                 ret = glusterd_rebalance_rpc_create(volinfo);
+                 if (ret) {
+                     ret = 0;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index bc188a2..9fb8eab 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -93,6 +93,44 @@
+ #define NLMV4_VERSION 4
+ #define NLMV1_VERSION 1
+ 
++int
++glusterd_defrag_ref(glusterd_defrag_info_t *defrag)
++{
++    int refcnt = 0;
++
++    if (!defrag)
++        goto out;
++
++    LOCK(&defrag->lock);
++    {
++        refcnt = ++defrag->refcnt;
++    }
++    UNLOCK(&defrag->lock);
++
++out:
++    return refcnt;
++}
++
++int
++glusterd_defrag_unref(glusterd_defrag_info_t *defrag)
++{
++    int refcnt = -1;
++
++    if (!defrag)
++        goto out;
++
++    LOCK(&defrag->lock);
++    {
++        refcnt = --defrag->refcnt;
++        if (refcnt <= 0)
++            GF_FREE(defrag);
++    }
++    UNLOCK(&defrag->lock);
++
++out:
++    return refcnt;
++}
++
+ gf_boolean_t
+ is_brick_mx_enabled(void)
+ {
+@@ -9370,6 +9408,7 @@ glusterd_volume_defrag_restart(glusterd_volinfo_t *volinfo, char *op_errstr,
+     char pidfile[PATH_MAX] = "";
+     int ret = -1;
+     pid_t pid = 0;
++    int refcnt = 0;
+ 
+     this = THIS;
+     GF_ASSERT(this);
+@@ -9410,7 +9449,25 @@ glusterd_volume_defrag_restart(glusterd_volinfo_t *volinfo, char *op_errstr,
+                              volinfo->volname);
+                     goto out;
+                 }
+-                ret = glusterd_rebalance_rpc_create(volinfo);
++                refcnt = glusterd_defrag_ref(volinfo->rebal.defrag);
++                /* If the refcnt is 1, the defrag object was either
++                   populated by glusterd_rebalance_defrag_init or the
++                   previous rpc creation failed. If it is not 1, the
++                   defrag object was populated when the rebalance
++                   daemon was started. We need to create an rpc object
++                   only if a previous rpc connection was not
++                   established successfully while the rebalance daemon
++                   was restarted by glusterd_handle_defrag_start;
++                   otherwise the rebalance cli does not show the
++                   correct status right after a node reboot, because
++                   the defrag object has been destroyed during
++                   handling of the rpc disconnect. */
++                if (refcnt == 1) {
++                    ret = glusterd_rebalance_rpc_create(volinfo);
++                } else {
++                    ret = 0;
++                    glusterd_defrag_unref(volinfo->rebal.defrag);
++                }
+                 break;
+             }
+         case GF_DEFRAG_STATUS_NOT_STARTED:
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
+index 02d85d2..4541471 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
+@@ -886,4 +886,9 @@ int32_t
+ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
+                            int32_t sub_count);
+ 
++int
++glusterd_defrag_ref(glusterd_defrag_info_t *defrag);
++
++int
++glusterd_defrag_unref(glusterd_defrag_info_t *defrag);
+ #endif
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index efe4d0e..9de3f28 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -321,6 +321,7 @@ struct glusterd_defrag_info_ {
+     uint64_t total_data;
+     uint64_t num_files_lookedup;
+     uint64_t total_failures;
++    int refcnt;
+     gf_lock_t lock;
+     int cmd;
+     pthread_t th;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0534-glusterd-Resolve-use-after-free-bug-2181.patch b/SOURCES/0534-glusterd-Resolve-use-after-free-bug-2181.patch
new file mode 100644
index 0000000..2dc72c1
--- /dev/null
+++ b/SOURCES/0534-glusterd-Resolve-use-after-free-bug-2181.patch
@@ -0,0 +1,47 @@
+From b3647eb5415b2e3d9e1a11ad6c4689e520f17b39 Mon Sep 17 00:00:00 2001
+From: mohit84 <moagrawa@redhat.com>
+Date: Mon, 22 Feb 2021 10:09:34 +0530
+Subject: [PATCH 534/538] glusterd: Resolve use after free bug (#2181)
+
+The commit 61ae58e67567ea4de8f8efc6b70a9b1f8e0f1bea
+introduced a coverity-reported use-after-free bug: the
+object was used after it had been cleaned up.
+
+Free the memory after coming out of the critical section.
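+
+A minimal sketch of the corrected unref (pthread-based, with a
+hypothetical obj_t in place of glusterd_defrag_info_t): the count is
+decremented under the lock, but the free happens only after the lock
+is released, so the lock is never touched after the object is gone.
+
+    #include <pthread.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    typedef struct obj {
+        pthread_mutex_t lock;
+        int refcnt;
+    } obj_t;
+
+    static int obj_unref(obj_t *o)
+    {
+        int refcnt;
+
+        pthread_mutex_lock(&o->lock);
+        refcnt = --o->refcnt;
+        pthread_mutex_unlock(&o->lock);
+        if (refcnt <= 0) {
+            /* Freeing here, outside the critical section, avoids
+               destroying the object while still holding (and then
+               unlocking) its own lock. */
+            pthread_mutex_destroy(&o->lock);
+            free(o);
+        }
+        return refcnt;
+    }
+
+    int main(void)
+    {
+        obj_t *o = calloc(1, sizeof(*o));
+
+        if (!o)
+            return 1;
+        pthread_mutex_init(&o->lock, NULL);
+        o->refcnt = 2;
+        printf("refcnt now %d\n", obj_unref(o)); /* 1: still alive */
+        obj_unref(o);                            /* 0: freed */
+        return 0;
+    }
+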
+>Fixes: #2180
+
+>Change-Id: Iee2050c4883a0dd44b8523bb822b664462ab6041
+>Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Upstream Patch : https://github.com/gluster/glusterfs/pull/2181
+
+BUG: 1832306
+Change-Id: Iee2050c4883a0dd44b8523bb822b664462ab6041
+Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/228578
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 9fb8eab..6d40be5 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -122,11 +122,10 @@ glusterd_defrag_unref(glusterd_defrag_info_t *defrag)
+     LOCK(&defrag->lock);
+     {
+         refcnt = --defrag->refcnt;
+-        if (refcnt <= 0)
+-            GF_FREE(defrag);
+     }
+     UNLOCK(&defrag->lock);
+-
++    if (refcnt <= 0)
++        GF_FREE(defrag);
+ out:
+     return refcnt;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0535-multiple-files-use-dict_allocate_and_serialize-where.patch b/SOURCES/0535-multiple-files-use-dict_allocate_and_serialize-where.patch
new file mode 100644
index 0000000..e1622de
--- /dev/null
+++ b/SOURCES/0535-multiple-files-use-dict_allocate_and_serialize-where.patch
@@ -0,0 +1,270 @@
+From 775d500cd136bd8c940faaeffde1217c25a87e3d Mon Sep 17 00:00:00 2001
+From: Yaniv Kaul <ykaul@redhat.com>
+Date: Sun, 2 Jun 2019 21:14:18 +0300
+Subject: [PATCH 535/538] (multiple files) use dict_allocate_and_serialize()
+ where applicable.
+
+This function does length, allocation and serialization for you.
+
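+The shape of the consolidation, as a self-contained sketch (struct
+blob and its helpers are stand-ins, not the dict API): the three-step
+length/allocate/serialize dance collapses into one call that either
+hands back an allocated buffer or fails cleanly.
+
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+
+    struct blob { const char *data; size_t len; };
+
+    static int blob_serialized_length(const struct blob *b)
+    {
+        return (int)b->len;
+    }
+
+    static int blob_serialize(const struct blob *b, char *buf)
+    {
+        memcpy(buf, b->data, b->len);
+        return 0;
+    }
+
+    /* Combined helper: does length, allocation and serialization for
+       you, in the spirit of dict_allocate_and_serialize(). */
+    static int blob_allocate_and_serialize(const struct blob *b,
+                                           char **bufp,
+                                           unsigned int *lenp)
+    {
+        int len = blob_serialized_length(b);
+        char *buf;
+
+        if (len <= 0)
+            return -1;
+        buf = malloc((size_t)len);
+        if (buf == NULL)
+            return -1;
+        if (blob_serialize(b, buf) != 0) {
+            free(buf);
+            return -1;
+        }
+        *bufp = buf;
+        *lenp = (unsigned int)len;
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct blob b = { "hello", 5 };
+        char *buf = NULL;
+        unsigned int len = 0;
+
+        if (blob_allocate_and_serialize(&b, &buf, &len) == 0) {
+            printf("%u bytes serialized\n", len);
+            free(buf);
+        }
+        return 0;
+    }
+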
+Upstream patch:
+> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/22800
+> Change-Id: I142a259952a2fe83dd719442afaefe4a43a8e55e
+> updates: bz#1193929
+> Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
+
+Change-Id: I142a259952a2fe83dd719442afaefe4a43a8e55e
+BUG: 1911292
+Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/228611
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/afr/src/afr-inode-read.c       | 34 +++++---------------------
+ xlators/cluster/ec/src/ec-combine.c            | 16 +++---------
+ xlators/features/locks/src/posix.c             | 23 +++--------------
+ xlators/protocol/client/src/client-handshake.c | 14 +++--------
+ xlators/protocol/server/src/server-handshake.c | 24 +++++++-----------
+ xlators/protocol/server/src/server-helpers.c   | 27 +++-----------------
+ 6 files changed, 28 insertions(+), 110 deletions(-)
+
+diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
+index 523a5b4..cf305af 100644
+--- a/xlators/cluster/afr/src/afr-inode-read.c
++++ b/xlators/cluster/afr/src/afr-inode-read.c
+@@ -948,24 +948,13 @@ unlock:
+             goto unwind;
+         }
+ 
+-        len = dict_serialized_length(local->dict);
+-        if (len <= 0) {
+-            goto unwind;
+-        }
+-
+-        lockinfo_buf = GF_CALLOC(1, len, gf_common_mt_char);
+-        if (!lockinfo_buf) {
++        op_ret = dict_allocate_and_serialize(
++            local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
++        if (op_ret != 0) {
+             local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+             goto unwind;
+         }
+ 
+-        op_ret = dict_serialize(local->dict, lockinfo_buf);
+-        if (op_ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = -op_ret;
+-        }
+-
+         op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+                                  (void *)lockinfo_buf, len);
+         if (op_ret < 0) {
+@@ -1064,24 +1053,13 @@ unlock:
+             goto unwind;
+         }
+ 
+-        len = dict_serialized_length(local->dict);
+-        if (len <= 0) {
+-            goto unwind;
+-        }
+-
+-        lockinfo_buf = GF_CALLOC(1, len, gf_common_mt_char);
+-        if (!lockinfo_buf) {
++        op_ret = dict_allocate_and_serialize(
++            local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
++        if (op_ret != 0) {
+             local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+             goto unwind;
+         }
+ 
+-        op_ret = dict_serialize(local->dict, lockinfo_buf);
+-        if (op_ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = -op_ret;
+-        }
+-
+         op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+                                  (void *)lockinfo_buf, len);
+         if (op_ret < 0) {
+diff --git a/xlators/cluster/ec/src/ec-combine.c b/xlators/cluster/ec/src/ec-combine.c
+index 99e5534..9d712b3 100644
+--- a/xlators/cluster/ec/src/ec-combine.c
++++ b/xlators/cluster/ec/src/ec-combine.c
+@@ -486,22 +486,12 @@ ec_dict_data_merge(ec_cbk_data_t *cbk, int32_t which, char *key)
+ 
+     tmp = NULL;
+ 
+-    len = dict_serialized_length(lockinfo);
+-    if (len < 0) {
+-        err = len;
+-
+-        goto out;
+-    }
+-    ptr = GF_MALLOC(len, gf_common_mt_char);
+-    if (ptr == NULL) {
+-        err = -ENOMEM;
+-
+-        goto out;
+-    }
+-    err = dict_serialize(lockinfo, ptr);
++    err = dict_allocate_and_serialize(lockinfo, (char **)&ptr,
++                                      (unsigned int *)&len);
+     if (err != 0) {
+         goto out;
+     }
++
+     dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
+     err = dict_set_dynptr(dict, key, ptr, len);
+     if (err != 0) {
+diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
+index 5ae0125..cdd1ff7 100644
+--- a/xlators/features/locks/src/posix.c
++++ b/xlators/features/locks/src/posix.c
+@@ -1547,8 +1547,9 @@ pl_fgetxattr_handle_lockinfo(xlator_t *this, fd_t *fd, dict_t *dict,
+         goto out;
+     }
+ 
+-    len = dict_serialized_length(tmp);
+-    if (len < 0) {
++    op_ret = dict_allocate_and_serialize(tmp, (char **)&buf,
++                                         (unsigned int *)&len);
++    if (op_ret != 0) {
+         *op_errno = -op_ret;
+         op_ret = -1;
+         gf_log(this->name, GF_LOG_WARNING,
+@@ -1558,24 +1559,6 @@ pl_fgetxattr_handle_lockinfo(xlator_t *this, fd_t *fd, dict_t *dict,
+         goto out;
+     }
+ 
+-    buf = GF_CALLOC(1, len, gf_common_mt_char);
+-    if (buf == NULL) {
+-        op_ret = -1;
+-        *op_errno = ENOMEM;
+-        goto out;
+-    }
+-
+-    op_ret = dict_serialize(tmp, buf);
+-    if (op_ret < 0) {
+-        *op_errno = -op_ret;
+-        op_ret = -1;
+-        gf_log(this->name, GF_LOG_WARNING,
+-               "dict_serialize failed (%s) while handling lockinfo "
+-               "for fd (ptr: %p inode-gfid:%s)",
+-               strerror(*op_errno), fd, uuid_utoa(fd->inode->gfid));
+-        goto out;
+-    }
+-
+     op_ret = dict_set_dynptr(dict, GF_XATTR_LOCKINFO_KEY, buf, len);
+     if (op_ret < 0) {
+         *op_errno = -op_ret;
+diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
+index 0002361..6b20d92 100644
+--- a/xlators/protocol/client/src/client-handshake.c
++++ b/xlators/protocol/client/src/client-handshake.c
+@@ -1286,18 +1286,10 @@ client_setvolume(xlator_t *this, struct rpc_clnt *rpc)
+                "Failed to set client opversion in handshake message");
+     }
+ 
+-    ret = dict_serialized_length(options);
+-    if (ret < 0) {
+-        gf_msg(this->name, GF_LOG_ERROR, 0, PC_MSG_DICT_ERROR,
+-               "failed to get serialized length of dict");
++    ret = dict_allocate_and_serialize(options, (char **)&req.dict.dict_val,
++                                      &req.dict.dict_len);
++    if (ret != 0) {
+         ret = -1;
+-        goto fail;
+-    }
+-    req.dict.dict_len = ret;
+-    req.dict.dict_val = GF_CALLOC(1, req.dict.dict_len,
+-                                  gf_client_mt_clnt_req_buf_t);
+-    ret = dict_serialize(options, req.dict.dict_val);
+-    if (ret < 0) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, PC_MSG_DICT_SERIALIZE_FAIL,
+                "failed to serialize "
+                "dictionary");
+diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c
+index eeca73c..54dc030 100644
+--- a/xlators/protocol/server/src/server-handshake.c
++++ b/xlators/protocol/server/src/server-handshake.c
+@@ -676,22 +676,16 @@ fail:
+     GF_ASSERT(rsp);
+ 
+     rsp->op_ret = 0;
+-    ret = dict_serialized_length(reply);
+-    if (ret > 0) {
+-        rsp->dict.dict_len = ret;
+-        rsp->dict.dict_val = GF_CALLOC(1, rsp->dict.dict_len,
+-                                       gf_server_mt_rsp_buf_t);
+-        if (rsp->dict.dict_val) {
+-            ret = dict_serialize(reply, rsp->dict.dict_val);
+-            if (ret < 0) {
+-                gf_msg_debug("server-handshake", 0,
+-                             "failed "
+-                             "to serialize reply dict");
+-                op_ret = -1;
+-                op_errno = -ret;
+-            }
+-        }
++
++    ret = dict_allocate_and_serialize(reply, (char **)&rsp->dict.dict_val,
++                                      &rsp->dict.dict_len);
++    if (ret != 0) {
++        ret = -1;
++        gf_msg_debug("server-handshake", 0, "failed to serialize reply dict");
++        op_ret = -1;
++        op_errno = -ret;
+     }
++
+     rsp->op_ret = op_ret;
+     rsp->op_errno = gf_errno_to_error(op_errno);
+ 
+diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c
+index e74a24d..33959b5 100644
+--- a/xlators/protocol/server/src/server-helpers.c
++++ b/xlators/protocol/server/src/server-helpers.c
+@@ -902,7 +902,6 @@ serialize_rsp_direntp(gf_dirent_t *entries, gfs3_readdirp_rsp *rsp)
+     gfs3_dirplist *trav = NULL;
+     gfs3_dirplist *prev = NULL;
+     int ret = -1;
+-    int temp = 0;
+ 
+     GF_VALIDATE_OR_GOTO("server", entries, out);
+     GF_VALIDATE_OR_GOTO("server", rsp, out);
+@@ -923,28 +922,10 @@ serialize_rsp_direntp(gf_dirent_t *entries, gfs3_readdirp_rsp *rsp)
+ 
+         /* if 'dict' is present, pack it */
+         if (entry->dict) {
+-            temp = dict_serialized_length(entry->dict);
+-
+-            if (temp < 0) {
+-                gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, PS_MSG_INVALID_ENTRY,
+-                       "failed to get "
+-                       "serialized length of reply dict");
+-                errno = EINVAL;
+-                trav->dict.dict_len = 0;
+-                goto out;
+-            }
+-            trav->dict.dict_len = temp;
+-
+-            trav->dict.dict_val = GF_CALLOC(1, trav->dict.dict_len,
+-                                            gf_server_mt_rsp_buf_t);
+-            if (!trav->dict.dict_val) {
+-                errno = ENOMEM;
+-                trav->dict.dict_len = 0;
+-                goto out;
+-            }
+-
+-            ret = dict_serialize(entry->dict, trav->dict.dict_val);
+-            if (ret < 0) {
++            ret = dict_allocate_and_serialize(entry->dict,
++                                              (char **)&trav->dict.dict_val,
++                                              &trav->dict.dict_len);
++            if (ret != 0) {
+                 gf_msg(THIS->name, GF_LOG_ERROR, 0, PS_MSG_DICT_SERIALIZE_FAIL,
+                        "failed to serialize reply dict");
+                 errno = -ret;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch b/SOURCES/0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
new file mode 100644
index 0000000..94e0b64
--- /dev/null
+++ b/SOURCES/0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
@@ -0,0 +1,102 @@
+From 32281b4b5cf79d0ef6f0c65775bb81093e1ba479 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Wed, 24 Feb 2021 18:44:12 +0530
+Subject: [PATCH 536/538] dht: Ongoing IO is failed during volume shrink
+ operation (#2188)
+
+Commit (c878174) introduced a check to avoid a stale
+layout issue. To avoid stale layouts, dht sets a key along
+with the layout when winding a create fop, and posix
+validates the parent layout based on the key value. If the
+layout does not match, it throws an error. When a volume
+is shrunk, the layout is changed by the rebalance daemon,
+and if the layout no longer matches, dht is not able to
+wind a create fop successfully.
+
+Solution: Populate the key only when dht winds the fop for
+          the first time. If that attempt returns an error,
+          dht takes a lock and then reattempts to wind the
+          fop.
+
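+A toy model of the two-attempt create (all names are hypothetical;
+server_create stands in for posix_create's validation): the lockless
+first wind carries the parent-layout key, and the locked retry drops
+it because the layout may have legitimately changed.
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    static int server_create(bool validate, int client_layout,
+                             int disk_layout)
+    {
+        if (validate && client_layout != disk_layout)
+            return -1; /* stale parent layout detected */
+        return 0;
+    }
+
+    int main(void)
+    {
+        int client_layout = 1;
+        int disk_layout = 2; /* rebalance changed it after a shrink */
+        int lk_count = 0;    /* first attempt is wound without a lock */
+        int ret;
+
+        /* First wind: carry the validation key (validate == true). */
+        ret = server_create(lk_count == 0, client_layout, disk_layout);
+        if (ret != 0) {
+            /* Retry under lock, dropping the key (validate == false). */
+            lk_count = 1;
+            ret = server_create(lk_count == 0, client_layout,
+                                disk_layout);
+        }
+        printf("create %s\n", ret == 0 ? "succeeded" : "failed");
+        return 0;
+    }
+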
+> Fixes: #2187
+> Change-Id: Ie018386e7823a11eea415496bb226ca032453a55
+> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+> (Cherry pick from commit da6ce622b722f7d12619c5860293faf03f7cd00c
+> Reviewed on upstream link https://github.com/gluster/glusterfs/pull/2188
+
+Bug: 1924044
+Change-Id: I7670dbe2d562b83db0af3753f994653ffdd49591
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/228941
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/dht/src/dht-common.c | 41 ++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 11 deletions(-)
+
+diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
+index fe1d0ee..7425c1a 100644
+--- a/xlators/cluster/dht/src/dht-common.c
++++ b/xlators/cluster/dht/src/dht-common.c
+@@ -8526,15 +8526,32 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
+ {
+     dht_local_t *local = NULL;
+     xlator_t *avail_subvol = NULL;
++    int lk_count = 0;
+ 
+     local = frame->local;
+ 
+     if (!dht_is_subvol_filled(this, subvol)) {
+-        gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
+-                     subvol->name);
+-
+-        dht_set_parent_layout_in_dict(loc, this, local);
+-
++        lk_count = local->lock[0].layout.parent_layout.lk_count;
++        gf_msg_debug(this->name, 0, "creating %s on %s with lock_count %d",
++                     loc->path, subvol->name, lk_count);
++        /* The function dht_set_parent_layout_in_dict sets the layout
++           in the dictionary, and posix_create validates the layout
++           before creating a file. If the parent layout does not match
++           the on-disk layout, the posix xlator throws an error. When
++           a volume is shrunk, the layout is changed by the rebalance
++           daemon, so we must call this function only when the caller
++           holds no lock; otherwise we would not be able to populate
++           the layout on disk after it has changed.
++        */
++        if (!lk_count) {
++            dht_set_parent_layout_in_dict(loc, this, local);
++        } else {
++            /* Delete the key to skip layout validation if it was set
++               by a previous STACK_WIND attempt made while dht_create
++               held no lock.
++            */
++            (void)dict_del_sizen(local->params, GF_PREOP_PARENT_KEY);
++        }
+         STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
+                           subvol->fops->create, loc, flags, mode, umask, fd,
+                           params);
+@@ -8554,12 +8571,14 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
+ 
+             goto out;
+         }
+-
+-        gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
+-                     subvol->name);
+-
+-        dht_set_parent_layout_in_dict(loc, this, local);
+-
++        lk_count = local->lock[0].layout.parent_layout.lk_count;
++        gf_msg_debug(this->name, 0, "creating %s on %s with lk_count %d",
++                     loc->path, subvol->name, lk_count);
++        if (!lk_count) {
++            dht_set_parent_layout_in_dict(loc, this, local);
++        } else {
++            (void)dict_del_sizen(local->params, GF_PREOP_PARENT_KEY);
++        }
+         STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
+                           subvol->fops->create, loc, flags, mode, umask, fd,
+                           params);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch b/SOURCES/0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
new file mode 100644
index 0000000..dcf0940
--- /dev/null
+++ b/SOURCES/0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
@@ -0,0 +1,387 @@
+From 7b7ec67680415c22773ebb2a5daacf298b6b1e06 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Sat, 13 Feb 2021 18:37:32 +0100
+Subject: [PATCH 537/538] cluster/afr: Fix race in lockinfo (f)getxattr
+
+A shared dictionary was updated outside the lock after having updated
+the number of remaining answers. This means that one thread may be
+processing the last answer and unwinding the request before another
+thread completes updating the dict.
+
+    Thread 1                           Thread 2
+
+    LOCK()
+    call_cnt-- (=1)
+    UNLOCK()
+                                       LOCK()
+                                       call_cnt-- (=0)
+                                       UNLOCK()
+                                       update_dict(dict)
+                                       if (call_cnt == 0) {
+                                           STACK_UNWIND(dict);
+                                       }
+    update_dict(dict)
+    if (call_cnt == 0) {
+        STACK_UNWIND(dict);
+    }
+
+The updates from thread 1 are lost.
+
+This patch also reduces the work done inside the locked region and
+reduces code duplication.
+
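+The fix orders things so that shared-dict updates complete before the
+answer counter is decremented, and only the thread that drops the
+counter to zero unwinds. A standalone sketch of that ordering using
+C11 atomics (the patch itself uses liburcu's uatomic_sub_return):
+
+    #include <pthread.h>
+    #include <stdatomic.h>
+    #include <stdio.h>
+
+    static atomic_int call_count = 2; /* two answers outstanding */
+    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+    static int shared_sum;
+
+    static void *answer(void *arg)
+    {
+        pthread_mutex_lock(&lock); /* shared-state updates come first */
+        shared_sum += *(int *)arg;
+        pthread_mutex_unlock(&lock);
+
+        /* ...and only then decrement: the thread that takes the
+           counter to zero is the only one allowed to unwind. */
+        if (atomic_fetch_sub(&call_count, 1) == 1)
+            printf("unwind with sum=%d\n", shared_sum);
+        return NULL;
+    }
+
+    int main(void)
+    {
+        pthread_t t1, t2;
+        int a = 1, b = 2;
+
+        pthread_create(&t1, NULL, answer, &a);
+        pthread_create(&t2, NULL, answer, &b);
+        pthread_join(t1, NULL);
+        pthread_join(t2, NULL);
+        return 0;
+    }
+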
+Upstream-patch:
+> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2162
+> Fixes: #2161
+> Change-Id: Idc0d34ab19ea6031de0641f7b05c624d90fac8fa
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1911292
+Change-Id: Idc0d34ab19ea6031de0641f7b05c624d90fac8fa
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/228924
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/afr/src/afr-inode-read.c | 254 ++++++++++++++-----------------
+ 1 file changed, 112 insertions(+), 142 deletions(-)
+
+diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
+index cf305af..98e195a 100644
+--- a/xlators/cluster/afr/src/afr-inode-read.c
++++ b/xlators/cluster/afr/src/afr-inode-read.c
+@@ -15,6 +15,8 @@
+ #include <stdlib.h>
+ #include <signal.h>
+ 
++#include <urcu/uatomic.h>
++
+ #include <glusterfs/glusterfs.h>
+ #include "afr.h"
+ #include <glusterfs/dict.h>
+@@ -868,188 +870,121 @@ afr_getxattr_quota_size_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+     return 0;
+ }
+ 
+-int32_t
+-afr_getxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+-                          int32_t op_ret, int32_t op_errno, dict_t *dict,
+-                          dict_t *xdata)
++static int32_t
++afr_update_local_dicts(call_frame_t *frame, dict_t *dict, dict_t *xdata)
+ {
+-    int call_cnt = 0, len = 0;
+-    char *lockinfo_buf = NULL;
+-    dict_t *lockinfo = NULL, *newdict = NULL;
+-    afr_local_t *local = NULL;
++    afr_local_t *local;
++    dict_t *local_dict;
++    dict_t *local_xdata;
++    int32_t ret;
+ 
+-    LOCK(&frame->lock);
+-    {
+-        local = frame->local;
++    local = frame->local;
++    local_dict = NULL;
++    local_xdata = NULL;
+ 
+-        call_cnt = --local->call_count;
++    ret = -ENOMEM;
+ 
+-        if ((op_ret < 0) || (!dict && !xdata)) {
+-            goto unlock;
+-        }
+-
+-        if (xdata) {
+-            if (!local->xdata_rsp) {
+-                local->xdata_rsp = dict_new();
+-                if (!local->xdata_rsp) {
+-                    local->op_ret = -1;
+-                    local->op_errno = ENOMEM;
+-                    goto unlock;
+-                }
+-            }
++    if ((dict != NULL) && (local->dict == NULL)) {
++        local_dict = dict_new();
++        if (local_dict == NULL) {
++            goto done;
+         }
++    }
+ 
+-        if (!dict) {
+-            goto unlock;
++    if ((xdata != NULL) && (local->xdata_rsp == NULL)) {
++        local_xdata = dict_new();
++        if (local_xdata == NULL) {
++            goto done;
+         }
++    }
+ 
+-        op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
+-                                      (void **)&lockinfo_buf, &len);
++    if ((local_dict != NULL) || (local_xdata != NULL)) {
++        /* TODO: Maybe it would be better to preallocate both dicts before
++         *       sending the requests. This way we don't need to use a LOCK()
++         *       here. */
++        LOCK(&frame->lock);
+ 
+-        if (!lockinfo_buf) {
+-            goto unlock;
++        if ((local_dict != NULL) && (local->dict == NULL)) {
++            local->dict = local_dict;
++            local_dict = NULL;
+         }
+ 
+-        if (!local->dict) {
+-            local->dict = dict_new();
+-            if (!local->dict) {
+-                local->op_ret = -1;
+-                local->op_errno = ENOMEM;
+-                goto unlock;
+-            }
++        if ((local_xdata != NULL) && (local->xdata_rsp == NULL)) {
++            local->xdata_rsp = local_xdata;
++            local_xdata = NULL;
+         }
+-    }
+-unlock:
+-    UNLOCK(&frame->lock);
+ 
+-    if (lockinfo_buf != NULL) {
+-        lockinfo = dict_new();
+-        if (lockinfo == NULL) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-        } else {
+-            op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
+-
+-            if (lockinfo && local->dict) {
+-                dict_copy(lockinfo, local->dict);
+-            }
+-        }
+-    }
+-
+-    if (xdata && local->xdata_rsp) {
+-        dict_copy(xdata, local->xdata_rsp);
++        UNLOCK(&frame->lock);
+     }
+ 
+-    if (!call_cnt) {
+-        newdict = dict_new();
+-        if (!newdict) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-            goto unwind;
++    if (dict != NULL) {
++        if (dict_copy(dict, local->dict) < 0) {
++            goto done;
+         }
++    }
+ 
+-        op_ret = dict_allocate_and_serialize(
+-            local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+-        if (op_ret != 0) {
+-            local->op_ret = -1;
+-            goto unwind;
++    if (xdata != NULL) {
++        if (dict_copy(xdata, local->xdata_rsp) < 0) {
++            goto done;
+         }
++    }
+ 
+-        op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+-                                 (void *)lockinfo_buf, len);
+-        if (op_ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = -op_ret;
+-            goto unwind;
+-        }
++    ret = 0;
+ 
+-    unwind:
+-        AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, newdict,
+-                         local->xdata_rsp);
++done:
++    if (local_dict != NULL) {
++        dict_unref(local_dict);
+     }
+ 
+-    dict_unref(lockinfo);
++    if (local_xdata != NULL) {
++        dict_unref(local_xdata);
++    }
+ 
+-    return 0;
++    return ret;
+ }
+ 
+-int32_t
+-afr_fgetxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+-                           int32_t op_ret, int32_t op_errno, dict_t *dict,
+-                           dict_t *xdata)
++static void
++afr_getxattr_lockinfo_cbk_common(call_frame_t *frame, int32_t op_ret,
++                                 int32_t op_errno, dict_t *dict, dict_t *xdata,
++                                 bool is_fgetxattr)
+ {
+-    int call_cnt = 0, len = 0;
++    int len = 0;
+     char *lockinfo_buf = NULL;
+     dict_t *lockinfo = NULL, *newdict = NULL;
+     afr_local_t *local = NULL;
+ 
+-    LOCK(&frame->lock);
+-    {
+-        local = frame->local;
+-
+-        call_cnt = --local->call_count;
+-
+-        if ((op_ret < 0) || (!dict && !xdata)) {
+-            goto unlock;
+-        }
+-
+-        if (xdata) {
+-            if (!local->xdata_rsp) {
+-                local->xdata_rsp = dict_new();
+-                if (!local->xdata_rsp) {
+-                    local->op_ret = -1;
+-                    local->op_errno = ENOMEM;
+-                    goto unlock;
+-                }
+-            }
+-        }
+-
+-        if (!dict) {
+-            goto unlock;
+-        }
++    local = frame->local;
+ 
++    if ((op_ret >= 0) && (dict != NULL)) {
+         op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
+                                       (void **)&lockinfo_buf, &len);
+-
+-        if (!lockinfo_buf) {
+-            goto unlock;
+-        }
+-
+-        if (!local->dict) {
+-            local->dict = dict_new();
+-            if (!local->dict) {
+-                local->op_ret = -1;
+-                local->op_errno = ENOMEM;
+-                goto unlock;
++        if (lockinfo_buf != NULL) {
++            lockinfo = dict_new();
++            if (lockinfo == NULL) {
++                op_ret = -1;
++            } else {
++                op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
+             }
+         }
+     }
+-unlock:
+-    UNLOCK(&frame->lock);
+ 
+-    if (lockinfo_buf != NULL) {
+-        lockinfo = dict_new();
+-        if (lockinfo == NULL) {
+-            local->op_ret = -1;
+-            local->op_errno = ENOMEM;
+-        } else {
+-            op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
+-
+-            if (lockinfo && local->dict) {
+-                dict_copy(lockinfo, local->dict);
+-            }
++    if ((op_ret >= 0) && ((lockinfo != NULL) || (xdata != NULL))) {
++        op_ret = afr_update_local_dicts(frame, lockinfo, xdata);
++        if (lockinfo != NULL) {
++            dict_unref(lockinfo);
+         }
+     }
+ 
+-    if (xdata && local->xdata_rsp) {
+-        dict_copy(xdata, local->xdata_rsp);
++    if (op_ret < 0) {
++        local->op_ret = -1;
++        local->op_errno = ENOMEM;
+     }
+ 
+-    if (!call_cnt) {
++    if (uatomic_sub_return(&local->call_count, 1) == 0) {
+         newdict = dict_new();
+         if (!newdict) {
+             local->op_ret = -1;
+-            local->op_errno = ENOMEM;
++            local->op_errno = op_errno = ENOMEM;
+             goto unwind;
+         }
+ 
+@@ -1057,23 +992,58 @@ unlock:
+             local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+         if (op_ret != 0) {
+             local->op_ret = -1;
++            local->op_errno = op_errno = ENOMEM;
+             goto unwind;
+         }
+ 
+         op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
+                                  (void *)lockinfo_buf, len);
+         if (op_ret < 0) {
+-            local->op_ret = -1;
+-            local->op_errno = -op_ret;
++            GF_FREE(lockinfo_buf);
++            local->op_ret = op_ret = -1;
++            local->op_errno = op_errno = -op_ret;
+             goto unwind;
+         }
+ 
+     unwind:
+-        AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, newdict,
+-                         local->xdata_rsp);
++        /* TODO: These unwinds use op_ret and op_errno instead of local->op_ret
++         *       and local->op_errno. This doesn't seem right because any
++         *       failure during processing of each answer could be silently
++         *       ignored. This is kept because it was the old behavior and because
++         *       local->op_ret is initialized as -1 and local->op_errno is
++         *       initialized as EUCLEAN, which makes these values useless. */
++        if (is_fgetxattr) {
++            AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, newdict,
++                             local->xdata_rsp);
++        } else {
++            AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, newdict,
++                             local->xdata_rsp);
++        }
++
++        if (newdict != NULL) {
++            dict_unref(newdict);
++        }
+     }
++}
++
++static int32_t
++afr_getxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
++                          int32_t op_ret, int32_t op_errno, dict_t *dict,
++                          dict_t *xdata)
++{
++    afr_getxattr_lockinfo_cbk_common(frame, op_ret, op_errno, dict, xdata,
++                                     false);
+ 
+-    dict_unref(lockinfo);
++    return 0;
++}
++
++static int32_t
++afr_fgetxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
++                           int32_t op_ret, int32_t op_errno, dict_t *dict,
++                           dict_t *xdata)
++{
++    afr_getxattr_lockinfo_cbk_common(frame, op_ret, op_errno, dict, xdata,
++                                     true);
+ 
+     return 0;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0538-afr-fix-coverity-issue-introduced-by-90cefde.patch b/SOURCES/0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
new file mode 100644
index 0000000..de164a3
--- /dev/null
+++ b/SOURCES/0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
@@ -0,0 +1,46 @@
+From 31cd7627ff329a39691239322df3bc88e962ad02 Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Mon, 1 Mar 2021 05:19:39 +0100
+Subject: [PATCH 538/538] afr: fix coverity issue introduced by 90cefde
+
+Fixes coverity issues 1447029 and 1447028.
+
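+The underlying mistake: dict_copy() returns a dict_t * (NULL on
+failure), not an int, so a "< 0" check can never fire. A tiny
+standalone sketch with a hypothetical analogue:
+
+    #include <stdio.h>
+
+    /* Hypothetical analogue of dict_copy(): returns the destination
+       pointer on success and NULL on failure, never a negative int. */
+    static char *copy_thing(const char *src, char *dst)
+    {
+        if (src == NULL)
+            return NULL;
+        *dst = *src;
+        return dst;
+    }
+
+    int main(void)
+    {
+        char src = 'x', dst = 0;
+
+        /* WRONG: "copy_thing(...) < 0" is never true for a valid
+           pointer, so failures would be silently ignored. */
+
+        /* RIGHT: compare against NULL, as the fix does. */
+        if (copy_thing(&src, &dst) == NULL)
+            puts("copy failed");
+        else
+            printf("copy ok: %c\n", dst);
+        return 0;
+    }
+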
+Backport of:
+> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2201
+> Updates: #2161
+> Change-Id: I6a564231d6aeb76de20675b7ced5d45eed8c377f
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1911292
+Change-Id: I6a564231d6aeb76de20675b7ced5d45eed8c377f
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/229200
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/cluster/afr/src/afr-inode-read.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
+index 98e195a..d874172 100644
+--- a/xlators/cluster/afr/src/afr-inode-read.c
++++ b/xlators/cluster/afr/src/afr-inode-read.c
+@@ -918,13 +918,13 @@ afr_update_local_dicts(call_frame_t *frame, dict_t *dict, dict_t *xdata)
+     }
+ 
+     if (dict != NULL) {
+-        if (dict_copy(dict, local->dict) < 0) {
++        if (dict_copy(dict, local->dict) == NULL) {
+             goto done;
+         }
+     }
+ 
+     if (xdata != NULL) {
+-        if (dict_copy(xdata, local->xdata_rsp) < 0) {
++        if (dict_copy(xdata, local->xdata_rsp) == NULL) {
+             goto done;
+         }
+     }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch b/SOURCES/0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
new file mode 100644
index 0000000..18f851f
--- /dev/null
+++ b/SOURCES/0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
@@ -0,0 +1,62 @@
+From 88523814fe296c9cc9f7619e06210830f59c5edf Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez <xhernandez@redhat.com>
+Date: Fri, 12 Mar 2021 10:32:09 +0100
+Subject: [PATCH 539/539] extras: disable lookup-optimize in virt and block
+ groups
+
+lookup-optimize doesn't provide any benefit for virtualized
+environments and gluster-block workloads, but it's known to cause
+corruption in some cases when sharding is also enabled and the volume
+is expanded or shrunk.
+
+For this reason, we disable lookup-optimize by default in those
+environments.
+
+Backport of:
+> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2254
+> Fixes: #2253
+> Change-Id: I25861aa50b335556a995a9c33318dd3afb41bf71
+> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+
+BUG: 1939372
+Change-Id: I25861aa50b335556a995a9c33318dd3afb41bf71
+Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/231173
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/group-distributed-virt | 1 +
+ extras/group-gluster-block    | 1 +
+ extras/group-virt.example     | 1 +
+ 3 files changed, 3 insertions(+)
+
+diff --git a/extras/group-distributed-virt b/extras/group-distributed-virt
+index a960b76..6da3de0 100644
+--- a/extras/group-distributed-virt
++++ b/extras/group-distributed-virt
+@@ -8,3 +8,4 @@ user.cifs=off
+ client.event-threads=4
+ server.event-threads=4
+ performance.client-io-threads=on
++cluster.lookup-optimize=off
+diff --git a/extras/group-gluster-block b/extras/group-gluster-block
+index 1e39801..b8d3e8d 100644
+--- a/extras/group-gluster-block
++++ b/extras/group-gluster-block
+@@ -25,3 +25,4 @@ features.shard-block-size=64MB
+ user.cifs=off
+ server.allow-insecure=on
+ cluster.choose-local=off
++cluster.lookup-optimize=off
+diff --git a/extras/group-virt.example b/extras/group-virt.example
+index 3a441eb..155f5f5 100644
+--- a/extras/group-virt.example
++++ b/extras/group-virt.example
+@@ -21,3 +21,4 @@ server.tcp-user-timeout=20
+ server.keepalive-time=10
+ server.keepalive-interval=2
+ server.keepalive-count=5
++cluster.lookup-optimize=off
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0540-extras-Disable-write-behind-for-group-samba.patch b/SOURCES/0540-extras-Disable-write-behind-for-group-samba.patch
new file mode 100644
index 0000000..0a89c64
--- /dev/null
+++ b/SOURCES/0540-extras-Disable-write-behind-for-group-samba.patch
@@ -0,0 +1,37 @@
+From 6895b6c67e9c29af3f966b4d9ee5cb40da763d24 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal <moagrawa@redhat.com>
+Date: Wed, 14 Apr 2021 12:38:45 +0530
+Subject: [PATCH 540/540] extras: Disable write-behind for group samba.
+
+When write-behind is enabled with Samba, it can be a
+source of data corruption. The translator, while
+processing a write call, immediately returns success but continues
+writing the data to the server in the background. This can cause data
+corruption when two clients relying on Samba to provide data consistency
+are operating on the same file.
+
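+To make the failure mode concrete, here is a toy write-behind cache in
+plain C (purely illustrative, nothing like the real translator): each
+client's write is acknowledged into a private buffer, and a late flush
+silently clobbers the other client's data.
+
+    #include <stdio.h>
+    #include <string.h>
+
+    static char server_file[16]; /* "the server's" copy of the file */
+
+    struct wb_cache {
+        char pending[16];
+        int dirty;
+    };
+
+    /* write() acks immediately into a private buffer... */
+    static void wb_write(struct wb_cache *c, const char *data)
+    {
+        snprintf(c->pending, sizeof(c->pending), "%s", data);
+        c->dirty = 1; /* caller already believes the write is done */
+    }
+
+    /* ...and the data only reaches the server on a later flush. */
+    static void wb_flush(struct wb_cache *c)
+    {
+        if (c->dirty)
+            snprintf(server_file, sizeof(server_file), "%s",
+                     c->pending);
+        c->dirty = 0;
+    }
+
+    int main(void)
+    {
+        struct wb_cache a = {{0}, 0}, b = {{0}, 0};
+
+        wb_write(&a, "from-A"); /* client A: "done" */
+        wb_write(&b, "from-B"); /* client B: "done" */
+        wb_flush(&b);
+        wb_flush(&a);           /* A's late flush clobbers B's data */
+        printf("server sees: %s\n", server_file);
+        return 0;
+    }
+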
+> fixes: https://github.com/gluster/glusterfs/issues/2329
+
+Change-Id: I5265056ff315a5f3cd97ea11b18db0831b1b901d
+Solution: Disable write-behind for the samba group
+BUG: 1948547
+Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/235876
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ extras/group-samba | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/extras/group-samba b/extras/group-samba
+index eeee6e0..9611a1f 100644
+--- a/extras/group-samba
++++ b/extras/group-samba
+@@ -9,3 +9,4 @@ performance.nl-cache=on
+ performance.nl-cache-timeout=600
+ performance.readdir-ahead=on
+ performance.parallel-readdir=on
++performance.write-behind=off
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch b/SOURCES/0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
new file mode 100644
index 0000000..29135df
--- /dev/null
+++ b/SOURCES/0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
@@ -0,0 +1,545 @@
+From 23ab7175e64ab4d75fbcb6874008843cc78b65b8 Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Fri, 16 Apr 2021 18:48:56 +0530
+Subject: [PATCH 541/542] glusterd-volgen: Add functionality to accept any
+ custom xlator
+
+Add a new function which allows users to insert custom xlators,
+providing a way to add custom processing into file operations.
+
+Users can deploy a plugin (xlator shared object) and integrate it into glusterfsd.
+
+To enable a custom xlator, do the following:
+
+1. Put the xlator object (.so file) into "XLATOR_DIR/user/".
+2. Set the option user.xlator.<xlator> to an existing xlator name to specify its position in the graph.
+3. Restart the gluster volume.
+
+Options for a custom xlator can be set via "user.xlator.<xlator>.<optkey>".
+
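+The key classification this introduces can be exercised in isolation;
+a short standalone sketch using the same fnmatch(3) patterns (not the
+cli source itself):
+
+    #include <fnmatch.h>
+    #include <stdio.h>
+
+    /* Classify an option key the way the cli does: a key matching
+       "user.xlator.*" but not "user.xlator.*.*" names a user xlator,
+       while the longer form is one of that xlator's options. */
+    static const char *classify(const char *key)
+    {
+        if (fnmatch("user.xlator.*", key, 0) != 0)
+            return "not a user-xlator key";
+        if (fnmatch("user.xlator.*.*", key, 0) == 0)
+            return "user-xlator option key";
+        return "user-xlator position key";
+    }
+
+    int main(void)
+    {
+        printf("%s\n", classify("user.xlator.hello"));       /* position */
+        printf("%s\n", classify("user.xlator.hello.color")); /* option */
+        printf("%s\n", classify("cluster.lookup-optimize")); /* neither */
+        return 0;
+    }
+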
+Backport of :
+>https://github.com/gluster/glusterfs/commit/ea86b664f3b1f54901ce1b7d7fba7d80456f2089
+>Fixes: https://github.com/gluster/glusterfs/issues/1943
+>Change-Id: Ife3ae1514ea474f5dae2897223012f9d04b64674
+>Signed-off-by:Ryo Furuhashi <ryo.furuhashi.nh@hitachi.com>
+>Co-authored-by: Yaniv Kaul <ykaul@redhat.com>
+>Co-authored-by: Xavi Hernandez <xhernandez@users.noreply.github.com>
+
+Change-Id: Ic8f28bfcfde67213eb1092b0ebf4822c874d37bb
+BUG: 1927235
+Signed-off-by: Ashish Pandey <aspandey@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/236830
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
+Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
+---
+ cli/src/cli-rpc-ops.c                       | 148 ++++++++++++++++++++------
+ cli/src/cli.h                               |   2 -
+ tests/basic/user-xlator.t                   |  65 ++++++++++++
+ tests/env.rc.in                             |   3 +
+ xlators/mgmt/glusterd/src/glusterd-volgen.c | 155 ++++++++++++++++++++++++++++
+ 5 files changed, 342 insertions(+), 31 deletions(-)
+ create mode 100755 tests/basic/user-xlator.t
+
+diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
+index 4e91265..51b5447 100644
+--- a/cli/src/cli-rpc-ops.c
++++ b/cli/src/cli-rpc-ops.c
+@@ -2269,49 +2269,131 @@ out:
+     return ret;
+ }
+ 
+-char *
+-is_server_debug_xlator(void *myframe)
++/*
++ * returns
++ *   1 : is server debug xlator
++ *   0 : is not server debug xlator
++ *  <0 : error
++ */
++static int
++is_server_debug_xlator(char *key, char *value)
++{
++    if (!key || !value)
++        return -1;
++
++    if (strcmp("debug.trace", key) == 0 ||
++        strcmp("debug.error-gen", key) == 0) {
++        if (strcmp("client", value) == 0)
++            return 0;
++        else
++            return 1;
++    }
++
++    return 0;
++}
++
++/*
++ * returns
++ *   1 : is user xlator
++ *   0 : is not user xlator
++ *  <0 : error
++ */
++static int
++is_server_user_xlator(char *key, char *value)
++{
++    int ret = 0;
++
++    if (!key || !value)
++        return -1;
++
++    ret = fnmatch("user.xlator.*", key, 0);
++    if (ret < 0) {
++        ret = -1;
++        goto out;
++    } else if (ret == FNM_NOMATCH) {
++        ret = 0;
++        goto out;
++    }
++
++    ret = fnmatch("user.xlator.*.*", key, 0);
++    if (ret < 0) {
++        ret = -1;
++        goto out;
++    } else if (ret != FNM_NOMATCH) {  // this is user xlator's option key
++        ret = 0;
++        goto out;
++    }
++
++    ret = 1;
++
++out:
++    return ret;
++}
++
++static int
++added_server_xlator(void *myframe, char **added_xlator)
+ {
+     call_frame_t *frame = NULL;
+     cli_local_t *local = NULL;
+     char **words = NULL;
+     char *key = NULL;
+     char *value = NULL;
+-    char *debug_xlator = NULL;
++    int ret = 0;
+ 
+     frame = myframe;
+     local = frame->local;
+     words = (char **)local->words;
+ 
+     while (*words != NULL) {
+-        if (strstr(*words, "trace") == NULL &&
+-            strstr(*words, "error-gen") == NULL) {
+-            words++;
+-            continue;
+-        }
+-
+         key = *words;
+         words++;
+         value = *words;
+-        if (value == NULL)
++
++        if (!value) {
+             break;
+-        if (strstr(value, "client")) {
+-            words++;
+-            continue;
+-        } else {
+-            if (!(strstr(value, "posix") || strstr(value, "acl") ||
+-                  strstr(value, "locks") || strstr(value, "io-threads") ||
+-                  strstr(value, "marker") || strstr(value, "index"))) {
+-                words++;
+-                continue;
+-            } else {
+-                debug_xlator = gf_strdup(key);
+-                break;
++        }
++
++        ret = is_server_debug_xlator(key, value);
++        if (ret < 0) {
++            gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
++                   "failed to check that debug xlator was added");
++            ret = -1;
++            goto out;
++        }
++
++        if (ret) {
++            *added_xlator = gf_strdup(key);
++            if (!*added_xlator) {
++                gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
++                       "Out of memory");
++                ret = -1;
++                goto out;
++            }
++            break;
++        }
++
++        ret = is_server_user_xlator(key, value);
++        if (ret < 0) {
++            gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
++                   "failed to check that user xlator was added");
++            ret = -1;
++            goto out;
++        }
++
++        if (ret) {
++            *added_xlator = gf_strdup(key);
++            if (!*added_xlator) {
++                gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
++                       "Out of memory");
++                ret = -1;
++                goto out;
+             }
++            break;
+         }
+     }
+ 
+-    return debug_xlator;
++out:
++    return ret;
+ }
+ 
+ int
+@@ -2327,7 +2409,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
+     char msg[1024] = {
+         0,
+     };
+-    char *debug_xlator = NULL;
++    char *added_xlator = NULL;
+     char tmp_str[512] = {
+         0,
+     };
+@@ -2365,18 +2447,24 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
+     * The process has to be restarted. So this is a check from the
+-     * volume set option such that if debug xlators such as trace/errorgen
+-     * are provided in the set command, warn the user.
++     * volume set option such that if user custom xlators or debug
++     * xlators such as trace/errorgen are provided in the set command,
++     * warn the user.
+      */
+-    debug_xlator = is_server_debug_xlator(myframe);
++    ret = added_server_xlator(myframe, &added_xlator);
++    if (ret < 0) {
++        gf_log("cli", GF_LOG_ERROR,
++               "failed to check that server graph has been changed");
++        goto out;
++    }
+ 
+     if (dict_get_str(dict, "help-str", &help_str) && !msg[0])
+         snprintf(msg, sizeof(msg), "Set volume %s",
+                  (rsp.op_ret) ? "unsuccessful" : "successful");
+-    if (rsp.op_ret == 0 && debug_xlator) {
++    if (rsp.op_ret == 0 && added_xlator) {
+         snprintf(tmp_str, sizeof(tmp_str),
+                  "\n%s translator has been "
+                  "added to the server volume file. Please restart the"
+                  " volume for enabling the translator",
+-                 debug_xlator);
++                 added_xlator);
+     }
+ 
+     if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) {
+@@ -2394,7 +2482,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
+             cli_err("volume set: failed");
+     } else {
+         if (help_str == NULL) {
+-            if (debug_xlator == NULL)
++            if (added_xlator == NULL)
+                 cli_out("volume set: success");
+             else
+                 cli_out("volume set: success%s", tmp_str);
+@@ -2408,7 +2496,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ out:
+     if (dict)
+         dict_unref(dict);
+-    GF_FREE(debug_xlator);
++    GF_FREE(added_xlator);
+     cli_cmd_broadcast_response(ret);
+     gf_free_xdr_cli_rsp(rsp);
+     return ret;
+diff --git a/cli/src/cli.h b/cli/src/cli.h
+index 7b4f446..b5b69ea 100644
+--- a/cli/src/cli.h
++++ b/cli/src/cli.h
+@@ -502,8 +502,6 @@ cli_xml_output_snapshot(int cmd_type, dict_t *dict, int op_ret, int op_errno,
+ int
+ cli_xml_snapshot_status_single_snap(cli_local_t *local, dict_t *dict,
+                                     char *key);
+-char *
+-is_server_debug_xlator(void *myframe);
+ 
+ int32_t
+ cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
+diff --git a/tests/basic/user-xlator.t b/tests/basic/user-xlator.t
+new file mode 100755
+index 0000000..a711f9f
+--- /dev/null
++++ b/tests/basic/user-xlator.t
+@@ -0,0 +1,65 @@
++#!/bin/bash
++
++. $(dirname $0)/../include.rc
++. $(dirname $0)/../volume.rc
++
++#### patchy.dev.d-backends-patchy1.vol
++brick=${B0//\//-}
++SERVER_VOLFILE="/var/lib/glusterd/vols/${V0}/${V0}.${H0}.${brick:1}-${V0}1.vol"
++
++cleanup;
++
++TEST mkdir -p $B0/single-brick
++TEST mkdir -p ${GLUSTER_XLATOR_DIR}/user
++
++## deploy dummy user xlator
++TEST cp ${GLUSTER_XLATOR_DIR}/playground/template.so ${GLUSTER_XLATOR_DIR}/user/hoge.so
++
++TEST glusterd
++TEST $CLI volume create $V0 replica 3  $H0:$B0/${V0}{1,2,3,4,5,6};
++TEST $CLI volume set $V0 user.xlator.hoge posix
++TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
++
++TEST $CLI volume set $V0 user.xlator.hoge.opt1 10
++TEST grep -q '"option opt1 10"' ${SERVER_VOLFILE}
++TEST $CLI volume set $V0 user.xlator.hoge.opt2 hogehoge
++TEST grep -q '"option opt2 hogehoge"' ${SERVER_VOLFILE}
++TEST $CLI volume set $V0 user.xlator.hoge.opt3 true
++TEST grep -q '"option opt3 true"' ${SERVER_VOLFILE}
++
++TEST $CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
++
++TEST $CLI volume set $V0 user.xlator.hoge trash
++TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
++
++TEST $CLI volume stop $V0
++TEST $CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
++
++TEST ! $CLI volume set $V0 user.xlator.hoge unknown
++TEST grep -q 'user/hoge' ${SERVER_VOLFILE} # When the CLI fails, the volfile is not modified.
++
++TEST $CLI volume stop $V0
++TEST $CLI volume start $V0
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
++
++#### teardown
++
++TEST rm -f ${GLUSTER_XLATOR_DIR}/user/hoge.so
++cleanup;
+diff --git a/tests/env.rc.in b/tests/env.rc.in
+index c7472a7..1f0ca88 100644
+--- a/tests/env.rc.in
++++ b/tests/env.rc.in
+@@ -40,3 +40,6 @@ export GLUSTER_LIBEXECDIR
+ 
+ RUN_NFS_TESTS=@BUILD_GNFS@
+ export RUN_NFS_TESTS
++
++GLUSTER_XLATOR_DIR=@libdir@/glusterfs/@PACKAGE_VERSION@/xlator
++export GLUSTER_XLATOR_DIR
+\ No newline at end of file
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 1920284..a242b5c 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -45,6 +45,11 @@ struct gd_validate_reconf_opts {
+ 
+ extern struct volopt_map_entry glusterd_volopt_map[];
+ 
++struct check_and_add_user_xlator_t {
++    volgen_graph_t *graph;
++    char *volname;
++};
++
+ #define RPC_SET_OPT(XL, CLI_OPT, XLATOR_OPT, ERROR_CMD)                        \
+     do {                                                                       \
+         char *_value = NULL;                                                   \
+@@ -2822,6 +2827,145 @@ out:
+     return ret;
+ }
+ 
++static gf_boolean_t
++check_user_xlator_position(dict_t *dict, char *key, data_t *value,
++                           void *prev_xlname)
++{
++    if (strncmp(key, "user.xlator.", SLEN("user.xlator.")) != 0) {
++        return false;
++    }
++
++    if (fnmatch("user.xlator.*.*", key, 0) == 0) {
++        return false;
++    }
++
++    char *value_str = data_to_str(value);
++    if (!value_str) {
++        return false;
++    }
++
++    if (strcmp(value_str, prev_xlname) == 0) {
++        gf_log("glusterd", GF_LOG_INFO,
++               "found insert position of user-xlator(%s)", key);
++        return true;
++    }
++
++    return false;
++}
++
++static int
++set_user_xlator_option(dict_t *set_dict, char *key, data_t *value, void *data)
++{
++    xlator_t *xl = data;
++    char *optname = strrchr(key, '.') + 1;
++
++    gf_log("glusterd", GF_LOG_DEBUG, "set user xlator option %s = %s", key,
++           value->data);
++
++    return xlator_set_option(xl, optname, strlen(optname), data_to_str(value));
++}
++
++static int
++insert_user_xlator_to_graph(dict_t *set_dict, char *key, data_t *value,
++                            void *action_data)
++{
++    int ret = -1;
++
++    struct check_and_add_user_xlator_t *data = action_data;
++
++    char *xlator_name = strrchr(key, '.') + 1;  // user.xlator.<xlator_name>
++    char *xlator_option_matcher = NULL;
++    char *type = NULL;
++    xlator_t *xl = NULL;
++
++    // convert optkey to xlator type
++    if (gf_asprintf(&type, "user/%s", xlator_name) < 0) {
++        gf_log("glusterd", GF_LOG_ERROR, "failed to generate user-xlator type");
++        goto out;
++    }
++
++    gf_log("glusterd", GF_LOG_INFO, "add user xlator=%s to graph", type);
++
++    xl = volgen_graph_add(data->graph, type, data->volname);
++    if (!xl) {
++        goto out;
++    }
++
++    ret = gf_asprintf(&xlator_option_matcher, "user.xlator.%s.*", xlator_name);
++    if (ret < 0) {
++        gf_log("glusterd", GF_LOG_ERROR,
++               "failed to generate user-xlator option matcher");
++        goto out;
++    }
++
++    dict_foreach_fnmatch(set_dict, xlator_option_matcher,
++                         set_user_xlator_option, xl);
++
++out:
++    if (type)
++        GF_FREE(type);
++    if (xlator_option_matcher)
++        GF_FREE(xlator_option_matcher);
++
++    return ret;
++}
++
++static int
++validate_user_xlator_position(dict_t *this, char *key, data_t *value,
++                              void *unused)
++{
++    int ret = -1;
++    int i = 0;
++
++    if (!value)
++        goto out;
++
++    if (fnmatch("user.xlator.*.*", key, 0) == 0) {
++        ret = 0;
++        goto out;
++    }
++
++    char *value_str = data_to_str(value);
++    if (!value_str)
++        goto out;
++
++    int num_xlators = sizeof(server_graph_table) /
++                      sizeof(server_graph_table[0]);
++    for (i = 0; i < num_xlators; i++) {
++        if (server_graph_table[i].dbg_key &&
++            strcmp(value_str, server_graph_table[i].dbg_key) == 0) {
++            ret = 0;
++            goto out;
++        }
++    }
++
++out:
++    if (ret == -1)
++        gf_log("glusterd", GF_LOG_ERROR, "invalid user xlator position %s = %s",
++               key, value->data);
++
++    return ret;
++}
++
++static int
++check_and_add_user_xl(volgen_graph_t *graph, dict_t *set_dict, char *volname,
++                      char *prev_xlname)
++{
++    if (!prev_xlname)
++        goto out;
++
++    struct check_and_add_user_xlator_t data = {.graph = graph,
++                                               .volname = volname};
++
++    if (dict_foreach_match(set_dict, check_user_xlator_position, prev_xlname,
++                           insert_user_xlator_to_graph, &data) < 0) {
++        return -1;
++    }
++
++out:
++    return 0;
++}
++
+ static int
+ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+                      dict_t *set_dict, void *param)
+@@ -2831,6 +2975,12 @@ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+     char *loglevel = NULL;
+     int i = 0;
+ 
++    if (dict_foreach_fnmatch(set_dict, "user.xlator.*",
++                             validate_user_xlator_position, NULL) < 0) {
++        ret = -EINVAL;
++        goto out;
++    }
++
+     i = sizeof(server_graph_table) / sizeof(server_graph_table[0]) - 1;
+ 
+     while (i >= 0) {
+@@ -2848,6 +2998,11 @@ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+         if (ret)
+             goto out;
+ 
++        ret = check_and_add_user_xl(graph, set_dict, volinfo->volname,
++                                    server_graph_table[i].dbg_key);
++        if (ret)
++            goto out;
++
+         i--;
+     }
+ 
+-- 
+1.8.3.1
+
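
For context, the interface added by this patch is driven entirely by volume-set
keys of the form user.xlator.<name> and user.xlator.<name>.<option>. A minimal
session, following the regression test above (the xlator name "hoge" is the
test's dummy translator; the volume name "myvol" and the el8 xlator directory
/usr/lib64/glusterfs/6.0/xlator are illustrative assumptions, not taken from
this patch):

    # deploy the custom xlator where glusterd looks for user xlators
    mkdir -p /usr/lib64/glusterfs/6.0/xlator/user
    cp /usr/lib64/glusterfs/6.0/xlator/playground/template.so \
       /usr/lib64/glusterfs/6.0/xlator/user/hoge.so

    # insert user/hoge into each brick graph, directly above the posix xlator
    gluster volume set myvol user.xlator.hoge posix

    # user.xlator.<name>.<key> values are written as "option <key> <value>"
    # lines under the new translator in the generated brick volfile
    gluster volume set myvol user.xlator.hoge.opt1 10

    # per the CLI warning added in gf_cli_set_volume_cbk, the volume must be
    # restarted before the translator takes effect
    gluster volume stop myvol && gluster volume start myvol
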
diff --git a/SOURCES/0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch b/SOURCES/0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
new file mode 100644
index 0000000..f6e0641
--- /dev/null
+++ b/SOURCES/0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
@@ -0,0 +1,64 @@
+From f3db0c99faf813e0f2e9ffcf599416555a59df1f Mon Sep 17 00:00:00 2001
+From: Ashish Pandey <aspandey@redhat.com>
+Date: Tue, 9 Feb 2021 16:43:35 +0530
+Subject: [PATCH 542/542] xlators/mgmt: Fixing coverity issue 1445996
+
+Backport of https://github.com/gluster/glusterfs/pull/2148/commits/9785e96e0bdf6e60896570fdf5e4a6976a6f60ba
+
+Fixing "Null pointer dereferences"
+
+BUG: 1927235
+Change-Id: Idbc014e1302d2450f97bccd028681198c0d97424
+Signed-off-by: Ashish Pandey <aspandey@redhat.com>
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/237433
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ xlators/mgmt/glusterd/src/glusterd-volgen.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index a242b5c..71aed08 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -2916,21 +2916,23 @@ validate_user_xlator_position(dict_t *this, char *key, data_t *value,
+ {
+     int ret = -1;
+     int i = 0;
++    char *value_str = NULL;
+ 
+     if (!value)
+         goto out;
+ 
++    value_str = data_to_str(value);
++    if (!value_str)
++        goto out;
++
+     if (fnmatch("user.xlator.*.*", key, 0) == 0) {
+         ret = 0;
+         goto out;
+     }
+ 
+-    char *value_str = data_to_str(value);
+-    if (!value_str)
+-        goto out;
+-
+     int num_xlators = sizeof(server_graph_table) /
+                       sizeof(server_graph_table[0]);
++
+     for (i = 0; i < num_xlators; i++) {
+         if (server_graph_table[i].dbg_key &&
+             strcmp(value_str, server_graph_table[i].dbg_key) == 0) {
+@@ -2942,7 +2944,7 @@ validate_user_xlator_position(dict_t *this, char *key, data_t *value,
+ out:
+     if (ret == -1)
+         gf_log("glusterd", GF_LOG_ERROR, "invalid user xlator position %s = %s",
+-               key, value->data);
++               key, value_str);
+ 
+     return ret;
+ }
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0543-glusterd-handle-custom-xlator-failure-cases.patch b/SOURCES/0543-glusterd-handle-custom-xlator-failure-cases.patch
new file mode 100644
index 0000000..c6194c7
--- /dev/null
+++ b/SOURCES/0543-glusterd-handle-custom-xlator-failure-cases.patch
@@ -0,0 +1,162 @@
+From 71fc5b7949e00c4448f5ec1291e756b201a70082 Mon Sep 17 00:00:00 2001
+From: Ravishankar N <ravishankar@redhat.com>
+Date: Thu, 29 Apr 2021 18:34:57 +0530
+Subject: [PATCH 543/543] glusterd: handle custom xlator failure cases
+
+Problem-1:
+custom xlator insertion was failing for those xlators in the brick graph
+whose dbg_key was NULL in the server_graph_table. Looking at the git log,
+the dbg_key was added in commit d1397dbd7d6cdbd2d81d5d36d608b6175d449db4
+for inserting debug xlators.
+
+Fix: I think it is fine to define it for all brick xlators below server.
+
+Problem-2:
+In the commit-op phase, glusterd_op_set_volume() updates the volinfo
+dict with the key-value pairs and then proceeds to create the volfiles.
+If any of the steps fail, the volinfo dict retains those key-values,
+until glusterd is restarted or `gluster vol reset $VOLNAME` is issued.
+
+Fix:
+Make a copy of the volinfo dict and if there are any failures in
+proceeding with the set volume logic, restore the dict to its original
+state.
+
+Backport of:
+> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2371
+> Change-Id: I9010dab33d0139b8e6d603308e331b6d220a4849
+> Updates: #2370
+> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+
+Change-Id: I9010dab33d0139b8e6d603308e331b6d220a4849
+BUG: 1953901
+Signed-off-by: Ravishankar N <ravishankar@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/239889
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/basic/user-xlator.t                   | 16 ++++++++++++++--
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c  | 16 ++++++++++++++++
+ xlators/mgmt/glusterd/src/glusterd-volgen.c | 14 +++++++-------
+ 3 files changed, 37 insertions(+), 9 deletions(-)
+
+diff --git a/tests/basic/user-xlator.t b/tests/basic/user-xlator.t
+index a711f9f..ed2d831 100755
+--- a/tests/basic/user-xlator.t
++++ b/tests/basic/user-xlator.t
+@@ -35,8 +35,18 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
+ 
+-TEST $CLI volume set $V0 user.xlator.hoge trash
+-TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
++# Test that the insertion at all positions between server and posix is successful.
++# It is not guaranteed that the brick process will start/work in all positions though.
++TESTS_EXPECTED_IN_LOOP=34
++declare -a brick_side_xlators=("decompounder" "io-stats" "quota" "index" "barrier"
++                               "marker" "selinux" "io-threads" "upcall" "leases"
++                               "read-only" "worm" "locks"  "access-control"
++                               "bitrot-stub" "changelog" "trash")
++for xlator in "${brick_side_xlators[@]}"
++  do
++    TEST_IN_LOOP $CLI volume set $V0 user.xlator.hoge $xlator
++    TEST_IN_LOOP grep -q 'user/hoge' ${SERVER_VOLFILE}
++  done
+ 
+ TEST $CLI volume stop $V0
+ TEST $CLI volume start $V0
+@@ -49,6 +59,8 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
+ 
+ TEST ! $CLI volume set $V0 user.xlator.hoge unknown
+ TEST grep -q 'user/hoge' ${SERVER_VOLFILE} # When the CLI fails, the volfile is not modified.
++# User xlator insert failures must not prevent setting other volume options.
++TEST $CLI volume set $V0 storage.reserve 10%
+ 
+ TEST $CLI volume stop $V0
+ TEST $CLI volume start $V0
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index 1e84f5f..893af29 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -2911,6 +2911,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
+     uint32_t new_op_version = 0;
+     gf_boolean_t quorum_action = _gf_false;
+     glusterd_svc_t *svc = NULL;
++    dict_t *volinfo_dict_orig = NULL;
+ 
+     this = THIS;
+     GF_ASSERT(this);
+@@ -2918,6 +2919,10 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
+     priv = this->private;
+     GF_ASSERT(priv);
+ 
++    volinfo_dict_orig = dict_new();
++    if (!volinfo_dict_orig)
++        goto out;
++
+     ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
+     if (ret) {
+         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+@@ -2949,6 +2954,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
+         goto out;
+     }
+ 
++    if (dict_copy(volinfo->dict, volinfo_dict_orig) == NULL) {
++        ret = -ENOMEM;
++        goto out;
++    }
++
+     /* TODO: Remove this once v3.3 compatibility is not required */
+     check_op_version = dict_get_str_boolean(dict, "check-op-version",
+                                             _gf_false);
+@@ -3171,6 +3181,12 @@ out:
+     gf_msg_debug(this->name, 0, "returning %d", ret);
+     if (quorum_action)
+         glusterd_do_quorum_action();
++    if (ret < 0 && count > 1) {
++        if (dict_reset(volinfo->dict) == 0)
++            dict_copy(volinfo_dict_orig, volinfo->dict);
++    }
++    if (volinfo_dict_orig)
++        dict_unref(volinfo_dict_orig);
+     return ret;
+ }
+ 
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+index 71aed08..aa85bdb 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
+@@ -2706,24 +2706,24 @@ out:
+ static volgen_brick_xlator_t server_graph_table[] = {
+     {brick_graph_add_server, NULL},
+     {brick_graph_add_decompounder, "decompounder"},
+-    {brick_graph_add_io_stats, "NULL"},
++    {brick_graph_add_io_stats, "io-stats"},
+     {brick_graph_add_sdfs, "sdfs"},
+     {brick_graph_add_namespace, "namespace"},
+-    {brick_graph_add_cdc, NULL},
++    {brick_graph_add_cdc, "cdc"},
+     {brick_graph_add_quota, "quota"},
+     {brick_graph_add_index, "index"},
+-    {brick_graph_add_barrier, NULL},
++    {brick_graph_add_barrier, "barrier"},
+     {brick_graph_add_marker, "marker"},
+     {brick_graph_add_selinux, "selinux"},
+     {brick_graph_add_fdl, "fdl"},
+     {brick_graph_add_iot, "io-threads"},
+     {brick_graph_add_upcall, "upcall"},
+     {brick_graph_add_leases, "leases"},
+-    {brick_graph_add_pump, NULL},
+-    {brick_graph_add_ro, NULL},
+-    {brick_graph_add_worm, NULL},
++    {brick_graph_add_pump, "pump"},
++    {brick_graph_add_ro, "read-only"},
++    {brick_graph_add_worm, "worm"},
+     {brick_graph_add_locks, "locks"},
+-    {brick_graph_add_acl, "acl"},
++    {brick_graph_add_acl, "access-control"},
+     {brick_graph_add_bitrot_stub, "bitrot-stub"},
+     {brick_graph_add_changelog, "changelog"},
+ #if USE_GFDB /* changetimerecorder depends on gfdb */
+-- 
+1.8.3.1
+
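
The test hunk above condenses both fixes into observable CLI behaviour. A
sketch of what changes for the user (the volume name is illustrative; the
brick volfile lives under /var/lib/glusterd/vols/<volname>/, as in the test's
SERVER_VOLFILE):

    # an invalid insert position is rejected at volfile-generation time,
    # and the on-disk volfile keeps the last good position
    gluster volume set myvol user.xlator.hoge unknown    # fails
    grep 'user/hoge' /var/lib/glusterd/vols/myvol/*.vol  # still present

    # before the Problem-2 fix, the rejected key lingered in volinfo->dict,
    # so even unrelated options could no longer be set; with the dict
    # backup/restore added here, this succeeds again
    gluster volume set myvol storage.reserve 10%
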
diff --git a/SOURCES/0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch b/SOURCES/0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch
new file mode 100644
index 0000000..171ed10
--- /dev/null
+++ b/SOURCES/0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch
@@ -0,0 +1,47 @@
+From 840f437d232fbafac9f4448b0f8d0e9976ea1e1d Mon Sep 17 00:00:00 2001
+From: Tamar Shacked <tshacked@redhat.com>
+Date: Mon, 23 Aug 2021 20:46:13 +0300
+Subject: [PATCH 544/544] RHGS-3.5.4: rebuild to ship with RHEL-8.5
+
+Label: DOWNSTREAM ONLY
+BUG: 1996984
+
+Signed-off-by: Tamar Shacked <tshacked@redhat.com>
+Change-Id: Idafc64b8ee5da165c87428b8a5166cf319ef7660
+Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/267350
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ glusterfs.spec.in | 2 ++
+ rfc.sh            | 2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 2be7677..4511979 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -1982,6 +1982,8 @@ fi
+ %endif
+ 
+ %changelog
++* Tue Aug 24 2021 Tamar Shacked <tshacked@redhat.com>
++- build RHGS client for RHEL-8.5 (#1996984)
+ 
+ * Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
+ - added requires policycoreutils-python-utils on rhel8 for geo-replication
+diff --git a/rfc.sh b/rfc.sh
+index c0559b9..b1153be 100755
+--- a/rfc.sh
++++ b/rfc.sh
+@@ -315,7 +315,7 @@ main()
+     if [ -z "${reference}" ]; then
+         $drier git push $ORIGIN HEAD:refs/for/$branch/rfc;
+     else
+-        $drier git push $ORIGIN HEAD:refs/for/$branch/ref-${reference};
++        $drier git push $ORIGIN HEAD:refs/for/$branch;
+     fi
+ }
+ 
+-- 
+1.8.3.1
+
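
Besides the spec changelog stanza, this rebuild patch also drops the
ref-<bug> suffix from rfc.sh's Gerrit push. A sketch of the resulting push
targets, assuming the rhgs-3.5.4 branch set by patch 0481 (the remote name
and bug number are illustrative):

    # no reference bug given: unchanged, still queued under /rfc
    git push origin HEAD:refs/for/rhgs-3.5.4/rfc
    # reference given, before this patch:
    git push origin HEAD:refs/for/rhgs-3.5.4/ref-1996984
    # reference given, after this patch:
    git push origin HEAD:refs/for/rhgs-3.5.4
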
diff --git a/SPECS/glusterfs.spec b/SPECS/glusterfs.spec
index 371afc8..c0e2ed4 100644
--- a/SPECS/glusterfs.spec
+++ b/SPECS/glusterfs.spec
@@ -237,7 +237,7 @@ Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
 %else
 Name:             glusterfs
 Version:          6.0
-Release:          37.2%{?dist}
+Release:          56.4%{?dist}
 ExcludeArch:      i686
 %endif
 License:          GPLv2 or LGPLv3+
@@ -698,15 +698,167 @@ Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
 Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
 Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
 Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
-Patch0384: 0384-extras-Modify-group-virt-to-include-network-related-.patch
-Patch0385: 0385-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
-Patch0386: 0386-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
-Patch0387: 0387-cluster-afr-Delay-post-op-for-fsync.patch
+Patch0384: 0384-Update-rfc.sh-to-rhgs-3.5.3.patch
+Patch0385: 0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
+Patch0386: 0386-glusterd-increase-the-StartLimitBurst.patch
+Patch0387: 0387-To-fix-readdir-ahead-memory-leak.patch
 Patch0388: 0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
-Patch0389: 0389-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
-Patch0390: 0390-tests-Avoid-ssl-authz.t-failure.patch
-Patch0391: 0391-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
-Patch0392: 0392-glusterd-unlink-the-file-after-killing-the-process.patch
+Patch0389: 0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
+Patch0390: 0390-glusterd-deafult-options-after-volume-reset.patch
+Patch0391: 0391-glusterd-unlink-the-file-after-killing-the-process.patch
+Patch0392: 0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
+Patch0393: 0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
+Patch0394: 0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
+Patch0395: 0395-Cli-Removing-old-log-rotate-command.patch
+Patch0396: 0396-Updating-gluster-manual.patch
+Patch0397: 0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
+Patch0398: 0398-ec-change-error-message-for-heal-commands-for-disper.patch
+Patch0399: 0399-glusterd-coverity-fixes.patch
+Patch0400: 0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
+Patch0401: 0401-cli-change-the-warning-message.patch
+Patch0402: 0402-afr-wake-up-index-healer-threads.patch
+Patch0403: 0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
+Patch0404: 0404-tests-Fix-spurious-failure.patch
+Patch0405: 0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
+Patch0406: 0406-afr-support-split-brain-CLI-for-replica-3.patch
+Patch0407: 0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
+Patch0408: 0408-geo-rep-Fix-ssh-port-validation.patch
+Patch0409: 0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
+Patch0410: 0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
+Patch0411: 0411-tools-glusterfind-validate-session-name.patch
+Patch0412: 0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
+Patch0413: 0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
+Patch0414: 0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
+Patch0415: 0415-dht-Fix-stale-layout-and-create-issue.patch
+Patch0416: 0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
+Patch0417: 0417-events-fix-IPv6-memory-corruption.patch
+Patch0418: 0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
+Patch0419: 0419-cluster-afr-fix-race-when-bricks-come-up.patch
+Patch0420: 0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
+Patch0421: 0421-Improve-logging-in-EC-client-and-lock-translator.patch
+Patch0422: 0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
+Patch0423: 0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
+Patch0424: 0424-afr-make-heal-info-lockless.patch
+Patch0425: 0425-tests-Fix-spurious-self-heald.t-failure.patch
+Patch0426: 0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
+Patch0427: 0427-storage-posix-Fixing-a-coverity-issue.patch
+Patch0428: 0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
+Patch0429: 0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
+Patch0430: 0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
+Patch0431: 0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
+Patch0432: 0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
+Patch0433: 0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
+Patch0434: 0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
+Patch0435: 0435-glusterd-coverity-fix.patch
+Patch0436: 0436-glusterd-coverity-fixes.patch
+Patch0437: 0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
+Patch0438: 0438-dht-sparse-files-rebalance-enhancements.patch
+Patch0439: 0439-cluster-afr-Delay-post-op-for-fsync.patch
+Patch0440: 0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
+Patch0441: 0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
+Patch0442: 0442-fuse-correctly-handle-setxattr-values.patch
+Patch0443: 0443-fuse-fix-high-sev-coverity-issue.patch
+Patch0444: 0444-mount-fuse-Fixing-a-coverity-issue.patch
+Patch0445: 0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
+Patch0446: 0446-bitrot-Make-number-of-signer-threads-configurable.patch
+Patch0447: 0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
+Patch0448: 0448-Posix-Use-simple-approach-to-close-fd.patch
+Patch0449: 0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
+Patch0450: 0450-tests-basic-ctime-enable-ctime-before-testing.patch
+Patch0451: 0451-extras-Modify-group-virt-to-include-network-related-.patch
+Patch0452: 0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
+Patch0453: 0453-glusterd-add-brick-command-failure.patch
+Patch0454: 0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
+Patch0455: 0455-locks-prevent-deletion-of-locked-entries.patch
+Patch0456: 0456-add-clean-local-after-grant-lock.patch
+Patch0457: 0457-cluster-ec-Improve-detection-of-new-heals.patch
+Patch0458: 0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
+Patch0459: 0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
+Patch0460: 0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
+Patch0461: 0461-geo-replication-Fix-IPv6-parsing.patch
+Patch0462: 0462-Issue-with-gf_fill_iatt_for_dirent.patch
+Patch0463: 0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
+Patch0464: 0464-storage-posix-Remove-nr_files-usage.patch
+Patch0465: 0465-posix-Implement-a-janitor-thread-to-close-fd.patch
+Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
+Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
+Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
+Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
+Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
+Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
+Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
+Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
+Patch0474: 0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
+Patch0475: 0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
+Patch0476: 0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
+Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
+Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
+Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
+Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
+Patch0481: 0481-Update-rfc.sh-to-rhgs-3.5.4.patch
+Patch0482: 0482-logger-Always-print-errors-in-english.patch
+Patch0483: 0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
+Patch0484: 0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
+Patch0485: 0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
+Patch0486: 0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
+Patch0487: 0487-Events-Log-file-not-re-opened-after-logrotate.patch
+Patch0488: 0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
+Patch0489: 0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
+Patch0490: 0490-Segmentation-fault-occurs-during-truncate.patch
+Patch0491: 0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
+Patch0492: 0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
+Patch0493: 0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
+Patch0494: 0494-glusterd-start-the-brick-on-a-different-port.patch
+Patch0495: 0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
+Patch0496: 0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
+Patch0497: 0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
+Patch0498: 0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
+Patch0499: 0499-gfapi-give-appropriate-error-when-size-exceeds.patch
+Patch0500: 0500-features-shard-Convert-shard-block-indices-to-uint64.patch
+Patch0501: 0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
+Patch0502: 0502-dht-fixing-a-permission-update-issue.patch
+Patch0503: 0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
+Patch0504: 0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
+Patch0505: 0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
+Patch0506: 0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
+Patch0507: 0507-inode-make-critical-section-smaller.patch
+Patch0508: 0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
+Patch0509: 0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
+Patch0510: 0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
+Patch0511: 0511-features-shard-Missing-format-specifier.patch
+Patch0512: 0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
+Patch0513: 0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
+Patch0514: 0514-afr-event-gen-changes.patch
+Patch0515: 0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
+Patch0516: 0516-afr-return-EIO-for-gfid-split-brains.patch
+Patch0517: 0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
+Patch0518: 0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
+Patch0519: 0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
+Patch0520: 0520-performance-open-behind-seek-fop-should-open_and_res.patch
+Patch0521: 0521-open-behind-fix-missing-fd-reference.patch
+Patch0522: 0522-lcov-improve-line-coverage.patch
+Patch0523: 0523-open-behind-rewrite-of-internal-logic.patch
+Patch0524: 0524-open-behind-fix-call_frame-leak.patch
+Patch0525: 0525-open-behind-implement-create-fop.patch
+Patch0526: 0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
+Patch0527: 0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
+Patch0528: 0528-Extras-Removing-xattr_analysis-script.patch
+Patch0529: 0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
+Patch0530: 0530-gfapi-avoid-crash-while-logging-message.patch
+Patch0531: 0531-Glustereventsd-Default-port-change-2091.patch
+Patch0532: 0532-glusterd-fix-for-starting-brick-on-new-port.patch
+Patch0533: 0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
+Patch0534: 0534-glusterd-Resolve-use-after-free-bug-2181.patch
+Patch0535: 0535-multiple-files-use-dict_allocate_and_serialize-where.patch
+Patch0536: 0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
+Patch0537: 0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
+Patch0538: 0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
+Patch0539: 0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
+Patch0540: 0540-extras-Disable-write-behind-for-group-samba.patch
+Patch0541: 0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
+Patch0542: 0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
+Patch0543: 0543-glusterd-handle-custom-xlator-failure-cases.patch
+Patch0544: 0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch
 
 %description
 GlusterFS is a distributed file-system capable of scaling to several
@@ -1044,6 +1196,9 @@ Summary:          Clustered file-system server
 Requires:         %{name}%{?_isa} = %{version}-%{release}
 Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
 Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
+%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
+Requires:         glusterfs-selinux >= 1.0-1
+%endif
 # some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
 Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
 # self-heal daemon, rebalance, nfs-server etc. are actually clients
@@ -1844,7 +1999,6 @@ exit 0
 %if ( 0%{!?_without_server:1} )
 %files server
 %doc extras/clear_xattrs.sh
-%{_datadir}/glusterfs/scripts/xattr_analysis.py*
 %{_datadir}/glusterfs/scripts/quota_fsck.py*
 # sysconf
 %config(noreplace) %{_sysconfdir}/glusterfs
@@ -2447,15 +2601,84 @@ fi
 %endif
 
 %changelog
-* Tue Mar 30 2021 CentOS Sources <bugs@centos.org> - 6.0-37.2.el8.centos
-- remove vendor and/or packager lines
+* Mon Aug 30 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.4
+- Add gating.yaml, fixes bugs bz#1996984
+
+* Tue Aug 24 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.3
+- fixes bugs bz#1996984
+
+* Thu May 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.2
+- fixes bugs bz#1953901
+
+* Thu Apr 22 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.1
+- fixes bugs bz#1927235
+
+* Wed Apr 14 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56
+- fixes bugs bz#1948547
+
+* Fri Mar 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-55
+- fixes bugs bz#1939372
+
+* Wed Mar 03 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-54
+- fixes bugs bz#1832306 bz#1911292 bz#1924044
+
+* Thu Feb 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-53
+- fixes bugs bz#1224906 bz#1691320 bz#1719171 bz#1814744 bz#1865796
+
+* Thu Jan 28 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-52
+- fixes bugs bz#1600459 bz#1719171 bz#1830713 bz#1856574
+
+* Mon Dec 28 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-51
+- fixes bugs bz#1640148 bz#1856574 bz#1910119
+
+* Tue Dec 15 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-50
+- fixes bugs bz#1224906 bz#1412494 bz#1612973 bz#1663821 bz#1691320 
+  bz#1726673 bz#1749304 bz#1752739 bz#1779238 bz#1813866 bz#1814744 bz#1821599 
+  bz#1832306 bz#1835229 bz#1842449 bz#1865796 bz#1878077 bz#1882923 bz#1885966 
+  bz#1890506 bz#1896425 bz#1898776 bz#1898777 bz#1898778 bz#1898781 bz#1898784 
+  bz#1903468
+
+* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
+- fixes bugs bz#1286171
+
+* Tue Nov 10 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-48
+- fixes bugs bz#1895301
+
+* Thu Nov 05 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-47
+- fixes bugs bz#1286171 bz#1821743 bz#1837926
+
+* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
+- fixes bugs bz#1873469 bz#1881823
+
+* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
+- fixes bugs bz#1785714
+
+* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-44
+- fixes bugs bz#1460657
+
+* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-43
+- fixes bugs bz#1460657
+
+* Wed Sep 02 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-42
+- fixes bugs bz#1785714
+
+* Tue Aug 25 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-41
+- fixes bugs bz#1785714 bz#1851424 bz#1851989 bz#1852736 bz#1853189 bz#1855966
+
+* Tue Jul 21 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-40
+- fixes bugs bz#1812789 bz#1844359 bz#1847081 bz#1854165
 
-* Tue Sep 08 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37.2
-- fixes bugs bz#1876857
+* Wed Jun 17 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-39
+- fixes bugs bz#1844359 bz#1845064
 
-* Wed Jun 24 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-37.1
-- fixes bugs bz#1848890 bz#1848891 bz#1848893 bz#1848894 bz#1848895 
-  bz#1848896 bz#1848899 bz#1849533
+* Wed Jun 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-38
+- fixes bugs bz#1234220 bz#1286171 bz#1487177 bz#1524457 bz#1640573 
+  bz#1663557 bz#1667954 bz#1683602 bz#1686897 bz#1721355 bz#1748865 bz#1750211 
+  bz#1754391 bz#1759875 bz#1761531 bz#1761932 bz#1763124 bz#1763129 bz#1764091 
+  bz#1775637 bz#1776901 bz#1781550 bz#1781649 bz#1781710 bz#1783232 bz#1784211 
+  bz#1784415 bz#1786516 bz#1786681 bz#1787294 bz#1787310 bz#1787331 bz#1787994 
+  bz#1790336 bz#1792873 bz#1794663 bz#1796814 bz#1804164 bz#1810924 bz#1815434 
+  bz#1836099 bz#1837467 bz#1837926 bz#1838479 bz#1839137 bz#1844359
 
 * Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
 - fixes bugs bz#1840794