Blob Blame History Raw
From 039d3b0631336ba2197fdf203226151a488d60bb Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Mon, 11 Mar 2019 17:03:28 +0530
Subject: [PATCH 538/538] cluster/afr: Send truncate on arbiter brick from SHD

Problem:
In an arbiter volume configuration SHD will not send any writes onto the arbiter
brick even if there is a data pending marker for the arbiter brick. If we have an
arbiter setup on the geo-rep master and there are data pending markers for the files
on the arbiter brick, SHD will not record any data changelog during healing. While syncing
the data from master to slave, if the arbiter-brick is considered as ACTIVE, then
there is a chance that slave will miss out some data. If the arbiter brick is being
newly added or replaced there is a chance of slave missing all the data during sync.

Fix:
If there is a data pending marker for the arbiter brick, send a truncate on the arbiter
brick during heal, so that it will record the truncate as a data transaction in the changelog.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22325/

Change-Id: I174d5d557f1ae55dbe758bc92368c133f1ad0929
BUG: 1683893
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/164978
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 ...bug-1686568-send-truncate-on-arbiter-from-shd.t | 38 ++++++++++++++++++++++
 tests/volume.rc                                    |  2 +-
 xlators/cluster/afr/src/afr-self-heal-data.c       | 25 +++++++-------
 3 files changed, 51 insertions(+), 14 deletions(-)
 create mode 100644 tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t

diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 0000000..78581e9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/volume.rc b/tests/volume.rc
index 6a983fd..3af663c 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -874,5 +874,5 @@ function check_changelog_op {
         local clog_path=$1
         local op=$2
 
-        $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep $op | wc -l
+        $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
 }
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index 2ac6e47..8bdea2a 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -399,17 +399,18 @@ __afr_selfheal_truncate_sinks (call_frame_t *frame, xlator_t *this,
 {
 	afr_local_t *local = NULL;
 	afr_private_t *priv = NULL;
-        unsigned char arbiter_sink_status = 0;
 	int i = 0;
 
 	local = frame->local;
 	priv = this->private;
 
-        if (priv->arbiter_count) {
-                arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
-                healed_sinks[ARBITER_BRICK_INDEX] = 0;
-        }
-
+        /* This will send truncate on the arbiter brick as well if it is marked
+         * as sink. If changelog is enabled on the volume it captures truncate
+         * as a data transaction on the arbiter brick. This will help geo-rep
+         * to properly sync the data from master to slave if arbiter is the
+         * ACTIVE brick during syncing and which had got some entries healed for
+         * data as part of self heal.
+         */
 	AFR_ONLIST (healed_sinks, frame, afr_sh_generic_fop_cbk, ftruncate, fd,
                     size, NULL);
 
@@ -420,8 +421,6 @@ __afr_selfheal_truncate_sinks (call_frame_t *frame, xlator_t *this,
 			*/
 			healed_sinks[i] = 0;
 
-        if (arbiter_sink_status)
-                healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
 	return 0;
 }
 
@@ -733,6 +732,11 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
                         goto unlock;
                 }
 
+		ret = __afr_selfheal_truncate_sinks (frame, this, fd, healed_sinks,
+						     locked_replies[source].poststat.ia_size);
+		if (ret < 0)
+			goto unlock;
+
                 if (priv->arbiter_count &&
                     AFR_COUNT (healed_sinks, priv->child_count) == 1 &&
                     healed_sinks[ARBITER_BRICK_INDEX]) {
@@ -740,11 +744,6 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
                         goto restore_time;
                 }
 
-		ret = __afr_selfheal_truncate_sinks (frame, this, fd, healed_sinks,
-						     locked_replies[source].poststat.ia_size);
-		if (ret < 0)
-			goto unlock;
-
 		ret = 0;
 
 	}
-- 
1.8.3.1