From bebc9766e75e2d7dd1f744206e16c296189762aa Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Mon, 12 Jun 2017 22:06:18 +0530
Subject: [PATCH 520/525] cluster/afr: Implement quorum for lk fop

Problem:
At the moment, when we have a replica 3 or arbiter setup, we give
success to the application even when lk succeeds on just one brick,
which is wrong.

Fix:
When quorum is enabled, consider quorum-number of successes as success.
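
For illustration only, a minimal sketch of the success rule this patch
enforces (hypothetical helper and parameter names, not the actual AFR
code; the real check goes through afr_has_quorum() in afr_lk_cbk below):

    /* Hypothetical sketch: with quorum enabled, the lk fop is treated
     * as successful only when the number of bricks that granted the
     * lock reaches quorum-count. */
    static int
    lk_quorum_met (unsigned char *locked_nodes, int child_count,
                   int quorum_count)
    {
            int i = 0;
            int granted = 0;

            for (i = 0; i < child_count; i++)
                    if (locked_nodes[i])
                            granted++;

            return (granted >= quorum_count);
    }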

>BUG: 1461792
>Change-Id: I5789e6eb5defb68f8a0eb9cd594d316f5cdebaea
>Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
>Reviewed-on: https://review.gluster.org/17524
>Smoke: Gluster Build System <jenkins@build.gluster.org>
>NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
>CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: Ravishankar N <ravishankar@redhat.com>

BUG: 1463104
Change-Id: I5789e6eb5defb68f8a0eb9cd594d316f5cdebaea
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/109473
---
 tests/basic/afr/lk-quorum.t          | 255 +++++++++++++++++++++++++++++++++++
 xlators/cluster/afr/src/afr-common.c |  56 +++++---
 xlators/cluster/afr/src/afr.h        |   5 -
 3 files changed, 293 insertions(+), 23 deletions(-)
 create mode 100644 tests/basic/afr/lk-quorum.t

diff --git a/tests/basic/afr/lk-quorum.t b/tests/basic/afr/lk-quorum.t
new file mode 100644
index 0000000..ad14365
--- /dev/null
+++ b/tests/basic/afr/lk-quorum.t
@@ -0,0 +1,255 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for quorum-type option for replica 2
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#Check locking behavior with quorum 'fixed' and quorum-count 2
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^fixed$" mount_get_option_value $M0 $V0-replicate-0 quorum-type
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^2$" mount_get_option_value $M0 $V0-replicate-0 quorum-count
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#When any of the bricks is down lock/unlock should fail
+#kill first brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_close $fd1
+
+#kill 2nd brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#Check locking behavior with quorum 'fixed' and quorum-count 1
+TEST $CLI volume set $V0 cluster.quorum-count 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^1$" mount_get_option_value $M0 $V0-replicate-0 quorum-count
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#When any of the bricks is down lock/unlock should succeed
+#kill first brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_close $fd1
+
+#kill 2nd brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#Check locking behavior with quorum 'auto'
+TEST $CLI volume set $V0 cluster.quorum-type auto
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^auto$" mount_get_option_value $M0 $V0-replicate-0 quorum-type
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#When first brick is down lock/unlock should fail
+#kill first brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_close $fd1
+
+#When second brick is down lock/unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for replica 3
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST fd_close $fd1
+
+#When any of the bricks is down lock/unlock should succeed
+#kill first brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_close $fd1
+
+#kill 2nd brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#kill 3rd brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST fd_close $fd1
+
+#When any two of the bricks are down lock/unlock should fail
+#kill first,second bricks
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST fd_close $fd1
+
+#kill 2nd,3rd bricks
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST fd_close $fd1
+
+#kill 1st,3rd brick
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_close $fd1
+
+cleanup
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 4b8334d..d96a819 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -3754,7 +3754,7 @@ unwind:
 
 static int
 afr_common_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
-                    int32_t op_ret, int32_t op_errno, dict_t *xdata)
+                     int32_t op_ret, int32_t op_errno, dict_t *xdata)
 {
         afr_local_t *local = NULL;
         int child_index = (long)cookie;
@@ -4123,15 +4123,27 @@ afr_lk_unlock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
                    int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
                    dict_t *xdata)
 {
-        afr_local_t * local = NULL;
+        afr_local_t *local = NULL;
+        afr_private_t *priv = this->private;
         int call_count = -1;
+        int child_index = (long)cookie;
 
         local = frame->local;
-        call_count = afr_frame_return (frame);
 
+        if (op_ret < 0 && op_errno != ENOTCONN && op_errno != EBADFD) {
+                gf_msg (this->name, GF_LOG_ERROR, op_errno,
+                        AFR_MSG_UNLOCK_FAIL,
+                        "gfid=%s: unlock failed on subvolume %s "
+                        "with lock owner %s",
+                        uuid_utoa (local->fd->inode->gfid),
+                        priv->children[child_index]->name,
+                        lkowner_utoa (&frame->root->lk_owner));
+        }
+
+        call_count = afr_frame_return (frame);
         if (call_count == 0)
                 AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
-                                  lock, xdata);
+                                  NULL, local->xdata_rsp);
 
         return 0;
 }
@@ -4153,7 +4165,7 @@ afr_lk_unlock (call_frame_t *frame, xlator_t *this)
 
         if (call_count == 0) {
                 AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
-                                  &local->cont.lk.ret_flock, NULL);
+                                  NULL, local->xdata_rsp);
                 return 0;
         }
 
@@ -4163,8 +4175,8 @@ afr_lk_unlock (call_frame_t *frame, xlator_t *this)
 
         for (i = 0; i < priv->child_count; i++) {
                 if (local->cont.lk.locked_nodes[i]) {
-                        STACK_WIND (frame, afr_lk_unlock_cbk,
-                                    priv->children[i],
+                        STACK_WIND_COOKIE (frame, afr_lk_unlock_cbk,
+                                           (void *) (long) i, priv->children[i],
                                     priv->children[i]->fops->lk,
                                     local->fd, F_SETLK,
                                     &local->cont.lk.user_flock, NULL);
@@ -4180,12 +4192,12 @@ afr_lk_unlock (call_frame_t *frame, xlator_t *this)
 
 int32_t
 afr_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
-            int32_t op_ret, int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+            int32_t op_ret, int32_t op_errno, struct gf_flock *lock,
+            dict_t *xdata)
 {
         afr_local_t *local = NULL;
         afr_private_t *priv = NULL;
         int child_index = -1;
-/*        int ret = 0; */
 
 
         local = frame->local;
@@ -4193,9 +4205,10 @@ afr_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
 
         child_index = (long) cookie;
 
-        if (!child_went_down (op_ret, op_errno) && (op_ret == -1)) {
+        afr_common_lock_cbk (frame, cookie, this, op_ret, op_errno, xdata);
+        if (op_ret < 0 && op_errno == EAGAIN) {
                 local->op_ret   = -1;
-                local->op_errno = op_errno;
+                local->op_errno = EAGAIN;
 
                 afr_lk_unlock (frame, this);
                 return 0;
@@ -4215,15 +4228,20 @@ afr_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
                             priv->children[child_index],
                             priv->children[child_index]->fops->lk,
                             local->fd, local->cont.lk.cmd,
-                            &local->cont.lk.user_flock, xdata);
-        } else if (local->op_ret == -1) {
-                /* all nodes have gone down */
+                            &local->cont.lk.user_flock,
+                            local->xdata_req);
+        } else if (priv->quorum_count &&
+                   !afr_has_quorum (local->cont.lk.locked_nodes, this)) {
+                local->op_ret   = -1;
+                local->op_errno = afr_final_errno (local, priv);
 
-                AFR_STACK_UNWIND (lk, frame, -1, ENOTCONN,
-                                  &local->cont.lk.ret_flock, NULL);
+                afr_lk_unlock (frame, this);
         } else {
+                if (local->op_ret < 0)
+                        local->op_errno = afr_final_errno (local, priv);
+
                 AFR_STACK_UNWIND (lk, frame, local->op_ret, local->op_errno,
-                                  &local->cont.lk.ret_flock, NULL);
+                                  &local->cont.lk.ret_flock, local->xdata_rsp);
         }
 
         return 0;
@@ -4258,11 +4276,13 @@ afr_lk (call_frame_t *frame, xlator_t *this,
         local->cont.lk.cmd   = cmd;
         local->cont.lk.user_flock = *flock;
         local->cont.lk.ret_flock  = *flock;
+        if (xdata)
+                local->xdata_req = dict_ref (xdata);
 
         STACK_WIND_COOKIE (frame, afr_lk_cbk, (void *) (long) 0,
                            priv->children[i],
                            priv->children[i]->fops->lk,
-                           fd, cmd, flock, xdata);
+                           fd, cmd, flock, local->xdata_req);
 
         return 0;
 out:
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index 05f8249..f6a1a6a 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -864,11 +864,6 @@ typedef struct afr_granular_esh_args {
                                       mismatch */
 } afr_granular_esh_args_t;
 
-/* did a call fail due to a child failing? */
-#define child_went_down(op_ret, op_errno) (((op_ret) < 0) && \
-                                           ((op_errno == ENOTCONN) || \
-                                            (op_errno == EBADFD)))
-
 int
 afr_inode_get_readable (call_frame_t *frame, inode_t *inode, xlator_t *this,
                         unsigned char *readable, int *event_p, int type);
-- 
1.8.3.1