From b528c21e6fedc9ac841942828b82e0c808da5efb Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha <spamecha@redhat.com>
Date: Thu, 2 Jan 2020 12:05:12 +0530
Subject: [PATCH 393/449] afr: restore timestamp of files during metadata heal

For files: During metadata heal, we restore timestamps
only for non-regular (char, block etc.) files.
Extending it for regular files as timestamp is updated
via touch command also

> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23953/
> fixes: bz#1787274
> Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>

BUG: 1761531
Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/202332
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 .../bug-1761531-metadata-heal-restore-time.t       | 74 ++++++++++++++++++++++
 xlators/cluster/afr/src/afr-self-heal-metadata.c   |  8 +--
 2 files changed, 76 insertions(+), 6 deletions(-)
 create mode 100644 tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t

diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 0000000..7e24eae
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
index ecfa791..f4e31b6 100644
--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
+++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
@@ -421,12 +421,8 @@ afr_selfheal_metadata(call_frame_t *frame, xlator_t *this, inode_t *inode)
         if (ret)
             goto unlock;
 
-        /* Restore atime/mtime for files that don't need data heal as
-         * restoring timestamps happens only as a part of data-heal.
-         */
-        if (!IA_ISREG(locked_replies[source].poststat.ia_type))
-            afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
-                                      locked_replies);
+        afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
+                                  locked_replies);
 
         ret = afr_selfheal_undo_pending(
             frame, this, inode, sources, sinks, healed_sinks, undid_pending,
-- 
1.8.3.1