From f338ef26d17d41e321e0be80127126d60f3da763 Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Jan 21 2020 21:16:55 +0000
Subject: import glusterfs-6.0-20.el8

---

diff --git a/.glusterfs.metadata b/.glusterfs.metadata
index bd41365..98d5fc3 100644
--- a/.glusterfs.metadata
+++ b/.glusterfs.metadata
@@ -1 +1 @@
-bf1d8624cb45d10cf4ebf43bf7d3dc53dd55485a SOURCES/glusterfs-6.0.tar.gz
+c9d75f37e00502a10f64cd4ba9aafb17552e0800 SOURCES/glusterfs-6.0.tar.gz
diff --git a/README.debrand b/README.debrand
deleted file mode 100644
index 01c46d2..0000000
--- a/README.debrand
+++ /dev/null
@@ -1,2 +0,0 @@
-Warning: This package was configured for automatic debranding, but the changes
-failed to apply.
diff --git a/SOURCES/0193-spec-fixed-python-dependency-for-rhel6.patch b/SOURCES/0193-spec-fixed-python-dependency-for-rhel6.patch
new file mode 100644
index 0000000..6b00b69
--- /dev/null
+++ b/SOURCES/0193-spec-fixed-python-dependency-for-rhel6.patch
@@ -0,0 +1,42 @@
+From 58bc818f19cbc8e4dd97097dc3e4ec7af8fa8d4a Mon Sep 17 00:00:00 2001
+From: Rinku Kothiya
+Date: Tue, 7 May 2019 05:35:11 +0000
+Subject: [PATCH 193/221] spec: fixed python dependency for rhel6
+
+Installing redhat-storage-server was failing with python dependency
+for glusterfs-geo-replication package. This patch conditionally sets
+the python version for rhel7 and fixes the problem.
+
+Label: DOWNSTREAM ONLY
+
+BUG: 1704207
+
+Change-Id: Ie3b079fd1ccfa6fd2cbf5b08b7a70bd03f090e01
+fixes: bz#1704207
+Signed-off-by: Rinku Kothiya
+Reviewed-on: https://code.engineering.redhat.com/gerrit/169555
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ glusterfs.spec.in | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index c505cd9..1150101 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -500,7 +500,11 @@ Summary: GlusterFS Geo-replication
+ Requires: %{name}%{?_isa} = %{version}-%{release}
+ Requires: %{name}-server%{?_isa} = %{version}-%{release}
+ Requires: python%{_pythonver}
++%if ( 0%{?rhel} && 0%{?rhel} < 7 )
++Requires: python-prettytable
++%else
+ Requires: python%{_pythonver}-prettytable
++%endif
+ Requires: python%{_pythonver}-gluster = %{version}-%{release}
+
+ Requires: rsync
+--
+1.8.3.1
+
diff --git a/SOURCES/0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch b/SOURCES/0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
new file mode 100644
index 0000000..7b8371f
--- /dev/null
+++ b/SOURCES/0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
@@ -0,0 +1,144 @@
+From 783f53b0b09845cd6c38f145eac685a094767ce0 Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K
+Date: Mon, 27 May 2019 11:43:26 +0530
+Subject: [PATCH 194/221] stack: Make sure to have unique call-stacks in all
+ cases
+
+At the moment new stack doesn't populate frame->root->unique in all cases. This
+makes it difficult to debug hung frames by examining successive state dumps.
+Fuse and server xlators populate it whenever they can, but other xlators won't
+be able to assign 'unique' when they need to create a new frame/stack because
+they don't know what 'unique' fuse/server xlators already used. What we need is
+for unique to be correct. If a stack with same unique is present in successive
+statedumps, that means the same operation is still in progress. This makes
+'finding hung frames' part of debugging hung frames easier.
+
+ >upstream: bz#1714098
+ >Upstream-patch: https://review.gluster.org/c/glusterfs/+/22773
+fixes bz#1716760
+Change-Id: I3e9a8f6b4111e260106c48a2ac3a41ef29361b9e
+Signed-off-by: Pranith Kumar K
+Reviewed-on: https://code.engineering.redhat.com/gerrit/172304
+Reviewed-by: Atin Mukherjee
+Tested-by: RHGS Build Bot
+---
+ libglusterfs/src/stack.c                     |  2 ++
+ xlators/features/quota/src/quotad-helpers.c  |  3 ---
+ xlators/mount/fuse/src/fuse-bridge.c         | 15 ++++++++-------
+ xlators/mount/fuse/src/fuse-helpers.c        |  1 -
+ xlators/protocol/server/src/server-helpers.c |  3 ---
+ 5 files changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/libglusterfs/src/stack.c b/libglusterfs/src/stack.c
+index 82b3577..371f60c 100644
+--- a/libglusterfs/src/stack.c
++++ b/libglusterfs/src/stack.c
+@@ -17,6 +17,7 @@ create_frame(xlator_t *xl, call_pool_t *pool)
+ {
+     call_stack_t *stack = NULL;
+     call_frame_t *frame = NULL;
++    static uint64_t unique = 0;
+
+     if (!xl || !pool) {
+         return NULL;
+@@ -52,6 +53,7 @@ create_frame(xlator_t *xl, call_pool_t *pool)
+     {
+         list_add(&stack->all_frames, &pool->all_frames);
+         pool->cnt++;
++        stack->unique = unique++;
+     }
+     UNLOCK(&pool->lock);
+     GF_ATOMIC_INC(pool->total_count);
+diff --git a/xlators/features/quota/src/quotad-helpers.c b/xlators/features/quota/src/quotad-helpers.c
+index be8f908..d9f0351 100644
+--- a/xlators/features/quota/src/quotad-helpers.c
++++ b/xlators/features/quota/src/quotad-helpers.c
+@@ -73,7 +73,6 @@ quotad_aggregator_alloc_frame(rpcsvc_request_t *req)
+         goto out;
+
+     frame->root->state = state;
+-    frame->root->unique = 0;
+
+     frame->this = this;
+ out:
+@@ -93,8 +92,6 @@ quotad_aggregator_get_frame_from_req(rpcsvc_request_t *req)
+
+     frame->root->op = req->procnum;
+
+-    frame->root->unique = req->xid;
+-
+     frame->root->uid = req->uid;
+     frame->root->gid = req->gid;
+     frame->root->pid = req->pid;
+diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
+index c3945d7..c05866b 100644
+--- a/xlators/mount/fuse/src/fuse-bridge.c
++++ b/xlators/mount/fuse/src/fuse-bridge.c
+@@ -3270,11 +3270,11 @@ fuse_release(xlator_t *this, fuse_in_header_t *finh, void *msg,
+
+     priv = this->private;
+
+-    fuse_log_eh(this, "RELEASE(): %" PRIu64 ":, fd: %p, gfid: %s", finh->unique,
+-                fd, uuid_utoa(fd->inode->gfid));
++    fuse_log_eh(this, "RELEASE(): finh->unique: %" PRIu64 ":, fd: %p, gfid: %s",
++                finh->unique, fd, uuid_utoa(fd->inode->gfid));
+
+-    gf_log("glusterfs-fuse", GF_LOG_TRACE, "%" PRIu64 ": RELEASE %p",
+-           finh->unique, state->fd);
++    gf_log("glusterfs-fuse", GF_LOG_TRACE,
++           "finh->unique: %" PRIu64 ": RELEASE %p", finh->unique, state->fd);
+
+     fuse_fd_ctx_destroy(this, state->fd);
+     fd_unref(fd);
+@@ -3759,11 +3759,12 @@ fuse_releasedir(xlator_t *this, fuse_in_header_t *finh, void *msg,
+
+     priv = this->private;
+
+-    fuse_log_eh(this, "RELEASEDIR (): %" PRIu64 ": fd: %p, gfid: %s",
++    fuse_log_eh(this,
++                "RELEASEDIR (): finh->unique: %" PRIu64 ": fd: %p, gfid: %s",
+                 finh->unique, state->fd, uuid_utoa(state->fd->inode->gfid));
+
+-    gf_log("glusterfs-fuse", GF_LOG_TRACE, "%" PRIu64 ": RELEASEDIR %p",
+-           finh->unique, state->fd);
++    gf_log("glusterfs-fuse", GF_LOG_TRACE,
++           "finh->unique: %" PRIu64 ": RELEASEDIR %p", finh->unique, state->fd);
+
+     fuse_fd_ctx_destroy(this, state->fd);
+     fd_unref(state->fd);
+diff --git a/xlators/mount/fuse/src/fuse-helpers.c b/xlators/mount/fuse/src/fuse-helpers.c
+index cf4f8e1..5bfc40c 100644
+--- a/xlators/mount/fuse/src/fuse-helpers.c
++++ b/xlators/mount/fuse/src/fuse-helpers.c
+@@ -358,7 +358,6 @@ get_call_frame_for_req(fuse_state_t *state)
+         frame->root->uid = finh->uid;
+         frame->root->gid = finh->gid;
+         frame->root->pid = finh->pid;
+-        frame->root->unique = finh->unique;
+         set_lk_owner_from_uint64(&frame->root->lk_owner, state->lk_owner);
+     }
+
+diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c
+index 1a34239..e74a24d 100644
+--- a/xlators/protocol/server/src/server-helpers.c
++++ b/xlators/protocol/server/src/server-helpers.c
+@@ -459,7 +459,6 @@ server_alloc_frame(rpcsvc_request_t *req)
+
+     frame->root->client = client;
+     frame->root->state = state; /* which socket */
+-    frame->root->unique = 0;    /* which call */
+
+     frame->this = client->this;
+ out:
+@@ -487,8 +486,6 @@ get_frame_from_request(rpcsvc_request_t *req)
+
+     frame->root->op = req->procnum;
+
+-    frame->root->unique = req->xid;
+-
+     client = req->trans->xl_private;
+     this = req->trans->xl;
+     priv = this->private;
+--
+1.8.3.1
+
diff --git a/SOURCES/0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch b/SOURCES/0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
new file mode 100644
index 0000000..949ebb6
--- /dev/null
+++ b/SOURCES/0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
@@ -0,0 +1,89 @@
+From 909a6461c860fffde5f886891dd53752f60eae67 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Tue, 18 Jun 2019 12:10:55 +0530
+Subject: [PATCH 195/221] build : package glusterfs-ganesha for rhel7 and above
+
+Label : DOWNSTREAM ONLY
+
+Change-Id: If845675b18fe055708d905ec566014baf004cb76
+fixes: bz#1720551
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://code.engineering.redhat.com/gerrit/173748
+Reviewed-by: Sreenath Girijan Menon
+Tested-by: RHGS Build Bot
+Reviewed-by: Kaleb Keithley
+---
+ glusterfs.spec.in | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 1150101..00603ec 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -302,6 +302,9 @@ Obsoletes: %{name}-ufo
+ %if ( 0%{!?_with_gnfs:1} )
+ Obsoletes: %{name}-gnfs
+ %endif
++%if ( 0%{?rhel} < 7 )
++Obsoletes: %{name}-ganesha
++%endif
+ Provides: %{name}-common = %{version}-%{release}
+ Provides: %{name}-core = %{version}-%{release}
+
+@@ -452,7 +455,7 @@ is in user space and easily manageable.
+ This package provides support to FUSE based clients and inlcudes the
+ glusterfs(d) binary.
+
+-%if ( 0%{!?_without_server:1} )
++%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
+ %package ganesha
+ Summary: NFS-Ganesha configuration
+ Group: Applications/File
+@@ -855,7 +858,7 @@ install -D -p -m 0644 extras/glusterfs-logrotate \
+     %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
+
+ # ganesha ghosts
+-%if ( 0%{!?_without_server:1} )
++%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
+ mkdir -p %{buildroot}%{_sysconfdir}/ganesha
+ touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
+ mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
+@@ -1165,11 +1168,14 @@ exit 0
+ %endif
+ %endif
+
+-%if ( 0%{?_without_server:1} )
+-#exclude ganesha related files
++%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
++#exclude ganesha related files for rhel 6 and client builds
+ %exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+ %exclude %{_libexecdir}/ganesha/*
+ %exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
++%if ( 0%{!?_without_server:1} )
++%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
++%endif
+ %endif
+
+ %exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh
+@@ -1324,7 +1330,7 @@ exit 0
+ %exclude %{_datadir}/glusterfs/tests/vagrant
+ %endif
+
+-%if ( 0%{!?_without_server:1} )
++%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
+ %files ganesha
+ %dir %{_libexecdir}/ganesha
+ %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+@@ -1936,6 +1942,9 @@ fi
+ %endif
+
+ %changelog
++* Tue Jun 18 2019 Jiffin Tony Thottan
++- build glusterfs-ganesha for rhel 7 and above (#1720551)
++
+ * Fri Jun 14 2019 Atin Mukherjee
+ - Ensure gluster-cli package is part of client build (#1720079)
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0196-posix-ctime-Fix-ctime-upgrade-issue.patch b/SOURCES/0196-posix-ctime-Fix-ctime-upgrade-issue.patch
new file mode 100644
index 0000000..1a7b68d
--- /dev/null
+++ b/SOURCES/0196-posix-ctime-Fix-ctime-upgrade-issue.patch
@@ -0,0 +1,384 @@
+From 584ee2dbb8158ee3d3c3f055f1b06ff3d9177192 Mon Sep 17 00:00:00 2001
+From: Kotresh HR
+Date: Thu, 13 Jun 2019 16:23:21 +0530
+Subject: [PATCH 196/221] posix/ctime: Fix ctime upgrade issue
+
+Problem:
+On an EC volume, during upgrade from the older version where
+ctime feature is not enabled(or not present) to the newer
+version where the ctime feature is available (enabled default),
+the self heal hangs and doesn't complete.
+
+Cause:
+The ctime feature has both client side code (utime) and
+server side code (posix). The feature is driven from client.
+Only if the client side sets the time in the frame, should
+the server side set the time attributes in xattr. But posix
+setattr/fsetattr was not doing that. When one of the server
+nodes is updated, since ctime is enabled by default, it
+starts setting xattr on setattr/fsetattr on the updated node/brick.
+
+On an EC volume the first two updated nodes(bricks) are not a
+problem because there are 4 other bricks with consistent data.
+However once the third brick is updated, the new attribute(mdata xattr)
+will cause an inconsistency on metadata on 3 bricks, which
+prevents the file from being repaired.
+
+Fix:
+Don't create mdata xattr with utimes/utimensat system call.
+Only update if already present.
+
+Backport of:
+ > Patch: https://review.gluster.org/22858
+ > Change-Id: Ieacedecb8a738bb437283ef3e0f042fd49dc4c8c
+ > fixes: bz#1720201
+ > Signed-off-by: Kotresh HR
+
+Change-Id: Ieacedecb8a738bb437283ef3e0f042fd49dc4c8c
+BUG: 1713664
+Signed-off-by: Kotresh HR
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174238
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ tests/basic/afr/split-brain-healing.t      |  36 ++++---
+ tests/utils/get-mdata-xattr.c              | 152 +++++++++++++++++++++++++++++
+ tests/volume.rc                            |  30 ++++++
+ xlators/storage/posix/src/posix-metadata.c |  21 ++++
+ 4 files changed, 223 insertions(+), 16 deletions(-)
+ create mode 100644 tests/utils/get-mdata-xattr.c
+
+diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t
+index c80f900..78553e6 100644
+--- a/tests/basic/afr/split-brain-healing.t
++++ b/tests/basic/afr/split-brain-healing.t
+@@ -20,11 +20,14 @@ function get_replicate_subvol_number {
+ cleanup;
+
+ AREQUAL_PATH=$(dirname $0)/../../utils
++GET_MDATA_PATH=$(dirname $0)/../../utils
+ CFLAGS=""
+ test "`uname -s`" != "Linux" && {
+     CFLAGS="$CFLAGS -lintl";
+ }
+ build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
++build_tester $GET_MDATA_PATH/get-mdata-xattr.c
++
+ TEST glusterd
+ TEST pidof glusterd
+ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+@@ -152,13 +155,13 @@ EXPECT $SMALLER_FILE_SIZE stat -c %s file4
+ subvolume=$(get_replicate_subvol_number file5)
+ if [ $subvolume == 0 ]
+ then
+-        mtime1=$(stat -c %Y $B0/${V0}1/file5)
+-        mtime2=$(stat -c %Y $B0/${V0}2/file5)
++        mtime1=$(get_mtime $B0/${V0}1/file5)
++        mtime2=$(get_mtime $B0/${V0}2/file5)
+         LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
+ elif [ $subvolume == 1 ]
+ then
+-        mtime1=$(stat -c %Y $B0/${V0}3/file5)
+-        mtime2=$(stat -c %Y $B0/${V0}4/file5)
++        mtime1=$(get_mtime $B0/${V0}3/file5)
++        mtime2=$(get_mtime $B0/${V0}4/file5)
+         LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
+ fi
+ $CLI volume heal $V0 split-brain latest-mtime /file5
+@@ -166,12 +169,12 @@ EXPECT "0" echo $?
+
+ if [ $subvolume == 0 ]
+ then
+-        mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file5)
+-        mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file5)
++        mtime1_after_heal=$(get_mtime $B0/${V0}1/file5)
++        mtime2_after_heal=$(get_mtime $B0/${V0}2/file5)
+ elif [ $subvolume == 1 ]
+ then
+-        mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file5)
+-        mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file5)
++        mtime1_after_heal=$(get_mtime $B0/${V0}3/file5)
++        mtime2_after_heal=$(get_mtime $B0/${V0}4/file5)
+ fi
+
+ #TODO: To below comparisons on full sub-second resolution
+
+@@ -188,14 +191,14 @@ subvolume=$(get_replicate_subvol_number file6)
+ if [ $subvolume == 0 ]
+ then
+         GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6)
+-        mtime1=$(stat -c %Y $B0/${V0}1/file6)
+-        mtime2=$(stat -c %Y $B0/${V0}2/file6)
++        mtime1=$(get_mtime $B0/${V0}1/file6)
++        mtime2=$(get_mtime $B0/${V0}2/file6)
+         LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
+ elif [ $subvolume == 1 ]
+ then
+         GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6)
+-        mtime1=$(stat -c %Y $B0/${V0}3/file6)
+-        mtime2=$(stat -c %Y $B0/${V0}4/file6)
++        mtime1=$(get_mtime $B0/${V0}3/file6)
++        mtime2=$(get_mtime $B0/${V0}4/file6)
+         LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
+ fi
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+@@ -204,12 +207,12 @@ EXPECT "0" echo $?
+
+ if [ $subvolume == 0 ]
+ then
+-        mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file6)
+-        mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file6)
++        mtime1_after_heal=$(get_mtime $B0/${V0}1/file6)
++        mtime2_after_heal=$(get_mtime $B0/${V0}2/file6)
+ elif [ $subvolume == 1 ]
+ then
+-        mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file6)
+-        mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file6)
++        mtime1_after_heal=$(get_mtime $B0/${V0}3/file6)
++        mtime2_after_heal=$(get_mtime $B0/${V0}4/file6)
+ fi
+
+ #TODO: To below comparisons on full sub-second resolution
+
+@@ -253,4 +256,5 @@ EXPECT "1" echo $?
+
+ cd -
+ TEST rm $AREQUAL_PATH/arequal-checksum
++TEST rm $GET_MDATA_PATH/get-mdata-xattr
+ cleanup
+diff --git a/tests/utils/get-mdata-xattr.c b/tests/utils/get-mdata-xattr.c
+new file mode 100644
+index 0000000..e9f5471
+--- /dev/null
++++ b/tests/utils/get-mdata-xattr.c
+@@ -0,0 +1,152 @@
++/*
++   Copyright (c) 2019 Red Hat, Inc.
++   This file is part of GlusterFS.
++
++   This file is licensed to you under your choice of the GNU Lesser
++   General Public License, version 3 or any later version (LGPLv3 or
++   later), or the GNU General Public License, version 2 (GPLv2), in all
++   cases as published by the Free Software Foundation.
++*/
++
++#include <stdlib.h>
++#include <endian.h>
++#include <stdio.h>
++#include <time.h>
++#include <string.h>
++#include <inttypes.h>
++#include <sys/types.h>
++#include <sys/xattr.h>
++#include <errno.h>
++
++typedef struct gf_timespec_disk {
++    uint64_t tv_sec;
++    uint64_t tv_nsec;
++} gf_timespec_disk_t;
++
++/* posix_mdata_t on disk structure */
++typedef struct __attribute__((__packed__)) posix_mdata_disk {
++    /* version of structure, bumped up if any new member is added */
++    uint8_t version;
++    /* flags indicates valid fields in the structure */
++    uint64_t flags;
++    gf_timespec_disk_t ctime;
++    gf_timespec_disk_t mtime;
++    gf_timespec_disk_t atime;
++} posix_mdata_disk_t;
++
++/* In memory representation posix metadata xattr */
++typedef struct {
++    /* version of structure, bumped up if any new member is added */
++    uint8_t version;
++    /* flags indicates valid fields in the structure */
++    uint64_t flags;
++    struct timespec ctime;
++    struct timespec mtime;
++    struct timespec atime;
++} posix_mdata_t;
++
++#define GF_XATTR_MDATA_KEY "trusted.glusterfs.mdata"
++
++/* posix_mdata_from_disk converts posix_mdata_disk_t into host byte order
++ */
++static inline void
++posix_mdata_from_disk(posix_mdata_t *out, posix_mdata_disk_t *in)
++{
++    out->version = in->version;
++    out->flags = be64toh(in->flags);
++
++    out->ctime.tv_sec = be64toh(in->ctime.tv_sec);
++    out->ctime.tv_nsec = be64toh(in->ctime.tv_nsec);
++
++    out->mtime.tv_sec = be64toh(in->mtime.tv_sec);
++    out->mtime.tv_nsec = be64toh(in->mtime.tv_nsec);
++
++    out->atime.tv_sec = be64toh(in->atime.tv_sec);
++    out->atime.tv_nsec = be64toh(in->atime.tv_nsec);
++}
++
++/* posix_fetch_mdata_xattr fetches the posix_mdata_t from disk */
++static int
++posix_fetch_mdata_xattr(const char *real_path, posix_mdata_t *metadata)
++{
++    size_t size = -1;
++    char *value = NULL;
++    char gfid_str[64] = {0};
++
++    char *key = GF_XATTR_MDATA_KEY;
++
++    if (!metadata || !real_path) {
++        goto err;
++    }
++
++    /* Get size */
++    size = lgetxattr(real_path, key, NULL, 0);
++    if (size == -1) {
++        goto err;
++    }
++
++    value = calloc(size + 1, sizeof(char));
++    if (!value) {
++        goto err;
++    }
++
++    /* Get xattr value */
++    size = lgetxattr(real_path, key, value, size);
++    if (size == -1) {
++        goto err;
++    }
++    posix_mdata_from_disk(metadata, (posix_mdata_disk_t *)value);
++
++out:
++    if (value)
++        free(value);
++    return 0;
++err:
++    if (value)
++        free(value);
++    return -1;
++}
++
++int
++main(int argc, char *argv[])
++{
++    posix_mdata_t metadata;
++    uint64_t result;
++
++    if (argc != 3) {
++        /*
++          Usage: get_mdata_xattr -c|-m|-a <file-name>
++          where -c --> ctime
++                -m --> mtime
++                -a --> atime
++        */
++        printf("-1");
++        goto err;
++    }
++
++    if (posix_fetch_mdata_xattr(argv[2], &metadata)) {
++        printf("-1");
++        goto err;
++    }
++
++    switch (argv[1][1]) {
++        case 'c':
++            result = metadata.ctime.tv_sec;
++            break;
++        case 'm':
++            result = metadata.mtime.tv_sec;
++            break;
++        case 'a':
++            result = metadata.atime.tv_sec;
++            break;
++        default:
++            printf("-1");
++            goto err;
++    }
++    printf("%" PRIu64, result);
++    fflush(stdout);
++    return 0;
++err:
++    fflush(stdout);
++    return -1;
++}
+diff --git a/tests/volume.rc b/tests/volume.rc
+index bb400cc..6a78c37 100644
+--- a/tests/volume.rc
++++ b/tests/volume.rc
+@@ -927,3 +927,33 @@ function number_healer_threads_shd {
+    local pid=$(get_shd_mux_pid $1)
+    pstack $pid | grep $2 | wc -l
+ }
++
++function get_mtime {
++    local time=$(get-mdata-xattr -m $1)
++    if [ $time == "-1" ];
++    then
++        echo $(stat -c %Y $1)
++    else
++        echo $time
++    fi
++}
++
++function get_ctime {
++    local time=$(get-mdata-xattr -c $1)
++    if [ $time == "-1" ];
++    then
++        echo $(stat -c %Z $2)
++    else
++        echo $time
++    fi
++}
++
++function get_atime {
++    local time=$(get-mdata-xattr -a $1)
++    if [ $time == "-1" ];
++    then
++        echo $(stat -c %X $1)
++    else
++        echo $time
++    fi
++}
+diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c
+index e96f222..5a5e6cd 100644
+--- a/xlators/storage/posix/src/posix-metadata.c
++++ b/xlators/storage/posix/src/posix-metadata.c
+@@ -416,6 +416,22 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd,
+              * still fine as the times would get eventually
+              * accurate.
+              */
++
++            /* Don't create xattr with utimes/utimensat, only update if
++             * present. This otherwise causes issues during inservice
++             * upgrade. It causes inconsistent xattr values with in replica
++             * set. The scenario happens during upgrade where clients are
++             * older versions (without the ctime feature) and the server is
++             * upgraded to the new version (with the ctime feature which
++             * is enabled by default).
++             */
++
++            if (update_utime) {
++                UNLOCK(&inode->lock);
++                GF_FREE(mdata);
++                return 0;
++            }
++
+             mdata->version = 1;
+             mdata->flags = 0;
+             mdata->ctime.tv_sec = time->tv_sec;
+@@ -527,6 +543,11 @@ posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd,
+
+     priv = this->private;
+
++    /* NOTE:
++     * This routine (utimes) is intentionally allowed for all internal and
++     * external clients even if ctime is not set. This is because AFR and
++     * WORM uses time attributes for it's internal operations
++     */
+     if (inode && priv->ctime) {
+         if ((valid & GF_SET_ATTR_ATIME) == GF_SET_ATTR_ATIME) {
+             tv.tv_sec = stbuf->ia_atime;
+--
+1.8.3.1
+
diff --git a/SOURCES/0197-posix-fix-crash-in-posix_cs_set_state.patch b/SOURCES/0197-posix-fix-crash-in-posix_cs_set_state.patch
new file mode 100644
index 0000000..c17e6c2
--- /dev/null
+++ b/SOURCES/0197-posix-fix-crash-in-posix_cs_set_state.patch
@@ -0,0 +1,71 @@
+From 58070aa568ffbaac267b02428e974b2459ae13b0 Mon Sep 17 00:00:00 2001
+From: Susant Palai
+Date: Tue, 18 Jun 2019 16:43:43 +0530
+Subject: [PATCH 197/221] posix: fix crash in posix_cs_set_state
+
+> Fixes: bz#1721474
+> Change-Id: Ic2a53fa3d1e9e23424c6898e0986f80d52c5e3f6
+> Signed-off-by: Susant Palai
+(cherry-pick of https://review.gluster.org/#/c/glusterfs/+/22892/)
+
+BUG: 1721477
+Change-Id: Ic2a53fa3d1e9e23424c6898e0986f80d52c5e3f6
+Signed-off-by: Susant Palai
+Reviewed-on: https://code.engineering.redhat.com/gerrit/173936
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/storage/posix/src/posix-helpers.c      | 5 +++++
+ xlators/storage/posix/src/posix-inode-fd-ops.c | 7 ++++---
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index aecf4f8..849db3d 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -3235,6 +3235,11 @@ posix_cs_set_state(xlator_t *this, dict_t **rsp, gf_cs_obj_state state,
+     char *value = NULL;
+     size_t xattrsize = 0;
+
++    if (!rsp) {
++        ret = -1;
++        goto out;
++    }
++
+     if (!(*rsp)) {
+         *rsp = dict_new();
+         if (!(*rsp)) {
+diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
+index 7ca4d26..b92c411 100644
+--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
+@@ -1028,6 +1028,7 @@ posix_glfallocate(call_frame_t *frame, xlator_t *this, fd_t *fd,
+     struct iatt statpost = {
+         0,
+     };
++    dict_t *rsp_xdata = NULL;
+
+ #ifdef FALLOC_FL_KEEP_SIZE
+     if (keep_size)
+@@ -1035,15 +1036,15 @@ posix_glfallocate(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ #endif /* FALLOC_FL_KEEP_SIZE */
+
+     ret = posix_do_fallocate(frame, this, fd, flags, offset, len, &statpre,
+-                             &statpost, xdata, NULL);
++                             &statpost, xdata, &rsp_xdata);
+     if (ret < 0)
+         goto err;
+
+-    STACK_UNWIND_STRICT(fallocate, frame, 0, 0, &statpre, &statpost, NULL);
++    STACK_UNWIND_STRICT(fallocate, frame, 0, 0, &statpre, &statpost, rsp_xdata);
+     return 0;
+
+ err:
+-    STACK_UNWIND_STRICT(fallocate, frame, -1, -ret, NULL, NULL, NULL);
++    STACK_UNWIND_STRICT(fallocate, frame, -1, -ret, NULL, NULL, rsp_xdata);
+     return 0;
+ }
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0198-cluster-ec-Prevent-double-pre-op-xattrops.patch b/SOURCES/0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
new file mode 100644
index 0000000..5e7c272
--- /dev/null
+++ b/SOURCES/0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
@@ -0,0 +1,119 @@
+From 9912a432dc3493007462f76c5933d04a160814ae Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K
+Date: Thu, 20 Jun 2019 17:05:49 +0530
+Subject: [PATCH 198/221] cluster/ec: Prevent double pre-op xattrops
+
+Problem:
+Race:
+Thread-1                                      Thread-2
+1) Does ec_get_size_version() to perform
+pre-op fxattrop as part of write-1
+                                              2) Calls ec_set_dirty_flag() in
+                                                 ec_get_size_version() for write-2.
+                                                 This sets dirty[] to 1
+3) Completes executing
+ec_prepare_update_cbk leading to
+ctx->dirty[] = '1'
+                                              4) Takes LOCK(inode->lock) to check if there are
+                                                 any flags and sets dirty-flag because
+                                                 lock->waiting_flag is 0 now. This leads to
+                                                 fxattrop to increment on-disk dirty[] to '2'
+
+At the end of the writes the file will be marked for heal even when it doesn't need heal.
+
+Fix:
+Perform ec_set_dirty_flag() and other checks inside LOCK() to prevent dirty[] from being
+marked as '1' in step 2) above
+
+ > Upstream-patch: https://review.gluster.org/c/glusterfs/+/22907
+
+fixes: bz#1600918
+Change-Id: Icac2ab39c0b1e7e154387800fbededc561612865
+Signed-off-by: Pranith Kumar K
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174385
+Reviewed-by: Atin Mukherjee
+Tested-by: Atin Mukherjee
+---
+ tests/basic/ec/ec-dirty-flags.t    | 23 +++++++++++++++++++++++
+ xlators/cluster/ec/src/ec-common.c | 13 +++++++------
+ 2 files changed, 30 insertions(+), 6 deletions(-)
+ create mode 100644 tests/basic/ec/ec-dirty-flags.t
+
+diff --git a/tests/basic/ec/ec-dirty-flags.t b/tests/basic/ec/ec-dirty-flags.t
+new file mode 100644
+index 0000000..68e6610
+--- /dev/null
++++ b/tests/basic/ec/ec-dirty-flags.t
+@@ -0,0 +1,23 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++
++# This checks if the fop keeps the dirty flags settings correctly after
++# finishing the fop.
++
++cleanup
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..2}
++TEST $CLI volume heal $V0 disable
++TEST $CLI volume start $V0
++
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
++cd $M0
++for i in {1..1000}; do dd if=/dev/zero of=file-${i} bs=512k count=2; done
++cd -
++EXPECT "^0$" get_pending_heal_count $V0
++
++cleanup
+diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
+index 9cc6395..35c2256 100644
+--- a/xlators/cluster/ec/src/ec-common.c
++++ b/xlators/cluster/ec/src/ec-common.c
+@@ -1405,6 +1405,10 @@ ec_get_size_version(ec_lock_link_t *link)
+         !ec_is_data_fop(fop->id))
+         link->optimistic_changelog = _gf_true;
+
++    memset(&loc, 0, sizeof(loc));
++
++    LOCK(&lock->loc.inode->lock);
++
+     set_dirty = ec_set_dirty_flag(link, ctx, dirty);
+
+     /* If ec metadata has already been retrieved, do not try again. */
+@@ -1412,20 +1416,16 @@ ec_get_size_version(ec_lock_link_t *link)
+         if (ec_is_data_fop(fop->id)) {
+             fop->healing |= lock->healing;
+         }
+-        return;
++        goto unlock;
+     }
+
+     /* Determine if there's something we need to retrieve for the current
+      * operation. */
+     if (!set_dirty && !lock->query && (lock->loc.inode->ia_type != IA_IFREG) &&
+         (lock->loc.inode->ia_type != IA_INVAL)) {
+-        return;
++        goto unlock;
+     }
+
+-    memset(&loc, 0, sizeof(loc));
+-
+-    LOCK(&lock->loc.inode->lock);
+-
+     changed_flags = ec_set_xattrop_flags_and_params(lock, link, dirty);
+     if (link->waiting_flags) {
+         /* This fop needs to wait until all its flags are cleared which
+@@ -1436,6 +1436,7 @@ ec_get_size_version(ec_lock_link_t *link)
+         GF_ASSERT(!changed_flags);
+     }
+
++unlock:
+     UNLOCK(&lock->loc.inode->lock);
+
+     if (!changed_flags)
+--
+1.8.3.1
+
diff --git a/SOURCES/0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch b/SOURCES/0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
new file mode 100644
index 0000000..161675e
--- /dev/null
+++ b/SOURCES/0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
@@ -0,0 +1,80 @@
+From e41b4a45f9f5c07ffa38582d0bb4517f6a66eaa3 Mon Sep 17 00:00:00 2001
+From: Soumya Koduri
+Date: Fri, 7 Jun 2019 19:33:07 +0530
+Subject: [PATCH 199/221] upcall: Avoid sending notifications for invalid
+ inodes
+
+For nameless LOOKUPs, server creates a new inode which shall
+remain invalid until the fop is successfully processed post
+which it is linked to the inode table.
+
+But in case there is an already linked inode for that entry,
+it discards that newly created inode which results in upcall
+notification. This may result in client being bombarded with
+unnecessary upcalls affecting performance if the data set is huge.
+
+This issue can be avoided by looking up and storing the upcall
+context in the original linked inode (if exists), thus saving up on
+those extra callbacks.
+
+This is backport of below upstream fix -
+mainline: https://review.gluster.org/22840
+release-6: https://review.gluster.org/22873
+
+Change-Id: I044a1737819bb40d1a049d2f53c0566e746d2a17
+fixes: bz#1717784
+Signed-off-by: Soumya Koduri
+Reviewed-on: https://code.engineering.redhat.com/gerrit/173507
+Tested-by: RHGS Build Bot
+Reviewed-by: Kaleb Keithley
+---
+ xlators/features/upcall/src/upcall-internal.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/features/upcall/src/upcall-internal.c b/xlators/features/upcall/src/upcall-internal.c
+index 46cf6f8..7998dd2 100644
+--- a/xlators/features/upcall/src/upcall-internal.c
++++ b/xlators/features/upcall/src/upcall-internal.c
+@@ -520,6 +520,7 @@ upcall_cache_invalidate(call_frame_t *frame, xlator_t *this, client_t *client,
+     upcall_client_t *tmp = NULL;
+     upcall_inode_ctx_t *up_inode_ctx = NULL;
+     gf_boolean_t found = _gf_false;
++    inode_t *linked_inode = NULL;
+
+     if (!is_upcall_enabled(this))
+         return;
+@@ -532,7 +533,20 @@ upcall_cache_invalidate(call_frame_t *frame, xlator_t *this, client_t *client,
+         return;
+     }
+
+-    if (inode)
++    /* For nameless LOOKUPs, inode created shall always be
++     * invalid. Hence check if there is any already linked inode.
++     * If yes, update the inode_ctx of that valid inode
++     */
++    if (inode && (inode->ia_type == IA_INVAL) && stbuf) {
++        linked_inode = inode_find(inode->table, stbuf->ia_gfid);
++        if (linked_inode) {
++            gf_log("upcall", GF_LOG_DEBUG,
++                   "upcall_inode_ctx_get of linked inode (%p)", inode);
++            up_inode_ctx = upcall_inode_ctx_get(linked_inode, this);
++        }
++    }
++
++    if (inode && !up_inode_ctx)
+         up_inode_ctx = upcall_inode_ctx_get(inode, this);
+
+     if (!up_inode_ctx) {
+@@ -600,6 +614,9 @@ upcall_cache_invalidate(call_frame_t *frame, xlator_t *this, client_t *client,
+     }
+     pthread_mutex_unlock(&up_inode_ctx->client_list_lock);
+ out:
++    /* release the ref from inode_find */
++    if (linked_inode)
++        inode_unref(linked_inode);
+     return;
+ }
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch b/SOURCES/0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
new file mode 100644
index 0000000..ffef4d4
--- /dev/null
+++ b/SOURCES/0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
@@ -0,0 +1,206 @@
+From bd553499909d2d57fd05696dc7604901cef3a36a Mon Sep 17 00:00:00 2001
+From: Soumya Koduri
+Date: Fri, 7 Jun 2019 17:20:15 +0530
+Subject: [PATCH 200/221] gfapi: fix incorrect initialization of upcall syncop
+ arguments
+
+While sending upcall notifications via synctasks, the argument used to
+carry relevant data for these tasks is not initialized properly. This patch
+is to fix the same.
+
+This is backport of below upstream fix -
+mainline: https://review.gluster.org/22839
+release-6: https://review.gluster.org/22871
+
+Change-Id: I9fa8f841e71d3c37d3819fbd430382928c07176c
+fixes: bz#1717784
+Signed-off-by: Soumya Koduri
+Reviewed-on: https://code.engineering.redhat.com/gerrit/173508
+Tested-by: RHGS Build Bot
+Reviewed-by: Kaleb Keithley
+---
+ api/src/glfs-fops.c | 109 ++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 72 insertions(+), 37 deletions(-)
+
+diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
+index 01ba60b..396f18c 100644
+--- a/api/src/glfs-fops.c
++++ b/api/src/glfs-fops.c
+@@ -34,7 +34,7 @@
+
+ struct upcall_syncop_args {
+     struct glfs *fs;
+-    struct gf_upcall *upcall_data;
++    struct glfs_upcall *up_arg;
+ };
+
+ #define READDIRBUF_SIZE (sizeof(struct dirent) + GF_NAME_MAX + 1)
+@@ -5714,12 +5714,28 @@ out:
+ }
+
+ static int
++upcall_syncop_args_free(struct upcall_syncop_args *args)
++{
++    if (args && args->up_arg)
++        GLFS_FREE(args->up_arg);
++    GF_FREE(args);
++    return 0;
++}
++
++static int
+ glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque)
+ {
+     struct upcall_syncop_args *args = opaque;
+
+-    GF_FREE(args->upcall_data);
+-    GF_FREE(args);
++    /* Here we not using upcall_syncop_args_free as application
++     * will be cleaning up the args->up_arg using glfs_free
++     * post processing upcall.
++     */
++    if (ret) {
++        upcall_syncop_args_free(args);
++    } else
++        GF_FREE(args);
++
+     return 0;
+ }
+
+@@ -5727,13 +5743,29 @@ static int
+ glfs_cbk_upcall_syncop(void *opaque)
+ {
+     struct upcall_syncop_args *args = opaque;
+-    int ret = -1;
+     struct glfs_upcall *up_arg = NULL;
+     struct glfs *fs;
+-    struct gf_upcall *upcall_data;
+
+     fs = args->fs;
+-    upcall_data = args->upcall_data;
++    up_arg = args->up_arg;
++
++    if (fs->up_cbk && up_arg) {
++        (fs->up_cbk)(up_arg, fs->up_data);
++        return 0;
++    }
++
++    return -1;
++}
++
++static struct upcall_syncop_args *
++upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
++{
++    struct upcall_syncop_args *args = NULL;
++    int ret = -1;
++    struct glfs_upcall *up_arg = NULL;
++
++    if (!fs || !upcall_data)
++        goto out;
+
+     up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall,
+                          glfs_mt_upcall_entry_t);
+@@ -5754,33 +5786,51 @@ glfs_cbk_upcall_syncop(void *opaque)
+         errno = EINVAL;
+     }
+
+-    if (!ret && (up_arg->reason != GLFS_UPCALL_EVENT_NULL)) {
+-        /* It could so happen that the file which got
+-         * upcall notification may have got deleted by
+-         * the same client. In such cases up_arg->reason
+-         * is set to GLFS_UPCALL_EVENT_NULL. No need to
+-         * send upcall then */
+-        (fs->up_cbk)(up_arg, fs->up_data);
+-    } else if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
++    /* It could so happen that the file which got
++     * upcall notification may have got deleted by
++     * the same client. In such cases up_arg->reason
++     * is set to GLFS_UPCALL_EVENT_NULL. No need to
++     * send upcall then
++     */
++    if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
+         gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_INVALID_ENTRY,
+                "Upcall_EVENT_NULL received. Skipping it.");
+         goto out;
+-    } else {
++    } else if (ret) {
+         gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY,
+                "Upcall entry validation failed.");
+         goto out;
+     }
+
++    args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
++                     glfs_mt_upcall_entry_t);
++    if (!args) {
++        gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
++               "Upcall syncop args allocation failed.");
++        goto out;
++    }
++
++    /* Note: we are not taking any ref on fs here.
++     * Ideally applications have to unregister for upcall events
++     * or stop polling for upcall events before performing
++     * glfs_fini. And as for outstanding synctasks created, we wait
++     * for all syncenv threads to finish tasks before cleaning up the
++     * fs->ctx. Hence it seems safe to process these callback
++     * notification without taking any lock/ref.
++     */
++    args->fs = fs;
++    args->up_arg = up_arg;
++
+     /* application takes care of calling glfs_free on up_arg post
+      * their processing */
+-    ret = 0;
+
++    return args;
+ out:
+-    if (ret && up_arg) {
++    if (up_arg) {
+         GLFS_FREE(up_arg);
+     }
+
+-    return 0;
++    return NULL;
+ }
+
+ static void
+@@ -5797,24 +5847,10 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
+         goto out;
+     }
+
+-    args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
+-                     glfs_mt_upcall_entry_t);
+-    if (!args) {
+-        gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
+-               "Upcall syncop args allocation failed.");
+-        goto out;
+-    }
++    args = upcall_syncop_args_init(fs, upcall_data);
+
+-    /* Note: we are not taking any ref on fs here.
+-     * Ideally applications have to unregister for upcall events
+-     * or stop polling for upcall events before performing
+-     * glfs_fini. And as for outstanding synctasks created, we wait
+-     * for all syncenv threads to finish tasks before cleaning up the
+-     * fs->ctx. Hence it seems safe to process these callback
+-     * notification without taking any lock/ref.
+-     */
+-    args->fs = fs;
+-    args->upcall_data = gf_memdup(upcall_data, sizeof(*upcall_data));
++    if (!args)
++        goto out;
+
+     ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop,
+                        glfs_upcall_syncop_cbk, NULL, args);
+@@ -5823,8 +5859,7 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
+         gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
+                "Synctak for Upcall event_type(%d) and gfid(%s) failed",
+                upcall_data->event_type, (char *)(upcall_data->gfid));
+-        GF_FREE(args->upcall_data);
+-        GF_FREE(args);
++        upcall_syncop_args_free(args);
+     }
+
+ out:
+--
+1.8.3.1
+
diff --git a/SOURCES/0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch b/SOURCES/0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
new file mode 100644
index 0000000..0884a87
--- /dev/null
+++ b/SOURCES/0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
@@ -0,0 +1,44 @@
+From a61c2a81e5731e4e0b5136147f404e60d3c72ad0 Mon Sep 17 00:00:00 2001
+From: Sunny Kumar
+Date: Tue, 18 Jun 2019 16:25:35 +0530
+Subject: [PATCH 201/221] geo-rep: Fix permissions for GEOREP_DIR in non-root
+ setup
+
+During mountbroker setup: the 'gluster-mountbroker '
+command to set the permission and group for GEOREP_DIR directory
+(/var/lib/glusterd/geo-replication) fails due to an extra argument, which is
+essential for non-root geo-rep setup.
+
+Backport of:
+
+>Upstream patch: https://review.gluster.org/#/c/glusterfs/+/22890/
+>fixes: bz#1721441
+>Change-Id: Ia83442733bf0b29f630e8c9e398097316efca092
+>Signed-off-by: Sunny Kumar
+
+BUG: bz#1722331
+Change-Id: Ia83442733bf0b29f630e8c9e398097316efca092
+Signed-off-by: Sunny Kumar
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174169
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ geo-replication/src/peer_mountbroker.py.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in
+index ce33f97..96a7264 100644
+--- a/geo-replication/src/peer_mountbroker.py.in
++++ b/geo-replication/src/peer_mountbroker.py.in
+@@ -197,7 +197,7 @@ class NodeSetup(Cmd):
+         execute(["chgrp", "-R", args.group, GEOREP_DIR])
+         execute(["chgrp", "-R", args.group, LOG_DIR])
+         execute(["chgrp", args.group, CLI_LOG])
+-        execute(["chmod", "770", args.group, GEOREP_DIR])
++        execute(["chmod", "770", GEOREP_DIR])
+         execute(["find", LOG_DIR, "-type", "d", "-exec", "chmod", "770", "{}",
+                  "+"])
+         execute(["find", LOG_DIR, "-type", "f", "-exec", "chmod", "660", "{}",
+--
+1.8.3.1
+
diff --git a/SOURCES/0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch b/SOURCES/0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
new file mode 100644
index 0000000..7cadb24
--- /dev/null
+++ b/SOURCES/0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
@@ -0,0 +1,46 @@
+From e386fb4f4baf834e6a8fc25cc2fbbb17eb0a7a56 Mon Sep 17 00:00:00 2001
+From: Mohammed Rafi KC
+Date: Thu, 20 Jun 2019 20:43:24 +0530
+Subject: [PATCH 202/221] shd/mux: Fix race between mux_proc unlink and stop
+
+There is a small race window, where we have a shd proc
+without having a connection. That is when we stopped the
+last shd running on a process. The list was removed
+outside of a lock just after stopping the process.
+
+So there is a window where we stopped the process, but
+the shd proc list contains the entry.
+
+Backport of: https://review.gluster.org/22909
+
+>Change-Id: Id82a82509e5cd72acac24e8b7b87197626525441
+>fixes: bz#1722541
+>Signed-off-by: Mohammed Rafi KC
+
+Change-Id: I794131ede23f32fcfa5f71181149d8c1e7e439b8
+BUG: 1721802
+Signed-off-by: Mohammed Rafi KC
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174541
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+index d81d760..dbe2560 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+@@ -694,6 +694,9 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
+         gf_is_service_running(svc->proc.pidfile, &pid);
+         cds_list_del_init(&svc->mux_svc);
+         empty = cds_list_empty(&svc_proc->svcs);
++        if (empty) {
++            cds_list_del_init(&svc_proc->svc_proc_list);
++        }
+     }
+     pthread_mutex_unlock(&conf->attach_lock);
+     if (empty) {
+--
+1.8.3.1
+
diff --git a/SOURCES/0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch b/SOURCES/0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
new file mode 100644
index 0000000..39c9cd8
--- /dev/null
+++ b/SOURCES/0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
@@ -0,0 +1,233 @@
+From 541e1400ecaec5fea0f56e8ca18f00c229906d8a Mon Sep 17 00:00:00 2001
+From: Mohammed Rafi KC
+Date: Tue, 18 Jun 2019 22:15:37 +0530
+Subject: [PATCH 203/221] glusterd/shd: Change shd logfile to a unique name
+
+With the shd mux changes, shd was having a logfile
+with the volname of the first started volume.
+
+This was creating a lot of confusion, as other volumes' data
+is also logged to a logfile which has a different vol name.
+
+With these changes the logfile will be changed to a unique name
+ie "/var/log/glusterfs/glustershd.log". This was the same
+logfile name before the shd mux
+
+Backport of: https://review.gluster.org/22895
+
+>Change-Id: I2b94c1f0b2cf3c9493505dddf873687755a46dda
+>fixes: bz#1721601
+>Signed-off-by: Mohammed Rafi KC
+
+Change-Id: Ia659386dd19f533fbadaf5a9d5453c9ef2acac64
+BUG: 1721351
+Signed-off-by: Mohammed Rafi KC
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174542
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 12 --------
+ .../mgmt/glusterd/src/glusterd-shd-svc-helper.h |  6 ----
+ xlators/mgmt/glusterd/src/glusterd-shd-svc.c    | 14 ++++-----
+ xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 34 +++++++++++++++++-----
+ xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c   |  4 +--
+ xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h   |  4 +++
+ 6 files changed, 40 insertions(+), 34 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
+index 9196758..57ceda9 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
+@@ -75,18 +75,6 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+ }
+
+ void
+-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len)
+-{
+-    snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+-}
+-
+-void
+-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len)
+-{
+-    snprintf(logfile, len, "%s/shd.log", logdir);
+-}
+-
+-void
+ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
+ {
+     glusterd_svc_proc_t *svc_proc = NULL;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
+index c70702c..59466ec 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
+@@ -27,12 +27,6 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+                                     int path_len);
+
+ void
+-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len);
+-
+-void
+-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len);
+-
+-void
+ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd);
+
+ int
+diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+index dbe2560..8ad90a9 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+@@ -90,8 +90,8 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+     GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+     glusterd_svc_create_rundir(rundir);
+
+-    glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir));
+-    glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile));
++    glusterd_svc_build_logfile_path(shd_svc_name, DEFAULT_LOG_FILE_DIRECTORY,
++                                    logfile, sizeof(logfile));
+
+     /* Initialize the connection mgmt */
+     if (mux_conn && mux_svc->rpc) {
+@@ -104,7 +104,7 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+         if (ret < 0)
+             goto out;
+     } else {
+-        ret = mkdir_p(logdir, 0755, _gf_true);
++        ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY, 0755, _gf_true);
+         if ((ret == -1) && (EEXIST != errno)) {
+             gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+                    "Unable to create logdir %s", logdir);
+@@ -460,6 +460,7 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+         return -1;
+
+     glusterd_volinfo_ref(volinfo);
++
+     if (!svc->inited) {
+         ret = glusterd_shd_svc_mux_init(volinfo, svc);
+         if (ret)
+@@ -471,12 +472,11 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+         /* Unref will happen from glusterd_svc_attach_cbk */
+         ret = glusterd_attach_svc(svc, volinfo, flags);
+         if (ret) {
+-            glusterd_volinfo_unref(volinfo);
+             gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+-                   "Failed to attach shd svc(volume=%s) to pid=%d. Starting"
+-                   "a new process",
++                   "Failed to attach shd svc(volume=%s) to pid=%d",
+                    volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+-            ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags);
++            glusterd_shd_svcproc_cleanup(&volinfo->shd);
++            glusterd_volinfo_unref(volinfo);
+         }
+         goto out;
+     }
+diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+index a6e662f..400826f 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+@@ -469,6 +469,9 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
+     glusterd_conf_t *conf = NULL;
+     glusterd_svc_t *parent_svc = NULL;
+     int pid = -1;
++    char pidfile[PATH_MAX] = {
++        0,
++    };
+
+     GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+     GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+@@ -478,8 +481,26 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
+
+     pthread_mutex_lock(&conf->attach_lock);
+     {
++        if (svc->inited && !glusterd_proc_is_running(&(svc->proc))) {
++            /* This is the case when shd process was abnormally killed */
++            pthread_mutex_unlock(&conf->attach_lock);
++            glusterd_shd_svcproc_cleanup(&volinfo->shd);
++            pthread_mutex_lock(&conf->attach_lock);
++        }
++
+         if (!svc->inited) {
++            glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
++            ret = snprintf(svc->proc.name, sizeof(svc->proc.name), "%s",
++                           "glustershd");
++            if (ret < 0)
++                goto unlock;
++
++            ret = snprintf(svc->proc.pidfile, sizeof(svc->proc.pidfile), "%s",
++                           pidfile);
++            if (ret < 0)
++                goto unlock;
++
+-            if (gf_is_service_running(svc->proc.pidfile, &pid)) {
++            if (gf_is_service_running(pidfile, &pid)) {
+                 /* Just connect is required, but we don't know what happens
+                  * during the disconnect. So better to reattach.
+                  */
+@@ -487,10 +508,10 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
+             }
+
+             if (!mux_proc) {
+-                if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) {
++                if (pid != -1 && sys_access(pidfile, R_OK) == 0) {
+                     /* stale pid file, stop and unlink it */
+                     glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE);
+-                    glusterd_unlink_file(svc->proc.pidfile);
++                    glusterd_unlink_file(pidfile);
+                 }
+                 mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
+             }
+@@ -684,11 +705,10 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
+                volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+     } else {
+         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+-               "svc %s of volume %s failed to "
+-               "attach to pid %d. Starting a new process",
+-               svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
++               "svc %s of volume %s failed to attach to pid %d", svc->name,
++               volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+         if (!strcmp(svc->name, "glustershd")) {
+-            glusterd_recover_shd_attach_failure(volinfo, svc, *flag);
++            glusterd_shd_svcproc_cleanup(&volinfo->shd);
+         }
+     }
+ out:
+diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+index f32dafc..fa316a6 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+@@ -33,14 +33,14 @@ glusterd_svc_create_rundir(char *rundir)
+     return ret;
+ }
+
+-static void
++void
+ glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
+                                 size_t len)
+ {
+     snprintf(logfile, len, "%s/%s.log", logdir, server);
+ }
+
+-static void
++void
+ glusterd_svc_build_volfileid_path(char *server, char *volfileid, size_t len)
+ {
+     snprintf(volfileid, len, "gluster/%s", server);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+index fbc5225..5a5466a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+@@ -74,6 +74,10 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
+                                 size_t len);
+
+ void
++glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
++                                size_t len);
++
++void
+ glusterd_svc_build_svcdir(char *server, char *workdir, char *path, size_t len);
+
+ void
+--
+1.8.3.1
+
diff --git a/SOURCES/0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch b/SOURCES/0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
new file mode 100644
index 0000000..6d05a0b
--- /dev/null
+++ b/SOURCES/0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
@@ -0,0 +1,60 @@
+From 4d0b11088c4a3a630d71acf902064d1ed10412e8 Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee
+Date: Tue, 25 Jun 2019 11:11:10 +0530
+Subject: [PATCH 204/221] glusterd: conditionally clear txn_opinfo in stage op
+
+...otherwise this leads to a crash when volume status is run on a
+heterogeneous mode.
+
+> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22939/
+
+>Fixes: bz#1723658
+>Change-Id: I0d39f412b2e5e9d3ef0a3462b90b38bb5364b09d
+>Signed-off-by: Atin Mukherjee
+
+BUG: 1722131
+Change-Id: I0d39f412b2e5e9d3ef0a3462b90b38bb5364b09d
+Signed-off-by: Atin Mukherjee
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174566
+Tested-by: RHGS Build Bot
+---
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index d0c1a2c..9ea695e 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -5714,9 +5714,14 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
+     glusterd_op_info_t txn_op_info = {
+         {0},
+     };
++    glusterd_conf_t *priv = NULL;
+
+     this = THIS;
+     GF_ASSERT(this);
++
++    priv = this->private;
++    GF_ASSERT(priv);
++
+     GF_ASSERT(ctx);
+
+     req_ctx = ctx;
+@@ -5768,9 +5773,12 @@ out:
+     gf_msg_debug(this->name, 0, "Returning with %d", ret);
+
+     /* for no volname transactions, the txn_opinfo needs to be cleaned up
+-     * as there's no unlock event triggered
++     * as there's no unlock event triggered. However if the originator node of
++     * this transaction is still running with a version lower than 60000,
++     * txn_opinfo can't be cleared as that'll lead to a race of referring op_ctx
++     * after it's being freed.
+      */
+-    if (txn_op_info.skip_locking)
++    if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0)
+         ret = glusterd_clear_txn_opinfo(txn_id);
+
+     if (rsp_dict)
+--
+1.8.3.1
+
diff --git a/SOURCES/0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch b/SOURCES/0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
new file mode 100644
index 0000000..2b23236
--- /dev/null
+++ b/SOURCES/0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
@@ -0,0 +1,195 @@
+From b1a4947e382c5e2ba1137ed606ecffc69fcf00e9 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Tue, 25 Jun 2019 17:30:17 +0530
+Subject: [PATCH 205/221] glusterd: Can't run rebalance due to long unix socket
+
+Problem: glusterd populates the unix socket file name based
+         on volname, and if volname is lengthy, socket
+         system calls fail because the name breaches the
+         maximum length defined in the kernel.
+
+Solution: Convert the unix socket name to a hash to resolve the issue
+
+> Change-Id: I5072e8184013095587537dbfa4767286307fff65
+> fixes: bz#1720566
+> (Cherry pick from commit 2d7b77eb971700c1073db2b74f5877c1ae8293fc)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22869/)
+
+BUG: 1720192
+Change-Id: I5072e8184013095587537dbfa4767286307fff65
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174557
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ tests/bugs/glusterd/bug-1720566.t              | 50 ++++++++++++++++++++++++++
+ xlators/mgmt/glusterd/src/glusterd-rebalance.c | 38 +-------------------
+ xlators/mgmt/glusterd/src/glusterd.h           | 23 +++++-------
+ 3 files changed, 59 insertions(+), 52 deletions(-)
+ create mode 100644 tests/bugs/glusterd/bug-1720566.t
+
+diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
+new file mode 100644
+index 0000000..99bcf6f
+--- /dev/null
++++ b/tests/bugs/glusterd/bug-1720566.t
+@@ -0,0 +1,50 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../cluster.rc
++. $(dirname $0)/../../volume.rc
++
++
++cleanup;
++V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
++V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
++TEST launch_cluster 2;
++TEST $CLI_1 peer probe $H2;
++
++EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
++
++$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
++EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
++$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
++EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
++
++$CLI_1 volume start $V0
++EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
++
++$CLI_1 volume start $V1
++EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
++
++#Mount FUSE
++TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
++
++
++#Mount FUSE
++TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
++
++TEST mkdir $M0/dir{1..4};
++TEST touch $M0/dir{1..4}/files{1..4};
++
++TEST mkdir $M1/dir{1..4};
++TEST touch $M1/dir{1..4}/files{1..4};
++
++TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
++TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
++
++
++TEST $CLI_1 volume rebalance $V0 start
++TEST $CLI_1 volume rebalance $V1 start
++
++EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
++EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
++
++cleanup;
+diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+index cbed9a9..b419a89 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
++++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+@@ -266,18 +266,7 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
+
+     if (dict_get_strn(this->options, "transport.socket.bind-address",
+                       SLEN("transport.socket.bind-address"),
+-                      &volfileserver) == 0) {
+-        /*In the case of running multiple glusterds on a single machine,
+-         *we should ensure that log file and unix socket file should be
+-         *unique in given cluster */
+-
+-        GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(sockfile, volinfo, priv);
+-        snprintf(logfile, PATH_MAX, "%s/%s-%s-%s.log",
+-                 DEFAULT_LOG_FILE_DIRECTORY, volinfo->volname,
+-                 (cmd == GF_DEFRAG_CMD_START_TIER ? "tier" : "rebalance"),
+-                 uuid_utoa(MY_UUID));
+-
+-    } else {
++                      &volfileserver) != 0) {
+         volfileserver = "localhost";
+     }
+
+@@ -378,9 +367,6 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
+     glusterd_defrag_info_t *defrag = volinfo->rebal.defrag;
+     glusterd_conf_t *priv = NULL;
+     xlator_t *this = NULL;
+-    struct stat buf = {
+-        0,
+-    };
+
+     this = THIS;
+     GF_ASSERT(this);
+@@ -396,28 +382,6 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
+         goto out;
+
+     GLUSTERD_GET_DEFRAG_SOCK_FILE(sockfile, volinfo);
+-    /* Check if defrag sockfile exists in the new location
+-     * in /var/run/ , if it does not try the old location
+-     */
+-    ret = sys_stat(sockfile, &buf);
+-    /* TODO: Remove this once we don't need backward compatibility
+-     * with the older path
+-     */
+-    if (ret && (errno == ENOENT)) {
+-        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+-               "Rebalance sockfile "
+-               "%s does not exist. Trying old path.",
+-               sockfile);
+-        GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(sockfile, volinfo, priv);
+-        ret = sys_stat(sockfile, &buf);
+-        if (ret && (ENOENT == errno)) {
+-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBAL_NO_SOCK_FILE,
+-                   "Rebalance "
+-                   "sockfile %s does not exist",
+-                   sockfile);
+-            goto out;
+-        }
+-    }
+
+     /* Setting frame-timeout to 10mins (600seconds).
+      * Unix domain sockets ensures that the connection is reliable. The
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index f96bca3..7d07d33 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -910,30 +910,23 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
+         }                                                                      \
+     } while (0)
+
+-#define GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(path, volinfo, priv)                 \
+-    do {                                                                       \
+-        char defrag_path[PATH_MAX];                                            \
+-        int32_t _sockfile_old_len;                                             \
+-        GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv);                   \
+-        _sockfile_old_len = snprintf(path, PATH_MAX, "%s/%s.sock",             \
+-                                     defrag_path, uuid_utoa(MY_UUID));         \
+-        if ((_sockfile_old_len < 0) || (_sockfile_old_len >= PATH_MAX)) {      \
+-            path[0] = 0;                                                       \
+-        }                                                                      \
+-    } while (0)
+-
+ #define GLUSTERD_GET_DEFRAG_SOCK_FILE(path, volinfo)                           \
+     do {                                                                       \
+         char operation[NAME_MAX];                                              \
++        char tmppath[PATH_MAX] = {                                             \
++            0,                                                                 \
++        };                                                                     \
+         int32_t _defrag_sockfile_len;                                          \
+         GLUSTERD_GET_DEFRAG_PROCESS(operation, volinfo);                       \
+         _defrag_sockfile_len = snprintf(                                       \
+-            path, UNIX_PATH_MAX,                                               \
+-            DEFAULT_VAR_RUN_DIRECTORY "/gluster-%s-%s.sock", operation,        \
+-            uuid_utoa(volinfo->volume_id));                                    \
++            tmppath, PATH_MAX,                                                 \
++            DEFAULT_VAR_RUN_DIRECTORY "/gluster-%s-%s-%s.sock", operation,     \
++            volinfo->volname, uuid_utoa(MY_UUID));                             \
+         if ((_defrag_sockfile_len < 0) ||                                      \
+             (_defrag_sockfile_len >= PATH_MAX)) {                              \
+             path[0] = 0;                                                       \
++        } else {                                                               \
++            glusterd_set_socket_filepath(tmppath, path, sizeof(path));         \
++        }                                                                      \
+     } while (0)
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0206-glusterd-ignore-user.-options-from-compatibility-che.patch b/SOURCES/0206-glusterd-ignore-user.-options-from-compatibility-che.patch
new file mode 100644
index 0000000..8908097
--- /dev/null
+++ b/SOURCES/0206-glusterd-ignore-user.-options-from-compatibility-che.patch
@@ -0,0 +1,41 @@
+From f77d4a024cb9b17de7d5add064b34adfb0455d17 Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee
+Date: Mon, 24 Jun 2019 18:32:52 +0530
+Subject: [PATCH 206/221] glusterd: ignore user.* options from compatibility
+ check in brick mux
+
+user.* options are just custom and they don't contribute anything in
+terms of determining the volume compatibility in brick multiplexing
+
+> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22933/
+
+>Fixes: bz#1723402
+>Change-Id: Ic7e0181ab72993d29cab345cde64ae1340bf4faf
+>Signed-off-by: Atin Mukherjee
+
+BUG: 1722509
+Change-Id: Ic7e0181ab72993d29cab345cde64ae1340bf4faf
+Signed-off-by: Atin Mukherjee
+Reviewed-on: https://code.engineering.redhat.com/gerrit/174589
+Tested-by: RHGS Build Bot
+---
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 7768b8e..c6e9bb0 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -2425,6 +2425,9 @@ unsafe_option(dict_t *this, char *key, data_t *value, void *arg)
+     if (fnmatch("*diagnostics.client-log*", key, 0) == 0) {
+         return _gf_false;
+     }
++    if (fnmatch("user.*", key, 0) == 0) {
++        return _gf_false;
++    }
+
+     return
_gf_true; + } +-- +1.8.3.1 + diff --git a/SOURCES/0207-glusterd-fix-use-after-free-of-a-dict_t.patch b/SOURCES/0207-glusterd-fix-use-after-free-of-a-dict_t.patch new file mode 100644 index 0000000..5a92d58 --- /dev/null +++ b/SOURCES/0207-glusterd-fix-use-after-free-of-a-dict_t.patch @@ -0,0 +1,44 @@ +From a7a7d497af4230430f8a0cc54d8b49cfea260039 Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Tue, 25 Jun 2019 18:00:06 +0200 +Subject: [PATCH 207/221] glusterd: fix use-after-free of a dict_t + +A dict was passed to a function that calls dict_unref() without taking +any additional reference. Given that the same dict is also used after +the function returns, this was causing a use-after-free situation. + +To fix the issue, we simply take an additional reference before calling +the function. + +Upstream patch: +> BUG: 1723890 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22943 +> Change-Id: I98c6b76b08fe3fa6224edf281a26e9ba1ffe3017 +> Signed-off-by: Xavi Hernandez + +Change-Id: I98c6b76b08fe3fa6224edf281a26e9ba1ffe3017 +Updates: bz#1722801 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/174656 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-utils.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index c6e9bb0..4c487d0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -3697,7 +3697,7 @@ glusterd_add_volumes_to_export_dict(dict_t *peer_data, char **buf, + if (totthread) { + gf_log(this->name, GF_LOG_INFO, + "Finished merger of all dictionraies into single one"); +- dict_arr[totthread++] = peer_data; ++ dict_arr[totthread++] = dict_ref(peer_data); + ret = glusterd_dict_arr_serialize(dict_arr, totthread, buf, length); + gf_log(this->name, GF_LOG_INFO, + "Serialize dictionary data return is %d", ret); +-- +1.8.3.1 + diff --git a/SOURCES/0208-mem-pool-remove-dead-code.patch b/SOURCES/0208-mem-pool-remove-dead-code.patch new file mode 100644 index 0000000..c8678f2 --- /dev/null +++ b/SOURCES/0208-mem-pool-remove-dead-code.patch @@ -0,0 +1,161 @@ +From d7ddc1cd3af86198ffca2d1958871d4c2c04bd9e Mon Sep 17 00:00:00 2001 +From: Yaniv Kaul +Date: Thu, 21 Mar 2019 19:51:30 +0200 +Subject: [PATCH 208/221] mem-pool: remove dead code. + +Upstream patch: +> Change-Id: I3bbda719027b45e1289db2e6a718627141bcbdc8 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22394 +> BUG: 1193929 +> Signed-off-by: Yaniv Kaul + +Updates: bz#1722801 +Change-Id: I3bbda719027b45e1289db2e6a718627141bcbdc8 +Signed-off-by: Yaniv Kaul +Reviewed-on: https://code.engineering.redhat.com/gerrit/174710 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/glusterfs/mem-pool.h | 11 ------ + libglusterfs/src/mem-pool.c | 70 ----------------------------------- + 2 files changed, 81 deletions(-) + +diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h +index 90905fb..0250b59 100644 +--- a/libglusterfs/src/glusterfs/mem-pool.h ++++ b/libglusterfs/src/glusterfs/mem-pool.h +@@ -308,15 +308,4 @@ mem_pool_destroy(struct mem_pool *pool); + void + gf_mem_acct_enable_set(void *ctx); + +-/* hit will be set to : +- * _gf_true if the memory is served from mem pool +- * _gf_false if the requested size was not present in mem pool and hence +- * std alloc'd. 
+- */ +-void * +-mem_pool_get(unsigned long sizeof_type, gf_boolean_t *hit); +- +-void * +-mem_pool_get0(unsigned long sizeof_type, gf_boolean_t *hit); +- + #endif /* _MEM_POOL_H */ +diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c +index 3934a78..9b4ea52 100644 +--- a/libglusterfs/src/mem-pool.c ++++ b/libglusterfs/src/mem-pool.c +@@ -365,10 +365,6 @@ static size_t pool_list_size; + #define N_COLD_LISTS 1024 + #define POOL_SWEEP_SECS 30 + +-static unsigned long sweep_times; +-static unsigned long sweep_usecs; +-static unsigned long frees_to_system; +- + typedef struct { + struct list_head death_row; + pooled_obj_hdr_t *cold_lists[N_COLD_LISTS]; +@@ -426,7 +422,6 @@ free_obj_list(pooled_obj_hdr_t *victim) + next = victim->next; + free(victim); + victim = next; +- ++frees_to_system; + } + } + +@@ -438,9 +433,6 @@ pool_sweeper(void *arg) + per_thread_pool_list_t *next_pl; + per_thread_pool_t *pt_pool; + unsigned int i; +- struct timeval begin_time; +- struct timeval end_time; +- struct timeval elapsed; + gf_boolean_t poisoned; + + /* +@@ -457,7 +449,6 @@ pool_sweeper(void *arg) + state.n_cold_lists = 0; + + /* First pass: collect stuff that needs our attention. */ +- (void)gettimeofday(&begin_time, NULL); + (void)pthread_mutex_lock(&pool_lock); + list_for_each_entry_safe(pool_list, next_pl, &pool_threads, thr_list) + { +@@ -470,10 +461,6 @@ pool_sweeper(void *arg) + } + } + (void)pthread_mutex_unlock(&pool_lock); +- (void)gettimeofday(&end_time, NULL); +- timersub(&end_time, &begin_time, &elapsed); +- sweep_usecs += elapsed.tv_sec * 1000000 + elapsed.tv_usec; +- sweep_times += 1; + + /* Second pass: free dead pools. */ + (void)pthread_mutex_lock(&pool_free_lock); +@@ -879,63 +866,6 @@ mem_get(struct mem_pool *mem_pool) + #endif /* GF_DISABLE_MEMPOOL */ + } + +-void * +-mem_pool_get(unsigned long sizeof_type, gf_boolean_t *hit) +-{ +-#if defined(GF_DISABLE_MEMPOOL) +- return GF_MALLOC(sizeof_type, gf_common_mt_mem_pool); +-#else +- pooled_obj_hdr_t *retval; +- unsigned int power; +- struct mem_pool_shared *pool = NULL; +- +- if (!sizeof_type) { +- gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, +- "invalid argument"); +- return NULL; +- } +- +- /* We ensure sizeof_type > 1 and the next power of two will be, at least, +- * 2^POOL_SMALLEST */ +- sizeof_type |= (1 << POOL_SMALLEST) - 1; +- power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1; +- if (power > POOL_LARGEST) { +- gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, +- "invalid argument"); +- return NULL; +- } +- pool = &pools[power - POOL_SMALLEST]; +- +- retval = mem_get_from_pool(NULL, pool, hit); +- +- return retval + 1; +-#endif /* GF_DISABLE_MEMPOOL */ +-} +- +-void * +-mem_pool_get0(unsigned long sizeof_type, gf_boolean_t *hit) +-{ +- void *ptr = NULL; +- unsigned int power; +- struct mem_pool_shared *pool = NULL; +- +- ptr = mem_pool_get(sizeof_type, hit); +- if (ptr) { +-#if defined(GF_DISABLE_MEMPOOL) +- memset(ptr, 0, sizeof_type); +-#else +- /* We ensure sizeof_type > 1 and the next power of two will be, at +- * least, 2^POOL_SMALLEST */ +- sizeof_type |= (1 << POOL_SMALLEST) - 1; +- power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1; +- pool = &pools[power - POOL_SMALLEST]; +- memset(ptr, 0, AVAILABLE_SIZE(pool->power_of_two)); +-#endif +- } +- +- return ptr; +-} +- + void + mem_put(void *ptr) + { +-- +1.8.3.1 + diff --git a/SOURCES/0209-core-avoid-dynamic-TLS-allocation-when-possible.patch 
b/SOURCES/0209-core-avoid-dynamic-TLS-allocation-when-possible.patch new file mode 100644 index 0000000..f46f1b6 --- /dev/null +++ b/SOURCES/0209-core-avoid-dynamic-TLS-allocation-when-possible.patch @@ -0,0 +1,1059 @@ +From 2f5969a77493814e242e6bac3c6bf7acf3202e0f Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Tue, 5 Mar 2019 18:58:20 +0100 +Subject: [PATCH 209/221] core: avoid dynamic TLS allocation when possible + +Some interdependencies between logging and memory management functions +make it impossible to use the logging framework before initializing +memory subsystem because they both depend on Thread Local Storage +allocated through pthread_key_create() during initialization. + +This causes a crash when we try to log something very early in the +initialization phase. + +To prevent this, several dynamically allocated TLS structures have +been replaced by static TLS reserved at compile time using '__thread' +keyword. This also reduces the number of error sources, making +initialization simpler. + +Upstream patch: +> BUG: 1193929 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22302 +> Change-Id: I8ea2e072411e30790d50084b6b7e909c7bb01d50 +> Signed-off-by: Xavi Hernandez + +Change-Id: I8ea2e072411e30790d50084b6b7e909c7bb01d50 +Updates: bz#1722801 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/174711 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + api/src/glfs.c | 3 +- + cli/src/cli.c | 3 +- + glusterfsd/src/glusterfsd.c | 4 +- + libglusterfs/src/globals.c | 289 ++++----------------- + libglusterfs/src/glusterfs/globals.h | 6 +- + libglusterfs/src/glusterfs/mem-pool.h | 7 +- + libglusterfs/src/libglusterfs.sym | 3 +- + libglusterfs/src/mem-pool.c | 98 +++---- + libglusterfs/src/syncop.c | 133 ++-------- + .../changelog/lib/src/gf-changelog-helpers.c | 51 +--- + xlators/features/changelog/lib/src/gf-changelog.c | 3 +- + xlators/nfs/server/src/mount3udp_svc.c | 6 +- + 12 files changed, 114 insertions(+), 492 deletions(-) + +diff --git a/api/src/glfs.c b/api/src/glfs.c +index 6bbb620..f36616d 100644 +--- a/api/src/glfs.c ++++ b/api/src/glfs.c +@@ -829,8 +829,7 @@ pub_glfs_new(const char *volname) + * Do this as soon as possible in case something else depends on + * pool allocations. + */ +- mem_pools_init_early(); +- mem_pools_init_late(); ++ mem_pools_init(); + + fs = glfs_new_fs(volname); + if (!fs) +diff --git a/cli/src/cli.c b/cli/src/cli.c +index ff39a98..99a16a0 100644 +--- a/cli/src/cli.c ++++ b/cli/src/cli.c +@@ -795,8 +795,7 @@ main(int argc, char *argv[]) + int ret = -1; + glusterfs_ctx_t *ctx = NULL; + +- mem_pools_init_early(); +- mem_pools_init_late(); ++ mem_pools_init(); + + ctx = glusterfs_ctx_new(); + if (!ctx) +diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c +index 6aee4c1..2172af4 100644 +--- a/glusterfsd/src/glusterfsd.c ++++ b/glusterfsd/src/glusterfsd.c +@@ -2722,8 +2722,6 @@ main(int argc, char *argv[]) + }; + cmd_args_t *cmd = NULL; + +- mem_pools_init_early(); +- + gf_check_and_set_mem_acct(argc, argv); + + ctx = glusterfs_ctx_new(); +@@ -2838,7 +2836,7 @@ main(int argc, char *argv[]) + * the parent, but we want to do it as soon as possible after that in + * case something else depends on pool allocations. 
+ */ +- mem_pools_init_late(); ++ mem_pools_init(); + + #ifdef GF_LINUX_HOST_OS + ret = set_oom_score_adj(ctx); +diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c +index 4fec063..02098e6 100644 +--- a/libglusterfs/src/globals.c ++++ b/libglusterfs/src/globals.c +@@ -99,16 +99,19 @@ const char *gf_upcall_list[GF_UPCALL_FLAGS_MAXVALUE] = { + glusterfs_ctx_t *global_ctx = NULL; + pthread_mutex_t global_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; + xlator_t global_xlator; +-static pthread_key_t this_xlator_key; +-static pthread_key_t synctask_key; +-static pthread_key_t uuid_buf_key; +-static char global_uuid_buf[GF_UUID_BUF_SIZE]; +-static pthread_key_t lkowner_buf_key; +-static char global_lkowner_buf[GF_LKOWNER_BUF_SIZE]; +-static pthread_key_t leaseid_buf_key; + static int gf_global_mem_acct_enable = 1; + static pthread_once_t globals_inited = PTHREAD_ONCE_INIT; + ++static pthread_key_t free_key; ++ ++static __thread xlator_t *thread_xlator = NULL; ++static __thread void *thread_synctask = NULL; ++static __thread void *thread_leaseid = NULL; ++static __thread struct syncopctx thread_syncopctx = {}; ++static __thread char thread_uuid_buf[GF_UUID_BUF_SIZE] = {}; ++static __thread char thread_lkowner_buf[GF_LKOWNER_BUF_SIZE] = {}; ++static __thread char thread_leaseid_buf[GF_LEASE_ID_BUF_SIZE] = {}; ++ + int + gf_global_mem_acct_enable_get(void) + { +@@ -122,12 +125,6 @@ gf_global_mem_acct_enable_set(int val) + return 0; + } + +-void +-glusterfs_this_destroy(void *ptr) +-{ +- FREE(ptr); +-} +- + static struct xlator_cbks global_cbks = { + .forget = NULL, + .release = NULL, +@@ -212,18 +209,9 @@ struct volume_options global_xl_options[] = { + + static volume_opt_list_t global_xl_opt_list; + +-int ++void + glusterfs_this_init() + { +- int ret = 0; +- ret = pthread_key_create(&this_xlator_key, glusterfs_this_destroy); +- if (ret != 0) { +- gf_msg("", GF_LOG_WARNING, ret, LG_MSG_PTHREAD_KEY_CREATE_FAILED, +- "failed to create " +- "the pthread key"); +- return ret; +- } +- + global_xlator.name = "glusterfs"; + global_xlator.type = GF_GLOBAL_XLATOR_NAME; + global_xlator.cbks = &global_cbks; +@@ -237,301 +225,120 @@ glusterfs_this_init() + global_xl_opt_list.given_opt = global_xl_options; + + list_add_tail(&global_xl_opt_list.list, &global_xlator.volume_options); +- +- return ret; + } + + xlator_t ** + __glusterfs_this_location() + { +- xlator_t **this_location = NULL; +- int ret = 0; +- +- this_location = pthread_getspecific(this_xlator_key); +- +- if (!this_location) { +- this_location = CALLOC(1, sizeof(*this_location)); +- if (!this_location) +- goto out; ++ xlator_t **this_location; + +- ret = pthread_setspecific(this_xlator_key, this_location); +- if (ret != 0) { +- FREE(this_location); +- this_location = NULL; +- goto out; +- } +- } +-out: +- if (this_location) { +- if (!*this_location) +- *this_location = &global_xlator; ++ this_location = &thread_xlator; ++ if (*this_location == NULL) { ++ thread_xlator = &global_xlator; + } ++ + return this_location; + } + + xlator_t * + glusterfs_this_get() + { +- xlator_t **this_location = NULL; +- +- this_location = __glusterfs_this_location(); +- if (!this_location) +- return &global_xlator; +- +- return *this_location; ++ return *__glusterfs_this_location(); + } + +-int ++void + glusterfs_this_set(xlator_t *this) + { +- xlator_t **this_location = NULL; +- +- this_location = __glusterfs_this_location(); +- if (!this_location) +- return -ENOMEM; +- +- *this_location = this; +- +- return 0; ++ thread_xlator = this; + } + + /* SYNCOPCTX */ 
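/* Illustrative sketch only, not part of the upstream diff: the conversion
 * this patch applies throughout globals.c. Dynamic TLS needs a runtime
 * pthread_key_create() that can fail and a pthread_getspecific() lookup on
 * every access; static TLS declared with __thread is reserved at compile
 * time and its accesses cannot fail, which is what makes it safe to use
 * very early in initialization. */
#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;                      /* old style: dynamic TLS */
static pthread_once_t once = PTHREAD_ONCE_INIT;
static __thread int tls_value;                 /* new style: static TLS */

static void
make_key(void)
{
    /* may fail at runtime; every caller had to handle that */
    (void)pthread_key_create(&key, NULL);
}

int
main(void)
{
    pthread_once(&once, make_key);
    if (pthread_setspecific(key, &tls_value) != 0)
        return 1;                              /* dynamic TLS can error out */
    tls_value = 42;                            /* __thread access cannot fail */
    printf("tls_value=%d, via key=%d\n", tls_value,
           *(int *)pthread_getspecific(key));
    return 0;
}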
+-static pthread_key_t syncopctx_key; +- +-static void +-syncopctx_key_destroy(void *ptr) +-{ +- struct syncopctx *opctx = ptr; +- +- if (opctx) { +- if (opctx->groups) +- GF_FREE(opctx->groups); +- +- GF_FREE(opctx); +- } +- +- return; +-} + + void * + syncopctx_getctx() + { +- void *opctx = NULL; +- +- opctx = pthread_getspecific(syncopctx_key); +- +- return opctx; +-} +- +-int +-syncopctx_setctx(void *ctx) +-{ +- int ret = 0; +- +- ret = pthread_setspecific(syncopctx_key, ctx); +- +- return ret; +-} +- +-static int +-syncopctx_init(void) +-{ +- int ret; +- +- ret = pthread_key_create(&syncopctx_key, syncopctx_key_destroy); +- +- return ret; ++ return &thread_syncopctx; + } + + /* SYNCTASK */ + +-int +-synctask_init() +-{ +- int ret = 0; +- +- ret = pthread_key_create(&synctask_key, NULL); +- +- return ret; +-} +- + void * + synctask_get() + { +- void *synctask = NULL; +- +- synctask = pthread_getspecific(synctask_key); +- +- return synctask; ++ return thread_synctask; + } + +-int ++void + synctask_set(void *synctask) + { +- int ret = 0; +- +- pthread_setspecific(synctask_key, synctask); +- +- return ret; ++ thread_synctask = synctask; + } + + // UUID_BUFFER + +-void +-glusterfs_uuid_buf_destroy(void *ptr) +-{ +- FREE(ptr); +-} +- +-int +-glusterfs_uuid_buf_init() +-{ +- int ret = 0; +- +- ret = pthread_key_create(&uuid_buf_key, glusterfs_uuid_buf_destroy); +- return ret; +-} +- + char * + glusterfs_uuid_buf_get() + { +- char *buf; +- int ret = 0; +- +- buf = pthread_getspecific(uuid_buf_key); +- if (!buf) { +- buf = MALLOC(GF_UUID_BUF_SIZE); +- ret = pthread_setspecific(uuid_buf_key, (void *)buf); +- if (ret) +- buf = global_uuid_buf; +- } +- return buf; ++ return thread_uuid_buf; + } + + /* LKOWNER_BUFFER */ + +-void +-glusterfs_lkowner_buf_destroy(void *ptr) +-{ +- FREE(ptr); +-} +- +-int +-glusterfs_lkowner_buf_init() +-{ +- int ret = 0; +- +- ret = pthread_key_create(&lkowner_buf_key, glusterfs_lkowner_buf_destroy); +- return ret; +-} +- + char * + glusterfs_lkowner_buf_get() + { +- char *buf; +- int ret = 0; +- +- buf = pthread_getspecific(lkowner_buf_key); +- if (!buf) { +- buf = MALLOC(GF_LKOWNER_BUF_SIZE); +- ret = pthread_setspecific(lkowner_buf_key, (void *)buf); +- if (ret) +- buf = global_lkowner_buf; +- } +- return buf; ++ return thread_lkowner_buf; + } + + /* Leaseid buffer */ +-void +-glusterfs_leaseid_buf_destroy(void *ptr) +-{ +- FREE(ptr); +-} +- +-int +-glusterfs_leaseid_buf_init() +-{ +- int ret = 0; +- +- ret = pthread_key_create(&leaseid_buf_key, glusterfs_leaseid_buf_destroy); +- return ret; +-} + + char * + glusterfs_leaseid_buf_get() + { + char *buf = NULL; +- int ret = 0; + +- buf = pthread_getspecific(leaseid_buf_key); +- if (!buf) { +- buf = CALLOC(1, GF_LEASE_ID_BUF_SIZE); +- ret = pthread_setspecific(leaseid_buf_key, (void *)buf); +- if (ret) { +- FREE(buf); +- buf = NULL; +- } ++ buf = thread_leaseid; ++ if (buf == NULL) { ++ buf = thread_leaseid_buf; ++ thread_leaseid = buf; + } ++ + return buf; + } + + char * + glusterfs_leaseid_exist() + { +- return pthread_getspecific(leaseid_buf_key); ++ return thread_leaseid; + } + + static void +-gf_globals_init_once() ++glusterfs_cleanup(void *ptr) + { +- int ret = 0; +- +- ret = glusterfs_this_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_TRANSLATOR_INIT_FAILED, +- "ERROR: glusterfs-translator init failed"); +- goto out; +- } +- +- ret = glusterfs_uuid_buf_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_UUID_BUF_INIT_FAILED, +- "ERROR: glusterfs uuid buffer init failed"); +- goto 
out; ++ if (thread_syncopctx.groups != NULL) { ++ GF_FREE(thread_syncopctx.groups); + } + +- ret = glusterfs_lkowner_buf_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_LKOWNER_BUF_INIT_FAILED, +- "ERROR: glusterfs lkowner buffer init failed"); +- goto out; +- } ++ mem_pool_thread_destructor(); ++} + +- ret = glusterfs_leaseid_buf_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_LEASEID_BUF_INIT_FAILED, +- "ERROR: glusterfs leaseid buffer init failed"); +- goto out; +- } ++static void ++gf_globals_init_once() ++{ ++ int ret = 0; + +- ret = synctask_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_SYNCTASK_INIT_FAILED, +- "ERROR: glusterfs synctask init failed"); +- goto out; +- } ++ glusterfs_this_init(); + +- ret = syncopctx_init(); +- if (ret) { +- gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_SYNCOPCTX_INIT_FAILED, +- "ERROR: glusterfs syncopctx init failed"); +- goto out; +- } +-out: ++ /* This is needed only to cleanup the potential allocation of ++ * thread_syncopctx.groups. */ ++ ret = pthread_key_create(&free_key, glusterfs_cleanup); ++ if (ret != 0) { ++ gf_msg("", GF_LOG_ERROR, ret, LG_MSG_PTHREAD_KEY_CREATE_FAILED, ++ "failed to create the pthread key"); + +- if (ret) { + gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_GLOBAL_INIT_FAILED, + "Exiting as global initialization failed"); ++ + exit(ret); + } + } +diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h +index e45db14..55476f6 100644 +--- a/libglusterfs/src/glusterfs/globals.h ++++ b/libglusterfs/src/glusterfs/globals.h +@@ -147,7 +147,7 @@ xlator_t ** + __glusterfs_this_location(void); + xlator_t * + glusterfs_this_get(void); +-int ++void + glusterfs_this_set(xlator_t *); + + extern xlator_t global_xlator; +@@ -156,13 +156,11 @@ extern struct volume_options global_xl_options[]; + /* syncopctx */ + void * + syncopctx_getctx(void); +-int +-syncopctx_setctx(void *ctx); + + /* task */ + void * + synctask_get(void); +-int ++void + synctask_set(void *); + + /* uuid_buf */ +diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h +index 0250b59..c5a486b 100644 +--- a/libglusterfs/src/glusterfs/mem-pool.h ++++ b/libglusterfs/src/glusterfs/mem-pool.h +@@ -279,9 +279,7 @@ struct mem_pool_shared { + }; + + void +-mem_pools_init_early(void); /* basic initialization of memory pools */ +-void +-mem_pools_init_late(void); /* start the pool_sweeper thread */ ++mem_pools_init(void); /* start the pool_sweeper thread */ + void + mem_pools_fini(void); /* cleanup memory pools */ + +@@ -306,6 +304,9 @@ void + mem_pool_destroy(struct mem_pool *pool); + + void ++mem_pool_thread_destructor(void); ++ ++void + gf_mem_acct_enable_set(void *ctx); + + #endif /* _MEM_POOL_H */ +diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym +index 7a2edef..86215d2 100644 +--- a/libglusterfs/src/libglusterfs.sym ++++ b/libglusterfs/src/libglusterfs.sym +@@ -872,8 +872,7 @@ mem_get0 + mem_pool_destroy + mem_pool_new_fn + mem_pools_fini +-mem_pools_init_early +-mem_pools_init_late ++mem_pools_init + mem_put + mkdir_p + next_token +diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c +index 9b4ea52..ab78804 100644 +--- a/libglusterfs/src/mem-pool.c ++++ b/libglusterfs/src/mem-pool.c +@@ -353,7 +353,6 @@ free: + FREE(ptr); + } + +-static pthread_key_t pool_key; + static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER; + static struct list_head pool_threads; + static pthread_mutex_t pool_free_lock = 
PTHREAD_MUTEX_INITIALIZER; +@@ -361,6 +360,8 @@ static struct list_head pool_free_threads; + static struct mem_pool_shared pools[NPOOLS]; + static size_t pool_list_size; + ++static __thread per_thread_pool_list_t *thread_pool_list = NULL; ++ + #if !defined(GF_DISABLE_MEMPOOL) + #define N_COLD_LISTS 1024 + #define POOL_SWEEP_SECS 30 +@@ -373,7 +374,6 @@ typedef struct { + + enum init_state { + GF_MEMPOOL_INIT_NONE = 0, +- GF_MEMPOOL_INIT_PREINIT, + GF_MEMPOOL_INIT_EARLY, + GF_MEMPOOL_INIT_LATE, + GF_MEMPOOL_INIT_DESTROY +@@ -486,9 +486,9 @@ pool_sweeper(void *arg) + } + + void +-pool_destructor(void *arg) ++mem_pool_thread_destructor(void) + { +- per_thread_pool_list_t *pool_list = arg; ++ per_thread_pool_list_t *pool_list = thread_pool_list; + + /* The pool-sweeper thread will take it from here. + * +@@ -499,7 +499,10 @@ pool_destructor(void *arg) + * This change can modify what mem_put() does, but both possibilities are + * fine until the sweeper thread kicks in. The real synchronization must be + * between mem_put() and the sweeper thread. */ +- pool_list->poison = 1; ++ if (pool_list != NULL) { ++ pool_list->poison = 1; ++ thread_pool_list = NULL; ++ } + } + + static __attribute__((constructor)) void +@@ -522,46 +525,14 @@ mem_pools_preinit(void) + pool_list_size = sizeof(per_thread_pool_list_t) + + sizeof(per_thread_pool_t) * (NPOOLS - 1); + +- init_done = GF_MEMPOOL_INIT_PREINIT; ++ init_done = GF_MEMPOOL_INIT_EARLY; + } + +-/* Use mem_pools_init_early() function for basic initialization. There will be +- * no cleanup done by the pool_sweeper thread until mem_pools_init_late() has +- * been called. Calling mem_get() will be possible after this function has +- * setup the basic structures. */ ++/* Call mem_pools_init() once threading has been configured completely. This ++ * prevent the pool_sweeper thread from getting killed once the main() thread ++ * exits during deamonizing. */ + void +-mem_pools_init_early(void) +-{ +- pthread_mutex_lock(&init_mutex); +- /* Use a pthread_key destructor to clean up when a thread exits. +- * +- * We won't increase init_count here, that is only done when the +- * pool_sweeper thread is started too. +- */ +- if (init_done == GF_MEMPOOL_INIT_PREINIT || +- init_done == GF_MEMPOOL_INIT_DESTROY) { +- /* key has not been created yet */ +- if (pthread_key_create(&pool_key, pool_destructor) != 0) { +- gf_log("mem-pool", GF_LOG_CRITICAL, +- "failed to initialize mem-pool key"); +- } +- +- init_done = GF_MEMPOOL_INIT_EARLY; +- } else { +- gf_log("mem-pool", GF_LOG_CRITICAL, +- "incorrect order of mem-pool initialization " +- "(init_done=%d)", +- init_done); +- } +- +- pthread_mutex_unlock(&init_mutex); +-} +- +-/* Call mem_pools_init_late() once threading has been configured completely. +- * This prevent the pool_sweeper thread from getting killed once the main() +- * thread exits during deamonizing. */ +-void +-mem_pools_init_late(void) ++mem_pools_init(void) + { + pthread_mutex_lock(&init_mutex); + if ((init_count++) == 0) { +@@ -580,13 +551,12 @@ mem_pools_fini(void) + switch (init_count) { + case 0: + /* +- * If init_count is already zero (as e.g. if somebody called +- * this before mem_pools_init_late) then the sweeper was +- * probably never even started so we don't need to stop it. +- * Even if there's some crazy circumstance where there is a +- * sweeper but init_count is still zero, that just means we'll +- * leave it running. Not perfect, but far better than any +- * known alternative. ++ * If init_count is already zero (as e.g. 
if somebody called this ++ * before mem_pools_init) then the sweeper was probably never even ++ * started so we don't need to stop it. Even if there's some crazy ++ * circumstance where there is a sweeper but init_count is still ++ * zero, that just means we'll leave it running. Not perfect, but ++ * far better than any known alternative. + */ + break; + case 1: { +@@ -594,20 +564,17 @@ mem_pools_fini(void) + per_thread_pool_list_t *next_pl; + unsigned int i; + +- /* if only mem_pools_init_early() was called, sweeper_tid will +- * be invalid and the functions will error out. That is not +- * critical. In all other cases, the sweeper_tid will be valid +- * and the thread gets stopped. */ ++ /* if mem_pools_init() was not called, sweeper_tid will be invalid ++ * and the functions will error out. That is not critical. In all ++ * other cases, the sweeper_tid will be valid and the thread gets ++ * stopped. */ + (void)pthread_cancel(sweeper_tid); + (void)pthread_join(sweeper_tid, NULL); + +- /* Need to clean the pool_key to prevent further usage of the +- * per_thread_pool_list_t structure that is stored for each +- * thread. +- * This also prevents calling pool_destructor() when a thread +- * exits, so there is no chance on a use-after-free of the +- * per_thread_pool_list_t structure. */ +- (void)pthread_key_delete(pool_key); ++ /* At this point all threads should have already terminated, so ++ * it should be safe to destroy all pending per_thread_pool_list_t ++ * structures that are stored for each thread. */ ++ mem_pool_thread_destructor(); + + /* free all objects from all pools */ + list_for_each_entry_safe(pool_list, next_pl, &pool_threads, +@@ -642,11 +609,7 @@ mem_pools_fini(void) + + #else + void +-mem_pools_init_early(void) +-{ +-} +-void +-mem_pools_init_late(void) ++mem_pools_init(void) + { + } + void +@@ -734,7 +697,7 @@ mem_get_pool_list(void) + per_thread_pool_list_t *pool_list; + unsigned int i; + +- pool_list = pthread_getspecific(pool_key); ++ pool_list = thread_pool_list; + if (pool_list) { + return pool_list; + } +@@ -767,7 +730,8 @@ mem_get_pool_list(void) + list_add(&pool_list->thr_list, &pool_threads); + (void)pthread_mutex_unlock(&pool_lock); + +- (void)pthread_setspecific(pool_key, pool_list); ++ thread_pool_list = pool_list; ++ + return pool_list; + } + +diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c +index c05939a..2eb7b49 100644 +--- a/libglusterfs/src/syncop.c ++++ b/libglusterfs/src/syncop.c +@@ -26,28 +26,10 @@ syncopctx_setfsuid(void *uid) + + opctx = syncopctx_getctx(); + +- /* alloc for this thread the first time */ +- if (!opctx) { +- opctx = GF_CALLOC(1, sizeof(*opctx), gf_common_mt_syncopctx); +- if (!opctx) { +- ret = -1; +- goto out; +- } +- +- ret = syncopctx_setctx(opctx); +- if (ret != 0) { +- GF_FREE(opctx); +- opctx = NULL; +- goto out; +- } +- } ++ opctx->uid = *(uid_t *)uid; ++ opctx->valid |= SYNCOPCTX_UID; + + out: +- if (opctx && uid) { +- opctx->uid = *(uid_t *)uid; +- opctx->valid |= SYNCOPCTX_UID; +- } +- + return ret; + } + +@@ -66,28 +48,10 @@ syncopctx_setfsgid(void *gid) + + opctx = syncopctx_getctx(); + +- /* alloc for this thread the first time */ +- if (!opctx) { +- opctx = GF_CALLOC(1, sizeof(*opctx), gf_common_mt_syncopctx); +- if (!opctx) { +- ret = -1; +- goto out; +- } +- +- ret = syncopctx_setctx(opctx); +- if (ret != 0) { +- GF_FREE(opctx); +- opctx = NULL; +- goto out; +- } +- } ++ opctx->gid = *(gid_t *)gid; ++ opctx->valid |= SYNCOPCTX_GID; + + out: +- if (opctx && gid) { +- opctx->gid = *(gid_t *)gid; +- 
opctx->valid |= SYNCOPCTX_GID; +- } +- + return ret; + } + +@@ -107,43 +71,20 @@ syncopctx_setfsgroups(int count, const void *groups) + + opctx = syncopctx_getctx(); + +- /* alloc for this thread the first time */ +- if (!opctx) { +- opctx = GF_CALLOC(1, sizeof(*opctx), gf_common_mt_syncopctx); +- if (!opctx) { +- ret = -1; +- goto out; +- } +- +- ret = syncopctx_setctx(opctx); +- if (ret != 0) { +- GF_FREE(opctx); +- opctx = NULL; +- goto out; +- } +- } +- + /* resize internal groups as required */ + if (count && opctx->grpsize < count) { + if (opctx->groups) { +- tmpgroups = GF_REALLOC(opctx->groups, (sizeof(gid_t) * count)); +- /* NOTE: Not really required to zero the reallocation, +- * as ngrps controls the validity of data, +- * making a note irrespective */ +- if (tmpgroups == NULL) { +- opctx->grpsize = 0; +- GF_FREE(opctx->groups); +- opctx->groups = NULL; +- ret = -1; +- goto out; +- } +- } else { +- tmpgroups = GF_CALLOC(count, sizeof(gid_t), gf_common_mt_syncopctx); +- if (tmpgroups == NULL) { +- opctx->grpsize = 0; +- ret = -1; +- goto out; +- } ++ /* Group list will be updated later, so no need to keep current ++ * data and waste time copying it. It's better to free the current ++ * allocation and then allocate a fresh new memory block. */ ++ GF_FREE(opctx->groups); ++ opctx->groups = NULL; ++ opctx->grpsize = 0; ++ } ++ tmpgroups = GF_MALLOC(count * sizeof(gid_t), gf_common_mt_syncopctx); ++ if (tmpgroups == NULL) { ++ ret = -1; ++ goto out; + } + + opctx->groups = tmpgroups; +@@ -177,28 +118,10 @@ syncopctx_setfspid(void *pid) + + opctx = syncopctx_getctx(); + +- /* alloc for this thread the first time */ +- if (!opctx) { +- opctx = GF_CALLOC(1, sizeof(*opctx), gf_common_mt_syncopctx); +- if (!opctx) { +- ret = -1; +- goto out; +- } +- +- ret = syncopctx_setctx(opctx); +- if (ret != 0) { +- GF_FREE(opctx); +- opctx = NULL; +- goto out; +- } +- } ++ opctx->pid = *(pid_t *)pid; ++ opctx->valid |= SYNCOPCTX_PID; + + out: +- if (opctx && pid) { +- opctx->pid = *(pid_t *)pid; +- opctx->valid |= SYNCOPCTX_PID; +- } +- + return ret; + } + +@@ -217,28 +140,10 @@ syncopctx_setfslkowner(gf_lkowner_t *lk_owner) + + opctx = syncopctx_getctx(); + +- /* alloc for this thread the first time */ +- if (!opctx) { +- opctx = GF_CALLOC(1, sizeof(*opctx), gf_common_mt_syncopctx); +- if (!opctx) { +- ret = -1; +- goto out; +- } +- +- ret = syncopctx_setctx(opctx); +- if (ret != 0) { +- GF_FREE(opctx); +- opctx = NULL; +- goto out; +- } +- } ++ opctx->lk_owner = *lk_owner; ++ opctx->valid |= SYNCOPCTX_LKOWNER; + + out: +- if (opctx && lk_owner) { +- opctx->lk_owner = *lk_owner; +- opctx->valid |= SYNCOPCTX_LKOWNER; +- } +- + return ret; + } + +diff --git a/xlators/features/changelog/lib/src/gf-changelog-helpers.c b/xlators/features/changelog/lib/src/gf-changelog-helpers.c +index 03dac5e..e5a9db4 100644 +--- a/xlators/features/changelog/lib/src/gf-changelog-helpers.c ++++ b/xlators/features/changelog/lib/src/gf-changelog-helpers.c +@@ -64,20 +64,7 @@ gf_rfc3986_encode_space_newline(unsigned char *s, char *enc, char *estr) + * made a part of libglusterfs. 
+ */ + +-static pthread_key_t rl_key; +-static pthread_once_t rl_once = PTHREAD_ONCE_INIT; +- +-static void +-readline_destructor(void *ptr) +-{ +- GF_FREE(ptr); +-} +- +-static void +-readline_once(void) +-{ +- pthread_key_create(&rl_key, readline_destructor); +-} ++static __thread read_line_t thread_tsd = {}; + + static ssize_t + my_read(read_line_t *tsd, int fd, char *ptr) +@@ -97,27 +84,6 @@ my_read(read_line_t *tsd, int fd, char *ptr) + return 1; + } + +-static int +-gf_readline_init_once(read_line_t **tsd) +-{ +- if (pthread_once(&rl_once, readline_once) != 0) +- return -1; +- +- *tsd = pthread_getspecific(rl_key); +- if (*tsd) +- goto out; +- +- *tsd = GF_CALLOC(1, sizeof(**tsd), gf_changelog_mt_libgfchangelog_rl_t); +- if (!*tsd) +- return -1; +- +- if (pthread_setspecific(rl_key, *tsd) != 0) +- return -1; +- +-out: +- return 0; +-} +- + ssize_t + gf_readline(int fd, void *vptr, size_t maxlen) + { +@@ -125,10 +91,7 @@ gf_readline(int fd, void *vptr, size_t maxlen) + size_t rc = 0; + char c = ' '; + char *ptr = NULL; +- read_line_t *tsd = NULL; +- +- if (gf_readline_init_once(&tsd)) +- return -1; ++ read_line_t *tsd = &thread_tsd; + + ptr = vptr; + for (n = 1; n < maxlen; n++) { +@@ -151,10 +114,7 @@ off_t + gf_lseek(int fd, off_t offset, int whence) + { + off_t off = 0; +- read_line_t *tsd = NULL; +- +- if (gf_readline_init_once(&tsd)) +- return -1; ++ read_line_t *tsd = &thread_tsd; + + off = sys_lseek(fd, offset, whence); + if (off == -1) +@@ -169,10 +129,7 @@ gf_lseek(int fd, off_t offset, int whence) + int + gf_ftruncate(int fd, off_t length) + { +- read_line_t *tsd = NULL; +- +- if (gf_readline_init_once(&tsd)) +- return -1; ++ read_line_t *tsd = &thread_tsd; + + if (sys_ftruncate(fd, 0)) + return -1; +diff --git a/xlators/features/changelog/lib/src/gf-changelog.c b/xlators/features/changelog/lib/src/gf-changelog.c +index 7ed9e55..d6acb37 100644 +--- a/xlators/features/changelog/lib/src/gf-changelog.c ++++ b/xlators/features/changelog/lib/src/gf-changelog.c +@@ -237,9 +237,8 @@ gf_changelog_init_master() + { + int ret = 0; + +- mem_pools_init_early(); + ret = gf_changelog_init_context(); +- mem_pools_init_late(); ++ mem_pools_init(); + + return ret; + } +diff --git a/xlators/nfs/server/src/mount3udp_svc.c b/xlators/nfs/server/src/mount3udp_svc.c +index d5e4169..0688779eb 100644 +--- a/xlators/nfs/server/src/mount3udp_svc.c ++++ b/xlators/nfs/server/src/mount3udp_svc.c +@@ -216,11 +216,7 @@ mount3udp_thread(void *argv) + + GF_ASSERT(nfsx); + +- if (glusterfs_this_set(nfsx)) { +- gf_msg(GF_MNT, GF_LOG_ERROR, ENOMEM, NFS_MSG_XLATOR_SET_FAIL, +- "Failed to set xlator, nfs.mount-udp will not work"); +- return NULL; +- } ++ glusterfs_this_set(nfsx); + + transp = svcudp_create(RPC_ANYSOCK); + if (transp == NULL) { +-- +1.8.3.1 + diff --git a/SOURCES/0210-mem-pool.-c-h-minor-changes.patch b/SOURCES/0210-mem-pool.-c-h-minor-changes.patch new file mode 100644 index 0000000..c238579 --- /dev/null +++ b/SOURCES/0210-mem-pool.-c-h-minor-changes.patch @@ -0,0 +1,129 @@ +From 77a3cac0c8aed9e084296719926a534128c31dee Mon Sep 17 00:00:00 2001 +From: Yaniv Kaul +Date: Wed, 27 Feb 2019 15:48:42 +0200 +Subject: [PATCH 210/221] mem-pool.{c|h}: minor changes + +1. Removed some code that was not needed. It did not really do anything. +2. CALLOC -> MALLOC in one place. + +Compile-tested only! 
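One of the removals is worth illustrating: mem_get_from_pool() used to report pool hits through a gf_boolean_t out-parameter that only fed the DEBUG hit/miss counters, and this patch turns it into a local variable so callers no longer pass a dummy flag. A minimal standalone sketch of that pattern (hypothetical names, assuming a malloc fallback as in the real function; this is not the gluster code itself):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long hits, misses;

/* 'cached' stands in for an object found on the pool's hot list; the
 * hit flag is purely local now and only updates the statistics */
static void *
get_object(size_t size, void *cached)
{
    bool hit = true;            /* assume a pool hit until we fall back */
    void *obj = cached;

    if (obj == NULL) {
        obj = malloc(size);     /* miss: fall back to the system allocator */
        hit = false;
    }
    if (obj != NULL) {
        if (hit)
            hits++;
        else
            misses++;
    }
    return obj;
}

int
main(void)
{
    int slot;
    void *a = get_object(sizeof(slot), &slot); /* hit */
    void *b = get_object(64, NULL);            /* miss */
    printf("hits=%lu misses=%lu\n", hits, misses);
    free(b);
    (void)a;
    return 0;
}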
+ +Upstream patch: +> BUG: 1193929 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22274 +> Signed-off-by: Yaniv Kaul +> Change-Id: I4419161e1bb636158e32b5d33044b06f1eef2449 + +Change-Id: I4419161e1bb636158e32b5d33044b06f1eef2449 +Updates: bz#1722801 +Signed-off-by: Yaniv Kaul +Reviewed-on: https://code.engineering.redhat.com/gerrit/174712 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/mem-pool.c | 37 ++++++++++++------------------------- + 1 file changed, 12 insertions(+), 25 deletions(-) + +diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c +index ab78804..ca25ffc 100644 +--- a/libglusterfs/src/mem-pool.c ++++ b/libglusterfs/src/mem-pool.c +@@ -643,7 +643,7 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type, + } + pool = &pools[power - POOL_SMALLEST]; + +- new = GF_CALLOC(sizeof(struct mem_pool), 1, gf_common_mt_mem_pool); ++ new = GF_MALLOC(sizeof(struct mem_pool), gf_common_mt_mem_pool); + if (!new) + return NULL; + +@@ -671,15 +671,7 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type, + void * + mem_get0(struct mem_pool *mem_pool) + { +- void *ptr = NULL; +- +- if (!mem_pool) { +- gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, +- "invalid argument"); +- return NULL; +- } +- +- ptr = mem_get(mem_pool); ++ void *ptr = mem_get(mem_pool); + if (ptr) { + #if defined(GF_DISABLE_MEMPOOL) + memset(ptr, 0, mem_pool->sizeof_type); +@@ -736,12 +728,14 @@ mem_get_pool_list(void) + } + + pooled_obj_hdr_t * +-mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool, +- gf_boolean_t *hit) ++mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool) + { + per_thread_pool_list_t *pool_list; + per_thread_pool_t *pt_pool; + pooled_obj_hdr_t *retval; ++#ifdef DEBUG ++ gf_boolean_t hit = _gf_true; ++#endif + + pool_list = mem_get_pool_list(); + if (!pool_list || pool_list->poison) { +@@ -755,10 +749,6 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool, + pt_pool = &pool_list->pools[pool->power_of_two - POOL_SMALLEST]; + } + +-#ifdef DEBUG +- *hit = _gf_true; +-#endif +- + (void)pthread_spin_lock(&pool_list->lock); + + retval = pt_pool->hot_list; +@@ -778,7 +768,7 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool, + retval = malloc((1 << pt_pool->parent->power_of_two) + + sizeof(pooled_obj_hdr_t)); + #ifdef DEBUG +- *hit = _gf_false; ++ hit = _gf_false; + #endif + } + } +@@ -788,7 +778,7 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool, + retval->pool = mem_pool; + retval->power_of_two = mem_pool->pool->power_of_two; + #ifdef DEBUG +- if (*hit == _gf_true) ++ if (hit == _gf_true) + GF_ATOMIC_INC(mem_pool->hit); + else + GF_ATOMIC_INC(mem_pool->miss); +@@ -807,19 +797,16 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool, + void * + mem_get(struct mem_pool *mem_pool) + { +-#if defined(GF_DISABLE_MEMPOOL) +- return GF_MALLOC(mem_pool->sizeof_type, gf_common_mt_mem_pool); +-#else +- pooled_obj_hdr_t *retval; +- gf_boolean_t hit; +- + if (!mem_pool) { + gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, + "invalid argument"); + return NULL; + } + +- retval = mem_get_from_pool(mem_pool, NULL, &hit); ++#if defined(GF_DISABLE_MEMPOOL) ++ return GF_MALLOC(mem_pool->sizeof_type, gf_common_mt_mem_pool); ++#else ++ pooled_obj_hdr_t *retval = mem_get_from_pool(mem_pool, NULL); + if (!retval) { + return NULL; + } +-- +1.8.3.1 + diff --git 
a/SOURCES/0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch b/SOURCES/0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch new file mode 100644 index 0000000..27326a9 --- /dev/null +++ b/SOURCES/0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch @@ -0,0 +1,41 @@ +From 4fa3c0be983c3f99c2785036ded5ef5ab390419b Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Mon, 6 May 2019 15:57:16 +0530 +Subject: [PATCH 211/221] libglusterfs: Fix compilation when --disable-mempool + is used + +Upstream patch: +> BUG: 1193929 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22665 +> Change-Id: I245c065b209bcce5db939b6a0a934ba6fd393b47 +> Signed-off-by: Pranith Kumar K + +Updates: bz#1722801 +Change-Id: I245c065b209bcce5db939b6a0a934ba6fd393b47 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/174713 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/mem-pool.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c +index ca25ffc..df167b6 100644 +--- a/libglusterfs/src/mem-pool.c ++++ b/libglusterfs/src/mem-pool.c +@@ -616,6 +616,11 @@ void + mem_pools_fini(void) + { + } ++void ++mem_pool_thread_destructor(void) ++{ ++} ++ + #endif + + struct mem_pool * +-- +1.8.3.1 + diff --git a/SOURCES/0212-core-fix-memory-allocation-issues.patch b/SOURCES/0212-core-fix-memory-allocation-issues.patch new file mode 100644 index 0000000..18da11d --- /dev/null +++ b/SOURCES/0212-core-fix-memory-allocation-issues.patch @@ -0,0 +1,169 @@ +From 0bf728030e0ad7a49e6e1737ea06ae74da9279d3 Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Fri, 21 Jun 2019 11:28:08 +0200 +Subject: [PATCH 212/221] core: fix memory allocation issues + +Two problems have been identified that caused gluster's memory +usage to be twice as high as required. + +1. An off-by-one error caused all objects allocated from the memory + pools to be taken from a pool bigger than required. Since each pool + corresponds to a size equal to a power of two, this was wasting half + of the available memory. + +2. The header information used for accounting on each memory object was + not taken into consideration when searching for a suitable memory + pool. It was added later when each individual block was allocated. + This made this space "invisible" to memory accounting. + +Credits: Thanks to Nithya Balachandran for identifying this problem and + testing this patch. 
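To make the two fixes concrete, here is the corrected size computation worked through for a 100-byte request, assuming for illustration a 24-byte pooled_obj_hdr_t and POOL_SMALLEST = 7 (a smallest block of 2^7 = 128 bytes; the real values depend on the build):

    size  = (100 + 24 - 1) | ((1UL << 7) - 1) = 123 | 127 = 127
    power = 64 - __builtin_clzl(127) = 64 - 57 = 7   -> the 2^7 = 128-byte pool
    AVAILABLE_SIZE(7) = 128 - 24 = 104 >= 100 usable bytes

Under the same assumptions the previous formula ignored the header and rounded the exponent one step too high (the off-by-one above), so the same request would have selected the 2^8 = 256-byte pool and then allocated the header on top of it, invisible to accounting.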
+ +Upstream patch: +> BUG: 1722802 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22921 +> Change-Id: I90e27ad795fe51ca11c13080f62207451f6c138c +> Signed-off-by: Xavi Hernandez + +Fixes: bz#1722801 +Change-Id: I90e27ad795fe51ca11c13080f62207451f6c138c +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/174714 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/glusterfs/mem-pool.h | 5 ++- + libglusterfs/src/mem-pool.c | 57 +++++++++++++++++++---------------- + 2 files changed, 35 insertions(+), 27 deletions(-) + +diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h +index c5a486b..be0a26d 100644 +--- a/libglusterfs/src/glusterfs/mem-pool.h ++++ b/libglusterfs/src/glusterfs/mem-pool.h +@@ -231,7 +231,10 @@ typedef struct pooled_obj_hdr { + struct mem_pool *pool; + } pooled_obj_hdr_t; + +-#define AVAILABLE_SIZE(p2) (1 << (p2)) ++/* Each memory block inside a pool has a fixed size that is a power of two. ++ * However each object will have a header that will reduce the available ++ * space. */ ++#define AVAILABLE_SIZE(p2) ((1UL << (p2)) - sizeof(pooled_obj_hdr_t)) + + typedef struct per_thread_pool { + /* the pool that was used to request this allocation */ +diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c +index df167b6..d88041d 100644 +--- a/libglusterfs/src/mem-pool.c ++++ b/libglusterfs/src/mem-pool.c +@@ -627,6 +627,7 @@ struct mem_pool * + mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type, + unsigned long count, char *name) + { ++ unsigned long extra_size, size; + unsigned int power; + struct mem_pool *new = NULL; + struct mem_pool_shared *pool = NULL; +@@ -637,10 +638,25 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type, + return NULL; + } + +- /* We ensure sizeof_type > 1 and the next power of two will be, at least, +- * 2^POOL_SMALLEST */ +- sizeof_type |= (1 << POOL_SMALLEST) - 1; +- power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1; ++ /* This is the overhead we'll have because of memory accounting for each ++ * memory block. */ ++ extra_size = sizeof(pooled_obj_hdr_t); ++ ++ /* We need to compute the total space needed to hold the data type and ++ * the header. Given that the smallest block size we have in the pools ++ * is 2^POOL_SMALLEST, we need to take the MAX(size, 2^POOL_SMALLEST). ++ * However, since this value is only needed to compute its rounded ++ * logarithm in base 2, and this only depends on the highest bit set, ++ * we can simply do a bitwise or with the minimum size. We need to ++ * subtract 1 for correct handling of sizes that are exactly a power ++ * of 2. */ ++ size = (sizeof_type + extra_size - 1UL) | ((1UL << POOL_SMALLEST) - 1UL); ++ ++ /* We compute the logarithm in base 2 rounded up of the resulting size. ++ * This value will identify which pool we need to use from the pools of ++ * powers of 2. This is equivalent to finding the position of the highest ++ * bit set. 
*/ ++ power = sizeof(size) * 8 - __builtin_clzl(size); + if (power > POOL_LARGEST) { + gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, + "invalid argument"); +@@ -732,8 +748,8 @@ mem_get_pool_list(void) + return pool_list; + } + +-pooled_obj_hdr_t * +-mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool) ++static pooled_obj_hdr_t * ++mem_get_from_pool(struct mem_pool *mem_pool) + { + per_thread_pool_list_t *pool_list; + per_thread_pool_t *pt_pool; +@@ -747,12 +763,7 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool) + return NULL; + } + +- if (mem_pool) { +- pt_pool = &pool_list +- ->pools[mem_pool->pool->power_of_two - POOL_SMALLEST]; +- } else { +- pt_pool = &pool_list->pools[pool->power_of_two - POOL_SMALLEST]; +- } ++ pt_pool = &pool_list->pools[mem_pool->pool->power_of_two - POOL_SMALLEST]; + + (void)pthread_spin_lock(&pool_list->lock); + +@@ -770,8 +781,7 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool) + } else { + (void)pthread_spin_unlock(&pool_list->lock); + GF_ATOMIC_INC(pt_pool->parent->allocs_stdc); +- retval = malloc((1 << pt_pool->parent->power_of_two) + +- sizeof(pooled_obj_hdr_t)); ++ retval = malloc(1 << pt_pool->parent->power_of_two); + #ifdef DEBUG + hit = _gf_false; + #endif +@@ -779,19 +789,14 @@ mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool) + } + + if (retval != NULL) { +- if (mem_pool) { +- retval->pool = mem_pool; +- retval->power_of_two = mem_pool->pool->power_of_two; ++ retval->pool = mem_pool; ++ retval->power_of_two = mem_pool->pool->power_of_two; + #ifdef DEBUG +- if (hit == _gf_true) +- GF_ATOMIC_INC(mem_pool->hit); +- else +- GF_ATOMIC_INC(mem_pool->miss); ++ if (hit == _gf_true) ++ GF_ATOMIC_INC(mem_pool->hit); ++ else ++ GF_ATOMIC_INC(mem_pool->miss); + #endif +- } else { +- retval->power_of_two = pool->power_of_two; +- retval->pool = NULL; +- } + retval->magic = GF_MEM_HEADER_MAGIC; + retval->pool_list = pool_list; + } +@@ -811,7 +816,7 @@ mem_get(struct mem_pool *mem_pool) + #if defined(GF_DISABLE_MEMPOOL) + return GF_MALLOC(mem_pool->sizeof_type, gf_common_mt_mem_pool); + #else +- pooled_obj_hdr_t *retval = mem_get_from_pool(mem_pool, NULL); ++ pooled_obj_hdr_t *retval = mem_get_from_pool(mem_pool); + if (!retval) { + return NULL; + } +-- +1.8.3.1 + diff --git a/SOURCES/0213-cluster-dht-Strip-out-dht-xattrs.patch b/SOURCES/0213-cluster-dht-Strip-out-dht-xattrs.patch new file mode 100644 index 0000000..225379b --- /dev/null +++ b/SOURCES/0213-cluster-dht-Strip-out-dht-xattrs.patch @@ -0,0 +1,42 @@ +From ff5f06d6ba5ac87094ae5df435d1cfb38802e7ca Mon Sep 17 00:00:00 2001 +From: N Balachandran +Date: Tue, 18 Jun 2019 15:33:29 +0530 +Subject: [PATCH 213/221] cluster/dht: Strip out dht xattrs + +Some internal DHT xattrs were not being +removed when calling getxattr in pass-through mode. +This has been fixed. 
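The keys involved are conf->xattr_name, conf->mds_xattr_key and conf->commithash_xattr_name; the first was already stripped, the other two were leaking through. A sketch of the resulting cleanup path, compilable only against the gluster tree headers (the helper name is hypothetical; dict_del() and GF_REMOVE_INTERNAL_XATTR() are the calls visible in the diff below):

/* strip DHT-internal xattrs before a pass-through getxattr reply
 * reaches the client */
static void
dht_pt_strip_internal_xattrs(dht_conf_t *conf, dict_t *xattr, pid_t pid)
{
    dict_del(xattr, conf->xattr_name);            /* layout xattr */
    dict_del(xattr, conf->mds_xattr_key);         /* mds xattr */
    dict_del(xattr, conf->commithash_xattr_name); /* commit hash */

    /* wildcard keys need the pattern-matching variant; skipped for
     * internal clients, which use negative pids */
    if (pid >= 0)
        GF_REMOVE_INTERNAL_XATTR("trusted.glusterfs.quota*", xattr);
}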
+ +upstream patch: https://review.gluster.org/#/c/glusterfs/+/22889/ + +>Change-Id: If7e3dbc7b495db88a566bd560888e3e9c167defa +>fixes: bz#1721435 +>Signed-off-by: N Balachandran + + +BUG: 1721357 +Change-Id: I29bce7ea78bb4fd3b493404282cb2c48ef0bf4ee +Signed-off-by: N Balachandran +Reviewed-on: https://code.engineering.redhat.com/gerrit/174699 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/dht/src/dht-common.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c +index e1edb38..9a6ea5b 100644 +--- a/xlators/cluster/dht/src/dht-common.c ++++ b/xlators/cluster/dht/src/dht-common.c +@@ -11216,6 +11216,8 @@ dht_pt_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + + conf = this->private; + dict_del(xattr, conf->xattr_name); ++ dict_del(xattr, conf->mds_xattr_key); ++ dict_del(xattr, conf->commithash_xattr_name); + + if (frame->root->pid >= 0) { + GF_REMOVE_INTERNAL_XATTR("trusted.glusterfs.quota*", xattr); +-- +1.8.3.1 + diff --git a/SOURCES/0214-geo-rep-Upgrading-config-file-to-new-version.patch b/SOURCES/0214-geo-rep-Upgrading-config-file-to-new-version.patch new file mode 100644 index 0000000..711aa3b --- /dev/null +++ b/SOURCES/0214-geo-rep-Upgrading-config-file-to-new-version.patch @@ -0,0 +1,114 @@ +From 76921775b0a6760276060409882c0556f19d8d01 Mon Sep 17 00:00:00 2001 +From: Shwetha K Acharya +Date: Wed, 29 May 2019 16:49:01 +0530 +Subject: [PATCH 214/221] geo-rep: Upgrading config file to new version + +- configuration handling is enhanced with patch +https://review.gluster.org/#/c/glusterfs/+/18257/ +- hence, the old configurations are not applied when +Geo-rep session is created in the old version and upgraded. + +This patch solves the issue. It, +- checks if the config file is old. +- parses required values from old config file and stores in new + config file, which ensures that configurations are applied on + upgrade. +- stores old config file as backup. 
+- handles changes in options introduced in + https://review.gluster.org/#/c/glusterfs/+/18257/ + +>fixes: bz#1707731 +>Change-Id: Iad8da6c1e1ae8ecf7c84dfdf8ea3ac6966d8a2a0 +>Signed-off-by: Shwetha K Acharya + +backport of https://review.gluster.org/#/c/glusterfs/+/22894/ + +Bug: 1708064 +Change-Id: Iad8da6c1e1ae8ecf7c84dfdf8ea3ac6966d8a2a0 +Signed-off-by: Shwetha K Acharya +Reviewed-on: https://code.engineering.redhat.com/gerrit/174743 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/gsyncd.py | 5 ++++ + geo-replication/syncdaemon/gsyncdconfig.py | 41 ++++++++++++++++++++++++++++++ + 2 files changed, 46 insertions(+) + +diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py +index effe0ce..a4c6f32 100644 +--- a/geo-replication/syncdaemon/gsyncd.py ++++ b/geo-replication/syncdaemon/gsyncd.py +@@ -253,6 +253,11 @@ def main(): + if args.subcmd == "slave": + override_from_args = True + ++ if args.subcmd == "monitor": ++ ret = gconf.is_config_file_old(config_file, args.master, extra_tmpl_args["slavevol"]) ++ if ret is not None: ++ gconf.config_upgrade(config_file, ret) ++ + # Load Config file + gconf.load(GLUSTERFS_CONFDIR + "/gsyncd.conf", + config_file, +diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py +index 23a1c57..7edc582 100644 +--- a/geo-replication/syncdaemon/gsyncdconfig.py ++++ b/geo-replication/syncdaemon/gsyncdconfig.py +@@ -14,6 +14,7 @@ try: + except ImportError: + from configparser import ConfigParser, NoSectionError + import os ++import shutil + from string import Template + from datetime import datetime + +@@ -325,6 +326,46 @@ class Gconf(object): + + return False + ++def is_config_file_old(config_file, mastervol, slavevol): ++ cnf = ConfigParser() ++ cnf.read(config_file) ++ session_section = "peers %s %s" % (mastervol, slavevol) ++ try: ++ return dict(cnf.items(session_section)) ++ except NoSectionError: ++ return None ++ ++def config_upgrade(config_file, ret): ++ config_file_backup = os.path.join(os.path.dirname(config_file), "gsyncd.conf.bkp") ++ ++ #copy old config file in a backup file ++ shutil.copyfile(config_file, config_file_backup) ++ ++ #write a new config file ++ config = ConfigParser() ++ config.add_section('vars') ++ ++ for key, value in ret.items(): ++ #handle option name changes ++ if key == "use_tarssh": ++ new_key = "sync-method" ++ if value == "true": ++ new_value = "tarssh" ++ else: ++ new_value = "rsync" ++ config.set('vars', new_key, new_value) ++ ++ if key == "timeout": ++ new_key = "slave-timeout" ++ config.set('vars', new_key, value) ++ ++ #for changes like: ignore_deletes to ignore-deletes ++ new_key = key.replace("_", "-") ++ config.set('vars', new_key, value) ++ ++ with open(config_file, 'w') as configfile: ++ config.write(configfile) ++ + + def validate_unixtime(value): + try: +-- +1.8.3.1 + diff --git a/SOURCES/0215-posix-modify-storage.reserve-option-to-take-size-and.patch b/SOURCES/0215-posix-modify-storage.reserve-option-to-take-size-and.patch new file mode 100644 index 0000000..3e4217b --- /dev/null +++ b/SOURCES/0215-posix-modify-storage.reserve-option-to-take-size-and.patch @@ -0,0 +1,319 @@ +From 0c485548b4126ed907dec9941209b1b1312d0b5d Mon Sep 17 00:00:00 2001 +From: Sheetal Pamecha +Date: Wed, 19 Jun 2019 15:08:58 +0530 +Subject: [PATCH 215/221] posix: modify storage.reserve option to take size and + percent + +* reverting changes made in +https://review.gluster.org/#/c/glusterfs/+/21686/ + 
+* Now storage.reserve can take value in percent or bytes + +> fixes: bz#1651445 +> Change-Id: Id4826210ec27991c55b17d1fecd90356bff3e036 +> Signed-off-by: Sheetal Pamecha +> Cherry pick from commit 5cbc87d8b8f1287e81c38b793b8d13b057208c62 +> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22900/ + +BUG: 1573077 +Change-Id: Id4826210ec27991c55b17d1fecd90356bff3e036 +Signed-off-by: Sheetal Pamecha +Reviewed-on: https://code.engineering.redhat.com/gerrit/174744 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/bugs/posix/bug-1651445.t | 29 +++++++++------------- + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 33 ------------------------- + xlators/storage/posix/src/posix-common.c | 33 +++++++++++-------------- + xlators/storage/posix/src/posix-helpers.c | 26 +++++++++---------- + xlators/storage/posix/src/posix-inode-fd-ops.c | 15 ++++++----- + xlators/storage/posix/src/posix.h | 4 +-- + 6 files changed, 51 insertions(+), 89 deletions(-) + +diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t +index f6f1833..5248d47 100644 +--- a/tests/bugs/posix/bug-1651445.t ++++ b/tests/bugs/posix/bug-1651445.t +@@ -17,39 +17,34 @@ TEST $CLI volume start $V0 + + TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 + +-TEST $CLI volume set $V0 storage.reserve-size 10MB ++#Setting the size in bytes ++TEST $CLI volume set $V0 storage.reserve 40MB + +-#No effect as priority to reserve-size +-TEST $CLI volume set $V0 storage.reserve 20 ++#wait 5s to reset disk_space_full flag ++sleep 5 + + TEST dd if=/dev/zero of=$M0/a bs=100M count=1 +-sleep 5 ++TEST dd if=/dev/zero of=$M0/b bs=10M count=1 + +-#Below dd confirms posix is giving priority to reserve-size +-TEST dd if=/dev/zero of=$M0/b bs=40M count=1 ++# Wait 5s to update disk_space_full flag because thread check disk space ++# after every 5s + + sleep 5 ++# setup_lvm create lvm partition of 150M and 40M are reserve so after ++# consuming more than 110M next dd should fail + TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1 + + rm -rf $M0/* +-#Size will reserve from the previously set reserve option = 20% +-TEST $CLI volume set $V0 storage.reserve-size 0 + +-#Overwrite reserve option +-TEST $CLI volume set $V0 storage.reserve-size 40MB ++#Setting the size in percent and repeating the above steps ++TEST $CLI volume set $V0 storage.reserve 40 + +-#wait 5s to reset disk_space_full flag + sleep 5 + +-TEST dd if=/dev/zero of=$M0/a bs=100M count=1 ++TEST dd if=/dev/zero of=$M0/a bs=80M count=1 + TEST dd if=/dev/zero of=$M0/b bs=10M count=1 + +-# Wait 5s to update disk_space_full flag because thread check disk space +-# after every 5s +- + sleep 5 +-# setup_lvm create lvm partition of 150M and 40M are reserve so after +-# consuming more than 110M next dd should fail + TEST ! 
dd if=/dev/zero of=$M0/c bs=5M count=1 + + TEST $CLI volume stop $V0 +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 3a7ab83..7a83124 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -1231,30 +1231,6 @@ out: + + return ret; + } +-static int +-validate_size(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, char *value, +- char **op_errstr) +-{ +- xlator_t *this = NULL; +- uint64_t size = 0; +- int ret = -1; +- +- this = THIS; +- GF_VALIDATE_OR_GOTO("glusterd", this, out); +- ret = gf_string2bytesize_uint64(value, &size); +- if (ret < 0) { +- gf_asprintf(op_errstr, +- "%s is not a valid size. %s " +- "expects a valid value in bytes", +- value, key); +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s", +- *op_errstr); +- } +-out: +- gf_msg_debug("glusterd", 0, "Returning %d", ret); +- +- return ret; +-} + + /* dispatch table for VOLUME SET + * ----------------------------- +@@ -2854,15 +2830,6 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .op_version = GD_OP_VERSION_3_13_0, + }, + { +- .key = "storage.reserve-size", +- .voltype = "storage/posix", +- .value = "0", +- .validate_fn = validate_size, +- .description = "If set, priority will be given to " +- "storage.reserve-size over storage.reserve", +- .op_version = GD_OP_VERSION_7_0, +- }, +- { + .option = "health-check-timeout", + .key = "storage.health-check-timeout", + .type = NO_DOC, +diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c +index 0f70af5..bfe2cb0 100644 +--- a/xlators/storage/posix/src/posix-common.c ++++ b/xlators/storage/posix/src/posix-common.c +@@ -345,12 +345,14 @@ posix_reconfigure(xlator_t *this, dict_t *options) + " fallback to :"); + } + +- GF_OPTION_RECONF("reserve-size", priv->disk_reserve_size, options, size, ++ GF_OPTION_RECONF("reserve", priv->disk_reserve, options, percent_or_size, + out); ++ /* option can be any one of percent or bytes */ ++ priv->disk_unit = 0; ++ if (priv->disk_reserve < 100.0) ++ priv->disk_unit = 'p'; + +- GF_OPTION_RECONF("reserve", priv->disk_reserve_percent, options, uint32, +- out); +- if (priv->disk_reserve_size || priv->disk_reserve_percent) { ++ if (priv->disk_reserve) { + ret = posix_spawn_disk_space_check_thread(this); + if (ret) { + gf_msg(this->name, GF_LOG_INFO, 0, P_MSG_DISK_SPACE_CHECK_FAILED, +@@ -975,11 +977,15 @@ posix_init(xlator_t *this) + + _private->disk_space_check_active = _gf_false; + _private->disk_space_full = 0; +- GF_OPTION_INIT("reserve-size", _private->disk_reserve_size, size, out); + +- GF_OPTION_INIT("reserve", _private->disk_reserve_percent, uint32, out); ++ GF_OPTION_INIT("reserve", _private->disk_reserve, percent_or_size, out); ++ ++ /* option can be any one of percent or bytes */ ++ _private->disk_unit = 0; ++ if (_private->disk_reserve < 100.0) ++ _private->disk_unit = 'p'; + +- if (_private->disk_reserve_size || _private->disk_reserve_percent) { ++ if (_private->disk_reserve) { + ret = posix_spawn_disk_space_check_thread(this); + if (ret) { + gf_msg(this->name, GF_LOG_INFO, 0, P_MSG_DISK_SPACE_CHECK_FAILED, +@@ -1221,23 +1227,14 @@ struct volume_options posix_options[] = { + .op_version = {GD_OP_VERSION_4_0_0}, + .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC}, + {.key = {"reserve"}, +- .type = GF_OPTION_TYPE_INT, ++ .type = GF_OPTION_TYPE_PERCENT_OR_SIZET, + .min = 0, + .default_value = "1", + .validate = GF_OPT_VALIDATE_MIN, +- .description = 
"Percentage of disk space to be reserved." ++ .description = "Percentage/Size of disk space to be reserved." + " Set to 0 to disable", + .op_version = {GD_OP_VERSION_3_13_0}, + .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC}, +- {.key = {"reserve-size"}, +- .type = GF_OPTION_TYPE_SIZET, +- .min = 0, +- .default_value = "0", +- .validate = GF_OPT_VALIDATE_MIN, +- .description = "size in megabytes to be reserved for disk space." +- " Set to 0 to disable", +- .op_version = {GD_OP_VERSION_7_0}, +- .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC}, + {.key = {"batch-fsync-mode"}, + .type = GF_OPTION_TYPE_STR, + .default_value = "reverse-fsync", +diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c +index 849db3d..07169b5 100644 +--- a/xlators/storage/posix/src/posix-helpers.c ++++ b/xlators/storage/posix/src/posix-helpers.c +@@ -2246,11 +2246,11 @@ posix_disk_space_check(xlator_t *this) + struct posix_private *priv = NULL; + char *subvol_path = NULL; + int op_ret = 0; +- uint64_t size = 0; +- int percent = 0; ++ double size = 0; ++ double percent = 0; + struct statvfs buf = {0}; +- uint64_t totsz = 0; +- uint64_t freesz = 0; ++ double totsz = 0; ++ double freesz = 0; + + GF_VALIDATE_OR_GOTO(this->name, this, out); + priv = this->private; +@@ -2258,14 +2258,6 @@ posix_disk_space_check(xlator_t *this) + + subvol_path = priv->base_path; + +- if (priv->disk_reserve_size) { +- size = priv->disk_reserve_size; +- } else { +- percent = priv->disk_reserve_percent; +- totsz = (buf.f_blocks * buf.f_bsize); +- size = ((totsz * percent) / 100); +- } +- + op_ret = sys_statvfs(subvol_path, &buf); + + if (op_ret == -1) { +@@ -2273,8 +2265,16 @@ posix_disk_space_check(xlator_t *this) + "statvfs failed on %s", subvol_path); + goto out; + } +- freesz = (buf.f_bfree * buf.f_bsize); + ++ if (priv->disk_unit == 'p') { ++ percent = priv->disk_reserve; ++ totsz = (buf.f_blocks * buf.f_bsize); ++ size = ((totsz * percent) / 100); ++ } else { ++ size = priv->disk_reserve; ++ } ++ ++ freesz = (buf.f_bfree * buf.f_bsize); + if (freesz <= size) { + priv->disk_space_full = 1; + } else { +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index b92c411..fc847d6 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -720,7 +720,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags, + thread after every 5 sec sleep to working correctly storage.reserve + option behaviour + */ +- if (priv->disk_reserve_size || priv->disk_reserve_percent) ++ if (priv->disk_reserve) + posix_disk_space_check(this); + + DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, ret, ret, out); +@@ -2306,7 +2306,7 @@ posix_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata) + }; + struct posix_private *priv = NULL; + int shared_by = 1; +- int percent = 0; ++ double percent = 0; + uint64_t reserved_blocks = 0; + + VALIDATE_OR_GOTO(frame, out); +@@ -2332,11 +2332,14 @@ posix_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata) + goto out; + } + +- if (priv->disk_reserve_size) { +- reserved_blocks = priv->disk_reserve_size / buf.f_bsize; ++ if (priv->disk_unit == 'p') { ++ percent = priv->disk_reserve; ++ reserved_blocks = (((buf.f_blocks * percent) / 100) + 0.5); + } else { +- percent = priv->disk_reserve_percent; +- reserved_blocks = (buf.f_blocks * percent) / 100; ++ if (buf.f_bsize) { ++ reserved_blocks = (priv->disk_reserve + buf.f_bsize - 
1) / ++ buf.f_bsize; ++ } + } + + if (buf.f_bfree > reserved_blocks) { +diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h +index 4364b96..b0935a7 100644 +--- a/xlators/storage/posix/src/posix.h ++++ b/xlators/storage/posix/src/posix.h +@@ -225,8 +225,8 @@ struct posix_private { + pthread_t health_check; + gf_boolean_t health_check_active; + +- uint32_t disk_reserve_percent; +- uint64_t disk_reserve_size; ++ double disk_reserve; ++ char disk_unit; + uint32_t disk_space_full; + pthread_t disk_space_check; + gf_boolean_t disk_space_check_active; +-- +1.8.3.1 + diff --git a/SOURCES/0216-Test-case-fixe-for-downstream-3.5.0.patch b/SOURCES/0216-Test-case-fixe-for-downstream-3.5.0.patch new file mode 100644 index 0000000..bc4ce60 --- /dev/null +++ b/SOURCES/0216-Test-case-fixe-for-downstream-3.5.0.patch @@ -0,0 +1,29 @@ +From b2204969bb0dba5de32685e1021fa44d0c406813 Mon Sep 17 00:00:00 2001 +From: Sunil Kumar Acharya +Date: Tue, 25 Jun 2019 12:17:10 +0530 +Subject: [PATCH 216/221] Test case fixe for downstream 3.5.0 + +Mark bug-1319374-THIS-crash.t as bad. + +BUG: 1704562 +Change-Id: I6afeb9a74ab88af7b741454367005250cd4c0e0f +Signed-off-by: Sunil Kumar Acharya +Reviewed-on: https://code.engineering.redhat.com/gerrit/174652 +Tested-by: RHGS Build Bot +--- + tests/bugs/gfapi/bug-1319374-THIS-crash.t | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/tests/bugs/gfapi/bug-1319374-THIS-crash.t b/tests/bugs/gfapi/bug-1319374-THIS-crash.t +index 8d3db42..429d71e 100755 +--- a/tests/bugs/gfapi/bug-1319374-THIS-crash.t ++++ b/tests/bugs/gfapi/bug-1319374-THIS-crash.t +@@ -25,3 +25,5 @@ TEST $(dirname $0)/bug-1319374 $H0 $V0 $logdir/bug-1319374.log + cleanup_tester $(dirname $0)/bug-1319374 + + cleanup; ++#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1723673 ++#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1723673 +-- +1.8.3.1 + diff --git a/SOURCES/0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch b/SOURCES/0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch new file mode 100644 index 0000000..055b9f2 --- /dev/null +++ b/SOURCES/0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch @@ -0,0 +1,75 @@ +From 71ff9b7c6356e521d98ee025554b63dd23db9836 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Thu, 13 Jun 2019 22:43:47 +0530 +Subject: [PATCH 217/221] uss: Fix tar issue with ctime and uss enabled + +Problem: +If ctime and uss enabled, tar still complains with 'file +changed as we read it' + +Cause: +To clear nfs cache (gluster-nfs), the ctime was incremented +in snap-view client on stat cbk. + +Fix: +The ctime should not be incremented manually. Since gluster-nfs +is planning to be deprecated, this code is being removed to +fix the issue. 
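+
+For context on why tar objects, here is a minimal sketch of the
+archiver-side check (an illustration only, not GNU tar's actual
+code; it assumes plain POSIX stat(2), so the fields are struct
+stat's st_ctim rather than gluster's iatt, and
+changed_while_reading() is a hypothetical helper name):
+
+    #include <sys/stat.h>
+
+    /* tar-style "file changed as we read it" detection: compare
+     * ctime before and after reading an entry. Even an artificial
+     * ia_ctime_nsec++ surfacing through stat() trips this. */
+    static int
+    changed_while_reading(const char *path, const struct stat *before)
+    {
+        struct stat after;
+
+        if (stat(path, &after) != 0)
+            return -1;
+
+        return (after.st_ctim.tv_sec != before->st_ctim.tv_sec ||
+                after.st_ctim.tv_nsec != before->st_ctim.tv_nsec);
+    }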
+ +Backport of: + > Patch: https://review.gluster.org/22861 + > Change-Id: Iae7f100c20fce880a50b008ba716077350281404 + > fixes: bz#1720290 + > Signed-off-by: Kotresh HR + +Change-Id: Iae7f100c20fce880a50b008ba716077350281404 +BUG: 1709301 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/173922 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + .../features/snapview-client/src/snapview-client.c | 22 +++++++++++++--------- + 1 file changed, 13 insertions(+), 9 deletions(-) + +diff --git a/xlators/features/snapview-client/src/snapview-client.c b/xlators/features/snapview-client/src/snapview-client.c +index 5d7986c..9c789ae 100644 +--- a/xlators/features/snapview-client/src/snapview-client.c ++++ b/xlators/features/snapview-client/src/snapview-client.c +@@ -577,20 +577,24 @@ gf_svc_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + int32_t op_ret, int32_t op_errno, struct iatt *buf, + dict_t *xdata) + { +- /* Consider a testcase: ++ /* TODO: FIX ME ++ * Consider a testcase: + * #mount -t nfs host1:/vol1 /mnt + * #ls /mnt + * #ls /mnt/.snaps (As expected this fails) + * #gluster volume set vol1 features.uss enable +- * Now `ls /mnt/.snaps` should work, +- * but fails with No such file or directory. +- * This is because NFS client caches the list of files in +- * a directory. This cache is updated if there are any changes +- * in the directory attributes. To solve this problem change +- * a attribute 'ctime' when USS is enabled ++ * Now `ls /mnt/.snaps` should work, but fails with No such file or ++ * directory. This is because NFS client (gNFS) caches the list of files ++ * in a directory. This cache is updated if there are any changes in the ++ * directory attributes. So, one way to solve this problem is to change ++ * 'ctime' attribute when USS is enabled as below. ++ * ++ * if (op_ret == 0 && IA_ISDIR(buf->ia_type)) ++ * buf->ia_ctime_nsec++; ++ * ++ * But this is not the ideal solution as applications see the unexpected ++ * ctime change causing failures. + */ +- if (op_ret == 0 && IA_ISDIR(buf->ia_type)) +- buf->ia_ctime_nsec++; + + SVC_STACK_UNWIND(stat, frame, op_ret, op_errno, buf, xdata); + return 0; +-- +1.8.3.1 + diff --git a/SOURCES/0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch b/SOURCES/0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch new file mode 100644 index 0000000..b7db655 --- /dev/null +++ b/SOURCES/0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch @@ -0,0 +1,88 @@ +From 8cc6d8af00303c445b94715c92fe9e3e01edb867 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Mon, 24 Jun 2019 15:49:04 +0530 +Subject: [PATCH 218/221] graph/shd: Use glusterfs_graph_deactivate to free the + xl rec + +We were using glusterfs_graph_fini to free the xl rec from +glusterfs_process_volfp as well as glusterfs_graph_cleanup. + +Instead we can use glusterfs_graph_deactivate, which does +fini as well as other common rec free. 
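+
+As a rough sketch of that common teardown (hedged: a simplification
+assuming the libglusterfs xlator.h declarations, not the full graph
+walk; xlator_teardown_sketch() is a hypothetical name):
+
+    /* per-xlator cleanup shared by the cleanup paths, instead of
+     * each path stopping at fini() alone */
+    static void
+    xlator_teardown_sketch(xlator_t *xl)
+    {
+        xl->cleanup_starting = 1;
+        if (xl->init_succeeded) {
+            xl->fini(xl);
+            if (xl->local_pool) {
+                mem_pool_destroy(xl->local_pool);
+                xl->local_pool = NULL;
+            }
+            if (xl->itable) {
+                inode_table_destroy(xl->itable);
+                xl->itable = NULL;
+            }
+            xl->init_succeeded = 0;
+        }
+    }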
+ +Backport of:https://review.gluster.org/#/c/glusterfs/+/22904/ + +>Change-Id: Ie4a5f2771e5254aa5ed9f00c3672a6d2cc8e4bc1 +>Updates: bz#1716695 +>Signed-off-by: Mohammed Rafi KC + +Change-Id: I09d7124366bc690ceca9e8d0adee8a0dc8081091 +BUG: 1711939 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/174814 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 2 +- + libglusterfs/src/xlator.c | 9 ++++++++- + xlators/features/shard/src/shard.c | 3 +++ + 3 files changed, 12 insertions(+), 2 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 27d9335..5b95fd6 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1394,7 +1394,7 @@ glusterfs_graph_cleanup(void *arg) + + pthread_mutex_lock(&ctx->cleanup_lock); + { +- glusterfs_graph_fini(graph); ++ glusterfs_graph_deactivate(graph); + glusterfs_graph_destroy(graph); + } + pthread_mutex_unlock(&ctx->cleanup_lock); +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index 71e1ed4..d9d3441 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -659,6 +659,7 @@ xlator_fini_rec(xlator_t *xl) + trav = trav->next; + } + ++ xl->cleanup_starting = 1; + if (xl->init_succeeded) { + if (xl->fini) { + old_THIS = THIS; +@@ -666,8 +667,14 @@ xlator_fini_rec(xlator_t *xl) + + xl->fini(xl); + +- if (xl->local_pool) ++ if (xl->local_pool) { + mem_pool_destroy(xl->local_pool); ++ xl->local_pool = NULL; ++ } ++ if (xl->itable) { ++ inode_table_destroy(xl->itable); ++ xl->itable = NULL; ++ } + + THIS = old_THIS; + } else { +diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c +index b248767..31c7eec 100644 +--- a/xlators/features/shard/src/shard.c ++++ b/xlators/features/shard/src/shard.c +@@ -6785,6 +6785,9 @@ fini(xlator_t *this) + + GF_VALIDATE_OR_GOTO("shard", this, out); + ++ /*Itable was not created by shard, hence setting to NULL.*/ ++ this->itable = NULL; ++ + mem_pool_destroy(this->local_pool); + this->local_pool = NULL; + +-- +1.8.3.1 + diff --git a/SOURCES/0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch b/SOURCES/0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch new file mode 100644 index 0000000..eefb890 --- /dev/null +++ b/SOURCES/0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch @@ -0,0 +1,35 @@ +From d9781ed4964d9e752fc880c8cd8afcbd2c561ebe Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Wed, 26 Jun 2019 15:58:33 +0530 +Subject: [PATCH 219/221] posix : add posix_set_ctime() in posix_ftruncate() + +Upstream references : +> release 6: https://review.gluster.org/#/c/glusterfs/+/22965/ +> mainline: https://review.gluster.org/#/c/glusterfs/+/22948/ + +Change-Id: I0cb5320fea71306e0283509ae47024f23874b53b +fixes: bz#1720163 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://code.engineering.redhat.com/gerrit/174837 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/storage/posix/src/posix-inode-fd-ops.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index fc847d6..c949f68 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -5059,6 +5059,8 @@ posix_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset, + goto out; + } + ++ posix_set_ctime(frame, this, NULL, pfd->fd, fd->inode, 
&postop); ++ + op_ret = 0; + + out: +-- +1.8.3.1 + diff --git a/SOURCES/0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch b/SOURCES/0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch new file mode 100644 index 0000000..07f702b --- /dev/null +++ b/SOURCES/0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch @@ -0,0 +1,190 @@ +From b963fa8bb71963127147d33bf609f439dd5bd107 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 27 Jun 2019 19:17:29 +0530 +Subject: [PATCH 220/221] graph/shd: Use top down approach while cleaning + xlator + +We were cleaning xlator from botton to top, which might +lead to problems when upper xlators trying to access +the xlator object loaded below. + +One such scenario is when fd_unref happens as part of the +fini call which might lead to calling the releasedir to +lower xlator. This will lead to invalid mem access + +Backport of:https://review.gluster.org/#/c/glusterfs/+/22968/ + +>Change-Id: I8a6cb619256fab0b0c01a2d564fc88287c4415a0 +>Updates: bz#1716695 +>Signed-off-by: Mohammed Rafi KC + +Change-Id: I22bbf99e9451183b3e0fe61b57b2440ab4163fe5 +BUG: 1711939 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/174882 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 10 +++++++++- + xlators/features/bit-rot/src/stub/bit-rot-stub.c | 1 + + xlators/features/changelog/src/changelog.c | 1 + + xlators/features/cloudsync/src/cloudsync.c | 4 +++- + xlators/features/index/src/index.c | 1 + + xlators/features/quiesce/src/quiesce.c | 1 + + xlators/features/read-only/src/worm.c | 1 + + xlators/features/sdfs/src/sdfs.c | 1 + + xlators/features/selinux/src/selinux.c | 2 ++ + xlators/features/trash/src/trash.c | 1 + + 10 files changed, 21 insertions(+), 2 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 5b95fd6..172dc61 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1193,6 +1193,14 @@ glusterfs_graph_fini(glusterfs_graph_t *graph) + if (trav->init_succeeded) { + trav->cleanup_starting = 1; + trav->fini(trav); ++ if (trav->local_pool) { ++ mem_pool_destroy(trav->local_pool); ++ trav->local_pool = NULL; ++ } ++ if (trav->itable) { ++ inode_table_destroy(trav->itable); ++ trav->itable = NULL; ++ } + trav->init_succeeded = 0; + } + trav = trav->next; +@@ -1394,7 +1402,7 @@ glusterfs_graph_cleanup(void *arg) + + pthread_mutex_lock(&ctx->cleanup_lock); + { +- glusterfs_graph_deactivate(graph); ++ glusterfs_graph_fini(graph); + glusterfs_graph_destroy(graph); + } + pthread_mutex_unlock(&ctx->cleanup_lock); +diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c +index 3f48a4b..03446be 100644 +--- a/xlators/features/bit-rot/src/stub/bit-rot-stub.c ++++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c +@@ -185,6 +185,7 @@ cleanup_lock: + pthread_mutex_destroy(&priv->lock); + free_mempool: + mem_pool_destroy(priv->local_pool); ++ priv->local_pool = NULL; + free_priv: + GF_FREE(priv); + this->private = NULL; +diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c +index d9025f3..2862d1e 100644 +--- a/xlators/features/changelog/src/changelog.c ++++ b/xlators/features/changelog/src/changelog.c +@@ -2790,6 +2790,7 @@ cleanup_options: + changelog_freeup_options(this, priv); + cleanup_mempool: + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + cleanup_priv: + 
GF_FREE(priv); + error_return: +diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c +index 26e512c..0ad987e 100644 +--- a/xlators/features/cloudsync/src/cloudsync.c ++++ b/xlators/features/cloudsync/src/cloudsync.c +@@ -200,8 +200,10 @@ cs_init(xlator_t *this) + + out: + if (ret == -1) { +- if (this->local_pool) ++ if (this->local_pool) { + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; ++ } + + cs_cleanup_private(priv); + +diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c +index 2f2a6d0..4ece7ff 100644 +--- a/xlators/features/index/src/index.c ++++ b/xlators/features/index/src/index.c +@@ -2478,6 +2478,7 @@ out: + GF_FREE(priv); + this->private = NULL; + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + } + + if (attr_inited) +diff --git a/xlators/features/quiesce/src/quiesce.c b/xlators/features/quiesce/src/quiesce.c +index bfd1116..06f58c9 100644 +--- a/xlators/features/quiesce/src/quiesce.c ++++ b/xlators/features/quiesce/src/quiesce.c +@@ -2536,6 +2536,7 @@ fini(xlator_t *this) + this->private = NULL; + + mem_pool_destroy(priv->local_pool); ++ priv->local_pool = NULL; + LOCK_DESTROY(&priv->lock); + GF_FREE(priv); + out: +diff --git a/xlators/features/read-only/src/worm.c b/xlators/features/read-only/src/worm.c +index 24196f8..7d13180 100644 +--- a/xlators/features/read-only/src/worm.c ++++ b/xlators/features/read-only/src/worm.c +@@ -569,6 +569,7 @@ fini(xlator_t *this) + mem_put(priv); + this->private = NULL; + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + out: + return; + } +diff --git a/xlators/features/sdfs/src/sdfs.c b/xlators/features/sdfs/src/sdfs.c +index f0247fd..164c632 100644 +--- a/xlators/features/sdfs/src/sdfs.c ++++ b/xlators/features/sdfs/src/sdfs.c +@@ -1429,6 +1429,7 @@ void + fini(xlator_t *this) + { + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + return; + } + +diff --git a/xlators/features/selinux/src/selinux.c b/xlators/features/selinux/src/selinux.c +index 58b4c5d..ce5fc90 100644 +--- a/xlators/features/selinux/src/selinux.c ++++ b/xlators/features/selinux/src/selinux.c +@@ -256,6 +256,7 @@ out: + GF_FREE(priv); + } + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + } + return ret; + } +@@ -284,6 +285,7 @@ fini(xlator_t *this) + GF_FREE(priv); + + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + + return; + } +diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c +index d668436..eb5007b 100644 +--- a/xlators/features/trash/src/trash.c ++++ b/xlators/features/trash/src/trash.c +@@ -2523,6 +2523,7 @@ out: + GF_FREE(priv); + } + mem_pool_destroy(this->local_pool); ++ this->local_pool = NULL; + } + return ret; + } +-- +1.8.3.1 + diff --git a/SOURCES/0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch b/SOURCES/0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch new file mode 100644 index 0000000..74e3796 --- /dev/null +++ b/SOURCES/0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch @@ -0,0 +1,75 @@ +From ac216eae4775f7d95877b247937e2a4a4828b1b2 Mon Sep 17 00:00:00 2001 +From: Raghavendra G +Date: Tue, 4 Jun 2019 19:22:45 +0530 +Subject: [PATCH 221/221] protocol/client: propagte GF_EVENT_CHILD_PING only + for connections to brick + +Two reasons: +* ping responses from glusterd may not be relevant for Halo + replication. Instead, it might be interested in only knowing whether + the brick itself is responsive. 
+* When a brick is killed, propagating GF_EVENT_CHILD_PING of ping + response from glusterd results in GF_EVENT_DISCONNECT spuriously + propagated to parent xlators. These DISCONNECT events are from the + connections client establishes with glusterd as part of its + reconnect logic. Without GF_EVENT_CHILD_PING, the last event + propagated to parent xlators would be the first DISCONNECT event + from brick and hence subsequent DISCONNECTS to glusterd are not + propagated as protocol/client prevents same event being propagated + to parent xlators consecutively. propagating GF_EVENT_CHILD_PING for + ping responses from glusterd would change the last_sent_event to + GF_EVENT_CHILD_PING and hence protocol/client cannot prevent + subsequent DISCONNECT events + +>Signed-off-by: Raghavendra G +>Fixes: bz#1716979 +>Change-Id: I50276680c52f05ca9e12149a3094923622d6eaef + +Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/22821/ + +BUG: 1703423 +Change-Id: I50276680c52f05ca9e12149a3094923622d6eaef +Signed-off-by: Sunil Kumar Acharya +Reviewed-on: https://code.engineering.redhat.com/gerrit/174883 +Tested-by: RHGS Build Bot +--- + xlators/protocol/client/src/client.c | 16 ++++++++++++---- + 1 file changed, 12 insertions(+), 4 deletions(-) + +diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c +index a372807..95e4be5 100644 +--- a/xlators/protocol/client/src/client.c ++++ b/xlators/protocol/client/src/client.c +@@ -2276,6 +2276,12 @@ client_mark_fd_bad(xlator_t *this) + return 0; + } + ++static int ++is_connection_to_brick(struct rpc_clnt *rpc) ++{ ++ return (rpc->conn.config.remote_port != 0); ++} ++ + int + client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, + void *data) +@@ -2297,10 +2303,12 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, + + switch (event) { + case RPC_CLNT_PING: { +- ret = default_notify(this, GF_EVENT_CHILD_PING, data); +- if (ret) +- gf_log(this->name, GF_LOG_INFO, "CHILD_PING notify failed"); +- conf->last_sent_event = GF_EVENT_CHILD_PING; ++ if (is_connection_to_brick(rpc)) { ++ ret = default_notify(this, GF_EVENT_CHILD_PING, data); ++ if (ret) ++ gf_log(this->name, GF_LOG_INFO, "CHILD_PING notify failed"); ++ conf->last_sent_event = GF_EVENT_CHILD_PING; ++ } + break; + } + case RPC_CLNT_CONNECT: { +-- +1.8.3.1 + diff --git a/SOURCES/0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch b/SOURCES/0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch new file mode 100644 index 0000000..3a492cb --- /dev/null +++ b/SOURCES/0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch @@ -0,0 +1,109 @@ +From 5f304e003cc24ff7877ab51bdfded0dbf8ec581b Mon Sep 17 00:00:00 2001 +From: N Balachandran +Date: Fri, 21 Jun 2019 09:04:19 +0530 +Subject: [PATCH 222/255] cluster/dht: Fixed a memleak in dht_rename_cbk + +Fixed a memleak in dht_rename_cbk when creating +a linkto file. 
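+
+Condensed from the hunks below, the leak and its fix look like this
+(a hedged before/after sketch with error handling omitted, not the
+verbatim code):
+
+    /* before: the copied loc is wound on the main frame and nothing
+     * ever releases its references, so they leak on every rename
+     * that needs a linkto file */
+    loc_t link_loc = {0};
+    loc_copy(&link_loc, &local->loc2);
+    dht_linkfile_create(frame, dht_rename_links_create_cbk, this,
+                        src_cached, dst_hashed, &link_loc);
+
+    /* after: the linkto create runs on its own copied frame whose
+     * dht local owns the loc; DHT_STACK_DESTROY in the cbk frees
+     * it, and main_frame lets the cbk resume the rename's unlink
+     * phase on the original frame */
+    link_frame = copy_frame(frame);
+    link_local = dht_local_init(link_frame, &local->loc2, NULL,
+                                GF_FOP_MAXVALUE);
+    link_local->main_frame = frame;
+    dht_linkfile_create(link_frame, dht_rename_links_create_cbk,
+                        this, src_cached, dst_hashed,
+                        &link_local->loc);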
+ +upstream: https://review.gluster.org/#/c/glusterfs/+/22912/ + +>Change-Id: I705adef3cb79e33806520fc2b15558e90e2c211c +>fixes: bz#1722698 +>Signed-off-by: N Balachandran + +BUG:1722512 +Change-Id: I8450cac82a0e1611e698ffac476ea5516e614236 +Signed-off-by: N Balachandran +Reviewed-on: https://code.engineering.redhat.com/gerrit/175181 +Tested-by: RHGS Build Bot +Reviewed-by: Susant Palai +--- + xlators/cluster/dht/src/dht-rename.c | 44 +++++++++++++++++++++++++++--------- + 1 file changed, 33 insertions(+), 11 deletions(-) + +diff --git a/xlators/cluster/dht/src/dht-rename.c b/xlators/cluster/dht/src/dht-rename.c +index 893b451..5ba2373 100644 +--- a/xlators/cluster/dht/src/dht-rename.c ++++ b/xlators/cluster/dht/src/dht-rename.c +@@ -1009,9 +1009,11 @@ dht_rename_links_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + { + xlator_t *prev = NULL; + dht_local_t *local = NULL; ++ call_frame_t *main_frame = NULL; + + prev = cookie; + local = frame->local; ++ main_frame = local->main_frame; + + /* TODO: Handle this case in lookup-optimize */ + if (op_ret == -1) { +@@ -1024,7 +1026,8 @@ dht_rename_links_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + dht_linkfile_attr_heal(frame, this); + } + +- dht_rename_unlink(frame, this); ++ dht_rename_unlink(main_frame, this); ++ DHT_STACK_DESTROY(frame); + return 0; + } + +@@ -1040,7 +1043,8 @@ dht_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + xlator_t *src_cached = NULL; + xlator_t *dst_hashed = NULL; + xlator_t *dst_cached = NULL; +- loc_t link_loc = {0}; ++ call_frame_t *link_frame = NULL; ++ dht_local_t *link_local = NULL; + + local = frame->local; + prev = cookie; +@@ -1110,18 +1114,36 @@ dht_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + + /* Create the linkto file for the dst file */ + if ((src_cached == dst_cached) && (dst_hashed != dst_cached)) { +- loc_copy(&link_loc, &local->loc2); +- if (link_loc.inode) +- inode_unref(link_loc.inode); +- link_loc.inode = inode_ref(local->loc.inode); +- gf_uuid_copy(local->gfid, local->loc.inode->gfid); +- gf_uuid_copy(link_loc.gfid, local->loc.inode->gfid); +- +- dht_linkfile_create(frame, dht_rename_links_create_cbk, this, +- src_cached, dst_hashed, &link_loc); ++ link_frame = copy_frame(frame); ++ if (!link_frame) { ++ goto unlink; ++ } ++ ++ /* fop value sent as maxvalue because it is not used ++ * anywhere in this case */ ++ link_local = dht_local_init(link_frame, &local->loc2, NULL, ++ GF_FOP_MAXVALUE); ++ if (!link_local) { ++ goto unlink; ++ } ++ ++ if (link_local->loc.inode) ++ inode_unref(link_local->loc.inode); ++ link_local->loc.inode = inode_ref(local->loc.inode); ++ link_local->main_frame = frame; ++ link_local->stbuf = local->stbuf; ++ gf_uuid_copy(link_local->gfid, local->loc.inode->gfid); ++ ++ dht_linkfile_create(link_frame, dht_rename_links_create_cbk, this, ++ src_cached, dst_hashed, &link_local->loc); + return 0; + } + ++unlink: ++ ++ if (link_frame) { ++ DHT_STACK_DESTROY(link_frame); ++ } + dht_rename_unlink(frame, this); + return 0; + +-- +1.8.3.1 + diff --git a/SOURCES/0223-change-get_real_filename-implementation-to-use-ENOAT.patch b/SOURCES/0223-change-get_real_filename-implementation-to-use-ENOAT.patch new file mode 100644 index 0000000..a533388 --- /dev/null +++ b/SOURCES/0223-change-get_real_filename-implementation-to-use-ENOAT.patch @@ -0,0 +1,123 @@ +From 36b0bd86321436a951f225fcf2e921390ed8dc33 Mon Sep 17 00:00:00 2001 +From: Michael Adam +Date: Thu, 20 Jun 2019 13:09:37 +0200 +Subject: [PATCH 223/255] change 
get_real_filename implementation to use + ENOATTR instead of ENOENT + +get_real_filename is implemented as a virtual extended attribute to help +Samba implement the case-insensitive but case preserving SMB protocol +more efficiently. It is implemented as a getxattr call on the parent directory +with the virtual key of "get_real_filename:" by looking for a +spelling with different case for the provided file/dir name () +and returning this correct spelling as a result if the entry is found. +Originally (05aaec645a6262d431486eb5ac7cd702646cfcfb), the +implementation used the ENOENT errno to return the authoritative answer +that does not exist in any case folding. + +Now this implementation is actually a violation or misuse of the defined +API for the getxattr call which returns ENOENT for the case that the dir +that the call is made against does not exist and ENOATTR (or the synonym +ENODATA) for the case that the xattr does not exist. + +This was not a problem until the gluster fuse-bridge was changed +to do map ENOENT to ESTALE in 59629f1da9dca670d5dcc6425f7f89b3e96b46bf, +after which we the getxattr call for get_real_filename returned an +ESTALE instead of ENOENT breaking the expectation in Samba. + +It is an independent problem that ESTALE should not leak out to user +space but is intended to trigger retries between fuse and gluster. +But nevertheless, the semantics seem to be incorrect here and should +be changed. + +This patch changes the implementation of the get_real_filename virtual +xattr to correctly return ENOATTR instead of ENOENT if the file/directory +being looked up is not found. + +The Samba glusterfs_fuse vfs module which takes advantage of the +get_real_filename over a fuse mount will receive a corresponding change +to map ENOATTR to ENOENT. Without this change, it will still work +correctly, but the performance optimization for nonexisting files is +lost. On the other hand side, this change removes the distinction +between the old not-implemented case and the implemented case. +So Samba changed to treat ENOATTR like ENOENT will not work correctly +any more against old servers that don't implement get_real_filename. +I.e. existing files will be reported as non-existing + +Backport of https://review.gluster.org/c/glusterfs/+/22925 + +Change-Id: I971b427ab8410636d5d201157d9af70e0d075b67 +fixes: bz#1724089 +Signed-off-by: Michael Adam +Reviewed-on: https://code.engineering.redhat.com/gerrit/175012 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + xlators/cluster/dht/src/dht-common.c | 8 ++++---- + xlators/storage/posix/src/posix-inode-fd-ops.c | 4 ++-- + 2 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c +index 9a6ea5b..219b072 100644 +--- a/xlators/cluster/dht/src/dht-common.c ++++ b/xlators/cluster/dht/src/dht-common.c +@@ -4618,7 +4618,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie, + + LOCK(&frame->lock); + { +- if (local->op_errno == ENODATA || local->op_errno == EOPNOTSUPP) { ++ if (local->op_errno == EOPNOTSUPP) { + /* Nothing to do here, we have already found + * a subvol which does not have the get_real_filename + * optimization. If condition is for simple logic. +@@ -4627,7 +4627,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie, + } + + if (op_ret == -1) { +- if (op_errno == ENODATA || op_errno == EOPNOTSUPP) { ++ if (op_errno == EOPNOTSUPP) { + /* This subvol does not have the optimization. 
+ * Better let the user know we don't support it. + * Remove previous results if any. +@@ -4655,7 +4655,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie, + goto post_unlock; + } + +- if (op_errno == ENOENT) { ++ if (op_errno == ENOATTR) { + /* Do nothing, our defaults are set to this. + */ + goto unlock; +@@ -4723,7 +4723,7 @@ dht_getxattr_get_real_filename(call_frame_t *frame, xlator_t *this, loc_t *loc, + cnt = local->call_cnt = layout->cnt; + + local->op_ret = -1; +- local->op_errno = ENOENT; ++ local->op_errno = ENOATTR; + + for (i = 0; i < cnt; i++) { + subvol = layout->list[i].xlator; +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index c949f68..ea3b69c 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -2954,7 +2954,7 @@ posix_xattr_get_real_filename(call_frame_t *frame, xlator_t *this, loc_t *loc, + (void)sys_closedir(fd); + + if (!found) +- return -ENOENT; ++ return -ENOATTR; + + ret = dict_set_dynstr(dict, (char *)key, found); + if (ret) { +@@ -3422,7 +3422,7 @@ posix_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, + if (ret < 0) { + op_ret = -1; + op_errno = -ret; +- if (op_errno == ENOENT) { ++ if (op_errno == ENOATTR) { + gf_msg_debug(this->name, 0, + "Failed to get " + "real filename (%s, %s)", +-- +1.8.3.1 + diff --git a/SOURCES/0224-core-replace-inet_addr-with-inet_pton.patch b/SOURCES/0224-core-replace-inet_addr-with-inet_pton.patch new file mode 100644 index 0000000..f9a3b56 --- /dev/null +++ b/SOURCES/0224-core-replace-inet_addr-with-inet_pton.patch @@ -0,0 +1,53 @@ +From 3528c4fb59ca4d3efda2cf0689b7549e449bb91b Mon Sep 17 00:00:00 2001 +From: Rinku Kothiya +Date: Fri, 14 Jun 2019 07:53:06 +0000 +Subject: [PATCH 224/255] core: replace inet_addr with inet_pton + +Fixes warning raised by RPMDiff on the use of inet_addr, which may +impact Ipv6 support + +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22866/ + +>fixes: bz#1721385 +>Change-Id: Id2d9afa1747efa64bc79d90dd2566bff54deedeb +>Signed-off-by: Rinku Kothiya + +BUG: 1698435 +Change-Id: Id2d9afa1747efa64bc79d90dd2566bff54deedeb +Signed-off-by: Rinku Kothiya +Reviewed-on: https://code.engineering.redhat.com/gerrit/175318 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/events.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c +index 9b3a226..2509767 100644 +--- a/libglusterfs/src/events.c ++++ b/libglusterfs/src/events.c +@@ -41,6 +41,7 @@ _gf_event(eventtypes_t event, const char *fmt, ...) + char *host = NULL; + struct addrinfo hints; + struct addrinfo *result = NULL; ++ xlator_t *this = THIS; + + /* Global context */ + ctx = THIS->ctx; +@@ -82,7 +83,12 @@ _gf_event(eventtypes_t event, const char *fmt, ...) 
+ /* Socket Configurations */ + server.sin_family = AF_INET; + server.sin_port = htons(EVENT_PORT); +- server.sin_addr.s_addr = inet_addr(host); ++ ret = inet_pton(server.sin_family, host, &server.sin_addr); ++ if (ret <= 0) { ++ gf_msg(this->name, GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, ++ "inet_pton failed with return code %d", ret); ++ goto out; ++ } + memset(&server.sin_zero, '\0', sizeof(server.sin_zero)); + + va_start(arguments, fmt); +-- +1.8.3.1 + diff --git a/SOURCES/0225-tests-utils-Fix-py2-py3-util-python-scripts.patch b/SOURCES/0225-tests-utils-Fix-py2-py3-util-python-scripts.patch new file mode 100644 index 0000000..5ad185d --- /dev/null +++ b/SOURCES/0225-tests-utils-Fix-py2-py3-util-python-scripts.patch @@ -0,0 +1,448 @@ +From 9d10b1fd102dc2d5bfa71891ded52b7a8f5e08d8 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Thu, 6 Jun 2019 12:54:04 +0530 +Subject: [PATCH 225/255] tests/utils: Fix py2/py3 util python scripts + +Following files are fixed. + +tests/bugs/distribute/overlap.py +tests/utils/changelogparser.py +tests/utils/create-files.py +tests/utils/gfid-access.py +tests/utils/libcxattr.py + +> upstream patch link : https://review.gluster.org/#/c/glusterfs/+/22829/ + +>Change-Id: I3db857cc19e19163d368d913eaec1269fbc37140 +>updates: bz#1193929 +>Signed-off-by: Kotresh HR + +Change-Id: I3db857cc19e19163d368d913eaec1269fbc37140 +BUG: 1704562 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/175483 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + tests/bugs/distribute/overlap.py | 2 +- + tests/bugs/glusterfs/bug-902610.t | 2 +- + tests/utils/changelogparser.py | 5 +- + tests/utils/create-files.py | 9 +- + tests/utils/gfid-access.py | 62 +++++++++---- + tests/utils/libcxattr.py | 22 +++-- + tests/utils/py2py3.py | 186 ++++++++++++++++++++++++++++++++++++++ + 7 files changed, 258 insertions(+), 30 deletions(-) + create mode 100644 tests/utils/py2py3.py + +diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py +index 0941d37..2813979 100755 +--- a/tests/bugs/distribute/overlap.py ++++ b/tests/bugs/distribute/overlap.py +@@ -17,7 +17,7 @@ def calculate_one (ov, nv): + + def calculate_all (values): + total = 0 +- nv_index = len(values) / 2 ++ nv_index = len(values) // 2 + for old_val in values[:nv_index]: + new_val = values[nv_index] + nv_index += 1 +diff --git a/tests/bugs/glusterfs/bug-902610.t b/tests/bugs/glusterfs/bug-902610.t +index b45e92b..112c947 100755 +--- a/tests/bugs/glusterfs/bug-902610.t ++++ b/tests/bugs/glusterfs/bug-902610.t +@@ -28,7 +28,7 @@ function get_layout() + fi + + # Figure out where the join point is. +- target=$( $PYTHON -c "print '%08x' % (0x$layout1_e + 1)") ++ target=$( $PYTHON -c "print('%08x' % (0x$layout1_e + 1))") + #echo "target for layout2 = $target" > /dev/tty + + # The second layout should cover everything that the first doesn't. 
+diff --git a/tests/utils/changelogparser.py b/tests/utils/changelogparser.py +index e8e252d..3b8f81d 100644 +--- a/tests/utils/changelogparser.py ++++ b/tests/utils/changelogparser.py +@@ -125,7 +125,10 @@ class Record(object): + return repr(self.__dict__) + + def __str__(self): +- return unicode(self).encode('utf-8') ++ if sys.version_info >= (3,): ++ return self.__unicode__() ++ else: ++ return unicode(self).encode('utf-8') + + + def get_num_tokens(data, tokens, version=Version.V11): +diff --git a/tests/utils/create-files.py b/tests/utils/create-files.py +index b2a1961..04736e9 100755 +--- a/tests/utils/create-files.py ++++ b/tests/utils/create-files.py +@@ -19,6 +19,11 @@ import argparse + datsiz = 0 + timr = 0 + ++def get_ascii_upper_alpha_digits(): ++ if sys.version_info > (3,0): ++ return string.ascii_uppercase+string.digits ++ else: ++ return string.uppercase+string.digits + + def setLogger(filename): + global logger +@@ -111,7 +116,7 @@ def create_tar_file(fil, size, mins, maxs, rand): + + def get_filename(flen): + size = flen +- char = string.uppercase+string.digits ++ char = get_ascii_upper_alpha_digits() + st = ''.join(random.choice(char) for i in range(size)) + ti = str((hex(int(str(time.time()).split('.')[0])))[2:]) + return ti+"%%"+st +@@ -175,7 +180,7 @@ def tar_files(files, file_count, inter, size, mins, maxs, + + + def setxattr_files(files, randname, dir_path): +- char = string.uppercase+string.digits ++ char = get_ascii_upper_alpha_digits() + if not randname: + for k in range(files): + v = ''.join(random.choice(char) for i in range(10)) +diff --git a/tests/utils/gfid-access.py b/tests/utils/gfid-access.py +index 556d2b4..c35c122 100755 +--- a/tests/utils/gfid-access.py ++++ b/tests/utils/gfid-access.py +@@ -33,23 +33,51 @@ def _fmt_mkdir(l): + def _fmt_symlink(l1, l2): + return "!II%dsI%ds%ds" % (37, l1+1, l2+1) + +-def entry_pack_reg(gf, bn, mo, uid, gid): +- blen = len(bn) +- return struct.pack(_fmt_mknod(blen), +- uid, gid, gf, mo, bn, +- stat.S_IMODE(mo), 0, umask()) +- +-def entry_pack_dir(gf, bn, mo, uid, gid): +- blen = len(bn) +- return struct.pack(_fmt_mkdir(blen), +- uid, gid, gf, mo, bn, +- stat.S_IMODE(mo), umask()) +- +-def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): +- blen = len(bn) +- llen = len(lnk) +- return struct.pack(_fmt_symlink(blen, llen), +- uid, gid, gf, mo, bn, lnk) ++ ++if sys.version_info > (3,): ++ def entry_pack_reg(gf, bn, mo, uid, gid): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ return struct.pack(_fmt_mknod(blen), ++ uid, gid, gf.encode(), mo, bn_encoded, ++ stat.S_IMODE(mo), 0, umask()) ++ ++ # mkdir ++ def entry_pack_dir(gf, bn, mo, uid, gid): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ return struct.pack(_fmt_mkdir(blen), ++ uid, gid, gf.encode(), mo, bn_encoded, ++ stat.S_IMODE(mo), umask()) ++ # symlink ++ def entry_pack_symlink(gf, bn, lnk, st): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ lnk_encoded = lnk.encode() ++ llen = len(lnk_encoded) ++ return struct.pack(_fmt_symlink(blen, llen), ++ st['uid'], st['gid'], ++ gf.encode(), st['mode'], bn_encoded, ++ lnk_encoded) ++ ++else: ++ def entry_pack_reg(gf, bn, mo, uid, gid): ++ blen = len(bn) ++ return struct.pack(_fmt_mknod(blen), ++ uid, gid, gf, mo, bn, ++ stat.S_IMODE(mo), 0, umask()) ++ ++ def entry_pack_dir(gf, bn, mo, uid, gid): ++ blen = len(bn) ++ return struct.pack(_fmt_mkdir(blen), ++ uid, gid, gf, mo, bn, ++ stat.S_IMODE(mo), umask()) ++ ++ def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): ++ blen = len(bn) ++ llen = len(lnk) ++ 
return struct.pack(_fmt_symlink(blen, llen), ++ uid, gid, gf, mo, bn, lnk) + + if __name__ == '__main__': + if len(sys.argv) < 9: +diff --git a/tests/utils/libcxattr.py b/tests/utils/libcxattr.py +index fd0b083..3f3ed1f 100644 +--- a/tests/utils/libcxattr.py ++++ b/tests/utils/libcxattr.py +@@ -10,7 +10,9 @@ + + import os + import sys +-from ctypes import CDLL, c_int, create_string_buffer ++from ctypes import CDLL, c_int ++from py2py3 import bytearray_to_str, gr_create_string_buffer ++from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr + + + class Xattr(object): +@@ -47,20 +49,23 @@ class Xattr(object): + @classmethod + def _query_xattr(cls, path, siz, syscall, *a): + if siz: +- buf = create_string_buffer('\0' * siz) ++ buf = gr_create_string_buffer(siz) + else: + buf = None + ret = getattr(cls.libc, syscall)(*((path,) + a + (buf, siz))) + if ret == -1: + cls.raise_oserr() + if siz: +- return buf.raw[:ret] ++ # py2 and py3 compatibility. Convert bytes array ++ # to string ++ result = bytearray_to_str(buf.raw) ++ return result[:ret] + else: + return ret + + @classmethod + def lgetxattr(cls, path, attr, siz=0): +- return cls._query_xattr(path, siz, 'lgetxattr', attr) ++ return gr_query_xattr(cls, path, siz, 'lgetxattr', attr) + + @classmethod + def lgetxattr_buf(cls, path, attr): +@@ -74,20 +79,21 @@ class Xattr(object): + + @classmethod + def llistxattr(cls, path, siz=0): +- ret = cls._query_xattr(path, siz, 'llistxattr') ++ ret = gr_query_xattr(cls, path, siz, 'llistxattr') + if isinstance(ret, str): +- ret = ret.split('\0') ++ ret = ret.strip('\0') ++ ret = ret.split('\0') if ret else [] + return ret + + @classmethod + def lsetxattr(cls, path, attr, val): +- ret = cls.libc.lsetxattr(path, attr, val, len(val), 0) ++ ret = gr_lsetxattr(cls, path, attr, val) + if ret == -1: + cls.raise_oserr() + + @classmethod + def lremovexattr(cls, path, attr): +- ret = cls.libc.lremovexattr(path, attr) ++ ret = gr_lremovexattr(cls, path, attr) + if ret == -1: + cls.raise_oserr() + +diff --git a/tests/utils/py2py3.py b/tests/utils/py2py3.py +new file mode 100644 +index 0000000..63aca10 +--- /dev/null ++++ b/tests/utils/py2py3.py +@@ -0,0 +1,186 @@ ++# ++# Copyright (c) 2018 Red Hat, Inc. ++# This file is part of GlusterFS. ++ ++# This file is licensed to you under your choice of the GNU Lesser ++# General Public License, version 3 or any later version (LGPLv3 or ++# later), or the GNU General Public License, version 2 (GPLv2), in all ++# cases as published by the Free Software Foundation. ++# ++ ++# All python2/python3 compatibility routines ++ ++import sys ++import os ++import stat ++import struct ++from ctypes import create_string_buffer ++ ++def umask(): ++ return os.umask(0) ++ ++if sys.version_info >= (3,): ++ def pipe(): ++ (r, w) = os.pipe() ++ os.set_inheritable(r, True) ++ os.set_inheritable(w, True) ++ return (r, w) ++ ++ # Raw conversion of bytearray to string. Used in the cases where ++ # buffer is created by create_string_buffer which is a 8-bit char ++ # array and passed to syscalls to fetch results. Using encode/decode ++ # doesn't work as it converts to string altering the size. ++ def bytearray_to_str(byte_arr): ++ return ''.join([chr(b) for b in byte_arr]) ++ ++ # Raw conversion of string to bytes. This is required to convert ++ # back the string into bytearray(c char array) to use in struc ++ # pack/unpacking. Again encode/decode can't be used as it ++ # converts it alters size. 
++ def str_to_bytearray(string): ++ return bytes([ord(c) for c in string]) ++ ++ def gr_create_string_buffer(size): ++ return create_string_buffer(b'\0', size) ++ ++ def gr_query_xattr(cls, path, size, syscall, attr=None): ++ if attr: ++ return cls._query_xattr(path.encode(), size, syscall, ++ attr.encode()) ++ else: ++ return cls._query_xattr(path.encode(), size, syscall) ++ ++ def gr_lsetxattr(cls, path, attr, val): ++ return cls.libc.lsetxattr(path.encode(), attr.encode(), val, ++ len(val), 0) ++ ++ def gr_lremovexattr(cls, path, attr): ++ return cls.libc.lremovexattr(path.encode(), attr.encode()) ++ ++ def gr_cl_register(cls, brick, path, log_file, log_level, retries): ++ return cls._get_api('gf_changelog_register')(brick.encode(), ++ path.encode(), ++ log_file.encode(), ++ log_level, retries) ++ ++ def gr_cl_done(cls, clfile): ++ return cls._get_api('gf_changelog_done')(clfile.encode()) ++ ++ def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel, ++ actual_end): ++ return cls._get_api('gf_history_changelog')(changelog_path.encode(), ++ start, end, num_parallel, ++ actual_end) ++ ++ def gr_cl_history_done(cls, clfile): ++ return cls._get_api('gf_history_changelog_done')(clfile.encode()) ++ ++ # regular file ++ ++ def entry_pack_reg(cls, gf, bn, mo, uid, gid): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ return struct.pack(cls._fmt_mknod(blen), ++ uid, gid, gf.encode(), mo, bn_encoded, ++ stat.S_IMODE(mo), 0, umask()) ++ ++ def entry_pack_reg_stat(cls, gf, bn, st): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ mo = st['mode'] ++ return struct.pack(cls._fmt_mknod(blen), ++ st['uid'], st['gid'], ++ gf.encode(), mo, bn_encoded, ++ stat.S_IMODE(mo), 0, umask()) ++ # mkdir ++ ++ def entry_pack_mkdir(cls, gf, bn, mo, uid, gid): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ return struct.pack(cls._fmt_mkdir(blen), ++ uid, gid, gf.encode(), mo, bn_encoded, ++ stat.S_IMODE(mo), umask()) ++ # symlink ++ ++ def entry_pack_symlink(cls, gf, bn, lnk, st): ++ bn_encoded = bn.encode() ++ blen = len(bn_encoded) ++ lnk_encoded = lnk.encode() ++ llen = len(lnk_encoded) ++ return struct.pack(cls._fmt_symlink(blen, llen), ++ st['uid'], st['gid'], ++ gf.encode(), st['mode'], bn_encoded, ++ lnk_encoded) ++else: ++ def pipe(): ++ (r, w) = os.pipe() ++ return (r, w) ++ ++ # Raw conversion of bytearray to string ++ def bytearray_to_str(byte_arr): ++ return byte_arr ++ ++ # Raw conversion of string to bytearray ++ def str_to_bytearray(string): ++ return string ++ ++ def gr_create_string_buffer(size): ++ return create_string_buffer('\0', size) ++ ++ def gr_query_xattr(cls, path, size, syscall, attr=None): ++ if attr: ++ return cls._query_xattr(path, size, syscall, attr) ++ else: ++ return cls._query_xattr(path, size, syscall) ++ ++ def gr_lsetxattr(cls, path, attr, val): ++ return cls.libc.lsetxattr(path, attr, val, len(val), 0) ++ ++ def gr_lremovexattr(cls, path, attr): ++ return cls.libc.lremovexattr(path, attr) ++ ++ def gr_cl_register(cls, brick, path, log_file, log_level, retries): ++ return cls._get_api('gf_changelog_register')(brick, path, log_file, ++ log_level, retries) ++ ++ def gr_cl_done(cls, clfile): ++ return cls._get_api('gf_changelog_done')(clfile) ++ ++ def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel, ++ actual_end): ++ return cls._get_api('gf_history_changelog')(changelog_path, start, end, ++ num_parallel, actual_end) ++ ++ def gr_cl_history_done(cls, clfile): ++ return 
cls._get_api('gf_history_changelog_done')(clfile) ++ ++ # regular file ++ ++ def entry_pack_reg(cls, gf, bn, mo, uid, gid): ++ blen = len(bn) ++ return struct.pack(cls._fmt_mknod(blen), ++ uid, gid, gf, mo, bn, ++ stat.S_IMODE(mo), 0, umask()) ++ ++ def entry_pack_reg_stat(cls, gf, bn, st): ++ blen = len(bn) ++ mo = st['mode'] ++ return struct.pack(cls._fmt_mknod(blen), ++ st['uid'], st['gid'], ++ gf, mo, bn, ++ stat.S_IMODE(mo), 0, umask()) ++ # mkdir ++ ++ def entry_pack_mkdir(cls, gf, bn, mo, uid, gid): ++ blen = len(bn) ++ return struct.pack(cls._fmt_mkdir(blen), ++ uid, gid, gf, mo, bn, ++ stat.S_IMODE(mo), umask()) ++ # symlink ++ ++ def entry_pack_symlink(cls, gf, bn, lnk, st): ++ blen = len(bn) ++ llen = len(lnk) ++ return struct.pack(cls._fmt_symlink(blen, llen), ++ st['uid'], st['gid'], ++ gf, st['mode'], bn, lnk) +-- +1.8.3.1 + diff --git a/SOURCES/0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch b/SOURCES/0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch new file mode 100644 index 0000000..f8f382a --- /dev/null +++ b/SOURCES/0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch @@ -0,0 +1,92 @@ +From 1c55f3633f748629cd0484f79b6c49101eb2df82 Mon Sep 17 00:00:00 2001 +From: Sunny Kumar +Date: Mon, 8 Jul 2019 11:47:28 +0530 +Subject: [PATCH 226/255] geo-rep : fix gluster command path for non-root + session + +Problem: +gluster command not found. + +Cause: +In Volinfo class we issue command 'gluster vol info' to get information +about volume like getting brick_root to perform various operation. +When geo-rep session is configured for non-root user Volinfo class +fails to issue gluster command due to unavailability of gluster +binary path for non-root user. + +Solution: +Use config value 'slave-gluster-command-dir'/'gluster-command-dir' to get path +for gluster command based on caller. + +>Backport of: +>Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/22920/. 
+>fixes: bz#1722740 +>Change-Id: I4ec46373da01f5d00ecd160c4e8c6239da8b3859 +>Signed-off-by: Sunny Kumar + +BUG: 1712591 +Change-Id: Ifea2927253a9521fa459fea6de8a60085c3413f6 +Signed-off-by: Sunny Kumar +Reviewed-on: https://code.engineering.redhat.com/gerrit/175485 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/monitor.py | 4 ++-- + geo-replication/syncdaemon/syncdutils.py | 12 +++++++++--- + 2 files changed, 11 insertions(+), 5 deletions(-) + +diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py +index c45ef24..234f3f1 100644 +--- a/geo-replication/syncdaemon/monitor.py ++++ b/geo-replication/syncdaemon/monitor.py +@@ -369,7 +369,7 @@ def distribute(master, slave): + if rconf.args.use_gconf_volinfo: + mvol = VolinfoFromGconf(master.volume, master=True) + else: +- mvol = Volinfo(master.volume, master.host) ++ mvol = Volinfo(master.volume, master.host, master=True) + logging.debug('master bricks: ' + repr(mvol.bricks)) + prelude = [] + slave_host = None +@@ -385,7 +385,7 @@ def distribute(master, slave): + if rconf.args.use_gconf_volinfo: + svol = VolinfoFromGconf(slave.volume, master=False) + else: +- svol = Volinfo(slave.volume, "localhost", prelude) ++ svol = Volinfo(slave.volume, "localhost", prelude, master=False) + + sbricks = svol.bricks + suuid = svol.uuid +diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py +index 3f41b5f..2ee10ac 100644 +--- a/geo-replication/syncdaemon/syncdutils.py ++++ b/geo-replication/syncdaemon/syncdutils.py +@@ -672,7 +672,7 @@ def get_slv_dir_path(slv_host, slv_volume, gfid): + dir_path = ENOENT + + if not slv_bricks: +- slv_info = Volinfo(slv_volume, slv_host) ++ slv_info = Volinfo(slv_volume, slv_host, master=False) + slv_bricks = slv_info.bricks + # Result of readlink would be of format as below. + # readlink = "../../pgfid[0:2]/pgfid[2:4]/pgfid/basename" +@@ -854,8 +854,14 @@ class Popen(subprocess.Popen): + + class Volinfo(object): + +- def __init__(self, vol, host='localhost', prelude=[]): +- po = Popen(prelude + ['gluster', '--xml', '--remote-host=' + host, ++ def __init__(self, vol, host='localhost', prelude=[], master=True): ++ if master: ++ gluster_cmd_dir = gconf.get("gluster-command-dir") ++ else: ++ gluster_cmd_dir = gconf.get("slave-gluster-command-dir") ++ ++ gluster_cmd = os.path.join(gluster_cmd_dir, 'gluster') ++ po = Popen(prelude + [gluster_cmd, '--xml', '--remote-host=' + host, + 'volume', 'info', vol], + stdout=PIPE, stderr=PIPE, universal_newlines=True) + vix = po.stdout.read() +-- +1.8.3.1 + diff --git a/SOURCES/0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch b/SOURCES/0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch new file mode 100644 index 0000000..41d482f --- /dev/null +++ b/SOURCES/0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch @@ -0,0 +1,914 @@ +From b0815b8a84a07d17a1215c55afc38888ee9fc37c Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Mon, 24 Jun 2019 12:00:20 +0530 +Subject: [PATCH 227/255] glusterd/svc: update pid of mux volumes from the shd + process + +For a normal volume, we are updating the pid from a the +process while we do a daemonization or at the end of the +init if it is no-daemon mode. Along with updating the pid +we also lock the file, to make sure that the process is +running fine. + +With brick mux, we were updating the pidfile from gluterd +after an attach/detach request. 
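+
+For reference, the per-process behaviour described in the first
+paragraph is the classic lock-and-own pidfile idiom. A hedged
+standalone sketch in plain libc (pidfile_update_sketch() is a
+hypothetical name; the multiplexed equivalent this patch adds is
+glusterfs_svc_mux_pidfile_setup() further down):
+
+    #include <stdio.h>
+    #include <unistd.h>
+
+    static int
+    pidfile_update_sketch(const char *pid_file)
+    {
+        FILE *pidfp = fopen(pid_file, "a+");
+
+        if (!pidfp)
+            return -1;
+
+        /* a live owner already holds the lock */
+        if (lockf(fileno(pidfp), F_TLOCK, 0) < 0) {
+            fclose(pidfp);
+            return -1;
+        }
+
+        if (ftruncate(fileno(pidfp), 0) < 0 ||
+            fprintf(pidfp, "%d\n", getpid()) <= 0 ||
+            fflush(pidfp) != 0) {
+            lockf(fileno(pidfp), F_ULOCK, 0);
+            fclose(pidfp);
+            return -1;
+        }
+
+        /* keep pidfp open: the held lock is what shows the process
+         * is still running */
+        return 0;
+    }
+
+Holding the lock from inside the daemon itself is exactly what the
+glusterd-side pidfile update could not provide.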
+ +There are two problems with this approach. +1) We are not holding a pidlock for any file other than parent + process. +2) There is a chance for possible race conditions with attach/detach. + For example, shd start and a volume stop could race. Let's say + we are starting an shd and it is attached to a volume. + While we trying to link the pid file to the running process, + this would have deleted by the thread that doing a volume stop. + +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22935/ + +>Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a +>Updates: bz#1722541 +>Signed-off-by: Mohammed Rafi KC + +Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a +BUG: 1721802 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175723 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + glusterfsd/src/gf_attach.c | 2 + + glusterfsd/src/glusterfsd-mgmt.c | 66 +++++++-- + libglusterfs/src/glusterfs/glusterfs.h | 2 +- + libglusterfs/src/glusterfs/libglusterfs-messages.h | 3 +- + libglusterfs/src/graph.c | 154 ++++++++++++++++++++- + rpc/xdr/src/glusterd1-xdr.x | 1 + + xlators/mgmt/glusterd/src/glusterd-handler.c | 2 + + xlators/mgmt/glusterd/src/glusterd-handshake.c | 42 +++++- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 + + .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 25 ++++ + .../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 3 + + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 8 +- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 57 ++++---- + xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 + + xlators/mgmt/glusterd/src/glusterd-utils.c | 6 +- + 15 files changed, 325 insertions(+), 52 deletions(-) + +diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c +index 6293b9b..1bff854 100644 +--- a/glusterfsd/src/gf_attach.c ++++ b/glusterfsd/src/gf_attach.c +@@ -65,6 +65,8 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op) + brick_req.name = path; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; ++ brick_req.dict.dict_val = NULL; ++ brick_req.dict.dict_len = 0; + + req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); + iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); +diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c +index 1d2cd1a..f930e0a 100644 +--- a/glusterfsd/src/glusterfsd-mgmt.c ++++ b/glusterfsd/src/glusterfsd-mgmt.c +@@ -50,13 +50,16 @@ int + emancipate(glusterfs_ctx_t *ctx, int ret); + int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum); ++ char *volfile_id, char *checksum, ++ dict_t *dict); + int + glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, +- gf_volfile_t *volfile_obj, char *checksum); ++ gf_volfile_t *volfile_obj, char *checksum, ++ dict_t *dict); + int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum); ++ char *volfile_id, char *checksum, ++ dict_t *dict); + int + glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj); + +@@ -75,7 +78,8 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data) + } + + int +-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id) ++mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id, ++ dict_t *dict) + { + glusterfs_ctx_t *ctx = NULL; + int ret = 0; +@@ -145,11 +149,11 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id) + * the volfile 
+ */ + ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id, +- sha256_hash); ++ sha256_hash, dict); + goto unlock; + } + ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj, +- sha256_hash); ++ sha256_hash, dict); + if (ret < 0) { + gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!"); + } +@@ -387,6 +391,8 @@ err: + UNLOCK(&ctx->volfile_lock); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + free(xlator_req.name); + xlator_req.name = NULL; + return 0; +@@ -561,6 +567,8 @@ out: + + free(xlator_req.name); + free(xlator_req.input.input_val); ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + if (dict) +@@ -982,6 +990,8 @@ out: + if (input) + dict_unref(input); + free(xlator_req.input.input_val); /*malloced by xdr*/ ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); +@@ -1062,6 +1072,8 @@ glusterfs_handle_attach(rpcsvc_request_t *req) + out: + UNLOCK(&ctx->volfile_lock); + } ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + free(xlator_req.input.input_val); + free(xlator_req.name); + +@@ -1077,6 +1089,7 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req) + }; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; ++ dict_t *dict = NULL; + + GF_ASSERT(req); + this = THIS; +@@ -1091,20 +1104,41 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req) + req->rpc_err = GARBAGE_ARGS; + goto out; + } ++ + gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, + "received attach " + "request for volfile-id=%s", + xlator_req.name); ++ ++ dict = dict_new(); ++ if (!dict) { ++ ret = -1; ++ errno = ENOMEM; ++ goto out; ++ } ++ ++ ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len, ++ &dict); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, ++ "failed to unserialize xdata to dictionary"); ++ goto out; ++ } ++ dict->extra_stdfree = xlator_req.dict.dict_val; ++ + ret = 0; + + if (ctx->active) { + ret = mgmt_process_volfile(xlator_req.input.input_val, +- xlator_req.input.input_len, xlator_req.name); ++ xlator_req.input.input_len, xlator_req.name, ++ dict); + } else { + gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, + "got attach for %s but no active graph", xlator_req.name); + } + out: ++ if (dict) ++ dict_unref(dict); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); + if (xlator_req.name) +@@ -1241,6 +1275,8 @@ out: + GF_FREE(filepath); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + + return ret; + } +@@ -1313,6 +1349,8 @@ out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr +@@ -1461,6 +1499,8 @@ out: + if (output) + dict_unref(output); + free(brick_req.input.input_val); ++ if (brick_req.dict.dict_val) ++ free(brick_req.dict.dict_val); + free(brick_req.name); + GF_FREE(xname); + GF_FREE(msg); +@@ -1654,6 +1694,8 @@ out: + if (dict) + dict_unref(dict); + free(node_req.input.input_val); ++ if (node_req.dict.dict_val) ++ free(node_req.dict.dict_val); + GF_FREE(msg); + GF_FREE(rsp.output.output_val); + GF_FREE(node_name); +@@ -1757,6 +1799,8 @@ 
glusterfs_handle_nfs_profile(rpcsvc_request_t *req) + + out: + free(nfs_req.input.input_val); ++ if (nfs_req.dict.dict_val) ++ free(nfs_req.dict.dict_val); + if (dict) + dict_unref(dict); + if (output) +@@ -1835,6 +1879,8 @@ out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr ++ if (xlator_req.dict.dict_val) ++ free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr +@@ -1963,7 +2009,8 @@ out: + if (dict) + dict_unref(dict); + free(brick_req.input.input_val); +- ++ if (brick_req.dict.dict_val) ++ free(brick_req.dict.dict_val); + gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; + } +@@ -2213,7 +2260,8 @@ volfile: + size = rsp.op_ret; + volfile_id = frame->local; + if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { +- ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id); ++ ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id, ++ dict); + goto post_graph_mgmt; + } + +diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h +index 9ec2365..b6327b8 100644 +--- a/libglusterfs/src/glusterfs/glusterfs.h ++++ b/libglusterfs/src/glusterfs/glusterfs.h +@@ -744,7 +744,7 @@ typedef struct { + char vol_id[NAME_MAX + 1]; + struct list_head volfile_list; + glusterfs_graph_t *graph; +- ++ FILE *pidfp; + } gf_volfile_t; + + glusterfs_ctx_t * +diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h +index ea2aa60..7e0eebb 100644 +--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h ++++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h +@@ -111,6 +111,7 @@ GLFS_MSGID( + LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG, + LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE, + LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED, +- LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED); ++ LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED, ++ LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED); + + #endif /* !_LG_MESSAGES_H_ */ +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 172dc61..05f76bf 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1467,6 +1467,21 @@ out: + } + + int ++glusterfs_svc_mux_pidfile_cleanup(gf_volfile_t *volfile_obj) ++{ ++ if (!volfile_obj || !volfile_obj->pidfp) ++ return 0; ++ ++ gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", volfile_obj->vol_id); ++ ++ lockf(fileno(volfile_obj->pidfp), F_ULOCK, 0); ++ fclose(volfile_obj->pidfp); ++ volfile_obj->pidfp = NULL; ++ ++ return 0; ++} ++ ++int + glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + { + xlator_t *last_xl = NULL; +@@ -1502,6 +1517,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + + list_del_init(&volfile_obj->volfile_list); + glusterfs_mux_xlator_unlink(parent_graph->top, xl); ++ glusterfs_svc_mux_pidfile_cleanup(volfile_obj); + parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); + parent_graph->xl_count -= graph->xl_count; + parent_graph->leaf_count -= graph->leaf_count; +@@ -1531,8 +1547,126 @@ out: + } + + int ++glusterfs_svc_mux_pidfile_setup(gf_volfile_t *volfile_obj, const char *pid_file) ++{ ++ int ret = -1; ++ FILE *pidfp = NULL; ++ ++ if (!pid_file || !volfile_obj) ++ goto out; ++ ++ if (volfile_obj->pidfp) { ++ ret = 0; ++ goto out; ++ } ++ pidfp = fopen(pid_file, "a+"); ++ if (!pidfp) { ++ goto out; ++ } 
++ volfile_obj->pidfp = pidfp; ++ ++ ret = lockf(fileno(pidfp), F_TLOCK, 0); ++ if (ret) { ++ ret = 0; ++ goto out; ++ } ++out: ++ return ret; ++} ++ ++int ++glusterfs_svc_mux_pidfile_update(gf_volfile_t *volfile_obj, ++ const char *pid_file, pid_t pid) ++{ ++ int ret = 0; ++ FILE *pidfp = NULL; ++ int old_pid; ++ ++ if (!volfile_obj->pidfp) { ++ ret = glusterfs_svc_mux_pidfile_setup(volfile_obj, pid_file); ++ if (ret == -1) ++ goto out; ++ } ++ pidfp = volfile_obj->pidfp; ++ ret = fscanf(pidfp, "%d", &old_pid); ++ if (ret <= 0) { ++ goto update; ++ } ++ if (old_pid == pid) { ++ ret = 0; ++ goto out; ++ } else { ++ gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, ++ "Old pid=%d found in pidfile %s. Cleaning the old pid and " ++ "Updating new pid=%d", ++ old_pid, pid_file, pid); ++ } ++update: ++ ret = sys_ftruncate(fileno(pidfp), 0); ++ if (ret) { ++ gf_msg("glusterfsd", GF_LOG_ERROR, errno, ++ LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, ++ "pidfile %s truncation failed", pid_file); ++ goto out; ++ } ++ ++ ret = fprintf(pidfp, "%d\n", pid); ++ if (ret <= 0) { ++ gf_msg("glusterfsd", GF_LOG_ERROR, errno, ++ LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed", ++ pid_file); ++ goto out; ++ } ++ ++ ret = fflush(pidfp); ++ if (ret) { ++ gf_msg("glusterfsd", GF_LOG_ERROR, errno, ++ LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed", ++ pid_file); ++ goto out; ++ } ++out: ++ return ret; ++} ++ ++int ++glusterfs_update_mux_pid(dict_t *dict, gf_volfile_t *volfile_obj) ++{ ++ char *file = NULL; ++ int ret = -1; ++ ++ GF_VALIDATE_OR_GOTO("graph", dict, out); ++ GF_VALIDATE_OR_GOTO("graph", volfile_obj, out); ++ ++ ret = dict_get_str(dict, "pidfile", &file); ++ if (ret < 0) { ++ gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED, ++ "Failed to get pidfile from dict for volfile_id=%s", ++ volfile_obj->vol_id); ++ } ++ ++ ret = glusterfs_svc_mux_pidfile_update(volfile_obj, file, getpid()); ++ if (ret < 0) { ++ ret = -1; ++ gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED, ++ "Failed to update " ++ "the pidfile for volfile_id=%s", ++ volfile_obj->vol_id); ++ ++ goto out; ++ } ++ ++ if (ret == 1) ++ gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, ++ "PID %d updated in pidfile=%s", getpid(), file); ++ ret = 0; ++out: ++ return ret; ++} ++int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum) ++ char *volfile_id, char *checksum, ++ dict_t *dict) + { + glusterfs_graph_t *graph = NULL; + glusterfs_graph_t *parent_graph = NULL; +@@ -1615,18 +1749,25 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, + ret = -1; + goto out; + } ++ volfile_obj->pidfp = NULL; ++ snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s", ++ volfile_id); ++ ++ if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) { ++ ret = glusterfs_update_mux_pid(dict, volfile_obj); ++ if (ret == -1) { ++ goto out; ++ } ++ } + + graph->used = 1; + parent_graph->id++; + list_add(&graph->list, &ctx->graphs); + INIT_LIST_HEAD(&volfile_obj->volfile_list); + volfile_obj->graph = graph; +- snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s", +- volfile_id); + memcpy(volfile_obj->volfile_checksum, checksum, + sizeof(volfile_obj->volfile_checksum)); + list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list); +- + gf_log_dump_graph(fp, graph); + graph = NULL; + +@@ -1654,7 +1795,8 @@ out: + + int + glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t 
*ctx, +- gf_volfile_t *volfile_obj, char *checksum) ++ gf_volfile_t *volfile_obj, char *checksum, ++ dict_t *dict) + { + glusterfs_graph_t *oldvolfile_graph = NULL; + glusterfs_graph_t *newvolfile_graph = NULL; +@@ -1703,7 +1845,7 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + } + volfile_obj = NULL; + ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id, +- checksum); ++ checksum, dict); + goto out; + } + +diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x +index 9b36d34..02ebec2 100644 +--- a/rpc/xdr/src/glusterd1-xdr.x ++++ b/rpc/xdr/src/glusterd1-xdr.x +@@ -132,6 +132,7 @@ struct gd1_mgmt_brick_op_req { + string name<>; + int op; + opaque input<>; ++ opaque dict<>; + } ; + + struct gd1_mgmt_brick_op_rsp { +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index af8a8a4..cc1f1df 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5423,6 +5423,8 @@ glusterd_print_client_details(FILE *fp, dict_t *dict, + + brick_req->op = GLUSTERD_BRICK_STATUS; + brick_req->name = ""; ++ brick_req->dict.dict_val = NULL; ++ brick_req->dict.dict_len = 0; + + ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"), + brickinfo->path); +diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c +index 1ba58c3..86dec82 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c +@@ -203,7 +203,7 @@ out: + + size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str) ++ char *trusted_str, dict_t *dict) + { + struct stat stbuf = { + 0, +@@ -340,11 +340,19 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, + + ret = glusterd_volinfo_find(volid_ptr, &volinfo); + if (ret == -1) { +- gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo"); ++ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, ++ "Couldn't find volinfo for volid=%s", volid_ptr); + goto out; + } + + glusterd_svc_build_shd_volfile_path(volinfo, path, path_len); ++ ++ ret = glusterd_svc_set_shd_pidfile(volinfo, dict); ++ if (ret == -1) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, ++ "Couldn't set pidfile in dict for volid=%s", volid_ptr); ++ goto out; ++ } + ret = 0; + goto out; + } +@@ -919,6 +927,7 @@ __server_getspec(rpcsvc_request_t *req) + char addrstr[RPCSVC_PEER_STRLEN] = {0}; + peer_info_t *peerinfo = NULL; + xlator_t *this = NULL; ++ dict_t *dict = NULL; + + this = THIS; + GF_ASSERT(this); +@@ -971,6 +980,12 @@ __server_getspec(rpcsvc_request_t *req) + goto fail; + } + ++ dict = dict_new(); ++ if (!dict) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ + trans = req->trans; + /* addrstr will be empty for cli socket connections */ + ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr)); +@@ -989,12 +1004,26 @@ __server_getspec(rpcsvc_request_t *req) + */ + if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) { + ret = build_volfile_path(volume, filename, sizeof(filename), +- TRUSTED_PREFIX); ++ TRUSTED_PREFIX, dict); + } else { +- ret = build_volfile_path(volume, filename, sizeof(filename), NULL); ++ ret = build_volfile_path(volume, filename, sizeof(filename), NULL, ++ dict); + } + + if (ret == 0) { ++ if (dict->count > 0) { ++ ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val, ++ &rsp.xdata.xdata_len); ++ if (ret) { ++ gf_msg(this->name, 
GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SERL_LENGTH_GET_FAIL, ++ "Failed to serialize dict " ++ "to request buffer"); ++ goto fail; ++ } ++ dict->extra_free = rsp.xdata.xdata_val; ++ } ++ + /* to allocate the proper buffer to hold the file data */ + ret = sys_stat(filename, &stbuf); + if (ret < 0) { +@@ -1036,7 +1065,6 @@ __server_getspec(rpcsvc_request_t *req) + goto fail; + } + } +- + /* convert to XDR */ + fail: + if (spec_fd >= 0) +@@ -1056,6 +1084,10 @@ fail: + (xdrproc_t)xdr_gf_getspec_rsp); + free(args.key); // malloced by xdr + free(rsp.spec); ++ ++ if (dict) ++ dict_unref(dict); ++ + if (args.xdata.xdata_val) + free(args.xdata.xdata_val); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 9ea695e..454877b 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -655,6 +655,8 @@ glusterd_brick_op_build_payload(glusterd_op_t op, + break; + } + ++ brick_req->dict.dict_len = 0; ++ brick_req->dict.dict_val = NULL; + ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, + &brick_req->input.input_len); + if (ret) +@@ -723,6 +725,8 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, + goto out; + } + ++ brick_req->dict.dict_len = 0; ++ brick_req->dict.dict_val = NULL; + ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, + &brick_req->input.input_len); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +index 57ceda9..5661e39 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +@@ -126,3 +126,28 @@ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd) + out: + return; + } ++ ++int ++glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict) ++{ ++ int ret = -1; ++ glusterd_svc_t *svc = NULL; ++ xlator_t *this = NULL; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO("glusterd", this, out); ++ GF_VALIDATE_OR_GOTO(this->name, volinfo, out); ++ GF_VALIDATE_OR_GOTO(this->name, dict, out); ++ ++ svc = &(volinfo->shd.svc); ++ ++ ret = dict_set_dynstr_with_alloc(dict, "pidfile", svc->proc.pidfile); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, ++ "Failed to set pidfile %s in dict", svc->proc.pidfile); ++ goto out; ++ } ++ ret = 0; ++out: ++ return ret; ++} +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +index 59466ec..1f0984b 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +@@ -36,4 +36,7 @@ glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo, + int + glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo); + ++int ++glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict); ++ + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 8ad90a9..590169f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -258,14 +258,20 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + gf_boolean_t shd_restart = _gf_false; + + conf = THIS->private; +- volinfo = data; + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + GF_VALIDATE_OR_GOTO("glusterd", svc, out); ++ volinfo = data; + GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); + + if (volinfo) + 
glusterd_volinfo_ref(volinfo); + ++ if (volinfo->is_snap_volume) { ++ /* healing of a snap volume is not supported yet*/ ++ ret = 0; ++ goto out; ++ } ++ + while (conf->restart_shd) { + synclock_unlock(&conf->big_lock); + sleep(2); +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index 400826f..e106111 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -519,7 +519,7 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + /* Take first entry from the process */ + parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t, + mux_svc); +- sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); ++ glusterd_copy_file(parent_svc->proc.pidfile, svc->proc.pidfile); + mux_conn = &parent_svc->conn; + if (volinfo) + volinfo->shd.attached = _gf_true; +@@ -623,12 +623,9 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, + glusterd_volinfo_t *volinfo = NULL; + glusterd_shdsvc_t *shd = NULL; + glusterd_svc_t *svc = frame->cookie; +- glusterd_svc_t *parent_svc = NULL; +- glusterd_svc_proc_t *mux_proc = NULL; + glusterd_conf_t *conf = NULL; + int *flag = (int *)frame->local; + xlator_t *this = THIS; +- int pid = -1; + int ret = -1; + gf_getspec_rsp rsp = { + 0, +@@ -679,27 +676,7 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, + } + + if (rsp.op_ret == 0) { +- pthread_mutex_lock(&conf->attach_lock); +- { +- if (!strcmp(svc->name, "glustershd")) { +- mux_proc = svc->svc_proc; +- if (mux_proc && +- !gf_is_service_running(svc->proc.pidfile, &pid)) { +- /* +- * When svc's are restarting, there is a chance that the +- * attached svc might not have updated it's pid. Because +- * it was at connection stage. So in that case, we need +- * to retry the pid file copy. 
+- */ +- parent_svc = cds_list_entry(mux_proc->svcs.next, +- glusterd_svc_t, mux_svc); +- if (parent_svc) +- sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); +- } +- } +- svc->online = _gf_true; +- } +- pthread_mutex_unlock(&conf->attach_lock); ++ svc->online = _gf_true; + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL, + "svc %s of volume %s attached successfully to pid %d", svc->name, + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +@@ -726,7 +703,7 @@ out: + + extern size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str); ++ char *trusted_str, dict_t *dict); + + int + __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, +@@ -751,6 +728,7 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + ssize_t req_size = 0; + call_frame_t *frame = NULL; + gd1_mgmt_brick_op_req brick_req; ++ dict_t *dict = NULL; + void *req = &brick_req; + void *errlbl = &&err; + struct rpc_clnt_connection *conn; +@@ -776,6 +754,8 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + brick_req.name = volfile_id; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; ++ brick_req.dict.dict_val = NULL; ++ brick_req.dict.dict_len = 0; + + frame = create_frame(this, this->ctx->pool); + if (!frame) { +@@ -783,7 +763,13 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + } + + if (op == GLUSTERD_SVC_ATTACH) { +- (void)build_volfile_path(volfile_id, path, sizeof(path), NULL); ++ dict = dict_new(); ++ if (!dict) { ++ ret = -ENOMEM; ++ goto *errlbl; ++ } ++ ++ (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict); + + ret = sys_stat(path, &stbuf); + if (ret < 0) { +@@ -818,6 +804,18 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + ret = -EIO; + goto *errlbl; + } ++ if (dict->count > 0) { ++ ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val, ++ &brick_req.dict.dict_len); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SERL_LENGTH_GET_FAIL, ++ "Failed to serialize dict " ++ "to request buffer"); ++ goto *errlbl; ++ } ++ dict->extra_free = brick_req.dict.dict_val; ++ } + + frame->cookie = svc; + frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int); +@@ -862,6 +860,8 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + GF_ATOMIC_INC(conf->blockers); + ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0, + iobref, frame, NULL, 0, NULL, 0, NULL); ++ if (dict) ++ dict_unref(dict); + GF_FREE(volfile_content); + if (spec_fd >= 0) + sys_close(spec_fd); +@@ -874,6 +874,9 @@ maybe_free_iobuf: + iobuf_unref(iobuf); + } + err: ++ if (dict) ++ dict_unref(dict); ++ + GF_FREE(volfile_content); + if (spec_fd >= 0) + sys_close(spec_fd); +diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c +index 618d8bc..a8098df 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c ++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c +@@ -143,6 +143,8 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req) + if (!req) + return; + ++ if (req->dict.dict_val) ++ GF_FREE(req->dict.dict_val); + GF_FREE(req->input.input_val); + GF_FREE(req); + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 4c487d0..2eb5116 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -5914,6 +5914,8 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path, + 
brick_req.name = path; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; ++ brick_req.dict.dict_val = NULL; ++ brick_req.dict.dict_len = 0; + + req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); + iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); +@@ -5977,7 +5979,7 @@ err: + + extern size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str); ++ char *trusted_str, dict_t *dict); + + static int + attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo, +@@ -6022,7 +6024,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo, + goto out; + } + +- (void)build_volfile_path(full_id, path, sizeof(path), NULL); ++ (void)build_volfile_path(full_id, path, sizeof(path), NULL, NULL); + + for (tries = 15; tries > 0; --tries) { + rpc = rpc_clnt_ref(other_brick->rpc); +-- +1.8.3.1 + diff --git a/SOURCES/0228-locks-enable-notify-contention-by-default.patch b/SOURCES/0228-locks-enable-notify-contention-by-default.patch new file mode 100644 index 0000000..310cd8b --- /dev/null +++ b/SOURCES/0228-locks-enable-notify-contention-by-default.patch @@ -0,0 +1,39 @@ +From 21fe2ef700e76c8b7be40f21d3a4fb6b96eafaf0 Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Thu, 6 Jun 2019 08:12:34 +0200 +Subject: [PATCH 228/255] locks: enable notify-contention by default + +This patch enables the lock contention notification by default. + +Upstream patch: +> Change-Id: I10131b026a7cb09fc7c93e1e6c8549988c1d7751 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22828 +> BUG: 1717754 +> Signed-off-by: Xavi Hernandez + +Change-Id: I10131b026a7cb09fc7c93e1e6c8549988c1d7751 +Fixes: bz#1720488 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/174655 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/features/locks/src/posix.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c +index adb0df5..9db5ac6 100644 +--- a/xlators/features/locks/src/posix.c ++++ b/xlators/features/locks/src/posix.c +@@ -4796,7 +4796,7 @@ struct volume_options options[] = { + "be used in conjunction w/ revocation-clear-all."}, + {.key = {"notify-contention"}, + .type = GF_OPTION_TYPE_BOOL, +- .default_value = "no", ++ .default_value = "yes", + .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC, + .op_version = {GD_OP_VERSION_4_0_0}, + .tags = {"locks", "contention"}, +-- +1.8.3.1 + diff --git a/SOURCES/0229-glusterd-Show-the-correct-brick-status-in-get-state.patch b/SOURCES/0229-glusterd-Show-the-correct-brick-status-in-get-state.patch new file mode 100644 index 0000000..112c02e --- /dev/null +++ b/SOURCES/0229-glusterd-Show-the-correct-brick-status-in-get-state.patch @@ -0,0 +1,113 @@ +From 4fc0a77db5b9760fa5c00d3803c6d11a28a00b74 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Wed, 3 Jul 2019 15:22:38 +0530 +Subject: [PATCH 229/255] glusterd: Show the correct brick status in get-state + +Problem: get-state does not show correct brick status if brick + status is not Started, it always shows started if any value + is set brickinfo->status + +Solution: Check the value of brickinfo->status to show correct status + in get-state + +> Change-Id: I12a79619024c2cf59f338220d144f2f034059b3b +> fixes: bz#1726906 +> (Cherry pick from commit af989db23d1db00e087f2b9d3dfc43b13ef17153) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22983/) + +BUG: 1726991 +Change-Id: 
I12a79619024c2cf59f338220d144f2f034059b3b +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/175355 +Tested-by: Mohit Agrawal +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-handler.c | 7 +++++-- + xlators/mgmt/glusterd/src/glusterd-utils.c | 28 ++++++++++++++++++++++++++++ + xlators/mgmt/glusterd/src/glusterd-utils.h | 4 ++++ + 3 files changed, 37 insertions(+), 2 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index cc1f1df..94e1be5 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5589,7 +5589,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict) + char vol_status_str[STATUS_STRLEN] = { + 0, + }; +- ++ char brick_status_str[STATUS_STRLEN] = { ++ 0, ++ }; + this = THIS; + GF_VALIDATE_OR_GOTO(THIS->name, this, out); + +@@ -5852,8 +5854,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict) + brickinfo->rdma_port); + fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp, + count, brickinfo->port_registered); ++ glusterd_brick_get_status_str(brickinfo, brick_status_str); + fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count, +- brickinfo->status ? "Started" : "Stopped"); ++ brick_status_str); + + /*FIXME: This is a hacky way of figuring out whether a + * brick belongs to the hot or cold tier */ +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 2eb5116..3bdfd49 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -13898,6 +13898,34 @@ out: + return ret; + } + ++void ++glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, char *status_str) ++{ ++ GF_VALIDATE_OR_GOTO(THIS->name, brickinfo, out); ++ GF_VALIDATE_OR_GOTO(THIS->name, status_str, out); ++ ++ switch (brickinfo->status) { ++ case GF_BRICK_STOPPED: ++ sprintf(status_str, "%s", "Stopped"); ++ break; ++ case GF_BRICK_STARTED: ++ sprintf(status_str, "%s", "Started"); ++ break; ++ case GF_BRICK_STARTING: ++ sprintf(status_str, "%s", "Starting"); ++ break; ++ case GF_BRICK_STOPPING: ++ sprintf(status_str, "%s", "Stopping"); ++ break; ++ default: ++ sprintf(status_str, "%s", "None"); ++ break; ++ } ++ ++out: ++ return; ++} ++ + int + glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo, + char *transport_type_str) +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h +index 6ad8062..5c6a453 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.h ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h +@@ -781,6 +781,10 @@ glusterd_volume_get_type_str(glusterd_volinfo_t *volinfo, char **vol_type_str); + int + glusterd_volume_get_status_str(glusterd_volinfo_t *volinfo, char *status_str); + ++void ++glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, ++ char *status_str); ++ + int + glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo, + char *transport_type_str); +-- +1.8.3.1 + diff --git a/SOURCES/0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch b/SOURCES/0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch new file mode 100644 index 0000000..a9847ed --- /dev/null +++ b/SOURCES/0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch @@ -0,0 +1,893 @@ +From 308fe0d81dbef9f84bb1ad8e7309e3ffc28d6394 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC 
+Date: Thu, 11 Jul 2019 12:37:29 +0530 +Subject: [PATCH 230/255] Revert "glusterd/svc: update pid of mux volumes from + the shd process" + +This reverts commit b0815b8a84a07d17a1215c55afc38888ee9fc37c. +Label : DOWNSTREAM ONLY + +BUG: 1471742 +Change-Id: Iab11c686565e9a9c852f2b7c2d236fa1a348f96a +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175940 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + glusterfsd/src/gf_attach.c | 2 - + glusterfsd/src/glusterfsd-mgmt.c | 66 ++------- + libglusterfs/src/glusterfs/glusterfs.h | 2 +- + libglusterfs/src/glusterfs/libglusterfs-messages.h | 3 +- + libglusterfs/src/graph.c | 154 +-------------------- + rpc/xdr/src/glusterd1-xdr.x | 1 - + xlators/mgmt/glusterd/src/glusterd-handler.c | 2 - + xlators/mgmt/glusterd/src/glusterd-handshake.c | 42 +----- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 - + .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 25 ---- + .../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 3 - + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 8 +- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 57 ++++---- + xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 - + xlators/mgmt/glusterd/src/glusterd-utils.c | 6 +- + 15 files changed, 52 insertions(+), 325 deletions(-) + +diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c +index 1bff854..6293b9b 100644 +--- a/glusterfsd/src/gf_attach.c ++++ b/glusterfsd/src/gf_attach.c +@@ -65,8 +65,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op) + brick_req.name = path; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; +- brick_req.dict.dict_val = NULL; +- brick_req.dict.dict_len = 0; + + req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); + iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); +diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c +index f930e0a..1d2cd1a 100644 +--- a/glusterfsd/src/glusterfsd-mgmt.c ++++ b/glusterfsd/src/glusterfsd-mgmt.c +@@ -50,16 +50,13 @@ int + emancipate(glusterfs_ctx_t *ctx, int ret); + int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum, +- dict_t *dict); ++ char *volfile_id, char *checksum); + int + glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, +- gf_volfile_t *volfile_obj, char *checksum, +- dict_t *dict); ++ gf_volfile_t *volfile_obj, char *checksum); + int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum, +- dict_t *dict); ++ char *volfile_id, char *checksum); + int + glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj); + +@@ -78,8 +75,7 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data) + } + + int +-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id, +- dict_t *dict) ++mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id) + { + glusterfs_ctx_t *ctx = NULL; + int ret = 0; +@@ -149,11 +145,11 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id, + * the volfile + */ + ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id, +- sha256_hash, dict); ++ sha256_hash); + goto unlock; + } + ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj, +- sha256_hash, dict); ++ sha256_hash); + if (ret < 0) { + gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!"); + } +@@ -391,8 +387,6 @@ err: + 
UNLOCK(&ctx->volfile_lock); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + free(xlator_req.name); + xlator_req.name = NULL; + return 0; +@@ -567,8 +561,6 @@ out: + + free(xlator_req.name); + free(xlator_req.input.input_val); +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + if (dict) +@@ -990,8 +982,6 @@ out: + if (input) + dict_unref(input); + free(xlator_req.input.input_val); /*malloced by xdr*/ +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); +@@ -1072,8 +1062,6 @@ glusterfs_handle_attach(rpcsvc_request_t *req) + out: + UNLOCK(&ctx->volfile_lock); + } +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + free(xlator_req.input.input_val); + free(xlator_req.name); + +@@ -1089,7 +1077,6 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req) + }; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; +- dict_t *dict = NULL; + + GF_ASSERT(req); + this = THIS; +@@ -1104,41 +1091,20 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req) + req->rpc_err = GARBAGE_ARGS; + goto out; + } +- + gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, + "received attach " + "request for volfile-id=%s", + xlator_req.name); +- +- dict = dict_new(); +- if (!dict) { +- ret = -1; +- errno = ENOMEM; +- goto out; +- } +- +- ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len, +- &dict); +- if (ret) { +- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, +- "failed to unserialize xdata to dictionary"); +- goto out; +- } +- dict->extra_stdfree = xlator_req.dict.dict_val; +- + ret = 0; + + if (ctx->active) { + ret = mgmt_process_volfile(xlator_req.input.input_val, +- xlator_req.input.input_len, xlator_req.name, +- dict); ++ xlator_req.input.input_len, xlator_req.name); + } else { + gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, + "got attach for %s but no active graph", xlator_req.name); + } + out: +- if (dict) +- dict_unref(dict); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); + if (xlator_req.name) +@@ -1275,8 +1241,6 @@ out: + GF_FREE(filepath); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + + return ret; + } +@@ -1349,8 +1313,6 @@ out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr +@@ -1499,8 +1461,6 @@ out: + if (output) + dict_unref(output); + free(brick_req.input.input_val); +- if (brick_req.dict.dict_val) +- free(brick_req.dict.dict_val); + free(brick_req.name); + GF_FREE(xname); + GF_FREE(msg); +@@ -1694,8 +1654,6 @@ out: + if (dict) + dict_unref(dict); + free(node_req.input.input_val); +- if (node_req.dict.dict_val) +- free(node_req.dict.dict_val); + GF_FREE(msg); + GF_FREE(rsp.output.output_val); + GF_FREE(node_name); +@@ -1799,8 +1757,6 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req) + + out: + free(nfs_req.input.input_val); +- if (nfs_req.dict.dict_val) +- free(nfs_req.dict.dict_val); + if (dict) + dict_unref(dict); + if (output) +@@ -1879,8 +1835,6 @@ out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr +- if (xlator_req.dict.dict_val) +- free(xlator_req.dict.dict_val); 
+ if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr +@@ -2009,8 +1963,7 @@ out: + if (dict) + dict_unref(dict); + free(brick_req.input.input_val); +- if (brick_req.dict.dict_val) +- free(brick_req.dict.dict_val); ++ + gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; + } +@@ -2260,8 +2213,7 @@ volfile: + size = rsp.op_ret; + volfile_id = frame->local; + if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { +- ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id, +- dict); ++ ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id); + goto post_graph_mgmt; + } + +diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h +index b6327b8..9ec2365 100644 +--- a/libglusterfs/src/glusterfs/glusterfs.h ++++ b/libglusterfs/src/glusterfs/glusterfs.h +@@ -744,7 +744,7 @@ typedef struct { + char vol_id[NAME_MAX + 1]; + struct list_head volfile_list; + glusterfs_graph_t *graph; +- FILE *pidfp; ++ + } gf_volfile_t; + + glusterfs_ctx_t * +diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h +index 7e0eebb..ea2aa60 100644 +--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h ++++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h +@@ -111,7 +111,6 @@ GLFS_MSGID( + LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG, + LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE, + LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED, +- LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED, +- LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED); ++ LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED); + + #endif /* !_LG_MESSAGES_H_ */ +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 05f76bf..172dc61 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1467,21 +1467,6 @@ out: + } + + int +-glusterfs_svc_mux_pidfile_cleanup(gf_volfile_t *volfile_obj) +-{ +- if (!volfile_obj || !volfile_obj->pidfp) +- return 0; +- +- gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", volfile_obj->vol_id); +- +- lockf(fileno(volfile_obj->pidfp), F_ULOCK, 0); +- fclose(volfile_obj->pidfp); +- volfile_obj->pidfp = NULL; +- +- return 0; +-} +- +-int + glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + { + xlator_t *last_xl = NULL; +@@ -1517,7 +1502,6 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + + list_del_init(&volfile_obj->volfile_list); + glusterfs_mux_xlator_unlink(parent_graph->top, xl); +- glusterfs_svc_mux_pidfile_cleanup(volfile_obj); + parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); + parent_graph->xl_count -= graph->xl_count; + parent_graph->leaf_count -= graph->leaf_count; +@@ -1547,126 +1531,8 @@ out: + } + + int +-glusterfs_svc_mux_pidfile_setup(gf_volfile_t *volfile_obj, const char *pid_file) +-{ +- int ret = -1; +- FILE *pidfp = NULL; +- +- if (!pid_file || !volfile_obj) +- goto out; +- +- if (volfile_obj->pidfp) { +- ret = 0; +- goto out; +- } +- pidfp = fopen(pid_file, "a+"); +- if (!pidfp) { +- goto out; +- } +- volfile_obj->pidfp = pidfp; +- +- ret = lockf(fileno(pidfp), F_TLOCK, 0); +- if (ret) { +- ret = 0; +- goto out; +- } +-out: +- return ret; +-} +- +-int +-glusterfs_svc_mux_pidfile_update(gf_volfile_t *volfile_obj, +- const char *pid_file, pid_t pid) +-{ +- int ret = 0; +- FILE *pidfp = NULL; +- int old_pid; +- +- if (!volfile_obj->pidfp) { +- ret = 
glusterfs_svc_mux_pidfile_setup(volfile_obj, pid_file); +- if (ret == -1) +- goto out; +- } +- pidfp = volfile_obj->pidfp; +- ret = fscanf(pidfp, "%d", &old_pid); +- if (ret <= 0) { +- goto update; +- } +- if (old_pid == pid) { +- ret = 0; +- goto out; +- } else { +- gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, +- "Old pid=%d found in pidfile %s. Cleaning the old pid and " +- "Updating new pid=%d", +- old_pid, pid_file, pid); +- } +-update: +- ret = sys_ftruncate(fileno(pidfp), 0); +- if (ret) { +- gf_msg("glusterfsd", GF_LOG_ERROR, errno, +- LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, +- "pidfile %s truncation failed", pid_file); +- goto out; +- } +- +- ret = fprintf(pidfp, "%d\n", pid); +- if (ret <= 0) { +- gf_msg("glusterfsd", GF_LOG_ERROR, errno, +- LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed", +- pid_file); +- goto out; +- } +- +- ret = fflush(pidfp); +- if (ret) { +- gf_msg("glusterfsd", GF_LOG_ERROR, errno, +- LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed", +- pid_file); +- goto out; +- } +-out: +- return ret; +-} +- +-int +-glusterfs_update_mux_pid(dict_t *dict, gf_volfile_t *volfile_obj) +-{ +- char *file = NULL; +- int ret = -1; +- +- GF_VALIDATE_OR_GOTO("graph", dict, out); +- GF_VALIDATE_OR_GOTO("graph", volfile_obj, out); +- +- ret = dict_get_str(dict, "pidfile", &file); +- if (ret < 0) { +- gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED, +- "Failed to get pidfile from dict for volfile_id=%s", +- volfile_obj->vol_id); +- } +- +- ret = glusterfs_svc_mux_pidfile_update(volfile_obj, file, getpid()); +- if (ret < 0) { +- ret = -1; +- gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED, +- "Failed to update " +- "the pidfile for volfile_id=%s", +- volfile_obj->vol_id); +- +- goto out; +- } +- +- if (ret == 1) +- gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, +- "PID %d updated in pidfile=%s", getpid(), file); +- ret = 0; +-out: +- return ret; +-} +-int + glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum, +- dict_t *dict) ++ char *volfile_id, char *checksum) + { + glusterfs_graph_t *graph = NULL; + glusterfs_graph_t *parent_graph = NULL; +@@ -1749,25 +1615,18 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, + ret = -1; + goto out; + } +- volfile_obj->pidfp = NULL; +- snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s", +- volfile_id); +- +- if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) { +- ret = glusterfs_update_mux_pid(dict, volfile_obj); +- if (ret == -1) { +- goto out; +- } +- } + + graph->used = 1; + parent_graph->id++; + list_add(&graph->list, &ctx->graphs); + INIT_LIST_HEAD(&volfile_obj->volfile_list); + volfile_obj->graph = graph; ++ snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s", ++ volfile_id); + memcpy(volfile_obj->volfile_checksum, checksum, + sizeof(volfile_obj->volfile_checksum)); + list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list); ++ + gf_log_dump_graph(fp, graph); + graph = NULL; + +@@ -1795,8 +1654,7 @@ out: + + int + glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, +- gf_volfile_t *volfile_obj, char *checksum, +- dict_t *dict) ++ gf_volfile_t *volfile_obj, char *checksum) + { + glusterfs_graph_t *oldvolfile_graph = NULL; + glusterfs_graph_t *newvolfile_graph = NULL; +@@ -1845,7 +1703,7 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + } + volfile_obj = NULL; + ret = 
glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id, +- checksum, dict); ++ checksum); + goto out; + } + +diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x +index 02ebec2..9b36d34 100644 +--- a/rpc/xdr/src/glusterd1-xdr.x ++++ b/rpc/xdr/src/glusterd1-xdr.x +@@ -132,7 +132,6 @@ struct gd1_mgmt_brick_op_req { + string name<>; + int op; + opaque input<>; +- opaque dict<>; + } ; + + struct gd1_mgmt_brick_op_rsp { +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index 94e1be5..ac788a0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5423,8 +5423,6 @@ glusterd_print_client_details(FILE *fp, dict_t *dict, + + brick_req->op = GLUSTERD_BRICK_STATUS; + brick_req->name = ""; +- brick_req->dict.dict_val = NULL; +- brick_req->dict.dict_len = 0; + + ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"), + brickinfo->path); +diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c +index 86dec82..1ba58c3 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c +@@ -203,7 +203,7 @@ out: + + size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str, dict_t *dict) ++ char *trusted_str) + { + struct stat stbuf = { + 0, +@@ -340,19 +340,11 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, + + ret = glusterd_volinfo_find(volid_ptr, &volinfo); + if (ret == -1) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Couldn't find volinfo for volid=%s", volid_ptr); ++ gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo"); + goto out; + } + + glusterd_svc_build_shd_volfile_path(volinfo, path, path_len); +- +- ret = glusterd_svc_set_shd_pidfile(volinfo, dict); +- if (ret == -1) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +- "Couldn't set pidfile in dict for volid=%s", volid_ptr); +- goto out; +- } + ret = 0; + goto out; + } +@@ -927,7 +919,6 @@ __server_getspec(rpcsvc_request_t *req) + char addrstr[RPCSVC_PEER_STRLEN] = {0}; + peer_info_t *peerinfo = NULL; + xlator_t *this = NULL; +- dict_t *dict = NULL; + + this = THIS; + GF_ASSERT(this); +@@ -980,12 +971,6 @@ __server_getspec(rpcsvc_request_t *req) + goto fail; + } + +- dict = dict_new(); +- if (!dict) { +- ret = -ENOMEM; +- goto fail; +- } +- + trans = req->trans; + /* addrstr will be empty for cli socket connections */ + ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr)); +@@ -1004,26 +989,12 @@ __server_getspec(rpcsvc_request_t *req) + */ + if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) { + ret = build_volfile_path(volume, filename, sizeof(filename), +- TRUSTED_PREFIX, dict); ++ TRUSTED_PREFIX); + } else { +- ret = build_volfile_path(volume, filename, sizeof(filename), NULL, +- dict); ++ ret = build_volfile_path(volume, filename, sizeof(filename), NULL); + } + + if (ret == 0) { +- if (dict->count > 0) { +- ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val, +- &rsp.xdata.xdata_len); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, +- GD_MSG_DICT_SERL_LENGTH_GET_FAIL, +- "Failed to serialize dict " +- "to request buffer"); +- goto fail; +- } +- dict->extra_free = rsp.xdata.xdata_val; +- } +- + /* to allocate the proper buffer to hold the file data */ + ret = sys_stat(filename, &stbuf); + if (ret < 0) { +@@ -1065,6 +1036,7 @@ __server_getspec(rpcsvc_request_t 
*req) + goto fail; + } + } ++ + /* convert to XDR */ + fail: + if (spec_fd >= 0) +@@ -1084,10 +1056,6 @@ fail: + (xdrproc_t)xdr_gf_getspec_rsp); + free(args.key); // malloced by xdr + free(rsp.spec); +- +- if (dict) +- dict_unref(dict); +- + if (args.xdata.xdata_val) + free(args.xdata.xdata_val); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 454877b..9ea695e 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -655,8 +655,6 @@ glusterd_brick_op_build_payload(glusterd_op_t op, + break; + } + +- brick_req->dict.dict_len = 0; +- brick_req->dict.dict_val = NULL; + ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, + &brick_req->input.input_len); + if (ret) +@@ -725,8 +723,6 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, + goto out; + } + +- brick_req->dict.dict_len = 0; +- brick_req->dict.dict_val = NULL; + ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, + &brick_req->input.input_len); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +index 5661e39..57ceda9 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +@@ -126,28 +126,3 @@ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd) + out: + return; + } +- +-int +-glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict) +-{ +- int ret = -1; +- glusterd_svc_t *svc = NULL; +- xlator_t *this = NULL; +- +- this = THIS; +- GF_VALIDATE_OR_GOTO("glusterd", this, out); +- GF_VALIDATE_OR_GOTO(this->name, volinfo, out); +- GF_VALIDATE_OR_GOTO(this->name, dict, out); +- +- svc = &(volinfo->shd.svc); +- +- ret = dict_set_dynstr_with_alloc(dict, "pidfile", svc->proc.pidfile); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +- "Failed to set pidfile %s in dict", svc->proc.pidfile); +- goto out; +- } +- ret = 0; +-out: +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +index 1f0984b..59466ec 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +@@ -36,7 +36,4 @@ glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo, + int + glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo); + +-int +-glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict); +- + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 590169f..8ad90a9 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -258,20 +258,14 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + gf_boolean_t shd_restart = _gf_false; + + conf = THIS->private; ++ volinfo = data; + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- volinfo = data; + GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); + + if (volinfo) + glusterd_volinfo_ref(volinfo); + +- if (volinfo->is_snap_volume) { +- /* healing of a snap volume is not supported yet*/ +- ret = 0; +- goto out; +- } +- + while (conf->restart_shd) { + synclock_unlock(&conf->big_lock); + sleep(2); +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index 
e106111..400826f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -519,7 +519,7 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + /* Take first entry from the process */ + parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t, + mux_svc); +- glusterd_copy_file(parent_svc->proc.pidfile, svc->proc.pidfile); ++ sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); + mux_conn = &parent_svc->conn; + if (volinfo) + volinfo->shd.attached = _gf_true; +@@ -623,9 +623,12 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, + glusterd_volinfo_t *volinfo = NULL; + glusterd_shdsvc_t *shd = NULL; + glusterd_svc_t *svc = frame->cookie; ++ glusterd_svc_t *parent_svc = NULL; ++ glusterd_svc_proc_t *mux_proc = NULL; + glusterd_conf_t *conf = NULL; + int *flag = (int *)frame->local; + xlator_t *this = THIS; ++ int pid = -1; + int ret = -1; + gf_getspec_rsp rsp = { + 0, +@@ -676,7 +679,27 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, + } + + if (rsp.op_ret == 0) { +- svc->online = _gf_true; ++ pthread_mutex_lock(&conf->attach_lock); ++ { ++ if (!strcmp(svc->name, "glustershd")) { ++ mux_proc = svc->svc_proc; ++ if (mux_proc && ++ !gf_is_service_running(svc->proc.pidfile, &pid)) { ++ /* ++ * When svc's are restarting, there is a chance that the ++ * attached svc might not have updated it's pid. Because ++ * it was at connection stage. So in that case, we need ++ * to retry the pid file copy. ++ */ ++ parent_svc = cds_list_entry(mux_proc->svcs.next, ++ glusterd_svc_t, mux_svc); ++ if (parent_svc) ++ sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); ++ } ++ } ++ svc->online = _gf_true; ++ } ++ pthread_mutex_unlock(&conf->attach_lock); + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL, + "svc %s of volume %s attached successfully to pid %d", svc->name, + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +@@ -703,7 +726,7 @@ out: + + extern size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str, dict_t *dict); ++ char *trusted_str); + + int + __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, +@@ -728,7 +751,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + ssize_t req_size = 0; + call_frame_t *frame = NULL; + gd1_mgmt_brick_op_req brick_req; +- dict_t *dict = NULL; + void *req = &brick_req; + void *errlbl = &&err; + struct rpc_clnt_connection *conn; +@@ -754,8 +776,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + brick_req.name = volfile_id; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; +- brick_req.dict.dict_val = NULL; +- brick_req.dict.dict_len = 0; + + frame = create_frame(this, this->ctx->pool); + if (!frame) { +@@ -763,13 +783,7 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + } + + if (op == GLUSTERD_SVC_ATTACH) { +- dict = dict_new(); +- if (!dict) { +- ret = -ENOMEM; +- goto *errlbl; +- } +- +- (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict); ++ (void)build_volfile_path(volfile_id, path, sizeof(path), NULL); + + ret = sys_stat(path, &stbuf); + if (ret < 0) { +@@ -804,18 +818,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + ret = -EIO; + goto *errlbl; + } +- if (dict->count > 0) { +- ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val, +- &brick_req.dict.dict_len); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, 
+- GD_MSG_DICT_SERL_LENGTH_GET_FAIL, +- "Failed to serialize dict " +- "to request buffer"); +- goto *errlbl; +- } +- dict->extra_free = brick_req.dict.dict_val; +- } + + frame->cookie = svc; + frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int); +@@ -860,8 +862,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, + GF_ATOMIC_INC(conf->blockers); + ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0, + iobref, frame, NULL, 0, NULL, 0, NULL); +- if (dict) +- dict_unref(dict); + GF_FREE(volfile_content); + if (spec_fd >= 0) + sys_close(spec_fd); +@@ -874,9 +874,6 @@ maybe_free_iobuf: + iobuf_unref(iobuf); + } + err: +- if (dict) +- dict_unref(dict); +- + GF_FREE(volfile_content); + if (spec_fd >= 0) + sys_close(spec_fd); +diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c +index a8098df..618d8bc 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c ++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c +@@ -143,8 +143,6 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req) + if (!req) + return; + +- if (req->dict.dict_val) +- GF_FREE(req->dict.dict_val); + GF_FREE(req->input.input_val); + GF_FREE(req); + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 3bdfd49..4525ec7 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -5914,8 +5914,6 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path, + brick_req.name = path; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; +- brick_req.dict.dict_val = NULL; +- brick_req.dict.dict_len = 0; + + req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); + iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); +@@ -5979,7 +5977,7 @@ err: + + extern size_t + build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str, dict_t *dict); ++ char *trusted_str); + + static int + attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo, +@@ -6024,7 +6022,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo, + goto out; + } + +- (void)build_volfile_path(full_id, path, sizeof(path), NULL, NULL); ++ (void)build_volfile_path(full_id, path, sizeof(path), NULL); + + for (tries = 15; tries > 0; --tries) { + rpc = rpc_clnt_ref(other_brick->rpc); +-- +1.8.3.1 + diff --git a/SOURCES/0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch b/SOURCES/0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch new file mode 100644 index 0000000..3b794c9 --- /dev/null +++ b/SOURCES/0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch @@ -0,0 +1,180 @@ +From 21f376939f03f91214218c485e7d3a2848dae4b2 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:43:44 +0530 +Subject: [PATCH 231/255] Revert "graph/shd: Use top down approach while + cleaning xlator" + +This reverts commit b963fa8bb71963127147d33bf609f439dd5bd107. 
+ +Label : DOWNSTREAM ONLY + +BUG: 1471742 +Change-Id: Ifb8056395c5988cf7c484891bea052f5415bf9da +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175941 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 10 +--------- + xlators/features/bit-rot/src/stub/bit-rot-stub.c | 1 - + xlators/features/changelog/src/changelog.c | 1 - + xlators/features/cloudsync/src/cloudsync.c | 4 +--- + xlators/features/index/src/index.c | 1 - + xlators/features/quiesce/src/quiesce.c | 1 - + xlators/features/read-only/src/worm.c | 1 - + xlators/features/sdfs/src/sdfs.c | 1 - + xlators/features/selinux/src/selinux.c | 2 -- + xlators/features/trash/src/trash.c | 1 - + 10 files changed, 2 insertions(+), 21 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 172dc61..5b95fd6 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1193,14 +1193,6 @@ glusterfs_graph_fini(glusterfs_graph_t *graph) + if (trav->init_succeeded) { + trav->cleanup_starting = 1; + trav->fini(trav); +- if (trav->local_pool) { +- mem_pool_destroy(trav->local_pool); +- trav->local_pool = NULL; +- } +- if (trav->itable) { +- inode_table_destroy(trav->itable); +- trav->itable = NULL; +- } + trav->init_succeeded = 0; + } + trav = trav->next; +@@ -1402,7 +1394,7 @@ glusterfs_graph_cleanup(void *arg) + + pthread_mutex_lock(&ctx->cleanup_lock); + { +- glusterfs_graph_fini(graph); ++ glusterfs_graph_deactivate(graph); + glusterfs_graph_destroy(graph); + } + pthread_mutex_unlock(&ctx->cleanup_lock); +diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c +index 03446be..3f48a4b 100644 +--- a/xlators/features/bit-rot/src/stub/bit-rot-stub.c ++++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c +@@ -185,7 +185,6 @@ cleanup_lock: + pthread_mutex_destroy(&priv->lock); + free_mempool: + mem_pool_destroy(priv->local_pool); +- priv->local_pool = NULL; + free_priv: + GF_FREE(priv); + this->private = NULL; +diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c +index 2862d1e..d9025f3 100644 +--- a/xlators/features/changelog/src/changelog.c ++++ b/xlators/features/changelog/src/changelog.c +@@ -2790,7 +2790,6 @@ cleanup_options: + changelog_freeup_options(this, priv); + cleanup_mempool: + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + cleanup_priv: + GF_FREE(priv); + error_return: +diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c +index 0ad987e..26e512c 100644 +--- a/xlators/features/cloudsync/src/cloudsync.c ++++ b/xlators/features/cloudsync/src/cloudsync.c +@@ -200,10 +200,8 @@ cs_init(xlator_t *this) + + out: + if (ret == -1) { +- if (this->local_pool) { ++ if (this->local_pool) + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; +- } + + cs_cleanup_private(priv); + +diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c +index 4ece7ff..2f2a6d0 100644 +--- a/xlators/features/index/src/index.c ++++ b/xlators/features/index/src/index.c +@@ -2478,7 +2478,6 @@ out: + GF_FREE(priv); + this->private = NULL; + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + } + + if (attr_inited) +diff --git a/xlators/features/quiesce/src/quiesce.c b/xlators/features/quiesce/src/quiesce.c +index 06f58c9..bfd1116 100644 +--- a/xlators/features/quiesce/src/quiesce.c ++++ 
b/xlators/features/quiesce/src/quiesce.c +@@ -2536,7 +2536,6 @@ fini(xlator_t *this) + this->private = NULL; + + mem_pool_destroy(priv->local_pool); +- priv->local_pool = NULL; + LOCK_DESTROY(&priv->lock); + GF_FREE(priv); + out: +diff --git a/xlators/features/read-only/src/worm.c b/xlators/features/read-only/src/worm.c +index 7d13180..24196f8 100644 +--- a/xlators/features/read-only/src/worm.c ++++ b/xlators/features/read-only/src/worm.c +@@ -569,7 +569,6 @@ fini(xlator_t *this) + mem_put(priv); + this->private = NULL; + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + out: + return; + } +diff --git a/xlators/features/sdfs/src/sdfs.c b/xlators/features/sdfs/src/sdfs.c +index 164c632..f0247fd 100644 +--- a/xlators/features/sdfs/src/sdfs.c ++++ b/xlators/features/sdfs/src/sdfs.c +@@ -1429,7 +1429,6 @@ void + fini(xlator_t *this) + { + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + return; + } + +diff --git a/xlators/features/selinux/src/selinux.c b/xlators/features/selinux/src/selinux.c +index ce5fc90..58b4c5d 100644 +--- a/xlators/features/selinux/src/selinux.c ++++ b/xlators/features/selinux/src/selinux.c +@@ -256,7 +256,6 @@ out: + GF_FREE(priv); + } + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + } + return ret; + } +@@ -285,7 +284,6 @@ fini(xlator_t *this) + GF_FREE(priv); + + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + + return; + } +diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c +index eb5007b..d668436 100644 +--- a/xlators/features/trash/src/trash.c ++++ b/xlators/features/trash/src/trash.c +@@ -2523,7 +2523,6 @@ out: + GF_FREE(priv); + } + mem_pool_destroy(this->local_pool); +- this->local_pool = NULL; + } + return ret; + } +-- +1.8.3.1 + diff --git a/SOURCES/0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch b/SOURCES/0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch new file mode 100644 index 0000000..b2a8f4c --- /dev/null +++ b/SOURCES/0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch @@ -0,0 +1,228 @@ +From 3ddf12d0710e048878fcf8786d05efe18710c74c Mon Sep 17 00:00:00 2001 +From: karthik-us +Date: Fri, 12 Jul 2019 16:44:20 +0530 +Subject: [PATCH 232/255] cluster/afr: Fix incorrect reporting of gfid & type + mismatch + +Backport of: https://review.gluster.org/#/c/glusterfs/+/22908/ + +Problems: +1. When checking for type and gfid mismatch, if the type or gfid +is unknown because of missing gfid handle and the gfid xattr +it will be reported as type or gfid mismatch and the heal will +not complete. + +2. If the source selected during entry heal has null gfid the same +will be sent to afr_lookup_and_heal_gfid(). In this function when +we try to assign the gfid on the bricks where it does not exist, +we are considering the same gfid and try to assign that on those +bricks. This will fail in posix_gfid_set() since the gfid sent +is null. + +Fix: +If the gfid sent to afr_lookup_and_heal_gfid() is null choose a +valid gfid before proceeding to assign the gfid on the bricks +where it is missing. + +In afr_selfheal_detect_gfid_and_type_mismatch(), do not report +type/gfid mismatch if the type/gfid is unknown or not set. 
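
In outline, the guard this change adds to afr_selfheal_detect_gfid_and_type_mismatch()
reads as follows (a simplified sketch of the afr-self-heal-entry.c hunk further down,
with explanatory comments added; not the literal diff):

    /* for each brick's lookup reply ... */
    if (replies[i].op_ret != 0)
        continue;

    /* An unknown identity, i.e. a missing gfid xattr or an unresolved
     * (IA_INVAL) type, is skipped instead of being reported as a mismatch. */
    if (gf_uuid_is_null(replies[i].poststat.ia_gfid))
        continue;
    if (replies[i].poststat.ia_type == IA_INVAL)
        continue;

    /* The first reply with a valid gfid and type becomes the source that
     * the remaining bricks are compared against. */
    if (ia_type == IA_INVAL || gf_uuid_is_null(gfid)) {
        src_idx = i;
        ia_type = replies[src_idx].poststat.ia_type;
        gfid = &replies[src_idx].poststat.ia_gfid;
        continue;
    }

    /* ... the gfid/type comparison against the source proceeds as before */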
+ +Change-Id: Icdb4967c09a48e0a3a64ce4948d5fb0a06d7a7af +fixes: bz#1715447 +Signed-off-by: karthik-us +Reviewed-on: https://code.engineering.redhat.com/gerrit/175966 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + .../bug-1722507-type-mismatch-error-handling.t | 116 +++++++++++++++++++++ + xlators/cluster/afr/src/afr-self-heal-common.c | 12 ++- + xlators/cluster/afr/src/afr-self-heal-entry.c | 13 +++ + 3 files changed, 139 insertions(+), 2 deletions(-) + create mode 100644 tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t + +diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t +new file mode 100644 +index 0000000..0aeaaaf +--- /dev/null ++++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t +@@ -0,0 +1,116 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++. $(dirname $0)/../../afr.rc ++ ++cleanup; ++ ++## Start and create a volume ++TEST glusterd; ++TEST pidof glusterd; ++TEST $CLI volume info; ++ ++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}; ++TEST $CLI volume start $V0; ++TEST $CLI volume set $V0 cluster.heal-timeout 5 ++TEST $CLI volume heal $V0 disable ++EXPECT 'Started' volinfo_field $V0 'Status'; ++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 ++ ++TEST mkdir $M0/dir ++ ++########################################################################################## ++# GFID link file and the GFID is missing on one brick and all the bricks are being blamed. ++ ++TEST touch $M0/dir/file ++#TEST kill_brick $V0 $H0 $B0/$V0"1" ++ ++#B0 and B2 must blame B1 ++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir ++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir ++setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir ++ ++# Add entry to xattrop dir to trigger index heal. ++xattrop_dir0=$(afr_get_index_path $B0/$V0"0") ++base_entry_b0=`ls $xattrop_dir0` ++gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/)) ++ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str ++EXPECT "^1$" get_pending_heal_count $V0 ++ ++# Remove the gfid xattr and the link file on one brick. ++gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file) ++gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file) ++TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file ++TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++ ++# Launch heal ++TEST $CLI volume heal $V0 enable ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2 ++ ++# Wait for 2 second to force posix to consider that this is a valid file but ++# without gfid. ++sleep 2 ++TEST $CLI volume heal $V0 ++ ++# Heal should not fail as the file is missing gfid xattr and the link file, ++# which is not actually the gfid or type mismatch. 
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 ++ ++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file ++TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++rm -f $M0/dir/file ++ ++ ++########################################################################################### ++# GFID link file and the GFID is missing on two bricks and all the bricks are being blamed. ++ ++TEST $CLI volume heal $V0 disable ++TEST touch $M0/dir/file ++#TEST kill_brick $V0 $H0 $B0/$V0"1" ++ ++#B0 and B2 must blame B1 ++setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir ++setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir ++setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir ++ ++# Add entry to xattrop dir to trigger index heal. ++xattrop_dir0=$(afr_get_index_path $B0/$V0"0") ++base_entry_b0=`ls $xattrop_dir0` ++gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/)) ++ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str ++EXPECT "^1$" get_pending_heal_count $V0 ++ ++# Remove the gfid xattr and the link file on two bricks. ++gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file) ++gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file) ++TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file ++TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file ++TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++ ++# Launch heal ++TEST $CLI volume heal $V0 enable ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2 ++ ++# Wait for 2 second to force posix to consider that this is a valid file but ++# without gfid. ++sleep 2 ++TEST $CLI volume heal $V0 ++ ++# Heal should not fail as the file is missing gfid xattr and the link file, ++# which is not actually the gfid or type mismatch. ++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 ++ ++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file ++TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file ++TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file ++ ++cleanup +diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c +index 5157e7d..b38085a 100644 +--- a/xlators/cluster/afr/src/afr-self-heal-common.c ++++ b/xlators/cluster/afr/src/afr-self-heal-common.c +@@ -55,7 +55,8 @@ afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name, + for (i = 0; i < priv->child_count; i++) { + if (source == -1) { + /* case (a) above. */ +- if (replies[i].valid && replies[i].op_ret == 0) { ++ if (replies[i].valid && replies[i].op_ret == 0 && ++ replies[i].poststat.ia_type != IA_INVAL) { + ia_type = replies[i].poststat.ia_type; + break; + } +@@ -63,7 +64,8 @@ afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name, + /* case (b) above. 
*/ + if (i == source) + continue; +- if (sources[i] && replies[i].valid && replies[i].op_ret == 0) { ++ if (sources[i] && replies[i].valid && replies[i].op_ret == 0 && ++ replies[i].poststat.ia_type != IA_INVAL) { + ia_type = replies[i].poststat.ia_type; + break; + } +@@ -77,6 +79,12 @@ heal: + for (i = 0; i < priv->child_count; i++) { + if (!replies[i].valid || replies[i].op_ret != 0) + continue; ++ ++ if (gf_uuid_is_null(gfid) && ++ !gf_uuid_is_null(replies[i].poststat.ia_gfid) && ++ replies[i].poststat.ia_type == ia_type) ++ gfid = replies[i].poststat.ia_gfid; ++ + if (!gf_uuid_is_null(replies[i].poststat.ia_gfid) || + replies[i].poststat.ia_type != ia_type) + continue; +diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c +index a6890fa..e07b521 100644 +--- a/xlators/cluster/afr/src/afr-self-heal-entry.c ++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c +@@ -246,6 +246,19 @@ afr_selfheal_detect_gfid_and_type_mismatch(xlator_t *this, + if (replies[i].op_ret != 0) + continue; + ++ if (gf_uuid_is_null(replies[i].poststat.ia_gfid)) ++ continue; ++ ++ if (replies[i].poststat.ia_type == IA_INVAL) ++ continue; ++ ++ if (ia_type == IA_INVAL || gf_uuid_is_null(gfid)) { ++ src_idx = i; ++ ia_type = replies[src_idx].poststat.ia_type; ++ gfid = &replies[src_idx].poststat.ia_gfid; ++ continue; ++ } ++ + if (gf_uuid_compare(gfid, replies[i].poststat.ia_gfid) && + (ia_type == replies[i].poststat.ia_type)) { + ret = afr_gfid_split_brain_source(this, replies, inode, pargfid, +-- +1.8.3.1 + diff --git a/SOURCES/0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch b/SOURCES/0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch new file mode 100644 index 0000000..d8e6933 --- /dev/null +++ b/SOURCES/0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch @@ -0,0 +1,78 @@ +From 5c85ce7363b658bc8fa643742626109efe3ade0c Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:44:04 +0530 +Subject: [PATCH 233/255] Revert "graph/shd: Use glusterfs_graph_deactivate to + free the xl rec" + +This reverts commit 8cc6d8af00303c445b94715c92fe9e3e01edb867. 
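
For orientation: this revert and patch 0231 above reshape the same teardown path
in libglusterfs/src/graph.c. Once both are applied, glusterfs_graph_cleanup() is
back to roughly the following shape (simplified from the graph.c hunks in the two
patches; not the literal code):

    pthread_mutex_lock(&ctx->cleanup_lock);
    {
        /* Patch 0231 put glusterfs_graph_deactivate() here; this patch
         * restores glusterfs_graph_fini(). */
        glusterfs_graph_fini(graph);
        glusterfs_graph_destroy(graph);
    }
    pthread_mutex_unlock(&ctx->cleanup_lock);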
+ +BUG: 1471742 +Change-Id: Ib90fe89b85f4143db29702338decec76c83872bc +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175942 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 2 +- + libglusterfs/src/xlator.c | 9 +-------- + xlators/features/shard/src/shard.c | 3 --- + 3 files changed, 2 insertions(+), 12 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 5b95fd6..27d9335 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1394,7 +1394,7 @@ glusterfs_graph_cleanup(void *arg) + + pthread_mutex_lock(&ctx->cleanup_lock); + { +- glusterfs_graph_deactivate(graph); ++ glusterfs_graph_fini(graph); + glusterfs_graph_destroy(graph); + } + pthread_mutex_unlock(&ctx->cleanup_lock); +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index d9d3441..71e1ed4 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -659,7 +659,6 @@ xlator_fini_rec(xlator_t *xl) + trav = trav->next; + } + +- xl->cleanup_starting = 1; + if (xl->init_succeeded) { + if (xl->fini) { + old_THIS = THIS; +@@ -667,14 +666,8 @@ xlator_fini_rec(xlator_t *xl) + + xl->fini(xl); + +- if (xl->local_pool) { ++ if (xl->local_pool) + mem_pool_destroy(xl->local_pool); +- xl->local_pool = NULL; +- } +- if (xl->itable) { +- inode_table_destroy(xl->itable); +- xl->itable = NULL; +- } + + THIS = old_THIS; + } else { +diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c +index 31c7eec..b248767 100644 +--- a/xlators/features/shard/src/shard.c ++++ b/xlators/features/shard/src/shard.c +@@ -6785,9 +6785,6 @@ fini(xlator_t *this) + + GF_VALIDATE_OR_GOTO("shard", this, out); + +- /*Itable was not created by shard, hence setting to NULL.*/ +- this->itable = NULL; +- + mem_pool_destroy(this->local_pool); + this->local_pool = NULL; + +-- +1.8.3.1 + diff --git a/SOURCES/0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch b/SOURCES/0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch new file mode 100644 index 0000000..790d9d1 --- /dev/null +++ b/SOURCES/0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch @@ -0,0 +1,220 @@ +From feeee9a35c1219b2077ea07b6fd80976960bd181 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:44:42 +0530 +Subject: [PATCH 234/255] Revert "glusterd/shd: Change shd logfile to a unique + name" + +This reverts commit 541e1400ecaec5fea0f56e8ca18f00c229906d8a. 
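
For reference, the net effect of this revert on the shd log location, sketched
from the snprintf calls in the hunks below (assuming DEFAULT_LOG_FILE_DIRECTORY
expands to /var/log/glusterfs, the usual build default):

    /* With this revert applied: one log directory and file per volume. */
    glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir));
    glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile));
    /* logfile -> /var/log/glusterfs/shd/<volname>/shd.log */

    /* With the reverted commit in place: one shared log for the multiplexed
     * daemon, named after shd_svc_name ("glustershd"). */
    glusterd_svc_build_logfile_path(shd_svc_name, DEFAULT_LOG_FILE_DIRECTORY,
                                    logfile, sizeof(logfile));
    /* logfile -> /var/log/glusterfs/glustershd.log */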
+ +BUG: 1471742 +Change-Id: I7e0371d77db6897981f7364c04d4b9b523b865ba +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175943 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 12 ++++++++ + .../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 6 ++++ + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 14 ++++----- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 34 +++++----------------- + xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c | 4 +-- + xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h | 4 --- + 6 files changed, 34 insertions(+), 40 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +index 57ceda9..9196758 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +@@ -75,6 +75,18 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path, + } + + void ++glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len) ++{ ++ snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname); ++} ++ ++void ++glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len) ++{ ++ snprintf(logfile, len, "%s/shd.log", logdir); ++} ++ ++void + glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd) + { + glusterd_svc_proc_t *svc_proc = NULL; +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +index 59466ec..c70702c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +@@ -27,6 +27,12 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path, + int path_len); + + void ++glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len); ++ ++void ++glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len); ++ ++void + glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd); + + int +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 8ad90a9..dbe2560 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -90,8 +90,8 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, + GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv); + glusterd_svc_create_rundir(rundir); + +- glusterd_svc_build_logfile_path(shd_svc_name, DEFAULT_LOG_FILE_DIRECTORY, +- logfile, sizeof(logfile)); ++ glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir)); ++ glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile)); + + /* Initialize the connection mgmt */ + if (mux_conn && mux_svc->rpc) { +@@ -104,7 +104,7 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, + if (ret < 0) + goto out; + } else { +- ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY, 0755, _gf_true); ++ ret = mkdir_p(logdir, 0755, _gf_true); + if ((ret == -1) && (EEXIST != errno)) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, + "Unable to create logdir %s", logdir); +@@ -460,7 +460,6 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) + return -1; + + glusterd_volinfo_ref(volinfo); +- + if (!svc->inited) { + ret = glusterd_shd_svc_mux_init(volinfo, svc); + if (ret) +@@ -472,11 +471,12 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) + /* Unref will happen from glusterd_svc_attach_cbk */ + ret = 
glusterd_attach_svc(svc, volinfo, flags); + if (ret) { ++ glusterd_volinfo_unref(volinfo); + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to attach shd svc(volume=%s) to pid=%d", ++ "Failed to attach shd svc(volume=%s) to pid=%d. Starting" ++ "a new process", + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- glusterd_shd_svcproc_cleanup(&volinfo->shd); +- glusterd_volinfo_unref(volinfo); ++ ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags); + } + goto out; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index 400826f..a6e662f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -469,9 +469,6 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + glusterd_conf_t *conf = NULL; + glusterd_svc_t *parent_svc = NULL; + int pid = -1; +- char pidfile[PATH_MAX] = { +- 0, +- }; + + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); +@@ -481,26 +478,8 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + + pthread_mutex_lock(&conf->attach_lock); + { +- if (svc->inited && !glusterd_proc_is_running(&(svc->proc))) { +- /* This is the case when shd process was abnormally killed */ +- pthread_mutex_unlock(&conf->attach_lock); +- glusterd_shd_svcproc_cleanup(&volinfo->shd); +- pthread_mutex_lock(&conf->attach_lock); +- } +- + if (!svc->inited) { +- glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile)); +- ret = snprintf(svc->proc.name, sizeof(svc->proc.name), "%s", +- "glustershd"); +- if (ret < 0) +- goto unlock; +- +- ret = snprintf(svc->proc.pidfile, sizeof(svc->proc.pidfile), "%s", +- pidfile); +- if (ret < 0) +- goto unlock; +- +- if (gf_is_service_running(pidfile, &pid)) { ++ if (gf_is_service_running(svc->proc.pidfile, &pid)) { + /* Just connect is required, but we don't know what happens + * during the disconnect. So better to reattach. + */ +@@ -508,10 +487,10 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + } + + if (!mux_proc) { +- if (pid != -1 && sys_access(pidfile, R_OK) == 0) { ++ if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) { + /* stale pid file, stop and unlink it */ + glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE); +- glusterd_unlink_file(pidfile); ++ glusterd_unlink_file(svc->proc.pidfile); + } + mux_proc = __gf_find_compatible_svc(GD_NODE_SHD); + } +@@ -705,10 +684,11 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL, +- "svc %s of volume %s failed to attach to pid %d", svc->name, +- volinfo->volname, glusterd_proc_get_pid(&svc->proc)); ++ "svc %s of volume %s failed to " ++ "attach to pid %d. 
Starting a new process", ++ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc)); + if (!strcmp(svc->name, "glustershd")) { +- glusterd_shd_svcproc_cleanup(&volinfo->shd); ++ glusterd_recover_shd_attach_failure(volinfo, svc, *flag); + } + } + out: +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c +index fa316a6..f32dafc 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c +@@ -33,14 +33,14 @@ glusterd_svc_create_rundir(char *rundir) + return ret; + } + +-void ++static void + glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile, + size_t len) + { + snprintf(logfile, len, "%s/%s.log", logdir, server); + } + +-void ++static void + glusterd_svc_build_volfileid_path(char *server, char *volfileid, size_t len) + { + snprintf(volfileid, len, "gluster/%s", server); +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +index 5a5466a..fbc5225 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +@@ -74,10 +74,6 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile, + size_t len); + + void +-glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile, +- size_t len); +- +-void + glusterd_svc_build_svcdir(char *server, char *workdir, char *path, size_t len); + + void +-- +1.8.3.1 + diff --git a/SOURCES/0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch b/SOURCES/0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch new file mode 100644 index 0000000..67348f6 --- /dev/null +++ b/SOURCES/0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch @@ -0,0 +1,38 @@ +From b2040d8404e0ac44742cb903e3c8da2c832b2925 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:45:11 +0530 +Subject: [PATCH 235/255] Revert "glusterd/svc: Stop stale process using the + glusterd_proc_stop" + +This reverts commit fe9159ee42f0f67b01e6a495df8105ea0f66738d. + +BUG: 1471742 +Change-Id: Id5ac0d21319724141ad9bcb9b66435803ebe5f47 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175944 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index a6e662f..6a3ca52 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -488,9 +488,9 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) + + if (!mux_proc) { + if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) { +- /* stale pid file, stop and unlink it */ +- glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE); +- glusterd_unlink_file(svc->proc.pidfile); ++ /* stale pid file, unlink it. 
*/ ++ kill(pid, SIGTERM); ++ sys_unlink(svc->proc.pidfile); + } + mux_proc = __gf_find_compatible_svc(GD_NODE_SHD); + } +-- +1.8.3.1 + diff --git a/SOURCES/0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch b/SOURCES/0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch new file mode 100644 index 0000000..e33c7dd --- /dev/null +++ b/SOURCES/0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch @@ -0,0 +1,35 @@ +From 030b5681d47268c591a72035d5a2419234bd1f5f Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:44:55 +0530 +Subject: [PATCH 236/255] Revert "shd/mux: Fix race between mux_proc unlink and + stop" + +This reverts commit e386fb4f4baf834e6a8fc25cc2fbbb17eb0a7a56. + +BUG: 1471742 +Change-Id: I6c52835981389fc5bfeb43483feb581ad8507990 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175945 +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +Tested-by: RHGS Build Bot +--- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index dbe2560..d81d760 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -694,9 +694,6 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig) + gf_is_service_running(svc->proc.pidfile, &pid); + cds_list_del_init(&svc->mux_svc); + empty = cds_list_empty(&svc_proc->svcs); +- if (empty) { +- cds_list_del_init(&svc_proc->svc_proc_list); +- } + } + pthread_mutex_unlock(&conf->attach_lock); + if (empty) { +-- +1.8.3.1 + diff --git a/SOURCES/0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch b/SOURCES/0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch new file mode 100644 index 0000000..6c88d6a --- /dev/null +++ b/SOURCES/0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch @@ -0,0 +1,227 @@ +From f0c3af09fd919e3646aae2821b0d6bfe4e2fd89c Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:45:58 +0530 +Subject: [PATCH 237/255] Revert "ec/fini: Fix race between xlator cleanup and + on going async fop" + +This reverts commit 9fd966aa6879ac9867381629f82eca24b950d731. 
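
The mechanism this revert removes tied graph teardown to an atomic count of
in-flight asynchronous operations. In outline, the helper deleted from
ec-common.c below (verbatim apart from the added comment):

    gf_boolean_t
    __ec_is_last_fop(ec_t *ec)
    {
        /* Teardown may proceed only when no fop is queued and no async
         * fop, such as a replace-brick heal, is still in flight. */
        if ((list_empty(&ec->pending_fops)) &&
            (GF_ATOMIC_GET(ec->async_fop_count) == 0)) {
            return _gf_true;
        }
        return _gf_false;
    }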
+ +BUG: 1471742 +Change-Id: I557ec138174b01d8b8f8d090acd34c179e2c632d +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175946 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.c | 10 ---------- + xlators/cluster/ec/src/ec-common.h | 2 -- + xlators/cluster/ec/src/ec-data.c | 4 +--- + xlators/cluster/ec/src/ec-heal.c | 17 ++--------------- + xlators/cluster/ec/src/ec-types.h | 1 - + xlators/cluster/ec/src/ec.c | 37 ++++++++++++------------------------- + 6 files changed, 15 insertions(+), 56 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index 35c2256..e2e582f 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -2956,13 +2956,3 @@ ec_manager(ec_fop_data_t *fop, int32_t error) + + __ec_manager(fop, error); + } +- +-gf_boolean_t +-__ec_is_last_fop(ec_t *ec) +-{ +- if ((list_empty(&ec->pending_fops)) && +- (GF_ATOMIC_GET(ec->async_fop_count) == 0)) { +- return _gf_true; +- } +- return _gf_false; +-} +diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h +index bf6c97d..e948342 100644 +--- a/xlators/cluster/ec/src/ec-common.h ++++ b/xlators/cluster/ec/src/ec-common.h +@@ -204,6 +204,4 @@ void + ec_reset_entry_healing(ec_fop_data_t *fop); + char * + ec_msg_str(ec_fop_data_t *fop); +-gf_boolean_t +-__ec_is_last_fop(ec_t *ec); + #endif /* __EC_COMMON_H__ */ +diff --git a/xlators/cluster/ec/src/ec-data.c b/xlators/cluster/ec/src/ec-data.c +index 8d2d9a1..6ef9340 100644 +--- a/xlators/cluster/ec/src/ec-data.c ++++ b/xlators/cluster/ec/src/ec-data.c +@@ -202,13 +202,11 @@ ec_handle_last_pending_fop_completion(ec_fop_data_t *fop, gf_boolean_t *notify) + { + ec_t *ec = fop->xl->private; + +- *notify = _gf_false; +- + if (!list_empty(&fop->pending_list)) { + LOCK(&ec->lock); + { + list_del_init(&fop->pending_list); +- *notify = __ec_is_last_fop(ec); ++ *notify = list_empty(&ec->pending_fops); + } + UNLOCK(&ec->lock); + } +diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c +index 237fea2..8844c29 100644 +--- a/xlators/cluster/ec/src/ec-heal.c ++++ b/xlators/cluster/ec/src/ec-heal.c +@@ -2814,20 +2814,8 @@ int + ec_replace_heal_done(int ret, call_frame_t *heal, void *opaque) + { + ec_t *ec = opaque; +- gf_boolean_t last_fop = _gf_false; + +- if (GF_ATOMIC_DEC(ec->async_fop_count) == 0) { +- LOCK(&ec->lock); +- { +- last_fop = __ec_is_last_fop(ec); +- } +- UNLOCK(&ec->lock); +- } + gf_msg_debug(ec->xl->name, 0, "getxattr on bricks is done ret %d", ret); +- +- if (last_fop) +- ec_pending_fops_completed(ec); +- + return 0; + } + +@@ -2881,15 +2869,14 @@ ec_launch_replace_heal(ec_t *ec) + { + int ret = -1; + ++ if (!ec) ++ return ret; + ret = synctask_new(ec->xl->ctx->env, ec_replace_brick_heal_wrap, + ec_replace_heal_done, NULL, ec); +- + if (ret < 0) { + gf_msg_debug(ec->xl->name, 0, "Heal failed for replace brick ret = %d", + ret); +- ec_replace_heal_done(-1, NULL, ec); + } +- + return ret; + } + +diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h +index 4dbf4a3..1c295c0 100644 +--- a/xlators/cluster/ec/src/ec-types.h ++++ b/xlators/cluster/ec/src/ec-types.h +@@ -643,7 +643,6 @@ struct _ec { + uintptr_t xl_notify; /* Bit flag representing + notification for bricks. */ + uintptr_t node_mask; +- gf_atomic_t async_fop_count; /* Number of on going asynchronous fops. 
*/ + xlator_t **xl_list; + gf_lock_t lock; + gf_timer_t *timer; +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index f0d58c0..df5912c 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -355,7 +355,6 @@ ec_notify_cbk(void *data) + ec_t *ec = data; + glusterfs_event_t event = GF_EVENT_MAXVAL; + gf_boolean_t propagate = _gf_false; +- gf_boolean_t launch_heal = _gf_false; + + LOCK(&ec->lock); + { +@@ -385,11 +384,6 @@ ec_notify_cbk(void *data) + * still bricks DOWN, they will be healed when they + * come up. */ + ec_up(ec->xl, ec); +- +- if (ec->shd.iamshd && !ec->shutdown) { +- launch_heal = _gf_true; +- GF_ATOMIC_INC(ec->async_fop_count); +- } + } + + propagate = _gf_true; +@@ -397,12 +391,13 @@ ec_notify_cbk(void *data) + unlock: + UNLOCK(&ec->lock); + +- if (launch_heal) { +- /* We have just brought the volume UP, so we trigger +- * a self-heal check on the root directory. */ +- ec_launch_replace_heal(ec); +- } + if (propagate) { ++ if ((event == GF_EVENT_CHILD_UP) && ec->shd.iamshd) { ++ /* We have just brought the volume UP, so we trigger ++ * a self-heal check on the root directory. */ ++ ec_launch_replace_heal(ec); ++ } ++ + default_notify(ec->xl, event, NULL); + } + } +@@ -430,7 +425,7 @@ ec_disable_delays(ec_t *ec) + { + ec->shutdown = _gf_true; + +- return __ec_is_last_fop(ec); ++ return list_empty(&ec->pending_fops); + } + + void +@@ -608,10 +603,7 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2) + if (event == GF_EVENT_CHILD_UP) { + /* We need to trigger a selfheal if a brick changes + * to UP state. */ +- if (ec_set_up_state(ec, mask, mask) && ec->shd.iamshd && +- !ec->shutdown) { +- needs_shd_check = _gf_true; +- } ++ needs_shd_check = ec_set_up_state(ec, mask, mask); + } else if (event == GF_EVENT_CHILD_DOWN) { + ec_set_up_state(ec, mask, 0); + } +@@ -641,21 +633,17 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2) + } + } else { + propagate = _gf_false; +- needs_shd_check = _gf_false; +- } +- +- if (needs_shd_check) { +- GF_ATOMIC_INC(ec->async_fop_count); + } + } + unlock: + UNLOCK(&ec->lock); + + done: +- if (needs_shd_check) { +- ec_launch_replace_heal(ec); +- } + if (propagate) { ++ if (needs_shd_check && ec->shd.iamshd) { ++ ec_launch_replace_heal(ec); ++ } ++ + error = default_notify(this, event, data); + } + +@@ -717,7 +705,6 @@ init(xlator_t *this) + ec->xl = this; + LOCK_INIT(&ec->lock); + +- GF_ATOMIC_INIT(ec->async_fop_count, 0); + INIT_LIST_HEAD(&ec->pending_fops); + INIT_LIST_HEAD(&ec->heal_waiting); + INIT_LIST_HEAD(&ec->healing); +-- +1.8.3.1 + diff --git a/SOURCES/0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch b/SOURCES/0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch new file mode 100644 index 0000000..0514cd1 --- /dev/null +++ b/SOURCES/0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch @@ -0,0 +1,47 @@ +From 96072cea4da1c2ba5bd87307f20b3ee2cbe6f63d Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:46:10 +0530 +Subject: [PATCH 238/255] Revert "xlator/log: Add more logging in + xlator_is_cleanup_starting" + +This reverts commit 9b94397a5a735910fab2a29670146a1feb6d890e. 
+ +BUG: 1471742 +Change-Id: Icc3f0c8741ed780e265202adbb009063f657c7f7 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175947 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/xlator.c | 12 +++--------- + 1 file changed, 3 insertions(+), 9 deletions(-) + +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index 71e1ed4..fbfbbe2 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -1494,18 +1494,12 @@ xlator_is_cleanup_starting(xlator_t *this) + glusterfs_graph_t *graph = NULL; + xlator_t *xl = NULL; + +- if (!this) { +- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG, +- "xlator object is null, returning false"); ++ if (!this) + goto out; +- } +- + graph = this->graph; +- if (!graph) { +- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG, +- "Graph is not set for xlator %s", this->name); ++ ++ if (!graph) + goto out; +- } + + xl = graph->first; + if (xl && xl->cleanup_starting) +-- +1.8.3.1 + diff --git a/SOURCES/0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch b/SOURCES/0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch new file mode 100644 index 0000000..f36c997 --- /dev/null +++ b/SOURCES/0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch @@ -0,0 +1,128 @@ +From ad40c0783e84e5e54a83aeb20a52f720cc881b0c Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:46:22 +0530 +Subject: [PATCH 239/255] Revert "ec/fini: Fix race with ec_fini and ec_notify" + +This reverts commit 998d9b8b5e271f407e1c654c34f45f0db36abc71. + +BUG: 1471742 +Change-Id: Ifccb8a22d9ef96c22b32dcb4b82bf4d21cf85484 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175948 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/glusterfs/xlator.h | 3 --- + libglusterfs/src/libglusterfs.sym | 1 - + libglusterfs/src/xlator.c | 21 --------------------- + xlators/cluster/ec/src/ec-heal.c | 4 ---- + xlators/cluster/ec/src/ec-heald.c | 6 ------ + xlators/cluster/ec/src/ec.c | 3 --- + 6 files changed, 38 deletions(-) + +diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h +index 09e463e..8998976 100644 +--- a/libglusterfs/src/glusterfs/xlator.h ++++ b/libglusterfs/src/glusterfs/xlator.h +@@ -1092,7 +1092,4 @@ gluster_graph_take_reference(xlator_t *tree); + + gf_boolean_t + mgmt_is_multiplexed_daemon(char *name); +- +-gf_boolean_t +-xlator_is_cleanup_starting(xlator_t *this); + #endif /* _XLATOR_H */ +diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym +index 86215d2..05f93b4 100644 +--- a/libglusterfs/src/libglusterfs.sym ++++ b/libglusterfs/src/libglusterfs.sym +@@ -1160,4 +1160,3 @@ glusterfs_process_svc_attach_volfp + glusterfs_mux_volfile_reconfigure + glusterfs_process_svc_detach + mgmt_is_multiplexed_daemon +-xlator_is_cleanup_starting +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index fbfbbe2..022c3ed 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -1486,24 +1486,3 @@ mgmt_is_multiplexed_daemon(char *name) + } + return _gf_false; + } +- +-gf_boolean_t +-xlator_is_cleanup_starting(xlator_t *this) +-{ +- gf_boolean_t cleanup = _gf_false; +- glusterfs_graph_t *graph = NULL; +- xlator_t *xl = NULL; +- +- if (!this) +- goto out; +- graph = this->graph; +- +- if (!graph) +- goto out; +- +- xl = graph->first; +- if (xl && 
xl->cleanup_starting) +- cleanup = _gf_true; +-out: +- return cleanup; +-} +diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c +index 8844c29..2fa1f11 100644 +--- a/xlators/cluster/ec/src/ec-heal.c ++++ b/xlators/cluster/ec/src/ec-heal.c +@@ -2855,10 +2855,6 @@ ec_replace_brick_heal_wrap(void *opaque) + itable = ec->xl->itable; + else + goto out; +- +- if (xlator_is_cleanup_starting(ec->xl)) +- goto out; +- + ret = ec_replace_heal(ec, itable->root); + out: + return ret; +diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c +index 91512d7..edf5e11 100644 +--- a/xlators/cluster/ec/src/ec-heald.c ++++ b/xlators/cluster/ec/src/ec-heald.c +@@ -444,9 +444,6 @@ unlock: + int + ec_shd_full_healer_spawn(xlator_t *this, int subvol) + { +- if (xlator_is_cleanup_starting(this)) +- return -1; +- + return ec_shd_healer_spawn(this, NTH_FULL_HEALER(this, subvol), + ec_shd_full_healer); + } +@@ -454,9 +451,6 @@ ec_shd_full_healer_spawn(xlator_t *this, int subvol) + int + ec_shd_index_healer_spawn(xlator_t *this, int subvol) + { +- if (xlator_is_cleanup_starting(this)) +- return -1; +- + return ec_shd_healer_spawn(this, NTH_INDEX_HEALER(this, subvol), + ec_shd_index_healer); + } +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index df5912c..264582a 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -486,9 +486,6 @@ ec_set_up_state(ec_t *ec, uintptr_t index_mask, uintptr_t new_state) + { + uintptr_t current_state = 0; + +- if (xlator_is_cleanup_starting(ec->xl)) +- return _gf_false; +- + if ((ec->xl_notify & index_mask) == 0) { + ec->xl_notify |= index_mask; + ec->xl_notify_count++; +-- +1.8.3.1 + diff --git a/SOURCES/0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch b/SOURCES/0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch new file mode 100644 index 0000000..54ef75e --- /dev/null +++ b/SOURCES/0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch @@ -0,0 +1,54 @@ +From 9b3adb28207681f49ea97fc2c473634ff0f73db6 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:46:35 +0530 +Subject: [PATCH 240/255] Revert "glusterd/shd: Optimize the glustershd manager + to send reconfigure" + +This reverts commit 321080e55f0ae97115a9542ba5de8494e7610860. 
+ +BUG: 1471742 +Change-Id: I5fa84baa3c3e72ca8eb605c7f1fafb53c68859f9 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175949 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 1 + + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 9 ++++----- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 27d9335..18fb2d9 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1497,6 +1497,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); + parent_graph->xl_count -= graph->xl_count; + parent_graph->leaf_count -= graph->leaf_count; ++ default_notify(xl, GF_EVENT_PARENT_DOWN, xl); + parent_graph->id++; + ret = 0; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index d81d760..981cc87 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -311,11 +311,10 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + */ + ret = svc->stop(svc, SIGTERM); + } else if (volinfo) { +- if (volinfo->status != GLUSTERD_STATUS_STARTED) { +- ret = svc->stop(svc, SIGTERM); +- if (ret) +- goto out; +- } ++ ret = svc->stop(svc, SIGTERM); ++ if (ret) ++ goto out; ++ + if (volinfo->status == GLUSTERD_STATUS_STARTED) { + ret = svc->start(svc, flags); + if (ret) +-- +1.8.3.1 + diff --git a/SOURCES/0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch b/SOURCES/0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch new file mode 100644 index 0000000..965fcfe --- /dev/null +++ b/SOURCES/0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch @@ -0,0 +1,82 @@ +From 066189add979d2e4c74463592e5021bd060d5a51 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:46:47 +0530 +Subject: [PATCH 241/255] Revert "glusterd/svc: glusterd_svcs_stop should call + individual wrapper function" + +This reverts commit 79fff98f9ca5f815cf0227312b9a997d555dad29. + +BUG: 1471742 +Change-Id: I258040ed9be6bc3b4498c76ed51d59258c55acff +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175950 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 12 ++---------- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 10 +++++----- + 2 files changed, 7 insertions(+), 15 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 981cc87..75f9a07 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -656,18 +656,10 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig) + int pid = -1; + + conf = THIS->private; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + svc_proc = svc->svc_proc; +- if (!svc_proc) { +- /* +- * This can happen when stop was called on a volume that is not shd +- * compatible. 
+- */ +- gf_msg_debug("glusterd", 0, "svc_proc is null, ie shd already stopped"); +- ret = 0; +- goto out; +- } ++ GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out); ++ GF_VALIDATE_OR_GOTO("glusterd", conf, out); + + /* Get volinfo->shd from svc object */ + shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index 6a3ca52..f7be394 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -86,25 +86,25 @@ glusterd_svcs_stop(glusterd_volinfo_t *volinfo) + priv = this->private; + GF_ASSERT(priv); + +- ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL); ++ ret = glusterd_svc_stop(&(priv->nfs_svc), SIGKILL); + if (ret) + goto out; + +- ret = priv->quotad_svc.stop(&(priv->quotad_svc), SIGTERM); ++ ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM); + if (ret) + goto out; + + if (volinfo) { +- ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM); ++ ret = glusterd_svc_stop(&(volinfo->shd.svc), PROC_START_NO_WAIT); + if (ret) + goto out; + } + +- ret = priv->bitd_svc.stop(&(priv->bitd_svc), SIGTERM); ++ ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM); + if (ret) + goto out; + +- ret = priv->scrub_svc.stop(&(priv->scrub_svc), SIGTERM); ++ ret = glusterd_svc_stop(&(priv->scrub_svc), SIGTERM); + out: + return ret; + } +-- +1.8.3.1 + diff --git a/SOURCES/0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch b/SOURCES/0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch new file mode 100644 index 0000000..2174063 --- /dev/null +++ b/SOURCES/0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch @@ -0,0 +1,427 @@ +From 48f7be493588fdf5e99dff0c3b91327e07da05f3 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:48:34 +0530 +Subject: [PATCH 242/255] Revert "tests/shd: Add test coverage for shd mux" + +This reverts commit b7f832288d2d2e57231d90765afc049ad7cb2f9d. + +BUG: 1471742 +Change-Id: Ifccac5150f07b98006714e43c77c5a4b1fd38cb8 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175951 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/basic/glusterd-restart-shd-mux.t | 96 --------------------- + tests/basic/shd-mux.t | 149 --------------------------------- + tests/basic/volume-scale-shd-mux.t | 112 ------------------------- + tests/volume.rc | 15 ---- + 4 files changed, 372 deletions(-) + delete mode 100644 tests/basic/glusterd-restart-shd-mux.t + delete mode 100644 tests/basic/shd-mux.t + delete mode 100644 tests/basic/volume-scale-shd-mux.t + +diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t +deleted file mode 100644 +index a50af9d..0000000 +--- a/tests/basic/glusterd-restart-shd-mux.t ++++ /dev/null +@@ -1,96 +0,0 @@ +-#!/bin/bash +- +-. $(dirname $0)/../include.rc +-. 
$(dirname $0)/../volume.rc +- +-cleanup; +- +-TESTS_EXPECTED_IN_LOOP=20 +- +-TEST glusterd +-TEST pidof glusterd +-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5} +-TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +-TEST $CLI volume set $V0 cluster.eager-lock off +-TEST $CLI volume set $V0 performance.flush-behind off +-TEST $CLI volume start $V0 +- +-for i in $(seq 1 3); do +- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_afr$i +- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_ec$i +-done +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-#Stop the glusterd +-TEST pkill glusterd +-#Only stopping glusterd, so there will be one shd +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count +-TEST glusterd +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-shd_pid=$(get_shd_mux_pid $V0) +-for i in $(seq 1 3); do +- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid" +- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path +- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid" +- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path +-done +- +-#Reboot a node scenario +-TEST pkill gluster +-#Only stopped glusterd, so there will be one shd +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count +- +-TEST glusterd +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-shd_pid=$(get_shd_mux_pid $V0) +-for i in $(seq 1 3); do +- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid" +- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path +- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid" +- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path +-done +- +-for i in $(seq 1 3); do +- TEST $CLI volume stop ${V0}_afr$i +- TEST $CLI volume stop ${V0}_ec$i +-done +- +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 +- +-TEST kill_brick $V0 $H0 $B0/${V0}0 +-TEST kill_brick $V0 $H0 $B0/${V0}3 +- +-TEST touch $M0/foo{1..100} +- +-EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0 +- +-TEST $CLI volume start ${V0} force +- +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +- +-TEST rm -rf $M0/* +-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +- +- +-TEST $CLI volume stop ${V0} +-TEST $CLI volume delete ${V0} +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count +- +-cleanup +diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t +deleted file mode 100644 +index e42a34a..0000000 +--- a/tests/basic/shd-mux.t ++++ /dev/null +@@ -1,149 +0,0 @@ +-#!/bin/bash +- +-. 
$(dirname $0)/../include.rc +-. $(dirname $0)/../volume.rc +- +-cleanup; +- +-TESTS_EXPECTED_IN_LOOP=16 +- +-TEST glusterd +-TEST pidof glusterd +-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5} +-TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +-TEST $CLI volume set $V0 cluster.eager-lock off +-TEST $CLI volume set $V0 performance.flush-behind off +-TEST $CLI volume start $V0 +-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 +- +-shd_pid=$(get_shd_mux_pid $V0) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-#Create a one more volume +-TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5} +-TEST $CLI volume start ${V0}_1 +- +-#Check whether the shd has multiplexed or not +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0} +- +-TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0 +-TEST $CLI volume set ${V0}_1 cluster.eager-lock off +-TEST $CLI volume set ${V0}_1 performance.flush-behind off +-TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1 +- +-TEST kill_brick $V0 $H0 $B0/${V0}0 +-TEST kill_brick $V0 $H0 $B0/${V0}4 +-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10 +-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14 +- +-TEST touch $M0/foo{1..100} +-TEST touch $M1/foo{1..100} +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1 +- +-TEST $CLI volume start ${V0} force +-TEST $CLI volume start ${V0}_1 force +- +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1 +- +-TEST rm -rf $M0/* +-TEST rm -rf $M1/* +-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1 +- +-#Stop the volume +-TEST $CLI volume stop ${V0}_1 +-TEST $CLI volume delete ${V0}_1 +- +-#Check the stop succeeded and detached the volume with out restarting it +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0 +- +-#Check the thread count become to earlier number after stopping +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +- +-#Now create a ec volume and check mux works +-TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5} +-TEST $CLI volume start ${V0}_2 +- +-#Check whether the shd has multiplexed or not +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0} +- +-TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0 +-TEST $CLI volume set ${V0}_2 cluster.eager-lock off +-TEST $CLI volume set ${V0}_2 performance.flush-behind off +-TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1 +- +-TEST kill_brick $V0 $H0 $B0/${V0}0 +-TEST kill_brick $V0 $H0 $B0/${V0}4 +-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20 +-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22 +- +-TEST touch $M0/foo{1..100} +-TEST touch $M1/foo{1..100} +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2 +- +-TEST $CLI volume start ${V0} force +-TEST $CLI volume start ${V0}_2 force +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" 
number_healer_threads_shd $V0 "__ec_shd_healer_wait" +- +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2 +- +-TEST rm -rf $M0/* +-TEST rm -rf $M1/* +- +- +-#Stop the volume +-TEST $CLI volume stop ${V0}_2 +-TEST $CLI volume delete ${V0}_2 +- +-#Check the stop succeeded and detached the volume with out restarting it +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0 +- +-#Check the thread count become to zero for ec related threads +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +-#Check the thread count become to earlier number after stopping +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-for i in $(seq 1 3); do +- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_afr$i +- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_ec$i +-done +- +-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18) +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24) +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +-#Delete the volumes +-for i in $(seq 1 3); do +- TEST $CLI volume stop ${V0}_afr$i +- TEST $CLI volume stop ${V0}_ec$i +- TEST $CLI volume delete ${V0}_afr$i +- TEST $CLI volume delete ${V0}_ec$i +-done +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $CLI volume stop ${V0} +-TEST $CLI volume delete ${V0} +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count +- +-cleanup +diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t +deleted file mode 100644 +index dd9cf83..0000000 +--- a/tests/basic/volume-scale-shd-mux.t ++++ /dev/null +@@ -1,112 +0,0 @@ +-#!/bin/bash +- +-. $(dirname $0)/../include.rc +-. 
$(dirname $0)/../volume.rc +- +-cleanup; +- +-TESTS_EXPECTED_IN_LOOP=6 +- +-TEST glusterd +-TEST pidof glusterd +-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5} +-TEST $CLI volume set $V0 cluster.background-self-heal-count 0 +-TEST $CLI volume set $V0 cluster.eager-lock off +-TEST $CLI volume set $V0 performance.flush-behind off +-TEST $CLI volume start $V0 +- +-for i in $(seq 1 2); do +- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_afr$i +- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5} +- TEST $CLI volume start ${V0}_ec$i +-done +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-#Check the thread count become to number of volumes*number of ec subvolume (2*6=12) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +-#Check the thread count become to number of volumes*number of afr subvolume (3*6=18) +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8}; +-#Check the thread count become to number of volumes*number of afr subvolume plus 3 additional threads from newly added bricks (3*6+3=21) +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-#Remove the brick and check the detach is successful +-$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5}; +-#Check the thread count become to number of volumes*number of ec subvolume plus 2 additional threads from newly added bricks (2*6+6=18) +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +- +-#Remove the brick and check the detach is successful +-$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait" +- +- +-for i in $(seq 1 2); do +- TEST $CLI volume stop ${V0}_afr$i +- TEST $CLI volume stop ${V0}_ec$i +-done +- +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 +- +-TEST kill_brick $V0 $H0 $B0/${V0}0 +-TEST kill_brick $V0 $H0 $B0/${V0}4 +- +-TEST touch $M0/foo{1..100} +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0 +- +-TEST $CLI volume start ${V0} force +- +-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +- +-TEST rm -rf $M0/* +-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +-shd_pid=$(get_shd_mux_pid $V0) +-TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10 +-TEST $CLI volume start ${V0}_distribute1 +- +-#Creating a non-replicate/non-ec volume should not have any effect in shd +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +-EXPECT "^${shd_pid}$" get_shd_mux_pid $V0 +- +-TEST mkdir $B0/add/ +-#Now convert the distributed volume to replicate +-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3} +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 
"__afr_shd_healer_wait" +- +-#scale down the volume +-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait" +- +-TEST $CLI volume stop ${V0} +-TEST $CLI volume delete ${V0} +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count +- +-TEST rm -rf $B0/add/ +-TEST mkdir $B0/add/ +-#Now convert the distributed volume back to replicate and make sure that a new shd is spawned +-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}; +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count +-EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait" +- +-#Now convert the replica volume to distribute again and make sure the shd is now stopped +-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force +-TEST rm -rf $B0/add/ +- +-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count +- +-cleanup +diff --git a/tests/volume.rc b/tests/volume.rc +index 6a78c37..022d972 100644 +--- a/tests/volume.rc ++++ b/tests/volume.rc +@@ -913,21 +913,6 @@ function volgen_check_ancestry { + fi + } + +-function get_shd_mux_pid { +- local volume=$1 +- pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'` +- echo $pid +-} +- +-function shd_count { +- ps aux | grep "glustershd" | grep -v grep | wc -l +-} +- +-function number_healer_threads_shd { +- local pid=$(get_shd_mux_pid $1) +- pstack $pid | grep $2 | wc -l +-} +- + function get_mtime { + local time=$(get-mdata-xattr -m $1) + if [ $time == "-1" ]; +-- +1.8.3.1 + diff --git a/SOURCES/0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch b/SOURCES/0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch new file mode 100644 index 0000000..9e918d7 --- /dev/null +++ b/SOURCES/0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch @@ -0,0 +1,154 @@ +From 4d65506ddfa0245dcaa13b14ca13b2ea762df37d Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:48:51 +0530 +Subject: [PATCH 243/255] Revert "glusterfsd/cleanup: Protect graph object + under a lock" + +This reverts commit 11b64d494c52004002f900888694d20ef8af6df6. 
+ +BUG: 1471742 +Change-Id: I2717207d87ad213722de33c24e451502ed4aff48 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175952 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 58 ++++++++++--------------- + libglusterfs/src/statedump.c | 16 ++----- + tests/bugs/glusterd/optimized-basic-testcases.t | 4 +- + 3 files changed, 28 insertions(+), 50 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 18fb2d9..4c8b02d 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1392,12 +1392,8 @@ glusterfs_graph_cleanup(void *arg) + } + pthread_mutex_unlock(&ctx->notify_lock); + +- pthread_mutex_lock(&ctx->cleanup_lock); +- { +- glusterfs_graph_fini(graph); +- glusterfs_graph_destroy(graph); +- } +- pthread_mutex_unlock(&ctx->cleanup_lock); ++ glusterfs_graph_fini(graph); ++ glusterfs_graph_destroy(graph); + out: + return NULL; + } +@@ -1472,37 +1468,31 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + + if (!ctx || !ctx->active || !volfile_obj) + goto out; ++ parent_graph = ctx->active; ++ graph = volfile_obj->graph; ++ if (!graph) ++ goto out; ++ if (graph->first) ++ xl = graph->first; + +- pthread_mutex_lock(&ctx->cleanup_lock); +- { +- parent_graph = ctx->active; +- graph = volfile_obj->graph; +- if (!graph) +- goto unlock; +- if (graph->first) +- xl = graph->first; +- +- last_xl = graph->last_xl; +- if (last_xl) +- last_xl->next = NULL; +- if (!xl || xl->cleanup_starting) +- goto unlock; ++ last_xl = graph->last_xl; ++ if (last_xl) ++ last_xl->next = NULL; ++ if (!xl || xl->cleanup_starting) ++ goto out; + +- xl->cleanup_starting = 1; +- gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED, +- "detaching child %s", volfile_obj->vol_id); ++ xl->cleanup_starting = 1; ++ gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED, ++ "detaching child %s", volfile_obj->vol_id); + +- list_del_init(&volfile_obj->volfile_list); +- glusterfs_mux_xlator_unlink(parent_graph->top, xl); +- parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); +- parent_graph->xl_count -= graph->xl_count; +- parent_graph->leaf_count -= graph->leaf_count; +- default_notify(xl, GF_EVENT_PARENT_DOWN, xl); +- parent_graph->id++; +- ret = 0; +- } +-unlock: +- pthread_mutex_unlock(&ctx->cleanup_lock); ++ list_del_init(&volfile_obj->volfile_list); ++ glusterfs_mux_xlator_unlink(parent_graph->top, xl); ++ parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); ++ parent_graph->xl_count -= graph->xl_count; ++ parent_graph->leaf_count -= graph->leaf_count; ++ default_notify(xl, GF_EVENT_PARENT_DOWN, xl); ++ parent_graph->id++; ++ ret = 0; + out: + if (!ret) { + list_del_init(&volfile_obj->volfile_list); +diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c +index 0d58f8f..0cf80c0 100644 +--- a/libglusterfs/src/statedump.c ++++ b/libglusterfs/src/statedump.c +@@ -805,17 +805,11 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx) + int brick_count = 0; + int len = 0; + ++ gf_proc_dump_lock(); ++ + if (!ctx) + goto out; + +- /* +- * Multiplexed daemons can change the active graph when attach/detach +- * is called. So this has to be protected with the cleanup lock. 
+- */ +- if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) +- pthread_mutex_lock(&ctx->cleanup_lock); +- gf_proc_dump_lock(); +- + if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) && + (ctx && ctx->active)) { + top = ctx->active->first; +@@ -929,11 +923,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx) + out: + GF_FREE(dump_options.dump_path); + dump_options.dump_path = NULL; +- if (ctx) { +- gf_proc_dump_unlock(); +- if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) +- pthread_mutex_unlock(&ctx->cleanup_lock); +- } ++ gf_proc_dump_unlock(); + + return; + } +diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t +index 110f1b9..d700b5e 100644 +--- a/tests/bugs/glusterd/optimized-basic-testcases.t ++++ b/tests/bugs/glusterd/optimized-basic-testcases.t +@@ -289,9 +289,7 @@ mkdir -p /xyz/var/lib/glusterd/abc + TEST $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc + EXPECT 'Created' volinfo_field "test" 'Status'; + +-#While taking a statedump, there is a TRY_LOCK on call_frame, which might may cause +-#failure. So Adding a EXPECT_WITHIN +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info ++EXPECT "1" generate_statedump_and_check_for_glusterd_info + + cleanup_statedump `pidof glusterd` + cleanup +-- +1.8.3.1 + diff --git a/SOURCES/0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch b/SOURCES/0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch new file mode 100644 index 0000000..0888021 --- /dev/null +++ b/SOURCES/0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch @@ -0,0 +1,292 @@ +From f6d967cd70ff41a0f93c54d50128c468e9d5dea9 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:49:21 +0530 +Subject: [PATCH 244/255] Revert "ec/shd: Cleanup self heal daemon resources + during ec fini" + +This reverts commit edc238e40060773f5f5fd59fcdad8ae27d65749f. 
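+
+For reference, the teardown this revert removes walks both healer arrays and
+destroys their synchronization objects. A minimal fragment sketch, using the
+names from the ec-heald.c hunk below (not a complete function):
+
+    for (i = 0; i < priv->nodes; i++) {
+        /* index healer and full healer each own a cond/mutex pair */
+        pthread_cond_destroy(&shd->index_healers[i].cond);
+        pthread_mutex_destroy(&shd->index_healers[i].mutex);
+        pthread_cond_destroy(&shd->full_healers[i].cond);
+        pthread_mutex_destroy(&shd->full_healers[i].mutex);
+    }
+    GF_FREE(shd->index_healers);
+    GF_FREE(shd->full_healers);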
+ +BUG: 1471742 +Change-Id: If6cb5941b964f005454a21a67938b354ef1a2037 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175953 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/syncop-utils.c | 2 - + xlators/cluster/afr/src/afr-self-heald.c | 5 --- + xlators/cluster/ec/src/ec-heald.c | 77 +++++--------------------------- + xlators/cluster/ec/src/ec-heald.h | 3 -- + xlators/cluster/ec/src/ec-messages.h | 3 +- + xlators/cluster/ec/src/ec.c | 47 ------------------- + 6 files changed, 13 insertions(+), 124 deletions(-) + +diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c +index 4167db4..b842142 100644 +--- a/libglusterfs/src/syncop-utils.c ++++ b/libglusterfs/src/syncop-utils.c +@@ -354,8 +354,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid, + + if (frame) { + this = frame->this; +- } else { +- this = THIS; + } + + /*For this functionality to be implemented in general, we need +diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c +index 522fe5d..8bc4720 100644 +--- a/xlators/cluster/afr/src/afr-self-heald.c ++++ b/xlators/cluster/afr/src/afr-self-heald.c +@@ -524,11 +524,6 @@ afr_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent, + afr_private_t *priv = NULL; + + priv = this->private; +- +- if (this->cleanup_starting) { +- return -ENOTCONN; +- } +- + if (!priv->shd.enabled) + return -EBUSY; + +diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c +index edf5e11..cba111a 100644 +--- a/xlators/cluster/ec/src/ec-heald.c ++++ b/xlators/cluster/ec/src/ec-heald.c +@@ -71,11 +71,6 @@ disabled_loop: + break; + } + +- if (ec->shutdown) { +- healer->running = _gf_false; +- return -1; +- } +- + ret = healer->rerun; + healer->rerun = 0; + +@@ -246,11 +241,9 @@ ec_shd_index_sweep(struct subvol_healer *healer) + goto out; + } + +- _mask_cancellation(); + ret = syncop_mt_dir_scan(NULL, subvol, &loc, GF_CLIENT_PID_SELF_HEALD, + healer, ec_shd_index_heal, xdata, + ec->shd.max_threads, ec->shd.wait_qlength); +- _unmask_cancellation(); + out: + if (xdata) + dict_unref(xdata); +@@ -270,11 +263,6 @@ ec_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent, + int ret = 0; + + ec = this->private; +- +- if (this->cleanup_starting) { +- return -ENOTCONN; +- } +- + if (ec->xl_up_count <= ec->fragments) { + return -ENOTCONN; + } +@@ -317,15 +305,11 @@ ec_shd_full_sweep(struct subvol_healer *healer, inode_t *inode) + { + ec_t *ec = NULL; + loc_t loc = {0}; +- int ret = -1; + + ec = healer->this->private; + loc.inode = inode; +- _mask_cancellation(); +- ret = syncop_ftw(ec->xl_list[healer->subvol], &loc, +- GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal); +- _unmask_cancellation(); +- return ret; ++ return syncop_ftw(ec->xl_list[healer->subvol], &loc, ++ GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal); + } + + void * +@@ -333,16 +317,13 @@ ec_shd_index_healer(void *data) + { + struct subvol_healer *healer = NULL; + xlator_t *this = NULL; +- int run = 0; + + healer = data; + THIS = this = healer->this; + ec_t *ec = this->private; + + for (;;) { +- run = ec_shd_healer_wait(healer); +- if (run == -1) +- break; ++ ec_shd_healer_wait(healer); + + if (ec->xl_up_count > ec->fragments) { + gf_msg_debug(this->name, 0, "starting index sweep on subvol %s", +@@ -371,12 +352,16 @@ ec_shd_full_healer(void *data) + + rootloc.inode = this->itable->root; + for (;;) { +- run = 
ec_shd_healer_wait(healer); +- if (run < 0) { +- break; +- } else if (run == 0) { +- continue; ++ pthread_mutex_lock(&healer->mutex); ++ { ++ run = __ec_shd_healer_wait(healer); ++ if (!run) ++ healer->running = _gf_false; + } ++ pthread_mutex_unlock(&healer->mutex); ++ ++ if (!run) ++ break; + + if (ec->xl_up_count > ec->fragments) { + gf_msg(this->name, GF_LOG_INFO, 0, EC_MSG_FULL_SWEEP_START, +@@ -577,41 +562,3 @@ out: + dict_del(output, this->name); + return ret; + } +- +-void +-ec_destroy_healer_object(xlator_t *this, struct subvol_healer *healer) +-{ +- if (!healer) +- return; +- +- pthread_cond_destroy(&healer->cond); +- pthread_mutex_destroy(&healer->mutex); +-} +- +-void +-ec_selfheal_daemon_fini(xlator_t *this) +-{ +- struct subvol_healer *healer = NULL; +- ec_self_heald_t *shd = NULL; +- ec_t *priv = NULL; +- int i = 0; +- +- priv = this->private; +- if (!priv) +- return; +- +- shd = &priv->shd; +- if (!shd->iamshd) +- return; +- +- for (i = 0; i < priv->nodes; i++) { +- healer = &shd->index_healers[i]; +- ec_destroy_healer_object(this, healer); +- +- healer = &shd->full_healers[i]; +- ec_destroy_healer_object(this, healer); +- } +- +- GF_FREE(shd->index_healers); +- GF_FREE(shd->full_healers); +-} +diff --git a/xlators/cluster/ec/src/ec-heald.h b/xlators/cluster/ec/src/ec-heald.h +index 8184cf4..2eda2a7 100644 +--- a/xlators/cluster/ec/src/ec-heald.h ++++ b/xlators/cluster/ec/src/ec-heald.h +@@ -24,7 +24,4 @@ ec_selfheal_daemon_init(xlator_t *this); + void + ec_shd_index_healer_wake(ec_t *ec); + +-void +-ec_selfheal_daemon_fini(xlator_t *this); +- + #endif /* __EC_HEALD_H__ */ +diff --git a/xlators/cluster/ec/src/ec-messages.h b/xlators/cluster/ec/src/ec-messages.h +index ce299bb..7c28808 100644 +--- a/xlators/cluster/ec/src/ec-messages.h ++++ b/xlators/cluster/ec/src/ec-messages.h +@@ -55,7 +55,6 @@ GLFS_MSGID(EC, EC_MSG_INVALID_CONFIG, EC_MSG_HEAL_FAIL, + EC_MSG_CONFIG_XATTR_INVALID, EC_MSG_EXTENSION, EC_MSG_EXTENSION_NONE, + EC_MSG_EXTENSION_UNKNOWN, EC_MSG_EXTENSION_UNSUPPORTED, + EC_MSG_EXTENSION_FAILED, EC_MSG_NO_GF, EC_MSG_MATRIX_FAILED, +- EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED, +- EC_MSG_THREAD_CLEANUP_FAILED); ++ EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED); + + #endif /* !_EC_MESSAGES_H_ */ +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index 264582a..3c8013e 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -429,51 +429,6 @@ ec_disable_delays(ec_t *ec) + } + + void +-ec_cleanup_healer_object(ec_t *ec) +-{ +- struct subvol_healer *healer = NULL; +- ec_self_heald_t *shd = NULL; +- void *res = NULL; +- int i = 0; +- gf_boolean_t is_join = _gf_false; +- +- shd = &ec->shd; +- if (!shd->iamshd) +- return; +- +- for (i = 0; i < ec->nodes; i++) { +- healer = &shd->index_healers[i]; +- pthread_mutex_lock(&healer->mutex); +- { +- healer->rerun = 1; +- if (healer->running) { +- pthread_cond_signal(&healer->cond); +- is_join = _gf_true; +- } +- } +- pthread_mutex_unlock(&healer->mutex); +- if (is_join) { +- pthread_join(healer->thread, &res); +- is_join = _gf_false; +- } +- +- healer = &shd->full_healers[i]; +- pthread_mutex_lock(&healer->mutex); +- { +- healer->rerun = 1; +- if (healer->running) { +- pthread_cond_signal(&healer->cond); +- is_join = _gf_true; +- } +- } +- pthread_mutex_unlock(&healer->mutex); +- if (is_join) { +- pthread_join(healer->thread, &res); +- is_join = _gf_false; +- } +- } +-} +-void + ec_pending_fops_completed(ec_t *ec) + { + if (ec->shutdown) { +@@ -589,7 +544,6 @@ 
ec_notify(xlator_t *this, int32_t event, void *data, void *data2) + /* If there aren't pending fops running after we have waken up + * them, we immediately propagate the notification. */ + propagate = ec_disable_delays(ec); +- ec_cleanup_healer_object(ec); + goto unlock; + } + +@@ -805,7 +759,6 @@ failed: + void + fini(xlator_t *this) + { +- ec_selfheal_daemon_fini(this); + __ec_destroy_private(this); + } + +-- +1.8.3.1 + diff --git a/SOURCES/0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch b/SOURCES/0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch new file mode 100644 index 0000000..6495e38 --- /dev/null +++ b/SOURCES/0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch @@ -0,0 +1,151 @@ +From 022701465f3e642cdb7942995647615baa266a35 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:49:31 +0530 +Subject: [PATCH 245/255] Revert "shd/glusterd: Serialize shd manager to + prevent race condition" + +This reverts commit 646292b4f73bf1b506d034b85787f794963d7196. + +BUG: 1471742 +Change-Id: Ie21fbe18965d8bdea81f4276b57960a27a4db89d +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175954 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + .../serialize-shd-manager-glusterd-restart.t | 54 ---------------------- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 14 ------ + xlators/mgmt/glusterd/src/glusterd.c | 1 - + xlators/mgmt/glusterd/src/glusterd.h | 3 -- + 4 files changed, 72 deletions(-) + delete mode 100644 tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t + +diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t +deleted file mode 100644 +index 3a27c2a..0000000 +--- a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t ++++ /dev/null +@@ -1,54 +0,0 @@ +-#! /bin/bash +- +-. $(dirname $0)/../../include.rc +-. 
$(dirname $0)/../../cluster.rc +- +-function check_peers { +-count=`$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l` +-echo $count +-} +- +-function check_shd { +-ps aux | grep $1 | grep glustershd | wc -l +-} +- +-cleanup +- +- +-TEST launch_cluster 6 +- +-TESTS_EXPECTED_IN_LOOP=25 +-for i in $(seq 2 6); do +- hostname="H$i" +- TEST $CLI_1 peer probe ${!hostname} +-done +- +- +-EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers; +-for i in $(seq 1 5); do +- +- TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i +- TEST $CLI_1 volume start ${V0}_$i force +- +-done +- +-#kill a node +-TEST kill_node 3 +- +-TEST $glusterd_3; +-EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers +- +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3 +- +-for i in $(seq 1 5); do +- +- TEST $CLI_1 volume stop ${V0}_$i +- TEST $CLI_1 volume delete ${V0}_$i +- +-done +- +-for i in $(seq 1 6); do +- hostname="H$i" +- EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname} +-done +-cleanup +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 75f9a07..a9eab42 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -254,26 +254,14 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + { + int ret = -1; + glusterd_volinfo_t *volinfo = NULL; +- glusterd_conf_t *conf = NULL; +- gf_boolean_t shd_restart = _gf_false; + +- conf = THIS->private; + volinfo = data; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); + + if (volinfo) + glusterd_volinfo_ref(volinfo); + +- while (conf->restart_shd) { +- synclock_unlock(&conf->big_lock); +- sleep(2); +- synclock_lock(&conf->big_lock); +- } +- conf->restart_shd = _gf_true; +- shd_restart = _gf_true; +- + ret = glusterd_shdsvc_create_volfile(volinfo); + if (ret) + goto out; +@@ -322,8 +310,6 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + } + } + out: +- if (shd_restart) +- conf->restart_shd = _gf_false; + if (volinfo) + glusterd_volinfo_unref(volinfo); + if (ret) +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 6d7dd4a..c0973cb 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1819,7 +1819,6 @@ init(xlator_t *this) + conf->rpc = rpc; + conf->uds_rpc = uds_rpc; + conf->gfs_mgmt = &gd_brick_prog; +- conf->restart_shd = _gf_false; + this->private = conf; + /* conf->workdir and conf->rundir are smaller than PATH_MAX; gcc's + * snprintf checking will throw an error here if sprintf is used. 
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index 7d07d33..0fbc9dd 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -222,9 +222,6 @@ typedef struct { + gf_atomic_t blockers; + uint32_t mgmt_v3_lock_timeout; + gf_boolean_t restart_bricks; +- gf_boolean_t restart_shd; /* This flag prevents running two shd manager +- simultaneously +- */ + pthread_mutex_t attach_lock; /* Lock can be per process or a common one */ + pthread_mutex_t volume_lock; /* We release the big_lock from lot of places + which might lead the modification of volinfo +-- +1.8.3.1 + diff --git a/SOURCES/0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch b/SOURCES/0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch new file mode 100644 index 0000000..bbc780c --- /dev/null +++ b/SOURCES/0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch @@ -0,0 +1,53 @@ +From bc5e3967864d6f6ea22deb22ba72aedca8367797 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:49:41 +0530 +Subject: [PATCH 246/255] Revert "glusterd/shd: Keep a ref on volinfo until + attach rpc execute cbk" + +This reverts commit c429d3c63601e6ea15af76aa684c30bbeb746467. + +BUG: 1471742 +Change-Id: I614e8bdbcc5111dbf407aba047e7d2284bef8ac8 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175955 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 3 --- + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 4 ---- + 2 files changed, 7 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index a9eab42..19eca9f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -452,11 +452,8 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) + } + + if (shd->attached) { +- glusterd_volinfo_ref(volinfo); +- /* Unref will happen from glusterd_svc_attach_cbk */ + ret = glusterd_attach_svc(svc, volinfo, flags); + if (ret) { +- glusterd_volinfo_unref(volinfo); + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Failed to attach shd svc(volume=%s) to pid=%d. Starting" + "a new process", +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index f7be394..02945b1 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -695,10 +695,6 @@ out: + if (flag) { + GF_FREE(flag); + } +- +- if (volinfo) +- glusterd_volinfo_unref(volinfo); +- + GF_ATOMIC_DEC(conf->blockers); + STACK_DESTROY(frame->root); + return 0; +-- +1.8.3.1 + diff --git a/SOURCES/0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch b/SOURCES/0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch new file mode 100644 index 0000000..dc17d72 --- /dev/null +++ b/SOURCES/0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch @@ -0,0 +1,144 @@ +From 33d59c74169192b4ba89abc915d8d785bc450fbb Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:49:54 +0530 +Subject: [PATCH 247/255] Revert "afr/shd: Cleanup self heal daemon resources + during afr fini" + +This reverts commit faaaa3452ceec6afcc18cffc9beca3fe19841cce. 
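+
+For reference, the per-healer teardown this revert removes first reaps a
+running healer thread and then destroys its synchronization objects. A
+minimal fragment sketch, using the names from the afr.c hunk below:
+
+    if (healer->running) {
+        /* reap the healer thread before freeing its resources */
+        if (gf_thread_cleanup_xint(healer->thread))
+            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
+                   "Failed to clean up healer threads.");
+        healer->thread = 0;
+    }
+    pthread_cond_destroy(&healer->cond);
+    pthread_mutex_destroy(&healer->mutex);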
+ +BUG: 1471742 +Change-Id: Id4a22ab45b89872684830f866ec4b589fca50a90 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175956 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/syncop-utils.c | 8 ----- + xlators/cluster/afr/src/afr-self-heald.c | 2 -- + xlators/cluster/afr/src/afr.c | 57 -------------------------------- + 3 files changed, 67 deletions(-) + +diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c +index b842142..be03527 100644 +--- a/libglusterfs/src/syncop-utils.c ++++ b/libglusterfs/src/syncop-utils.c +@@ -350,11 +350,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid, + gf_boolean_t cond_init = _gf_false; + gf_boolean_t mut_init = _gf_false; + gf_dirent_t entries; +- xlator_t *this = NULL; +- +- if (frame) { +- this = frame->this; +- } + + /*For this functionality to be implemented in general, we need + * synccond_t infra which doesn't block the executing thread. Until then +@@ -402,9 +397,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid, + + list_for_each_entry_safe(entry, tmp, &entries.list, list) + { +- if (this && this->cleanup_starting) +- goto out; +- + list_del_init(&entry->list); + if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) { + gf_dirent_entry_free(entry); +diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c +index 8bc4720..7eb1207 100644 +--- a/xlators/cluster/afr/src/afr-self-heald.c ++++ b/xlators/cluster/afr/src/afr-self-heald.c +@@ -373,7 +373,6 @@ afr_shd_sweep_prepare(struct subvol_healer *healer) + + time(&event->start_time); + event->end_time = 0; +- _mask_cancellation(); + } + + void +@@ -395,7 +394,6 @@ afr_shd_sweep_done(struct subvol_healer *healer) + + if (eh_save_history(shd->statistics[healer->subvol], history) < 0) + GF_FREE(history); +- _unmask_cancellation(); + } + + int +diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c +index a0a7551..33258a0 100644 +--- a/xlators/cluster/afr/src/afr.c ++++ b/xlators/cluster/afr/src/afr.c +@@ -611,70 +611,13 @@ init(xlator_t *this) + out: + return ret; + } +-void +-afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer) +-{ +- int ret = -1; +- +- if (!healer) +- return; +- +- if (healer->running) { +- /* +- * If there are any resources to cleanup, We need +- * to do that gracefully using pthread_cleanup_push +- */ +- ret = gf_thread_cleanup_xint(healer->thread); +- if (ret) +- gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED, +- "Failed to clean up healer threads."); +- healer->thread = 0; +- } +- pthread_cond_destroy(&healer->cond); +- pthread_mutex_destroy(&healer->mutex); +-} +- +-void +-afr_selfheal_daemon_fini(xlator_t *this) +-{ +- struct subvol_healer *healer = NULL; +- afr_self_heald_t *shd = NULL; +- afr_private_t *priv = NULL; +- int i = 0; +- +- priv = this->private; +- if (!priv) +- return; +- +- shd = &priv->shd; +- if (!shd->iamshd) +- return; +- +- for (i = 0; i < priv->child_count; i++) { +- healer = &shd->index_healers[i]; +- afr_destroy_healer_object(this, healer); + +- healer = &shd->full_healers[i]; +- afr_destroy_healer_object(this, healer); +- +- if (shd->statistics[i]) +- eh_destroy(shd->statistics[i]); +- } +- GF_FREE(shd->index_healers); +- GF_FREE(shd->full_healers); +- GF_FREE(shd->statistics); +- if (shd->split_brain) +- eh_destroy(shd->split_brain); +-} + void + fini(xlator_t *this) + 
{ + afr_private_t *priv = NULL; + + priv = this->private; +- +- afr_selfheal_daemon_fini(this); +- + LOCK(&priv->lock); + if (priv->timer != NULL) { + gf_timer_call_cancel(this->ctx, priv->timer); +-- +1.8.3.1 + diff --git a/SOURCES/0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch b/SOURCES/0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch new file mode 100644 index 0000000..d721a82 --- /dev/null +++ b/SOURCES/0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch @@ -0,0 +1,151 @@ +From 469cb9e16d46f075caf609ddcb12a7c02d73ce8b Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:50:24 +0530 +Subject: [PATCH 248/255] Revert "shd/mux: Fix coverity issues introduced by + shd mux patch" + +This reverts commit 0021a4bbc9af2bfe28d4a79f76c3cd33f23dd118. + +BUG: 1471742 +Change-Id: I0749328412ed3cc7ae5d64baea7a90b63b489a08 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175957 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/graph.c | 21 ++++++++------------- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 6 ------ + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 24 +++++++----------------- + 3 files changed, 15 insertions(+), 36 deletions(-) + +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index 4c8b02d..a492dd8 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -1470,9 +1470,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) + goto out; + parent_graph = ctx->active; + graph = volfile_obj->graph; +- if (!graph) +- goto out; +- if (graph->first) ++ if (graph && graph->first) + xl = graph->first; + + last_xl = graph->last_xl; +@@ -1593,10 +1591,12 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, + parent_graph->leaf_count += graph->leaf_count; + parent_graph->id++; + +- volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t); + if (!volfile_obj) { +- ret = -1; +- goto out; ++ volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t); ++ if (!volfile_obj) { ++ ret = -1; ++ goto out; ++ } + } + + graph->used = 1; +@@ -1641,7 +1641,6 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + { + glusterfs_graph_t *oldvolfile_graph = NULL; + glusterfs_graph_t *newvolfile_graph = NULL; +- char vol_id[NAME_MAX + 1]; + + int ret = -1; + +@@ -1673,9 +1672,6 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first); + + if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) { +- ret = snprintf(vol_id, sizeof(vol_id), "%s", volfile_obj->vol_id); +- if (ret < 0) +- goto out; + ret = glusterfs_process_svc_detach(ctx, volfile_obj); + if (ret) { + gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, +@@ -1684,9 +1680,8 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + "old graph. 
Aborting the reconfiguration operation"); + goto out; + } +- volfile_obj = NULL; +- ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id, +- checksum); ++ ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, ++ volfile_obj->vol_id, checksum); + goto out; + } + +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 19eca9f..4789843 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -101,8 +101,6 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, + svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc); + ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s", + mux_conn->sockpath); +- if (ret < 0) +- goto out; + } else { + ret = mkdir_p(logdir, 0755, _gf_true); + if ((ret == -1) && (EEXIST != errno)) { +@@ -675,10 +673,6 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig) + glusterd_volinfo_ref(volinfo); + svc_proc->data = volinfo; + ret = glusterd_svc_stop(svc, sig); +- if (ret) { +- glusterd_volinfo_unref(volinfo); +- goto out; +- } + } + if (!empty && pid != -1) { + ret = glusterd_detach_svc(svc, volinfo, sig); +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index 02945b1..e42703c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -411,14 +411,9 @@ __gf_find_compatible_svc(gd_node_type daemon) + conf = THIS->private; + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + +- switch (daemon) { +- case GD_NODE_SHD: { +- svc_procs = &conf->shd_procs; +- if (!svc_procs) +- goto out; +- } break; +- default: +- /* Add support for other client daemons here */ ++ if (daemon == GD_NODE_SHD) { ++ svc_procs = &conf->shd_procs; ++ if (!svc_procs) + goto out; + } + +@@ -545,16 +540,11 @@ __gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid) + if (!conf) + return NULL; + +- switch (daemon) { +- case GD_NODE_SHD: { +- svc_procs = &conf->shd_procs; +- if (!svc_procs) +- return NULL; +- } break; +- default: +- /* Add support for other client daemons here */ ++ if (daemon == GD_NODE_SHD) { ++ svc_procs = &conf->shd_procs; ++ if (!svc_proc) + return NULL; +- } ++ } /* Can be moved to switch when mux is implemented for other daemon; */ + + cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list) + { +-- +1.8.3.1 + diff --git a/SOURCES/0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch b/SOURCES/0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch new file mode 100644 index 0000000..5547257 --- /dev/null +++ b/SOURCES/0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch @@ -0,0 +1,95 @@ +From 1864a4f382f3031915e8126440a1561035487e49 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:53:20 +0530 +Subject: [PATCH 249/255] Revert "client/fini: return fini after rpc cleanup" + +This reverts commit d79cb2cdff6fe8d962c9ac095a7541ddf500302b. 
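+
+For reference, the handshake this revert removes makes fini() block until the
+rpc cleanup callback signals completion. A minimal fragment sketch of both
+sides, using the names from the client.c hunk below:
+
+    /* fini(): wait until client_fini_complete() has run */
+    pthread_mutex_lock(&conf->lock);
+    {
+        while (!conf->fini_completed)
+            pthread_cond_wait(&conf->fini_complete_cond, &conf->lock);
+    }
+    pthread_mutex_unlock(&conf->lock);
+
+    /* client_fini_complete(): signal the waiter */
+    pthread_mutex_lock(&conf->lock);
+    {
+        conf->fini_completed = _gf_true;
+        pthread_cond_broadcast(&conf->fini_complete_cond);
+    }
+    pthread_mutex_unlock(&conf->lock);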
+ +BUG: 1471742 +Change-Id: I15e6544d47fb7b6002c3b44de3fe0b2a13c84f51 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175958 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/protocol/client/src/client.c | 25 +++++-------------------- + xlators/protocol/client/src/client.h | 6 ------ + 2 files changed, 5 insertions(+), 26 deletions(-) + +diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c +index 95e4be5..532ef35 100644 +--- a/xlators/protocol/client/src/client.c ++++ b/xlators/protocol/client/src/client.c +@@ -49,12 +49,11 @@ client_fini_complete(xlator_t *this) + if (!conf->destroy) + return 0; + +- pthread_mutex_lock(&conf->lock); +- { +- conf->fini_completed = _gf_true; +- pthread_cond_broadcast(&conf->fini_complete_cond); +- } +- pthread_mutex_unlock(&conf->lock); ++ this->private = NULL; ++ ++ pthread_spin_destroy(&conf->fd_lock); ++ pthread_mutex_destroy(&conf->lock); ++ GF_FREE(conf); + + out: + return 0; +@@ -2730,7 +2729,6 @@ init(xlator_t *this) + goto out; + + pthread_mutex_init(&conf->lock, NULL); +- pthread_cond_init(&conf->fini_complete_cond, NULL); + pthread_spin_init(&conf->fd_lock, 0); + INIT_LIST_HEAD(&conf->saved_fds); + +@@ -2789,7 +2787,6 @@ fini(xlator_t *this) + if (!conf) + return; + +- conf->fini_completed = _gf_false; + conf->destroy = 1; + if (conf->rpc) { + /* cleanup the saved-frames before last unref */ +@@ -2797,18 +2794,6 @@ fini(xlator_t *this) + rpc_clnt_unref(conf->rpc); + } + +- pthread_mutex_lock(&conf->lock); +- { +- while (!conf->fini_completed) +- pthread_cond_wait(&conf->fini_complete_cond, &conf->lock); +- } +- pthread_mutex_unlock(&conf->lock); +- +- pthread_spin_destroy(&conf->fd_lock); +- pthread_mutex_destroy(&conf->lock); +- pthread_cond_destroy(&conf->fini_complete_cond); +- GF_FREE(conf); +- + /* Saved Fds */ + /* TODO: */ + +diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h +index 8dcd72f..f12fa61 100644 +--- a/xlators/protocol/client/src/client.h ++++ b/xlators/protocol/client/src/client.h +@@ -235,12 +235,6 @@ typedef struct clnt_conf { + * up, disconnects can be + * logged + */ +- +- gf_boolean_t old_protocol; /* used only for old-protocol testing */ +- pthread_cond_t fini_complete_cond; /* Used to wait till we finsh the fini +- compltely, ie client_fini_complete +- to return*/ +- gf_boolean_t fini_completed; + } clnt_conf_t; + + typedef struct _client_fd_ctx { +-- +1.8.3.1 + diff --git a/SOURCES/0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch b/SOURCES/0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch new file mode 100644 index 0000000..637a16a --- /dev/null +++ b/SOURCES/0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch @@ -0,0 +1,4572 @@ +From ec629963d61c3ec084c95366eec5ee3a976b1213 Mon Sep 17 00:00:00 2001 +From: Mohammed Rafi KC +Date: Thu, 11 Jul 2019 12:57:45 +0530 +Subject: [PATCH 250/255] Revert "mgmt/shd: Implement multiplexing in self heal + daemon" + +This reverts commit 2cede2b87fb3e3e0673be9cf67e7d6eec3f7879c. 
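+
+For reference, the volfile dispatch this revert removes matches the incoming
+volfile-id against the ctx list and skips reloading when the checksum is
+unchanged. A minimal fragment sketch, using the names from the
+glusterfsd-mgmt.c hunk below (locking around the list walk elided):
+
+    glusterfs_compute_sha256((const unsigned char *)volfile, size,
+                             sha256_hash);
+    list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+    {
+        if (!strcmp(volfile_id, volfile_obj->vol_id)) {
+            if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
+                        sizeof(volfile_obj->volfile_checksum)))
+                goto out;                /* no change in volfile */
+            volfile_tmp = volfile_obj;   /* existing graph: reconfigure */
+            break;
+        }
+    }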
+ +BUG: 1471742 +Change-Id: I3830d9189dfdb567a44935aa97dc963f4594dfdb +fixes: bz#1471742 +Signed-off-by: Mohammed Rafi KC +Reviewed-on: https://code.engineering.redhat.com/gerrit/175959 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + glusterfsd/src/glusterfsd-messages.h | 2 +- + glusterfsd/src/glusterfsd-mgmt.c | 238 +------ + glusterfsd/src/glusterfsd.c | 20 +- + libglusterfs/src/defaults-tmpl.c | 19 +- + libglusterfs/src/glusterfs/glusterfs.h | 7 - + libglusterfs/src/glusterfs/libglusterfs-messages.h | 4 +- + libglusterfs/src/glusterfs/xlator.h | 3 - + libglusterfs/src/graph.c | 451 ------------- + libglusterfs/src/graph.y | 3 - + libglusterfs/src/libglusterfs.sym | 5 - + libglusterfs/src/statedump.c | 3 +- + libglusterfs/src/xlator.c | 16 - + rpc/rpc-lib/src/protocol-common.h | 2 - + tests/basic/glusterd/heald.t | 49 +- + .../reset-brick-and-daemons-follow-quorum.t | 8 +- + tests/volume.rc | 6 +- + xlators/mgmt/glusterd/src/Makefile.am | 6 +- + xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c | 42 -- + xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h | 4 +- + xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c | 3 +- + xlators/mgmt/glusterd/src/glusterd-handler.c | 11 +- + xlators/mgmt/glusterd/src/glusterd-handshake.c | 21 - + xlators/mgmt/glusterd/src/glusterd-mem-types.h | 1 - + xlators/mgmt/glusterd/src/glusterd-messages.h | 4 +- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 84 +-- + .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 140 ---- + .../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 45 -- + xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 540 ++-------------- + xlators/mgmt/glusterd/src/glusterd-shd-svc.h | 17 +- + xlators/mgmt/glusterd/src/glusterd-sm.c | 12 +- + xlators/mgmt/glusterd/src/glusterd-snapd-svc.c | 3 +- + xlators/mgmt/glusterd/src/glusterd-statedump.c | 3 + + xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 715 +-------------------- + xlators/mgmt/glusterd/src/glusterd-svc-helper.h | 40 +- + xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c | 246 ++----- + xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h | 27 - + xlators/mgmt/glusterd/src/glusterd-tier.c | 3 +- + xlators/mgmt/glusterd/src/glusterd-tierd-svc.c | 4 +- + xlators/mgmt/glusterd/src/glusterd-utils.c | 137 +--- + xlators/mgmt/glusterd/src/glusterd-utils.h | 4 - + xlators/mgmt/glusterd/src/glusterd-volgen.c | 60 +- + xlators/mgmt/glusterd/src/glusterd-volgen.h | 11 +- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 8 +- + xlators/mgmt/glusterd/src/glusterd.c | 12 +- + xlators/mgmt/glusterd/src/glusterd.h | 30 +- + xlators/protocol/client/src/client.c | 31 +- + 47 files changed, 292 insertions(+), 2810 deletions(-) + delete mode 100644 xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c + delete mode 100644 xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h + +diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h +index 280624c..ce6c8ca 100644 +--- a/glusterfsd/src/glusterfsd-messages.h ++++ b/glusterfsd/src/glusterfsd-messages.h +@@ -36,6 +36,6 @@ GLFS_MSGID(GLUSTERFSD, glusterfsd_msg_1, glusterfsd_msg_2, glusterfsd_msg_3, + glusterfsd_msg_31, glusterfsd_msg_32, glusterfsd_msg_33, + glusterfsd_msg_34, glusterfsd_msg_35, glusterfsd_msg_36, + glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39, +- glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43); ++ glusterfsd_msg_40); + + #endif /* !_GLUSTERFSD_MESSAGES_H_ */ +diff --git a/glusterfsd/src/glusterfsd-mgmt.c 
b/glusterfsd/src/glusterfsd-mgmt.c +index 1d2cd1a..15acc10 100644 +--- a/glusterfsd/src/glusterfsd-mgmt.c ++++ b/glusterfsd/src/glusterfsd-mgmt.c +@@ -48,20 +48,7 @@ int + glusterfs_graph_unknown_options(glusterfs_graph_t *graph); + int + emancipate(glusterfs_ctx_t *ctx, int ret); +-int +-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum); +-int +-glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, +- gf_volfile_t *volfile_obj, char *checksum); +-int +-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum); +-int +-glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj); + +-gf_boolean_t +-mgmt_is_multiplexed_daemon(char *name); + int + mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data) + { +@@ -75,96 +62,6 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data) + } + + int +-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id) +-{ +- glusterfs_ctx_t *ctx = NULL; +- int ret = 0; +- FILE *tmpfp = NULL; +- gf_volfile_t *volfile_obj = NULL; +- gf_volfile_t *volfile_tmp = NULL; +- char sha256_hash[SHA256_DIGEST_LENGTH] = { +- 0, +- }; +- int tmp_fd = -1; +- char template[] = "/tmp/glfs.volfile.XXXXXX"; +- +- glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash); +- ctx = THIS->ctx; +- LOCK(&ctx->volfile_lock); +- { +- list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) +- { +- if (!strcmp(volfile_id, volfile_obj->vol_id)) { +- if (!memcmp(sha256_hash, volfile_obj->volfile_checksum, +- sizeof(volfile_obj->volfile_checksum))) { +- UNLOCK(&ctx->volfile_lock); +- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40, +- "No change in volfile, continuing"); +- goto out; +- } +- volfile_tmp = volfile_obj; +- break; +- } +- } +- +- /* coverity[secure_temp] mkstemp uses 0600 as the mode */ +- tmp_fd = mkstemp(template); +- if (-1 == tmp_fd) { +- UNLOCK(&ctx->volfile_lock); +- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39, +- "Unable to create temporary file: %s", template); +- ret = -1; +- goto out; +- } +- +- /* Calling unlink so that when the file is closed or program +- * terminates the temporary file is deleted. 
+- */ +- ret = sys_unlink(template); +- if (ret < 0) { +- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39, +- "Unable to delete temporary file: %s", template); +- ret = 0; +- } +- +- tmpfp = fdopen(tmp_fd, "w+b"); +- if (!tmpfp) { +- ret = -1; +- goto unlock; +- } +- +- fwrite(volfile, size, 1, tmpfp); +- fflush(tmpfp); +- if (ferror(tmpfp)) { +- ret = -1; +- goto unlock; +- } +- +- if (!volfile_tmp) { +- /* There is no checksum in the list, which means simple attach +- * the volfile +- */ +- ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id, +- sha256_hash); +- goto unlock; +- } +- ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj, +- sha256_hash); +- if (ret < 0) { +- gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!"); +- } +- } +-unlock: +- UNLOCK(&ctx->volfile_lock); +-out: +- if (tmpfp) +- fclose(tmpfp); +- else if (tmp_fd != -1) +- sys_close(tmp_fd); +- return ret; +-} +- +-int + mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data) + { + return 0; +@@ -1069,110 +966,6 @@ glusterfs_handle_attach(rpcsvc_request_t *req) + } + + int +-glusterfs_handle_svc_attach(rpcsvc_request_t *req) +-{ +- int32_t ret = -1; +- gd1_mgmt_brick_op_req xlator_req = { +- 0, +- }; +- xlator_t *this = NULL; +- glusterfs_ctx_t *ctx = NULL; +- +- GF_ASSERT(req); +- this = THIS; +- GF_ASSERT(this); +- +- ctx = this->ctx; +- ret = xdr_to_generic(req->msg[0], &xlator_req, +- (xdrproc_t)xdr_gd1_mgmt_brick_op_req); +- +- if (ret < 0) { +- /*failed to decode msg;*/ +- req->rpc_err = GARBAGE_ARGS; +- goto out; +- } +- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, +- "received attach " +- "request for volfile-id=%s", +- xlator_req.name); +- ret = 0; +- +- if (ctx->active) { +- ret = mgmt_process_volfile(xlator_req.input.input_val, +- xlator_req.input.input_len, xlator_req.name); +- } else { +- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, +- "got attach for %s but no active graph", xlator_req.name); +- } +-out: +- if (xlator_req.input.input_val) +- free(xlator_req.input.input_val); +- if (xlator_req.name) +- free(xlator_req.name); +- glusterfs_translator_info_response_send(req, ret, NULL, NULL); +- return 0; +-} +- +-int +-glusterfs_handle_svc_detach(rpcsvc_request_t *req) +-{ +- gd1_mgmt_brick_op_req xlator_req = { +- 0, +- }; +- ssize_t ret; +- glusterfs_ctx_t *ctx = NULL; +- gf_volfile_t *volfile_obj = NULL; +- gf_volfile_t *volfile_tmp = NULL; +- +- ret = xdr_to_generic(req->msg[0], &xlator_req, +- (xdrproc_t)xdr_gd1_mgmt_brick_op_req); +- if (ret < 0) { +- req->rpc_err = GARBAGE_ARGS; +- return -1; +- } +- ctx = glusterfsd_ctx; +- +- LOCK(&ctx->volfile_lock); +- { +- list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) +- { +- if (!strcmp(xlator_req.name, volfile_obj->vol_id)) { +- volfile_tmp = volfile_obj; +- break; +- } +- } +- +- if (!volfile_tmp) { +- UNLOCK(&ctx->volfile_lock); +- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_41, +- "can't detach %s - not found", xlator_req.name); +- /* +- * Used to be -ENOENT. However, the caller asked us to +- * make sure it's down and if it's already down that's +- * good enough. +- */ +- ret = 0; +- goto out; +- } +- ret = glusterfs_process_svc_detach(ctx, volfile_tmp); +- if (ret) { +- UNLOCK(&ctx->volfile_lock); +- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_41, +- "Could not detach " +- "old graph. 
Aborting the reconfiguration operation"); +- goto out; +- } +- } +- UNLOCK(&ctx->volfile_lock); +-out: +- glusterfs_terminate_response_send(req, ret); +- free(xlator_req.name); +- xlator_req.name = NULL; +- +- return 0; +-} +- +-int + glusterfs_handle_dump_metrics(rpcsvc_request_t *req) + { + int32_t ret = -1; +@@ -2056,13 +1849,6 @@ rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = { + + [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS, + glusterfs_handle_dump_metrics, NULL, 0, DRC_NA}, +- +- [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", GLUSTERD_SVC_ATTACH, +- glusterfs_handle_svc_attach, NULL, 0, DRC_NA}, +- +- [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", GLUSTERD_SVC_DETACH, +- glusterfs_handle_svc_detach, NULL, 0, DRC_NA}, +- + }; + + struct rpcsvc_program glusterfs_mop_prog = { +@@ -2210,17 +1996,14 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count, + } + + volfile: ++ ret = 0; + size = rsp.op_ret; +- volfile_id = frame->local; +- if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { +- ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id); +- goto post_graph_mgmt; +- } + +- ret = 0; + glusterfs_compute_sha256((const unsigned char *)rsp.spec, size, + sha256_hash); + ++ volfile_id = frame->local; ++ + LOCK(&ctx->volfile_lock); + { + locked = 1; +@@ -2322,7 +2105,6 @@ volfile: + } + + INIT_LIST_HEAD(&volfile_tmp->volfile_list); +- volfile_tmp->graph = ctx->active; + list_add(&volfile_tmp->volfile_list, &ctx->volfile_list); + snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s", + volfile_id); +@@ -2334,7 +2116,6 @@ volfile: + + locked = 0; + +-post_graph_mgmt: + if (!is_mgmt_rpc_reconnect) { + need_emancipate = 1; + glusterfs_mgmt_pmap_signin(ctx); +@@ -2488,21 +2269,10 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx) + { + xlator_t *server_xl = NULL; + xlator_list_t *trav; +- gf_volfile_t *volfile_obj = NULL; +- int ret = 0; ++ int ret; + + LOCK(&ctx->volfile_lock); + { +- if (ctx->active && +- mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { +- list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) +- { +- ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id); +- } +- UNLOCK(&ctx->volfile_lock); +- return ret; +- } +- + if (ctx->active) { + server_xl = ctx->active->first; + if (strcmp(server_xl->type, "protocol/server") != 0) { +diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c +index 2172af4..5b5e996 100644 +--- a/glusterfsd/src/glusterfsd.c ++++ b/glusterfsd/src/glusterfsd.c +@@ -2593,6 +2593,24 @@ out: + #endif + + int ++glusterfs_graph_fini(glusterfs_graph_t *graph) ++{ ++ xlator_t *trav = NULL; ++ ++ trav = graph->first; ++ ++ while (trav) { ++ if (trav->init_succeeded) { ++ trav->fini(trav); ++ trav->init_succeeded = 0; ++ } ++ trav = trav->next; ++ } ++ ++ return 0; ++} ++ ++int + glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp) + { + glusterfs_graph_t *graph = NULL; +@@ -2791,7 +2809,7 @@ main(int argc, char *argv[]) + + /* set brick_mux mode only for server process */ + if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) { +- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43, ++ gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_40, + "command line argument --brick-mux is valid only for brick " + "process"); + goto out; +diff --git a/libglusterfs/src/defaults-tmpl.c b/libglusterfs/src/defaults-tmpl.c +index 82e7f78..5bf64e8 100644 +--- a/libglusterfs/src/defaults-tmpl.c ++++ b/libglusterfs/src/defaults-tmpl.c +@@ -127,12 
+127,6 @@ default_notify(xlator_t *this, int32_t event, void *data, ...) + GF_UNUSED int ret = 0; + xlator_t *victim = data; + +- glusterfs_graph_t *graph = NULL; +- +- GF_VALIDATE_OR_GOTO("notify", this, out); +- graph = this->graph; +- GF_VALIDATE_OR_GOTO(this->name, graph, out); +- + switch (event) { + case GF_EVENT_PARENT_UP: + case GF_EVENT_PARENT_DOWN: { +@@ -165,17 +159,6 @@ default_notify(xlator_t *this, int32_t event, void *data, ...) + xlator_notify(parent->xlator, event, this, NULL); + parent = parent->next; + } +- +- if (event == GF_EVENT_CHILD_DOWN && +- !(this->ctx && this->ctx->master) && (graph->top == this)) { +- /* Make sure this is not a daemon with master xlator */ +- pthread_mutex_lock(&graph->mutex); +- { +- graph->used = 0; +- pthread_cond_broadcast(&graph->child_down_cond); +- } +- pthread_mutex_unlock(&graph->mutex); +- } + } break; + case GF_EVENT_UPCALL: { + xlator_list_t *parent = this->parents; +@@ -222,7 +205,7 @@ default_notify(xlator_t *this, int32_t event, void *data, ...) + * nothing to do with readability. + */ + } +-out: ++ + return 0; + } + +diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h +index 9ec2365..2cedf1a 100644 +--- a/libglusterfs/src/glusterfs/glusterfs.h ++++ b/libglusterfs/src/glusterfs/glusterfs.h +@@ -597,10 +597,6 @@ struct _glusterfs_graph { + int used; /* Should be set when fuse gets + first CHILD_UP */ + uint32_t volfile_checksum; +- void *last_xl; /* Stores the last xl of the graph, as of now only populated +- in client multiplexed code path */ +- pthread_mutex_t mutex; +- pthread_cond_t child_down_cond; /* for broadcasting CHILD_DOWN */ + }; + typedef struct _glusterfs_graph glusterfs_graph_t; + +@@ -743,7 +739,6 @@ typedef struct { + char volfile_checksum[SHA256_DIGEST_LENGTH]; + char vol_id[NAME_MAX + 1]; + struct list_head volfile_list; +- glusterfs_graph_t *graph; + + } gf_volfile_t; + +@@ -827,6 +822,4 @@ gf_free_mig_locks(lock_migration_info_t *locks); + + int + glusterfs_read_secure_access_file(void); +-int +-glusterfs_graph_fini(glusterfs_graph_t *graph); + #endif /* _GLUSTERFS_H */ +diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h +index ea2aa60..1b72f6d 100644 +--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h ++++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h +@@ -109,8 +109,6 @@ GLFS_MSGID( + LG_MSG_PTHREAD_ATTR_INIT_FAILED, LG_MSG_INVALID_INODE_LIST, + LG_MSG_COMPACT_FAILED, LG_MSG_COMPACT_STATUS, LG_MSG_UTIMENSAT_FAILED, + LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG, +- LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE, +- LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED, +- LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED); ++ LG_MSG_XXH64_TO_GFID_FAILED); + + #endif /* !_LG_MESSAGES_H_ */ +diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h +index 8998976..b78daad 100644 +--- a/libglusterfs/src/glusterfs/xlator.h ++++ b/libglusterfs/src/glusterfs/xlator.h +@@ -1089,7 +1089,4 @@ handle_default_options(xlator_t *xl, dict_t *options); + + void + gluster_graph_take_reference(xlator_t *tree); +- +-gf_boolean_t +-mgmt_is_multiplexed_daemon(char *name); + #endif /* _XLATOR_H */ +diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c +index a492dd8..bb5e67a 100644 +--- a/libglusterfs/src/graph.c ++++ b/libglusterfs/src/graph.c +@@ -114,53 +114,6 @@ out: + return cert_depth; + } + +-xlator_t * 
+-glusterfs_get_last_xlator(glusterfs_graph_t *graph) +-{ +- xlator_t *trav = graph->first; +- if (!trav) +- return NULL; +- +- while (trav->next) +- trav = trav->next; +- +- return trav; +-} +- +-xlator_t * +-glusterfs_mux_xlator_unlink(xlator_t *pxl, xlator_t *cxl) +-{ +- xlator_list_t *unlink = NULL; +- xlator_list_t *prev = NULL; +- xlator_list_t **tmp = NULL; +- xlator_t *next_child = NULL; +- xlator_t *xl = NULL; +- +- for (tmp = &pxl->children; *tmp; tmp = &(*tmp)->next) { +- if ((*tmp)->xlator == cxl) { +- unlink = *tmp; +- *tmp = (*tmp)->next; +- if (*tmp) +- next_child = (*tmp)->xlator; +- break; +- } +- prev = *tmp; +- } +- +- if (!prev) +- xl = pxl; +- else if (prev->xlator) +- xl = prev->xlator->graph->last_xl; +- +- if (xl) +- xl->next = next_child; +- if (next_child) +- next_child->prev = xl; +- +- GF_FREE(unlink); +- return next_child; +-} +- + int + glusterfs_xlator_link(xlator_t *pxl, xlator_t *cxl) + { +@@ -1139,8 +1092,6 @@ glusterfs_graph_destroy_residual(glusterfs_graph_t *graph) + ret = xlator_tree_free_memacct(graph->first); + + list_del_init(&graph->list); +- pthread_mutex_destroy(&graph->mutex); +- pthread_cond_destroy(&graph->child_down_cond); + GF_FREE(graph); + + return ret; +@@ -1183,25 +1134,6 @@ out: + } + + int +-glusterfs_graph_fini(glusterfs_graph_t *graph) +-{ +- xlator_t *trav = NULL; +- +- trav = graph->first; +- +- while (trav) { +- if (trav->init_succeeded) { +- trav->cleanup_starting = 1; +- trav->fini(trav); +- trav->init_succeeded = 0; +- } +- trav = trav->next; +- } +- +- return 0; +-} +- +-int + glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path, + glusterfs_graph_t **newgraph) + { +@@ -1324,386 +1256,3 @@ glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path, + + return 0; + } +-int +-glusterfs_muxsvc_cleanup_parent(glusterfs_ctx_t *ctx, +- glusterfs_graph_t *parent_graph) +-{ +- if (parent_graph) { +- if (parent_graph->first) { +- xlator_destroy(parent_graph->first); +- } +- ctx->active = NULL; +- GF_FREE(parent_graph); +- parent_graph = NULL; +- } +- return 0; +-} +- +-void * +-glusterfs_graph_cleanup(void *arg) +-{ +- glusterfs_graph_t *graph = NULL; +- glusterfs_ctx_t *ctx = THIS->ctx; +- int ret = -1; +- graph = arg; +- +- if (!graph) +- return NULL; +- +- /* To destroy the graph, fitst sent a GF_EVENT_PARENT_DOWN +- * Then wait for GF_EVENT_CHILD_DOWN to get on the top +- * xl. Once we have GF_EVENT_CHILD_DOWN event, then proceed +- * to fini. +- * +- * During fini call, this will take a last unref on rpc and +- * rpc_transport_object. +- */ +- if (graph->first) +- default_notify(graph->first, GF_EVENT_PARENT_DOWN, graph->first); +- +- ret = pthread_mutex_lock(&graph->mutex); +- if (ret != 0) { +- gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED, +- "Failed to aquire a lock"); +- goto out; +- } +- /* check and wait for CHILD_DOWN for top xlator*/ +- while (graph->used) { +- ret = pthread_cond_wait(&graph->child_down_cond, &graph->mutex); +- if (ret != 0) +- gf_msg("glusterfs", GF_LOG_INFO, 0, LG_MSG_GRAPH_CLEANUP_FAILED, +- "cond wait failed "); +- } +- +- ret = pthread_mutex_unlock(&graph->mutex); +- if (ret != 0) { +- gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED, +- "Failed to release a lock"); +- } +- +- /* Though we got a child down on top xlator, we have to wait until +- * all the notifier to exit. Because there should not be any threads +- * that access xl variables. 
+- */ +- pthread_mutex_lock(&ctx->notify_lock); +- { +- while (ctx->notifying) +- pthread_cond_wait(&ctx->notify_cond, &ctx->notify_lock); +- } +- pthread_mutex_unlock(&ctx->notify_lock); +- +- glusterfs_graph_fini(graph); +- glusterfs_graph_destroy(graph); +-out: +- return NULL; +-} +- +-glusterfs_graph_t * +-glusterfs_muxsvc_setup_parent_graph(glusterfs_ctx_t *ctx, char *name, +- char *type) +-{ +- glusterfs_graph_t *parent_graph = NULL; +- xlator_t *ixl = NULL; +- int ret = -1; +- parent_graph = GF_CALLOC(1, sizeof(*parent_graph), +- gf_common_mt_glusterfs_graph_t); +- if (!parent_graph) +- goto out; +- +- INIT_LIST_HEAD(&parent_graph->list); +- +- ctx->active = parent_graph; +- ixl = GF_CALLOC(1, sizeof(*ixl), gf_common_mt_xlator_t); +- if (!ixl) +- goto out; +- +- ixl->ctx = ctx; +- ixl->graph = parent_graph; +- ixl->options = dict_new(); +- if (!ixl->options) +- goto out; +- +- ixl->name = gf_strdup(name); +- if (!ixl->name) +- goto out; +- +- ixl->is_autoloaded = 1; +- +- if (xlator_set_type(ixl, type) == -1) { +- gf_msg("glusterfs", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED, +- "%s (%s) set type failed", name, type); +- goto out; +- } +- +- glusterfs_graph_set_first(parent_graph, ixl); +- parent_graph->top = ixl; +- ixl = NULL; +- +- gettimeofday(&parent_graph->dob, NULL); +- fill_uuid(parent_graph->graph_uuid, 128); +- parent_graph->id = ctx->graph_id++; +- ret = 0; +-out: +- if (ixl) +- xlator_destroy(ixl); +- +- if (ret) { +- glusterfs_muxsvc_cleanup_parent(ctx, parent_graph); +- parent_graph = NULL; +- } +- return parent_graph; +-} +- +-int +-glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj) +-{ +- xlator_t *last_xl = NULL; +- glusterfs_graph_t *graph = NULL; +- glusterfs_graph_t *parent_graph = NULL; +- pthread_t clean_graph = { +- 0, +- }; +- int ret = -1; +- xlator_t *xl = NULL; +- +- if (!ctx || !ctx->active || !volfile_obj) +- goto out; +- parent_graph = ctx->active; +- graph = volfile_obj->graph; +- if (graph && graph->first) +- xl = graph->first; +- +- last_xl = graph->last_xl; +- if (last_xl) +- last_xl->next = NULL; +- if (!xl || xl->cleanup_starting) +- goto out; +- +- xl->cleanup_starting = 1; +- gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED, +- "detaching child %s", volfile_obj->vol_id); +- +- list_del_init(&volfile_obj->volfile_list); +- glusterfs_mux_xlator_unlink(parent_graph->top, xl); +- parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph); +- parent_graph->xl_count -= graph->xl_count; +- parent_graph->leaf_count -= graph->leaf_count; +- default_notify(xl, GF_EVENT_PARENT_DOWN, xl); +- parent_graph->id++; +- ret = 0; +-out: +- if (!ret) { +- list_del_init(&volfile_obj->volfile_list); +- if (graph) { +- ret = gf_thread_create_detached( +- &clean_graph, glusterfs_graph_cleanup, graph, "graph_clean"); +- if (ret) { +- gf_msg("glusterfs", GF_LOG_ERROR, EINVAL, +- LG_MSG_GRAPH_CLEANUP_FAILED, +- "%s failed to create clean " +- "up thread", +- volfile_obj->vol_id); +- ret = 0; +- } +- } +- GF_FREE(volfile_obj); +- } +- return ret; +-} +- +-int +-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, +- char *volfile_id, char *checksum) +-{ +- glusterfs_graph_t *graph = NULL; +- glusterfs_graph_t *parent_graph = NULL; +- glusterfs_graph_t *clean_graph = NULL; +- int ret = -1; +- xlator_t *xl = NULL; +- xlator_t *last_xl = NULL; +- gf_volfile_t *volfile_obj = NULL; +- pthread_t thread_id = { +- 0, +- }; +- +- if (!ctx) +- goto out; +- parent_graph = ctx->active; +- graph = 
glusterfs_graph_construct(fp); +- if (!graph) { +- gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED, +- "failed to construct the graph"); +- goto out; +- } +- graph->last_xl = glusterfs_get_last_xlator(graph); +- +- for (xl = graph->first; xl; xl = xl->next) { +- if (strcmp(xl->type, "mount/fuse") == 0) { +- gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL, +- LG_MSG_GRAPH_ATTACH_FAILED, +- "fuse xlator cannot be specified in volume file"); +- goto out; +- } +- } +- +- graph->leaf_count = glusterfs_count_leaves(glusterfs_root(graph)); +- xl = graph->first; +- /* TODO memory leaks everywhere need to free graph in case of error */ +- if (glusterfs_graph_prepare(graph, ctx, xl->name)) { +- gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED, +- "failed to prepare graph for xlator %s", xl->name); +- ret = -1; +- goto out; +- } else if (glusterfs_graph_init(graph)) { +- gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED, +- "failed to initialize graph for xlator %s", xl->name); +- ret = -1; +- goto out; +- } else if (glusterfs_graph_parent_up(graph)) { +- gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED, +- "failed to link the graphs for xlator %s ", xl->name); +- ret = -1; +- goto out; +- } +- +- if (!parent_graph) { +- parent_graph = glusterfs_muxsvc_setup_parent_graph(ctx, "glustershd", +- "debug/io-stats"); +- if (!parent_graph) +- goto out; +- ((xlator_t *)parent_graph->top)->next = xl; +- clean_graph = parent_graph; +- } else { +- last_xl = parent_graph->last_xl; +- if (last_xl) +- last_xl->next = xl; +- xl->prev = last_xl; +- } +- parent_graph->last_xl = graph->last_xl; +- +- ret = glusterfs_xlator_link(parent_graph->top, xl); +- if (ret) { +- gf_msg("graph", GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED, +- "parent up notification failed"); +- goto out; +- } +- parent_graph->xl_count += graph->xl_count; +- parent_graph->leaf_count += graph->leaf_count; +- parent_graph->id++; +- +- if (!volfile_obj) { +- volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t); +- if (!volfile_obj) { +- ret = -1; +- goto out; +- } +- } +- +- graph->used = 1; +- parent_graph->id++; +- list_add(&graph->list, &ctx->graphs); +- INIT_LIST_HEAD(&volfile_obj->volfile_list); +- volfile_obj->graph = graph; +- snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s", +- volfile_id); +- memcpy(volfile_obj->volfile_checksum, checksum, +- sizeof(volfile_obj->volfile_checksum)); +- list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list); +- +- gf_log_dump_graph(fp, graph); +- graph = NULL; +- +- ret = 0; +-out: +- if (ret) { +- if (graph) { +- gluster_graph_take_reference(graph->first); +- ret = gf_thread_create_detached(&thread_id, glusterfs_graph_cleanup, +- graph, "graph_clean"); +- if (ret) { +- gf_msg("glusterfs", GF_LOG_ERROR, EINVAL, +- LG_MSG_GRAPH_CLEANUP_FAILED, +- "%s failed to create clean " +- "up thread", +- volfile_id); +- ret = 0; +- } +- } +- if (clean_graph) +- glusterfs_muxsvc_cleanup_parent(ctx, clean_graph); +- } +- return ret; +-} +- +-int +-glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, +- gf_volfile_t *volfile_obj, char *checksum) +-{ +- glusterfs_graph_t *oldvolfile_graph = NULL; +- glusterfs_graph_t *newvolfile_graph = NULL; +- +- int ret = -1; +- +- if (!ctx) { +- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, LG_MSG_CTX_NULL, +- "ctx is NULL"); +- goto out; +- } +- +- /* Change the message id */ +- if (!volfile_obj) { +- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, 
LG_MSG_CTX_NULL, +- "failed to get volfile object"); +- goto out; +- } +- +- oldvolfile_graph = volfile_obj->graph; +- if (!oldvolfile_graph) { +- goto out; +- } +- +- newvolfile_graph = glusterfs_graph_construct(newvolfile_fp); +- +- if (!newvolfile_graph) { +- goto out; +- } +- newvolfile_graph->last_xl = glusterfs_get_last_xlator(newvolfile_graph); +- +- glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first); +- +- if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) { +- ret = glusterfs_process_svc_detach(ctx, volfile_obj); +- if (ret) { +- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, +- LG_MSG_GRAPH_CLEANUP_FAILED, +- "Could not detach " +- "old graph. Aborting the reconfiguration operation"); +- goto out; +- } +- ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, +- volfile_obj->vol_id, checksum); +- goto out; +- } +- +- gf_msg_debug("glusterfsd-mgmt", 0, +- "Only options have changed in the" +- " new graph"); +- +- ret = glusterfs_graph_reconfigure(oldvolfile_graph, newvolfile_graph); +- if (ret) { +- gf_msg_debug("glusterfsd-mgmt", 0, +- "Could not reconfigure " +- "new options in old graph"); +- goto out; +- } +- memcpy(volfile_obj->volfile_checksum, checksum, +- sizeof(volfile_obj->volfile_checksum)); +- +- ret = 0; +-out: +- +- if (newvolfile_graph) +- glusterfs_graph_destroy(newvolfile_graph); +- +- return ret; +-} +diff --git a/libglusterfs/src/graph.y b/libglusterfs/src/graph.y +index c60ff38..5b92985 100644 +--- a/libglusterfs/src/graph.y ++++ b/libglusterfs/src/graph.y +@@ -542,9 +542,6 @@ glusterfs_graph_new () + + INIT_LIST_HEAD (&graph->list); + +- pthread_mutex_init(&graph->mutex, NULL); +- pthread_cond_init(&graph->child_down_cond, NULL); +- + gettimeofday (&graph->dob, NULL); + + return graph; +diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym +index 05f93b4..4dca7de 100644 +--- a/libglusterfs/src/libglusterfs.sym ++++ b/libglusterfs/src/libglusterfs.sym +@@ -1155,8 +1155,3 @@ gf_changelog_register_generic + gf_gfid_generate_from_xxh64 + find_xlator_option_in_cmd_args_t + gf_d_type_from_ia_type +-glusterfs_graph_fini +-glusterfs_process_svc_attach_volfp +-glusterfs_mux_volfile_reconfigure +-glusterfs_process_svc_detach +-mgmt_is_multiplexed_daemon +diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c +index 0cf80c0..d18b50f 100644 +--- a/libglusterfs/src/statedump.c ++++ b/libglusterfs/src/statedump.c +@@ -810,8 +810,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx) + if (!ctx) + goto out; + +- if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) && +- (ctx && ctx->active)) { ++ if (ctx && ctx->active) { + top = ctx->active->first; + for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { + brick_count++; +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index 022c3ed..6bd4f09 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -1470,19 +1470,3 @@ gluster_graph_take_reference(xlator_t *tree) + } + return; + } +- +-gf_boolean_t +-mgmt_is_multiplexed_daemon(char *name) +-{ +- const char *mux_daemons[] = {"glustershd", NULL}; +- int i; +- +- if (!name) +- return _gf_false; +- +- for (i = 0; mux_daemons[i]; i++) { +- if (!strcmp(name, mux_daemons[i])) +- return _gf_true; +- } +- return _gf_false; +-} +diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h +index 7275d75..779878f 100644 +--- a/rpc/rpc-lib/src/protocol-common.h ++++ b/rpc/rpc-lib/src/protocol-common.h +@@ -245,8 
+245,6 @@ enum glusterd_brick_procnum { + GLUSTERD_NODE_BITROT, + GLUSTERD_BRICK_ATTACH, + GLUSTERD_DUMP_METRICS, +- GLUSTERD_SVC_ATTACH, +- GLUSTERD_SVC_DETACH, + GLUSTERD_BRICK_MAXVALUE, + }; + +diff --git a/tests/basic/glusterd/heald.t b/tests/basic/glusterd/heald.t +index 7dae3c3..ca112ad 100644 +--- a/tests/basic/glusterd/heald.t ++++ b/tests/basic/glusterd/heald.t +@@ -7,16 +7,11 @@ + # Covers enable/disable at the moment. Will be enhanced later to include + # the other commands as well. + +-function is_pid_running { +- local pid=$1 +- num=`ps auxww | grep glustershd | grep $pid | grep -v grep | wc -l` +- echo $num +-} +- + cleanup; + TEST glusterd + TEST pidof glusterd + ++volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol" + #Commands should fail when volume doesn't exist + TEST ! $CLI volume heal non-existent-volume enable + TEST ! $CLI volume heal non-existent-volume disable +@@ -25,55 +20,51 @@ TEST ! $CLI volume heal non-existent-volume disable + # volumes + TEST $CLI volume create dist $H0:$B0/dist + TEST $CLI volume start dist +-TEST "[ -z $(get_shd_process_pid dist)]" ++TEST "[ -z $(get_shd_process_pid)]" + TEST ! $CLI volume heal dist enable + TEST ! $CLI volume heal dist disable + + # Commands should work on replicate/disperse volume. + TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1 +-TEST "[ -z $(get_shd_process_pid r2)]" ++TEST "[ -z $(get_shd_process_pid)]" + TEST $CLI volume start r2 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal r2 enable + EXPECT "enable" volume_option r2 "cluster.self-heal-daemon" +-volfiler2=$(gluster system:: getwd)"/vols/r2/r2-shd.vol" +-EXPECT "enable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2 +-pid=$( get_shd_process_pid r2 ) ++EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal r2 disable + EXPECT "disable" volume_option r2 "cluster.self-heal-daemon" +-EXPECT "disable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon +-EXPECT "1" is_pid_running $pid ++EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + + # Commands should work on disperse volume. 
+ TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2 + TEST $CLI volume start ec2 +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal ec2 enable + EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon" +-volfileec2=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol" +-EXPECT "enable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2 +-pid=$(get_shd_process_pid ec2) ++EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal ec2 disable + EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon" +-EXPECT "disable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon +-EXPECT "1" is_pid_running $pid ++EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + + #Check that shd graph is rewritten correctly on volume stop/start +-EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse +- +-EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + TEST $CLI volume stop r2 +-EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + TEST $CLI volume stop ec2 + # When both the volumes are stopped glustershd volfile is not modified just the + # process is stopped +-TEST "[ -z $(get_shd_process_pid dist) ]" +-TEST "[ -z $(get_shd_process_pid ec2) ]" ++TEST "[ -z $(get_shd_process_pid) ]" + + TEST $CLI volume start r2 +-EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate ++EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + + TEST $CLI volume set r2 self-heal-daemon on + TEST $CLI volume set r2 cluster.self-heal-daemon off +diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t +index e6e65c4..cdb1a33 100644 +--- a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t ++++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t +@@ -55,9 +55,9 @@ TEST kill_glusterd 1 + #Bring back 1st glusterd + TEST $glusterd_1 + +-# We need to wait till PROCESS_UP_TIMEOUT and then check shd service started +-#on node 2, because once glusterd regains quorum, it will restart all volume +-#level daemons +-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2 ++# We need to wait till PROCESS_UP_TIMEOUT and then check shd service does not ++# come up on node 2 ++sleep $PROCESS_UP_TIMEOUT ++EXPECT "N" shd_up_status_2 + + cleanup; +diff --git a/tests/volume.rc b/tests/volume.rc +index 022d972..76a8fd4 100644 +--- a/tests/volume.rc ++++ b/tests/volume.rc +@@ -237,13 +237,11 @@ function ec_child_up_count_shd { + } + + function get_shd_process_pid { +- local vol=$1 +- ps auxww | grep "process-name\ glustershd" 
| awk '{print $2}' | head -1 ++ ps auxww | grep glusterfs | grep -E "glustershd/glustershd.pid" | awk '{print $2}' | head -1 + } + + function generate_shd_statedump { +- local vol=$1 +- generate_statedump $(get_shd_process_pid $vol) ++ generate_statedump $(get_shd_process_pid) + } + + function generate_nfs_statedump { +diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am +index 11ae189..5fe5156 100644 +--- a/xlators/mgmt/glusterd/src/Makefile.am ++++ b/xlators/mgmt/glusterd/src/Makefile.am +@@ -18,12 +18,11 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ + glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \ + glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \ + glusterd-snapshot-utils.c glusterd-conn-mgmt.c \ +- glusterd-proc-mgmt.c glusterd-svc-mgmt.c \ ++ glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \ + glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \ + glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \ + glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \ + glusterd-reset-brick.c glusterd-tierd-svc.c glusterd-tierd-svc-helper.c \ +- glusterd-shd-svc.c glusterd-shd-svc-helper.c \ + glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c + + +@@ -39,12 +38,11 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \ + glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \ + glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \ + glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \ +- glusterd-svc-mgmt.h glusterd-nfs-svc.h \ ++ glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \ + glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \ + glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \ + glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \ + glusterd-tierd-svc.h glusterd-tierd-svc-helper.h \ +- glusterd-shd-svc.h glusterd-shd-svc-helper.h \ + glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \ + $(CONTRIBDIR)/userspace-rcu/rculist-extra.h + +diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +index 042a805..ad9a572 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +@@ -2863,7 +2863,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr) + } + + if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL, + "Unable to reconfigure NFS-Server"); +diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c +index 16eefa1..c6d7a00 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c ++++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c +@@ -138,45 +138,3 @@ glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath, + glusterd_set_socket_filepath(sockfilepath, socketpath, len); + return 0; + } +- +-int +-__glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata, +- rpc_clnt_event_t event, void *data) +-{ +- glusterd_conf_t *conf = THIS->private; +- glusterd_svc_proc_t *mux_proc = mydata; +- int ret = -1; +- +- /* Silently ignoring this error, exactly like the current +- * implementation */ +- if (!mux_proc) +- return 0; +- +- if (event == RPC_CLNT_DESTROY) { +- 
/*RPC_CLNT_DESTROY will only called after mux_proc detached from the +- * list. So it is safe to call without lock. Processing +- * RPC_CLNT_DESTROY under a lock will lead to deadlock. +- */ +- if (mux_proc->data) { +- glusterd_volinfo_unref(mux_proc->data); +- mux_proc->data = NULL; +- } +- GF_FREE(mux_proc); +- ret = 0; +- } else { +- pthread_mutex_lock(&conf->attach_lock); +- { +- ret = mux_proc->notify(mux_proc, event); +- } +- pthread_mutex_unlock(&conf->attach_lock); +- } +- return ret; +-} +- +-int +-glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata, +- rpc_clnt_event_t event, void *data) +-{ +- return glusterd_big_locked_notify(rpc, mydata, event, data, +- __glusterd_muxsvc_conn_common_notify); +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h +index d1c4607..602c0ba 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h ++++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h +@@ -43,11 +43,9 @@ glusterd_conn_disconnect(glusterd_conn_t *conn); + int + glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data); +-int +-glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata, +- rpc_clnt_event_t event, void *data); + + int32_t + glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath, + int len); ++ + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c +index b01fd4d..f9c8617 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c +@@ -370,7 +370,6 @@ int + glusterd_gfproxydsvc_restart() + { + glusterd_volinfo_t *volinfo = NULL; +- glusterd_volinfo_t *tmp = NULL; + int ret = -1; + xlator_t *this = THIS; + glusterd_conf_t *conf = NULL; +@@ -381,7 +380,7 @@ glusterd_gfproxydsvc_restart() + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); + +- cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list) ++ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list) + { + /* Start per volume gfproxyd svc */ + if (volinfo->status == GLUSTERD_STATUS_STARTED) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index ac788a0..cb2666b 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5940,11 +5940,6 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict) + + GF_FREE(rebal_data); + +- fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count, +- volinfo->shd.svc.online ? "Online" : "Offline"); +- fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count, +- volinfo->shd.svc.inited ? "True" : "False"); +- + if (volinfo->type == GF_CLUSTER_TYPE_TIER) { + ret = glusterd_volume_get_hot_tier_type_str(volinfo, + &hot_tier_type_str); +@@ -6014,6 +6009,12 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict) + + fprintf(fp, "\n[Services]\n"); + ++ if (priv->shd_svc.inited) { ++ fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name); ++ fprintf(fp, "svc%d.online_status: %s\n\n", count, ++ priv->shd_svc.online ? 
"Online" : "Offline"); ++ } ++ + if (priv->nfs_svc.inited) { + fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name); + fprintf(fp, "svc%d.online_status: %s\n\n", count, +diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c +index 1ba58c3..5599a63 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c +@@ -30,7 +30,6 @@ + #include "rpcsvc.h" + #include "rpc-common-xdr.h" + #include "glusterd-gfproxyd-svc-helper.h" +-#include "glusterd-shd-svc-helper.h" + + extern struct rpc_clnt_program gd_peer_prog; + extern struct rpc_clnt_program gd_mgmt_prog; +@@ -329,26 +328,6 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, + goto out; + } + +- volid_ptr = strstr(volume_id, "shd/"); +- if (volid_ptr) { +- volid_ptr = strchr(volid_ptr, '/'); +- if (!volid_ptr) { +- ret = -1; +- goto out; +- } +- volid_ptr++; +- +- ret = glusterd_volinfo_find(volid_ptr, &volinfo); +- if (ret == -1) { +- gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo"); +- goto out; +- } +- +- glusterd_svc_build_shd_volfile_path(volinfo, path, path_len); +- ret = 0; +- goto out; +- } +- + volid_ptr = strstr(volume_id, "/snaps/"); + if (volid_ptr) { + ret = get_snap_volname_and_volinfo(volid_ptr, &volname, &volinfo); +diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h +index 17052ce..7a784db 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h ++++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h +@@ -51,7 +51,6 @@ typedef enum gf_gld_mem_types_ { + gf_gld_mt_missed_snapinfo_t, + gf_gld_mt_snap_create_args_t, + gf_gld_mt_glusterd_brick_proc_t, +- gf_gld_mt_glusterd_svc_proc_t, + gf_gld_mt_end, + } gf_gld_mem_types_t; + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h +index 424e15f..c7b3ca8 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-messages.h ++++ b/xlators/mgmt/glusterd/src/glusterd-messages.h +@@ -298,8 +298,6 @@ GLFS_MSGID( + GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE, + GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL, + GD_MSG_MANAGER_FUNCTION_FAILED, GD_MSG_NFS_GANESHA_DISABLED, +- GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL, +- GD_MSG_SHD_START_FAIL, GD_MSG_SHD_OBJ_GET_FAIL, GD_MSG_SVC_ATTACH_FAIL, +- GD_MSG_ATTACH_INFO, GD_MSG_DETACH_INFO, GD_MSG_SVC_DETACH_FAIL); ++ GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL); + + #endif /* !_GLUSTERD_MESSAGES_H_ */ +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 9ea695e..0d29de2 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -44,7 +44,6 @@ + #include "glusterd-snapshot-utils.h" + #include "glusterd-svc-mgmt.h" + #include "glusterd-svc-helper.h" +-#include "glusterd-shd-svc-helper.h" + #include "glusterd-shd-svc.h" + #include "glusterd-nfs-svc.h" + #include "glusterd-quotad-svc.h" +@@ -2225,11 +2224,6 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key, + if (ret) + goto out; + +- svc = &(volinfo->shd.svc); +- ret = svc->reconfigure(volinfo); +- if (ret) +- goto out; +- + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, +@@ -2244,7 +2238,7 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char 
*key, + goto out; + + if (GLUSTERD_STATUS_STARTED == volinfo->status) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) + goto out; + } +@@ -2700,11 +2694,6 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, + if (ret) + goto out; + +- svc = &(volinfo->shd.svc); +- ret = svc->reconfigure(volinfo); +- if (ret) +- goto out; +- + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, +@@ -2718,7 +2707,7 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, + } + } + if (svcs_reconfigure) { +- ret = glusterd_svcs_reconfigure(NULL); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL, + "Unable to restart " +@@ -3103,11 +3092,6 @@ glusterd_op_set_volume(dict_t *dict, char **errstr) + if (ret) + goto out; + +- svc = &(volinfo->shd.svc); +- ret = svc->reconfigure(volinfo); +- if (ret) +- goto out; +- + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, +@@ -3123,7 +3107,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr) + goto out; + + if (GLUSTERD_STATUS_STARTED == volinfo->status) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL, + "Unable to restart services"); +@@ -3156,11 +3140,6 @@ glusterd_op_set_volume(dict_t *dict, char **errstr) + if (ret) + goto out; + +- svc = &(volinfo->shd.svc); +- ret = svc->reconfigure(volinfo); +- if (ret) +- goto out; +- + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, +@@ -3176,7 +3155,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr) + goto out; + + if (GLUSTERD_STATUS_STARTED == volinfo->status) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL, + "Unable to restart services"); +@@ -3383,7 +3362,7 @@ glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + goto out; + + if (GLUSTERD_STATUS_STARTED == volinfo->status) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) + goto out; + } +@@ -3666,6 +3645,14 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + other_count++; + node_count++; + ++ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) { ++ ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0, ++ vol_opts); ++ if (ret) ++ goto out; ++ other_count++; ++ node_count++; ++ + } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) { + ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0, + vol_opts); +@@ -3699,12 +3686,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + goto out; + other_count++; + node_count++; +- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) { +- ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index); +- if (ret) +- goto out; +- other_count++; +- node_count++; + } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) { + ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick); + if (ret) +@@ -3767,19 +3748,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + node_count++; + } + +- if (glusterd_is_shd_compatible_volume(volinfo)) { +- shd_enabled = gd_is_self_heal_enabled(volinfo, 
vol_opts); +- if (shd_enabled) { +- ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, +- other_index); +- if (ret) +- goto out; +- other_count++; +- other_index++; +- node_count++; +- } +- } +- + nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY, + _gf_false); + if (!nfs_disabled) { +@@ -3792,6 +3760,18 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + node_count++; + } + ++ if (glusterd_is_shd_compatible_volume(volinfo)) ++ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts); ++ if (shd_enabled) { ++ ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, ++ other_index, vol_opts); ++ if (ret) ++ goto out; ++ other_count++; ++ node_count++; ++ other_index++; ++ } ++ + if (glusterd_is_volume_quota_enabled(volinfo)) { + ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, + other_index, vol_opts); +@@ -6904,18 +6884,16 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op, + int ret = -1; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; +- glusterd_svc_t *svc = NULL; + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); +- svc = &(volinfo->shd.svc); + + switch (heal_op) { + case GF_SHD_OP_INDEX_SUMMARY: + case GF_SHD_OP_STATISTICS_HEAL_COUNT: +- if (!svc->online) { ++ if (!priv->shd_svc.online) { + if (!rsp_dict) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL, + "Received " +@@ -6936,7 +6914,7 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op, + break; + + case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA: +- if (!svc->online) { ++ if (!priv->shd_svc.online) { + if (!rsp_dict) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL, + "Received " +@@ -7071,7 +7049,7 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr, + ret = -1; + goto out; + } else { +- pending_node->node = &(volinfo->shd.svc); ++ pending_node->node = &(priv->shd_svc); + pending_node->type = GD_NODE_SHD; + cds_list_add_tail(&pending_node->list, selected); + pending_node = NULL; +@@ -7205,7 +7183,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr, + glusterd_pending_node_t *pending_node = NULL; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; +- glusterd_svc_t *svc = NULL; + + GF_ASSERT(dict); + +@@ -7301,8 +7278,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr, + + ret = 0; + } else if ((cmd & GF_CLI_STATUS_SHD) != 0) { +- svc = &(volinfo->shd.svc); +- if (!svc->online) { ++ if (!priv->shd_svc.online) { + ret = -1; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED, + "Self-heal daemon is not running"); +@@ -7314,7 +7290,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr, + ret = -1; + goto out; + } +- pending_node->node = svc; ++ pending_node->node = &(priv->shd_svc); + pending_node->type = GD_NODE_SHD; + pending_node->index = 0; + cds_list_add_tail(&pending_node->list, selected); +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c +deleted file mode 100644 +index 9196758..0000000 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c ++++ /dev/null +@@ -1,140 +0,0 @@ +-/* +- Copyright (c) 2016 Red Hat, Inc. +- This file is part of GlusterFS. 
+- +- This file is licensed to you under your choice of the GNU Lesser +- General Public License, version 3 or any later version (LGPLv3 or +- later), or the GNU General Public License, version 2 (GPLv2), in all +- cases as published by the Free Software Foundation. +-*/ +- +-#include "glusterd.h" +-#include "glusterd-utils.h" +-#include "glusterd-shd-svc-helper.h" +-#include "glusterd-messages.h" +-#include "glusterd-volgen.h" +- +-void +-glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path, +- int path_len) +-{ +- char sockfilepath[PATH_MAX] = { +- 0, +- }; +- char rundir[PATH_MAX] = { +- 0, +- }; +- int32_t len = 0; +- glusterd_conf_t *priv = THIS->private; +- +- if (!priv) +- return; +- +- GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv); +- len = snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir, +- uuid_utoa(MY_UUID)); +- if ((len < 0) || (len >= sizeof(sockfilepath))) { +- sockfilepath[0] = 0; +- } +- +- glusterd_set_socket_filepath(sockfilepath, path, path_len); +-} +- +-void +-glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path, +- int path_len) +-{ +- char rundir[PATH_MAX] = { +- 0, +- }; +- glusterd_conf_t *priv = THIS->private; +- +- if (!priv) +- return; +- +- GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv); +- +- snprintf(path, path_len, "%s/%s-shd.pid", rundir, volinfo->volname); +-} +- +-void +-glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path, +- int path_len) +-{ +- char workdir[PATH_MAX] = { +- 0, +- }; +- glusterd_conf_t *priv = THIS->private; +- +- if (!priv) +- return; +- +- GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv); +- +- snprintf(path, path_len, "%s/%s-shd.vol", workdir, volinfo->volname); +-} +- +-void +-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len) +-{ +- snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname); +-} +- +-void +-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len) +-{ +- snprintf(logfile, len, "%s/shd.log", logdir); +-} +- +-void +-glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd) +-{ +- glusterd_svc_proc_t *svc_proc = NULL; +- glusterd_svc_t *svc = NULL; +- glusterd_conf_t *conf = NULL; +- gf_boolean_t need_unref = _gf_false; +- rpc_clnt_t *rpc = NULL; +- +- conf = THIS->private; +- if (!conf) +- return; +- +- GF_VALIDATE_OR_GOTO(THIS->name, conf, out); +- GF_VALIDATE_OR_GOTO(THIS->name, shd, out); +- +- svc = &shd->svc; +- shd->attached = _gf_false; +- +- if (svc->conn.rpc) { +- rpc_clnt_unref(svc->conn.rpc); +- svc->conn.rpc = NULL; +- } +- +- pthread_mutex_lock(&conf->attach_lock); +- { +- svc_proc = svc->svc_proc; +- svc->svc_proc = NULL; +- svc->inited = _gf_false; +- cds_list_del_init(&svc->mux_svc); +- glusterd_unlink_file(svc->proc.pidfile); +- +- if (svc_proc && cds_list_empty(&svc_proc->svcs)) { +- cds_list_del_init(&svc_proc->svc_proc_list); +- /* We cannot free svc_proc list from here. Because +- * if there are pending events on the rpc, it will +- * try to access the corresponding svc_proc, so unrefing +- * rpc request and then cleaning up the memory is carried +- * from the notify function upon RPC_CLNT_DESTROY destroy. 
+- */ +- need_unref = _gf_true; +- rpc = svc_proc->rpc; +- svc_proc->rpc = NULL; +- } +- } +- pthread_mutex_unlock(&conf->attach_lock); +- /*rpc unref has to be performed outside the lock*/ +- if (need_unref && rpc) +- rpc_clnt_unref(rpc); +-out: +- return; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h +deleted file mode 100644 +index c70702c..0000000 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h ++++ /dev/null +@@ -1,45 +0,0 @@ +-/* +- Copyright (c) 2016 Red Hat, Inc. +- This file is part of GlusterFS. +- +- This file is licensed to you under your choice of the GNU Lesser +- General Public License, version 3 or any later version (LGPLv3 or +- later), or the GNU General Public License, version 2 (GPLv2), in all +- cases as published by the Free Software Foundation. +-*/ +- +-#ifndef _GLUSTERD_SHD_SVC_HELPER_H_ +-#define _GLUSTERD_SHD_SVC_HELPER_H_ +- +-#include "glusterd.h" +-#include "glusterd-svc-mgmt.h" +- +-void +-glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path, +- int path_len); +- +-void +-glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path, +- int path_len); +- +-void +-glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path, +- int path_len); +- +-void +-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len); +- +-void +-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len); +- +-void +-glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd); +- +-int +-glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo, +- glusterd_svc_t *svc, int flags); +- +-int +-glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo); +- +-#endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +index 4789843..f5379b0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +@@ -13,10 +13,9 @@ + #include "glusterd.h" + #include "glusterd-utils.h" + #include "glusterd-volgen.h" ++#include "glusterd-svc-mgmt.h" + #include "glusterd-shd-svc.h" +-#include "glusterd-shd-svc-helper.h" + #include "glusterd-svc-helper.h" +-#include "glusterd-store.h" + + #define GD_SHD_PROCESS_NAME "--process-name" + char *shd_svc_name = "glustershd"; +@@ -24,145 +23,27 @@ char *shd_svc_name = "glustershd"; + void + glusterd_shdsvc_build(glusterd_svc_t *svc) + { +- int ret = -1; +- ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name); +- if (ret < 0) +- return; +- +- CDS_INIT_LIST_HEAD(&svc->mux_svc); + svc->manager = glusterd_shdsvc_manager; + svc->start = glusterd_shdsvc_start; +- svc->stop = glusterd_shdsvc_stop; +- svc->reconfigure = glusterd_shdsvc_reconfigure; ++ svc->stop = glusterd_svc_stop; + } + + int +-glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, +- glusterd_svc_proc_t *mux_svc) ++glusterd_shdsvc_init(glusterd_svc_t *svc) + { +- int ret = -1; +- char rundir[PATH_MAX] = { +- 0, +- }; +- char sockpath[PATH_MAX] = { +- 0, +- }; +- char pidfile[PATH_MAX] = { +- 0, +- }; +- char volfile[PATH_MAX] = { +- 0, +- }; +- char logdir[PATH_MAX] = { +- 0, +- }; +- char logfile[PATH_MAX] = { +- 0, +- }; +- char volfileid[256] = {0}; +- glusterd_svc_t *svc = NULL; +- glusterd_volinfo_t *volinfo = NULL; +- glusterd_conf_t *priv = NULL; +- glusterd_muxsvc_conn_notify_t notify = NULL; +- xlator_t *this = NULL; +- char *volfileserver = NULL; +- int32_t len = 0; +- +- this = THIS; +- 
GF_VALIDATE_OR_GOTO(THIS->name, this, out); +- +- priv = this->private; +- GF_VALIDATE_OR_GOTO(this->name, priv, out); +- +- volinfo = data; +- GF_VALIDATE_OR_GOTO(this->name, data, out); +- GF_VALIDATE_OR_GOTO(this->name, mux_svc, out); +- +- svc = &(volinfo->shd.svc); +- +- ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name); +- if (ret < 0) +- goto out; +- +- notify = glusterd_muxsvc_common_rpc_notify; +- glusterd_store_perform_node_state_store(volinfo); +- +- GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv); +- glusterd_svc_create_rundir(rundir); +- +- glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir)); +- glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile)); +- +- /* Initialize the connection mgmt */ +- if (mux_conn && mux_svc->rpc) { +- /* multiplexed svc */ +- svc->conn.frame_timeout = mux_conn->frame_timeout; +- /* This will be unrefed from glusterd_shd_svcproc_cleanup*/ +- svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc); +- ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s", +- mux_conn->sockpath); +- } else { +- ret = mkdir_p(logdir, 0755, _gf_true); +- if ((ret == -1) && (EEXIST != errno)) { +- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, +- "Unable to create logdir %s", logdir); +- goto out; +- } +- +- glusterd_svc_build_shd_socket_filepath(volinfo, sockpath, +- sizeof(sockpath)); +- ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600, +- notify); +- if (ret) +- goto out; +- /* This will be unrefed when the last svcs is detached from the list */ +- if (!mux_svc->rpc) +- mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc); +- } +- +- /* Initialize the process mgmt */ +- glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile)); +- glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX); +- len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname); +- if ((len < 0) || (len >= sizeof(volfileid))) { +- ret = -1; +- goto out; +- } +- +- if (dict_get_strn(this->options, "transport.socket.bind-address", +- SLEN("transport.socket.bind-address"), +- &volfileserver) != 0) { +- volfileserver = "localhost"; +- } +- ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir, +- logfile, volfile, volfileid, volfileserver); +- if (ret) +- goto out; +- +-out: +- gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret); +- return ret; ++ return glusterd_svc_init(svc, shd_svc_name); + } + +-int +-glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) ++static int ++glusterd_shdsvc_create_volfile() + { + char filepath[PATH_MAX] = { + 0, + }; +- + int ret = -1; ++ glusterd_conf_t *conf = THIS->private; + dict_t *mod_dict = NULL; + +- glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX); +- if (!glusterd_is_shd_compatible_volume(volinfo)) { +- /* If volfile exist, delete it. This case happens when we +- * change from replica/ec to distribute. 
+- */ +- (void)glusterd_unlink_file(filepath); +- ret = 0; +- goto out; +- } + mod_dict = dict_new(); + if (!mod_dict) + goto out; +@@ -183,7 +64,9 @@ glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) + if (ret) + goto out; + +- ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict); ++ glusterd_svc_build_volfile_path(shd_svc_name, conf->workdir, filepath, ++ sizeof(filepath)); ++ ret = glusterd_create_global_volfile(build_shd_graph, filepath, mod_dict); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "Failed to create volfile"); +@@ -198,89 +81,26 @@ out: + return ret; + } + +-gf_boolean_t +-glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc) +-{ +- glusterd_svc_proc_t *svc_proc = NULL; +- glusterd_shdsvc_t *shd = NULL; +- glusterd_svc_t *temp_svc = NULL; +- glusterd_volinfo_t *volinfo = NULL; +- gf_boolean_t comp = _gf_false; +- glusterd_conf_t *conf = THIS->private; +- +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- pthread_mutex_lock(&conf->attach_lock); +- { +- svc_proc = svc->svc_proc; +- if (!svc_proc) +- goto unlock; +- cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc) +- { +- /* Get volinfo->shd from svc object */ +- shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); +- if (!shd) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, +- "Failed to get shd object " +- "from shd service"); +- goto unlock; +- } +- +- /* Get volinfo from shd */ +- volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); +- if (!volinfo) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to get volinfo from " +- "from shd"); +- goto unlock; +- } +- if (!glusterd_is_shd_compatible_volume(volinfo)) +- continue; +- if (volinfo->status == GLUSTERD_STATUS_STARTED) +- goto unlock; +- } +- comp = _gf_true; +- } +-unlock: +- pthread_mutex_unlock(&conf->attach_lock); +-out: +- return comp; +-} +- + int + glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + { +- int ret = -1; ++ int ret = 0; + glusterd_volinfo_t *volinfo = NULL; + +- volinfo = data; +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); +- +- if (volinfo) +- glusterd_volinfo_ref(volinfo); +- +- ret = glusterd_shdsvc_create_volfile(volinfo); +- if (ret) +- goto out; +- +- if (!glusterd_is_shd_compatible_volume(volinfo)) { +- ret = 0; +- if (svc->inited) { +- /* This means glusterd was running for this volume and now +- * it was converted to a non-shd volume. 
So just stop the shd +- */ +- ret = svc->stop(svc, SIGTERM); ++ if (!svc->inited) { ++ ret = glusterd_shdsvc_init(svc); ++ if (ret) { ++ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC, ++ "Failed to init shd " ++ "service"); ++ goto out; ++ } else { ++ svc->inited = _gf_true; ++ gf_msg_debug(THIS->name, 0, "shd service initialized"); + } +- goto out; + } + +- ret = glusterd_shd_svc_mux_init(volinfo, svc); +- if (ret) { +- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC, +- "Failed to init shd service"); +- goto out; +- } ++ volinfo = data; + + /* If all the volumes are stopped or all shd compatible volumes + * are stopped then stop the service if: +@@ -290,26 +110,31 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) + * - volinfo is NULL or + * - volinfo is present and volume is shd compatible + */ +- if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) { +- /* TODO +- * Take a lock and detach all svc's to stop the process +- * also reset the init flag +- */ +- ret = svc->stop(svc, SIGTERM); +- } else if (volinfo) { +- ret = svc->stop(svc, SIGTERM); +- if (ret) +- goto out; ++ if (glusterd_are_all_volumes_stopped() || ++ glusterd_all_shd_compatible_volumes_stopped()) { ++ if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) { ++ ret = svc->stop(svc, SIGTERM); ++ } ++ } else { ++ if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) { ++ ret = glusterd_shdsvc_create_volfile(); ++ if (ret) ++ goto out; ++ ++ ret = svc->stop(svc, SIGTERM); ++ if (ret) ++ goto out; + +- if (volinfo->status == GLUSTERD_STATUS_STARTED) { + ret = svc->start(svc, flags); + if (ret) + goto out; ++ ++ ret = glusterd_conn_connect(&(svc->conn)); ++ if (ret) ++ goto out; + } + } + out: +- if (volinfo) +- glusterd_volinfo_unref(volinfo); + if (ret) + gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); + gf_msg_debug(THIS->name, 0, "Returning %d", ret); +@@ -318,7 +143,7 @@ out: + } + + int +-glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags) ++glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) + { + int ret = -1; + char glusterd_uuid_option[PATH_MAX] = {0}; +@@ -363,136 +188,31 @@ glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags) + goto out; + + ret = glusterd_svc_start(svc, flags, cmdline); +- if (ret) +- goto out; + +- ret = glusterd_conn_connect(&(svc->conn)); + out: + if (cmdline) + dict_unref(cmdline); +- return ret; +-} + +-int +-glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo, +- glusterd_svc_t *svc, int flags) +-{ +- int ret = -1; +- glusterd_svc_proc_t *mux_proc = NULL; +- glusterd_conf_t *conf = NULL; +- +- conf = THIS->private; +- +- if (!conf || !volinfo || !svc) +- return -1; +- glusterd_shd_svcproc_cleanup(&volinfo->shd); +- mux_proc = glusterd_svcprocess_new(); +- if (!mux_proc) { +- return -1; +- } +- ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc); +- if (ret) +- return -1; +- pthread_mutex_lock(&conf->attach_lock); +- { +- cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs); +- svc->svc_proc = mux_proc; +- cds_list_del_init(&svc->mux_svc); +- cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs); +- } +- pthread_mutex_unlock(&conf->attach_lock); +- +- ret = glusterd_new_shd_svc_start(svc, flags); +- if (!ret) { +- volinfo->shd.attached = _gf_true; +- } +- return ret; +-} +- +-int +-glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) +-{ +- int ret = -1; +- glusterd_shdsvc_t *shd = NULL; +- glusterd_volinfo_t *volinfo = NULL; +- glusterd_conf_t *conf = NULL; +- +- 
GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- conf = THIS->private; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- +- /* Get volinfo->shd from svc object */ +- shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); +- if (!shd) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, +- "Failed to get shd object " +- "from shd service"); +- return -1; +- } +- +- /* Get volinfo from shd */ +- volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); +- if (!volinfo) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to get volinfo from " +- "from shd"); +- return -1; +- } +- +- if (volinfo->status != GLUSTERD_STATUS_STARTED) +- return -1; +- +- glusterd_volinfo_ref(volinfo); +- if (!svc->inited) { +- ret = glusterd_shd_svc_mux_init(volinfo, svc); +- if (ret) +- goto out; +- } +- +- if (shd->attached) { +- ret = glusterd_attach_svc(svc, volinfo, flags); +- if (ret) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to attach shd svc(volume=%s) to pid=%d. Starting" +- "a new process", +- volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags); +- } +- goto out; +- } +- ret = glusterd_new_shd_svc_start(svc, flags); +- if (!ret) { +- shd->attached = _gf_true; +- } +-out: +- if (volinfo) +- glusterd_volinfo_unref(volinfo); + gf_msg_debug(THIS->name, 0, "Returning %d", ret); + + return ret; + } + + int +-glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) ++glusterd_shdsvc_reconfigure() + { + int ret = -1; + xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; + gf_boolean_t identical = _gf_false; +- dict_t *mod_dict = NULL; +- glusterd_svc_t *svc = NULL; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + +- if (!volinfo) { +- /* reconfigure will be called separately*/ +- ret = 0; +- goto out; +- } ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO(this->name, priv, out); + +- glusterd_volinfo_ref(volinfo); +- svc = &(volinfo->shd.svc); +- if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) ++ if (glusterd_all_shd_compatible_volumes_stopped()) + goto manager; + + /* +@@ -500,42 +220,8 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) + * and cksum i.e. "character-by-character". If YES, then + * NOTHING has been changed, just return. + */ +- +- if (!glusterd_is_shd_compatible_volume(volinfo)) { +- if (svc->inited) +- goto manager; +- +- /* Nothing to do if not shd compatible */ +- ret = 0; +- goto out; +- } +- mod_dict = dict_new(); +- if (!mod_dict) +- goto out; +- +- ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0); +- if (ret) +- goto out; +- +- ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on"); +- if (ret) +- goto out; +- +- ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on"); +- if (ret) +- goto out; +- +- ret = dict_set_int32(mod_dict, "graph-check", 1); +- if (ret) +- goto out; +- +- ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on"); +- if (ret) +- goto out; +- +- ret = glusterd_volume_svc_check_volfile_identical( +- "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile, +- &identical); ++ ret = glusterd_svc_check_volfile_identical(priv->shd_svc.name, ++ build_shd_graph, &identical); + if (ret) + goto out; + +@@ -550,9 +236,8 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) + * changed, then inform the xlator to reconfigure the options. 
+ */ + identical = _gf_false; /* RESET the FLAG */ +- ret = glusterd_volume_svc_check_topology_identical( +- "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile, +- &identical); ++ ret = glusterd_svc_check_topology_identical(priv->shd_svc.name, ++ build_shd_graph, &identical); + if (ret) + goto out; + +@@ -560,7 +245,7 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) + * options to shd volfile, so that shd will be reconfigured. + */ + if (identical) { +- ret = glusterd_shdsvc_create_volfile(volinfo); ++ ret = glusterd_shdsvc_create_volfile(); + if (ret == 0) { /* Only if above PASSES */ + ret = glusterd_fetchspec_notify(THIS); + } +@@ -568,129 +253,12 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) + } + manager: + /* +- * shd volfile's topology has been changed. volfile needs +- * to be RECONFIGURED to ACT on the changed volfile. ++ * shd volfile's topology has been changed. shd server needs ++ * to be RESTARTED to ACT on the changed volfile. + */ +- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); ++ ret = priv->shd_svc.manager(&(priv->shd_svc), NULL, PROC_START_NO_WAIT); + + out: +- if (volinfo) +- glusterd_volinfo_unref(volinfo); +- if (mod_dict) +- dict_unref(mod_dict); + gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret); + return ret; + } +- +-int +-glusterd_shdsvc_restart() +-{ +- glusterd_volinfo_t *volinfo = NULL; +- glusterd_volinfo_t *tmp = NULL; +- int ret = -1; +- xlator_t *this = THIS; +- glusterd_conf_t *conf = NULL; +- glusterd_svc_t *svc = NULL; +- +- GF_VALIDATE_OR_GOTO("glusterd", this, out); +- +- conf = this->private; +- GF_VALIDATE_OR_GOTO(this->name, conf, out); +- +- pthread_mutex_lock(&conf->volume_lock); +- cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list) +- { +- glusterd_volinfo_ref(volinfo); +- pthread_mutex_unlock(&conf->volume_lock); +- /* Start per volume shd svc */ +- if (volinfo->status == GLUSTERD_STATUS_STARTED) { +- svc = &(volinfo->shd.svc); +- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL, +- "Couldn't start shd for " +- "vol: %s on restart", +- volinfo->volname); +- gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s", +- volinfo->volname, svc->name); +- glusterd_volinfo_unref(volinfo); +- goto out; +- } +- } +- glusterd_volinfo_unref(volinfo); +- pthread_mutex_lock(&conf->volume_lock); +- } +- pthread_mutex_unlock(&conf->volume_lock); +-out: +- return ret; +-} +- +-int +-glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig) +-{ +- int ret = -1; +- glusterd_svc_proc_t *svc_proc = NULL; +- glusterd_shdsvc_t *shd = NULL; +- glusterd_volinfo_t *volinfo = NULL; +- gf_boolean_t empty = _gf_false; +- glusterd_conf_t *conf = NULL; +- int pid = -1; +- +- conf = THIS->private; +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- svc_proc = svc->svc_proc; +- GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out); +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- +- /* Get volinfo->shd from svc object */ +- shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); +- if (!shd) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, +- "Failed to get shd object " +- "from shd service"); +- return -1; +- } +- +- /* Get volinfo from shd */ +- volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); +- if (!volinfo) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to get volinfo from " +- "from shd"); +- return -1; +- } +- +- glusterd_volinfo_ref(volinfo); +- 
pthread_mutex_lock(&conf->attach_lock); +- { +- gf_is_service_running(svc->proc.pidfile, &pid); +- cds_list_del_init(&svc->mux_svc); +- empty = cds_list_empty(&svc_proc->svcs); +- } +- pthread_mutex_unlock(&conf->attach_lock); +- if (empty) { +- /* Unref will happen when destroying the connection */ +- glusterd_volinfo_ref(volinfo); +- svc_proc->data = volinfo; +- ret = glusterd_svc_stop(svc, sig); +- } +- if (!empty && pid != -1) { +- ret = glusterd_detach_svc(svc, volinfo, sig); +- if (ret) +- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, +- "shd service is failed to detach volume %s from pid %d", +- volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- else +- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS, +- "Shd service is detached for volume %s from pid %d", +- volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- } +- svc->online = _gf_false; +- (void)glusterd_unlink_file((char *)svc->proc.pidfile); +- glusterd_shd_svcproc_cleanup(shd); +- ret = 0; +- glusterd_volinfo_unref(volinfo); +-out: +- gf_msg_debug(THIS->name, 0, "Returning %d", ret); +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h +index 55b409f..775a9d4 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h ++++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h +@@ -12,20 +12,12 @@ + #define _GLUSTERD_SHD_SVC_H_ + + #include "glusterd-svc-mgmt.h" +-#include "glusterd.h" +- +-typedef struct glusterd_shdsvc_ glusterd_shdsvc_t; +-struct glusterd_shdsvc_ { +- glusterd_svc_t svc; +- gf_boolean_t attached; +-}; + + void + glusterd_shdsvc_build(glusterd_svc_t *svc); + + int +-glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, +- glusterd_svc_proc_t *svc_proc); ++glusterd_shdsvc_init(glusterd_svc_t *svc); + + int + glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags); +@@ -35,11 +27,4 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags); + + int + glusterd_shdsvc_reconfigure(); +- +-int +-glusterd_shdsvc_restart(); +- +-int +-glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig); +- + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c +index 943b1c6..54a7bd1 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-sm.c +@@ -748,16 +748,6 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv) + } + } + +- if (glusterd_is_shd_compatible_volume(volinfo)) { +- svc = &(volinfo->shd.svc); +- ret = svc->stop(svc, SIGTERM); +- if (ret) { +- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, +- "Failed " +- "to stop shd daemon service"); +- } +- } +- + if (glusterd_is_gfproxyd_enabled(volinfo)) { + svc = &(volinfo->gfproxyd.svc); + ret = svc->stop(svc, SIGTERM); +@@ -785,7 +775,7 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv) + } + + /*Reconfigure all daemon services upon peer detach*/ +- ret = glusterd_svcs_reconfigure(NULL); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, + "Failed to reconfigure all daemon services."); +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +index 1da4076..56bab07 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +@@ -366,7 +366,6 @@ int + glusterd_snapdsvc_restart() + { + glusterd_volinfo_t *volinfo = NULL; +- glusterd_volinfo_t *tmp = NULL; + int ret = 0; + xlator_t *this = THIS; + 
glusterd_conf_t *conf = NULL; +@@ -377,7 +376,7 @@ glusterd_snapdsvc_restart() + conf = this->private; + GF_ASSERT(conf); + +- cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list) ++ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list) + { + /* Start per volume snapd svc */ + if (volinfo->status == GLUSTERD_STATUS_STARTED) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c +index 69d4cf4..f5ecde7 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c ++++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c +@@ -202,6 +202,9 @@ glusterd_dump_priv(xlator_t *this) + gf_proc_dump_build_key(key, "glusterd", "ping-timeout"); + gf_proc_dump_write(key, "%d", priv->ping_timeout); + ++ gf_proc_dump_build_key(key, "glusterd", "shd.online"); ++ gf_proc_dump_write(key, "%d", priv->shd_svc.online); ++ + gf_proc_dump_build_key(key, "glusterd", "nfs.online"); + gf_proc_dump_write(key, "%d", priv->nfs_svc.online); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +index e42703c..ca19a75 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +@@ -7,7 +7,6 @@ + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. + */ +-#include + + #include + #include +@@ -21,14 +20,12 @@ + #include "glusterd-bitd-svc.h" + #include "glusterd-tierd-svc.h" + #include "glusterd-tierd-svc-helper.h" +-#include "glusterd-shd-svc-helper.h" + #include "glusterd-scrub-svc.h" + #include "glusterd-svc-helper.h" + #include +-#include "glusterd-snapshot-utils.h" + + int +-glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo) ++glusterd_svcs_reconfigure() + { + int ret = 0; + xlator_t *this = THIS; +@@ -46,11 +43,9 @@ glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo) + goto out; + + svc_name = "self-heald"; +- if (volinfo) { +- ret = glusterd_shdsvc_reconfigure(volinfo); +- if (ret) +- goto out; +- } ++ ret = glusterd_shdsvc_reconfigure(); ++ if (ret) ++ goto out; + + if (conf->op_version == GD_OP_VERSION_MIN) + goto out; +@@ -74,7 +69,7 @@ out: + } + + int +-glusterd_svcs_stop(glusterd_volinfo_t *volinfo) ++glusterd_svcs_stop() + { + int ret = 0; + xlator_t *this = NULL; +@@ -90,15 +85,13 @@ glusterd_svcs_stop(glusterd_volinfo_t *volinfo) + if (ret) + goto out; + +- ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM); ++ ret = glusterd_svc_stop(&(priv->shd_svc), SIGTERM); + if (ret) + goto out; + +- if (volinfo) { +- ret = glusterd_svc_stop(&(volinfo->shd.svc), PROC_START_NO_WAIT); +- if (ret) +- goto out; +- } ++ ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM); ++ if (ret) ++ goto out; + + ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM); + if (ret) +@@ -128,6 +121,12 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo) + if (ret) + goto out; + ++ ret = conf->shd_svc.manager(&(conf->shd_svc), volinfo, PROC_START_NO_WAIT); ++ if (ret == -EINVAL) ++ ret = 0; ++ if (ret) ++ goto out; ++ + if (conf->op_version == GD_OP_VERSION_MIN) + goto out; + +@@ -144,15 +143,6 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo) + if (ret) + goto out; + +- if (volinfo) { +- ret = volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo, +- PROC_START_NO_WAIT); +- if (ret == -EINVAL) +- ret = 0; +- if (ret) +- goto out; +- } +- + ret = conf->scrub_svc.manager(&(conf->scrub_svc), NULL, PROC_START_NO_WAIT); + if (ret == -EINVAL) + ret = 0; +@@ -279,678 +269,3 @@ 
out: + GF_FREE(tmpvol); + return ret; + } +- +-int +-glusterd_volume_svc_check_volfile_identical( +- char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo, +- glusterd_vol_graph_builder_t builder, gf_boolean_t *identical) +-{ +- char orgvol[PATH_MAX] = { +- 0, +- }; +- char *tmpvol = NULL; +- xlator_t *this = NULL; +- int ret = -1; +- int need_unlink = 0; +- int tmp_fd = -1; +- +- this = THIS; +- +- GF_VALIDATE_OR_GOTO(this->name, this, out); +- GF_VALIDATE_OR_GOTO(this->name, identical, out); +- +- /* This builds volfile for volume level dameons */ +- glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol, +- sizeof(orgvol)); +- +- ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name); +- if (ret < 0) { +- goto out; +- } +- +- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */ +- tmp_fd = mkstemp(tmpvol); +- if (tmp_fd < 0) { +- gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED, +- "Unable to create temp file" +- " %s:(%s)", +- tmpvol, strerror(errno)); +- ret = -1; +- goto out; +- } +- +- need_unlink = 1; +- +- ret = builder(volinfo, tmpvol, mode_dict); +- if (ret) +- goto out; +- +- ret = glusterd_check_files_identical(orgvol, tmpvol, identical); +-out: +- if (need_unlink) +- sys_unlink(tmpvol); +- +- if (tmpvol != NULL) +- GF_FREE(tmpvol); +- +- if (tmp_fd >= 0) +- sys_close(tmp_fd); +- +- return ret; +-} +- +-int +-glusterd_volume_svc_check_topology_identical( +- char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo, +- glusterd_vol_graph_builder_t builder, gf_boolean_t *identical) +-{ +- char orgvol[PATH_MAX] = { +- 0, +- }; +- char *tmpvol = NULL; +- glusterd_conf_t *conf = NULL; +- xlator_t *this = THIS; +- int ret = -1; +- int tmpclean = 0; +- int tmpfd = -1; +- +- if ((!identical) || (!this) || (!this->private)) +- goto out; +- +- conf = this->private; +- GF_VALIDATE_OR_GOTO(this->name, conf, out); +- +- /* This builds volfile for volume level dameons */ +- glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol, +- sizeof(orgvol)); +- /* Create the temporary volfile */ +- ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name); +- if (ret < 0) { +- goto out; +- } +- +- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */ +- tmpfd = mkstemp(tmpvol); +- if (tmpfd < 0) { +- gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED, +- "Unable to create temp file" +- " %s:(%s)", +- tmpvol, strerror(errno)); +- ret = -1; +- goto out; +- } +- +- tmpclean = 1; /* SET the flag to unlink() tmpfile */ +- +- ret = builder(volinfo, tmpvol, mode_dict); +- if (ret) +- goto out; +- +- /* Compare the topology of volfiles */ +- ret = glusterd_check_topology_identical(orgvol, tmpvol, identical); +-out: +- if (tmpfd >= 0) +- sys_close(tmpfd); +- if (tmpclean) +- sys_unlink(tmpvol); +- if (tmpvol != NULL) +- GF_FREE(tmpvol); +- return ret; +-} +- +-void * +-__gf_find_compatible_svc(gd_node_type daemon) +-{ +- glusterd_svc_proc_t *svc_proc = NULL; +- glusterd_svc_proc_t *return_proc = NULL; +- glusterd_svc_t *parent_svc = NULL; +- struct cds_list_head *svc_procs = NULL; +- glusterd_conf_t *conf = NULL; +- int pid = -1; +- +- conf = THIS->private; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- +- if (daemon == GD_NODE_SHD) { +- svc_procs = &conf->shd_procs; +- if (!svc_procs) +- goto out; +- } +- +- cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list) +- { +- parent_svc = cds_list_entry(svc_proc->svcs.next, glusterd_svc_t, +- mux_svc); +- if (!return_proc) +- return_proc = svc_proc; +- +- /* If 
there is an already running shd daemons, select it. Otehrwise +- * select the first one. +- */ +- if (parent_svc && gf_is_service_running(parent_svc->proc.pidfile, &pid)) +- return (void *)svc_proc; +- /* +- * Logic to select one process goes here. Currently there is only one +- * shd_proc. So selecting the first one; +- */ +- } +-out: +- return return_proc; +-} +- +-glusterd_svc_proc_t * +-glusterd_svcprocess_new() +-{ +- glusterd_svc_proc_t *new_svcprocess = NULL; +- +- new_svcprocess = GF_CALLOC(1, sizeof(*new_svcprocess), +- gf_gld_mt_glusterd_svc_proc_t); +- +- if (!new_svcprocess) +- return NULL; +- +- CDS_INIT_LIST_HEAD(&new_svcprocess->svc_proc_list); +- CDS_INIT_LIST_HEAD(&new_svcprocess->svcs); +- new_svcprocess->notify = glusterd_muxsvc_common_rpc_notify; +- return new_svcprocess; +-} +- +-int +-glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc) +-{ +- int ret = -1; +- glusterd_svc_proc_t *mux_proc = NULL; +- glusterd_conn_t *mux_conn = NULL; +- glusterd_conf_t *conf = NULL; +- glusterd_svc_t *parent_svc = NULL; +- int pid = -1; +- +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); +- conf = THIS->private; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- +- pthread_mutex_lock(&conf->attach_lock); +- { +- if (!svc->inited) { +- if (gf_is_service_running(svc->proc.pidfile, &pid)) { +- /* Just connect is required, but we don't know what happens +- * during the disconnect. So better to reattach. +- */ +- mux_proc = __gf_find_compatible_svc_from_pid(GD_NODE_SHD, pid); +- } +- +- if (!mux_proc) { +- if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) { +- /* stale pid file, unlink it. */ +- kill(pid, SIGTERM); +- sys_unlink(svc->proc.pidfile); +- } +- mux_proc = __gf_find_compatible_svc(GD_NODE_SHD); +- } +- if (mux_proc) { +- /* Take first entry from the process */ +- parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t, +- mux_svc); +- sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); +- mux_conn = &parent_svc->conn; +- if (volinfo) +- volinfo->shd.attached = _gf_true; +- } else { +- mux_proc = glusterd_svcprocess_new(); +- if (!mux_proc) { +- ret = -1; +- goto unlock; +- } +- cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs); +- } +- svc->svc_proc = mux_proc; +- cds_list_del_init(&svc->mux_svc); +- cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs); +- ret = glusterd_shdsvc_init(volinfo, mux_conn, mux_proc); +- if (ret) { +- pthread_mutex_unlock(&conf->attach_lock); +- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC, +- "Failed to init shd " +- "service"); +- goto out; +- } +- gf_msg_debug(THIS->name, 0, "shd service initialized"); +- svc->inited = _gf_true; +- } +- ret = 0; +- } +-unlock: +- pthread_mutex_unlock(&conf->attach_lock); +-out: +- return ret; +-} +- +-void * +-__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid) +-{ +- glusterd_svc_proc_t *svc_proc = NULL; +- struct cds_list_head *svc_procs = NULL; +- glusterd_svc_t *svc = NULL; +- pid_t mux_pid = -1; +- glusterd_conf_t *conf = NULL; +- +- conf = THIS->private; +- if (!conf) +- return NULL; +- +- if (daemon == GD_NODE_SHD) { +- svc_procs = &conf->shd_procs; +- if (!svc_proc) +- return NULL; +- } /* Can be moved to switch when mux is implemented for other daemon; */ +- +- cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list) +- { +- cds_list_for_each_entry(svc, &svc_proc->svcs, mux_svc) +- { +- if (gf_is_service_running(svc->proc.pidfile, 
&mux_pid)) { +- if (mux_pid == pid) { +- /*TODO +- * inefficient loop, but at the moment, there is only +- * one shd. +- */ +- return svc_proc; +- } +- } +- } +- } +- return NULL; +-} +- +-static int32_t +-my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame) +-{ +- call_frame_t *frame = v_frame; +- xlator_t *this = NULL; +- glusterd_conf_t *conf = NULL; +- +- GF_VALIDATE_OR_GOTO("glusterd", frame, out); +- this = frame->this; +- GF_VALIDATE_OR_GOTO("glusterd", this, out); +- conf = this->private; +- GF_VALIDATE_OR_GOTO(this->name, conf, out); +- +- GF_ATOMIC_DEC(conf->blockers); +- +- STACK_DESTROY(frame->root); +-out: +- return 0; +-} +- +-static int32_t +-glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count, +- void *v_frame) +-{ +- call_frame_t *frame = v_frame; +- glusterd_volinfo_t *volinfo = NULL; +- glusterd_shdsvc_t *shd = NULL; +- glusterd_svc_t *svc = frame->cookie; +- glusterd_svc_t *parent_svc = NULL; +- glusterd_svc_proc_t *mux_proc = NULL; +- glusterd_conf_t *conf = NULL; +- int *flag = (int *)frame->local; +- xlator_t *this = THIS; +- int pid = -1; +- int ret = -1; +- gf_getspec_rsp rsp = { +- 0, +- }; +- +- GF_VALIDATE_OR_GOTO("glusterd", this, out); +- conf = this->private; +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- GF_VALIDATE_OR_GOTO("glusterd", frame, out); +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- +- frame->local = NULL; +- frame->cookie = NULL; +- +- if (!strcmp(svc->name, "glustershd")) { +- /* Get volinfo->shd from svc object */ +- shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); +- if (!shd) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, +- "Failed to get shd object " +- "from shd service"); +- goto out; +- } +- +- /* Get volinfo from shd */ +- volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); +- if (!volinfo) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to get volinfo from " +- "from shd"); +- goto out; +- } +- } +- +- if (!iov) { +- gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, +- "iov is NULL"); +- ret = -1; +- goto out; +- } +- +- ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp); +- if (ret < 0) { +- gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, +- "XDR decoding error"); +- ret = -1; +- goto out; +- } +- +- if (rsp.op_ret == 0) { +- pthread_mutex_lock(&conf->attach_lock); +- { +- if (!strcmp(svc->name, "glustershd")) { +- mux_proc = svc->svc_proc; +- if (mux_proc && +- !gf_is_service_running(svc->proc.pidfile, &pid)) { +- /* +- * When svc's are restarting, there is a chance that the +- * attached svc might not have updated it's pid. Because +- * it was at connection stage. So in that case, we need +- * to retry the pid file copy. +- */ +- parent_svc = cds_list_entry(mux_proc->svcs.next, +- glusterd_svc_t, mux_svc); +- if (parent_svc) +- sys_link(parent_svc->proc.pidfile, svc->proc.pidfile); +- } +- } +- svc->online = _gf_true; +- } +- pthread_mutex_unlock(&conf->attach_lock); +- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL, +- "svc %s of volume %s attached successfully to pid %d", svc->name, +- volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- } else { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL, +- "svc %s of volume %s failed to " +- "attach to pid %d. 
Starting a new process", +- svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- if (!strcmp(svc->name, "glustershd")) { +- glusterd_recover_shd_attach_failure(volinfo, svc, *flag); +- } +- } +-out: +- if (flag) { +- GF_FREE(flag); +- } +- GF_ATOMIC_DEC(conf->blockers); +- STACK_DESTROY(frame->root); +- return 0; +-} +- +-extern size_t +-build_volfile_path(char *volume_id, char *path, size_t path_len, +- char *trusted_str); +- +-int +-__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, +- struct rpc_clnt *rpc, char *volfile_id, +- int op) +-{ +- int ret = -1; +- struct iobuf *iobuf = NULL; +- struct iobref *iobref = NULL; +- struct iovec iov = { +- 0, +- }; +- char path[PATH_MAX] = { +- '\0', +- }; +- struct stat stbuf = { +- 0, +- }; +- int32_t spec_fd = -1; +- size_t file_len = -1; +- char *volfile_content = NULL; +- ssize_t req_size = 0; +- call_frame_t *frame = NULL; +- gd1_mgmt_brick_op_req brick_req; +- void *req = &brick_req; +- void *errlbl = &&err; +- struct rpc_clnt_connection *conn; +- xlator_t *this = THIS; +- glusterd_conf_t *conf = THIS->private; +- extern struct rpc_clnt_program gd_brick_prog; +- fop_cbk_fn_t cbkfn = my_callback; +- +- if (!rpc) { +- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PARAM_NULL, +- "called with null rpc"); +- return -1; +- } +- +- conn = &rpc->conn; +- if (!conn->connected || conn->disconnected) { +- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED, +- "not connected yet"); +- return -1; +- } +- +- brick_req.op = op; +- brick_req.name = volfile_id; +- brick_req.input.input_val = NULL; +- brick_req.input.input_len = 0; +- +- frame = create_frame(this, this->ctx->pool); +- if (!frame) { +- goto *errlbl; +- } +- +- if (op == GLUSTERD_SVC_ATTACH) { +- (void)build_volfile_path(volfile_id, path, sizeof(path), NULL); +- +- ret = sys_stat(path, &stbuf); +- if (ret < 0) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL, +- "Unable to stat %s (%s)", path, strerror(errno)); +- ret = -EINVAL; +- goto *errlbl; +- } +- +- file_len = stbuf.st_size; +- volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char); +- if (!volfile_content) { +- ret = -ENOMEM; +- goto *errlbl; +- } +- spec_fd = open(path, O_RDONLY); +- if (spec_fd < 0) { +- gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL, +- "failed to read volfile %s", path); +- ret = -EIO; +- goto *errlbl; +- } +- ret = sys_read(spec_fd, volfile_content, file_len); +- if (ret == file_len) { +- brick_req.input.input_val = volfile_content; +- brick_req.input.input_len = file_len; +- } else { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL, +- "read failed on path %s. File size=%" GF_PRI_SIZET +- "read size=%d", +- path, file_len, ret); +- ret = -EIO; +- goto *errlbl; +- } +- +- frame->cookie = svc; +- frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int); +- *((int *)frame->local) = flags; +- cbkfn = glusterd_svc_attach_cbk; +- } +- +- req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); +- iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); +- if (!iobuf) { +- goto *errlbl; +- } +- errlbl = &&maybe_free_iobuf; +- +- iov.iov_base = iobuf->ptr; +- iov.iov_len = iobuf_pagesize(iobuf); +- +- iobref = iobref_new(); +- if (!iobref) { +- goto *errlbl; +- } +- errlbl = &&free_iobref; +- +- iobref_add(iobref, iobuf); +- /* +- * Drop our reference to the iobuf. The iobref should already have +- * one after iobref_add, so when we unref that we'll free the iobuf as +- * well. This allows us to pass just the iobref as frame->local. 
+- */ +- iobuf_unref(iobuf); +- /* Set the pointer to null so we don't free it on a later error. */ +- iobuf = NULL; +- +- /* Create the xdr payload */ +- ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req); +- if (ret == -1) { +- goto *errlbl; +- } +- iov.iov_len = ret; +- +- /* Send the msg */ +- GF_ATOMIC_INC(conf->blockers); +- ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0, +- iobref, frame, NULL, 0, NULL, 0, NULL); +- GF_FREE(volfile_content); +- if (spec_fd >= 0) +- sys_close(spec_fd); +- return ret; +- +-free_iobref: +- iobref_unref(iobref); +-maybe_free_iobuf: +- if (iobuf) { +- iobuf_unref(iobuf); +- } +-err: +- GF_FREE(volfile_content); +- if (spec_fd >= 0) +- sys_close(spec_fd); +- if (frame) +- STACK_DESTROY(frame->root); +- return -1; +-} +- +-int +-glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags) +-{ +- glusterd_conf_t *conf = THIS->private; +- int ret = -1; +- int tries; +- rpc_clnt_t *rpc = NULL; +- +- GF_VALIDATE_OR_GOTO("glusterd", conf, out); +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); +- +- gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO, +- "adding svc %s (volume=%s) to existing " +- "process with pid %d", +- svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- +- rpc = rpc_clnt_ref(svc->conn.rpc); +- for (tries = 15; tries > 0; --tries) { +- if (rpc) { +- pthread_mutex_lock(&conf->attach_lock); +- { +- ret = __glusterd_send_svc_configure_req( +- svc, flags, rpc, svc->proc.volfileid, GLUSTERD_SVC_ATTACH); +- } +- pthread_mutex_unlock(&conf->attach_lock); +- if (!ret) { +- volinfo->shd.attached = _gf_true; +- goto out; +- } +- } +- /* +- * It might not actually be safe to manipulate the lock +- * like this, but if we don't then the connection can +- * never actually complete and retries are useless. +- * Unfortunately, all of the alternatives (e.g. doing +- * all of this in a separate thread) are much more +- * complicated and risky. +- * TBD: see if there's a better way +- */ +- synclock_unlock(&conf->big_lock); +- sleep(1); +- synclock_lock(&conf->big_lock); +- } +- ret = -1; +- gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL, +- "attach failed for %s(volume=%s)", svc->name, volinfo->volname); +-out: +- if (rpc) +- rpc_clnt_unref(rpc); +- return ret; +-} +- +-int +-glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig) +-{ +- glusterd_conf_t *conf = THIS->private; +- int ret = -1; +- int tries; +- rpc_clnt_t *rpc = NULL; +- +- GF_VALIDATE_OR_GOTO(THIS->name, conf, out); +- GF_VALIDATE_OR_GOTO(THIS->name, svc, out); +- GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out); +- +- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DETACH_INFO, +- "removing svc %s (volume=%s) from existing " +- "process with pid %d", +- svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc)); +- +- rpc = rpc_clnt_ref(svc->conn.rpc); +- for (tries = 15; tries > 0; --tries) { +- if (rpc) { +- /*For detach there is no flags, and we are not using sig.*/ +- pthread_mutex_lock(&conf->attach_lock); +- { +- ret = __glusterd_send_svc_configure_req(svc, 0, svc->conn.rpc, +- svc->proc.volfileid, +- GLUSTERD_SVC_DETACH); +- } +- pthread_mutex_unlock(&conf->attach_lock); +- if (!ret) { +- goto out; +- } +- } +- /* +- * It might not actually be safe to manipulate the lock +- * like this, but if we don't then the connection can +- * never actually complete and retries are useless. 
+- * Unfortunately, all of the alternatives (e.g. doing +- * all of this in a separate thread) are much more +- * complicated and risky. +- * TBD: see if there's a better way +- */ +- synclock_unlock(&conf->big_lock); +- sleep(1); +- synclock_lock(&conf->big_lock); +- } +- ret = -1; +- gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_DETACH_FAIL, +- "detach failed for %s(volume=%s)", svc->name, volinfo->volname); +-out: +- if (rpc) +- rpc_clnt_unref(rpc); +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h +index 5def246..cc98e78 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h +@@ -16,10 +16,10 @@ + #include "glusterd-volgen.h" + + int +-glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo); ++glusterd_svcs_reconfigure(); + + int +-glusterd_svcs_stop(glusterd_volinfo_t *vol); ++glusterd_svcs_stop(); + + int + glusterd_svcs_manager(glusterd_volinfo_t *volinfo); +@@ -41,41 +41,5 @@ int + glusterd_svc_check_tier_topology_identical(char *svc_name, + glusterd_volinfo_t *volinfo, + gf_boolean_t *identical); +-int +-glusterd_volume_svc_check_volfile_identical(char *svc_name, dict_t *mode_dict, +- glusterd_volinfo_t *volinfo, +- glusterd_vol_graph_builder_t, +- gf_boolean_t *identical); +-int +-glusterd_volume_svc_check_topology_identical(char *svc_name, dict_t *mode_dict, +- glusterd_volinfo_t *volinfo, +- glusterd_vol_graph_builder_t, +- gf_boolean_t *identical); +-void +-glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol, +- char *volfile, size_t len); +-void * +-__gf_find_compatible_svc(gd_node_type daemon); +- +-glusterd_svc_proc_t * +-glusterd_svcprocess_new(); +- +-int +-glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc); +- +-void * +-__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid); +- +-int +-glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, +- int flags); +- +-int +-glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig); +- +-int +-__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flag, +- struct rpc_clnt *rpc, char *volfile_id, +- int op); + + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c +index f32dafc..4cd4cea 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c +@@ -18,7 +18,6 @@ + #include "glusterd-conn-mgmt.h" + #include "glusterd-messages.h" + #include +-#include "glusterd-shd-svc-helper.h" + + int + glusterd_svc_create_rundir(char *rundir) +@@ -168,75 +167,68 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline) + GF_ASSERT(this); + + priv = this->private; +- GF_VALIDATE_OR_GOTO("glusterd", priv, out); +- GF_VALIDATE_OR_GOTO("glusterd", svc, out); +- +- pthread_mutex_lock(&priv->attach_lock); +- { +- if (glusterd_proc_is_running(&(svc->proc))) { +- ret = 0; +- goto unlock; +- } ++ GF_ASSERT(priv); + +- ret = sys_access(svc->proc.volfile, F_OK); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND, +- "Volfile %s is not present", svc->proc.volfile); +- goto unlock; +- } ++ if (glusterd_proc_is_running(&(svc->proc))) { ++ ret = 0; ++ goto out; ++ } + +- runinit(&runner); ++ ret = sys_access(svc->proc.volfile, F_OK); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND, ++ "Volfile %s is not present", svc->proc.volfile); ++ 
goto out; ++ } + +- if (this->ctx->cmd_args.valgrind) { +- len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log", +- svc->proc.logfile, svc->name); +- if ((len < 0) || (len >= PATH_MAX)) { +- ret = -1; +- goto unlock; +- } ++ runinit(&runner); + +- runner_add_args(&runner, "valgrind", "--leak-check=full", +- "--trace-children=yes", "--track-origins=yes", +- NULL); +- runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); ++ if (this->ctx->cmd_args.valgrind) { ++ len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log", ++ svc->proc.logfile, svc->name); ++ if ((len < 0) || (len >= PATH_MAX)) { ++ ret = -1; ++ goto out; + } + +- runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s", +- svc->proc.volfileserver, "--volfile-id", +- svc->proc.volfileid, "-p", svc->proc.pidfile, "-l", +- svc->proc.logfile, "-S", svc->conn.sockpath, NULL); ++ runner_add_args(&runner, "valgrind", "--leak-check=full", ++ "--trace-children=yes", "--track-origins=yes", NULL); ++ runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); ++ } + +- if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY, +- SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY), +- &localtime_logging) == 0) { +- if (strcmp(localtime_logging, "enable") == 0) +- runner_add_arg(&runner, "--localtime-logging"); +- } +- if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY, +- SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY), +- &log_level) == 0) { +- snprintf(daemon_log_level, 30, "--log-level=%s", log_level); +- runner_add_arg(&runner, daemon_log_level); +- } ++ runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s", ++ svc->proc.volfileserver, "--volfile-id", ++ svc->proc.volfileid, "-p", svc->proc.pidfile, "-l", ++ svc->proc.logfile, "-S", svc->conn.sockpath, NULL); ++ ++ if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY, ++ SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY), ++ &localtime_logging) == 0) { ++ if (strcmp(localtime_logging, "enable") == 0) ++ runner_add_arg(&runner, "--localtime-logging"); ++ } ++ if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY, ++ SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY), &log_level) == 0) { ++ snprintf(daemon_log_level, 30, "--log-level=%s", log_level); ++ runner_add_arg(&runner, daemon_log_level); ++ } + +- if (cmdline) +- dict_foreach(cmdline, svc_add_args, (void *)&runner); ++ if (cmdline) ++ dict_foreach(cmdline, svc_add_args, (void *)&runner); + +- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS, +- "Starting %s service", svc->name); ++ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS, ++ "Starting %s service", svc->name); + +- if (flags == PROC_START_NO_WAIT) { +- ret = runner_run_nowait(&runner); +- } else { +- synclock_unlock(&priv->big_lock); +- { +- ret = runner_run(&runner); +- } +- synclock_lock(&priv->big_lock); ++ if (flags == PROC_START_NO_WAIT) { ++ ret = runner_run_nowait(&runner); ++ } else { ++ synclock_unlock(&priv->big_lock); ++ { ++ ret = runner_run(&runner); + } ++ synclock_lock(&priv->big_lock); + } +-unlock: +- pthread_mutex_unlock(&priv->attach_lock); ++ + out: + gf_msg_debug(this->name, 0, "Returning %d", ret); + +@@ -289,8 +281,7 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile, + + glusterd_svc_build_svcdir(server, workdir, dir, sizeof(dir)); + +- if (!strcmp(server, "quotad")) +- /*quotad has different volfile name*/ ++ if (!strcmp(server, "quotad")) /*quotad has different volfile name*/ + snprintf(volfile, len, "%s/%s.vol", dir, server); + else + snprintf(volfile, len, "%s/%s-server.vol", dir, server); +@@ -375,138 
+366,3 @@ glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event) + + return ret; + } +- +-void +-glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol, +- char *volfile, size_t len) +-{ +- GF_ASSERT(len == PATH_MAX); +- +- if (!strcmp(server, "glustershd")) { +- glusterd_svc_build_shd_volfile_path(vol, volfile, len); +- } +-} +- +-int +-glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *mux_proc, +- rpc_clnt_event_t event) +-{ +- int ret = 0; +- glusterd_svc_t *svc = NULL; +- glusterd_svc_t *tmp = NULL; +- xlator_t *this = NULL; +- gf_boolean_t need_logging = _gf_false; +- +- this = THIS; +- GF_ASSERT(this); +- +- if (!mux_proc) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL, +- "Failed to get the svc proc data"); +- return -1; +- } +- +- /* Currently this function was used for shd svc, if this function is +- * using for another svc, change ths glustershd reference. We can get +- * the svc name from any of the attached svc's +- */ +- switch (event) { +- case RPC_CLNT_CONNECT: +- gf_msg_debug(this->name, 0, +- "glustershd has connected with glusterd."); +- gf_event(EVENT_SVC_CONNECTED, "svc_name=glustershd"); +- cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc) +- { +- if (svc->online) +- continue; +- svc->online = _gf_true; +- } +- break; +- +- case RPC_CLNT_DISCONNECT: +- cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc) +- { +- if (svc->online) { +- if (!need_logging) +- need_logging = _gf_true; +- svc->online = _gf_false; +- } +- } +- if (need_logging) { +- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED, +- "glustershd has disconnected from glusterd."); +- gf_event(EVENT_SVC_DISCONNECTED, "svc_name=glustershd"); +- } +- break; +- +- default: +- gf_msg_trace(this->name, 0, "got some other RPC event %d", event); +- break; +- } +- +- return ret; +-} +- +-int +-glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc, +- char *sockpath, int frame_timeout, +- glusterd_muxsvc_conn_notify_t notify) +-{ +- int ret = -1; +- dict_t *options = NULL; +- struct rpc_clnt *rpc = NULL; +- xlator_t *this = THIS; +- glusterd_svc_t *svc = NULL; +- +- options = dict_new(); +- if (!this || !options) +- goto out; +- +- svc = cds_list_entry(conn, glusterd_svc_t, conn); +- if (!svc) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL, +- "Failed to get the service"); +- goto out; +- } +- +- ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout); +- if (ret) +- goto out; +- +- ret = dict_set_int32n(options, "transport.socket.ignore-enoent", +- SLEN("transport.socket.ignore-enoent"), 1); +- if (ret) +- goto out; +- +- /* @options is free'd by rpc_transport when destroyed */ +- rpc = rpc_clnt_new(options, this, (char *)svc->name, 16); +- if (!rpc) { +- ret = -1; +- goto out; +- } +- +- ret = rpc_clnt_register_notify(rpc, glusterd_muxsvc_conn_common_notify, +- mux_proc); +- if (ret) +- goto out; +- +- ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath); +- if (ret < 0) +- goto out; +- else +- ret = 0; +- +- conn->frame_timeout = frame_timeout; +- conn->rpc = rpc; +- mux_proc->notify = notify; +-out: +- if (options) +- dict_unref(options); +- if (ret) { +- if (rpc) { +- rpc_clnt_unref(rpc); +- rpc = NULL; +- } +- } +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +index fbc5225..c850bfd 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h ++++ 
b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +@@ -13,12 +13,9 @@ + + #include "glusterd-proc-mgmt.h" + #include "glusterd-conn-mgmt.h" +-#include "glusterd-rcu.h" + + struct glusterd_svc_; +- + typedef struct glusterd_svc_ glusterd_svc_t; +-typedef struct glusterd_svc_proc_ glusterd_svc_proc_t; + + typedef void (*glusterd_svc_build_t)(glusterd_svc_t *svc); + +@@ -28,17 +25,6 @@ typedef int (*glusterd_svc_start_t)(glusterd_svc_t *svc, int flags); + typedef int (*glusterd_svc_stop_t)(glusterd_svc_t *svc, int sig); + typedef int (*glusterd_svc_reconfigure_t)(void *data); + +-typedef int (*glusterd_muxsvc_conn_notify_t)(glusterd_svc_proc_t *mux_proc, +- rpc_clnt_event_t event); +- +-struct glusterd_svc_proc_ { +- struct cds_list_head svc_proc_list; +- struct cds_list_head svcs; +- glusterd_muxsvc_conn_notify_t notify; +- rpc_clnt_t *rpc; +- void *data; +-}; +- + struct glusterd_svc_ { + char name[NAME_MAX]; + glusterd_conn_t conn; +@@ -49,8 +35,6 @@ struct glusterd_svc_ { + gf_boolean_t online; + gf_boolean_t inited; + glusterd_svc_reconfigure_t reconfigure; +- glusterd_svc_proc_t *svc_proc; +- struct cds_list_head mux_svc; + }; + + int +@@ -85,15 +69,4 @@ glusterd_svc_reconfigure(int (*create_volfile)()); + int + glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event); + +-int +-glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *conn, +- rpc_clnt_event_t event); +- +-int +-glusterd_proc_get_pid(glusterd_proc_t *proc); +- +-int +-glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc, +- char *sockpath, int frame_timeout, +- glusterd_muxsvc_conn_notify_t notify); + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c +index 23a9592..4dc0d44 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-tier.c ++++ b/xlators/mgmt/glusterd/src/glusterd-tier.c +@@ -27,7 +27,6 @@ + #include "glusterd-messages.h" + #include "glusterd-mgmt.h" + #include "glusterd-syncop.h" +-#include "glusterd-shd-svc-helper.h" + + #include + #include +@@ -616,7 +615,7 @@ glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) + + if (cmd == GF_DEFRAG_CMD_DETACH_START && + volinfo->status == GLUSTERD_STATUS_STARTED) { +- ret = glusterd_svcs_reconfigure(volinfo); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL, + "Unable to reconfigure NFS-Server"); +diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c +index ab463f1..04ceec5 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c +@@ -83,6 +83,7 @@ glusterd_tierdsvc_init(void *data) + goto out; + + notify = glusterd_svc_common_rpc_notify; ++ glusterd_store_perform_node_state_store(volinfo); + + volinfo->type = GF_CLUSTER_TYPE_TIER; + +@@ -394,7 +395,6 @@ int + glusterd_tierdsvc_restart() + { + glusterd_volinfo_t *volinfo = NULL; +- glusterd_volinfo_t *tmp = NULL; + int ret = 0; + xlator_t *this = THIS; + glusterd_conf_t *conf = NULL; +@@ -405,7 +405,7 @@ glusterd_tierdsvc_restart() + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); + +- cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list) ++ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list) + { + /* Start per volume tierd svc */ + if (volinfo->status == GLUSTERD_STATUS_STARTED && +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c 
+index 4525ec7..2aa975b 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -61,7 +61,6 @@ + #include "glusterd-server-quorum.h" + #include + #include +-#include "glusterd-shd-svc-helper.h" + + #include "xdr-generic.h" + #include +@@ -625,17 +624,13 @@ glusterd_volinfo_t * + glusterd_volinfo_unref(glusterd_volinfo_t *volinfo) + { + int refcnt = -1; +- glusterd_conf_t *conf = THIS->private; + +- pthread_mutex_lock(&conf->volume_lock); ++ pthread_mutex_lock(&volinfo->reflock); + { +- pthread_mutex_lock(&volinfo->reflock); +- { +- refcnt = --volinfo->refcnt; +- } +- pthread_mutex_unlock(&volinfo->reflock); ++ refcnt = --volinfo->refcnt; + } +- pthread_mutex_unlock(&conf->volume_lock); ++ pthread_mutex_unlock(&volinfo->reflock); ++ + if (!refcnt) { + glusterd_volinfo_delete(volinfo); + return NULL; +@@ -707,7 +702,6 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo) + glusterd_snapdsvc_build(&new_volinfo->snapd.svc); + glusterd_tierdsvc_build(&new_volinfo->tierd.svc); + glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc); +- glusterd_shdsvc_build(&new_volinfo->shd.svc); + + pthread_mutex_init(&new_volinfo->reflock, NULL); + *volinfo = glusterd_volinfo_ref(new_volinfo); +@@ -1073,11 +1067,11 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo) + gf_store_handle_destroy(volinfo->snapd.handle); + + glusterd_auth_cleanup(volinfo); +- glusterd_shd_svcproc_cleanup(&volinfo->shd); + + pthread_mutex_destroy(&volinfo->reflock); + GF_FREE(volinfo); + ret = 0; ++ + out: + gf_msg_debug(THIS->name, 0, "Returning %d", ret); + return ret; +@@ -3929,7 +3923,6 @@ glusterd_spawn_daemons(void *opaque) + ret = glusterd_snapdsvc_restart(); + ret = glusterd_tierdsvc_restart(); + ret = glusterd_gfproxydsvc_restart(); +- ret = glusterd_shdsvc_restart(); + return ret; + } + +@@ -4880,9 +4873,6 @@ glusterd_delete_stale_volume(glusterd_volinfo_t *stale_volinfo, + svc = &(stale_volinfo->snapd.svc); + (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT); + } +- svc = &(stale_volinfo->shd.svc); +- (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT); +- + (void)glusterd_volinfo_remove(stale_volinfo); + + return 0; +@@ -4997,15 +4987,6 @@ glusterd_import_friend_volume(dict_t *peer_data, int count) + glusterd_volinfo_unref(old_volinfo); + } + +- ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); +- if (ret) { +- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL, +- "Failed to store " +- "volinfo for volume %s", +- new_volinfo->volname); +- goto out; +- } +- + if (glusterd_is_volume_started(new_volinfo)) { + (void)glusterd_start_bricks(new_volinfo); + if (glusterd_is_snapd_enabled(new_volinfo)) { +@@ -5014,10 +4995,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count) + gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); + } + } +- svc = &(new_volinfo->shd.svc); +- if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) { +- gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); +- } ++ } ++ ++ ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL, ++ "Failed to store " ++ "volinfo for volume %s", ++ new_volinfo->volname); ++ goto out; + } + + ret = glusterd_create_volfiles_and_notify_services(new_volinfo); +@@ -5521,7 +5507,9 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count, + glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile, + sizeof(pidfile)); + +- if 
(strcmp(server, priv->nfs_svc.name) == 0) ++ if (strcmp(server, priv->shd_svc.name) == 0) ++ svc = &(priv->shd_svc); ++ else if (strcmp(server, priv->nfs_svc.name) == 0) + svc = &(priv->nfs_svc); + else if (strcmp(server, priv->quotad_svc.name) == 0) + svc = &(priv->quotad_svc); +@@ -5552,6 +5540,9 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count, + if (!strcmp(server, priv->nfs_svc.name)) + ret = dict_set_nstrn(dict, key, keylen, "NFS Server", + SLEN("NFS Server")); ++ else if (!strcmp(server, priv->shd_svc.name)) ++ ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon", ++ SLEN("Self-heal Daemon")); + else if (!strcmp(server, priv->quotad_svc.name)) + ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon", + SLEN("Quota Daemon")); +@@ -9115,21 +9106,6 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid) + "to stop snapd daemon service"); + } + } +- +- if (glusterd_is_shd_compatible_volume(volinfo)) { +- /* +- * Sending stop request for all volumes. So it is fine +- * to send stop for mux shd +- */ +- svc = &(volinfo->shd.svc); +- ret = svc->stop(svc, SIGTERM); +- if (ret) { +- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, +- "Failed " +- "to stop shd daemon service"); +- } +- } +- + if (volinfo->type == GF_CLUSTER_TYPE_TIER) { + svc = &(volinfo->tierd.svc); + ret = svc->stop(svc, SIGTERM); +@@ -9155,7 +9131,7 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid) + } + + /* Reconfigure all daemon services upon peer detach */ +- ret = glusterd_svcs_reconfigure(NULL); ++ ret = glusterd_svcs_reconfigure(); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, + "Failed to reconfigure all daemon services."); +@@ -14746,74 +14722,3 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo) + return _gf_true; + return _gf_false; + } +- +-int32_t +-glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, +- int32_t count) +-{ +- int ret = -1; +- int32_t pid = -1; +- int32_t brick_online = -1; +- char key[64] = {0}; +- int keylen; +- char *pidfile = NULL; +- xlator_t *this = NULL; +- char *uuid_str = NULL; +- +- this = THIS; +- GF_VALIDATE_OR_GOTO(THIS->name, this, out); +- +- GF_VALIDATE_OR_GOTO(this->name, volinfo, out); +- GF_VALIDATE_OR_GOTO(this->name, dict, out); +- +- keylen = snprintf(key, sizeof(key), "brick%d.hostname", count); +- ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon", +- SLEN("Self-heal Daemon")); +- if (ret) +- goto out; +- +- keylen = snprintf(key, sizeof(key), "brick%d.path", count); +- uuid_str = gf_strdup(uuid_utoa(MY_UUID)); +- if (!uuid_str) { +- ret = -1; +- goto out; +- } +- ret = dict_set_dynstrn(dict, key, keylen, uuid_str); +- if (ret) +- goto out; +- uuid_str = NULL; +- +- /* shd doesn't have a port. but the cli needs a port key with +- * a zero value to parse. +- * */ +- +- keylen = snprintf(key, sizeof(key), "brick%d.port", count); +- ret = dict_set_int32n(dict, key, keylen, 0); +- if (ret) +- goto out; +- +- pidfile = volinfo->shd.svc.proc.pidfile; +- +- brick_online = gf_is_service_running(pidfile, &pid); +- +- /* If shd is not running, then don't print the pid */ +- if (!brick_online) +- pid = -1; +- keylen = snprintf(key, sizeof(key), "brick%d.pid", count); +- ret = dict_set_int32n(dict, key, keylen, pid); +- if (ret) +- goto out; +- +- keylen = snprintf(key, sizeof(key), "brick%d.status", count); +- ret = dict_set_int32n(dict, key, keylen, brick_online); +- +-out: +- if (uuid_str) +- GF_FREE(uuid_str); +- if (ret) +- gf_msg(this ? 
this->name : "glusterd", GF_LOG_ERROR, 0, +- GD_MSG_DICT_SET_FAILED, +- "Returning %d. adding values to dict failed", ret); +- +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h +index 5c6a453..ead16b2 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.h ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.h +@@ -881,8 +881,4 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo); + + char * + search_brick_path_from_proc(pid_t brick_pid, char *brickpath); +- +-int32_t +-glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, +- int32_t count); + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c +index 8b58d40..5e0214e 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c +@@ -36,7 +36,6 @@ + #include "glusterd-svc-mgmt.h" + #include "glusterd-svc-helper.h" + #include "glusterd-snapd-svc-helper.h" +-#include "glusterd-shd-svc-helper.h" + #include "glusterd-gfproxyd-svc-helper.h" + + struct gd_validate_reconf_opts { +@@ -4865,7 +4864,7 @@ volgen_get_shd_key(int type) + static int + volgen_set_shd_key_enable(dict_t *set_dict, const int type) + { +- int ret = 0; ++ int ret = -1; + + switch (type) { + case GF_CLUSTER_TYPE_REPLICATE: +@@ -5156,15 +5155,24 @@ out: + static int + build_shd_volume_graph(xlator_t *this, volgen_graph_t *graph, + glusterd_volinfo_t *volinfo, dict_t *mod_dict, +- dict_t *set_dict, gf_boolean_t graph_check) ++ dict_t *set_dict, gf_boolean_t graph_check, ++ gf_boolean_t *valid_config) + { + volgen_graph_t cgraph = {0}; + int ret = 0; + int clusters = -1; + ++ if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED)) ++ goto out; ++ + if (!glusterd_is_shd_compatible_volume(volinfo)) + goto out; + ++ /* Shd graph is valid only when there is at least one ++ * replica/disperse volume is present ++ */ ++ *valid_config = _gf_true; ++ + ret = prepare_shd_volume_options(volinfo, mod_dict, set_dict); + if (ret) + goto out; +@@ -5194,16 +5202,19 @@ out: + } + + int +-build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph, +- dict_t *mod_dict) ++build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict) + { ++ glusterd_volinfo_t *voliter = NULL; + xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; + dict_t *set_dict = NULL; + int ret = 0; ++ gf_boolean_t valid_config = _gf_false; + xlator_t *iostxl = NULL; + gf_boolean_t graph_check = _gf_false; + + this = THIS; ++ priv = this->private; + + set_dict = dict_new(); + if (!set_dict) { +@@ -5213,18 +5224,26 @@ build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph, + + if (mod_dict) + graph_check = dict_get_str_boolean(mod_dict, "graph-check", 0); +- iostxl = volgen_graph_add_as(graph, "debug/io-stats", volinfo->volname); ++ iostxl = volgen_graph_add_as(graph, "debug/io-stats", "glustershd"); + if (!iostxl) { + ret = -1; + goto out; + } + +- ret = build_shd_volume_graph(this, graph, volinfo, mod_dict, set_dict, +- graph_check); ++ cds_list_for_each_entry(voliter, &priv->volumes, vol_list) ++ { ++ ret = build_shd_volume_graph(this, graph, voliter, mod_dict, set_dict, ++ graph_check, &valid_config); ++ ret = dict_reset(set_dict); ++ if (ret) ++ goto out; ++ } + + out: + if (set_dict) + dict_unref(set_dict); ++ if (!valid_config) ++ ret = -EINVAL; + return ret; + } + +@@ -6541,10 +6560,6 @@ glusterd_create_volfiles(glusterd_volinfo_t *volinfo) + if (ret) + gf_log(this->name, GF_LOG_ERROR, "Could not generate 
gfproxy volfiles"); + +- ret = glusterd_shdsvc_create_volfile(volinfo); +- if (ret) +- gf_log(this->name, GF_LOG_ERROR, "Could not generate shd volfiles"); +- + dict_del_sizen(volinfo->dict, "skip-CLIOT"); + + out: +@@ -6625,7 +6640,7 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict, + ret = dict_set_int32_sizen(val_dict, "graph-check", 1); + if (ret) + goto out; +- ret = build_shd_graph(volinfo, &graph, val_dict); ++ ret = build_shd_graph(&graph, val_dict); + if (!ret) + ret = graph_reconf_validateopt(&graph.graph, op_errstr); + +@@ -7002,22 +7017,3 @@ gd_is_boolean_option(char *key) + + return _gf_false; + } +- +-int +-glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename, +- dict_t *mode_dict) +-{ +- int ret = -1; +- volgen_graph_t graph = { +- 0, +- }; +- +- graph.type = GF_SHD; +- ret = build_shd_graph(volinfo, &graph, mode_dict); +- if (!ret) +- ret = volgen_write_volfile(&graph, filename); +- +- volgen_graph_free(&graph); +- +- return ret; +-} +diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h +index 897d8fa..f9fc068 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h ++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h +@@ -66,7 +66,6 @@ typedef enum { + GF_REBALANCED = 1, + GF_QUOTAD, + GF_SNAPD, +- GF_SHD, + } glusterd_graph_type_t; + + struct volgen_graph { +@@ -78,8 +77,6 @@ typedef struct volgen_graph volgen_graph_t; + + typedef int (*glusterd_graph_builder_t)(volgen_graph_t *graph, + dict_t *mod_dict); +-typedef int (*glusterd_vol_graph_builder_t)(glusterd_volinfo_t *, +- char *filename, dict_t *mod_dict); + + #define COMPLETE_OPTION(key, completion, ret) \ + do { \ +@@ -204,8 +201,7 @@ void + glusterd_get_shd_filepath(char *filename); + + int +-build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph, +- dict_t *mod_dict); ++build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict); + + int + build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict); +@@ -317,9 +313,4 @@ glusterd_generate_gfproxyd_volfile(glusterd_volinfo_t *volinfo); + + int + glusterd_build_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *filename); +- +-int +-glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename, +- dict_t *mode_dict); +- + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 4c3ad50..1ea8ba6 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -1940,7 +1940,7 @@ static int + glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, + dict_t *dict, char **op_errstr) + { +- glusterd_svc_t *svc = NULL; ++ glusterd_conf_t *priv = NULL; + gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID; + int ret = 0; + char msg[2408] = { +@@ -1950,6 +1950,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, + "Self-heal daemon is not running. 
" + "Check self-heal daemon log file."; + ++ priv = this->private; + ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"), + (int32_t *)&heal_op); + if (ret) { +@@ -1958,7 +1959,6 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, + goto out; + } + +- svc = &(volinfo->shd.svc); + switch (heal_op) { + case GF_SHD_OP_INVALID: + case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/ +@@ -1988,7 +1988,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, + goto out; + } + +- if (!svc->online) { ++ if (!priv->shd_svc.online) { + ret = -1; + *op_errstr = gf_strdup(offline_msg); + goto out; +@@ -2009,7 +2009,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, + goto out; + } + +- if (!svc->online) { ++ if (!priv->shd_svc.online) { + ret = -1; + *op_errstr = gf_strdup(offline_msg); + goto out; +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index c0973cb..d360312 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1537,6 +1537,14 @@ init(xlator_t *this) + exit(1); + } + ++ ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_GLUSTERSHD_RUN_DIR); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED, ++ "Unable to create " ++ "glustershd running directory"); ++ exit(1); ++ } ++ + ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_NFS_RUN_DIR); + if (ret) { + gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED, +@@ -1811,9 +1819,6 @@ init(xlator_t *this) + CDS_INIT_LIST_HEAD(&conf->snapshots); + CDS_INIT_LIST_HEAD(&conf->missed_snaps_list); + CDS_INIT_LIST_HEAD(&conf->brick_procs); +- CDS_INIT_LIST_HEAD(&conf->shd_procs); +- pthread_mutex_init(&conf->attach_lock, NULL); +- pthread_mutex_init(&conf->volume_lock, NULL); + + pthread_mutex_init(&conf->mutex, NULL); + conf->rpc = rpc; +@@ -1894,6 +1899,7 @@ init(xlator_t *this) + glusterd_mgmt_v3_lock_timer_init(); + glusterd_txn_opinfo_dict_init(); + ++ glusterd_shdsvc_build(&conf->shd_svc); + glusterd_nfssvc_build(&conf->nfs_svc); + glusterd_quotadsvc_build(&conf->quotad_svc); + glusterd_bitdsvc_build(&conf->bitd_svc); +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index 0fbc9dd..2be005c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -28,7 +28,6 @@ + #include "glusterd-sm.h" + #include "glusterd-snapd-svc.h" + #include "glusterd-tierd-svc.h" +-#include "glusterd-shd-svc.h" + #include "glusterd-bitd-svc.h" + #include "glusterd1-xdr.h" + #include "protocol-common.h" +@@ -173,6 +172,7 @@ typedef struct { + char workdir[VALID_GLUSTERD_PATHMAX]; + char rundir[VALID_GLUSTERD_PATHMAX]; + rpcsvc_t *rpc; ++ glusterd_svc_t shd_svc; + glusterd_svc_t nfs_svc; + glusterd_svc_t bitd_svc; + glusterd_svc_t scrub_svc; +@@ -181,7 +181,6 @@ typedef struct { + struct cds_list_head volumes; + struct cds_list_head snapshots; /*List of snap volumes */ + struct cds_list_head brick_procs; /* List of brick processes */ +- struct cds_list_head shd_procs; /* List of shd processes */ + pthread_mutex_t xprt_lock; + struct list_head xprt_list; + pthread_mutex_t import_volumes; +@@ -222,11 +221,6 @@ typedef struct { + gf_atomic_t blockers; + uint32_t mgmt_v3_lock_timeout; + gf_boolean_t restart_bricks; +- pthread_mutex_t attach_lock; /* Lock can be per process or a common one */ +- pthread_mutex_t volume_lock; /* We release the big_lock from lot of places +- which might 
lead the modification of volinfo +- list. +- */ + gf_atomic_t thread_count; + } glusterd_conf_t; + +@@ -519,7 +513,6 @@ struct glusterd_volinfo_ { + + glusterd_snapdsvc_t snapd; + glusterd_tierdsvc_t tierd; +- glusterd_shdsvc_t shd; + glusterd_gfproxydsvc_t gfproxyd; + int32_t quota_xattr_version; + gf_boolean_t stage_deleted; /* volume has passed staging +@@ -646,6 +639,7 @@ typedef enum { + #define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR "/gluster/snaps" + #define GLUSTERD_BITD_RUN_DIR "/bitd" + #define GLUSTERD_SCRUB_RUN_DIR "/scrub" ++#define GLUSTERD_GLUSTERSHD_RUN_DIR "/glustershd" + #define GLUSTERD_NFS_RUN_DIR "/nfs" + #define GLUSTERD_QUOTAD_RUN_DIR "/quotad" + #define GLUSTER_SHARED_STORAGE_BRICK_DIR GLUSTERD_DEFAULT_WORKDIR "/ss_brick" +@@ -701,26 +695,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args); + } \ + } while (0) + +-#define GLUSTERD_GET_SHD_RUNDIR(path, volinfo, priv) \ +- do { \ +- int32_t _shd_dir_len; \ +- _shd_dir_len = snprintf(path, PATH_MAX, "%s/shd/%s", priv->rundir, \ +- volinfo->volname); \ +- if ((_shd_dir_len < 0) || (_shd_dir_len >= PATH_MAX)) { \ +- path[0] = 0; \ +- } \ +- } while (0) +- +-#define GLUSTERD_GET_SHD_PID_FILE(path, volinfo, priv) \ +- do { \ +- int32_t _shd_pid_len; \ +- _shd_pid_len = snprintf(path, PATH_MAX, "%s/shd/%s-shd.pid", \ +- priv->rundir, volinfo->volname); \ +- if ((_shd_pid_len < 0) || (_shd_pid_len >= PATH_MAX)) { \ +- path[0] = 0; \ +- } \ +- } while (0) +- + #define GLUSTERD_GET_VOLUME_PID_DIR(path, volinfo, priv) \ + do { \ + int32_t _vol_pid_len; \ +diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c +index 532ef35..e156d4d 100644 +--- a/xlators/protocol/client/src/client.c ++++ b/xlators/protocol/client/src/client.c +@@ -46,6 +46,7 @@ client_fini_complete(xlator_t *this) + GF_VALIDATE_OR_GOTO(this->name, this->private, out); + + clnt_conf_t *conf = this->private; ++ + if (!conf->destroy) + return 0; + +@@ -68,11 +69,6 @@ client_notify_dispatch_uniq(xlator_t *this, int32_t event, void *data, ...) + return 0; + + return client_notify_dispatch(this, event, data); +- +- /* Please avoid any code that access xlator object here +- * Because for a child down event, once we do the signal +- * we will start cleanup. +- */ + } + + int +@@ -109,11 +105,6 @@ client_notify_dispatch(xlator_t *this, int32_t event, void *data, ...) + } + pthread_mutex_unlock(&ctx->notify_lock); + +- /* Please avoid any code that access xlator object here +- * Because for a child down event, once we do the signal +- * we will start cleanup. +- */ +- + return ret; + } + +@@ -2287,7 +2278,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, + { + xlator_t *this = NULL; + clnt_conf_t *conf = NULL; +- gf_boolean_t is_parent_down = _gf_false; + int ret = 0; + + this = mydata; +@@ -2351,19 +2341,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, + if (conf->portmap_err_logged) + conf->disconnect_err_logged = 1; + } +- /* +- * Once we complete the child down notification, +- * There is a chance that the graph might get freed, +- * So it is not safe to access any xlator contens +- * So here we are checking whether the parent is down +- * or not. +- */ +- pthread_mutex_lock(&conf->lock); +- { +- is_parent_down = conf->parent_down; +- } +- pthread_mutex_unlock(&conf->lock); +- + /* If the CHILD_DOWN event goes to parent xlator + multiple times, the logic of parent xlator notify + may get screwed up.. (eg. 
CHILD_MODIFIED event in
+@@ -2371,12 +2348,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+                to parent are genuine */
+             ret = client_notify_dispatch_uniq(this, GF_EVENT_CHILD_DOWN,
+                                               NULL);
+-            if (is_parent_down) {
+-                /* If parent is down, then there should not be any
+-                 * operation after a child down.
+-                 */
+-                goto out;
+-            }
+             if (ret)
+                 gf_msg(this->name, GF_LOG_INFO, 0,
+                        PC_MSG_CHILD_DOWN_NOTIFY_FAILED,
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch b/SOURCES/0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
new file mode 100644
index 0000000..028f92c
--- /dev/null
+++ b/SOURCES/0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
@@ -0,0 +1,57 @@
+From 7a04fb9999f5d25c17f5593eed5e98d0f5a1932d Mon Sep 17 00:00:00 2001
+From: karthik-us
+Date: Mon, 15 Jul 2019 14:30:52 +0530
+Subject: [PATCH 251/255] tests: Fix
+ bug-1717819-metadata-split-brain-detection.t failure
+
+
+
+Problem:
+tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t fails
+intermittently in test cases #49 & #50, which compare the user-set
+xattr values after enabling the heal. We are not waiting for
+the heal to complete before comparing those values, which might lead
+those tests to fail.
+
+Fix:
+Wait till the HEAL-TIMEOUT before comparing the xattr values.
+Also check that the shd comes up and the bricks connect to the shd
+process in another case.
+
+Change-Id: I0021c2d5d251111c695e2bf18c63e8189e456114
+fixes: bz#1704562
+Signed-off-by: karthik-us
+Reviewed-on: https://code.engineering.redhat.com/gerrit/176071
+Reviewed-by: Atin Mukherjee
+Tested-by: RHGS Build Bot
+---
+ tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
+index 94b8bf3..76d1f21 100644
+--- a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
++++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
+@@ -76,6 +76,10 @@ EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+ # Launch heal
+ TEST $CLI volume heal $V0 enable
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+ TEST $CLI volume heal $V0
+ EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+@@ -117,6 +121,8 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+ EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
++TEST $CLI volume heal $V0
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+ B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+ B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch b/SOURCES/0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
new file mode 100644
index 0000000..b722dff
--- /dev/null
+++ 
b/SOURCES/0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch @@ -0,0 +1,63 @@ +From 5a35a996257d6aaa7fa55ff1e1aac407dd4824fe Mon Sep 17 00:00:00 2001 +From: Sanju Rakonde +Date: Fri, 12 Jul 2019 16:28:04 +0530 +Subject: [PATCH 252/255] glusterd: do not mark skip_locking as true for + geo-rep operations + +We need to send the commit req to peers in case of geo-rep +operations even though it is a no volname operation. In commit +phase peers try to set the txn_opinfo which will fail because +it is a no volname operation where we don't require a commit +phase. We mark skip_locking as true for no volname operations, +but we have to give an exception to geo-rep operations, so that +they can set txn_opinfo in commit phase. + +Please refer to detailed RCA at the bug: 1729463 + +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23034/ + +>fixes: bz#1729463 +>Change-Id: I9f2478b12a281f6e052035c0563c40543493a3fc +>Signed-off-by: Sanju Rakonde + +Change-Id: I9f2478b12a281f6e052035c0563c40543493a3fc +BUG: 1727785 +Signed-off-by: Sanju Rakonde +Reviewed-on: https://code.engineering.redhat.com/gerrit/176032 +Tested-by: RHGS Build Bot +Reviewed-by: Shwetha Acharya +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-handler.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index cb2666b..2e73c98 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -1078,7 +1078,11 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req) + + /* In cases where there is no volname, the receivers won't have a + * transaction opinfo created, as for those operations, the locking +- * phase where the transaction opinfos are created, won't be called. */ ++ * phase where the transaction opinfos are created, won't be called. ++ * skip_locking will be true for all such transaction and we clear ++ * the txn_opinfo after the staging phase, except for geo-replication ++ * operations where we need to access txn_opinfo in the later phases also. ++ */ + ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info); + if (ret) { + gf_msg_debug(this->name, 0, "No transaction's opinfo set"); +@@ -1087,7 +1091,8 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req) + glusterd_txn_opinfo_init(&txn_op_info, &state, &op_req.op, + req_ctx->dict, req); + +- txn_op_info.skip_locking = _gf_true; ++ if (req_ctx->op != GD_OP_GSYNC_SET) ++ txn_op_info.skip_locking = _gf_true; + ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL, +-- +1.8.3.1 + diff --git a/SOURCES/0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch b/SOURCES/0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch new file mode 100644 index 0000000..d313482 --- /dev/null +++ b/SOURCES/0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch @@ -0,0 +1,246 @@ +From ea7f11b989896d76b8d091d26bc0241bce9413f8 Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Thu, 4 Jul 2019 13:21:33 +0200 +Subject: [PATCH 253/255] core: fix deadlock between statedump and + fd_anonymous() + +There exists a deadlock between statedump generation and fd_anonymous() +function because they are acquiring inode table lock and inode lock in +reverse order. 
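+
+To illustrate the inversion, a minimal standalone sketch with plain
+pthread mutexes (table_lock and inode_lock are placeholder names here,
+not the actual GlusterFS symbols):
+
+    #include <pthread.h>
+
+    pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+    pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
+
+    /* statedump path: inode table lock first, then inode lock */
+    void *statedump_thread(void *arg)
+    {
+        pthread_mutex_lock(&table_lock);
+        pthread_mutex_lock(&inode_lock);
+        /* ... dump per-inode state ... */
+        pthread_mutex_unlock(&inode_lock);
+        pthread_mutex_unlock(&table_lock);
+        return NULL;
+    }
+
+    /* fd_anonymous() path: inode lock first, then table lock */
+    void *anon_fd_thread(void *arg)
+    {
+        pthread_mutex_lock(&inode_lock);
+        pthread_mutex_lock(&table_lock);
+        /* ... allocate and bind the anonymous fd ... */
+        pthread_mutex_unlock(&table_lock);
+        pthread_mutex_unlock(&inode_lock);
+        return NULL;
+    }
+
+If each thread acquires its first mutex before the other takes its
+second one, both block forever: the classic AB-BA deadlock.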
+ +This patch modifies fd_anonymous() so that it takes inode lock only when +it's really necessary, avoiding the deadlock. + +Upstream patch: +> Change-Id: I24355447f0ea1b39e2546782ad07f0512cc381e7 +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22995 +> BUG: 1727068 +> Signed-off-by: Xavi Hernandez + +Change-Id: I24355447f0ea1b39e2546782ad07f0512cc381e7 +Fixes: bz#1722209 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/176096 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/fd.c | 137 ++++++++++++++++++++++---------------------------- + 1 file changed, 61 insertions(+), 76 deletions(-) + +diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c +index b8aac72..314546a 100644 +--- a/libglusterfs/src/fd.c ++++ b/libglusterfs/src/fd.c +@@ -532,7 +532,7 @@ fd_unref(fd_t *fd) + return; + } + +-fd_t * ++static fd_t * + __fd_bind(fd_t *fd) + { + list_del_init(&fd->inode_list); +@@ -562,9 +562,9 @@ fd_bind(fd_t *fd) + } + + static fd_t * +-__fd_create(inode_t *inode, uint64_t pid) ++fd_allocate(inode_t *inode, uint64_t pid) + { +- fd_t *fd = NULL; ++ fd_t *fd; + + if (inode == NULL) { + gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, +@@ -573,64 +573,67 @@ __fd_create(inode_t *inode, uint64_t pid) + } + + fd = mem_get0(inode->table->fd_mem_pool); +- if (!fd) +- goto out; ++ if (fd == NULL) { ++ return NULL; ++ } + + fd->xl_count = inode->table->xl->graph->xl_count + 1; + + fd->_ctx = GF_CALLOC(1, (sizeof(struct _fd_ctx) * fd->xl_count), + gf_common_mt_fd_ctx); +- if (!fd->_ctx) +- goto free_fd; ++ if (fd->_ctx == NULL) { ++ goto failed; ++ } + + fd->lk_ctx = fd_lk_ctx_create(); +- if (!fd->lk_ctx) +- goto free_fd_ctx; +- +- fd->inode = inode_ref(inode); +- fd->pid = pid; +- INIT_LIST_HEAD(&fd->inode_list); +- +- LOCK_INIT(&fd->lock); +-out: +- return fd; ++ if (fd->lk_ctx != NULL) { ++ /* We need to take a reference from the inode, but we cannot do it ++ * here because this function can be called with the inode lock taken ++ * and inode_ref() takes the inode's table lock. This is the reverse ++ * of the logical lock acquisition order and can cause a deadlock. So ++ * we simply assign the inode here and we delefate the inode reference ++ * responsibility to the caller (when this function succeeds and the ++ * inode lock is released). This is safe because the caller must hold ++ * a reference of the inode to use it, so it's guaranteed that the ++ * number of references won't reach 0 before the caller finishes. ++ * ++ * TODO: minimize use of locks in favor of atomic operations to avoid ++ * these dependencies. */ ++ fd->inode = inode; ++ fd->pid = pid; ++ INIT_LIST_HEAD(&fd->inode_list); ++ LOCK_INIT(&fd->lock); ++ GF_ATOMIC_INIT(fd->refcount, 1); ++ return fd; ++ } + +-free_fd_ctx: + GF_FREE(fd->_ctx); +-free_fd: ++ ++failed: + mem_put(fd); + + return NULL; + } + + fd_t * +-fd_create(inode_t *inode, pid_t pid) ++fd_create_uint64(inode_t *inode, uint64_t pid) + { +- fd_t *fd = NULL; +- +- fd = __fd_create(inode, (uint64_t)pid); +- if (!fd) +- goto out; ++ fd_t *fd; + +- fd = fd_ref(fd); ++ fd = fd_allocate(inode, pid); ++ if (fd != NULL) { ++ /* fd_allocate() doesn't get a reference from the inode. We need to ++ * take it here in case of success. 
*/ ++ inode_ref(inode); ++ } + +-out: + return fd; + } + + fd_t * +-fd_create_uint64(inode_t *inode, uint64_t pid) ++fd_create(inode_t *inode, pid_t pid) + { +- fd_t *fd = NULL; +- +- fd = __fd_create(inode, pid); +- if (!fd) +- goto out; +- +- fd = fd_ref(fd); +- +-out: +- return fd; ++ return fd_create_uint64(inode, (uint64_t)pid); + } + + static fd_t * +@@ -719,10 +722,13 @@ __fd_lookup_anonymous(inode_t *inode, int32_t flags) + return fd; + } + +-static fd_t * +-__fd_anonymous(inode_t *inode, int32_t flags) ++fd_t * ++fd_anonymous_with_flags(inode_t *inode, int32_t flags) + { + fd_t *fd = NULL; ++ bool ref = false; ++ ++ LOCK(&inode->lock); + + fd = __fd_lookup_anonymous(inode, flags); + +@@ -730,54 +736,33 @@ __fd_anonymous(inode_t *inode, int32_t flags) + __fd_lookup_anonymous(), so no need of one more fd_ref(). + if (!fd); then both create and bind won't bump up the ref + count, so we have to call fd_ref() after bind. */ +- if (!fd) { +- fd = __fd_create(inode, 0); +- +- if (!fd) +- return NULL; +- +- fd->anonymous = _gf_true; +- fd->flags = GF_ANON_FD_FLAGS | flags; ++ if (fd == NULL) { ++ fd = fd_allocate(inode, 0); ++ if (fd != NULL) { ++ fd->anonymous = _gf_true; ++ fd->flags = GF_ANON_FD_FLAGS | (flags & O_DIRECT); + +- __fd_bind(fd); ++ __fd_bind(fd); + +- __fd_ref(fd); ++ ref = true; ++ } + } + +- return fd; +-} +- +-fd_t * +-fd_anonymous(inode_t *inode) +-{ +- fd_t *fd = NULL; ++ UNLOCK(&inode->lock); + +- LOCK(&inode->lock); +- { +- fd = __fd_anonymous(inode, GF_ANON_FD_FLAGS); ++ if (ref) { ++ /* fd_allocate() doesn't get a reference from the inode. We need to ++ * take it here in case of success. */ ++ inode_ref(inode); + } +- UNLOCK(&inode->lock); + + return fd; + } + + fd_t * +-fd_anonymous_with_flags(inode_t *inode, int32_t flags) ++fd_anonymous(inode_t *inode) + { +- fd_t *fd = NULL; +- +- if (flags & O_DIRECT) +- flags = GF_ANON_FD_FLAGS | O_DIRECT; +- else +- flags = GF_ANON_FD_FLAGS; +- +- LOCK(&inode->lock); +- { +- fd = __fd_anonymous(inode, flags); +- } +- UNLOCK(&inode->lock); +- +- return fd; ++ return fd_anonymous_with_flags(inode, 0); + } + + fd_t * +-- +1.8.3.1 + diff --git a/SOURCES/0254-Detach-iot_worker-to-release-its-resources.patch b/SOURCES/0254-Detach-iot_worker-to-release-its-resources.patch new file mode 100644 index 0000000..6019436 --- /dev/null +++ b/SOURCES/0254-Detach-iot_worker-to-release-its-resources.patch @@ -0,0 +1,43 @@ +From 2bbb097d087bb5ef142775500708f11ccd31bac0 Mon Sep 17 00:00:00 2001 +From: Liguang Li +Date: Fri, 21 Jun 2019 12:18:58 +0800 +Subject: [PATCH 254/255] Detach iot_worker to release its resources + +When iot_worker terminates, its resources have not been reaped, which +will consumes lots of memory. + +Detach iot_worker to automically release its resources back to the +system. 
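+
+The fix is the standard detached-thread pattern; a minimal sketch with
+the plain pthread API (the real code goes through the
+gf_thread_create() wrapper):
+
+    #include <pthread.h>
+
+    static void *iot_worker_fn(void *arg)
+    {
+        /* ... serve queued requests, exit when idle ... */
+        return NULL;
+    }
+
+    int spawn_worker(void)
+    {
+        pthread_t thread;
+        int ret = pthread_create(&thread, NULL, iot_worker_fn, NULL);
+        if (ret == 0)
+            pthread_detach(thread); /* stack and TLS are reclaimed
+                                       when the worker exits; no
+                                       pthread_join() is needed */
+        return ret;
+    }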
+ +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22918/ + +>fixes: bz#1729107 +>Change-Id: I71fabb2940e76ad54dc56b4c41aeeead2644b8bb +>Signed-off-by: Liguang Li + +BUG:1729108 +Change-Id: I71fabb2940e76ad54dc56b4c41aeeead2644b8bb +Signed-off-by: Liguang Li +Reviewed-on: https://code.engineering.redhat.com/gerrit/176106 +Tested-by: RHGS Build Bot +Reviewed-by: Ravishankar Narayanankutty +Reviewed-by: Atin Mukherjee +--- + xlators/performance/io-threads/src/io-threads.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c +index 060d923..1d96691 100644 +--- a/xlators/performance/io-threads/src/io-threads.c ++++ b/xlators/performance/io-threads/src/io-threads.c +@@ -835,6 +835,7 @@ __iot_workers_scale(iot_conf_t *conf) + ret = gf_thread_create(&thread, &conf->w_attr, iot_worker, conf, + "iotwr%03hx", conf->curr_count & 0x3ff); + if (ret == 0) { ++ pthread_detach(thread); + conf->curr_count++; + gf_msg_debug(conf->this->name, 0, + "scaled threads to %d (queue_size=%d/%d)", +-- +1.8.3.1 + diff --git a/SOURCES/0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch b/SOURCES/0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch new file mode 100644 index 0000000..89a8aaf --- /dev/null +++ b/SOURCES/0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch @@ -0,0 +1,104 @@ +From 684a4949552164d3469329b3f959de4369d54faa Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Sun, 14 Jul 2019 08:06:11 +0530 +Subject: [PATCH 255/255] Revert "tier/shd/glusterd: with shd mux, the shd + volfile path have to be updated for tier-heald.t" + +This reverts commit 6e7d333625ecd9f7402c2e839338350fa86eaf45. + +Updates: bz#1471742 +Change-Id: I6c27634999f72b5bbb35d5d13cdebda7af072b01 +Reviewed-on: https://code.engineering.redhat.com/gerrit/176017 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/basic/tier/tier-heald.t | 35 +++++++++++++++-------------------- + 1 file changed, 15 insertions(+), 20 deletions(-) + +diff --git a/tests/basic/tier/tier-heald.t b/tests/basic/tier/tier-heald.t +index 0ec9e43..a8e634f 100644 +--- a/tests/basic/tier/tier-heald.t ++++ b/tests/basic/tier/tier-heald.t +@@ -11,7 +11,7 @@ cleanup; + TEST glusterd + TEST pidof glusterd + +-r2_volfile=$(gluster system:: getwd)"/vols/r2/r2-shd.vol" ++volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol" + + # Commands should fail when both tiers are not of distribute type. + # Glustershd shouldn't be running as long as there are no replicate/disperse +@@ -34,56 +34,51 @@ TEST $CLI volume tier r2 attach $H0:$B0/r2_hot + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal r2 enable + EXPECT "enable" volume_option r2 "cluster.self-heal-daemon" +-EXPECT "enable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon ++EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal r2 disable + EXPECT "disable" volume_option r2 "cluster.self-heal-daemon" +-EXPECT "disable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon ++EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + # Commands should work on disperse volume. 
+ TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2 + TEST $CLI volume start ec2 + +-ec2_volfile=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol" +- + TEST $CLI volume tier ec2 attach replica 2 $H0:$B0/ec2_hot{1..4} + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal ec2 enable + EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon" +-EXPECT "enable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon ++EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + TEST $CLI volume heal ec2 disable + EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon" +-EXPECT "disable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon ++EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon + EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid + + #Check that shd graph is rewritten correctly on volume stop/start +-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse +-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + TEST $CLI volume stop r2 +-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse +- +-# Has been commented as the validations after stop using volfile dont hold true. +-#EXPECT "N" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + TEST $CLI volume stop ec2 + # When both the volumes are stopped glustershd volfile is not modified just the + # process is stopped + TEST "[ -z $(get_shd_process_pid) ]" + + TEST $CLI volume start r2 +-# Has been commented as the validations after stop using volfile dont hold true. 
+-#EXPECT "N" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse +-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate ++EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate + + TEST $CLI volume start ec2 + +-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse +-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "Y" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate + + TEST $CLI volume tier ec2 detach force + +-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse +-EXPECT "N" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate ++EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse ++EXPECT "N" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate + + TEST $CLI volume set r2 self-heal-daemon on + TEST $CLI volume set r2 cluster.self-heal-daemon off +-- +1.8.3.1 + diff --git a/SOURCES/0256-features-snapview-server-use-the-same-volfile-server.patch b/SOURCES/0256-features-snapview-server-use-the-same-volfile-server.patch new file mode 100644 index 0000000..d410373 --- /dev/null +++ b/SOURCES/0256-features-snapview-server-use-the-same-volfile-server.patch @@ -0,0 +1,117 @@ +From f90df1167bc70c634ba33c181232321da6770709 Mon Sep 17 00:00:00 2001 +From: Raghavendra Bhat +Date: Tue, 25 Jun 2019 10:51:33 -0400 +Subject: [PATCH 256/261] features/snapview-server: use the same volfile server + for gfapi options + +snapview server xlator makes use of "localhost" as the volfile server while +initing the new glfs instance to talk to a snapshot. While localhost is fine, +better use the same volfile server that was used to start the snapshot +daemon containing the snapview-server xlator. + +Upstream Patch: +>Change-Id: I4485d39b0e3d066f481adc6958ace53ea33237f7 +>fixes: bz#1725211 +>Signed-off-by: Raghavendra Bhat +> patch: https://review.gluster.org/#/c/glusterfs/+/22974/ + +BUG: 1722757 +Change-Id: I4485d39b0e3d066f481adc6958ace53ea33237f7 +Signed-off-by: Raghavendra Bhat +Reviewed-on: https://code.engineering.redhat.com/gerrit/175984 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + .../snapview-server/src/snapview-server-helpers.c | 44 ++++++++++++++++++++-- + .../snapview-server/src/snapview-server-messages.h | 2 +- + 2 files changed, 42 insertions(+), 4 deletions(-) + +diff --git a/xlators/features/snapview-server/src/snapview-server-helpers.c b/xlators/features/snapview-server/src/snapview-server-helpers.c +index 5514a54..62c1dda 100644 +--- a/xlators/features/snapview-server/src/snapview-server-helpers.c ++++ b/xlators/features/snapview-server/src/snapview-server-helpers.c +@@ -476,6 +476,7 @@ __svs_initialise_snapshot_volume(xlator_t *this, const char *name, + char logfile[PATH_MAX] = { + 0, + }; ++ char *volfile_server = NULL; + + GF_VALIDATE_OR_GOTO("snapview-server", this, out); + GF_VALIDATE_OR_GOTO(this->name, this->private, out); +@@ -512,14 +513,50 @@ __svs_initialise_snapshot_volume(xlator_t *this, const char *name, + goto out; + } + +- ret = glfs_set_volfile_server(fs, "tcp", "localhost", 24007); ++ /* ++ * Before, localhost was used as the volfile server. But, with that ++ * method, accessing snapshots started giving ENOENT error if a ++ * specific bind address is mentioned in the glusterd volume file. 
++ * Check the bug https://bugzilla.redhat.com/show_bug.cgi?id=1725211. ++ * So, the new method is tried below, where, snapview-server first ++ * uses the volfile server used by the snapd (obtained from the ++ * command line arguments saved in the global context of the process). ++ * If the volfile server in global context is NULL, then localhost ++ * is tried (like before). ++ */ ++ if (this->ctx->cmd_args.volfile_server) { ++ volfile_server = gf_strdup(this->ctx->cmd_args.volfile_server); ++ if (!volfile_server) { ++ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, ++ SVS_MSG_VOLFILE_SERVER_GET_FAIL, ++ "failed to copy volfile server %s. ", ++ this->ctx->cmd_args.volfile_server); ++ ret = -1; ++ goto out; ++ } ++ } else { ++ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, ++ SVS_MSG_VOLFILE_SERVER_GET_FAIL, ++ "volfile server is NULL in cmd args. " ++ "Trying with localhost"); ++ volfile_server = gf_strdup("localhost"); ++ if (!volfile_server) { ++ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, ++ SVS_MSG_VOLFILE_SERVER_GET_FAIL, ++ "failed to copy volfile server localhost."); ++ ret = -1; ++ goto out; ++ } ++ } ++ ++ ret = glfs_set_volfile_server(fs, "tcp", volfile_server, 24007); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, local_errno, + SVS_MSG_SET_VOLFILE_SERVR_FAILED, + "setting the " +- "volfile server for snap volume %s " ++ "volfile server %s for snap volume %s " + "failed", +- dirent->name); ++ volfile_server, dirent->name); + goto out; + } + +@@ -561,6 +598,7 @@ out: + dirent->fs = fs; + } + ++ GF_FREE(volfile_server); + return fs; + } + +diff --git a/xlators/features/snapview-server/src/snapview-server-messages.h b/xlators/features/snapview-server/src/snapview-server-messages.h +index 8548015..f634ab5 100644 +--- a/xlators/features/snapview-server/src/snapview-server-messages.h ++++ b/xlators/features/snapview-server/src/snapview-server-messages.h +@@ -49,6 +49,6 @@ GLFS_MSGID(SNAPVIEW_SERVER, SVS_MSG_NO_MEMORY, SVS_MSG_MEM_ACNT_FAILED, + SVS_MSG_CLOSEDIR_FAILED, SVS_MSG_CLOSE_FAILED, + SVS_MSG_GFID_GEN_FAILED, SVS_MSG_GLFS_NEW_FAILED, + SVS_MSG_SET_VOLFILE_SERVR_FAILED, SVS_MSG_SET_LOGGING_FAILED, +- SVS_MSG_GLFS_INIT_FAILED); ++ SVS_MSG_VOLFILE_SERVER_GET_FAIL, SVS_MSG_GLFS_INIT_FAILED); + + #endif /* !_SNAPVIEW_CLIENT_MESSAGES_H_ */ +-- +1.8.3.1 + diff --git a/SOURCES/0257-geo-rep-Test-case-for-upgrading-config-file.patch b/SOURCES/0257-geo-rep-Test-case-for-upgrading-config-file.patch new file mode 100644 index 0000000..ffe44f1 --- /dev/null +++ b/SOURCES/0257-geo-rep-Test-case-for-upgrading-config-file.patch @@ -0,0 +1,80 @@ +From ed6cd2b7674896c810fdd059e35a0d319aacb068 Mon Sep 17 00:00:00 2001 +From: Shwetha K Acharya +Date: Tue, 2 Jul 2019 15:00:25 +0530 +Subject: [PATCH 257/261] geo-rep: Test case for upgrading config file + +Added test case for the patch +https://review.gluster.org/#/c/glusterfs/+/22894/4 + +Also updated if else structure in gsyncdconfig.py to avoid +repeated occurance of values in new configfile. 
+ +>fixes: bz#1707731 +>Change-Id: If97e1d37ac52dbd17d47be6cb659fc5a3ccab6d7 +>Signed-off-by: Shwetha K Acharya + +backport of https://review.gluster.org/#/c/glusterfs/+/22982/ + +Bug: 1708064 +Change-Id: If97e1d37ac52dbd17d47be6cb659fc5a3ccab6d7 +Signed-off-by: Shwetha K Acharya +Reviewed-on: https://code.engineering.redhat.com/gerrit/176603 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/gsyncdconfig.py | 11 +++++------ + tests/00-geo-rep/georep-basic-dr-rsync.t | 13 +++++++++++++ + 2 files changed, 18 insertions(+), 6 deletions(-) + +diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py +index 7edc582..1fc451f 100644 +--- a/geo-replication/syncdaemon/gsyncdconfig.py ++++ b/geo-replication/syncdaemon/gsyncdconfig.py +@@ -353,15 +353,14 @@ def config_upgrade(config_file, ret): + new_value = "tarssh" + else: + new_value = "rsync" +- config.set('vars', new_key, new_value) +- +- if key == "timeout": ++ config.set('vars', new_key, new_value) ++ elif key == "timeout": + new_key = "slave-timeout" + config.set('vars', new_key, value) +- + #for changes like: ignore_deletes to ignore-deletes +- new_key = key.replace("_", "-") +- config.set('vars', new_key, value) ++ else: ++ new_key = key.replace("_", "-") ++ config.set('vars', new_key, value) + + with open(config_file, 'w') as configfile: + config.write(configfile) +diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t +index 428e9ed..b432635 100644 +--- a/tests/00-geo-rep/georep-basic-dr-rsync.t ++++ b/tests/00-geo-rep/georep-basic-dr-rsync.t +@@ -212,6 +212,19 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave + #Verify arequal for whole volume + EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt} + ++#Test config upgrade BUG: 1707731 ++config_file=$GLUSTERD_WORKDIR/geo-replication/${GMV0}_${SH0}_${GSV0}/gsyncd.conf ++cat >> $config_file< +Date: Mon, 22 Jul 2019 17:35:21 +0530 +Subject: [PATCH 258/261] geo-rep: Fix mount broker setup issue + +The patch [1] added validation in gverify.sh to check if the gluster +binary on slave by executing gluster directly on slave. But for +non-root users, even though gluster binary is present, path is not +found when executed via ssh. Hence validate the gluster binary using +bash builtin 'type' command. 
+ +[1] https://review.gluster.org/19224 + +Backport of: + > Patch: https://review.gluster.org/23089 + > Change-Id: I93ca62c0c5b1e16263e586ddbbca8407d60ca126 + > fixes: bz#1731920 + > Signed-off-by: Kotresh HR + +Change-Id: I93ca62c0c5b1e16263e586ddbbca8407d60ca126 +BUG: 1720992 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/176727 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + geo-replication/src/gverify.sh | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh +index 7c88f9f..692c1d6 100755 +--- a/geo-replication/src/gverify.sh ++++ b/geo-replication/src/gverify.sh +@@ -207,13 +207,13 @@ function main() + fi; + + if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then +- err=$((ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "gluster --version") 2>&1) ++ ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "type -p gluster" + else +- err=$((ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "gluster --version") 2>&1) ++ ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "type -p gluster" + fi + + if [ $? -ne 0 ]; then +- echo "FORCE_BLOCKER|gluster command on $2@$3 failed. Error: $err" > $log_file ++ echo "FORCE_BLOCKER|gluster command not found on $3 for user $2." > $log_file + exit 1; + fi; + +-- +1.8.3.1 + diff --git a/SOURCES/0259-gluster-block-tuning-perf-options.patch b/SOURCES/0259-gluster-block-tuning-perf-options.patch new file mode 100644 index 0000000..a8ef9f4 --- /dev/null +++ b/SOURCES/0259-gluster-block-tuning-perf-options.patch @@ -0,0 +1,47 @@ +From 775a62906030e5b5dc60f17284a7d516ce4118f9 Mon Sep 17 00:00:00 2001 +From: Prasanna Kumar Kalever +Date: Thu, 27 Jun 2019 13:18:32 +0530 +Subject: [PATCH 259/261] gluster-block: tuning perf options + +As per the perf experiment run by Elvir, with NVME devices used for OCP (gluster) +it was seen that particularly read operations (read/randread) benefited from +these options. 
+ +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22963/ + +>Change-Id: Ibec4b96afd28e6f7e757b6ef203ccdbc0d9854d5 +>Fixes: bz#1727852 +>Signed-off-by: Prasanna Kumar Kalever + +Change-Id: Ibec4b96afd28e6f7e757b6ef203ccdbc0d9854d5 +BUG: 1708180 +Signed-off-by: Prasanna Kumar Kalever +Reviewed-on: https://code.engineering.redhat.com/gerrit/176724 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + extras/group-gluster-block | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/extras/group-gluster-block b/extras/group-gluster-block +index 56b406e..1e39801 100644 +--- a/extras/group-gluster-block ++++ b/extras/group-gluster-block +@@ -5,6 +5,14 @@ performance.stat-prefetch=off + performance.open-behind=off + performance.readdir-ahead=off + performance.strict-o-direct=on ++performance.client-io-threads=on ++performance.io-thread-count=32 ++performance.high-prio-threads=32 ++performance.normal-prio-threads=32 ++performance.low-prio-threads=32 ++performance.least-prio-threads=4 ++client.event-threads=8 ++server.event-threads=8 + network.remote-dio=disable + cluster.eager-lock=enable + cluster.quorum-type=auto +-- +1.8.3.1 + diff --git a/SOURCES/0260-ctime-Set-mdata-xattr-on-legacy-files.patch b/SOURCES/0260-ctime-Set-mdata-xattr-on-legacy-files.patch new file mode 100644 index 0000000..f07fb21 --- /dev/null +++ b/SOURCES/0260-ctime-Set-mdata-xattr-on-legacy-files.patch @@ -0,0 +1,885 @@ +From fc0903de1f7565e06db9d41e6dfd62221a745d24 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Mon, 24 Jun 2019 13:06:49 +0530 +Subject: [PATCH 260/261] ctime: Set mdata xattr on legacy files + +Problem: +The files which were created before ctime enabled would not +have "trusted.glusterfs.mdata"(stores time attributes) xattr. +Upon fops which modifies either ctime or mtime, the xattr +gets created with latest ctime, mtime and atime, which is +incorrect. It should update only the corresponding time +attribute and rest from backend + +Solution: +Creating xattr with values from brick is not possible as +each brick of replica set would have different times. +So create the xattr upon successful lookup if the xattr +is not created + +Note To Reviewers: +The time attributes used to set xattr is got from successful +lookup. Instead of sending the whole iatt over the wire via +setxattr, a structure called mdata_iatt is sent. The mdata_iatt +contains only time attributes. 
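+
+For reference, the time-attributes-only structure added by this patch
+(see the iatt.h hunk below) carries exactly six fields:
+
+    struct mdata_iatt {
+        int64_t ia_atime;   /* last access time        */
+        int64_t ia_mtime;   /* last modification time  */
+        int64_t ia_ctime;   /* last status change time */
+        uint32_t ia_atime_nsec;
+        uint32_t ia_mtime_nsec;
+        uint32_t ia_ctime_nsec;
+    };
+
+iatt_to_mdata() copies just these fields out of a full struct iatt, so
+the setxattr that creates the xattr on legacy files stays small.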
+ +Backport of + > Patch: https://review.gluster.org/22936 + > Change-Id: I5e535631ddef04195361ae0364336410a2895dd4 + > fixes: bz#1593542 + +Change-Id: I5e535631ddef04195361ae0364336410a2895dd4 +BUG: 1715422 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/176725 +Tested-by: RHGS Build Bot +Reviewed-by: Amar Tumballi Suryanarayan +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/dict.c | 59 ++++++++++ + libglusterfs/src/glusterfs/dict.h | 5 + + libglusterfs/src/glusterfs/glusterfs.h | 3 + + libglusterfs/src/glusterfs/iatt.h | 20 ++++ + libglusterfs/src/libglusterfs.sym | 3 + + rpc/xdr/src/glusterfs-fops.x | 1 + + rpc/xdr/src/glusterfs3.h | 59 ++++++++++ + rpc/xdr/src/glusterfs4-xdr.x | 12 ++ + rpc/xdr/src/libgfxdr.sym | 3 +- + tests/basic/ctime/ctime-mdata-legacy-files.t | 83 +++++++++++++ + xlators/features/utime/src/utime-messages.h | 3 +- + xlators/features/utime/src/utime.c | 154 ++++++++++++++++++++++--- + xlators/storage/posix/src/posix-inode-fd-ops.c | 17 +++ + xlators/storage/posix/src/posix-messages.h | 3 +- + xlators/storage/posix/src/posix-metadata.c | 103 ++++++++++------- + xlators/storage/posix/src/posix-metadata.h | 4 + + 16 files changed, 475 insertions(+), 57 deletions(-) + create mode 100644 tests/basic/ctime/ctime-mdata-legacy-files.t + +diff --git a/libglusterfs/src/dict.c b/libglusterfs/src/dict.c +index 6917df9..d8cdda4 100644 +--- a/libglusterfs/src/dict.c ++++ b/libglusterfs/src/dict.c +@@ -124,6 +124,7 @@ int32_t + is_data_equal(data_t *one, data_t *two) + { + struct iatt *iatt1, *iatt2; ++ struct mdata_iatt *mdata_iatt1, *mdata_iatt2; + + if (!one || !two || !one->data || !two->data) { + gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, +@@ -188,6 +189,24 @@ is_data_equal(data_t *one, data_t *two) + */ + return 1; + } ++ if (one->data_type == GF_DATA_TYPE_MDATA) { ++ if ((one->len < sizeof(struct mdata_iatt)) || ++ (two->len < sizeof(struct mdata_iatt))) { ++ return 0; ++ } ++ mdata_iatt1 = (struct mdata_iatt *)one->data; ++ mdata_iatt2 = (struct mdata_iatt *)two->data; ++ ++ if (mdata_iatt1->ia_atime != mdata_iatt2->ia_atime || ++ mdata_iatt1->ia_mtime != mdata_iatt2->ia_mtime || ++ mdata_iatt1->ia_ctime != mdata_iatt2->ia_ctime || ++ mdata_iatt1->ia_atime_nsec != mdata_iatt2->ia_atime_nsec || ++ mdata_iatt1->ia_mtime_nsec != mdata_iatt2->ia_mtime_nsec || ++ mdata_iatt1->ia_ctime_nsec != mdata_iatt2->ia_ctime_nsec) { ++ return 0; ++ } ++ return 1; ++ } + + if (one->len != two->len) + return 0; +@@ -1078,6 +1097,7 @@ static char *data_type_name[GF_DATA_TYPE_MAX] = { + [GF_DATA_TYPE_PTR] = "pointer", + [GF_DATA_TYPE_GFUUID] = "gf-uuid", + [GF_DATA_TYPE_IATT] = "iatt", ++ [GF_DATA_TYPE_MDATA] = "mdata", + }; + + int64_t +@@ -2666,6 +2686,45 @@ err: + } + + int ++dict_set_mdata(dict_t *this, char *key, struct mdata_iatt *mdata, ++ bool is_static) ++{ ++ return dict_set_bin_common(this, key, mdata, sizeof(struct mdata_iatt), ++ is_static, GF_DATA_TYPE_MDATA); ++} ++ ++int ++dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata) ++{ ++ data_t *data = NULL; ++ int ret = -EINVAL; ++ ++ if (!this || !key || !mdata) { ++ goto err; ++ } ++ ret = dict_get_with_ref(this, key, &data); ++ if (ret < 0) { ++ goto err; ++ } ++ ++ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_MDATA, key, -EINVAL); ++ if (data->len < sizeof(struct mdata_iatt)) { ++ gf_msg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF, ++ "data value for '%s' is smaller than expected", key); ++ ret = -ENOBUFS; ++ goto err; ++ } ++ ++ 
memcpy(mdata, data->data, min(data->len, sizeof(struct mdata_iatt))); ++ ++err: ++ if (data) ++ data_unref(data); ++ ++ return ret; ++} ++ ++int + dict_set_iatt(dict_t *this, char *key, struct iatt *iatt, bool is_static) + { + return dict_set_bin_common(this, key, iatt, sizeof(struct iatt), is_static, +diff --git a/libglusterfs/src/glusterfs/dict.h b/libglusterfs/src/glusterfs/dict.h +index 022f564..8239c7a 100644 +--- a/libglusterfs/src/glusterfs/dict.h ++++ b/libglusterfs/src/glusterfs/dict.h +@@ -392,6 +392,11 @@ GF_MUST_CHECK int + dict_set_iatt(dict_t *this, char *key, struct iatt *iatt, bool is_static); + GF_MUST_CHECK int + dict_get_iatt(dict_t *this, char *key, struct iatt *iatt); ++GF_MUST_CHECK int ++dict_set_mdata(dict_t *this, char *key, struct mdata_iatt *mdata, ++ bool is_static); ++GF_MUST_CHECK int ++dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata); + + void + dict_dump_to_statedump(dict_t *dict, char *dict_name, char *domain); +diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h +index 2cedf1a..79c93ae 100644 +--- a/libglusterfs/src/glusterfs/glusterfs.h ++++ b/libglusterfs/src/glusterfs/glusterfs.h +@@ -229,6 +229,9 @@ enum gf_internal_fop_indicator { + #define VIRTUAL_QUOTA_XATTR_CLEANUP_KEY "glusterfs.quota-xattr-cleanup" + #define QUOTA_READ_ONLY_KEY "trusted.glusterfs.quota.read-only" + ++/* ctime related */ ++#define CTIME_MDATA_XDATA_KEY "set-ctime-mdata" ++ + /* afr related */ + #define AFR_XATTR_PREFIX "trusted.afr" + +diff --git a/libglusterfs/src/glusterfs/iatt.h b/libglusterfs/src/glusterfs/iatt.h +index bee7a0a..f03d68b 100644 +--- a/libglusterfs/src/glusterfs/iatt.h ++++ b/libglusterfs/src/glusterfs/iatt.h +@@ -92,6 +92,15 @@ struct old_iatt { + uint32_t ia_ctime_nsec; + }; + ++struct mdata_iatt { ++ int64_t ia_atime; /* last access time */ ++ int64_t ia_mtime; /* last modification time */ ++ int64_t ia_ctime; /* last status change time */ ++ uint32_t ia_atime_nsec; ++ uint32_t ia_mtime_nsec; ++ uint32_t ia_ctime_nsec; ++}; ++ + /* 64-bit mask for valid members in struct iatt. 
*/ + #define IATT_TYPE 0x0000000000000001U + #define IATT_MODE 0x0000000000000002U +@@ -313,6 +322,17 @@ st_mode_from_ia(ia_prot_t prot, ia_type_t type) + return st_mode; + } + ++static inline void ++iatt_to_mdata(struct mdata_iatt *mdata, struct iatt *iatt) ++{ ++ mdata->ia_atime = iatt->ia_atime; ++ mdata->ia_atime_nsec = iatt->ia_atime_nsec; ++ mdata->ia_mtime = iatt->ia_mtime; ++ mdata->ia_mtime_nsec = iatt->ia_mtime_nsec; ++ mdata->ia_ctime = iatt->ia_ctime; ++ mdata->ia_ctime_nsec = iatt->ia_ctime_nsec; ++} ++ + static inline int + iatt_from_stat(struct iatt *iatt, struct stat *stat) + { +diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym +index 4dca7de..b161380 100644 +--- a/libglusterfs/src/libglusterfs.sym ++++ b/libglusterfs/src/libglusterfs.sym +@@ -380,6 +380,7 @@ dict_get_bin + dict_get_double + dict_get_gfuuid + dict_get_iatt ++dict_get_mdata + dict_get_int16 + dict_get_int32 + dict_get_int32n +@@ -417,6 +418,7 @@ dict_set_dynstrn + dict_set_dynstr_with_alloc + dict_set_gfuuid + dict_set_iatt ++dict_set_mdata + dict_set_int16 + dict_set_int32 + dict_set_int32n +@@ -509,6 +511,7 @@ fop_lease_stub + fop_link_stub + fop_lk_stub + fop_log_level ++fop_lookup_cbk_stub + fop_lookup_stub + fop_mkdir_stub + fop_mknod_stub +diff --git a/rpc/xdr/src/glusterfs-fops.x b/rpc/xdr/src/glusterfs-fops.x +index bacf0773..651f8de 100644 +--- a/rpc/xdr/src/glusterfs-fops.x ++++ b/rpc/xdr/src/glusterfs-fops.x +@@ -245,5 +245,6 @@ enum gf_dict_data_type_t { + GF_DATA_TYPE_PTR, + GF_DATA_TYPE_GFUUID, + GF_DATA_TYPE_IATT, ++ GF_DATA_TYPE_MDATA, + GF_DATA_TYPE_MAX + }; +diff --git a/rpc/xdr/src/glusterfs3.h b/rpc/xdr/src/glusterfs3.h +index 5521f4d..86b3a4c 100644 +--- a/rpc/xdr/src/glusterfs3.h ++++ b/rpc/xdr/src/glusterfs3.h +@@ -585,6 +585,34 @@ out: + } + + static inline void ++gfx_mdata_iatt_to_mdata_iatt(struct gfx_mdata_iatt *gf_mdata_iatt, ++ struct mdata_iatt *mdata_iatt) ++{ ++ if (!mdata_iatt || !gf_mdata_iatt) ++ return; ++ mdata_iatt->ia_atime = gf_mdata_iatt->ia_atime; ++ mdata_iatt->ia_atime_nsec = gf_mdata_iatt->ia_atime_nsec; ++ mdata_iatt->ia_mtime = gf_mdata_iatt->ia_mtime; ++ mdata_iatt->ia_mtime_nsec = gf_mdata_iatt->ia_mtime_nsec; ++ mdata_iatt->ia_ctime = gf_mdata_iatt->ia_ctime; ++ mdata_iatt->ia_ctime_nsec = gf_mdata_iatt->ia_ctime_nsec; ++} ++ ++static inline void ++gfx_mdata_iatt_from_mdata_iatt(struct gfx_mdata_iatt *gf_mdata_iatt, ++ struct mdata_iatt *mdata_iatt) ++{ ++ if (!mdata_iatt || !gf_mdata_iatt) ++ return; ++ gf_mdata_iatt->ia_atime = mdata_iatt->ia_atime; ++ gf_mdata_iatt->ia_atime_nsec = mdata_iatt->ia_atime_nsec; ++ gf_mdata_iatt->ia_mtime = mdata_iatt->ia_mtime; ++ gf_mdata_iatt->ia_mtime_nsec = mdata_iatt->ia_mtime_nsec; ++ gf_mdata_iatt->ia_ctime = mdata_iatt->ia_ctime; ++ gf_mdata_iatt->ia_ctime_nsec = mdata_iatt->ia_ctime_nsec; ++} ++ ++static inline void + gfx_stat_to_iattx(struct gfx_iattx *gf_stat, struct iatt *iatt) + { + if (!iatt || !gf_stat) +@@ -721,6 +749,12 @@ dict_to_xdr(dict_t *this, gfx_dict *dict) + gfx_stat_from_iattx(&xpair->value.gfx_value_u.iatt, + (struct iatt *)dpair->value->data); + break; ++ case GF_DATA_TYPE_MDATA: ++ index++; ++ gfx_mdata_iatt_from_mdata_iatt( ++ &xpair->value.gfx_value_u.mdata_iatt, ++ (struct mdata_iatt *)dpair->value->data); ++ break; + case GF_DATA_TYPE_GFUUID: + index++; + memcpy(&xpair->value.gfx_value_u.uuid, dpair->value->data, +@@ -787,6 +821,7 @@ xdr_to_dict(gfx_dict *dict, dict_t **to) + dict_t *this = NULL; + unsigned char *uuid = NULL; + struct iatt *iatt = NULL; ++ 
struct mdata_iatt *mdata_iatt = NULL; + + if (!to || !dict) + goto out; +@@ -854,6 +889,30 @@ xdr_to_dict(gfx_dict *dict, dict_t **to) + gfx_stat_to_iattx(&xpair->value.gfx_value_u.iatt, iatt); + ret = dict_set_iatt(this, key, iatt, false); + break; ++ case GF_DATA_TYPE_MDATA: ++ mdata_iatt = GF_CALLOC(1, sizeof(struct mdata_iatt), ++ gf_common_mt_char); ++ if (!mdata_iatt) { ++ errno = ENOMEM; ++ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY, ++ "failed to allocate memory. key: %s", key); ++ ret = -1; ++ goto out; ++ } ++ gfx_mdata_iatt_to_mdata_iatt( ++ &xpair->value.gfx_value_u.mdata_iatt, mdata_iatt); ++ ret = dict_set_mdata(this, key, mdata_iatt, false); ++ if (ret != 0) { ++ GF_FREE(mdata_iatt); ++ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, ++ LG_MSG_DICT_SET_FAILED, ++ "failed to set the key (%s)" ++ " into dict", ++ key); ++ ret = -1; ++ goto out; ++ } ++ break; + case GF_DATA_TYPE_PTR: + case GF_DATA_TYPE_STR_OLD: + value = GF_MALLOC(xpair->value.gfx_value_u.other.other_len + 1, +diff --git a/rpc/xdr/src/glusterfs4-xdr.x b/rpc/xdr/src/glusterfs4-xdr.x +index bec0872..6f92b70 100644 +--- a/rpc/xdr/src/glusterfs4-xdr.x ++++ b/rpc/xdr/src/glusterfs4-xdr.x +@@ -46,6 +46,16 @@ struct gfx_iattx { + unsigned int mode; /* type of file and rwx mode */ + }; + ++struct gfx_mdata_iatt { ++ hyper ia_atime; /* last access time */ ++ hyper ia_mtime; /* last modification time */ ++ hyper ia_ctime; /* last status change time */ ++ ++ unsigned int ia_atime_nsec; ++ unsigned int ia_mtime_nsec; ++ unsigned int ia_ctime_nsec; ++}; ++ + union gfx_value switch (gf_dict_data_type_t type) { + case GF_DATA_TYPE_INT: + hyper value_int; +@@ -62,6 +72,8 @@ union gfx_value switch (gf_dict_data_type_t type) { + case GF_DATA_TYPE_PTR: + case GF_DATA_TYPE_STR_OLD: + opaque other<>; ++ case GF_DATA_TYPE_MDATA: ++ gfx_mdata_iatt mdata_iatt; + }; + + /* AUTH */ +diff --git a/rpc/xdr/src/libgfxdr.sym b/rpc/xdr/src/libgfxdr.sym +index 22cdf30..dd4ac85 100644 +--- a/rpc/xdr/src/libgfxdr.sym ++++ b/rpc/xdr/src/libgfxdr.sym +@@ -251,6 +251,7 @@ xdr_to_write3args + xdr_vector_round_up + xdr_gfx_read_rsp + xdr_gfx_iattx ++xdr_gfx_mdata_iatt + xdr_gfx_value + xdr_gfx_dict_pair + xdr_gfx_dict +@@ -344,4 +345,4 @@ xdr_compound_req_v2 + xdr_gfx_compound_req + xdr_compound_rsp_v2 + xdr_gfx_compound_rsp +-xdr_gfx_copy_file_range_req +\ No newline at end of file ++xdr_gfx_copy_file_range_req +diff --git a/tests/basic/ctime/ctime-mdata-legacy-files.t b/tests/basic/ctime/ctime-mdata-legacy-files.t +new file mode 100644 +index 0000000..2e782d5 +--- /dev/null ++++ b/tests/basic/ctime/ctime-mdata-legacy-files.t +@@ -0,0 +1,83 @@ ++#!/bin/bash ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++. 
$(dirname $0)/../../afr.rc ++cleanup; ++ ++############################################################################### ++#Replica volume ++ ++TEST glusterd ++TEST pidof glusterd ++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} ++TEST $CLI volume set $V0 performance.stat-prefetch off ++TEST $CLI volume start $V0 ++ ++TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0; ++ ++#Disable ctime and create file, file doesn't contain "trusted.glusterfs.mdata" xattr ++TEST $CLI volume set $V0 ctime off ++ ++TEST "mkdir $M0/DIR" ++TEST "echo hello_world > $M0/DIR/FILE" ++ ++#Verify absence of xattr ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE" ++ ++#Enable ctime ++TEST $CLI volume set $V0 ctime on ++sleep 3 ++TEST stat $M0/DIR/FILE ++ ++#Verify presence "trusted.glusterfs.mdata" xattr on backend ++#The lookup above should have created xattr ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE" ++ ++############################################################################### ++#Disperse Volume ++ ++TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2} ++TEST $CLI volume set $V1 performance.stat-prefetch off ++TEST $CLI volume start $V1 ++ ++TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M1; ++ ++#Disable ctime and create file, file doesn't contain "trusted.glusterfs.mdata" xattr ++TEST $CLI volume set $V1 ctime off ++TEST "mkdir $M1/DIR" ++TEST "echo hello_world > $M1/DIR/FILE" ++ ++#Verify absence of xattr ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE" ++ ++#Enable ctime ++TEST $CLI volume set $V1 ctime on ++sleep 3 ++TEST stat $M1/DIR/FILE ++ ++#Verify presence "trusted.glusterfs.mdata" xattr on backend ++#The lookup above should have created xattr 
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR" ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE" ++ ++cleanup; ++############################################################################### +diff --git a/xlators/features/utime/src/utime-messages.h b/xlators/features/utime/src/utime-messages.h +index bac18ab..bd40265 100644 +--- a/xlators/features/utime/src/utime-messages.h ++++ b/xlators/features/utime/src/utime-messages.h +@@ -23,6 +23,7 @@ + * glfs-message-id.h. + */ + +-GLFS_MSGID(UTIME, UTIME_MSG_NO_MEMORY); ++GLFS_MSGID(UTIME, UTIME_MSG_NO_MEMORY, UTIME_MSG_SET_MDATA_FAILED, ++ UTIME_MSG_DICT_SET_FAILED); + + #endif /* __UTIME_MESSAGES_H__ */ +diff --git a/xlators/features/utime/src/utime.c b/xlators/features/utime/src/utime.c +index 877c751..2a986e7 100644 +--- a/xlators/features/utime/src/utime.c ++++ b/xlators/features/utime/src/utime.c +@@ -9,8 +9,10 @@ + */ + + #include "utime.h" ++#include "utime-helpers.h" + #include "utime-messages.h" + #include "utime-mem-types.h" ++#include + + int32_t + gf_utime_invalidate(xlator_t *this, inode_t *inode) +@@ -133,6 +135,124 @@ mem_acct_init(xlator_t *this) + } + + int32_t ++gf_utime_set_mdata_setxattr_cbk(call_frame_t *frame, void *cookie, ++ xlator_t *this, int op_ret, int op_errno, ++ dict_t *xdata) ++{ ++ /* Don't fail lookup if mdata setxattr fails */ ++ if (op_ret) { ++ gf_msg(this->name, GF_LOG_ERROR, op_errno, UTIME_MSG_SET_MDATA_FAILED, ++ "dict set of key for set-ctime-mdata failed"); ++ } ++ call_resume(frame->local); ++ return 0; ++} ++ ++int32_t ++gf_utime_set_mdata_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, ++ int32_t op_ret, int32_t op_errno, inode_t *inode, ++ struct iatt *stbuf, dict_t *xdata, ++ struct iatt *postparent) ++{ ++ dict_t *dict = NULL; ++ struct mdata_iatt *mdata = NULL; ++ int ret = 0; ++ loc_t loc = { ++ 0, ++ }; ++ ++ if (!op_ret && dict_get(xdata, GF_XATTR_MDATA_KEY) == NULL) { ++ dict = dict_new(); ++ if (!dict) { ++ op_errno = ENOMEM; ++ goto err; ++ } ++ mdata = GF_MALLOC(sizeof(struct mdata_iatt), gf_common_mt_char); ++ if (mdata == NULL) { ++ op_errno = ENOMEM; ++ goto err; ++ } ++ iatt_to_mdata(mdata, stbuf); ++ ret = dict_set_mdata(dict, CTIME_MDATA_XDATA_KEY, mdata, _gf_false); ++ if (ret < 0) { ++ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, UTIME_MSG_NO_MEMORY, ++ "dict set of key for set-ctime-mdata failed"); ++ goto err; ++ } ++ frame->local = fop_lookup_cbk_stub(frame, default_lookup_cbk, op_ret, ++ op_errno, inode, stbuf, xdata, ++ postparent); ++ if (!frame->local) { ++ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, UTIME_MSG_NO_MEMORY, ++ "lookup_cbk stub allocation failed"); ++ goto stub_err; ++ } ++ ++ loc.inode = inode_ref(inode); ++ gf_uuid_copy(loc.gfid, stbuf->ia_gfid); ++ STACK_WIND(frame, gf_utime_set_mdata_setxattr_cbk, FIRST_CHILD(this), ++ FIRST_CHILD(this)->fops->setxattr, &loc, dict, 0, NULL); ++ ++ dict_unref(dict); ++ 
inode_unref(loc.inode); ++ return 0; ++ } ++ ++ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, stbuf, xdata, ++ postparent); ++ return 0; ++ ++err: ++ if (mdata) { ++ GF_FREE(mdata); ++ } ++stub_err: ++ if (dict) { ++ dict_unref(dict); ++ } ++ STACK_UNWIND_STRICT(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL); ++ return 0; ++} ++ ++int ++gf_utime_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata) ++{ ++ int op_errno = -1; ++ int ret = -1; ++ ++ VALIDATE_OR_GOTO(frame, err); ++ VALIDATE_OR_GOTO(this, err); ++ VALIDATE_OR_GOTO(loc, err); ++ VALIDATE_OR_GOTO(loc->inode, err); ++ ++ xdata = xdata ? dict_ref(xdata) : dict_new(); ++ if (!xdata) { ++ op_errno = ENOMEM; ++ goto err; ++ } ++ ++ ret = dict_set_int8(xdata, GF_XATTR_MDATA_KEY, 1); ++ if (ret < 0) { ++ gf_msg(this->name, GF_LOG_WARNING, -ret, UTIME_MSG_DICT_SET_FAILED, ++ "%s: Unable to set dict value for %s", loc->path, ++ GF_XATTR_MDATA_KEY); ++ op_errno = -ret; ++ goto free_dict; ++ } ++ ++ STACK_WIND(frame, gf_utime_set_mdata_lookup_cbk, FIRST_CHILD(this), ++ FIRST_CHILD(this)->fops->lookup, loc, xdata); ++ dict_unref(xdata); ++ return 0; ++ ++free_dict: ++ dict_unref(xdata); ++err: ++ STACK_UNWIND_STRICT(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL); ++ return 0; ++} ++ ++int32_t + init(xlator_t *this) + { + utime_priv_t *utime = NULL; +@@ -182,19 +302,27 @@ notify(xlator_t *this, int event, void *data, ...) + } + + struct xlator_fops fops = { +- /* TODO: Need to go through other fops and +- * check if they modify time attributes +- */ +- .rename = gf_utime_rename, .mknod = gf_utime_mknod, +- .readv = gf_utime_readv, .fremovexattr = gf_utime_fremovexattr, +- .open = gf_utime_open, .create = gf_utime_create, +- .mkdir = gf_utime_mkdir, .writev = gf_utime_writev, +- .rmdir = gf_utime_rmdir, .fallocate = gf_utime_fallocate, +- .truncate = gf_utime_truncate, .symlink = gf_utime_symlink, +- .zerofill = gf_utime_zerofill, .link = gf_utime_link, +- .ftruncate = gf_utime_ftruncate, .unlink = gf_utime_unlink, +- .setattr = gf_utime_setattr, .fsetattr = gf_utime_fsetattr, +- .opendir = gf_utime_opendir, .removexattr = gf_utime_removexattr, ++ .rename = gf_utime_rename, ++ .mknod = gf_utime_mknod, ++ .readv = gf_utime_readv, ++ .fremovexattr = gf_utime_fremovexattr, ++ .open = gf_utime_open, ++ .create = gf_utime_create, ++ .mkdir = gf_utime_mkdir, ++ .writev = gf_utime_writev, ++ .rmdir = gf_utime_rmdir, ++ .fallocate = gf_utime_fallocate, ++ .truncate = gf_utime_truncate, ++ .symlink = gf_utime_symlink, ++ .zerofill = gf_utime_zerofill, ++ .link = gf_utime_link, ++ .ftruncate = gf_utime_ftruncate, ++ .unlink = gf_utime_unlink, ++ .setattr = gf_utime_setattr, ++ .fsetattr = gf_utime_fsetattr, ++ .opendir = gf_utime_opendir, ++ .removexattr = gf_utime_removexattr, ++ .lookup = gf_utime_lookup, + }; + struct xlator_cbks cbks = { + .invalidate = gf_utime_invalidate, +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index ea3b69c..d22bbc2 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -2625,6 +2625,9 @@ posix_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict, + gf_cs_obj_state state = -1; + int i = 0; + int len; ++ struct mdata_iatt mdata_iatt = { ++ 0, ++ }; + + DECLARE_OLD_FS_ID_VAR; + SET_FS_ID(frame->root->uid, frame->root->gid); +@@ -2638,6 +2641,20 @@ posix_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict, + priv = 
this->private; + DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, op_ret, op_errno, out); + ++ ret = dict_get_mdata(dict, CTIME_MDATA_XDATA_KEY, &mdata_iatt); ++ if (ret == 0) { ++ /* This is initiated by lookup when ctime feature is enabled to create ++ * "trusted.glusterfs.mdata" xattr if not present. These are the files ++ * which were created when ctime feature is disabled. ++ */ ++ ret = posix_set_mdata_xattr_legacy_files(this, loc->inode, &mdata_iatt, ++ &op_errno); ++ if (ret != 0) { ++ op_ret = -1; ++ } ++ goto out; ++ } ++ + MAKE_INODE_HANDLE(real_path, this, loc, NULL); + if (!real_path) { + op_ret = -1; +diff --git a/xlators/storage/posix/src/posix-messages.h b/xlators/storage/posix/src/posix-messages.h +index 3229275..15e23ff 100644 +--- a/xlators/storage/posix/src/posix-messages.h ++++ b/xlators/storage/posix/src/posix-messages.h +@@ -68,6 +68,7 @@ GLFS_MSGID(POSIX, P_MSG_XATTR_FAILED, P_MSG_NULL_GFID, P_MSG_FCNTL_FAILED, + P_MSG_FALLOCATE_FAILED, P_MSG_STOREMDATA_FAILED, + P_MSG_FETCHMDATA_FAILED, P_MSG_GETMDATA_FAILED, + P_MSG_SETMDATA_FAILED, P_MSG_FRESHFILE, P_MSG_MUTEX_FAILED, +- P_MSG_COPY_FILE_RANGE_FAILED, P_MSG_TIMER_DELETE_FAILED); ++ P_MSG_COPY_FILE_RANGE_FAILED, P_MSG_TIMER_DELETE_FAILED, ++ P_MSG_NOMEM); + + #endif /* !_GLUSTERD_MESSAGES_H_ */ +diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c +index 5a5e6cd..647c0bb 100644 +--- a/xlators/storage/posix/src/posix-metadata.c ++++ b/xlators/storage/posix/src/posix-metadata.c +@@ -245,6 +245,10 @@ __posix_get_mdata_xattr(xlator_t *this, const char *real_path, int _fd, + if (ret == -1 || !mdata) { + mdata = GF_CALLOC(1, sizeof(posix_mdata_t), gf_posix_mt_mdata_attr); + if (!mdata) { ++ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, P_MSG_NOMEM, ++ "Could not allocate mdata. file: %s: gfid: %s", ++ real_path ? real_path : "null", ++ inode ? uuid_utoa(inode->gfid) : "null"); + ret = -1; + goto out; + } +@@ -262,18 +266,8 @@ __posix_get_mdata_xattr(xlator_t *this, const char *real_path, int _fd, + } + } else { + /* Failed to get mdata from disk, xattr missing. +- * This happens on two cases. +- * 1. File is created before ctime is enabled. +- * 2. On new file creation. +- * +- * Do nothing, just return success. It is as +- * good as ctime feature is not enabled for this +- * file. For files created before ctime is enabled, +- * time attributes gets updated into ctime structure +- * once the metadata modification fop happens and +- * time attributes become consistent eventually. +- * For new files, it would obviously get updated +- * before the fop completion. ++ * This happens when the file is created before ++ * ctime is enabled. + */ + if (stbuf && op_errno != ENOENT) { + ret = 0; +@@ -345,6 +339,54 @@ posix_compare_timespec(struct timespec *first, struct timespec *second) + return first->tv_sec - second->tv_sec; + } + ++int ++posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, ++ struct mdata_iatt *mdata_iatt, int *op_errno) ++{ ++ posix_mdata_t *mdata = NULL; ++ int ret = 0; ++ ++ GF_VALIDATE_OR_GOTO("posix", this, out); ++ GF_VALIDATE_OR_GOTO(this->name, inode, out); ++ ++ LOCK(&inode->lock); ++ { ++ mdata = GF_CALLOC(1, sizeof(posix_mdata_t), gf_posix_mt_mdata_attr); ++ if (!mdata) { ++ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, P_MSG_NOMEM, ++ "Could not allocate mdata. 
gfid: %s", ++ uuid_utoa(inode->gfid)); ++ ret = -1; ++ *op_errno = ENOMEM; ++ goto unlock; ++ } ++ ++ mdata->version = 1; ++ mdata->flags = 0; ++ mdata->ctime.tv_sec = mdata_iatt->ia_ctime; ++ mdata->ctime.tv_nsec = mdata_iatt->ia_ctime_nsec; ++ mdata->atime.tv_sec = mdata_iatt->ia_atime; ++ mdata->atime.tv_nsec = mdata_iatt->ia_atime_nsec; ++ mdata->mtime.tv_sec = mdata_iatt->ia_mtime; ++ mdata->mtime.tv_nsec = mdata_iatt->ia_mtime_nsec; ++ ++ __inode_ctx_set1(inode, this, (uint64_t *)&mdata); ++ ++ ret = posix_store_mdata_xattr(this, NULL, -1, inode, mdata); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_STOREMDATA_FAILED, ++ "gfid: %s key:%s ", uuid_utoa(inode->gfid), ++ GF_XATTR_MDATA_KEY); ++ *op_errno = errno; ++ goto unlock; ++ } ++ } ++unlock: ++ UNLOCK(&inode->lock); ++out: ++ return ret; ++} ++ + /* posix_set_mdata_xattr updates the posix_mdata_t based on the flag + * in inode context and stores it on disk + */ +@@ -372,6 +414,9 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, + */ + mdata = GF_CALLOC(1, sizeof(posix_mdata_t), gf_posix_mt_mdata_attr); + if (!mdata) { ++ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, P_MSG_NOMEM, ++ "Could not allocate mdata. file: %s: gfid: %s", ++ real_path ? real_path : "null", uuid_utoa(inode->gfid)); + ret = -1; + goto unlock; + } +@@ -386,35 +431,11 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, + __inode_ctx_set1(inode, this, (uint64_t *)&mdata); + } else { + /* +- * This is the first time creating the time +- * attr. This happens when you activate this +- * feature, and the legacy file will not have +- * any xattr set. +- * +- * New files will create extended attributes. +- */ +- +- /* +- * TODO: This is wrong approach, because before +- * creating fresh xattr, we should consult +- * to all replica and/or distribution set. +- * +- * We should contact the time management +- * xlators, and ask them to create an xattr. +- */ +- /* We should not be relying on backend file's +- * time attributes to load the initial ctime +- * time attribute structure. This is incorrect +- * as each replica set would have witnessed the +- * file creation at different times. +- * +- * For new file creation, ctime, atime and mtime +- * should be same, hence initiate the ctime +- * structure with the time from the frame. But +- * for the files which were created before ctime +- * feature is enabled, this is not accurate but +- * still fine as the times would get eventually +- * accurate. ++ * This is the first time creating the time attr. This happens ++ * when you activate this feature. On this code path, only new ++ * files will create mdata xattr. The legacy files (files ++ * created before ctime enabled) will not have any xattr set. ++ * The xattr on legacy file will be set via lookup. 
+ */
+
+    /* Don't create xattr with utimes/utimensat, only update if
+diff --git a/xlators/storage/posix/src/posix-metadata.h b/xlators/storage/posix/src/posix-metadata.h
+index 3416148..dc25e59 100644
+--- a/xlators/storage/posix/src/posix-metadata.h
++++ b/xlators/storage/posix/src/posix-metadata.h
+@@ -53,5 +53,9 @@ posix_set_ctime_cfr(call_frame_t *frame, xlator_t *this,
+                     const char *real_path_in, int fd_in, inode_t *inode_in,
+                     struct iatt *stbuf_in, const char *read_path_put,
+                     int fd_out, inode_t *inode_out, struct iatt *stbuf_out);
++int
++posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode,
++                                   struct mdata_iatt *mdata_iatt,
++                                   int *op_errno);
+
+ #endif /* _POSIX_METADATA_H */
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0261-features-utime-Fix-mem_put-crash.patch b/SOURCES/0261-features-utime-Fix-mem_put-crash.patch
new file mode 100644
index 0000000..2c3fe9e
--- /dev/null
+++ b/SOURCES/0261-features-utime-Fix-mem_put-crash.patch
@@ -0,0 +1,52 @@
+From 1aa175f353325775517daf1d48a19799e0cafc7a Mon Sep 17 00:00:00 2001
+From: Pranith Kumar K
+Date: Mon, 22 Jul 2019 20:55:33 +0530
+Subject: [PATCH 261/261] features/utime: Fix mem_put crash
+
+Problem:
+When frame->local is not null FRAME_DESTROY calls mem_put on it.
+Since the stub is already destroyed in call_resume(), it leads
+to a crash
+
+Fix:
+Set frame->local to NULL before calling call_resume()
+
+Backport of:
+ > Patch: https://review.gluster.org/23091
+ > fixes: bz#1593542
+ > Change-Id: I0f8adf406f4cefdb89d7624ba7a9d9c2eedfb1de
+ > Signed-off-by: Pranith Kumar K
+
+BUG: 1715422
+Change-Id: I0f8adf406f4cefdb89d7624ba7a9d9c2eedfb1de
+Signed-off-by: Kotresh HR
+Reviewed-on: https://code.engineering.redhat.com/gerrit/176726
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/features/utime/src/utime.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/features/utime/src/utime.c b/xlators/features/utime/src/utime.c
+index 2a986e7..e3a80b6 100644
+--- a/xlators/features/utime/src/utime.c
++++ b/xlators/features/utime/src/utime.c
+@@ -139,12 +139,14 @@ gf_utime_set_mdata_setxattr_cbk(call_frame_t *frame, void *cookie,
+                                 xlator_t *this, int op_ret, int op_errno,
+                                 dict_t *xdata)
+ {
++    call_stub_t *stub = frame->local;
+     /* Don't fail lookup if mdata setxattr fails */
+     if (op_ret) {
+         gf_msg(this->name, GF_LOG_ERROR, op_errno, UTIME_MSG_SET_MDATA_FAILED,
+                "dict set of key for set-ctime-mdata failed");
+     }
+-    call_resume(frame->local);
++    frame->local = NULL;
++    call_resume(stub);
+     return 0;
+ }
+
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0262-glusterd-ctime-Disable-ctime-by-default.patch b/SOURCES/0262-glusterd-ctime-Disable-ctime-by-default.patch
new file mode 100644
index 0000000..95adf23
--- /dev/null
+++ b/SOURCES/0262-glusterd-ctime-Disable-ctime-by-default.patch
@@ -0,0 +1,78 @@
+From c44c9f2003b703d64a2a06c53f5a2b85e9dc7a11 Mon Sep 17 00:00:00 2001
+From: Kotresh HR
+Date: Wed, 31 Jul 2019 09:23:42 -0400
+Subject: [PATCH 262/262] glusterd/ctime: Disable ctime by default
+
+The ctime feature, in combination with gfid2path
+causes a performance dip on rename workloads. Hence
+disabling the feature by default.
+ +Change-Id: I280527eea4dc19bba39fb6a5e74760823a056dc9 +Label : DOWNSTREAM ONLY +BUG: 1713890 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/177421 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +Reviewed-by: Amar Tumballi Suryanarayan +--- + xlators/mgmt/glusterd/src/glusterd-volgen.c | 10 +++++++--- + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +- + xlators/storage/posix/src/posix-common.c | 2 +- + 3 files changed, 9 insertions(+), 5 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c +index 5e0214e..539e8a5 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c +@@ -4382,14 +4382,18 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, + goto out; + } + } +- /* a. ret will be -1 if features.ctime is not set in the volinfo->dict which +- * means ctime should be loaded into the graph. ++ /* a. ret will be 0 (returned default) if features.ctime is not set ++ * in the volinfo->dict which means ctime should not be loaded into ++ * the graph. It is disabled by default. + * b. ret will be 1 if features.ctime is explicitly turned on through + * volume set and in that case ctime should be loaded into the graph. + * c. ret will be 0 if features.ctime is explicitly turned off and in that + * case ctime shouldn't be loaded into the graph. + */ +- ret = dict_get_str_boolean(set_dict, "features.ctime", -1); ++ ret = dict_get_str_boolean(set_dict, "features.ctime", 0); ++ if (ret == -1) ++ goto out; ++ + if (conf->op_version >= GD_OP_VERSION_5_0 && ret) { + xl = volgen_graph_add(graph, "features/utime", volname); + if (!xl) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 7a83124..8ce338e 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -3680,7 +3680,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + {.key = "features.ctime", + .voltype = "features/utime", + .validate_fn = validate_boolean, +- .value = "on", ++ .value = "off", + .option = "!utime", + .op_version = GD_OP_VERSION_4_1_0, + .description = "enable/disable utime translator on the volume.", +diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c +index bfe2cb0..d738692 100644 +--- a/xlators/storage/posix/src/posix-common.c ++++ b/xlators/storage/posix/src/posix-common.c +@@ -1374,7 +1374,7 @@ struct volume_options posix_options[] = { + "SHA256 checksum. 
MD5 otherwise."}, + {.key = {"ctime"}, + .type = GF_OPTION_TYPE_BOOL, +- .default_value = "on", ++ .default_value = "off", + .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC, + .op_version = {GD_OP_VERSION_4_1_0}, + .tags = {"ctime"}, +-- +1.8.3.1 + diff --git a/SOURCES/0263-tests-fix-ctime-related-tests.patch b/SOURCES/0263-tests-fix-ctime-related-tests.patch new file mode 100644 index 0000000..3c8fabb --- /dev/null +++ b/SOURCES/0263-tests-fix-ctime-related-tests.patch @@ -0,0 +1,75 @@ +From 427dab431f7e8c4c8a01e9e9ed0892708a3d22d2 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 5 Aug 2019 08:33:29 +0530 +Subject: [PATCH 263/265] tests: fix ctime related tests + +With ctime being disabled by default, certain tests need to explicitly +turn this option off to sanitize the functionality + +Label: DOWNSTREAM ONLY + +Change-Id: Id70310b4b09e36bf66756fea447186bb073b5604 +BUG: 1704562 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/177704 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/basic/ctime/ctime-noatime.t | 1 + + tests/basic/ctime/ctime-readdir.t | 1 + + tests/bugs/glusterd/bug-1696046.t | 8 +++++--- + 3 files changed, 7 insertions(+), 3 deletions(-) + +diff --git a/tests/basic/ctime/ctime-noatime.t b/tests/basic/ctime/ctime-noatime.t +index 609ccbd..a6c8d9c 100644 +--- a/tests/basic/ctime/ctime-noatime.t ++++ b/tests/basic/ctime/ctime-noatime.t +@@ -20,6 +20,7 @@ function atime_compare { + TEST glusterd + TEST pidof glusterd + TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} ++TEST $CLI volume set $V0 ctime on + TEST $CLI volume set $V0 performance.stat-prefetch off + TEST $CLI volume set $V0 performance.read-ahead off + TEST $CLI volume set $V0 performance.quick-read off +diff --git a/tests/basic/ctime/ctime-readdir.t b/tests/basic/ctime/ctime-readdir.t +index 4564fc1..fa069b3 100644 +--- a/tests/basic/ctime/ctime-readdir.t ++++ b/tests/basic/ctime/ctime-readdir.t +@@ -9,6 +9,7 @@ TEST glusterd + + TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3}; + TEST $CLI volume set $V0 performance.stat-prefetch on ++TEST $CLI volume set $V0 ctime on + TEST $CLI volume set $V0 performance.readdir-ahead off + TEST $CLI volume start $V0; + +diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t +index e1c1eb2..f7992f5 100644 +--- a/tests/bugs/glusterd/bug-1696046.t ++++ b/tests/bugs/glusterd/bug-1696046.t +@@ -22,6 +22,8 @@ TEST pidof glusterd; + TEST $CLI volume set all cluster.brick-multiplex on + TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3}; + TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3}; ++TEST $CLI volume set $V0 ctime on ++TEST $CLI volume set $V1 ctime on + + ## Start volume and verify + TEST $CLI volume start $V0; +@@ -64,9 +66,9 @@ TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG + # Do some operation + touch $M0/file1 + +-# Check debug message debug message should be exist only for V0 +-# Server xlator is common in brick_mux so after enabling DEBUG log +-# some debug message should be available for other xlators like posix ++# Check debug message should exist only for V0 server xlator is common in ++# brick_mux so after enabling DEBUG log some debug message should be available ++# for other xlators like posix + + brick_log_file=$logdir"/bricks/"`brick-log-file-name` + nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l) +-- +1.8.3.1 + diff --git 
a/SOURCES/0264-gfapi-Fix-deadlock-while-processing-upcall.patch b/SOURCES/0264-gfapi-Fix-deadlock-while-processing-upcall.patch new file mode 100644 index 0000000..41ac9ee --- /dev/null +++ b/SOURCES/0264-gfapi-Fix-deadlock-while-processing-upcall.patch @@ -0,0 +1,259 @@ +From 52dc121c412de9c1cc3058d782b949dc7b25dc3e Mon Sep 17 00:00:00 2001 +From: Soumya Koduri +Date: Thu, 25 Jul 2019 12:56:12 +0530 +Subject: [PATCH 264/265] gfapi: Fix deadlock while processing upcall + +As mentioned in bug1733166, there could be potential deadlock +while processing upcalls depending on how each xlator choose +to act on it. The right way of fixing such issues +is to change rpc callback communication process. +- https://github.com/gluster/glusterfs/issues/697 + +Till then, making changes in gfapi layer to avoid any I/O +processing. + +This is backport of below mainline patch +> https://review.gluster.org/#/c/glusterfs/+/23108/ +> bz#1733166 +> https://review.gluster.org/#/c/glusterfs/+/23107/ (release-6) + +Change-Id: I2079e95339e5d761d5060707f4555cfacab95c83 +fixes: bz#1733520 +Signed-off-by: Soumya Koduri +Reviewed-on: https://code.engineering.redhat.com/gerrit/177675 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + api/src/glfs-fops.c | 164 +++++++++++++++++++++++++++++++++++++++++----------- + 1 file changed, 131 insertions(+), 33 deletions(-) + +diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c +index 396f18c..e6adea5 100644 +--- a/api/src/glfs-fops.c ++++ b/api/src/glfs-fops.c +@@ -34,7 +34,7 @@ + + struct upcall_syncop_args { + struct glfs *fs; +- struct glfs_upcall *up_arg; ++ struct gf_upcall upcall_data; + }; + + #define READDIRBUF_SIZE (sizeof(struct dirent) + GF_NAME_MAX + 1) +@@ -5716,8 +5716,28 @@ out: + static int + upcall_syncop_args_free(struct upcall_syncop_args *args) + { +- if (args && args->up_arg) +- GLFS_FREE(args->up_arg); ++ dict_t *dict = NULL; ++ struct gf_upcall *upcall_data = NULL; ++ ++ if (args) { ++ upcall_data = &args->upcall_data; ++ switch (upcall_data->event_type) { ++ case GF_UPCALL_CACHE_INVALIDATION: ++ dict = ((struct gf_upcall_cache_invalidation *)(upcall_data ++ ->data)) ++ ->dict; ++ break; ++ case GF_UPCALL_RECALL_LEASE: ++ dict = ((struct gf_upcall_recall_lease *)(upcall_data->data)) ++ ->dict; ++ break; ++ } ++ if (dict) ++ dict_unref(dict); ++ ++ GF_FREE(upcall_data->client_uid); ++ GF_FREE(upcall_data->data); ++ } + GF_FREE(args); + return 0; + } +@@ -5727,14 +5747,7 @@ glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque) + { + struct upcall_syncop_args *args = opaque; + +- /* Here we not using upcall_syncop_args_free as application +- * will be cleaning up the args->up_arg using glfs_free +- * post processing upcall. 
+- */ +- if (ret) { +- upcall_syncop_args_free(args); +- } else +- GF_FREE(args); ++ (void)upcall_syncop_args_free(args); + + return 0; + } +@@ -5743,29 +5756,17 @@ static int + glfs_cbk_upcall_syncop(void *opaque) + { + struct upcall_syncop_args *args = opaque; ++ struct gf_upcall *upcall_data = NULL; + struct glfs_upcall *up_arg = NULL; + struct glfs *fs; ++ int ret = -1; + + fs = args->fs; +- up_arg = args->up_arg; +- +- if (fs->up_cbk && up_arg) { +- (fs->up_cbk)(up_arg, fs->up_data); +- return 0; +- } +- +- return -1; +-} ++ upcall_data = &args->upcall_data; + +-static struct upcall_syncop_args * +-upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data) +-{ +- struct upcall_syncop_args *args = NULL; +- int ret = -1; +- struct glfs_upcall *up_arg = NULL; +- +- if (!fs || !upcall_data) ++ if (!upcall_data) { + goto out; ++ } + + up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall, + glfs_mt_upcall_entry_t); +@@ -5795,6 +5796,8 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data) + if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) { + gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_INVALID_ENTRY, + "Upcall_EVENT_NULL received. Skipping it."); ++ ret = 0; ++ GLFS_FREE(up_arg); + goto out; + } else if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, +@@ -5802,6 +5805,85 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data) + goto out; + } + ++ if (fs->up_cbk && up_arg) ++ (fs->up_cbk)(up_arg, fs->up_data); ++ ++ /* application takes care of calling glfs_free on up_arg post ++ * their processing */ ++ ++out: ++ return ret; ++} ++ ++static struct gf_upcall_cache_invalidation * ++gf_copy_cache_invalidation(struct gf_upcall_cache_invalidation *src) ++{ ++ struct gf_upcall_cache_invalidation *dst = NULL; ++ ++ if (!src) ++ goto out; ++ ++ dst = GF_CALLOC(1, sizeof(struct gf_upcall_cache_invalidation), ++ glfs_mt_upcall_entry_t); ++ ++ if (!dst) { ++ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, ++ "Upcall entry allocation failed."); ++ goto out; ++ } ++ ++ dst->flags = src->flags; ++ dst->expire_time_attr = src->expire_time_attr; ++ dst->stat = src->stat; ++ dst->p_stat = src->p_stat; ++ dst->oldp_stat = src->oldp_stat; ++ ++ if (src->dict) ++ dst->dict = dict_copy_with_ref(src->dict, NULL); ++ ++ return dst; ++out: ++ return NULL; ++} ++ ++static struct gf_upcall_recall_lease * ++gf_copy_recall_lease(struct gf_upcall_recall_lease *src) ++{ ++ struct gf_upcall_recall_lease *dst = NULL; ++ ++ if (!src) ++ goto out; ++ ++ dst = GF_CALLOC(1, sizeof(struct gf_upcall_recall_lease), ++ glfs_mt_upcall_entry_t); ++ ++ if (!dst) { ++ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, ++ "Upcall entry allocation failed."); ++ goto out; ++ } ++ ++ dst->lease_type = src->lease_type; ++ memcpy(dst->tid, src->tid, 16); ++ ++ if (src->dict) ++ dst->dict = dict_copy_with_ref(src->dict, NULL); ++ ++ return dst; ++out: ++ return NULL; ++} ++ ++static struct upcall_syncop_args * ++upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data) ++{ ++ struct upcall_syncop_args *args = NULL; ++ int ret = -1; ++ struct gf_upcall *t_data = NULL; ++ ++ if (!fs || !upcall_data) ++ goto out; ++ + args = GF_CALLOC(1, sizeof(struct upcall_syncop_args), + glfs_mt_upcall_entry_t); + if (!args) { +@@ -5819,15 +5901,31 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data) + * notification without taking any lock/ref. 
+ */ + args->fs = fs; +- args->up_arg = up_arg; ++ t_data = &(args->upcall_data); ++ t_data->client_uid = gf_strdup(upcall_data->client_uid); + +- /* application takes care of calling glfs_free on up_arg post +- * their processing */ ++ gf_uuid_copy(t_data->gfid, upcall_data->gfid); ++ t_data->event_type = upcall_data->event_type; ++ ++ switch (t_data->event_type) { ++ case GF_UPCALL_CACHE_INVALIDATION: ++ t_data->data = gf_copy_cache_invalidation( ++ (struct gf_upcall_cache_invalidation *)upcall_data->data); ++ break; ++ case GF_UPCALL_RECALL_LEASE: ++ t_data->data = gf_copy_recall_lease( ++ (struct gf_upcall_recall_lease *)upcall_data->data); ++ break; ++ } ++ ++ if (!t_data->data) ++ goto out; + + return args; + out: +- if (up_arg) { +- GLFS_FREE(up_arg); ++ if (ret) { ++ GF_FREE(args->upcall_data.client_uid); ++ GF_FREE(args); + } + + return NULL; +-- +1.8.3.1 + diff --git a/SOURCES/0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch b/SOURCES/0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch new file mode 100644 index 0000000..33663f0 --- /dev/null +++ b/SOURCES/0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch @@ -0,0 +1,47 @@ +From 7455900798446681fea1a2693fac9b423ad9722a Mon Sep 17 00:00:00 2001 +From: Csaba Henk +Date: Tue, 9 Jul 2019 11:06:49 +0200 +Subject: [PATCH 265/265] fuse: add missing GF_FREE to fuse_interrupt + +Upstream: +(Reviewed on https://review.gluster.org/c/glusterfs/+/23016) +> Change-Id: Id7e003e4a53d0a0057c1c84e1cd704c80a6cb015 +> Fixes: bz#1728047 +> Signed-off-by: Csaba Henk + +BUG: 1734423 +Change-Id: I50640bf9b56349ab9b07140bdce8a45a7d07ba7a +Signed-off-by: Csaba Henk +Reviewed-on: https://code.engineering.redhat.com/gerrit/177298 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mount/fuse/src/fuse-bridge.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c +index c05866b..1c946a2 100644 +--- a/xlators/mount/fuse/src/fuse-bridge.c ++++ b/xlators/mount/fuse/src/fuse-bridge.c +@@ -661,7 +661,7 @@ fuse_interrupt(xlator_t *this, fuse_in_header_t *finh, void *msg, + " failed to allocate timed message", + finh->unique, fii->unique); + +- return; ++ goto out; + } + + dmsg->fuse_out_header.unique = finh->unique; +@@ -673,6 +673,9 @@ fuse_interrupt(xlator_t *this, fuse_in_header_t *finh, void *msg, + + send_fuse_timed(this, dmsg); + } ++ ++out: ++ GF_FREE(finh); + } + + /* +-- +1.8.3.1 + diff --git a/SOURCES/0266-geo-rep-Fix-mount-broker-setup-issue.patch b/SOURCES/0266-geo-rep-Fix-mount-broker-setup-issue.patch new file mode 100644 index 0000000..bc4f84e --- /dev/null +++ b/SOURCES/0266-geo-rep-Fix-mount-broker-setup-issue.patch @@ -0,0 +1,63 @@ +From cb9d0fa4bd2664556f0564406037f9fb7fb781a6 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Wed, 31 Jul 2019 15:40:55 +0530 +Subject: [PATCH 266/276] geo-rep: Fix mount broker setup issue + +Even the use builtin 'type' command as in patch [1] +causes issues if argument in question is not part of PATH +environment variable for that user. This patch fixes the +same by doing source /etc/profile. This was already being +used in another part of script. 
+ +[1] https://review.gluster.org/23089 + +Backport of: + > Patch: https://review.gluster.org/23136 + > Change-Id: Iceb78835967ec6a4350983eec9af28398410c002 + > fixes: bz#1734738 + > Signed-off-by: Kotresh HR + +Change-Id: Iceb78835967ec6a4350983eec9af28398410c002 +BUG: 1734734 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/177867 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/src/gverify.sh | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh +index 692c1d6..f5f70d2 100755 +--- a/geo-replication/src/gverify.sh ++++ b/geo-replication/src/gverify.sh +@@ -180,6 +180,8 @@ function main() + > $log_file + + inet6=$7 ++ local cmd_line ++ local ver + + # Use FORCE_BLOCKER flag in the error message to differentiate + # between the errors which the force command should bypass +@@ -206,13 +208,14 @@ function main() + exit 1; + fi; + ++ cmd_line=$(cmd_slave); + if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then +- ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "type -p gluster" ++ ver=$(ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 bash -c "'$cmd_line'") + else +- ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "type -p gluster" ++ ver=$(ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 bash -c "'$cmd_line'") + fi + +- if [ $? -ne 0 ]; then ++ if [ -z "$ver" ]; then + echo "FORCE_BLOCKER|gluster command not found on $3 for user $2." > $log_file + exit 1; + fi; +-- +1.8.3.1 + diff --git a/SOURCES/0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch b/SOURCES/0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch new file mode 100644 index 0000000..0be69e8 --- /dev/null +++ b/SOURCES/0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch @@ -0,0 +1,143 @@ +From cf13847a6341b7519ed0dc51e3b9ecf12444a3e4 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Mon, 29 Jul 2019 16:22:10 +0530 +Subject: [PATCH 267/276] posix/ctime: Fix race during lookup ctime xattr heal + +Problem: +Ctime heals the ctime xattr ("trusted.glusterfs.mdata") in lookup +if it's not present. In a multi client scenario, there is a race +which results in updating the ctime xattr to older value. + +e.g. Let c1 and c2 be two clients and file1 be the file which +doesn't have the ctime xattr. Let the ctime of file1 be t1. +(from backend, ctime heals time attributes from backend when not present). + +Now following operations are done on mount +c1 -> ls -l /mnt/file1 | c2 -> ls -l /mnt/file1;echo "append" >> /mnt/file1; + +The race is that the both c1 and c2 didn't fetch the ctime xattr in lookup, +so both of them tries to heal ctime to time 't1'. If c2 wins the race and +appends the file before c1 heals it, it sets the time to 't1' and updates +it to 't2' (because of append). Now c1 proceeds to heal and sets it to 't1' +which is incorrect. + +Solution: +Compare the times during heal and only update the larger time. This is the +general approach used in ctime feature but got missed with healing legacy +files. 
+ +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23131/ + +>fixes: bz#1734299 +>Change-Id: I930bda192c64c3d49d0aed431ce23d3bc57e51b7 +>Signed-off-by: Kotresh HR + +BUG: 1734305 +Change-Id: I930bda192c64c3d49d0aed431ce23d3bc57e51b7 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/177866 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/storage/posix/src/posix-metadata.c | 76 +++++++++++++++++++++++------- + 1 file changed, 58 insertions(+), 18 deletions(-) + +diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c +index 647c0bb..57791fa 100644 +--- a/xlators/storage/posix/src/posix-metadata.c ++++ b/xlators/storage/posix/src/posix-metadata.c +@@ -344,33 +344,73 @@ posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, + struct mdata_iatt *mdata_iatt, int *op_errno) + { + posix_mdata_t *mdata = NULL; ++ posix_mdata_t imdata = { ++ 0, ++ }; + int ret = 0; ++ gf_boolean_t mdata_already_set = _gf_false; + + GF_VALIDATE_OR_GOTO("posix", this, out); + GF_VALIDATE_OR_GOTO(this->name, inode, out); + + LOCK(&inode->lock); + { +- mdata = GF_CALLOC(1, sizeof(posix_mdata_t), gf_posix_mt_mdata_attr); +- if (!mdata) { +- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, P_MSG_NOMEM, +- "Could not allocate mdata. gfid: %s", +- uuid_utoa(inode->gfid)); +- ret = -1; +- *op_errno = ENOMEM; +- goto unlock; +- } ++ ret = __inode_ctx_get1(inode, this, (uint64_t *)&mdata); ++ if (ret == 0 && mdata) { ++ mdata_already_set = _gf_true; ++ } else if (ret == -1 || !mdata) { ++ mdata = GF_CALLOC(1, sizeof(posix_mdata_t), gf_posix_mt_mdata_attr); ++ if (!mdata) { ++ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, P_MSG_NOMEM, ++ "Could not allocate mdata. gfid: %s", ++ uuid_utoa(inode->gfid)); ++ ret = -1; ++ *op_errno = ENOMEM; ++ goto unlock; ++ } ++ ++ ret = posix_fetch_mdata_xattr(this, NULL, -1, inode, (void *)mdata, ++ op_errno); ++ if (ret == 0) { ++ /* Got mdata from disk. This is a race, another client ++ * has healed the xattr during lookup. 
So set it in inode ++ * ctx */ ++ __inode_ctx_set1(inode, this, (uint64_t *)&mdata); ++ mdata_already_set = _gf_true; ++ } else { ++ *op_errno = 0; ++ mdata->version = 1; ++ mdata->flags = 0; ++ mdata->ctime.tv_sec = mdata_iatt->ia_ctime; ++ mdata->ctime.tv_nsec = mdata_iatt->ia_ctime_nsec; ++ mdata->atime.tv_sec = mdata_iatt->ia_atime; ++ mdata->atime.tv_nsec = mdata_iatt->ia_atime_nsec; ++ mdata->mtime.tv_sec = mdata_iatt->ia_mtime; ++ mdata->mtime.tv_nsec = mdata_iatt->ia_mtime_nsec; + +- mdata->version = 1; +- mdata->flags = 0; +- mdata->ctime.tv_sec = mdata_iatt->ia_ctime; +- mdata->ctime.tv_nsec = mdata_iatt->ia_ctime_nsec; +- mdata->atime.tv_sec = mdata_iatt->ia_atime; +- mdata->atime.tv_nsec = mdata_iatt->ia_atime_nsec; +- mdata->mtime.tv_sec = mdata_iatt->ia_mtime; +- mdata->mtime.tv_nsec = mdata_iatt->ia_mtime_nsec; ++ __inode_ctx_set1(inode, this, (uint64_t *)&mdata); ++ } ++ } + +- __inode_ctx_set1(inode, this, (uint64_t *)&mdata); ++ if (mdata_already_set) { ++ /* Compare and update the larger time */ ++ imdata.ctime.tv_sec = mdata_iatt->ia_ctime; ++ imdata.ctime.tv_nsec = mdata_iatt->ia_ctime_nsec; ++ imdata.atime.tv_sec = mdata_iatt->ia_atime; ++ imdata.atime.tv_nsec = mdata_iatt->ia_atime_nsec; ++ imdata.mtime.tv_sec = mdata_iatt->ia_mtime; ++ imdata.mtime.tv_nsec = mdata_iatt->ia_mtime_nsec; ++ ++ if (posix_compare_timespec(&imdata.ctime, &mdata->ctime) > 0) { ++ mdata->ctime = imdata.ctime; ++ } ++ if (posix_compare_timespec(&imdata.mtime, &mdata->mtime) > 0) { ++ mdata->mtime = imdata.mtime; ++ } ++ if (posix_compare_timespec(&imdata.atime, &mdata->atime) > 0) { ++ mdata->atime = imdata.atime; ++ } ++ } + + ret = posix_store_mdata_xattr(this, NULL, -1, inode, mdata); + if (ret) { +-- +1.8.3.1 + diff --git a/SOURCES/0268-rpc-transport-have-default-listen-port.patch b/SOURCES/0268-rpc-transport-have-default-listen-port.patch new file mode 100644 index 0000000..176a907 --- /dev/null +++ b/SOURCES/0268-rpc-transport-have-default-listen-port.patch @@ -0,0 +1,46 @@ +From 872e344c0ab40c37b1872c32f5d5fddc097a1460 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 5 Aug 2019 21:16:35 +0530 +Subject: [PATCH 268/276] rpc/transport: have default listen-port + +With release-6, we now can have transport.socket.listen-port parameter +configurable in glusterd.vol. However the default value wasn't defined +in the code and this breaks the backward compatibility where if one has +a modified glusterd.vol file, then post upgrade the same file will be +retained and the new changes introduced as part of the release wouldn't +be available in the glusterd.vol. So it's important that for each new +options introduced in glusterd.vol file backward compatibility is +guaranteed. 
+ +> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23160/ + +>Fixes: bz#1737676 +>Change-Id: I776b28bff786320cda299fe673d824024dc9803e +>Signed-off-by: Atin Mukherjee + +BUG: 1734534 +Change-Id: I776b28bff786320cda299fe673d824024dc9803e +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/177862 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + rpc/rpc-transport/socket/src/name.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/rpc/rpc-transport/socket/src/name.c b/rpc/rpc-transport/socket/src/name.c +index ca14402..7f18cc4 100644 +--- a/rpc/rpc-transport/socket/src/name.c ++++ b/rpc/rpc-transport/socket/src/name.c +@@ -367,6 +367,8 @@ af_inet_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, + listen_port_data = dict_get(options, "transport.socket.listen-port"); + if (listen_port_data) { + listen_port = data_to_uint16(listen_port_data); ++ } else { ++ listen_port = GF_DEFAULT_SOCKET_LISTEN_PORT; + } + + listen_host_data = dict_get(options, "transport.socket.bind-address"); +-- +1.8.3.1 + diff --git a/SOURCES/0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch b/SOURCES/0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch new file mode 100644 index 0000000..f2cb5f2 --- /dev/null +++ b/SOURCES/0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch @@ -0,0 +1,58 @@ +From 7c2d6e82d7d3430ad8a557b6ae726765f7e874e9 Mon Sep 17 00:00:00 2001 +From: Kinglong Mee +Date: Fri, 12 Apr 2019 11:35:55 +0800 +Subject: [PATCH 269/276] ec: fix truncate lock to cover the write in tuncate + clean + +ec_truncate_clean does writing under the lock granted for truncate, +but the lock is calculated by ec_adjust_offset_up, so that, +the write in ec_truncate_clean is out of lock. 
+ +fixes: bz#1732770 +Upstream-patch: https://review.gluster.org/c/glusterfs/+/22552 +Change-Id: Idbe1fd48d26afe49c36b77db9f12e0907f5a4134 +Signed-off-by: Kinglong Mee +Reviewed-on: https://code.engineering.redhat.com/gerrit/177973 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-inode-write.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index a903664..ea55140 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -1405,6 +1405,7 @@ int32_t + ec_manager_truncate(ec_fop_data_t *fop, int32_t state) + { + ec_cbk_data_t *cbk; ++ off_t offset_down; + + switch (state) { + case EC_STATE_INIT: +@@ -1416,16 +1417,19 @@ ec_manager_truncate(ec_fop_data_t *fop, int32_t state) + /* Fall through */ + + case EC_STATE_LOCK: ++ offset_down = fop->user_size; ++ ec_adjust_offset_down(fop->xl->private, &offset_down, _gf_true); ++ + if (fop->id == GF_FOP_TRUNCATE) { + ec_lock_prepare_inode( + fop, &fop->loc[0], + EC_UPDATE_DATA | EC_UPDATE_META | EC_QUERY_INFO, +- fop->offset, EC_RANGE_FULL); ++ offset_down, EC_RANGE_FULL); + } else { + ec_lock_prepare_fd( + fop, fop->fd, + EC_UPDATE_DATA | EC_UPDATE_META | EC_QUERY_INFO, +- fop->offset, EC_RANGE_FULL); ++ offset_down, EC_RANGE_FULL); + } + ec_lock(fop); + +-- +1.8.3.1 + diff --git a/SOURCES/0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch b/SOURCES/0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch new file mode 100644 index 0000000..5015e2c --- /dev/null +++ b/SOURCES/0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch @@ -0,0 +1,42 @@ +From 84d8a0ca5b521b9d87679ffebe420fe69869961d Mon Sep 17 00:00:00 2001 +From: Kinglong Mee +Date: Mon, 8 Jul 2019 21:13:28 +0800 +Subject: [PATCH 270/276] cluster/ec: inherit healing from lock when it has + info + +If lock has info, fop should inherit healing mask from it. +Otherwise, fop cannot inherit right healing when changed_flags is zero. + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23010 +Change-Id: Ife80c9169d2c555024347a20300b0583f7e8a87f +fixes: bz#1732792 +Signed-off-by: Kinglong Mee +Reviewed-on: https://code.engineering.redhat.com/gerrit/177974 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index e2e582f..db1ff5b 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -1412,11 +1412,12 @@ ec_get_size_version(ec_lock_link_t *link) + set_dirty = ec_set_dirty_flag(link, ctx, dirty); + + /* If ec metadata has already been retrieved, do not try again. 
*/
+-    if (ctx->have_info && (!set_dirty)) {
+         if (ec_is_data_fop(fop->id)) {
+             fop->healing |= lock->healing;
+         }
+-        goto unlock;
++        if (!set_dirty)
++            goto unlock;
+     }
+
+     /* Determine if there's something we need to retrieve for the current
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch b/SOURCES/0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
new file mode 100644
index 0000000..26ec3e7
--- /dev/null
+++ b/SOURCES/0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
@@ -0,0 +1,116 @@
+From 52d71ad0e5c27808e7d8eea8a0920298837e408c Mon Sep 17 00:00:00 2001
+From: Xavi Hernandez
+Date: Wed, 17 Jul 2019 14:50:22 +0200
+Subject: [PATCH 271/276] cluster/ec: fix EIO error for concurrent writes on
+ sparse files
+
+EC doesn't allow concurrent writes on overlapping areas, they are
+serialized. However non-overlapping writes are serviced in parallel.
+When a write is not aligned, EC first needs to read the entire chunk
+from disk, apply the modified fragment and write it again.
+
+The problem appears on sparse files because a write to an offset
+implicitly creates data on offsets below it (so, in some way, they
+are overlapping). For example, if a file is empty and we read 10 bytes
+from offset 10, read() will return 0 bytes. Now, if we write one byte
+at offset 1M and retry the same read, the system call will return 10
+bytes (all containing 0's).
+
+So if we have two writes, the first one at offset 10 and the second one
+at offset 1M, EC will send both in parallel because they do not overlap.
+However, the first one will try to read missing data from the first chunk
+(i.e. offsets 0 to 9) to recombine the entire chunk and do the final write.
+This read will happen in parallel with the write to 1M. What could happen
+is that half of the bricks process the write before the read, and the
+other half do the read before the write. Some bricks will return 10 bytes of
+data while the others will return 0 bytes (because the file on the brick
+has not been expanded yet).
+
+When EC tries to recombine the answers from the bricks, it can't, because
+it needs more than half consistent answers to recover the data. So this
+read fails with EIO error. This error is propagated to the parent write,
+which is aborted and EIO is returned to the application.
+
+The issue happened because EC assumed that a write to a given offset
+implies that offsets below it exist.
+
+This fix prevents the read of the chunk from bricks if the current size
+of the file is smaller than the read chunk offset. This size is
+correctly tracked, so this fixes the issue.
+
+Also modifying ec-stripe.t file for Test #13 within it.
+In this patch, if a file size is less than the offset we are writing, we
+fill zeros in head and tail and do not consider it a stripe cache miss.
+That actually makes sense as we know what data that part holds and there is
+no need of reading it from bricks.
+ +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23066 +Change-Id: Ic342e8c35c555b8534109e9314c9a0710b6225d6 +fixes: bz#1731448 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/177975 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/basic/ec/ec-stripe.t | 2 +- + xlators/cluster/ec/src/ec-inode-write.c | 26 +++++++++++++++++--------- + 2 files changed, 18 insertions(+), 10 deletions(-) + +diff --git a/tests/basic/ec/ec-stripe.t b/tests/basic/ec/ec-stripe.t +index 1e940eb..98b9229 100644 +--- a/tests/basic/ec/ec-stripe.t ++++ b/tests/basic/ec/ec-stripe.t +@@ -202,7 +202,7 @@ TEST truncate -s 0 $B0/test_file + TEST truncate -s 0 $M0/test_file + TEST dd if=$B0/misc_file of=$B0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc + TEST dd if=$B0/misc_file of=$M0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc +-check_statedump_md5sum 4 5 ++check_statedump_md5sum 4 4 + clean_file_unmount + + ### 14 - Truncate to invalidate all but one the stripe in cache #### +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index ea55140..a45e6d6 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -2013,20 +2013,28 @@ ec_writev_start(ec_fop_data_t *fop) + if (err != 0) { + goto failed_fd; + } ++ tail = fop->size - fop->user_size - fop->head; + if (fop->head > 0) { +- found_stripe = ec_get_and_merge_stripe(ec, fop, EC_STRIPE_HEAD); +- if (!found_stripe) { +- if (ec_make_internal_fop_xdata(&xdata)) { +- err = -ENOMEM; +- goto failed_xdata; ++ if (current > fop->offset) { ++ found_stripe = ec_get_and_merge_stripe(ec, fop, EC_STRIPE_HEAD); ++ if (!found_stripe) { ++ if (ec_make_internal_fop_xdata(&xdata)) { ++ err = -ENOMEM; ++ goto failed_xdata; ++ } ++ ec_readv(fop->frame, fop->xl, -1, EC_MINIMUM_MIN, ++ ec_writev_merge_head, NULL, fd, ec->stripe_size, ++ fop->offset, 0, xdata); ++ } ++ } else { ++ memset(fop->vector[0].iov_base, 0, fop->head); ++ memset(fop->vector[0].iov_base + fop->size - tail, 0, tail); ++ if (ec->stripe_cache && (fop->size <= ec->stripe_size)) { ++ ec_add_stripe_in_cache(ec, fop); + } +- ec_readv(fop->frame, fop->xl, -1, EC_MINIMUM_MIN, +- ec_writev_merge_head, NULL, fd, ec->stripe_size, +- fop->offset, 0, xdata); + } + } + +- tail = fop->size - fop->user_size - fop->head; + if ((tail > 0) && ((fop->head == 0) || (fop->size > ec->stripe_size))) { + /* Current locking scheme will make sure the 'current' below will + * never decrease while the fop is in progress, so the checks will +-- +1.8.3.1 + diff --git a/SOURCES/0272-cluster-ec-Always-read-from-good-mask.patch b/SOURCES/0272-cluster-ec-Always-read-from-good-mask.patch new file mode 100644 index 0000000..a6193e4 --- /dev/null +++ b/SOURCES/0272-cluster-ec-Always-read-from-good-mask.patch @@ -0,0 +1,90 @@ +From 220b95085847b5f6a9e5bee7a9519efe72600e6a Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Thu, 18 Jul 2019 11:25:31 +0530 +Subject: [PATCH 272/276] cluster/ec: Always read from good-mask + +There are cases where fop->mask may have fop->healing added +and readv shouldn't be wound on fop->healing. 
To avoid this +always wind readv to lock->good_mask + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23069 +fixes: bz#1730914 +Change-Id: I2226ef0229daf5ff315d51e868b980ee48060b87 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/177976 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.c | 3 +++ + xlators/cluster/ec/src/ec-inode-write.c | 27 ++++++++++++++++++++++----- + 2 files changed, 25 insertions(+), 5 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index db1ff5b..28b31c9 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -654,6 +654,9 @@ ec_child_select(ec_fop_data_t *fop) + * unlock should go on all subvols where lock is performed*/ + if (fop->parent && !ec_internal_op(fop)) { + fop->mask &= (fop->parent->mask & ~fop->parent->healing); ++ if (ec_is_data_fop(fop->id)) { ++ fop->healing |= fop->parent->healing; ++ } + } + + if ((fop->mask & ~ec->xl_up) != 0) { +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index a45e6d6..4f35b6d 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -1977,6 +1977,20 @@ ec_get_and_merge_stripe(ec_t *ec, ec_fop_data_t *fop, ec_stripe_part_t which) + return found; + } + ++static uintptr_t ++ec_get_lock_good_mask(inode_t *inode, xlator_t *xl) ++{ ++ ec_lock_t *lock = NULL; ++ ec_inode_t *ictx = NULL; ++ LOCK(&inode->lock); ++ { ++ ictx = __ec_inode_get(inode, xl); ++ lock = ictx->inode_lock; ++ } ++ UNLOCK(&inode->lock); ++ return lock->good_mask; ++} ++ + void + ec_writev_start(ec_fop_data_t *fop) + { +@@ -2022,9 +2036,10 @@ ec_writev_start(ec_fop_data_t *fop) + err = -ENOMEM; + goto failed_xdata; + } +- ec_readv(fop->frame, fop->xl, -1, EC_MINIMUM_MIN, +- ec_writev_merge_head, NULL, fd, ec->stripe_size, +- fop->offset, 0, xdata); ++ ec_readv(fop->frame, fop->xl, ++ ec_get_lock_good_mask(fop->fd->inode, fop->xl), ++ EC_MINIMUM_MIN, ec_writev_merge_head, NULL, fd, ++ ec->stripe_size, fop->offset, 0, xdata); + } + } else { + memset(fop->vector[0].iov_base, 0, fop->head); +@@ -2047,8 +2062,10 @@ ec_writev_start(ec_fop_data_t *fop) + err = -ENOMEM; + goto failed_xdata; + } +- ec_readv(fop->frame, fop->xl, -1, EC_MINIMUM_MIN, +- ec_writev_merge_tail, NULL, fd, ec->stripe_size, ++ ec_readv(fop->frame, fop->xl, ++ ec_get_lock_good_mask(fop->fd->inode, fop->xl), ++ EC_MINIMUM_MIN, ec_writev_merge_tail, NULL, fd, ++ ec->stripe_size, + fop->offset + fop->size - ec->stripe_size, 0, xdata); + } + } else { +-- +1.8.3.1 + diff --git a/SOURCES/0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch b/SOURCES/0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch new file mode 100644 index 0000000..5c01cb5 --- /dev/null +++ b/SOURCES/0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch @@ -0,0 +1,86 @@ +From d5f931b334ac7abccaf30d277ce3ca9cfae0da5b Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Mon, 29 Jul 2019 14:08:37 +0530 +Subject: [PATCH 273/276] cluster/ec: Fix reopen flags to avoid misbehavior + +Problem: +when a file needs to be re-opened O_APPEND and O_EXCL +flags are not filtered in EC. + +- O_APPEND should be filtered because EC doesn't send O_APPEND below EC for +open to make sure writes happen on the individual fragments instead of at the +end of the file. 
+ +- O_EXCL should be filtered because shd could have created the file so even +when file exists open should succeed + +- O_CREAT should be filtered because open happens with gfid as parameter. So +open fop will create just the gfid which will lead to problems. + +Fix: +Filter out these two flags in reopen. + +Upstream-patch:https://review.gluster.org/#/c/glusterfs/+/23121/ +Change-Id: Ia280470fcb5188a09caa07bf665a2a94bce23bc4 +fixes: bz#1735514 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/177977 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.c | 4 +++- + xlators/cluster/ec/src/ec-inode-write.c | 7 +++++-- + 2 files changed, 8 insertions(+), 3 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index 28b31c9..5fb4610 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -101,6 +101,7 @@ ec_fix_open(ec_fop_data_t *fop, uintptr_t mask) + { + uintptr_t need_open = 0; + int ret = 0; ++ int32_t flags = 0; + loc_t loc = { + 0, + }; +@@ -121,6 +122,7 @@ ec_fix_open(ec_fop_data_t *fop, uintptr_t mask) + goto out; + } + ++ flags = fop->fd->flags & (~(O_TRUNC | O_APPEND | O_CREAT | O_EXCL)); + if (IA_IFDIR == fop->fd->inode->ia_type) { + ec_opendir(fop->frame, fop->xl, need_open, + EC_MINIMUM_ONE | EC_FOP_NO_PROPAGATE_ERROR, NULL, NULL, +@@ -128,7 +130,7 @@ ec_fix_open(ec_fop_data_t *fop, uintptr_t mask) + } else { + ec_open(fop->frame, fop->xl, need_open, + EC_MINIMUM_ONE | EC_FOP_NO_PROPAGATE_ERROR, NULL, NULL, &loc, +- fop->fd->flags & (~O_TRUNC), fop->fd, NULL); ++ flags, fop->fd, NULL); + } + + out: +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index 4f35b6d..2f28e11 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -1985,10 +1985,13 @@ ec_get_lock_good_mask(inode_t *inode, xlator_t *xl) + LOCK(&inode->lock); + { + ictx = __ec_inode_get(inode, xl); +- lock = ictx->inode_lock; ++ if (ictx) ++ lock = ictx->inode_lock; + } + UNLOCK(&inode->lock); +- return lock->good_mask; ++ if (lock) ++ return lock->good_mask; ++ return 0; + } + + void +-- +1.8.3.1 + diff --git a/SOURCES/0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch b/SOURCES/0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch new file mode 100644 index 0000000..0307e25 --- /dev/null +++ b/SOURCES/0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch @@ -0,0 +1,49 @@ +From 4c2aa7adef3df500043dd45614d20c9987e6c0d9 Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Fri, 2 Aug 2019 12:05:09 +0530 +Subject: [PATCH 274/276] cluster/ec: Update lock->good_mask on parent fop + failure + +When discard/truncate performs write fop, it should do so +after updating lock->good_mask to make sure readv happens +on the correct mask + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23147 +fixes: bz#1730914 +Change-Id: Idfef0bbcca8860d53707094722e6ba3f81c583b7 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/177978 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.h | 2 ++ + xlators/cluster/ec/src/ec-inode-write.c | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h +index e948342..3c69471 100644 +--- 
a/xlators/cluster/ec/src/ec-common.h ++++ b/xlators/cluster/ec/src/ec-common.h +@@ -204,4 +204,6 @@ void + ec_reset_entry_healing(ec_fop_data_t *fop); + char * + ec_msg_str(ec_fop_data_t *fop); ++void ++ec_lock_update_good(ec_lock_t *lock, ec_fop_data_t *fop); + #endif /* __EC_COMMON_H__ */ +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index 2f28e11..8bfa3b4 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -89,6 +89,8 @@ ec_update_write(ec_fop_data_t *fop, uintptr_t mask, off_t offset, uint64_t size) + goto out; + } + ++ if (fop->locks[0].lock) ++ ec_lock_update_good(fop->locks[0].lock, fop); + vector.iov_base = iobuf->ptr; + vector.iov_len = size; + memset(vector.iov_base, 0, vector.iov_len); +-- +1.8.3.1 + diff --git a/SOURCES/0275-cluster-ec-Create-heal-task-with-heal-process-id.patch b/SOURCES/0275-cluster-ec-Create-heal-task-with-heal-process-id.patch new file mode 100644 index 0000000..ba3d85a --- /dev/null +++ b/SOURCES/0275-cluster-ec-Create-heal-task-with-heal-process-id.patch @@ -0,0 +1,74 @@ +From 0864f1ad12394a5748d92aa0ed5b455135426bc3 Mon Sep 17 00:00:00 2001 +From: Ashish Pandey +Date: Tue, 30 Jul 2019 10:32:39 +0530 +Subject: [PATCH 275/276] cluster/ec: Create heal task with heal process id + +Problem: +ec_data_undo_pending calls syncop_fxattrop->SYNCOP without +a frame. In this case SYNCOP gets the frame of the task. +However, when we create a synctask for heal we provide +frame as NULL. +Now, if the read-only feature is ON, it will receive the +process ID of the shd as 0 and will consider that it as +not an internal process. This will prevent healing of a +file with "Read-only file system" error message log. + +Solution: +While launching heal, create a synctask using frame and set +process id of the SHD which is -6. 
+
+> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23129/
+
+>Change-Id: I37195399c85de322cbcac75633888922c4e3db4a
+>Fixes: bz#1734252
+
+BUG: 1733531
+Change-Id: I37195399c85de322cbcac75633888922c4e3db4a
+Signed-off-by: Ashish Pandey
+Reviewed-on: https://code.engineering.redhat.com/gerrit/178038
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/cluster/ec/src/ec-heal.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 2fa1f11..0f0f398 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -2647,13 +2647,31 @@ void
+ ec_launch_heal(ec_t *ec, ec_fop_data_t *fop)
+ {
+     int ret = 0;
++    call_frame_t *frame = NULL;
++
++    frame = create_frame(ec->xl, ec->xl->ctx->pool);
++    if (!frame) {
++        goto out;
++        ret = -1;
++    }
++
++    ec_owner_set(frame, frame->root);
++    /*Do heal as root*/
++    frame->root->uid = 0;
++    frame->root->gid = 0;
++    /*Mark the fops as internal*/
++    frame->root->pid = GF_CLIENT_PID_SELF_HEALD;
+
+     ret = synctask_new(ec->xl->ctx->env, ec_synctask_heal_wrap, ec_heal_done,
+-                       NULL, fop);
++                       frame, fop);
++out:
+     if (ret < 0) {
+         ec_fop_set_error(fop, ENOMEM);
+         ec_heal_fail(ec, fop);
+     }
++
++    if (frame)
++        STACK_DESTROY(frame->root);
+ }
+
+ void
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0276-features-utime-always-update-ctime-at-setattr.patch b/SOURCES/0276-features-utime-always-update-ctime-at-setattr.patch
new file mode 100644
index 0000000..f19663b
--- /dev/null
+++ b/SOURCES/0276-features-utime-always-update-ctime-at-setattr.patch
@@ -0,0 +1,74 @@
+From 7f5658a299081cec4c77d3cca4e70099cd59b1fc Mon Sep 17 00:00:00 2001
+From: Kinglong Mee
+Date: Mon, 5 Aug 2019 11:08:02 +0800
+Subject: [PATCH 276/276] features/utime: always update ctime at setattr
+
+For the NFS EXCLUSIVE mode, create may set a later time
+as mtime (the verifier); this should not be applied to ctime, since
+storage.ctime does not allow setting ctime to an earlier time.
+
+ /* Earlier, mdata was updated only if the existing time is less
+  * than the time to be updated. This would fail the scenarios
+  * where mtime can be set to any time using the syscall. Hence
+  * just updating without comparison. But the ctime is not
+  * allowed to changed to older date.
+  */
+
+According to the kernel's setattr, always set ctime at setattr,
+and do not set ctime from mtime at storage.ctime.
+
+>Change-Id: I5cfde6cb7f8939da9617506e3dc80bd840e0d749
+>fixes: bz#1737288
+>Signed-off-by: Kinglong Mee
+Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/23154/
+
+BUG: 1737705
+Change-Id: I5cfde6cb7f8939da9617506e3dc80bd840e0d749
+Signed-off-by: Sunil Kumar Acharya
+Reviewed-on: https://code.engineering.redhat.com/gerrit/178225
+Tested-by: RHGS Build Bot
+---
+ xlators/features/utime/src/utime-gen-fops-c.py | 13 +------------
+ xlators/storage/posix/src/posix-metadata.c     |  2 +-
+ 2 files changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/xlators/features/utime/src/utime-gen-fops-c.py b/xlators/features/utime/src/utime-gen-fops-c.py
+index 8730a51..a8637ff 100755
+--- a/xlators/features/utime/src/utime-gen-fops-c.py
++++ b/xlators/features/utime/src/utime-gen-fops-c.py
+@@ -82,18 +82,7 @@ gf_utime_@NAME@ (call_frame_t *frame, xlator_t *this,
+                  @LONG_ARGS@)
+ {
+     gl_timespec_get(&frame->root->ctime);
+-
+-    if (!valid) {
+-        frame->root->flags |= MDATA_CTIME;
+-    }
+-
+-    if (valid & (GF_SET_ATTR_UID | GF_SET_ATTR_GID)) {
+-        frame->root->flags |= MDATA_CTIME;
+-    }
+-
+-    if (valid & GF_SET_ATTR_MODE) {
+-        frame->root->flags |= MDATA_CTIME;
+-    }
++    frame->root->flags |= MDATA_CTIME;
+
+     STACK_WIND (frame, gf_utime_@NAME@_cbk, FIRST_CHILD(this),
+                 FIRST_CHILD(this)->fops->@NAME@, @SHORT_ARGS@);
+diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c
+index 57791fa..5cbdc98 100644
+--- a/xlators/storage/posix/src/posix-metadata.c
++++ b/xlators/storage/posix/src/posix-metadata.c
+@@ -631,7 +631,7 @@ posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd,
+     tv.tv_sec = stbuf->ia_mtime;
+     SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv, stbuf->ia_mtime_nsec);
+
+-    flag.ctime = 1;
++    flag.ctime = 0;
+     flag.mtime = 1;
+     flag.atime = 0;
+
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0277-geo-rep-Fix-Config-Get-Race.patch b/SOURCES/0277-geo-rep-Fix-Config-Get-Race.patch
new file mode 100644
index 0000000..45dada1
--- /dev/null
+++ b/SOURCES/0277-geo-rep-Fix-Config-Get-Race.patch
@@ -0,0 +1,109 @@
+From f40570f2f784dc61edb061a4931dcfc16bf51e7e Mon Sep 17 00:00:00 2001
+From: Aravinda VK
+Date: Mon, 5 Aug 2019 19:00:21 +0530
+Subject: [PATCH 277/284] geo-rep: Fix Config Get Race
+
+When two threads (sync jobs) in the Geo-rep worker call `gconf.get` and
+`gconf.getr` (realtime) at the same time, `getr` resets the conf object
+and the other one gets None. A thread lock is introduced to fix the issue.
+ +``` + File "/usr/libexec/glusterfs/python/syncdaemon/syncdutils.py", + line 368, in twrap + tf(*aargs) + File "/usr/libexec/glusterfs/python/syncdaemon/master.py", line 1987, + in syncjob + po = self.sync_engine(pb, self.log_err) + File "/usr/libexec/glusterfs/python/syncdaemon/resource.py", + line 1444, in rsync + rconf.ssh_ctl_args + \ +AttributeError: 'NoneType' object has no attribute 'split' +``` + +Backport of: + > Patch: https://review.gluster.org/#/c/glusterfs/+/23158/ + > Change-Id: I9c245e5c36338265354e158f5baa32b119eb2da5 + > Updates: bz#1737484 + > Signed-off-by: Aravinda VK + +Change-Id: I9c245e5c36338265354e158f5baa32b119eb2da5 +BUG: 1729915 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/178960 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/gsyncdconfig.py | 27 +++++++++++++++++++++------ + 1 file changed, 21 insertions(+), 6 deletions(-) + +diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py +index 1fc451f..38f3594 100644 +--- a/geo-replication/syncdaemon/gsyncdconfig.py ++++ b/geo-replication/syncdaemon/gsyncdconfig.py +@@ -17,6 +17,7 @@ import os + import shutil + from string import Template + from datetime import datetime ++from threading import Lock + + + # Global object which can be used in other modules +@@ -35,6 +36,7 @@ class GconfInvalidValue(Exception): + class Gconf(object): + def __init__(self, default_conf_file, custom_conf_file=None, + args={}, extra_tmpl_args={}, override_from_args=False): ++ self.lock = Lock() + self.default_conf_file = default_conf_file + self.custom_conf_file = custom_conf_file + self.tmp_conf_file = None +@@ -163,6 +165,11 @@ class Gconf(object): + if value is not None and not self._is_valid_value(name, value): + raise GconfInvalidValue() + ++ ++ def _load_with_lock(self): ++ with self.lock: ++ self._load() ++ + def _load(self): + self.gconf = {} + self.template_conf = [] +@@ -230,12 +237,19 @@ class Gconf(object): + self._tmpl_substitute() + self._do_typecast() + +- def reload(self): ++ def reload(self, with_lock=True): + if self._is_config_changed(): +- self._load() ++ if with_lock: ++ self._load_with_lock() ++ else: ++ self._load() + +- def get(self, name, default_value=None): +- return self.gconf.get(name, default_value) ++ def get(self, name, default_value=None, with_lock=True): ++ if with_lock: ++ with self.lock: ++ return self.gconf.get(name, default_value) ++ else: ++ return self.gconf.get(name, default_value) + + def getall(self, show_defaults=False, show_non_configurable=False): + cnf = {} +@@ -276,8 +290,9 @@ class Gconf(object): + return cnf + + def getr(self, name, default_value=None): +- self.reload() +- return self.get(name, default_value) ++ with self.lock: ++ self.reload(with_lock=False) ++ return self.get(name, default_value, with_lock=False) + + def get_help(self, name=None): + pass +-- +1.8.3.1 + diff --git a/SOURCES/0278-geo-rep-Fix-worker-connection-issue.patch b/SOURCES/0278-geo-rep-Fix-worker-connection-issue.patch new file mode 100644 index 0000000..00cb48f --- /dev/null +++ b/SOURCES/0278-geo-rep-Fix-worker-connection-issue.patch @@ -0,0 +1,45 @@ +From 924a25990948c9d76001cf4134fc5a2fcbf5c02c Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Fri, 16 Aug 2019 15:38:49 +0530 +Subject: [PATCH 278/284] geo-rep: Fix worker connection issue + +All the workers connects to primary slave node. 
It should
+connect to the available slave nodes in a round-robin fashion
+and choose a different slave node if the corresponding slave
+node is down. This patch fixes the same.
+
+Thanks Aravinda for the help in root causing this.
+
+Backport of:
+ > Patch: https://review.gluster.org/23247/
+ > Change-Id: I9f8e7744f4adb8a24833cf173681d109710f98cb
+ > Signed-off-by: Kotresh HR
+ > Updates: bz#1737484
+
+Change-Id: I9f8e7744f4adb8a24833cf173681d109710f98cb
+Signed-off-by: Kotresh HR
+BUG: 1729915
+Reviewed-on: https://code.engineering.redhat.com/gerrit/178961
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ geo-replication/syncdaemon/subcmds.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
+index 4ece7e0..8de7db2 100644
+--- a/geo-replication/syncdaemon/subcmds.py
++++ b/geo-replication/syncdaemon/subcmds.py
+@@ -73,7 +73,8 @@ def subcmd_worker(args):
+     Popen.init_errhandler()
+     fcntl.fcntl(args.feedback_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+     local = GLUSTER("localhost", args.master)
+-    slavehost, slavevol = args.slave.split("::")
++    slavevol = args.slave.split("::")[-1]
++    slavehost = args.resource_remote
+     remote = SSH(slavehost, slavevol)
+     remote.connect_remote()
+     local.connect()
+-- 
+1.8.3.1
+
diff --git a/SOURCES/0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch b/SOURCES/0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
new file mode 100644
index 0000000..3bbd56c
--- /dev/null
+++ b/SOURCES/0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
@@ -0,0 +1,253 @@
+From bf24623765817ede84ea47f3265f5e6c2ae17ee7 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Tue, 16 Jul 2019 20:36:57 +0530
+Subject: [PATCH 279/284] posix: In brick_mux brick is crashed while start/stop
+ volume in loop
+
+Problem: In a brick_mux environment, the brick sometimes crashes while
+         a volume is stopped/started in a loop. The brick crashes in the
+         janitor task when it accesses priv. If the posix priv is cleaned
+         up before the janitor task runs, the janitor task crashes.
+
+Solution: To avoid the crash in a brick_mux environment, introduce a new
+ flag janitor_task_stop in posix_private and, before sending the
+ CHILD_DOWN event, wait for janitor_task_done to update the flag.
+
+> Change-Id: Id9fa5d183a463b2b682774ab5cb9868357d139a4
+> fixes: bz#1730409
+> Signed-off-by: Mohit Agrawal
+> (Cherry picked from commit f138d3fa2237e7fa940ecf17153fd700350c4138)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23060/)
+
+Change-Id: Id9fa5d183a463b2b682774ab5cb9868357d139a4
+fixes: bz#1729971
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/178934
+Tested-by: Mohit Agrawal
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ libglusterfs/src/glusterfs/xlator.h | 3 +++
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 5 ++--
+ xlators/protocol/server/src/server.c | 6 ++++-
+ xlators/storage/posix/src/posix-common.c | 40 +++++++++++++++++++++++++++++-
+ xlators/storage/posix/src/posix-helpers.c | 16 ++++++++++++
+ xlators/storage/posix/src/posix.h | 3 +++
+ 6 files changed, 69 insertions(+), 4 deletions(-)
+
+diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
+index b78daad..da551e9 100644
+--- a/libglusterfs/src/glusterfs/xlator.h
++++ b/libglusterfs/src/glusterfs/xlator.h
+@@ -861,6 +861,9 @@ struct _xlator {
+
+ /* Flag to notify got CHILD_DOWN event for detach brick */
+ uint32_t notify_down;
++
++ /* Flag to avoid throw duplicate PARENT_DOWN event */
++ uint32_t parent_down;
+ };
+
+ /* This would be the only structure which needs to be exported by
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index 2aa975b..812c698 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -4082,8 +4082,9 @@ out:
+ if (msg[0]) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_IMPORT_FAIL, "%s",
+ msg);
+- gf_event(EVENT_IMPORT_BRICK_FAILED, "peer=%s;brick=%s",
+- new_brickinfo->hostname, new_brickinfo->path);
++ if (new_brickinfo)
++ gf_event(EVENT_IMPORT_BRICK_FAILED, "peer=%s;brick=%s",
++ new_brickinfo->hostname, new_brickinfo->path);
+ }
+ gf_msg_debug("glusterd", 0, "Returning with %d", ret);
+ return ret;
+diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
+index 6ae63ba..a5f09fe 100644
+--- a/xlators/protocol/server/src/server.c
++++ b/xlators/protocol/server/src/server.c
+@@ -580,6 +580,7 @@ server_graph_janitor_threads(void *data)
+ gf_boolean_t victim_found = _gf_false;
+ xlator_list_t **trav_p = NULL;
+ xlator_t *top = NULL;
++ uint32_t parent_down = 0;
+
+ GF_ASSERT(data);
+
+@@ -598,7 +599,10 @@ server_graph_janitor_threads(void *data)
+ victim = (*trav_p)->xlator;
+ if (victim->cleanup_starting &&
+ strcmp(victim->name, victim_name) == 0) {
+- victim_found = _gf_true;
++ parent_down = victim->parent_down;
++ victim->parent_down = 1;
++ if (!parent_down)
++ victim_found = _gf_true;
+ break;
+ }
+ }
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index d738692..69857d9 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -146,10 +146,15 @@ int32_t
+ posix_notify(xlator_t *this, int32_t event, void *data, ...)
+ { + xlator_t *victim = data; ++ struct posix_private *priv = this->private; ++ int ret = 0; ++ struct timespec sleep_till = { ++ 0, ++ }; + + switch (event) { + case GF_EVENT_PARENT_UP: { +- /* Tell the parent that posix xlator is up */ ++ /* the parent that posix xlator is up */ + default_notify(this, GF_EVENT_CHILD_UP, data); + } break; + +@@ -158,6 +163,31 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...) + break; + gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s", + victim->name); ++ ++ if (priv->janitor) { ++ pthread_mutex_lock(&priv->janitor_mutex); ++ { ++ priv->janitor_task_stop = _gf_true; ++ ret = gf_tw_del_timer(this->ctx->tw->timer_wheel, ++ priv->janitor); ++ if (!ret) { ++ clock_gettime(CLOCK_REALTIME, &sleep_till); ++ sleep_till.tv_sec += 1; ++ /* Wait to set janitor_task flag to _gf_false by ++ * janitor_task_done */ ++ while (priv->janitor_task_stop) { ++ (void)pthread_cond_timedwait(&priv->janitor_cond, ++ &priv->janitor_mutex, ++ &sleep_till); ++ clock_gettime(CLOCK_REALTIME, &sleep_till); ++ sleep_till.tv_sec += 1; ++ } ++ } ++ } ++ pthread_mutex_unlock(&priv->janitor_mutex); ++ GF_FREE(priv->janitor); ++ } ++ priv->janitor = NULL; + default_notify(this->parents->xlator, GF_EVENT_CHILD_DOWN, data); + } break; + default: +@@ -1008,6 +1038,8 @@ posix_init(xlator_t *this) + + pthread_mutex_init(&_private->fsync_mutex, NULL); + pthread_cond_init(&_private->fsync_cond, NULL); ++ pthread_mutex_init(&_private->janitor_mutex, NULL); ++ pthread_cond_init(&_private->janitor_cond, NULL); + INIT_LIST_HEAD(&_private->fsyncs); + ret = posix_spawn_ctx_janitor_thread(this); + if (ret) +@@ -1128,6 +1160,7 @@ posix_fini(xlator_t *this) + (void)gf_thread_cleanup_xint(priv->disk_space_check); + priv->disk_space_check = 0; + } ++ + if (priv->janitor) { + /*TODO: Make sure the synctask is also complete */ + ret = gf_tw_del_timer(this->ctx->tw->timer_wheel, priv->janitor); +@@ -1135,8 +1168,10 @@ posix_fini(xlator_t *this) + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_TIMER_DELETE_FAILED, + "Failed to delete janitor timer"); + } ++ GF_FREE(priv->janitor); + priv->janitor = NULL; + } ++ + if (priv->fsyncer) { + (void)gf_thread_cleanup_xint(priv->fsyncer); + priv->fsyncer = 0; +@@ -1148,6 +1183,9 @@ posix_fini(xlator_t *this) + GF_FREE(priv->base_path); + LOCK_DESTROY(&priv->lock); + pthread_mutex_destroy(&priv->fsync_mutex); ++ pthread_cond_destroy(&priv->fsync_cond); ++ pthread_mutex_destroy(&priv->janitor_mutex); ++ pthread_cond_destroy(&priv->janitor_cond); + GF_FREE(priv->hostname); + GF_FREE(priv->trash_path); + GF_FREE(priv); +diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c +index 07169b5..ef5bfd5 100644 +--- a/xlators/storage/posix/src/posix-helpers.c ++++ b/xlators/storage/posix/src/posix-helpers.c +@@ -1432,12 +1432,24 @@ posix_janitor_task_done(int ret, call_frame_t *frame, void *data) + this = data; + priv = this->private; + ++ pthread_mutex_lock(&priv->janitor_mutex); ++ { ++ if (priv->janitor_task_stop) { ++ priv->janitor_task_stop = _gf_false; ++ pthread_cond_signal(&priv->janitor_cond); ++ pthread_mutex_unlock(&priv->janitor_mutex); ++ goto out; ++ } ++ } ++ pthread_mutex_unlock(&priv->janitor_mutex); ++ + LOCK(&priv->lock); + { + __posix_janitor_timer_start(this); + } + UNLOCK(&priv->lock); + ++out: + return 0; + } + +@@ -1456,6 +1468,9 @@ posix_janitor_task(void *data) + old_this = THIS; + THIS = this; + ++ if (!priv) ++ goto out; ++ + time(&now); + if ((now - priv->last_landfill_check) > 
priv->janitor_sleep_duration) { + if (priv->disable_landfill_purge) { +@@ -1475,6 +1490,7 @@ posix_janitor_task(void *data) + + THIS = old_this; + ++out: + return 0; + } + +diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h +index b0935a7..64288a7 100644 +--- a/xlators/storage/posix/src/posix.h ++++ b/xlators/storage/posix/src/posix.h +@@ -203,6 +203,8 @@ struct posix_private { + struct list_head fsyncs; + pthread_mutex_t fsync_mutex; + pthread_cond_t fsync_cond; ++ pthread_mutex_t janitor_mutex; ++ pthread_cond_t janitor_cond; + int fsync_queue_count; + + enum { +@@ -257,6 +259,7 @@ struct posix_private { + + gf_boolean_t fips_mode_rchecksum; + gf_boolean_t ctime; ++ gf_boolean_t janitor_task_stop; + }; + + typedef struct { +-- +1.8.3.1 + diff --git a/SOURCES/0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch b/SOURCES/0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch new file mode 100644 index 0000000..38b4d48 --- /dev/null +++ b/SOURCES/0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch @@ -0,0 +1,153 @@ +From 2d7d9165c6a8619eef553859b4b7136b8e9ccb55 Mon Sep 17 00:00:00 2001 +From: Anoop C S +Date: Sat, 10 Aug 2019 10:30:26 +0530 +Subject: [PATCH 280/284] performance/md-cache: Do not skip caching of null + character xattr values + +Null character string is a valid xattr value in file system. But for +those xattrs processed by md-cache, it does not update its entries if +value is null('\0'). This results in ENODATA when those xattrs are +queried afterwards via getxattr() causing failures in basic operations +like create, copy etc in a specially configured Samba setup for Mac OS +clients. + +On the other side snapview-server is internally setting empty string("") +as value for xattrs received as part of listxattr() and are not intended +to be cached. Therefore we try to maintain that behaviour using an +additional dictionary key to prevent updation of entries in getxattr() +and fgetxattr() callbacks in md-cache. + +Credits: Poornima G + +Backport of https://review.gluster.org/c/glusterfs/+/23206 + +Change-Id: I7859cbad0a06ca6d788420c2a495e658699c6ff7 +Fixes: bz#1732376 +Signed-off-by: Anoop C S +Reviewed-on: https://code.engineering.redhat.com/gerrit/179048 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/bugs/md-cache/bug-1726205.t | 22 +++++++++++++++ + .../features/snapview-server/src/snapview-server.c | 12 ++++++++- + xlators/performance/md-cache/src/md-cache.c | 31 +++++++++------------- + 3 files changed, 45 insertions(+), 20 deletions(-) + create mode 100644 tests/bugs/md-cache/bug-1726205.t + +diff --git a/tests/bugs/md-cache/bug-1726205.t b/tests/bugs/md-cache/bug-1726205.t +new file mode 100644 +index 0000000..795130e +--- /dev/null ++++ b/tests/bugs/md-cache/bug-1726205.t +@@ -0,0 +1,22 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../../include.rc ++. 
$(dirname $0)/../../volume.rc ++ ++cleanup; ++ ++TEST glusterd; ++ ++TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3}; ++ ++TEST $CLI volume start $V0 ++ ++TEST $CLI volume set $V0 group samba ++ ++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 ++ ++TEST touch $M0/file ++TEST "setfattr -n "user.DosStream.Zone.Identifier:\$DATA" -v '\0' $M0/file" ++TEST "getfattr -n "user.DosStream.Zone.Identifier:\$DATA" -e hex $M0/file | grep -q 0x00" ++ ++cleanup; +diff --git a/xlators/features/snapview-server/src/snapview-server.c b/xlators/features/snapview-server/src/snapview-server.c +index b4998b8..1d6a5e5 100644 +--- a/xlators/features/snapview-server/src/snapview-server.c ++++ b/xlators/features/snapview-server/src/snapview-server.c +@@ -828,7 +828,8 @@ out: + * back into the dict. But to get the values for those xattrs it has to do the + * getxattr operation on each xattr which might turn out to be a costly + * operation. So for each of the xattrs present in the list, a 0 byte value +- * ("") is set into the dict before unwinding. This can be treated as an ++ * ("") is set into the dict before unwinding. Since ("") is also a valid xattr ++ * value(in a file system) we use an extra key in the same dictionary as an + * indicator to other xlators which want to cache the xattrs (as of now, + * md-cache which caches acl and selinux related xattrs) to not to cache the + * values of the xattrs present in the dict. +@@ -871,6 +872,15 @@ svs_add_xattrs_to_dict(xlator_t *this, dict_t *dict, char *list, ssize_t size) + list_offset += strlen(keybuffer) + 1; + } /* while (remaining_size > 0) */ + ++ /* Add an additional key to indicate that we don't need to cache these ++ * xattrs(with value "") */ ++ ret = dict_set_str(dict, "glusterfs.skip-cache", ""); ++ if (ret < 0) { ++ gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_DICT_SET_FAILED, ++ "dict set operation for the key glusterfs.skip-cache failed."); ++ goto out; ++ } ++ + ret = 0; + + out: +diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c +index 6e0468f..a6b363f 100644 +--- a/xlators/performance/md-cache/src/md-cache.c ++++ b/xlators/performance/md-cache/src/md-cache.c +@@ -698,25 +698,6 @@ updatefn(dict_t *dict, char *key, data_t *value, void *data) + } + } + +- /* posix xlator as part of listxattr will send both names +- * and values of the xattrs in the dict. But as per man page +- * listxattr is mainly supposed to send names of the all the +- * xattrs. gfapi, as of now will put all the keys it obtained +- * in the dict (sent by posix) into a buffer provided by the +- * caller (thus the values of those xattrs are lost). If some +- * xlator makes gfapi based calls (ex: snapview-server), then +- * it has to unwind the calls by putting those names it got +- * in the buffer again into the dict. But now it would not be +- * having the values for those xattrs. So it might just put +- * a 0 byte value ("") into the dict for each xattr and unwind +- * the call. So the xlators which cache the xattrs (as of now +- * md-cache caches the acl and selinux related xattrs), should +- * not update their cache if the value of a xattr is a 0 byte +- * data (i.e. ""). 
+- */ +- if (value->len == 1 && value->data[0] == '\0') +- return 0; +- + if (dict_set(u->dict, key, value) < 0) { + u->ret = -1; + return -1; +@@ -2406,6 +2387,12 @@ mdc_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + goto out; + } + ++ if (dict_get(xattr, "glusterfs.skip-cache")) { ++ gf_msg(this->name, GF_LOG_DEBUG, 0, 0, ++ "Skipping xattr update due to empty value"); ++ goto out; ++ } ++ + mdc_inode_xatt_set(this, local->loc.inode, xdata); + + out: +@@ -2488,6 +2475,12 @@ mdc_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + goto out; + } + ++ if (dict_get(xattr, "glusterfs.skip-cache")) { ++ gf_msg(this->name, GF_LOG_DEBUG, 0, 0, ++ "Skipping xattr update due to empty value"); ++ goto out; ++ } ++ + mdc_inode_xatt_set(this, local->fd->inode, xdata); + + out: +-- +1.8.3.1 + diff --git a/SOURCES/0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch b/SOURCES/0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch new file mode 100644 index 0000000..5af12d1 --- /dev/null +++ b/SOURCES/0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch @@ -0,0 +1,105 @@ +From fa3cc9971bf1bf4ea52edfedc0cea67a0d6990d1 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Tue, 20 Aug 2019 15:49:40 +0530 +Subject: [PATCH 281/284] ctime: Fix incorrect realtime passed to + frame->root->ctime + +On systems that don't support "timespec_get"(e.g., centos6), it +was using "clock_gettime" with "CLOCK_MONOTONIC" to get unix epoch +time which is incorrect. This patch introduces "timespec_now_realtime" +which uses "clock_gettime" with "CLOCK_REALTIME" which fixes +the issue. + +Backport of: + > Patch: https://review.gluster.org/23274/ + > Change-Id: I57be35ce442d7e05319e82112b687eb4f28d7612 + > Signed-off-by: Kotresh HR + > fixes: bz#1743652 + +Change-Id: I57be35ce442d7e05319e82112b687eb4f28d7612 +Signed-off-by: Kotresh HR +BUG: 1743611 +Reviewed-on: https://code.engineering.redhat.com/gerrit/179185 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/glusterfs/timespec.h | 2 ++ + libglusterfs/src/libglusterfs.sym | 1 + + libglusterfs/src/timespec.c | 22 ++++++++++++++++++++++ + xlators/features/utime/src/utime-helpers.c | 2 +- + 4 files changed, 26 insertions(+), 1 deletion(-) + +diff --git a/libglusterfs/src/glusterfs/timespec.h b/libglusterfs/src/glusterfs/timespec.h +index 871871d..bb9ab44 100644 +--- a/libglusterfs/src/glusterfs/timespec.h ++++ b/libglusterfs/src/glusterfs/timespec.h +@@ -21,6 +21,8 @@ + void + timespec_now(struct timespec *ts); + void ++timespec_now_realtime(struct timespec *ts); ++void + timespec_adjust_delta(struct timespec *ts, struct timespec delta); + void + timespec_sub(const struct timespec *begin, const struct timespec *end, +diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym +index b161380..467a1b7 100644 +--- a/libglusterfs/src/libglusterfs.sym ++++ b/libglusterfs/src/libglusterfs.sym +@@ -1073,6 +1073,7 @@ sys_accept + tbf_init + tbf_throttle + timespec_now ++timespec_now_realtime + timespec_sub + timespec_adjust_delta + timespec_cmp +diff --git a/libglusterfs/src/timespec.c b/libglusterfs/src/timespec.c +index c01527f..d0d5005 100644 +--- a/libglusterfs/src/timespec.c ++++ b/libglusterfs/src/timespec.c +@@ -71,6 +71,28 @@ timespec_now(struct timespec *ts) + } + + void ++timespec_now_realtime(struct timespec *ts) ++{ ++#if defined GF_LINUX_HOST_OS || defined GF_SOLARIS_HOST_OS || \ ++ defined GF_BSD_HOST_OS ++ if (0 == 
clock_gettime(CLOCK_REALTIME, ts)) { ++ return; ++ } ++#endif ++ ++ /* Fall back to gettimeofday()*/ ++ struct timeval tv = { ++ 0, ++ }; ++ if (0 == gettimeofday(&tv, NULL)) { ++ TIMEVAL_TO_TIMESPEC(&tv, ts); ++ return; ++ } ++ ++ return; ++} ++ ++void + timespec_adjust_delta(struct timespec *ts, struct timespec delta) + { + ts->tv_nsec = ((ts->tv_nsec + delta.tv_nsec) % 1000000000); +diff --git a/xlators/features/utime/src/utime-helpers.c b/xlators/features/utime/src/utime-helpers.c +index 79cc014..29d9ad9 100644 +--- a/xlators/features/utime/src/utime-helpers.c ++++ b/xlators/features/utime/src/utime-helpers.c +@@ -17,7 +17,7 @@ gl_timespec_get(struct timespec *ts) + #ifdef TIME_UTC + timespec_get(ts, TIME_UTC); + #else +- timespec_now(ts); ++ timespec_now_realtime(ts); + #endif + } + +-- +1.8.3.1 + diff --git a/SOURCES/0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch b/SOURCES/0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch new file mode 100644 index 0000000..37a0f12 --- /dev/null +++ b/SOURCES/0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch @@ -0,0 +1,116 @@ +From 98c9fc8d774ae153ca6b44d3337cf5d9f7a030e2 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Fri, 16 Aug 2019 16:07:03 +0530 +Subject: [PATCH 282/284] geo-rep: Fix the name of changelog archive file + +Background: +The processed changelogs are archived each month in a single tar file. +The default format is "archive_YYYYMM.tar" which is specified as "%%Y%%m" +in configuration file. + +Problem: +The created changelog archive file didn't have corresponding year +and month. It created as "archive_%Y%m.tar" on python2 only systems. + +Cause and Fix: +Geo-rep expects "%Y%m" after the ConfigParser reads it from config file. +Since it was "%%Y%%m" in config file, geo-rep used to get correct value +"%Y%m" in python3 and "%%Y%%m" in python2 which is incorrect. +The fix can be to use "%Y%m" in config file but that fails in python3. +So the fix is to use "RawConfigParser" in geo-rep and use "%Y%m". This +works both in python2 and python3. + +Backport of: + > Patch: https://review.gluster.org/23248 + > Change-Id: Ie5b7d2bc04d0d53cd1769e064c2d67aaf95d557c + > fixes: bz#1741890 + > Signed-off-by: Kotresh HR + +Change-Id: Ie5b7d2bc04d0d53cd1769e064c2d67aaf95d557c +BUG: 1743634 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/179188 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/gsyncd.conf.in | 2 +- + geo-replication/syncdaemon/gsyncdconfig.py | 14 +++++++------- + 2 files changed, 8 insertions(+), 8 deletions(-) + +diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in +index c2e4f0d..5ebd57a 100644 +--- a/geo-replication/gsyncd.conf.in ++++ b/geo-replication/gsyncd.conf.in +@@ -109,7 +109,7 @@ type=int + help=Minimum time interval in seconds for passive worker to become Active + + [changelog-archive-format] +-value=%%Y%%m ++value=%Y%m + help=Processed changelogs will be archived in working directory. 
Pattern for archive file + + [use-meta-volume] +diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py +index 38f3594..f823311 100644 +--- a/geo-replication/syncdaemon/gsyncdconfig.py ++++ b/geo-replication/syncdaemon/gsyncdconfig.py +@@ -10,9 +10,9 @@ + # + + try: +- from ConfigParser import ConfigParser, NoSectionError ++ from ConfigParser import RawConfigParser, NoSectionError + except ImportError: +- from configparser import ConfigParser, NoSectionError ++ from configparser import RawConfigParser, NoSectionError + import os + import shutil + from string import Template +@@ -94,7 +94,7 @@ class Gconf(object): + if name != "all" and not self._is_configurable(name): + raise GconfNotConfigurable() + +- cnf = ConfigParser() ++ cnf = RawConfigParser() + with open(self.custom_conf_file) as f: + cnf.readfp(f) + +@@ -138,7 +138,7 @@ class Gconf(object): + if curr_val == value: + return True + +- cnf = ConfigParser() ++ cnf = RawConfigParser() + with open(self.custom_conf_file) as f: + cnf.readfp(f) + +@@ -178,7 +178,7 @@ class Gconf(object): + self.session_conf_items = [] + self.default_values = {} + +- conf = ConfigParser() ++ conf = RawConfigParser() + # Default Template config file + with open(self.default_conf_file) as f: + conf.readfp(f) +@@ -342,7 +342,7 @@ class Gconf(object): + return False + + def is_config_file_old(config_file, mastervol, slavevol): +- cnf = ConfigParser() ++ cnf = RawConfigParser() + cnf.read(config_file) + session_section = "peers %s %s" % (mastervol, slavevol) + try: +@@ -357,7 +357,7 @@ def config_upgrade(config_file, ret): + shutil.copyfile(config_file, config_file_backup) + + #write a new config file +- config = ConfigParser() ++ config = RawConfigParser() + config.add_section('vars') + + for key, value in ret.items(): +-- +1.8.3.1 + diff --git a/SOURCES/0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch b/SOURCES/0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch new file mode 100644 index 0000000..eb9d8f8 --- /dev/null +++ b/SOURCES/0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch @@ -0,0 +1,285 @@ +From 55eb2e7642e3428eaa1b2d833c0daa1d34b98324 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Thu, 8 Aug 2019 10:05:12 +0530 +Subject: [PATCH 283/284] ctime: Fix ctime issue with utime family of syscalls + +When atime|mtime is updated via utime family of syscalls, +ctime is not updated. This patch fixes the same. 
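+
+The expected behaviour, as a standalone illustration (plain C on Linux,
+not the xlator code; assumes a file named "testfile" already exists):
+
+```
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+int main(void)
+{
+    struct stat st;
+    /* Explicitly set atime and mtime back to the epoch. */
+    struct timespec times[2] = {{0, 0}, {0, 0}};
+
+    if (utimensat(AT_FDCWD, "testfile", times, 0) != 0)
+        return 1;
+    if (stat("testfile", &st) != 0)
+        return 1;
+
+    /* POSIX requires the call itself to mark ctime for update, so
+     * st_ctime must now read as "current time", not the epoch. */
+    printf("atime=%ld mtime=%ld ctime=%ld\n",
+           (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
+    return 0;
+}
+```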
+ +Backport of: + > Patch: https://review.gluster.org/23177 + > Change-Id: I7f86d8f8a1e06a332c3449b5bbdbf128c9690f25 + > fixes: bz#1738786 + > Signed-off-by: Kotresh HR + +Change-Id: I7f86d8f8a1e06a332c3449b5bbdbf128c9690f25 +BUG: 1743627 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/179184 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/features/utime/src/utime-gen-fops-c.py | 13 +++- + xlators/storage/posix/src/posix-inode-fd-ops.c | 8 +-- + xlators/storage/posix/src/posix-metadata.c | 96 ++++++++++++++------------ + xlators/storage/posix/src/posix-metadata.h | 3 +- + 4 files changed, 68 insertions(+), 52 deletions(-) + +diff --git a/xlators/features/utime/src/utime-gen-fops-c.py b/xlators/features/utime/src/utime-gen-fops-c.py +index a8637ff..8730a51 100755 +--- a/xlators/features/utime/src/utime-gen-fops-c.py ++++ b/xlators/features/utime/src/utime-gen-fops-c.py +@@ -82,7 +82,18 @@ gf_utime_@NAME@ (call_frame_t *frame, xlator_t *this, + @LONG_ARGS@) + { + gl_timespec_get(&frame->root->ctime); +- frame->root->flags |= MDATA_CTIME; ++ ++ if (!valid) { ++ frame->root->flags |= MDATA_CTIME; ++ } ++ ++ if (valid & (GF_SET_ATTR_UID | GF_SET_ATTR_GID)) { ++ frame->root->flags |= MDATA_CTIME; ++ } ++ ++ if (valid & GF_SET_ATTR_MODE) { ++ frame->root->flags |= MDATA_CTIME; ++ } + + STACK_WIND (frame, gf_utime_@NAME@_cbk, FIRST_CHILD(this), + FIRST_CHILD(this)->fops->@NAME@, @SHORT_ARGS@); +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index d22bbc2..e0ea85b 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -425,8 +425,8 @@ posix_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, + real_path); + goto out; + } +- posix_update_utime_in_mdata(this, real_path, -1, loc->inode, stbuf, +- valid); ++ posix_update_utime_in_mdata(this, real_path, -1, loc->inode, ++ &frame->root->ctime, stbuf, valid); + } + + if (valid & GF_SET_ATTR_CTIME && !priv->ctime) { +@@ -652,8 +652,8 @@ posix_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, + fd); + goto out; + } +- posix_update_utime_in_mdata(this, NULL, pfd->fd, fd->inode, stbuf, +- valid); ++ posix_update_utime_in_mdata(this, NULL, pfd->fd, fd->inode, ++ &frame->root->ctime, stbuf, valid); + } + + if (!valid) { +diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c +index 5cbdc98..532daa2 100644 +--- a/xlators/storage/posix/src/posix-metadata.c ++++ b/xlators/storage/posix/src/posix-metadata.c +@@ -432,8 +432,10 @@ out: + */ + static int + posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, +- inode_t *inode, struct timespec *time, struct iatt *stbuf, +- posix_mdata_flag_t *flag, gf_boolean_t update_utime) ++ inode_t *inode, struct timespec *time, ++ struct timespec *u_atime, struct timespec *u_mtime, ++ struct iatt *stbuf, posix_mdata_flag_t *flag, ++ gf_boolean_t update_utime) + { + posix_mdata_t *mdata = NULL; + int ret = -1; +@@ -443,6 +445,10 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, + GF_VALIDATE_OR_GOTO(this->name, inode, out); + GF_VALIDATE_OR_GOTO(this->name, time, out); + ++ if (update_utime && (!u_atime || !u_mtime)) { ++ goto out; ++ } ++ + LOCK(&inode->lock); + { + ret = __inode_ctx_get1(inode, this, (uint64_t *)&mdata); +@@ -506,32 +512,30 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, + } + } + +- /* 
Earlier, mdata was updated only if the existing time is less +- * than the time to be updated. This would fail the scenarios +- * where mtime can be set to any time using the syscall. Hence +- * just updating without comparison. But the ctime is not +- * allowed to changed to older date. +- */ +- +- if (flag->ctime && posix_compare_timespec(time, &mdata->ctime) > 0) { +- mdata->ctime = *time; +- } +- + /* In distributed systems, there could be races with fops + * updating mtime/atime which could result in different + * mtime/atime for same file. So this makes sure, only the + * highest time is retained. If the mtime/atime update comes + * from the explicit utime syscall, it is allowed to set to +- * previous time ++ * previous or future time but the ctime is always set to ++ * current time. + */ + if (update_utime) { ++ if (flag->ctime && ++ posix_compare_timespec(time, &mdata->ctime) > 0) { ++ mdata->ctime = *time; ++ } + if (flag->mtime) { +- mdata->mtime = *time; ++ mdata->mtime = *u_mtime; + } + if (flag->atime) { +- mdata->atime = *time; ++ mdata->atime = *u_atime; + } + } else { ++ if (flag->ctime && ++ posix_compare_timespec(time, &mdata->ctime) > 0) { ++ mdata->ctime = *time; ++ } + if (flag->mtime && + posix_compare_timespec(time, &mdata->mtime) > 0) { + mdata->mtime = *time; +@@ -584,15 +588,22 @@ out: + */ + void + posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd, +- inode_t *inode, struct iatt *stbuf, int valid) ++ inode_t *inode, struct timespec *ctime, ++ struct iatt *stbuf, int valid) + { + int32_t ret = 0; + #if defined(HAVE_UTIMENSAT) +- struct timespec tv = { ++ struct timespec tv_atime = { ++ 0, ++ }; ++ struct timespec tv_mtime = { + 0, + }; + #else +- struct timeval tv = { ++ struct timeval tv_atime = { ++ 0, ++ }; ++ struct timeval tv_mtime = { + 0, + }; + #endif +@@ -611,35 +622,28 @@ posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd, + */ + if (inode && priv->ctime) { + if ((valid & GF_SET_ATTR_ATIME) == GF_SET_ATTR_ATIME) { +- tv.tv_sec = stbuf->ia_atime; +- SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv, stbuf->ia_atime_nsec); ++ tv_atime.tv_sec = stbuf->ia_atime; ++ SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv_atime, stbuf->ia_atime_nsec); + +- flag.ctime = 0; +- flag.mtime = 0; ++ flag.ctime = 1; + flag.atime = 1; +- ret = posix_set_mdata_xattr(this, real_path, -1, inode, &tv, NULL, +- &flag, _gf_true); +- if (ret) { +- gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, +- "posix set mdata atime failed on file:" +- " %s gfid:%s", +- real_path, uuid_utoa(inode->gfid)); +- } + } + + if ((valid & GF_SET_ATTR_MTIME) == GF_SET_ATTR_MTIME) { +- tv.tv_sec = stbuf->ia_mtime; +- SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv, stbuf->ia_mtime_nsec); ++ tv_mtime.tv_sec = stbuf->ia_mtime; ++ SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv_mtime, stbuf->ia_mtime_nsec); + +- flag.ctime = 0; ++ flag.ctime = 1; + flag.mtime = 1; +- flag.atime = 0; ++ } + +- ret = posix_set_mdata_xattr(this, real_path, -1, inode, &tv, NULL, +- &flag, _gf_true); ++ if (flag.mtime || flag.atime) { ++ ret = posix_set_mdata_xattr(this, real_path, -1, inode, ctime, ++ &tv_atime, &tv_mtime, NULL, &flag, ++ _gf_true); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, +- "posix set mdata mtime failed on file:" ++ "posix set mdata atime failed on file:" + " %s gfid:%s", + real_path, uuid_utoa(inode->gfid)); + } +@@ -702,8 +706,8 @@ posix_set_ctime(call_frame_t *frame, xlator_t *this, const char *real_path, + goto out; + } + ret = 
posix_set_mdata_xattr(this, real_path, fd, inode, +- &frame->root->ctime, stbuf, &flag, +- _gf_false); ++ &frame->root->ctime, NULL, NULL, stbuf, ++ &flag, _gf_false); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, + "posix set mdata failed on file: %s gfid:%s", real_path, +@@ -733,8 +737,8 @@ posix_set_parent_ctime(call_frame_t *frame, xlator_t *this, + goto out; + } + ret = posix_set_mdata_xattr(this, real_path, fd, inode, +- &frame->root->ctime, stbuf, &flag, +- _gf_false); ++ &frame->root->ctime, NULL, NULL, stbuf, ++ &flag, _gf_false); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, + "posix set mdata failed on file: %s gfid:%s", real_path, +@@ -792,8 +796,8 @@ posix_set_ctime_cfr(call_frame_t *frame, xlator_t *this, + flag_dup.atime = 0; + + ret = posix_set_mdata_xattr(this, real_path_out, fd_out, inode_out, +- &frame->root->ctime, stbuf_out, &flag_dup, +- _gf_false); ++ &frame->root->ctime, NULL, NULL, stbuf_out, ++ &flag_dup, _gf_false); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, + "posix set mdata failed on file: %s gfid:%s", real_path_out, +@@ -811,8 +815,8 @@ posix_set_ctime_cfr(call_frame_t *frame, xlator_t *this, + flag_dup.ctime = 0; + + ret = posix_set_mdata_xattr(this, real_path_in, fd_out, inode_out, +- &frame->root->ctime, stbuf_out, &flag_dup, +- _gf_false); ++ &frame->root->ctime, NULL, NULL, stbuf_out, ++ &flag_dup, _gf_false); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, + "posix set mdata failed on file: %s gfid:%s", real_path_in, +diff --git a/xlators/storage/posix/src/posix-metadata.h b/xlators/storage/posix/src/posix-metadata.h +index dc25e59..c176699 100644 +--- a/xlators/storage/posix/src/posix-metadata.h ++++ b/xlators/storage/posix/src/posix-metadata.h +@@ -40,7 +40,8 @@ __posix_get_mdata_xattr(xlator_t *this, const char *real_path, int _fd, + inode_t *inode, struct iatt *stbuf); + void + posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd, +- inode_t *inode, struct iatt *stbuf, int valid); ++ inode_t *inode, struct timespec *ctime, ++ struct iatt *stbuf, int valid); + void + posix_set_ctime(call_frame_t *frame, xlator_t *this, const char *real_path, + int fd, inode_t *inode, struct iatt *stbuf); +-- +1.8.3.1 + diff --git a/SOURCES/0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch b/SOURCES/0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch new file mode 100644 index 0000000..4078bfc --- /dev/null +++ b/SOURCES/0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch @@ -0,0 +1,61 @@ +From 243075b593c6fccbffb3e82ffcfdb58acfd68269 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Thu, 22 Aug 2019 15:51:43 +0530 +Subject: [PATCH 284/284] posix: log aio_error return codes in + posix_fs_health_check + +Problem: Sometime brick is going down to health check thread is + failed without logging error codes return by aio system calls. + As per aio_error man page it returns a positive error number + if the asynchronous I/O operation failed. 
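+
+For reference, the aio_error(3) return convention as a standalone sketch
+(illustrative helper, not the brick code):
+
+```
+#include <aio.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+static void log_aio_status(const struct aiocb *cb)
+{
+    int err = aio_error(cb);
+
+    if (err == 0)
+        printf("aio request completed\n");
+    else if (err == EINPROGRESS)
+        printf("aio request still in progress\n");
+    else
+        /* err is itself the positive error number, not -1 with errno,
+         * so it is the value worth logging. */
+        printf("aio request failed: %d (%s)\n", err, strerror(err));
+}
+```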
+
+Solution: log aio_error return codes in error message
+
+> Change-Id: I2496b1bc16e602b0fd3ad53e211de11ec8c641ef
+> Fixes: bz#1744519
+> Signed-off-by: Mohit Agrawal
+> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23284/
+
+Change-Id: I2496b1bc16e602b0fd3ad53e211de11ec8c641ef
+BUG: 1744518
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/179211
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ xlators/storage/posix/src/posix-helpers.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
+index ef5bfd5..d143d4c 100644
+--- a/xlators/storage/posix/src/posix-helpers.c
++++ b/xlators/storage/posix/src/posix-helpers.c
+@@ -2025,7 +2025,6 @@ posix_fs_health_check(xlator_t *this)
+ if (ret != 0) {
+ op_errno = errno;
+ op = "aio_write_error";
+- ret = -1;
+ goto out;
+ }
+
+@@ -2064,7 +2063,6 @@ posix_fs_health_check(xlator_t *this)
+ if (ret != 0) {
+ op_errno = errno;
+ op = "aio_read_error";
+- ret = -1;
+ goto out;
+ }
+
+@@ -2089,7 +2087,8 @@ out:
+ }
+ if (ret && file_path[0]) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_HEALTHCHECK_FAILED,
+- "%s() on %s returned", op, file_path);
++ "%s() on %s returned ret is %d error is %s", op, file_path, ret,
++ ret != -1 ? strerror(ret) : strerror(op_errno));
+ gf_event(EVENT_POSIX_HEALTH_CHECK_FAILED,
+ "op=%s;path=%s;error=%s;brick=%s:%s timeout is %d", op,
+ file_path, strerror(op_errno), priv->hostname, priv->base_path,
+--
+1.8.3.1
+
diff --git a/SOURCES/0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch b/SOURCES/0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
new file mode 100644
index 0000000..12549e7
--- /dev/null
+++ b/SOURCES/0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
@@ -0,0 +1,43 @@
+From 49cd9ef7487ba88796315b897823837a9cbd535e Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Wed, 28 Aug 2019 09:05:20 +0530
+Subject: [PATCH 285/297] glusterd: glusterd service is getting timed out on
+ scaled setup
+
+Problem: On a three-node cluster with 2000 replica volumes, systemctl
+ times out waiting for the glusterd service.
+
+Solution: Configure TimeoutSec 300 to wait for glusterd startup.
+
+> Change-Id: Idb3f3f3e56e6216a0ebd754cbb9e8e37ce9e636d
+> Fixes: bz#1746228
+> Signed-off-by: Mohit Agrawal
+> (Cherry picked from commit c90dc63ec9eee0f43ba8e489876fdf8b8810bbdc)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23316/)
+
+Change-Id: Idb3f3f3e56e6216a0ebd754cbb9e8e37ce9e636d
+BUG: 1746027
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/179806
+Tested-by: Mohit Agrawal
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ extras/systemd/glusterd.service.in | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
+index c33351c..f604160 100644
+--- a/extras/systemd/glusterd.service.in
++++ b/extras/systemd/glusterd.service.in
+@@ -13,6 +13,7 @@ Environment="LOG_LEVEL=INFO"
+ EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
+ ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
+ KillMode=process
++TimeoutSec=300
+ SuccessExitStatus=15
+
+ [Install]
+--
+1.8.3.1
+
diff --git a/SOURCES/0286-glusterfs.spec.in-added-script-files-for-machine-com.patch b/SOURCES/0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
new file mode 100644
index 0000000..415a07b
--- /dev/null
+++ b/SOURCES/0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
@@ -0,0 +1,162 @@
+From 2a905a8ae6b4737e84543ad76b55f3346fa0f32c Mon Sep 17 00:00:00 2001
+From: Hari Gowtham
+Date: Tue, 27 Aug 2019 14:12:31 +0530
+Subject: [PATCH 286/297] glusterfs.spec.in: added script files for machine /
+ component stats
+
+Added the file extras/identify-hangs.sh to the code base and
+included the following files in the packaging:
+
+Quota Accounting issue:
+extras/quota/xattr_analysis.py (made available only on server)
+extras/quota/quota_fsck.py (made available only on server)
+extras/quota/log_accounting.sh
+
+Debugging Statedumps:
+extras/identify-hangs.sh
+
+Performance:
+extras/collect-system-stats.sh
+
+Note: the rest of the files were already included.
+
+Label: DOWNSTREAM ONLY.
+ +Change-Id: I2efb959865c3f381166c6a25c6eef613d13dd5ee +fixes: bz#1719171 +Signed-off-by: Hari Gowtham +Reviewed-on: https://code.engineering.redhat.com/gerrit/179515 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + extras/Makefile.am | 9 +++++++- + extras/identify-hangs.sh | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ + glusterfs.spec.in | 8 ++++++++ + 3 files changed, 69 insertions(+), 1 deletion(-) + create mode 100644 extras/identify-hangs.sh + +diff --git a/extras/Makefile.am b/extras/Makefile.am +index 983f014..8cbfda1 100644 +--- a/extras/Makefile.am ++++ b/extras/Makefile.am +@@ -30,9 +30,14 @@ endif + + scriptsdir = $(datadir)/glusterfs/scripts + scripts_SCRIPTS = thin-arbiter/setup-thin-arbiter.sh ++scripts_SCRIPTS += quota/log_accounting.sh ++scripts_SCRIPTS += collect-system-stats.sh ++scripts_SCRIPTS += identify-hangs.sh + if WITH_SERVER + scripts_SCRIPTS += post-upgrade-script-for-quota.sh \ + pre-upgrade-script-for-quota.sh stop-all-gluster-processes.sh ++scripts_SCRIPTS += quota/quota_fsck.py ++scripts_SCRIPTS += quota/xattr_analysis.py + if USE_SYSTEMD + scripts_SCRIPTS += control-cpu-load.sh + scripts_SCRIPTS += control-mem.sh +@@ -50,7 +55,9 @@ EXTRA_DIST = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.co + command-completion/Makefile command-completion/README \ + stop-all-gluster-processes.sh clang-checker.sh mount-shared-storage.sh \ + control-cpu-load.sh control-mem.sh group-distributed-virt \ +- thin-arbiter/thin-arbiter.vol thin-arbiter/setup-thin-arbiter.sh ++ thin-arbiter/thin-arbiter.vol thin-arbiter/setup-thin-arbiter.sh \ ++ quota/xattr_analysis.py quota/quota_fsck.py quota/log_accounting.sh \ ++ collect-system-stats.sh identify-hangs.sh + + if WITH_SERVER + install-data-local: +diff --git a/extras/identify-hangs.sh b/extras/identify-hangs.sh +new file mode 100644 +index 0000000..ebc6bf1 +--- /dev/null ++++ b/extras/identify-hangs.sh +@@ -0,0 +1,53 @@ ++#!/bin/bash ++function get_statedump_fnames_without_timestamps ++{ ++ ls | grep -E "[.]dump[.][0-9][0-9]*" | cut -f1-3 -d'.' | sort -u ++} ++ ++function get_non_uniq_fields ++{ ++ local statedump_fname_prefix=$1 ++ print_stack_lkowner_unique_in_one_line "$statedump_fname_prefix" | sort | uniq -c | grep -vE "^\s*1 " | awk '{$1="repeats="$1; print $0}' ++} ++ ++function print_stack_lkowner_unique_in_one_line ++{ ++ local statedump_fname_prefix=$1 ++ sed -e '/./{H;$!d;}' -e 'x;/unique=/!d;/stack=/!d;/lk-owner=/!d;/pid=/!d;' "${statedump_fname_prefix}"* | grep -E "(stack|lk-owner|unique|pid)=" | paste -d " " - - - - ++} ++ ++function get_stacks_that_appear_in_multiple_statedumps ++{ ++ #If a stack with same 'unique/lk-owner/stack' appears in multiple statedumps ++ #print the stack ++ local statedump_fname_prefix=$1 ++ while read -r non_uniq_stack; ++ do ++ if [ -z "$printed" ]; ++ then ++ printed="1" ++ fi ++ echo "$statedump_fname_prefix" "$non_uniq_stack" ++ done < <(get_non_uniq_fields "$statedump_fname_prefix") ++} ++ ++statedumpdir=${1} ++if [ -z "$statedumpdir" ]; ++then ++ echo "Usage: $0 " ++ exit 1 ++fi ++ ++if [ ! 
-d "$statedumpdir" ]; ++then ++ echo "$statedumpdir: Is not a directory" ++ echo "Usage: $0 " ++ exit 1 ++fi ++ ++cd "$statedumpdir" || exit 1 ++for statedump_fname_prefix in $(get_statedump_fnames_without_timestamps); ++do ++ get_stacks_that_appear_in_multiple_statedumps "$statedump_fname_prefix" ++done | column -t ++echo "NOTE: stacks with lk-owner=\"\"/lk-owner=0000000000000000/unique=0 may not be hung frames and need further inspection" >&2 +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 00603ec..3c2e2dc 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1107,6 +1107,9 @@ exit 0 + %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh + %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh + %endif ++%{_datadir}/glusterfs/scripts/identify-hangs.sh ++%{_datadir}/glusterfs/scripts/collect-system-stats.sh ++%{_datadir}/glusterfs/scripts/log_accounting.sh + # xlators that are needed on the client- and on the server-side + %dir %{_libdir}/glusterfs + %dir %{_libdir}/glusterfs/%{version}%{?prereltag} +@@ -1352,6 +1355,8 @@ exit 0 + %if ( 0%{!?_without_server:1} ) + %files server + %doc extras/clear_xattrs.sh ++%{_datadir}/glusterfs/scripts/xattr_analysis.py* ++%{_datadir}/glusterfs/scripts/quota_fsck.py* + # sysconf + %config(noreplace) %{_sysconfdir}/glusterfs + %exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol +@@ -1942,6 +1947,9 @@ fi + %endif + + %changelog ++* Tue Aug 27 2019 Hari Gowtham ++- Added scripts to collect machine stats and component stats (#1719171) ++ + * Tue Jun 18 2019 Jiffin Tony Thottan + - build glusterfs-ganesha for rhel 7 and above (#1720551) + +-- +1.8.3.1 + diff --git a/SOURCES/0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch b/SOURCES/0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch new file mode 100644 index 0000000..93bd3c9 --- /dev/null +++ b/SOURCES/0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch @@ -0,0 +1,372 @@ +From 546f412c155dd5aca2b3cd4202f80c9977b215dc Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Wed, 4 Sep 2019 12:06:34 +0530 +Subject: [PATCH 287/297] cluster/ec: Fail fsync/flush for files on update + size/version failure + +Problem: +If update size/version is not successful on the file, updates on the +same stripe could lead to data corruptions if the earlier un-aligned +write is not successful on all the bricks. Application won't have +any knowledge of this because update size/version happens in the +background. + +Fix: +Fail fsync/flush on fds that are opened before update-size-version +went bad. 
+ +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23355 +fixes: bz#1745107 +Change-Id: I9d323eddcda703bd27d55f340c4079d76e06e492 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/180672 +Tested-by: RHGS Build Bot +Reviewed-by: Ashish Pandey +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/basic/ec/ec-badfd.c | 124 +++++++++++++++++++++++++++++++++++ + tests/basic/ec/ec-badfd.t | 26 ++++++++ + xlators/cluster/ec/src/ec-common.c | 23 +++++++ + xlators/cluster/ec/src/ec-generic.c | 47 +++++++++++++ + xlators/cluster/ec/src/ec-helpers.c | 7 ++ + xlators/cluster/ec/src/ec-messages.h | 2 +- + xlators/cluster/ec/src/ec-types.h | 2 + + 7 files changed, 230 insertions(+), 1 deletion(-) + create mode 100644 tests/basic/ec/ec-badfd.c + create mode 100755 tests/basic/ec/ec-badfd.t + +diff --git a/tests/basic/ec/ec-badfd.c b/tests/basic/ec/ec-badfd.c +new file mode 100644 +index 0000000..8be23c1 +--- /dev/null ++++ b/tests/basic/ec/ec-badfd.c +@@ -0,0 +1,124 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++int ++fill_iov(struct iovec *iov, char fillchar, int count) ++{ ++ int ret = -1; ++ ++ iov->iov_base = malloc(count + 1); ++ if (iov->iov_base == NULL) { ++ return ret; ++ } else { ++ iov->iov_len = count; ++ ret = 0; ++ } ++ memset(iov->iov_base, fillchar, count); ++ memset(iov->iov_base + count, '\0', 1); ++ ++ return ret; ++} ++ ++int ++write_sync(glfs_t *fs, glfs_fd_t *glfd, int char_count) ++{ ++ ssize_t ret = -1; ++ int flags = O_RDWR; ++ struct iovec iov = {0}; ++ ++ ret = fill_iov(&iov, 'a', char_count); ++ if (ret) { ++ fprintf(stderr, "failed to create iov"); ++ goto out; ++ } ++ ++ ret = glfs_pwritev(glfd, &iov, 1, 0, flags); ++out: ++ if (ret < 0) { ++ fprintf(stderr, "glfs_pwritev failed, %d", errno); ++ } ++ return ret; ++} ++ ++int ++main(int argc, char *argv[]) ++{ ++ glfs_t *fs = NULL; ++ glfs_fd_t *fd = NULL; ++ int ret = 1; ++ char volume_cmd[4096] = {0}; ++ ++ if (argc != 4) { ++ fprintf(stderr, "Syntax: %s \n", argv[0]); ++ return 1; ++ } ++ ++ fs = glfs_new(argv[2]); ++ if (!fs) { ++ fprintf(stderr, "glfs_new: returned NULL\n"); ++ return 1; ++ } ++ ++ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007); ++ if (ret != 0) { ++ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret); ++ goto out; ++ } ++ ret = glfs_set_logging(fs, "/tmp/ec-badfd.log", 7); ++ if (ret != 0) { ++ fprintf(stderr, "glfs_set_logging: returned %d\n", ret); ++ goto out; ++ } ++ ret = glfs_init(fs); ++ if (ret != 0) { ++ fprintf(stderr, "glfs_init: returned %d\n", ret); ++ goto out; ++ } ++ ++ fd = glfs_open(fs, argv[3], O_RDWR); ++ if (fd == NULL) { ++ fprintf(stderr, "glfs_open: returned NULL\n"); ++ goto out; ++ } ++ ++ ret = write_sync(fs, fd, 16); ++ if (ret < 0) { ++ fprintf(stderr, "write_sync failed\n"); ++ } ++ ++ snprintf(volume_cmd, sizeof(volume_cmd), ++ "gluster --mode=script volume stop %s", argv[2]); ++ /*Stop the volume so that update-size-version fails*/ ++ system(volume_cmd); ++ sleep(8); /* 3 seconds more than eager-lock-timeout*/ ++ snprintf(volume_cmd, sizeof(volume_cmd), ++ "gluster --mode=script volume start %s", argv[2]); ++ system(volume_cmd); ++ sleep(8); /*wait for bricks to come up*/ ++ ret = glfs_fsync(fd, NULL, NULL); ++ if (ret == 0) { ++ fprintf(stderr, "fsync succeeded on a BADFD\n"); ++ exit(1); ++ } ++ ++ ret = glfs_close(fd); ++ if (ret == 0) { ++ fprintf(stderr, "flush succeeded on a BADFD\n"); ++ exit(1); ++ } ++ ret = 0; ++ 
++out: ++ unlink("/tmp/ec-badfd.log"); ++ glfs_fini(fs); ++ ++ return ret; ++} +diff --git a/tests/basic/ec/ec-badfd.t b/tests/basic/ec/ec-badfd.t +new file mode 100755 +index 0000000..56feb47 +--- /dev/null ++++ b/tests/basic/ec/ec-badfd.t +@@ -0,0 +1,26 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++ ++cleanup; ++ ++TEST glusterd ++TEST pidof glusterd ++ ++TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{1..6} ++TEST $CLI volume set $V0 performance.write-behind off ++TEST $CLI volume set $V0 disperse.eager-lock-timeout 5 ++ ++TEST $CLI volume start $V0 ++EXPECT 'Started' volinfo_field $V0 'Status' ++ ++TEST $GFS -s $H0 --volfile-id $V0 $M0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 ++TEST touch $M0/file ++ ++TEST build_tester $(dirname $0)/ec-badfd.c -lgfapi -Wall -O2 ++TEST $(dirname $0)/ec-badfd $H0 $V0 /file ++cleanup_tester $(dirname ${0})/ec-badfd ++ ++cleanup; +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index 5fb4610..92d4e5d 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -2255,6 +2255,23 @@ ec_unlock_lock(ec_lock_link_t *link) + } + } + ++void ++ec_inode_bad_inc(inode_t *inode, xlator_t *xl) ++{ ++ ec_inode_t *ctx = NULL; ++ ++ LOCK(&inode->lock); ++ { ++ ctx = __ec_inode_get(inode, xl); ++ if (ctx == NULL) { ++ goto unlock; ++ } ++ ctx->bad_version++; ++ } ++unlock: ++ UNLOCK(&inode->lock); ++} ++ + int32_t + ec_update_size_version_done(call_frame_t *frame, void *cookie, xlator_t *this, + int32_t op_ret, int32_t op_errno, dict_t *xattr, +@@ -2270,6 +2287,12 @@ ec_update_size_version_done(call_frame_t *frame, void *cookie, xlator_t *this, + ctx = lock->ctx; + + if (op_ret < 0) { ++ if (link->lock->fd == NULL) { ++ ec_inode_bad_inc(link->lock->loc.inode, this); ++ } else { ++ ec_inode_bad_inc(link->lock->fd->inode, this); ++ } ++ + gf_msg(fop->xl->name, fop_log_level(fop->id, op_errno), op_errno, + EC_MSG_SIZE_VERS_UPDATE_FAIL, + "Failed to update version and size. %s", ec_msg_str(fop)); +diff --git a/xlators/cluster/ec/src/ec-generic.c b/xlators/cluster/ec/src/ec-generic.c +index acc16b5..b019050 100644 +--- a/xlators/cluster/ec/src/ec-generic.c ++++ b/xlators/cluster/ec/src/ec-generic.c +@@ -150,6 +150,37 @@ ec_manager_flush(ec_fop_data_t *fop, int32_t state) + } + } + ++static int32_t ++ec_validate_fd(fd_t *fd, xlator_t *xl) ++{ ++ uint64_t iversion = 0; ++ uint64_t fversion = 0; ++ ec_inode_t *inode_ctx = NULL; ++ ec_fd_t *fd_ctx = NULL; ++ ++ LOCK(&fd->lock); ++ { ++ fd_ctx = __ec_fd_get(fd, xl); ++ if (fd_ctx) { ++ fversion = fd_ctx->bad_version; ++ } ++ } ++ UNLOCK(&fd->lock); ++ ++ LOCK(&fd->inode->lock); ++ { ++ inode_ctx = __ec_inode_get(fd->inode, xl); ++ if (inode_ctx) { ++ iversion = inode_ctx->bad_version; ++ } ++ } ++ UNLOCK(&fd->inode->lock); ++ if (fversion < iversion) { ++ return EBADF; ++ } ++ return 0; ++} ++ + void + ec_flush(call_frame_t *frame, xlator_t *this, uintptr_t target, + uint32_t fop_flags, fop_flush_cbk_t func, void *data, fd_t *fd, +@@ -165,6 +196,14 @@ ec_flush(call_frame_t *frame, xlator_t *this, uintptr_t target, + GF_VALIDATE_OR_GOTO(this->name, frame, out); + GF_VALIDATE_OR_GOTO(this->name, this->private, out); + ++ error = ec_validate_fd(fd, this); ++ if (error) { ++ gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, ++ "Failing %s on %s", gf_fop_list[GF_FOP_FLUSH], ++ fd->inode ? 
uuid_utoa(fd->inode->gfid) : ""); ++ goto out; ++ } ++ + fop = ec_fop_data_allocate(frame, this, GF_FOP_FLUSH, 0, target, fop_flags, + ec_wind_flush, ec_manager_flush, callback, data); + if (fop == NULL) { +@@ -381,6 +420,14 @@ ec_fsync(call_frame_t *frame, xlator_t *this, uintptr_t target, + GF_VALIDATE_OR_GOTO(this->name, frame, out); + GF_VALIDATE_OR_GOTO(this->name, this->private, out); + ++ error = ec_validate_fd(fd, this); ++ if (error) { ++ gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, ++ "Failing %s on %s", gf_fop_list[GF_FOP_FSYNC], ++ fd->inode ? uuid_utoa(fd->inode->gfid) : ""); ++ goto out; ++ } ++ + fop = ec_fop_data_allocate(frame, this, GF_FOP_FSYNC, 0, target, fop_flags, + ec_wind_fsync, ec_manager_fsync, callback, data); + if (fop == NULL) { +diff --git a/xlators/cluster/ec/src/ec-helpers.c b/xlators/cluster/ec/src/ec-helpers.c +index 43f6e3b..baac001 100644 +--- a/xlators/cluster/ec/src/ec-helpers.c ++++ b/xlators/cluster/ec/src/ec-helpers.c +@@ -753,6 +753,7 @@ __ec_fd_get(fd_t *fd, xlator_t *xl) + { + int i = 0; + ec_fd_t *ctx = NULL; ++ ec_inode_t *ictx = NULL; + uint64_t value = 0; + ec_t *ec = xl->private; + +@@ -775,6 +776,12 @@ __ec_fd_get(fd_t *fd, xlator_t *xl) + GF_FREE(ctx); + return NULL; + } ++ /* Only refering bad-version so no need for lock ++ * */ ++ ictx = __ec_inode_get(fd->inode, xl); ++ if (ictx) { ++ ctx->bad_version = ictx->bad_version; ++ } + } + } else { + ctx = (ec_fd_t *)(uintptr_t)value; +diff --git a/xlators/cluster/ec/src/ec-messages.h b/xlators/cluster/ec/src/ec-messages.h +index 7c28808..be86b37 100644 +--- a/xlators/cluster/ec/src/ec-messages.h ++++ b/xlators/cluster/ec/src/ec-messages.h +@@ -55,6 +55,6 @@ GLFS_MSGID(EC, EC_MSG_INVALID_CONFIG, EC_MSG_HEAL_FAIL, + EC_MSG_CONFIG_XATTR_INVALID, EC_MSG_EXTENSION, EC_MSG_EXTENSION_NONE, + EC_MSG_EXTENSION_UNKNOWN, EC_MSG_EXTENSION_UNSUPPORTED, + EC_MSG_EXTENSION_FAILED, EC_MSG_NO_GF, EC_MSG_MATRIX_FAILED, +- EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED); ++ EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED, EC_MSG_FD_BAD); + + #endif /* !_EC_MESSAGES_H_ */ +diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h +index 1c295c0..f27f2ec 100644 +--- a/xlators/cluster/ec/src/ec-types.h ++++ b/xlators/cluster/ec/src/ec-types.h +@@ -150,6 +150,7 @@ struct _ec_fd { + loc_t loc; + uintptr_t open; + int32_t flags; ++ uint64_t bad_version; + ec_fd_status_t fd_status[0]; + }; + +@@ -180,6 +181,7 @@ struct _ec_inode { + uint64_t dirty[2]; + struct list_head heal; + ec_stripe_list_t stripe_cache; ++ uint64_t bad_version; + }; + + typedef int32_t (*fop_heal_cbk_t)(call_frame_t *, void *, xlator_t *, int32_t, +-- +1.8.3.1 + diff --git a/SOURCES/0288-cluster-ec-Fix-coverity-issues.patch b/SOURCES/0288-cluster-ec-Fix-coverity-issues.patch new file mode 100644 index 0000000..8dd3fca --- /dev/null +++ b/SOURCES/0288-cluster-ec-Fix-coverity-issues.patch @@ -0,0 +1,77 @@ +From ccf7775760dd923e21341438725946737eb8d8af Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Sat, 7 Sep 2019 20:18:01 +0530 +Subject: [PATCH 288/297] cluster/ec: Fix coverity issues + +Fixed the following coverity issue in both flush/fsync +>>> CID 1404964: Null pointer dereferences (REVERSE_INULL) +>>> Null-checking "fd" suggests that it may be null, but it has already +been dereferenced on all paths leading to the check. 
+>>> if (fd != NULL) { +>>> fop->fd = fd_ref(fd); +>>> if (fop->fd == NULL) { +>>> gf_msg(this->name, GF_LOG_ERROR, 0, +>>> "Failed to reference a " +>>> "file descriptor."); + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23382 +fixes: bz#1745107 +Change-Id: I19c05d585e23f8fbfbc195d1f3775ec528eed671 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/180673 +Tested-by: RHGS Build Bot +Reviewed-by: Ashish Pandey +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-generic.c | 28 ++++++++++++++++------------ + 1 file changed, 16 insertions(+), 12 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-generic.c b/xlators/cluster/ec/src/ec-generic.c +index b019050..192bb02 100644 +--- a/xlators/cluster/ec/src/ec-generic.c ++++ b/xlators/cluster/ec/src/ec-generic.c +@@ -196,12 +196,14 @@ ec_flush(call_frame_t *frame, xlator_t *this, uintptr_t target, + GF_VALIDATE_OR_GOTO(this->name, frame, out); + GF_VALIDATE_OR_GOTO(this->name, this->private, out); + +- error = ec_validate_fd(fd, this); +- if (error) { +- gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, +- "Failing %s on %s", gf_fop_list[GF_FOP_FLUSH], +- fd->inode ? uuid_utoa(fd->inode->gfid) : ""); +- goto out; ++ if (fd) { ++ error = ec_validate_fd(fd, this); ++ if (error) { ++ gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, ++ "Failing %s on %s", gf_fop_list[GF_FOP_FLUSH], ++ fd->inode ? uuid_utoa(fd->inode->gfid) : ""); ++ goto out; ++ } + } + + fop = ec_fop_data_allocate(frame, this, GF_FOP_FLUSH, 0, target, fop_flags, +@@ -420,12 +422,14 @@ ec_fsync(call_frame_t *frame, xlator_t *this, uintptr_t target, + GF_VALIDATE_OR_GOTO(this->name, frame, out); + GF_VALIDATE_OR_GOTO(this->name, this->private, out); + +- error = ec_validate_fd(fd, this); +- if (error) { +- gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, +- "Failing %s on %s", gf_fop_list[GF_FOP_FSYNC], +- fd->inode ? uuid_utoa(fd->inode->gfid) : ""); +- goto out; ++ if (fd) { ++ error = ec_validate_fd(fd, this); ++ if (error) { ++ gf_msg(this->name, GF_LOG_ERROR, EBADF, EC_MSG_FD_BAD, ++ "Failing %s on %s", gf_fop_list[GF_FOP_FSYNC], ++ fd->inode ? 
uuid_utoa(fd->inode->gfid) : ""); ++ goto out; ++ } + } + + fop = ec_fop_data_allocate(frame, this, GF_FOP_FSYNC, 0, target, fop_flags, +-- +1.8.3.1 + diff --git a/SOURCES/0289-cluster-ec-quorum-count-implementation.patch b/SOURCES/0289-cluster-ec-quorum-count-implementation.patch new file mode 100644 index 0000000..6d24813 --- /dev/null +++ b/SOURCES/0289-cluster-ec-quorum-count-implementation.patch @@ -0,0 +1,721 @@ +From 0d54bb417e982a100ceefb5eab2a61a17e840f39 Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Thu, 5 Sep 2019 16:12:39 +0530 +Subject: [PATCH 289/297] cluster/ec: quorum-count implementation + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23366 +upstream-issue: #721 +fixes: bz#1748688 +Change-Id: I5333540e3c635ccf441cf1f4696e4c8986e38ea8 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/180674 +Tested-by: RHGS Build Bot +Reviewed-by: Ashish Pandey +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/glusterfs/globals.h | 4 +- + tests/basic/ec/ec-quorum-count-partial-failure.t | 50 +++++++ + tests/basic/ec/ec-quorum-count.t | 165 +++++++++++++++++++++++ + tests/ec.rc | 9 ++ + xlators/cluster/ec/src/ec-common.c | 13 ++ + xlators/cluster/ec/src/ec-common.h | 24 ++++ + xlators/cluster/ec/src/ec-dir-write.c | 57 ++++---- + xlators/cluster/ec/src/ec-inode-write.c | 61 ++++----- + xlators/cluster/ec/src/ec-types.h | 1 + + xlators/cluster/ec/src/ec.c | 13 ++ + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 46 +++++++ + 11 files changed, 383 insertions(+), 60 deletions(-) + create mode 100755 tests/basic/ec/ec-quorum-count-partial-failure.t + create mode 100644 tests/basic/ec/ec-quorum-count.t + +diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h +index 55476f6..bdc8b3d 100644 +--- a/libglusterfs/src/glusterfs/globals.h ++++ b/libglusterfs/src/glusterfs/globals.h +@@ -50,7 +50,7 @@ + 1 /* MIN is the fresh start op-version, mostly \ + should not change */ + #define GD_OP_VERSION_MAX \ +- GD_OP_VERSION_7_0 /* MAX VERSION is the maximum \ ++ GD_OP_VERSION_8_0 /* MAX VERSION is the maximum \ + count in VME table, should \ + keep changing with \ + introduction of newer \ +@@ -136,6 +136,8 @@ + + #define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */ + ++#define GD_OP_VERSION_8_0 80000 /* Op-version for GlusterFS 8.0 */ ++ + #include "glusterfs/xlator.h" + #include "glusterfs/options.h" + +diff --git a/tests/basic/ec/ec-quorum-count-partial-failure.t b/tests/basic/ec/ec-quorum-count-partial-failure.t +new file mode 100755 +index 0000000..79f5825 +--- /dev/null ++++ b/tests/basic/ec/ec-quorum-count-partial-failure.t +@@ -0,0 +1,50 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../../include.rc ++. 
$(dirname $0)/../../volume.rc
++
++#This test checks that partial failure of fop results in main fop failure only
++cleanup;
++
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
++TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
++TEST $CLI volume set $V0 performance.flush-behind off
++TEST $CLI volume start $V0
++TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=/$V0 $M0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
++
++TEST dd if=/dev/urandom of=$M0/a bs=12347 count=1
++TEST dd if=/dev/urandom of=$M0/b bs=12347 count=1
++TEST cp $M0/b $M0/c
++TEST fallocate -p -l 101 $M0/c
++TEST $CLI volume stop $V0
++TEST $CLI volume set $V0 debug.delay-gen posix;
++TEST $CLI volume set $V0 delay-gen.delay-duration 10000000;
++TEST $CLI volume set $V0 delay-gen.enable WRITE;
++TEST $CLI volume set $V0 delay-gen.delay-percentage 100
++TEST $CLI volume set $V0 disperse.quorum-count 6
++TEST $CLI volume start $V0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
++cksum=$(dd if=$M0/a bs=12345 count=1 | md5sum | awk '{print $1}')
++truncate -s 12345 $M0/a & #While write is waiting for 5 seconds, introduce failure
++fallocate -p -l 101 $M0/b &
++sleep 1
++TEST kill_brick $V0 $H0 $B0/${V0}0
++TEST wait
++TEST $CLI volume start $V0 force
++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
++EXPECT "12345" stat --format=%s $M0/a
++TEST kill_brick $V0 $H0 $B0/${V0}1
++TEST kill_brick $V0 $H0 $B0/${V0}2
++EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
++TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
++cksum_after_heal=$(dd if=$M0/a | md5sum | awk '{print $1}')
++TEST [[ $cksum == $cksum_after_heal ]]
++cksum=$(dd if=$M0/c | md5sum | awk '{print $1}')
++cksum_after_heal=$(dd if=$M0/b | md5sum | awk '{print $1}')
++TEST [[ $cksum == $cksum_after_heal ]]
++
++cleanup;
+diff --git a/tests/basic/ec/ec-quorum-count.t b/tests/basic/ec/ec-quorum-count.t
+new file mode 100644
+index 0000000..56b5329
+--- /dev/null
++++ b/tests/basic/ec/ec-quorum-count.t
+@@ -0,0 +1,165 @@
++ #!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../ec.rc
++
++cleanup
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
++TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
++TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
++TEST $CLI volume set $V0 performance.flush-behind off
++
++#Should fail on non-disperse volume
++TEST ! $CLI volume set $V1 disperse.quorum-count 5
++
++#Should fail on invalid values; only values within the valid range succeed
++TEST ! $CLI volume set $V0 disperse.quorum-count 0
++TEST ! $CLI volume set $V0 disperse.quorum-count -0
++TEST ! $CLI volume set $V0 disperse.quorum-count abc
++TEST ! $CLI volume set $V0 disperse.quorum-count 10abc
++TEST ! $CLI volume set $V0 disperse.quorum-count 1
++TEST ! $CLI volume set $V0 disperse.quorum-count 2
++TEST !
$CLI volume set $V0 disperse.quorum-count 3 ++TEST $CLI volume set $V0 disperse.quorum-count 4 ++TEST $CLI volume start $V0 ++TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 ++ ++#Test that the option is reflected in the mount ++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^4$" ec_option_value $V0 $M0 0 quorum-count ++TEST $CLI volume reset $V0 disperse.quorum-count ++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count ++TEST $CLI volume set $V0 disperse.quorum-count 6 ++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^6$" ec_option_value $V0 $M0 0 quorum-count ++ ++TEST touch $M0/a ++TEST touch $M0/data ++TEST setfattr -n trusted.def -v def $M0/a ++TEST touch $M0/src ++TEST touch $M0/del-me ++TEST mkdir $M0/dir1 ++TEST dd if=/dev/zero of=$M0/read-file bs=1M count=1 oflag=direct ++TEST dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct ++TEST gf_rm_file_and_gfid_link $B0/${V0}0 del-file ++#modify operations should fail as the file is not in quorum ++TEST ! dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct ++TEST kill_brick $V0 $H0 $B0/${V0}0 ++#Read should succeed even when quorum-count is not met ++TEST dd if=$M0/read-file of=/dev/null iflag=direct ++TEST ! touch $M0/a2 ++TEST ! mkdir $M0/dir2 ++TEST ! mknod $M0/b2 b 4 5 ++TEST ! ln -s $M0/a $M0/symlink ++TEST ! ln $M0/a $M0/link ++TEST ! mv $M0/src $M0/dst ++TEST ! rm -f $M0/del-me ++TEST ! rmdir $M0/dir1 ++TEST ! dd if=/dev/zero of=$M0/a bs=1M count=1 conv=notrunc ++TEST ! dd if=/dev/zero of=$M0/data bs=1M count=1 conv=notrunc ++TEST ! truncate -s 0 $M0/a ++TEST ! setfattr -n trusted.abc -v abc $M0/a ++TEST ! setfattr -x trusted.def $M0/a ++TEST ! chmod +x $M0/a ++TEST ! fallocate -l 2m -n $M0/a ++TEST ! fallocate -p -l 512k $M0/a ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0} ++ ++# reset the option and check whether the default redundancy count is ++# accepted or not. ++TEST $CLI volume reset $V0 disperse.quorum-count ++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count ++TEST touch $M0/a1 ++TEST touch $M0/data1 ++TEST setfattr -n trusted.def -v def $M0/a1 ++TEST touch $M0/src1 ++TEST touch $M0/del-me1 ++TEST mkdir $M0/dir11 ++TEST kill_brick $V0 $H0 $B0/${V0}0 ++TEST kill_brick $V0 $H0 $B0/${V0}1 ++TEST touch $M0/a21 ++TEST mkdir $M0/dir21 ++TEST mknod $M0/b21 b 4 5 ++TEST ln -s $M0/a1 $M0/symlink1 ++TEST ln $M0/a1 $M0/link1 ++TEST mv $M0/src1 $M0/dst1 ++TEST rm -f $M0/del-me1 ++TEST rmdir $M0/dir11 ++TEST dd if=/dev/zero of=$M0/a1 bs=1M count=1 conv=notrunc ++TEST dd if=/dev/zero of=$M0/data1 bs=1M count=1 conv=notrunc ++TEST truncate -s 0 $M0/a1 ++TEST setfattr -n trusted.abc -v abc $M0/a1 ++TEST setfattr -x trusted.def $M0/a1 ++TEST chmod +x $M0/a1 ++TEST fallocate -l 2m -n $M0/a1 ++TEST fallocate -p -l 512k $M0/a1 ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 ++ ++TEST touch $M0/a2 ++TEST touch $M0/data2 ++TEST setfattr -n trusted.def -v def $M0/a1 ++TEST touch $M0/src2 ++TEST touch $M0/del-me2 ++TEST mkdir $M0/dir12 ++TEST kill_brick $V0 $H0 $B0/${V0}0 ++TEST kill_brick $V0 $H0 $B0/${V0}1 ++TEST kill_brick $V0 $H0 $B0/${V0}2 ++TEST ! touch $M0/a22 ++TEST ! mkdir $M0/dir22 ++TEST ! mknod $M0/b22 b 4 5 ++TEST ! ln -s $M0/a2 $M0/symlink2 ++TEST ! ln $M0/a2 $M0/link2 ++TEST ! mv $M0/src2 $M0/dst2 ++TEST ! rm -f $M0/del-me2 ++TEST ! rmdir $M0/dir12 ++TEST ! 
dd if=/dev/zero of=$M0/a2 bs=1M count=1 conv=notrunc ++TEST ! dd if=/dev/zero of=$M0/data2 bs=1M count=1 conv=notrunc ++TEST ! truncate -s 0 $M0/a2 ++TEST ! setfattr -n trusted.abc -v abc $M0/a2 ++TEST ! setfattr -x trusted.def $M0/a2 ++TEST ! chmod +x $M0/a2 ++TEST ! fallocate -l 2m -n $M0/a2 ++TEST ! fallocate -p -l 512k $M0/a2 ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 ++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0} ++ ++# Set quorum-count to 5 and kill 1 brick and the fops should pass ++TEST $CLI volume set $V0 disperse.quorum-count 5 ++EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^5$" ec_option_value $V0 $M0 0 quorum-count ++TEST touch $M0/a3 ++TEST touch $M0/data3 ++TEST setfattr -n trusted.def -v def $M0/a3 ++TEST touch $M0/src3 ++TEST touch $M0/del-me3 ++TEST mkdir $M0/dir13 ++TEST kill_brick $V0 $H0 $B0/${V0}0 ++TEST touch $M0/a31 ++TEST mkdir $M0/dir31 ++TEST mknod $M0/b31 b 4 5 ++TEST ln -s $M0/a3 $M0/symlink3 ++TEST ln $M0/a3 $M0/link3 ++TEST mv $M0/src3 $M0/dst3 ++TEST rm -f $M0/del-me3 ++TEST rmdir $M0/dir13 ++TEST dd if=/dev/zero of=$M0/a3 bs=1M count=1 conv=notrunc ++TEST dd if=/dev/zero of=$M0/data3 bs=1M count=1 conv=notrunc ++TEST truncate -s 0 $M0/a3 ++TEST setfattr -n trusted.abc -v abc $M0/a3 ++TEST setfattr -x trusted.def $M0/a3 ++TEST chmod +x $M0/a3 ++TEST fallocate -l 2m -n $M0/a3 ++TEST fallocate -p -l 512k $M0/a3 ++TEST dd if=/dev/urandom of=$M0/heal-file bs=1M count=1 oflag=direct ++cksum_before_heal="$(md5sum $M0/heal-file | awk '{print $1}')" ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0} ++TEST kill_brick $V0 $H0 $B0/${V0}4 ++TEST kill_brick $V0 $H0 $B0/${V0}5 ++cksum_after_heal=$(dd if=$M0/heal-file iflag=direct | md5sum | awk '{print $1}') ++TEST [[ $cksum_before_heal == $cksum_after_heal ]] ++cleanup; +diff --git a/tests/ec.rc b/tests/ec.rc +index 04405ec..f18752f 100644 +--- a/tests/ec.rc ++++ b/tests/ec.rc +@@ -7,3 +7,12 @@ function ec_up_status() + local ec_id=$3 + grep -E "^up =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'=' + } ++ ++function ec_option_value() ++{ ++ local v=$1 ++ local m=$2 ++ local ec_id=$3 ++ local opt=$4 ++ grep -E "^$opt =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'='| awk '{print $1}' ++} +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index 92d4e5d..2e59180 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -707,6 +707,19 @@ ec_child_select(ec_fop_data_t *fop) + return 0; + } + ++ if (!fop->parent && fop->lock_count && ++ (fop->locks[0].update[EC_DATA_TXN] || ++ fop->locks[0].update[EC_METADATA_TXN])) { ++ if (ec->quorum_count && (num < ec->quorum_count)) { ++ gf_msg(ec->xl->name, GF_LOG_ERROR, 0, EC_MSG_CHILDS_INSUFFICIENT, ++ "Insufficient available children " ++ "for this request (have %d, need " ++ "%d). %s", ++ num, ec->quorum_count, ec_msg_str(fop)); ++ return 0; ++ } ++ } ++ + return 1; + } + +diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h +index 3c69471..eab86ee 100644 +--- a/xlators/cluster/ec/src/ec-common.h ++++ b/xlators/cluster/ec/src/ec-common.h +@@ -26,6 +26,30 @@ typedef enum { EC_DATA_TXN, EC_METADATA_TXN } ec_txn_t; + + #define EC_FLAG_LOCK_SHARED 0x0001 + ++#define QUORUM_CBK(fn, fop, frame, cookie, this, op_ret, op_errno, params...) 
\ ++ do { \ ++ ec_t *__ec = fop->xl->private; \ ++ int32_t __op_ret = 0; \ ++ int32_t __op_errno = 0; \ ++ int32_t __success_count = gf_bits_count(fop->good); \ ++ \ ++ __op_ret = op_ret; \ ++ __op_errno = op_errno; \ ++ if (!fop->parent && frame && \ ++ (GF_CLIENT_PID_SELF_HEALD != frame->root->pid) && \ ++ __ec->quorum_count && (__success_count < __ec->quorum_count) && \ ++ op_ret >= 0) { \ ++ __op_ret = -1; \ ++ __op_errno = EIO; \ ++ gf_msg(__ec->xl->name, GF_LOG_ERROR, 0, \ ++ EC_MSG_CHILDS_INSUFFICIENT, \ ++ "Insufficient available children for this request " \ ++ "(have %d, need %d). %s", \ ++ __success_count, __ec->quorum_count, ec_msg_str(fop)); \ ++ } \ ++ fn(frame, cookie, this, __op_ret, __op_errno, params); \ ++ } while (0) ++ + enum _ec_xattrop_flags { + EC_FLAG_XATTROP, + EC_FLAG_DATA_DIRTY, +diff --git a/xlators/cluster/ec/src/ec-dir-write.c b/xlators/cluster/ec/src/ec-dir-write.c +index 0b8ee21..8192462 100644 +--- a/xlators/cluster/ec/src/ec-dir-write.c ++++ b/xlators/cluster/ec/src/ec-dir-write.c +@@ -218,10 +218,10 @@ ec_manager_create(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.create != NULL) { +- fop->cbks.create(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, fop->fd, fop->loc[0].inode, +- &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.create, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, fop->fd, ++ fop->loc[0].inode, &cbk->iatt[0], &cbk->iatt[1], ++ &cbk->iatt[2], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -390,9 +390,10 @@ ec_manager_link(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.link != NULL) { +- fop->cbks.link(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, fop->loc[0].inode, &cbk->iatt[0], +- &cbk->iatt[1], &cbk->iatt[2], cbk->xdata); ++ QUORUM_CBK(fop->cbks.link, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, fop->loc[0].inode, ++ &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], ++ cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -569,9 +570,10 @@ ec_manager_mkdir(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.mkdir != NULL) { +- fop->cbks.mkdir(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, fop->loc[0].inode, &cbk->iatt[0], +- &cbk->iatt[1], &cbk->iatt[2], cbk->xdata); ++ QUORUM_CBK(fop->cbks.mkdir, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, fop->loc[0].inode, ++ &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], ++ cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -773,9 +775,10 @@ ec_manager_mknod(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.mknod != NULL) { +- fop->cbks.mknod(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, fop->loc[0].inode, &cbk->iatt[0], +- &cbk->iatt[1], &cbk->iatt[2], cbk->xdata); ++ QUORUM_CBK(fop->cbks.mknod, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, fop->loc[0].inode, ++ &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], ++ cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -931,10 +934,10 @@ ec_manager_rename(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.rename != NULL) { +- fop->cbks.rename(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- &cbk->iatt[2], &cbk->iatt[3], &cbk->iatt[4], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.rename, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], 
&cbk->iatt[2], &cbk->iatt[3], ++ &cbk->iatt[4], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -1083,9 +1086,9 @@ ec_manager_rmdir(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.rmdir != NULL) { +- fop->cbks.rmdir(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.rmdir, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -1237,10 +1240,10 @@ ec_manager_symlink(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.symlink != NULL) { +- fop->cbks.symlink(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, fop->loc[0].inode, +- &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.symlink, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, fop->loc[0].inode, ++ &cbk->iatt[0], &cbk->iatt[1], &cbk->iatt[2], ++ cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -1392,9 +1395,9 @@ ec_manager_unlink(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.unlink != NULL) { +- fop->cbks.unlink(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.unlink, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +diff --git a/xlators/cluster/ec/src/ec-inode-write.c b/xlators/cluster/ec/src/ec-inode-write.c +index 8bfa3b4..2dbb4db 100644 +--- a/xlators/cluster/ec/src/ec-inode-write.c ++++ b/xlators/cluster/ec/src/ec-inode-write.c +@@ -185,26 +185,26 @@ ec_xattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret, + switch (fop->id) { + case GF_FOP_SETXATTR: + if (fop->cbks.setxattr) { +- fop->cbks.setxattr(frame, cookie, this, op_ret, op_errno, +- xdata); ++ QUORUM_CBK(fop->cbks.setxattr, fop, frame, cookie, this, op_ret, ++ op_errno, xdata); + } + break; + case GF_FOP_REMOVEXATTR: + if (fop->cbks.removexattr) { +- fop->cbks.removexattr(frame, cookie, this, op_ret, op_errno, +- xdata); ++ QUORUM_CBK(fop->cbks.removexattr, fop, frame, cookie, this, ++ op_ret, op_errno, xdata); + } + break; + case GF_FOP_FSETXATTR: + if (fop->cbks.fsetxattr) { +- fop->cbks.fsetxattr(frame, cookie, this, op_ret, op_errno, +- xdata); ++ QUORUM_CBK(fop->cbks.fsetxattr, fop, frame, cookie, this, ++ op_ret, op_errno, xdata); + } + break; + case GF_FOP_FREMOVEXATTR: + if (fop->cbks.fremovexattr) { +- fop->cbks.fremovexattr(frame, cookie, this, op_ret, op_errno, +- xdata); ++ QUORUM_CBK(fop->cbks.fremovexattr, fop, frame, cookie, this, ++ op_ret, op_errno, xdata); + } + break; + } +@@ -494,16 +494,15 @@ ec_manager_setattr(ec_fop_data_t *fop, int32_t state) + + if (fop->id == GF_FOP_SETATTR) { + if (fop->cbks.setattr != NULL) { +- fop->cbks.setattr(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], +- &cbk->iatt[1], cbk->xdata); ++ QUORUM_CBK(fop->cbks.setattr, fop, fop->req_frame, fop, ++ fop->xl, cbk->op_ret, cbk->op_errno, ++ &cbk->iatt[0], &cbk->iatt[1], cbk->xdata); + } + } else { + if (fop->cbks.fsetattr != NULL) { +- fop->cbks.fsetattr(fop->req_frame, fop, fop->xl, +- cbk->op_ret, cbk->op_errno, +- &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.fsetattr, fop, fop->req_frame, fop, ++ fop->xl, cbk->op_ret, cbk->op_errno, ++ &cbk->iatt[0], &cbk->iatt[1], cbk->xdata); + } + } + 
+@@ -994,9 +993,9 @@ ec_manager_fallocate(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.fallocate != NULL) { +- fop->cbks.fallocate(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.fallocate, fop, fop->req_frame, fop, ++ fop->xl, cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -1247,9 +1246,9 @@ ec_manager_discard(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.discard != NULL) { +- fop->cbks.discard(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.discard, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +@@ -1477,17 +1476,15 @@ ec_manager_truncate(ec_fop_data_t *fop, int32_t state) + + if (fop->id == GF_FOP_TRUNCATE) { + if (fop->cbks.truncate != NULL) { +- fop->cbks.truncate(fop->req_frame, fop, fop->xl, +- cbk->op_ret, cbk->op_errno, +- &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.truncate, fop, fop->req_frame, fop, ++ fop->xl, cbk->op_ret, cbk->op_errno, ++ &cbk->iatt[0], &cbk->iatt[1], cbk->xdata); + } + } else { + if (fop->cbks.ftruncate != NULL) { +- fop->cbks.ftruncate(fop->req_frame, fop, fop->xl, +- cbk->op_ret, cbk->op_errno, +- &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.ftruncate, fop, fop->req_frame, fop, ++ fop->xl, cbk->op_ret, cbk->op_errno, ++ &cbk->iatt[0], &cbk->iatt[1], cbk->xdata); + } + } + +@@ -2245,9 +2242,9 @@ ec_manager_writev(ec_fop_data_t *fop, int32_t state) + GF_ASSERT(cbk != NULL); + + if (fop->cbks.writev != NULL) { +- fop->cbks.writev(fop->req_frame, fop, fop->xl, cbk->op_ret, +- cbk->op_errno, &cbk->iatt[0], &cbk->iatt[1], +- cbk->xdata); ++ QUORUM_CBK(fop->cbks.writev, fop, fop->req_frame, fop, fop->xl, ++ cbk->op_ret, cbk->op_errno, &cbk->iatt[0], ++ &cbk->iatt[1], cbk->xdata); + } + + return EC_STATE_LOCK_REUSE; +diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h +index f27f2ec..ea4f6ad 100644 +--- a/xlators/cluster/ec/src/ec-types.h ++++ b/xlators/cluster/ec/src/ec-types.h +@@ -654,6 +654,7 @@ struct _ec { + gf_boolean_t optimistic_changelog; + gf_boolean_t parallel_writes; + uint32_t stripe_cache; ++ uint32_t quorum_count; + uint32_t background_heals; + uint32_t heal_wait_qlen; + uint32_t self_heal_window_size; /* max size of read/writes */ +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index 3c8013e..19094c4 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -285,6 +285,7 @@ reconfigure(xlator_t *this, dict_t *options) + GF_OPTION_RECONF("parallel-writes", ec->parallel_writes, options, bool, + failed); + GF_OPTION_RECONF("stripe-cache", ec->stripe_cache, options, uint32, failed); ++ GF_OPTION_RECONF("quorum-count", ec->quorum_count, options, uint32, failed); + ret = 0; + if (ec_assign_read_policy(ec, read_policy)) { + ret = -1; +@@ -720,6 +721,7 @@ init(xlator_t *this) + failed); + GF_OPTION_INIT("parallel-writes", ec->parallel_writes, bool, failed); + GF_OPTION_INIT("stripe-cache", ec->stripe_cache, uint32, failed); ++ GF_OPTION_INIT("quorum-count", ec->quorum_count, uint32, failed); + + this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this); + if (!this->itable) +@@ -1402,6 +1404,7 @@ ec_dump_private(xlator_t *this) + 
gf_proc_dump_write("heal-waiters", "%d", ec->heal_waiters); + gf_proc_dump_write("read-policy", "%s", ec_read_policies[ec->read_policy]); + gf_proc_dump_write("parallel-writes", "%d", ec->parallel_writes); ++ gf_proc_dump_write("quorum-count", "%u", ec->quorum_count); + + snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s.stats.stripe_cache", + this->type, this->name); +@@ -1672,6 +1675,16 @@ struct volume_options options[] = { + "lead to extra memory consumption, maximum " + "(cache size * stripe size) Bytes per open file."}, + { ++ .key = {"quorum-count"}, ++ .type = GF_OPTION_TYPE_INT, ++ .default_value = "0", ++ .description = ++ "This option can be used to define how many successes on" ++ "the bricks constitute a success to the application. This" ++ " count should be in the range" ++ "[disperse-data-count, disperse-count] (inclusive)", ++ }, ++ { + .key = {NULL}, + }, + }; +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 8ce338e..7ca47a6 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -1128,6 +1128,42 @@ out: + } + + static int ++validate_disperse_quorum_count(glusterd_volinfo_t *volinfo, dict_t *dict, ++ char *key, char *value, char **op_errstr) ++{ ++ int ret = -1; ++ int quorum_count = 0; ++ int data_count = 0; ++ ++ ret = gf_string2int(value, &quorum_count); ++ if (ret) { ++ gf_asprintf(op_errstr, ++ "%s is not an integer. %s expects a " ++ "valid integer value.", ++ value, key); ++ goto out; ++ } ++ ++ if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) { ++ gf_asprintf(op_errstr, "Cannot set %s for a non-disperse volume.", key); ++ ret = -1; ++ goto out; ++ } ++ ++ data_count = volinfo->disperse_count - volinfo->redundancy_count; ++ if (quorum_count < data_count || quorum_count > volinfo->disperse_count) { ++ gf_asprintf(op_errstr, "%d for %s is out of range [%d - %d]", ++ quorum_count, key, data_count, volinfo->disperse_count); ++ ret = -1; ++ goto out; ++ } ++ ++ ret = 0; ++out: ++ return ret; ++} ++ ++static int + validate_parallel_readdir(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, + char *value, char **op_errstr) + { +@@ -3663,6 +3699,16 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .type = NO_DOC, + .op_version = GD_OP_VERSION_3_13_0, + .flags = VOLOPT_FLAG_CLIENT_OPT}, ++ {.key = "disperse.quorum-count", ++ .voltype = "cluster/disperse", ++ .type = NO_DOC, ++ .op_version = GD_OP_VERSION_8_0, ++ .validate_fn = validate_disperse_quorum_count, ++ .description = "This option can be used to define how many successes on" ++ "the bricks constitute a success to the application. This" ++ " count should be in the range" ++ "[disperse-data-count, disperse-count] (inclusive)", ++ .flags = VOLOPT_FLAG_CLIENT_OPT}, + { + .key = "features.sdfs", + .voltype = "features/sdfs", +-- +1.8.3.1 + diff --git a/SOURCES/0290-glusterd-tag-disperse.quorum-count-for-31306.patch b/SOURCES/0290-glusterd-tag-disperse.quorum-count-for-31306.patch new file mode 100644 index 0000000..01ea8c2 --- /dev/null +++ b/SOURCES/0290-glusterd-tag-disperse.quorum-count-for-31306.patch @@ -0,0 +1,84 @@ +From 312da653ac80b537af06139f8d83a63180c72461 Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Tue, 10 Sep 2019 14:04:17 +0530 +Subject: [PATCH 290/297] glusterd: tag disperse.quorum-count for 31306 + +In upstream disperse.quorum-count is makred for release-8 +latest new op-version is 31306. 
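+
+For reference, the GD_OP_VERSION_* values encode a release X.Y.Z as
+X*10000 + Y*100 + Z, which is why 3.13.6 maps to 31306 and 8.0 maps
+to 80000. A minimal sketch of that mapping (the GD_OP_VERSION helper
+below is only an illustration, not an existing glusterfs macro):
+
+    /* illustrative only: encode release x.y.z as an op-version */
+    #define GD_OP_VERSION(x, y, z) ((x)*10000 + (y)*100 + (z))
+
+    /* GD_OP_VERSION(3, 13, 6) == 31306, GD_OP_VERSION(8, 0, 0) == 80000 */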
+ +Label: DOWNSTREAM ONLY + +fixes: bz#1748688 +Change-Id: I88fdbd56ce3b8475b5ec670659adaa9d11c01d97 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/180675 +Reviewed-by: Ashish Pandey +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/glusterfs/globals.h | 12 ++++++------ + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +- + 2 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h +index bdc8b3d..e218285 100644 +--- a/libglusterfs/src/glusterfs/globals.h ++++ b/libglusterfs/src/glusterfs/globals.h +@@ -50,19 +50,19 @@ + 1 /* MIN is the fresh start op-version, mostly \ + should not change */ + #define GD_OP_VERSION_MAX \ +- GD_OP_VERSION_8_0 /* MAX VERSION is the maximum \ ++ GD_OP_VERSION_7_0 /* MAX VERSION is the maximum \ + count in VME table, should \ + keep changing with \ + introduction of newer \ + versions */ + +-#define GD_OP_VERSION_RHS_3_0 30000 /* Op-Version of RHS 3.0 */ ++#define GD_OP_VERSION_RHS_3_0 30000 /* Op-Version of RHS 3.0 */ + + #define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_RHS_3_0 + +-#define GD_OP_VERSION_RHS_2_1_5 20105 /* RHS 2.1 update 5 */ ++#define GD_OP_VERSION_RHS_2_1_5 20105 /* RHS 2.1 update 5 */ + +-#define GD_OP_VERSION_RHS_3_0_4 30004 /* Op-Version of RHS 3.0.4 */ ++#define GD_OP_VERSION_RHS_3_0_4 30004 /* Op-Version of RHS 3.0.4 */ + + #define GD_OP_VERSION_3_7_0 30700 /* Op-version for GlusterFS 3.7.0 */ + +@@ -124,6 +124,8 @@ + + #define GD_OP_VERSION_3_13_5 31305 /* Op-version for GlusterFS 3.13.5 */ + ++#define GD_OP_VERSION_3_13_6 31306 /* Op-version for GlusterFS 3.13.6 */ ++ + #define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */ + + #define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */ +@@ -136,8 +138,6 @@ + + #define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */ + +-#define GD_OP_VERSION_8_0 80000 /* Op-version for GlusterFS 8.0 */ +- + #include "glusterfs/xlator.h" + #include "glusterfs/options.h" + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 7ca47a6..16601a2 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -3702,7 +3702,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + {.key = "disperse.quorum-count", + .voltype = "cluster/disperse", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_8_0, ++ .op_version = GD_OP_VERSION_3_13_6, + .validate_fn = validate_disperse_quorum_count, + .description = "This option can be used to define how many successes on" + "the bricks constitute a success to the application. 
This" +-- +1.8.3.1 + diff --git a/SOURCES/0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch b/SOURCES/0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch new file mode 100644 index 0000000..efdbc23 --- /dev/null +++ b/SOURCES/0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch @@ -0,0 +1,106 @@ +From 87d8070f80487322a1736846a78725fd88f8de34 Mon Sep 17 00:00:00 2001 +From: Pranith Kumar K +Date: Tue, 20 Aug 2019 13:27:24 +0530 +Subject: [PATCH 291/297] cluster/ec: Mark release only when it is acquired + +Problem: +Mount-1 Mount-2 +1)Tries to acquire lock on 'dir1' 1)Tries to acquire lock on 'dir1' +2)Lock is granted on brick-0 2)Lock gets EAGAIN on brick-0 and + leads to blocking lock on brick-0 +3)Gets a lock-contention 3) Doesn't matter what happens on mount-2 + notification, marks lock->release from here on. + to true. +4)New fop comes on 'dir1' which will + be put in frozen list as lock->release + is set to true. +5) Lock acquisition from step-2 fails because +3 bricks went down in 4+2 setup. + +Fop on mount-1 which is put in frozen list will hang because no codepath will +move it from frozen list to any other list and the lock will not be retried. + +Fix: +Don't set lock->release to true if lock is not acquired at the time of +lock-contention-notification + +Upstream-patch: https://review.gluster.org/c/glusterfs/+/23272 +fixes: bz#1731896 +Change-Id: Ie6630db8735ccf372cc54b873a3a3aed7a6082b7 +Signed-off-by: Pranith Kumar K +Reviewed-on: https://code.engineering.redhat.com/gerrit/180870 +Tested-by: RHGS Build Bot +Reviewed-by: Ashish Pandey +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec-common.c | 20 ++++++++++++++++++-- + xlators/cluster/ec/src/ec-types.h | 1 + + 2 files changed, 19 insertions(+), 2 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c +index 2e59180..5cae37b 100644 +--- a/xlators/cluster/ec/src/ec-common.c ++++ b/xlators/cluster/ec/src/ec-common.c +@@ -1867,6 +1867,10 @@ ec_lock_acquired(ec_lock_link_t *link) + LOCK(&lock->loc.inode->lock); + + lock->acquired = _gf_true; ++ if (lock->contention) { ++ lock->release = _gf_true; ++ lock->contention = _gf_false; ++ } + + ec_lock_update_fd(lock, fop); + ec_lock_wake_shared(lock, &list); +@@ -1892,15 +1896,20 @@ ec_locked(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret, + ec_lock_link_t *link = NULL; + ec_lock_t *lock = NULL; + ++ link = fop->data; ++ lock = link->lock; + if (op_ret >= 0) { +- link = fop->data; +- lock = link->lock; + lock->mask = lock->good_mask = fop->good; + lock->healing = 0; + + ec_lock_acquired(link); + ec_lock(fop->parent); + } else { ++ LOCK(&lock->loc.inode->lock); ++ { ++ lock->contention = _gf_false; ++ } ++ UNLOCK(&lock->loc.inode->lock); + gf_msg(this->name, GF_LOG_WARNING, op_errno, EC_MSG_PREOP_LOCK_FAILED, + "Failed to complete preop lock"); + } +@@ -2547,6 +2556,13 @@ ec_lock_release(ec_t *ec, inode_t *inode) + gf_msg_debug(ec->xl->name, 0, "Releasing inode %p due to lock contention", + inode); + ++ if (!lock->acquired) { ++ /* This happens if some bricks already got the lock while inodelk is in ++ * progress. Set release to true after lock is acquired*/ ++ lock->contention = _gf_true; ++ goto done; ++ } ++ + /* The lock is not marked to be released, so the frozen list should be + * empty. 
*/
+     GF_ASSERT(list_empty(&lock->frozen));
+diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
+index ea4f6ad..34a9768 100644
+--- a/xlators/cluster/ec/src/ec-types.h
++++ b/xlators/cluster/ec/src/ec-types.h
+@@ -267,6 +267,7 @@ struct _ec_lock {
+     uint32_t refs_pending; /* Refs assigned to fops being prepared */
+     uint32_t waiting_flags; /*Track xattrop/dirty marking*/
+     gf_boolean_t acquired;
++    gf_boolean_t contention;
+     gf_boolean_t unlock_now;
+     gf_boolean_t release;
+     gf_boolean_t query;
+--
+1.8.3.1
+
diff --git a/SOURCES/0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch b/SOURCES/0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
new file mode 100644
index 0000000..07fc8f4
--- /dev/null
+++ b/SOURCES/0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
@@ -0,0 +1,72 @@
+From 769263ad422e3c1069de0994ff2274044982b242 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Sun, 1 Sep 2019 12:01:09 +0530
+Subject: [PATCH 292/297] rpc: Update address family if it is not provide in
+ cmd-line arguments
+
+Problem: After enabling transport-type inet6 and passing an IPv6
+         transport.socket.bind-address in glusterd.vol, clients are
+         not started.
+
+Solution: Update the address-family based on the remote-address for
+          all gluster client processes.
+
+> Change-Id: Iaa3588cd87cebc45231bfd675745c1a457dc9b31
+> Fixes: bz#1747746
+> Credits: Amgad Saleh
+> Signed-off-by: Mohit Agrawal
+> (Cherry picked from commit 80b8cfe3f1386606bada97a76a0cad7acdf6b877)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23340/)
+
+Change-Id: Iaa3588cd87cebc45231bfd675745c1a457dc9b31
+BUG: 1750241
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/181184
+Tested-by: Mohit Agrawal
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ rpc/rpc-transport/socket/src/name.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/rpc/rpc-transport/socket/src/name.c b/rpc/rpc-transport/socket/src/name.c
+index 7f18cc4..b473f3b 100644
+--- a/rpc/rpc-transport/socket/src/name.c
++++ b/rpc/rpc-transport/socket/src/name.c
+@@ -214,6 +214,7 @@ af_inet_client_get_remote_sockaddr(rpc_transport_t *this,
+     uint16_t remote_port = 0;
+     struct addrinfo *addr_info = NULL;
+     int32_t ret = 0;
++    struct in6_addr serveraddr;
+
+     remote_host_data = dict_get(options, "remote-host");
+     if (remote_host_data == NULL) {
+@@ -249,6 +250,13 @@ af_inet_client_get_remote_sockaddr(rpc_transport_t *this,
+         goto err;
+     }
+
++    /* Need to update the transport address family if the address family is
++       not provided in the command-line arguments
++     */
++    if (inet_pton(AF_INET6, remote_host, &serveraddr)) {
++        sockaddr->sa_family = AF_INET6;
++    }
++
+     /* TODO: gf_resolve is a blocking call.
kick in some + non blocking dns techniques */ + ret = gf_resolve_ip6(remote_host, remote_port, sockaddr->sa_family, +@@ -522,7 +530,10 @@ socket_client_get_remote_sockaddr(rpc_transport_t *this, + ret = -1; + } + +- if (*sa_family == AF_UNSPEC) { ++ /* Address-family is updated based on remote_host in ++ af_inet_client_get_remote_sockaddr ++ */ ++ if (*sa_family != sockaddr->sa_family) { + *sa_family = sockaddr->sa_family; + } + +-- +1.8.3.1 + diff --git a/SOURCES/0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch b/SOURCES/0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch new file mode 100644 index 0000000..23120cb --- /dev/null +++ b/SOURCES/0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch @@ -0,0 +1,69 @@ +From 8f89aef9691b0806d7487525c6a54a1a615c8bc1 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Mon, 2 Sep 2019 10:46:10 +0530 +Subject: [PATCH 293/297] glusterd: IPV6 hostname address is not parsed + correctly + +Problem: IPV6 hostname address is not parsed correctly in function + glusterd_check_brick_order + +Solution: Update the code to parse hostname address + +> Change-Id: Ifb2f83f9c6e987b2292070e048e97eeb51b728ab +> Fixes: bz#1747746 +> Credits: Amgad Saleh +> Signed-off-by: Mohit Agrawal +> (Cherry picked from commit 6563ffb04d7ba51a89726e7c5bbb85c7dbc685b5) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23341/) + +Change-Id: Ifb2f83f9c6e987b2292070e048e97eeb51b728ab +BUG: 1750241 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/181185 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 16 +++++++++++----- + 1 file changed, 11 insertions(+), 5 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 1ea8ba6..076bc80 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -95,6 +95,10 @@ glusterd_check_brick_order(dict_t *dict, char *err_str) + int32_t type = GF_CLUSTER_TYPE_NONE; + int32_t sub_count = 0; + struct addrinfo *ai_info = NULL; ++ char brick_addr[128] = { ++ 0, ++ }; ++ int addrlen = 0; + + const char failed_string[2048] = + "Failed to perform brick order " +@@ -182,15 +186,17 @@ glusterd_check_brick_order(dict_t *dict, char *err_str) + brick_list_dup = tmpptr; + if (brick == NULL) + goto check_failed; +- brick = strtok_r(brick, ":", &tmpptr); +- if (brick == NULL) ++ tmpptr = strrchr(brick, ':'); ++ if (tmpptr == NULL) + goto check_failed; +- ret = getaddrinfo(brick, NULL, NULL, &ai_info); ++ addrlen = strlen(brick) - strlen(tmpptr); ++ strncpy(brick_addr, brick, addrlen); ++ brick_addr[addrlen] = '\0'; ++ ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info); + if (ret != 0) { + ret = 0; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL, +- "unable to resolve " +- "host name"); ++ "unable to resolve host name for addr %s", brick_addr); + goto out; + } + ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t)); +-- +1.8.3.1 + diff --git a/SOURCES/0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch b/SOURCES/0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch new file mode 100644 index 0000000..1665185 --- /dev/null +++ b/SOURCES/0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch @@ -0,0 +1,59 @@ +From 2fa5476b95d4547bdde50f2281bf58b7db24e37a Mon Sep 17 00:00:00 2001 +From: Aravinda VK +Date: Mon, 16 Sep 2019 
10:04:26 +0530
+Subject: [PATCH 294/297] eventsapi: Set IPv4/IPv6 family based on input IP
+
+server.sin_family was set to AF_INET while creating the socket
+connection, which failed if the input address was IPv6 (`::1`).
+
+With this patch, sin_family is set by reading the ai_family of the
+`getaddrinfo` result.
+
+> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23423/
+
+>Fixes: bz#1752330
+>Change-Id: I499f957b432842fa989c698f6e5b25b7016084eb
+>Signed-off-by: Aravinda VK
+
+BUG: 1732443
+Change-Id: I499f957b432842fa989c698f6e5b25b7016084eb
+Signed-off-by: Aravinda VK
+Reviewed-on: https://code.engineering.redhat.com/gerrit/181197
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ libglusterfs/src/events.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
+index 2509767..9d33783 100644
+--- a/libglusterfs/src/events.c
++++ b/libglusterfs/src/events.c
+@@ -42,6 +42,7 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+     struct addrinfo hints;
+     struct addrinfo *result = NULL;
+     xlator_t *this = THIS;
++    int sin_family = AF_INET;
+
+     /* Global context */
+     ctx = THIS->ctx;
+@@ -75,13 +76,15 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
+             ret = EVENT_ERROR_RESOLVE;
+             goto out;
+         }
++
++        sin_family = result->ai_family;
+     } else {
+         /* Localhost, Use the defined IP for localhost */
+         host = gf_strdup(EVENT_HOST);
+     }
+
+     /* Socket Configurations */
+-    server.sin_family = AF_INET;
++    server.sin_family = sin_family;
+     server.sin_port = htons(EVENT_PORT);
+     ret = inet_pton(server.sin_family, host, &server.sin_addr);
+     if (ret <= 0) {
+--
+1.8.3.1
+
diff --git a/SOURCES/0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch b/SOURCES/0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
new file mode 100644
index 0000000..9d3820d
--- /dev/null
+++ b/SOURCES/0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
@@ -0,0 +1,1164 @@
+From d5ce2300f77c25b38a076d4dd6a5521e82c56172 Mon Sep 17 00:00:00 2001
+From: Kotresh HR
+Date: Mon, 29 Jul 2019 18:30:42 +0530
+Subject: [PATCH 295/297] ctime/rebalance: Heal ctime xattr on directory during
+ rebalance
+
+After add-brick and rebalance, the ctime xattr is not present
+on rebalanced directories on the new brick. This patch fixes the
+same.
+
+Note that ctime still doesn't support consistent time across
+distribute sub-volumes.
+
+This patch also fixes the in-memory inconsistency of time attributes
+when metadata is self healed.
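+
+The xattr involved is "trusted.glusterfs.mdata". A minimal standalone
+sketch to dump its raw value from a brick path (illustrative only; the
+tests below check the same thing with getfattr and the
+tests/utils/get-mdata-xattr.c helper, and the value layout is treated
+as opaque here):
+
+    #include <stdio.h>
+    #include <sys/types.h>
+    #include <sys/xattr.h>
+
+    int main(int argc, char *argv[])
+    {
+        char buf[256];
+        ssize_t len, i;
+
+        if (argc != 2)
+            return 1;
+        /* read the ctime metadata xattr directly off the brick path */
+        len = lgetxattr(argv[1], "trusted.glusterfs.mdata", buf, sizeof(buf));
+        if (len < 0) {
+            perror("lgetxattr");
+            return 1;
+        }
+        for (i = 0; i < len; i++)
+            printf("%02x", (unsigned char)buf[i]);
+        printf("\n");
+        return 0;
+    }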
+ +Backport of: + > Patch: https://review.gluster.org/23127/ + > Change-Id: Ia20506f1839021bf61d4753191e7dc34b31bb2df + > fixes: bz#1734026 + > Signed-off-by: Kotresh HR + +Change-Id: Ia20506f1839021bf61d4753191e7dc34b31bb2df +BUG: 1728673 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/181105 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + tests/basic/afr/split-brain-healing-ctime.t | 253 +++++++++++++++++++++ + tests/basic/afr/split-brain-healing.t | 1 + + tests/basic/ctime/ctime-ec-heal.t | 71 ++++++ + tests/basic/ctime/ctime-ec-rebalance.t | 44 ++++ + tests/basic/ctime/ctime-rep-heal.t | 71 ++++++ + tests/basic/ctime/ctime-rep-rebalance.t | 42 ++++ + .../bug-1734370-entry-heal-restore-time.t | 84 +++++++ + tests/volume.rc | 15 +- + xlators/cluster/afr/src/afr-self-heal-common.c | 3 +- + xlators/cluster/afr/src/afr-self-heal-entry.c | 2 + + xlators/cluster/dht/src/dht-common.c | 1 + + xlators/cluster/ec/src/ec-heal.c | 7 +- + xlators/storage/posix/src/posix-entry-ops.c | 8 +- + xlators/storage/posix/src/posix-helpers.c | 31 ++- + xlators/storage/posix/src/posix-inode-fd-ops.c | 57 ++--- + xlators/storage/posix/src/posix-metadata.c | 65 +++++- + xlators/storage/posix/src/posix-metadata.h | 7 + + xlators/storage/posix/src/posix.h | 5 +- + 18 files changed, 714 insertions(+), 53 deletions(-) + create mode 100644 tests/basic/afr/split-brain-healing-ctime.t + create mode 100644 tests/basic/ctime/ctime-ec-heal.t + create mode 100644 tests/basic/ctime/ctime-ec-rebalance.t + create mode 100644 tests/basic/ctime/ctime-rep-heal.t + create mode 100644 tests/basic/ctime/ctime-rep-rebalance.t + create mode 100644 tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t + +diff --git a/tests/basic/afr/split-brain-healing-ctime.t b/tests/basic/afr/split-brain-healing-ctime.t +new file mode 100644 +index 0000000..1ca18e3 +--- /dev/null ++++ b/tests/basic/afr/split-brain-healing-ctime.t +@@ -0,0 +1,253 @@ ++#!/bin/bash ++ ++#Test the split-brain resolution CLI commands. ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++ ++function get_replicate_subvol_number { ++ local filename=$1 ++ #get_backend_paths ++ if [ -f $B0/${V0}1/$filename ] ++ then ++ echo 0 ++ elif [ -f $B0/${V0}3/$filename ] ++ then echo 1 ++ else ++ echo -1 ++ fi ++} ++ ++cleanup; ++ ++AREQUAL_PATH=$(dirname $0)/../../utils ++GET_MDATA_PATH=$(dirname $0)/../../utils ++CFLAGS="" ++test "`uname -s`" != "Linux" && { ++ CFLAGS="$CFLAGS -lintl"; ++} ++build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS ++build_tester $GET_MDATA_PATH/get-mdata-xattr.c ++ ++TEST glusterd ++TEST pidof glusterd ++TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4} ++TEST $CLI volume set $V0 cluster.self-heal-daemon off ++TEST $CLI volume set $V0 cluster.data-self-heal off ++TEST $CLI volume set $V0 cluster.metadata-self-heal off ++TEST $CLI volume set $V0 cluster.entry-self-heal off ++TEST $CLI volume set $V0 ctime on ++TEST $CLI volume start $V0 ++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 ++ ++cd $M0 ++for i in {1..10} ++do ++ echo "Initial content">>file$i ++done ++ ++replica_0_files_list=(`ls $B0/${V0}1|grep -v '^\.'`) ++replica_1_files_list=(`ls $B0/${V0}3|grep -v '^\.'`) ++ ++############ Create data split-brain in the files. 
########################### ++TEST kill_brick $V0 $H0 $B0/${V0}1 ++for file in ${!replica_0_files_list[*]} ++do ++ echo "B1 is down">>${replica_0_files_list[$file]} ++done ++TEST kill_brick $V0 $H0 $B0/${V0}3 ++for file in ${!replica_1_files_list[*]} ++do ++ echo "B3 is down">>${replica_1_files_list[$file]} ++done ++ ++SMALLER_FILE_SIZE=$(stat -c %s file1) ++ ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 ++ ++TEST kill_brick $V0 $H0 $B0/${V0}2 ++for file in ${!replica_0_files_list[*]} ++do ++ echo "B2 is down">>${replica_0_files_list[$file]} ++ echo "appending more content to make it the bigger file">>${replica_0_files_list[$file]} ++done ++TEST kill_brick $V0 $H0 $B0/${V0}4 ++for file in ${!replica_1_files_list[*]} ++do ++ echo "B4 is down">>${replica_1_files_list[$file]} ++ echo "appending more content to make it the bigger file">>${replica_1_files_list[$file]} ++done ++ ++BIGGER_FILE_SIZE=$(stat -c %s file1) ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3 ++ ++ ++############### Acessing the files should now give EIO. ############################### ++TEST ! cat file1 ++TEST ! cat file2 ++TEST ! cat file3 ++TEST ! cat file4 ++TEST ! cat file5 ++TEST ! cat file6 ++TEST ! cat file7 ++TEST ! cat file8 ++TEST ! cat file9 ++TEST ! cat file10 ++################### ++TEST $CLI volume set $V0 cluster.self-heal-daemon on ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 ++ ++################ Heal file1 using the bigger-file option ############## ++$CLI volume heal $V0 split-brain bigger-file /file1 ++EXPECT "0" echo $? ++EXPECT $BIGGER_FILE_SIZE stat -c %s file1 ++ ++################ Heal file2 using the bigger-file option and its gfid ############## ++subvolume=$(get_replicate_subvol_number file2) ++if [ $subvolume == 0 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file2) ++elif [ $subvolume == 1 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file2) ++fi ++GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)" ++$CLI volume heal $V0 split-brain bigger-file $GFIDSTR ++EXPECT "0" echo $? ++ ++################ Heal file3 using the source-brick option ############## ++################ Use the brick having smaller file size as source ####### ++subvolume=$(get_replicate_subvol_number file3) ++if [ $subvolume == 0 ] ++then ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file3 ++elif [ $subvolume == 1 ] ++then ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3 ++fi ++EXPECT "0" echo $? 
++EXPECT $SMALLER_FILE_SIZE stat -c %s file3 ++ ++################ Heal file4 using the source-brick option and it's gfid ############## ++################ Use the brick having smaller file size as source ####### ++subvolume=$(get_replicate_subvol_number file4) ++if [ $subvolume == 0 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file4) ++ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)" ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 $GFIDSTR ++elif [ $subvolume == 1 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file4) ++ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)" ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 $GFIDSTR ++fi ++EXPECT "0" echo $? ++EXPECT $SMALLER_FILE_SIZE stat -c %s file4 ++ ++# With ctime enabled, the ctime xattr ("trusted.glusterfs.mdata") gets healed ++# as part of metadata heal. So mtime would be same, hence it can't be healed ++# using 'latest-mtime' policy, use 'source-brick' option instead. ++################ Heal file5 using the source-brick option ############## ++subvolume=$(get_replicate_subvol_number file5) ++if [ $subvolume == 0 ] ++then ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file5 ++elif [ $subvolume == 1 ] ++then ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 /file5 ++fi ++EXPECT "0" echo $? ++ ++if [ $subvolume == 0 ] ++then ++ mtime1_after_heal=$(get_mtime $B0/${V0}1/file5) ++ mtime2_after_heal=$(get_mtime $B0/${V0}2/file5) ++elif [ $subvolume == 1 ] ++then ++ mtime1_after_heal=$(get_mtime $B0/${V0}3/file5) ++ mtime2_after_heal=$(get_mtime $B0/${V0}4/file5) ++fi ++ ++#TODO: To below comparisons on full sub-second resolution ++ ++TEST [ $mtime1_after_heal -eq $mtime2_after_heal ] ++ ++mtime_mount_after_heal=$(stat -c %Y file5) ++ ++TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ] ++ ++################ Heal file6 using the source-brick option and its gfid ############## ++subvolume=$(get_replicate_subvol_number file6) ++if [ $subvolume == 0 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6) ++ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)" ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR ++elif [ $subvolume == 1 ] ++then ++ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6) ++ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)" ++ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 $GFIDSTR ++fi ++EXPECT "0" echo $? ++ ++if [ $subvolume == 0 ] ++then ++ mtime1_after_heal=$(get_mtime $B0/${V0}1/file6) ++ mtime2_after_heal=$(get_mtime $B0/${V0}2/file6) ++elif [ $subvolume == 1 ] ++then ++ mtime1_after_heal=$(get_mtime $B0/${V0}3/file6) ++ mtime2_after_heal=$(get_mtime $B0/${V0}4/file6) ++fi ++ ++#TODO: To below comparisons on full sub-second resolution ++ ++TEST [ $mtime1_after_heal -eq $mtime2_after_heal ] ++ ++mtime_mount_after_heal=$(stat -c %Y file6) ++ ++TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ] ++ ++################ Heal remaining SB'ed files of replica_0 using B1 as source ############## ++$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 ++EXPECT "0" echo $? ++ ++################ Heal remaining SB'ed files of replica_1 using B3 as source ############## ++$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 ++EXPECT "0" echo $? ++ ++############### Reading the files should now succeed. 
############################### ++TEST cat file1 ++TEST cat file2 ++TEST cat file3 ++TEST cat file4 ++TEST cat file5 ++TEST cat file6 ++TEST cat file7 ++TEST cat file8 ++TEST cat file9 ++TEST cat file10 ++ ++################ File contents on the bricks must be same. ################################ ++TEST diff <(arequal-checksum -p $B0/$V01 -i .glusterfs) <(arequal-checksum -p $B0/$V02 -i .glusterfs) ++TEST diff <(arequal-checksum -p $B0/$V03 -i .glusterfs) <(arequal-checksum -p $B0/$V04 -i .glusterfs) ++ ++############### Trying to heal files not in SB should fail. ############################### ++$CLI volume heal $V0 split-brain bigger-file /file1 ++EXPECT "1" echo $? ++$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3 ++EXPECT "1" echo $? ++ ++cd - ++TEST rm $AREQUAL_PATH/arequal-checksum ++TEST rm $GET_MDATA_PATH/get-mdata-xattr ++cleanup +diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t +index 78553e6..315e815 100644 +--- a/tests/basic/afr/split-brain-healing.t ++++ b/tests/basic/afr/split-brain-healing.t +@@ -35,6 +35,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon off + TEST $CLI volume set $V0 cluster.data-self-heal off + TEST $CLI volume set $V0 cluster.metadata-self-heal off + TEST $CLI volume set $V0 cluster.entry-self-heal off ++TEST $CLI volume set $V0 ctime off + TEST $CLI volume start $V0 + TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 + +diff --git a/tests/basic/ctime/ctime-ec-heal.t b/tests/basic/ctime/ctime-ec-heal.t +new file mode 100644 +index 0000000..1cb4516 +--- /dev/null ++++ b/tests/basic/ctime/ctime-ec-heal.t +@@ -0,0 +1,71 @@ ++#!/bin/bash ++# ++# This will test self healing of ctime xattr 'trusted.glusterfs.mdata' ++# ++### ++ ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++. 
$(dirname $0)/../../afr.rc ++ ++cleanup ++ ++#cleate and start volume ++TEST glusterd ++TEST pidof glusterd ++TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{1..3} ++TEST $CLI volume set $V0 ctime on ++TEST $CLI volume start $V0 ++ ++#Mount the volume ++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0; ++ ++# Create files ++mkdir $M0/dir1 ++echo "Initial content" > $M0/file1 ++ ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1 ++ ++# Kill brick ++TEST kill_brick $V0 $H0 $B0/${V0}3 ++ ++echo "B3 is down" >> $M0/file1 ++echo "Change dir1 time attributes" > $M0/dir1/dir1_file1 ++echo "Entry heal file" > $M0/entry_heal_file1 ++mkdir $M0/entry_heal_dir1 ++ ++# Check xattr ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1 ++ ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1 ++ ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1 ++ ++TEST $CLI volume start $V0 force ++$CLI volume heal $V0 ++ ++# Check xattr ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1 ++ ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1 ++ ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1 ++ ++cleanup; +diff --git a/tests/basic/ctime/ctime-ec-rebalance.t b/tests/basic/ctime/ctime-ec-rebalance.t +new file mode 100644 +index 0000000..caccdc1 +--- /dev/null ++++ b/tests/basic/ctime/ctime-ec-rebalance.t +@@ -0,0 +1,44 @@ ++#!/bin/bash ++# ++# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance ++# ++### ++ ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../volume.rc ++. 
$(dirname $0)/../../fallocate.rc
++
++cleanup
++
++#create and start volume
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..5}
++TEST $CLI volume set $V0 ctime on
++TEST $CLI volume start $V0
++
++#Mount the volume
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
++EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
++
++# Create files
++mkdir $M0/dir1
++echo "test data" > $M0/dir1/file1
++
++# Add brick
++TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
++
++#Trigger rebalance
++TEST $CLI volume rebalance $V0 start force
++EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
++
++#Verify ctime xattr heal on directory
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
++
++b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
++
++cleanup;
+diff --git a/tests/basic/ctime/ctime-rep-heal.t b/tests/basic/ctime/ctime-rep-heal.t
+new file mode 100644
+index 0000000..ba8b08a
+--- /dev/null
++++ b/tests/basic/ctime/ctime-rep-heal.t
+@@ -0,0 +1,71 @@
++#!/bin/bash
++#
++# This will test self healing of ctime xattr 'trusted.glusterfs.mdata'
++#
++###
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup
++
++#create and start volume
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
++TEST $CLI volume set $V0 ctime on
++TEST $CLI volume start $V0
++
++#Mount the volume
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
++
++# Create files
++mkdir $M0/dir1
++echo "Initial content" > $M0/file1
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
++
++# Kill brick
++TEST kill_brick $V0 $H0 $B0/${V0}3
++
++echo "B3 is down" >> $M0/file1
++echo "Change dir1 time attributes" > $M0/dir1/dir1_file1
++echo "Entry heal file" > $M0/entry_heal_file1
++mkdir $M0/entry_heal_dir1
++
++# Check xattr
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
++
++TEST $CLI volume start $V0 force
++$CLI volume heal $V0
++
++# Check xattr
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
++
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
++
++cleanup;
+diff --git a/tests/basic/ctime/ctime-rep-rebalance.t b/tests/basic/ctime/ctime-rep-rebalance.t
+new file mode 100644
+index 0000000..dd9743e
+--- /dev/null
++++ b/tests/basic/ctime/ctime-rep-rebalance.t
+@@ -0,0 +1,42 @@
++#!/bin/bash
++#
++# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance
++#
++###
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup
++
++#create and start volume
++TEST glusterd
++TEST pidof glusterd
++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
++TEST $CLI volume set $V0 ctime on
++TEST $CLI volume start $V0
++
++#Mount the volume
++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
++
++# Create files
++mkdir $M0/dir1
++
++# Add brick
++TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
++
++#Trigger rebalance
++TEST $CLI volume rebalance $V0 start force
++EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
++
++#Verify ctime xattr heal on directory
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
++
++b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
++
++cleanup;
+diff --git a/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
+new file mode 100644
+index 0000000..298d6ed
+--- /dev/null
++++ b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
+@@ -0,0 +1,84 @@
++#!/bin/bash
++
++. $(dirname $0)/../../include.rc
++. $(dirname $0)/../../volume.rc
++. $(dirname $0)/../../afr.rc
++
++cleanup;
++
++function time_stamps_match {
++    path=$1
++    mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
++    atime_source_b0=$(get_atime $B0/${V0}0/$path)
++    mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
++    atime_source_b2=$(get_atime $B0/${V0}2/$path)
++    mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
++    atime_sink_b1=$(get_atime $B0/${V0}1/$path)
++
++    #The same brick must be the source of heal for both atime and mtime.
++ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 && $atime_source_b0 -eq $atime_sink_b1 ) || \ ++ ( $mtime_source_b2 -eq $mtime_sink_b1 && $atime_source_b2 -eq $atime_sink_b1 ) ]] ++ then ++ echo "Y" ++ else ++ echo "N" ++ fi ++ ++} ++ ++# Test that the parent dir's timestamps are restored during entry-heal. ++GET_MDATA_PATH=$(dirname $0)/../../utils ++build_tester $GET_MDATA_PATH/get-mdata-xattr.c ++ ++TEST glusterd; ++TEST pidof glusterd; ++TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}; ++TEST $CLI volume set $V0 ctime on ++TEST $CLI volume start $V0; ++ ++TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 ++ ++############################################################################### ++TEST mkdir $M0/DIR ++TEST kill_brick $V0 $H0 $B0/${V0}1 ++TEST touch $M0/DIR/FILE ++ ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 ++TEST $CLI volume heal $V0 ++EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0 ++ ++EXPECT "Y" time_stamps_match DIR ++ctime_source1=$(get_ctime $B0/${V0}0/$path) ++ctime_source2=$(get_ctime $B0/${V0}2/$path) ++ctime_sink=$(get_ctime $B0/${V0}1/$path) ++TEST [ $ctime_source1 -eq $ctime_sink ] ++TEST [ $ctime_source2 -eq $ctime_sink ] ++ ++############################################################################### ++# Repeat the test with ctime feature disabled. ++TEST $CLI volume set $V0 features.ctime off ++TEST mkdir $M0/DIR2 ++TEST kill_brick $V0 $H0 $B0/${V0}1 ++TEST touch $M0/DIR2/FILE ++ ++TEST $CLI volume start $V0 force ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 ++EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 ++EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 ++TEST $CLI volume heal $V0 ++EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0 ++ ++EXPECT "Y" time_stamps_match DIR2 ++ ++TEST rm $GET_MDATA_PATH/get-mdata-xattr ++cleanup; +diff --git a/tests/volume.rc b/tests/volume.rc +index 76a8fd4..9a002d9 100644 +--- a/tests/volume.rc ++++ b/tests/volume.rc +@@ -371,6 +371,19 @@ function get_gfid2path { + getfattr -h --only-values -n glusterfs.gfidtopath $path 2>/dev/null + } + ++function get_mdata { ++ local path=$1 ++ getfattr -h -e hex -n trusted.glusterfs.mdata $path 2>/dev/null | grep "trusted.glusterfs.mdata" | cut -f2 -d'=' ++} ++ ++function get_mdata_count { ++ getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | wc -l ++} ++ ++function get_mdata_uniq_count { ++ getfattr -d -m . 
-e hex $@ 2>/dev/null | grep mdata | uniq | wc -l ++} ++ + function get_xattr_key { + local key=$1 + local path=$2 +@@ -925,7 +938,7 @@ function get_ctime { + local time=$(get-mdata-xattr -c $1) + if [ $time == "-1" ]; + then +- echo $(stat -c %Z $2) ++ echo $(stat -c %Z $1) + else + echo $time + fi +diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c +index b38085a..81ef38a 100644 +--- a/xlators/cluster/afr/src/afr-self-heal-common.c ++++ b/xlators/cluster/afr/src/afr-self-heal-common.c +@@ -513,7 +513,8 @@ afr_selfheal_restore_time(call_frame_t *frame, xlator_t *this, inode_t *inode, + + AFR_ONLIST(healed_sinks, frame, afr_sh_generic_fop_cbk, setattr, &loc, + &replies[source].poststat, +- (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME), NULL); ++ (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME | GF_SET_ATTR_CTIME), ++ NULL); + + loc_wipe(&loc); + +diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c +index e07b521..35b600f 100644 +--- a/xlators/cluster/afr/src/afr-self-heal-entry.c ++++ b/xlators/cluster/afr/src/afr-self-heal-entry.c +@@ -1032,6 +1032,8 @@ unlock: + goto postop_unlock; + } + ++ afr_selfheal_restore_time(frame, this, fd->inode, source, healed_sinks, ++ locked_replies); + ret = afr_selfheal_undo_pending( + frame, this, fd->inode, sources, sinks, healed_sinks, undid_pending, + AFR_ENTRY_TRANSACTION, locked_replies, postop_lock); +diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c +index 219b072..99cccd6 100644 +--- a/xlators/cluster/dht/src/dht-common.c ++++ b/xlators/cluster/dht/src/dht-common.c +@@ -115,6 +115,7 @@ char *xattrs_to_heal[] = {"user.", + QUOTA_LIMIT_KEY, + QUOTA_LIMIT_OBJECTS_KEY, + GF_SELINUX_XATTR_KEY, ++ GF_XATTR_MDATA_KEY, + NULL}; + + char *dht_dbg_vxattrs[] = {DHT_DBG_HASHED_SUBVOL_PATTERN, NULL}; +diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c +index 0f0f398..06a7016 100644 +--- a/xlators/cluster/ec/src/ec-heal.c ++++ b/xlators/cluster/ec/src/ec-heal.c +@@ -2301,9 +2301,10 @@ ec_restore_time_and_adjust_versions(call_frame_t *frame, ec_t *ec, fd_t *fd, + + loc.inode = inode_ref(fd->inode); + gf_uuid_copy(loc.gfid, fd->inode->gfid); +- ret = cluster_setattr(ec->xl_list, healed_sinks, ec->nodes, replies, +- output, frame, ec->xl, &loc, &source_buf, +- GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME, NULL); ++ ret = cluster_setattr( ++ ec->xl_list, healed_sinks, ec->nodes, replies, output, frame, ++ ec->xl, &loc, &source_buf, ++ GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME | GF_SET_ATTR_CTIME, NULL); + EC_INTERSECT(healed_sinks, healed_sinks, output, ec->nodes); + if (EC_COUNT(healed_sinks, ec->nodes) == 0) { + ret = -ENOTCONN; +diff --git a/xlators/storage/posix/src/posix-entry-ops.c b/xlators/storage/posix/src/posix-entry-ops.c +index 34ee2b8..283b305 100644 +--- a/xlators/storage/posix/src/posix-entry-ops.c ++++ b/xlators/storage/posix/src/posix-entry-ops.c +@@ -500,7 +500,7 @@ post_op: + posix_set_gfid2path_xattr(this, real_path, loc->pargfid, loc->name); + } + +- op_ret = posix_entry_create_xattr_set(this, real_path, xdata); ++ op_ret = posix_entry_create_xattr_set(this, loc, real_path, xdata); + if (op_ret) { + if (errno != EEXIST) + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_XATTR_FAILED, +@@ -828,7 +828,7 @@ posix_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode, + "setting ACLs on %s failed ", real_path); + } + +- op_ret = posix_entry_create_xattr_set(this, real_path, xdata); ++ 
op_ret = posix_entry_create_xattr_set(this, loc, real_path, xdata); + if (op_ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_XATTR_FAILED, + "setting xattrs on %s failed", real_path); +@@ -1529,7 +1529,7 @@ posix_symlink(call_frame_t *frame, xlator_t *this, const char *linkname, + } + + ignore: +- op_ret = posix_entry_create_xattr_set(this, real_path, xdata); ++ op_ret = posix_entry_create_xattr_set(this, loc, real_path, xdata); + if (op_ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_XATTR_FAILED, + "setting xattrs on %s failed ", real_path); +@@ -2167,7 +2167,7 @@ posix_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags, + posix_set_gfid2path_xattr(this, real_path, loc->pargfid, loc->name); + } + ignore: +- op_ret = posix_entry_create_xattr_set(this, real_path, xdata); ++ op_ret = posix_entry_create_xattr_set(this, loc, real_path, xdata); + if (op_ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_XATTR_FAILED, + "setting xattrs on %s failed ", real_path); +diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c +index d143d4c..6a1a35c 100644 +--- a/xlators/storage/posix/src/posix-helpers.c ++++ b/xlators/storage/posix/src/posix-helpers.c +@@ -1188,11 +1188,15 @@ posix_dump_buffer(xlator_t *this, const char *real_path, const char *key, + #endif + + int +-posix_handle_pair(xlator_t *this, const char *real_path, char *key, ++posix_handle_pair(xlator_t *this, loc_t *loc, const char *real_path, char *key, + data_t *value, int flags, struct iatt *stbuf) + { + int sys_ret = -1; + int ret = 0; ++ int op_errno = 0; ++ struct mdata_iatt mdata_iatt = { ++ 0, ++ }; + #ifdef GF_DARWIN_HOST_OS + const int error_code = EINVAL; + #else +@@ -1216,6 +1220,23 @@ posix_handle_pair(xlator_t *this, const char *real_path, char *key, + /* ignore this key value pair */ + ret = 0; + goto out; ++ } else if (!strncmp(key, GF_XATTR_MDATA_KEY, strlen(key))) { ++ /* This is either by rebalance or self heal. Create the xattr if it's ++ * not present. Compare and update the larger value if the xattr is ++ * already present. 
++ */ ++ if (loc == NULL) { ++ ret = -EINVAL; ++ goto out; ++ } ++ posix_mdata_iatt_from_disk(&mdata_iatt, ++ (posix_mdata_disk_t *)value->data); ++ ret = posix_set_mdata_xattr_legacy_files(this, loc->inode, real_path, ++ &mdata_iatt, &op_errno); ++ if (ret != 0) { ++ ret = -op_errno; ++ } ++ goto out; + } else { + sys_ret = sys_lsetxattr(real_path, key, value->data, value->len, flags); + #ifdef GF_DARWIN_HOST_OS +@@ -1810,8 +1831,8 @@ _handle_entry_create_keyvalue_pair(dict_t *d, char *k, data_t *v, void *tmp) + return 0; + } + +- ret = posix_handle_pair(filler->this, filler->real_path, k, v, XATTR_CREATE, +- filler->stbuf); ++ ret = posix_handle_pair(filler->this, filler->loc, filler->real_path, k, v, ++ XATTR_CREATE, filler->stbuf); + if (ret < 0) { + errno = -ret; + return -1; +@@ -1820,7 +1841,8 @@ _handle_entry_create_keyvalue_pair(dict_t *d, char *k, data_t *v, void *tmp) + } + + int +-posix_entry_create_xattr_set(xlator_t *this, const char *path, dict_t *dict) ++posix_entry_create_xattr_set(xlator_t *this, loc_t *loc, const char *path, ++ dict_t *dict) + { + int ret = -1; + +@@ -1834,6 +1856,7 @@ posix_entry_create_xattr_set(xlator_t *this, const char *path, dict_t *dict) + filler.this = this; + filler.real_path = path; + filler.stbuf = NULL; ++ filler.loc = loc; + + ret = dict_foreach(dict, _handle_entry_create_keyvalue_pair, &filler); + +diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c +index e0ea85b..a2a518f 100644 +--- a/xlators/storage/posix/src/posix-inode-fd-ops.c ++++ b/xlators/storage/posix/src/posix-inode-fd-ops.c +@@ -429,22 +429,9 @@ posix_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, + &frame->root->ctime, stbuf, valid); + } + +- if (valid & GF_SET_ATTR_CTIME && !priv->ctime) { +- /* +- * If ctime is not enabled, we have no means to associate an +- * arbitrary ctime with the file, so as a fallback, we ignore +- * the ctime payload and update the file ctime to current time +- * (which is possible directly with the POSIX API). +- */ +- op_ret = PATH_SET_TIMESPEC_OR_TIMEVAL(real_path, NULL); +- if (op_ret == -1) { +- op_errno = errno; +- gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_UTIMES_FAILED, +- "setattr (utimes) on %s " +- "failed", +- real_path); +- goto out; +- } ++ if ((valid & GF_SET_ATTR_CTIME) && priv->ctime) { ++ posix_update_ctime_in_mdata(this, real_path, -1, loc->inode, ++ &frame->root->ctime, stbuf, valid); + } + + if (!valid) { +@@ -469,14 +456,6 @@ posix_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, + goto out; + } + +- if (valid & GF_SET_ATTR_CTIME && priv->ctime) { +- /* +- * If we got ctime payload, we override +- * the ctime of statpost with that. 
+- */ +- statpost.ia_ctime = stbuf->ia_ctime; +- statpost.ia_ctime_nsec = stbuf->ia_ctime_nsec; +- } + posix_set_ctime(frame, this, real_path, -1, loc->inode, &statpost); + + if (xdata) +@@ -592,6 +571,7 @@ posix_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, + struct iatt statpost = { + 0, + }; ++ struct posix_private *priv = NULL; + struct posix_fd *pfd = NULL; + dict_t *xattr_rsp = NULL; + int32_t ret = -1; +@@ -604,6 +584,9 @@ posix_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, + VALIDATE_OR_GOTO(this, out); + VALIDATE_OR_GOTO(fd, out); + ++ priv = this->private; ++ VALIDATE_OR_GOTO(priv, out); ++ + ret = posix_fd_ctx_get(fd, this, &pfd, &op_errno); + if (ret < 0) { + gf_msg_debug(this->name, 0, "pfd is NULL from fd=%p", fd); +@@ -656,6 +639,11 @@ posix_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, + &frame->root->ctime, stbuf, valid); + } + ++ if ((valid & GF_SET_ATTR_CTIME) && priv->ctime) { ++ posix_update_ctime_in_mdata(this, NULL, pfd->fd, fd->inode, ++ &frame->root->ctime, stbuf, valid); ++ } ++ + if (!valid) { + op_ret = sys_fchown(pfd->fd, -1, -1); + if (op_ret == -1) { +@@ -2578,7 +2566,7 @@ _handle_setxattr_keyvalue_pair(dict_t *d, char *k, data_t *v, void *tmp) + + filler = tmp; + +- return posix_handle_pair(filler->this, filler->real_path, k, v, ++ return posix_handle_pair(filler->this, filler->loc, filler->real_path, k, v, + filler->flags, filler->stbuf); + } + +@@ -2641,27 +2629,27 @@ posix_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict, + priv = this->private; + DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, op_ret, op_errno, out); + ++ MAKE_INODE_HANDLE(real_path, this, loc, NULL); ++ if (!real_path) { ++ op_ret = -1; ++ op_errno = ESTALE; ++ goto out; ++ } ++ + ret = dict_get_mdata(dict, CTIME_MDATA_XDATA_KEY, &mdata_iatt); + if (ret == 0) { + /* This is initiated by lookup when ctime feature is enabled to create + * "trusted.glusterfs.mdata" xattr if not present. These are the files + * which were created when ctime feature is disabled. 
+ */ +- ret = posix_set_mdata_xattr_legacy_files(this, loc->inode, &mdata_iatt, +- &op_errno); ++ ret = posix_set_mdata_xattr_legacy_files(this, loc->inode, real_path, ++ &mdata_iatt, &op_errno); + if (ret != 0) { + op_ret = -1; + } + goto out; + } + +- MAKE_INODE_HANDLE(real_path, this, loc, NULL); +- if (!real_path) { +- op_ret = -1; +- op_errno = ESTALE; +- goto out; +- } +- + posix_pstat(this, loc->inode, loc->gfid, real_path, &preop, _gf_false); + + op_ret = -1; +@@ -2796,6 +2784,7 @@ posix_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict, + filler.real_path = real_path; + filler.this = this; + filler.stbuf = &preop; ++ filler.loc = loc; + + #ifdef GF_DARWIN_HOST_OS + filler.flags = map_xattr_flags(flags); +diff --git a/xlators/storage/posix/src/posix-metadata.c b/xlators/storage/posix/src/posix-metadata.c +index 532daa2..9efaf99 100644 +--- a/xlators/storage/posix/src/posix-metadata.c ++++ b/xlators/storage/posix/src/posix-metadata.c +@@ -56,6 +56,19 @@ posix_mdata_from_disk(posix_mdata_t *out, posix_mdata_disk_t *in) + out->atime.tv_nsec = be64toh(in->atime.tv_nsec); + } + ++void ++posix_mdata_iatt_from_disk(struct mdata_iatt *out, posix_mdata_disk_t *in) ++{ ++ out->ia_ctime = be64toh(in->ctime.tv_sec); ++ out->ia_ctime_nsec = be64toh(in->ctime.tv_nsec); ++ ++ out->ia_mtime = be64toh(in->mtime.tv_sec); ++ out->ia_mtime_nsec = be64toh(in->mtime.tv_nsec); ++ ++ out->ia_atime = be64toh(in->atime.tv_sec); ++ out->ia_atime_nsec = be64toh(in->atime.tv_nsec); ++} ++ + /* posix_fetch_mdata_xattr fetches the posix_mdata_t from disk */ + static int + posix_fetch_mdata_xattr(xlator_t *this, const char *real_path_arg, int _fd, +@@ -341,6 +354,7 @@ posix_compare_timespec(struct timespec *first, struct timespec *second) + + int + posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, ++ const char *realpath, + struct mdata_iatt *mdata_iatt, int *op_errno) + { + posix_mdata_t *mdata = NULL; +@@ -369,8 +383,8 @@ posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, + goto unlock; + } + +- ret = posix_fetch_mdata_xattr(this, NULL, -1, inode, (void *)mdata, +- op_errno); ++ ret = posix_fetch_mdata_xattr(this, realpath, -1, inode, ++ (void *)mdata, op_errno); + if (ret == 0) { + /* Got mdata from disk. This is a race, another client + * has healed the xattr during lookup. 
So set it in inode +@@ -412,7 +426,7 @@ posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, + } + } + +- ret = posix_store_mdata_xattr(this, NULL, -1, inode, mdata); ++ ret = posix_store_mdata_xattr(this, realpath, -1, inode, mdata); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_STOREMDATA_FAILED, + "gfid: %s key:%s ", uuid_utoa(inode->gfid), +@@ -445,7 +459,8 @@ posix_set_mdata_xattr(xlator_t *this, const char *real_path, int fd, + GF_VALIDATE_OR_GOTO(this->name, inode, out); + GF_VALIDATE_OR_GOTO(this->name, time, out); + +- if (update_utime && (!u_atime || !u_mtime)) { ++ if (update_utime && (flag->ctime && !time) && (flag->atime && !u_atime) && ++ (flag->mtime && !u_mtime)) { + goto out; + } + +@@ -652,6 +667,48 @@ posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd, + return; + } + ++/* posix_update_ctime_in_mdata updates the posix_mdata_t when ctime needs ++ * to be modified ++ */ ++void ++posix_update_ctime_in_mdata(xlator_t *this, const char *real_path, int fd, ++ inode_t *inode, struct timespec *ctime, ++ struct iatt *stbuf, int valid) ++{ ++ int32_t ret = 0; ++#if defined(HAVE_UTIMENSAT) ++ struct timespec tv_ctime = { ++ 0, ++ }; ++#else ++ struct timeval tv_ctime = { ++ 0, ++ }; ++#endif ++ posix_mdata_flag_t flag = { ++ 0, ++ }; ++ ++ struct posix_private *priv = NULL; ++ priv = this->private; ++ ++ if (inode && priv->ctime) { ++ tv_ctime.tv_sec = stbuf->ia_ctime; ++ SET_TIMESPEC_NSEC_OR_TIMEVAL_USEC(tv_ctime, stbuf->ia_ctime_nsec); ++ flag.ctime = 1; ++ ++ ret = posix_set_mdata_xattr(this, real_path, -1, inode, &tv_ctime, NULL, ++ NULL, NULL, &flag, _gf_true); ++ if (ret) { ++ gf_msg(this->name, GF_LOG_WARNING, errno, P_MSG_SETMDATA_FAILED, ++ "posix set mdata atime failed on file:" ++ " %s gfid:%s", ++ real_path, uuid_utoa(inode->gfid)); ++ } ++ } ++ return; ++} ++ + static void + posix_get_mdata_flag(uint64_t flags, posix_mdata_flag_t *flag) + { +diff --git a/xlators/storage/posix/src/posix-metadata.h b/xlators/storage/posix/src/posix-metadata.h +index c176699..63e8771 100644 +--- a/xlators/storage/posix/src/posix-metadata.h ++++ b/xlators/storage/posix/src/posix-metadata.h +@@ -43,6 +43,10 @@ posix_update_utime_in_mdata(xlator_t *this, const char *real_path, int fd, + inode_t *inode, struct timespec *ctime, + struct iatt *stbuf, int valid); + void ++posix_update_ctime_in_mdata(xlator_t *this, const char *real_path, int fd, ++ inode_t *inode, struct timespec *ctime, ++ struct iatt *stbuf, int valid); ++void + posix_set_ctime(call_frame_t *frame, xlator_t *this, const char *real_path, + int fd, inode_t *inode, struct iatt *stbuf); + void +@@ -56,7 +60,10 @@ posix_set_ctime_cfr(call_frame_t *frame, xlator_t *this, + int fd_out, inode_t *inode_out, struct iatt *stbuf_out); + int + posix_set_mdata_xattr_legacy_files(xlator_t *this, inode_t *inode, ++ const char *realpath, + struct mdata_iatt *mdata_iatt, + int *op_errno); ++void ++posix_mdata_iatt_from_disk(struct mdata_iatt *out, posix_mdata_disk_t *in); + + #endif /* _POSIX_METADATA_H */ +diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h +index 64288a7..dd51062 100644 +--- a/xlators/storage/posix/src/posix.h ++++ b/xlators/storage/posix/src/posix.h +@@ -339,7 +339,7 @@ dict_t * + posix_xattr_fill(xlator_t *this, const char *path, loc_t *loc, fd_t *fd, + int fdnum, dict_t *xattr, struct iatt *buf); + int +-posix_handle_pair(xlator_t *this, const char *real_path, char *key, ++posix_handle_pair(xlator_t *this, loc_t *loc, const char *real_path, 
char *key,
+                  data_t *value, int flags, struct iatt *stbuf);
+ int
+ posix_fhandle_pair(call_frame_t *frame, xlator_t *this, int fd, char *key,
+@@ -352,7 +352,8 @@ int
+ posix_gfid_heal(xlator_t *this, const char *path, loc_t *loc,
+                 dict_t *xattr_req);
+ int
+-posix_entry_create_xattr_set(xlator_t *this, const char *path, dict_t *dict);
++posix_entry_create_xattr_set(xlator_t *this, loc_t *loc, const char *path,
++                             dict_t *dict);
+ 
+ int
+ posix_fd_ctx_get(fd_t *fd, xlator_t *this, struct posix_fd **pfd,
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0296-glusterfind-pre-command-failure-on-a-modify.patch b/SOURCES/0296-glusterfind-pre-command-failure-on-a-modify.patch
new file mode 100644
index 0000000..9f43ff8
--- /dev/null
+++ b/SOURCES/0296-glusterfind-pre-command-failure-on-a-modify.patch
@@ -0,0 +1,62 @@
+From bfb64a0e685eb5755ceda6c54690335564e135c9 Mon Sep 17 00:00:00 2001
+From: Hari Gowtham
+Date: Mon, 16 Sep 2019 14:22:34 +0530
+Subject: [PATCH 296/297] glusterfind: pre command failure on a modify
+
+Label: DOWNSTREAM ONLY
+
+Upstream we have gfid_to_all_paths_using_gfid2path instead of
+gfid_to_path_using_pgfid, and so we do not hit this upstream.
+
+Problem: On a modify, the pre command runs through the find function,
+where the number of arguments sent mismatches and causes an error on stderr.
+The mismatch is because both changelog and brickfind use find(),
+but only brickfind was handled.
+
+Fix: Handle the additional argument on the changelog side as well,
+receiving it as a dummy variable for changelog.
+
+Change-Id: I5eecdd993e477b68a0e486db2ad7e56ba94bbf02
+fixes: bz#1733970
+Signed-off-by: Hari Gowtham
+Reviewed-on: https://code.engineering.redhat.com/gerrit/181095
+Tested-by: RHGS Build Bot
+Reviewed-by: Aravinda Vishwanathapura Krishna Murthy
+Reviewed-by: Rinku Kothiya
+---
+ tools/glusterfind/src/changelog.py | 5 +++--
+ tools/glusterfind/src/utils.py     | 2 +-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/tools/glusterfind/src/changelog.py b/tools/glusterfind/src/changelog.py
+index 40c381b..ef982db 100644
+--- a/tools/glusterfind/src/changelog.py
++++ b/tools/glusterfind/src/changelog.py
+@@ -141,8 +141,9 @@ def gfid_to_path_using_pgfid(brick, changelog_data, args):
+ 
+     # Length of brick path, to remove from output path
+     brick_path_len = len(brick)
+-
+-    def output_callback(path, inode):
++    # is_dir is a dummy variable to make it compatible with the find
++    # used in brickfind
++    def output_callback(path, inode, is_dir):
+         # For each path found, encodes it and updates path1
+         # Also updates converted flag in inodegfid table as 1
+         path = path.strip()
+diff --git a/tools/glusterfind/src/utils.py b/tools/glusterfind/src/utils.py
+index cc09903..e226c5a 100644
+--- a/tools/glusterfind/src/utils.py
++++ b/tools/glusterfind/src/utils.py
+@@ -70,7 +70,7 @@ def find(path, callback_func=lambda x: True, filter_func=lambda x: True,
+         else:
+             filter_result = filter_func(full_path)
+             if filter_result is not None:
+-                callback_func(full_path, filter_result)
++                callback_func(full_path, filter_result, None)
+     else:
+         filter_result = filter_func(full_path)
+         if filter_result is not None:
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch b/SOURCES/0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
new file mode 100644
index 0000000..47b5da0
--- /dev/null
+++ b/SOURCES/0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
@@ -0,0 +1,89 @@
+From 37555b6c83d3a979033111a754ee1728dab254f5 Mon Sep 17 00:00:00
2001 +From: Hari Gowtham +Date: Wed, 18 Sep 2019 17:38:52 +0530 +Subject: [PATCH 297/297] rpmbuild: fixing the build errors with 2a905a8ae + +Label: DOWNSTREAM ONLY + +Have added a Makefile inside extras/quota to remove the +No rule to make target error for quota/log_accounting.sh + +Change-Id: Ia3f6b3fa21a0de7eb3bdb31b3d205139df412aca +fixes: bz#1719171 +Signed-off-by: Hari Gowtham +Reviewed-on: https://code.engineering.redhat.com/gerrit/181326 +Tested-by: RHGS Build Bot +Reviewed-by: Aravinda Vishwanathapura Krishna Murthy +Reviewed-by: Atin Mukherjee +--- + configure.ac | 1 + + extras/Makefile.am | 6 +----- + extras/quota/Makefile.am | 8 ++++++++ + 3 files changed, 10 insertions(+), 5 deletions(-) + create mode 100644 extras/quota/Makefile.am + +diff --git a/configure.ac b/configure.ac +index f597b86..327733e 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -232,6 +232,7 @@ AC_CONFIG_FILES([Makefile + extras/hook-scripts/reset/pre/Makefile + extras/python/Makefile + extras/snap_scheduler/Makefile ++ extras/quota/Makefile + events/Makefile + events/src/Makefile + events/src/eventsapiconf.py +diff --git a/extras/Makefile.am b/extras/Makefile.am +index 8cbfda1..31ccdf5 100644 +--- a/extras/Makefile.am ++++ b/extras/Makefile.am +@@ -12,7 +12,7 @@ EditorMode_DATA = glusterfs-mode.el glusterfs.vim + + SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \ + $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python \ +- ganesha ++ ganesha quota + + confdir = $(sysconfdir)/glusterfs + if WITH_SERVER +@@ -30,14 +30,11 @@ endif + + scriptsdir = $(datadir)/glusterfs/scripts + scripts_SCRIPTS = thin-arbiter/setup-thin-arbiter.sh +-scripts_SCRIPTS += quota/log_accounting.sh + scripts_SCRIPTS += collect-system-stats.sh + scripts_SCRIPTS += identify-hangs.sh + if WITH_SERVER + scripts_SCRIPTS += post-upgrade-script-for-quota.sh \ + pre-upgrade-script-for-quota.sh stop-all-gluster-processes.sh +-scripts_SCRIPTS += quota/quota_fsck.py +-scripts_SCRIPTS += quota/xattr_analysis.py + if USE_SYSTEMD + scripts_SCRIPTS += control-cpu-load.sh + scripts_SCRIPTS += control-mem.sh +@@ -56,7 +53,6 @@ EXTRA_DIST = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.co + stop-all-gluster-processes.sh clang-checker.sh mount-shared-storage.sh \ + control-cpu-load.sh control-mem.sh group-distributed-virt \ + thin-arbiter/thin-arbiter.vol thin-arbiter/setup-thin-arbiter.sh \ +- quota/xattr_analysis.py quota/quota_fsck.py quota/log_accounting.sh \ + collect-system-stats.sh identify-hangs.sh + + if WITH_SERVER +diff --git a/extras/quota/Makefile.am b/extras/quota/Makefile.am +new file mode 100644 +index 0000000..cdb6be1 +--- /dev/null ++++ b/extras/quota/Makefile.am +@@ -0,0 +1,8 @@ ++scriptsdir = $(datadir)/glusterfs/scripts ++scripts_SCRIPTS = log_accounting.sh ++ ++if WITH_SERVER ++scripts_SCRIPTS += xattr_analysis.py quota_fsck.py ++endif ++ ++EXTRA_DIST = log_accounting.sh xattr_analysis.py quota_fsck.py +-- +1.8.3.1 + diff --git a/SOURCES/0298-geo-rep-fix-sub-command-during-worker-connection.patch b/SOURCES/0298-geo-rep-fix-sub-command-during-worker-connection.patch new file mode 100644 index 0000000..72daa15 --- /dev/null +++ b/SOURCES/0298-geo-rep-fix-sub-command-during-worker-connection.patch @@ -0,0 +1,56 @@ +From f65f4739914cf317da7e5eaa3b5a06fe64f338c2 Mon Sep 17 00:00:00 2001 +From: Sunny Kumar +Date: Sat, 21 Sep 2019 01:07:30 +0530 +Subject: [PATCH 298/302] geo-rep : fix sub-command during worker connection + +Problem: + +Geo-rep session for non-root going faulty. 
+ +Solution: + +During worker start we do not construct slave url and use 'args.resource_remote' +which is basically just slave-hostname. +This works better for root session but fails in non-root session during +ssh command. +Using slave url solves this issue. + +Backport of: + >fixes: bz#1753928 + >Change-Id: Ib83552fde77f81c208896494b323514ab37ebf22 + >Signed-off-by: Sunny Kumar + +Upstream patch: + https://review.gluster.org/#/c/glusterfs/+/23465/ + +BUG: 1754407 +Change-Id: Ib83552fde77f81c208896494b323514ab37ebf22 +Signed-off-by: Sunny Kumar +Reviewed-on: https://code.engineering.redhat.com/gerrit/181895 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/subcmds.py | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py +index 8de7db2..f8515f2 100644 +--- a/geo-replication/syncdaemon/subcmds.py ++++ b/geo-replication/syncdaemon/subcmds.py +@@ -73,8 +73,11 @@ def subcmd_worker(args): + Popen.init_errhandler() + fcntl.fcntl(args.feedback_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) + local = GLUSTER("localhost", args.master) +- slavevol = args.slave.split("::")[-1] +- slavehost = args.resource_remote ++ slave_url, slavevol = args.slave.split("::") ++ if "@" not in slave_url: ++ slavehost = args.resource_remote ++ else: ++ slavehost = "%s@%s" % (slave_url.split("@")[0], args.resource_remote) + remote = SSH(slavehost, slavevol) + remote.connect_remote() + local.connect() +-- +1.8.3.1 + diff --git a/SOURCES/0299-geo-rep-performance-improvement-while-syncing-rename.patch b/SOURCES/0299-geo-rep-performance-improvement-while-syncing-rename.patch new file mode 100644 index 0000000..9dea8cc --- /dev/null +++ b/SOURCES/0299-geo-rep-performance-improvement-while-syncing-rename.patch @@ -0,0 +1,156 @@ +From f293f7ac2f75c58d81da1229b484eb530b7083b5 Mon Sep 17 00:00:00 2001 +From: Sunny Kumar +Date: Fri, 20 Sep 2019 09:39:12 +0530 +Subject: [PATCH 299/302] geo-rep: performance improvement while syncing + renames with existing gfid + +Problem: +The bug[1] addresses issue of data inconsistency when handling RENAME with +existing destination. This fix requires some performance tuning considering +this issue occurs in heavy rename workload. + +Solution: +If distribution count for master volume is one do not verify op's on +master and go ahead with rename. + +The performance improvement with this patch can only be observed if +master volume has distribution count one. + +[1]. 
https://bugzilla.redhat.com/show_bug.cgi?id=1694820
+Backport of:
+
+ >fixes: bz#1753857
+ >Change-Id: I8e9bcd575e7e35f40f9f78b7961c92dee642f47b
+ >Signed-off-by: Sunny Kumar
+
+Upstream Patch:
+ https://review.gluster.org/#/c/glusterfs/+/23459/
+
+BUG: 1726000
+Change-Id: I8e9bcd575e7e35f40f9f78b7961c92dee642f47b
+Signed-off-by: Sunny Kumar
+Reviewed-on: https://code.engineering.redhat.com/gerrit/181893
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ geo-replication/gsyncd.conf.in           |  5 +++++
+ geo-replication/syncdaemon/gsyncd.py     |  2 ++
+ geo-replication/syncdaemon/monitor.py    |  2 ++
+ geo-replication/syncdaemon/resource.py   | 13 +++++++++++--
+ geo-replication/syncdaemon/syncdutils.py | 11 +++++++++++
+ 5 files changed, 31 insertions(+), 2 deletions(-)
+
+diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
+index 5ebd57a..9155cd8 100644
+--- a/geo-replication/gsyncd.conf.in
++++ b/geo-replication/gsyncd.conf.in
+@@ -23,6 +23,11 @@ configurable=false
+ type=int
+ value=1
+ 
++[master-distribution-count]
++configurable=false
++type=int
++value=1
++
+ [glusterd-workdir]
+ value = @GLUSTERD_WORKDIR@
+ 
+diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
+index a4c6f32..6ae5269 100644
+--- a/geo-replication/syncdaemon/gsyncd.py
++++ b/geo-replication/syncdaemon/gsyncd.py
+@@ -134,6 +134,8 @@ def main():
+                    help="Directory where Gluster binaries exist on slave")
+     p.add_argument("--slave-access-mount", action="store_true",
+                    help="Do not lazy umount the slave volume")
++    p.add_argument("--master-dist-count", type=int,
++                   help="Master Distribution count")
+ 
+     # Status
+     p = sp.add_parser("status")
+diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
+index 234f3f1..236afe7 100644
+--- a/geo-replication/syncdaemon/monitor.py
++++ b/geo-replication/syncdaemon/monitor.py
+@@ -37,6 +37,8 @@ def get_subvol_num(brick_idx, vol, hot):
+     tier = vol.is_tier()
+     disperse_count = vol.disperse_count(tier, hot)
+     replica_count = vol.replica_count(tier, hot)
++    distribute_count = vol.distribution_count(tier, hot)
++    gconf.setconfig("master-distribution-count", distribute_count)
+ 
+     if (tier and not hot):
+         brick_idx = brick_idx - vol.get_hot_bricks_count(tier)
+diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
+index b16db60..189d8a1 100644
+--- a/geo-replication/syncdaemon/resource.py
++++ b/geo-replication/syncdaemon/resource.py
+@@ -377,6 +377,7 @@ class Server(object):
+     def entry_ops(cls, entries):
+         pfx = gauxpfx()
+         logging.debug('entries: %s' % repr(entries))
++        dist_count = rconf.args.master_dist_count
+ 
+         def entry_purge(op, entry, gfid, e, uid, gid):
+             # This is an extremely racy code and needs to be fixed ASAP.
+@@ -686,9 +687,15 @@ class Server(object):
+                         raise
+                     else:
+                         raise
+-                elif not matching_disk_gfid(gfid, en):
++                elif not matching_disk_gfid(gfid, en) and dist_count > 1:
+                     collect_failure(e, EEXIST, uid, gid, True)
+                 else:
++                    # We are here which means matching_disk_gfid for
++                    # both source and destination has returned false
++                    # and distribution count for master vol is greater
++                    # than one, which basically means both the source
++                    # and destination exist and are not hardlinks.
++                    # So we are safe to go ahead with rename here.
+ rename_with_disk_gfid_confirmation(gfid, entry, en, + uid, gid) + if blob: +@@ -1409,7 +1416,9 @@ class SSH(object): + '--slave-gluster-log-level', + gconf.get("slave-gluster-log-level"), + '--slave-gluster-command-dir', +- gconf.get("slave-gluster-command-dir")] ++ gconf.get("slave-gluster-command-dir"), ++ '--master-dist-count', ++ str(gconf.get("master-distribution-count"))] + + if gconf.get("slave-access-mount"): + args_to_slave.append('--slave-access-mount') +diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py +index 2ee10ac..aadaebd 100644 +--- a/geo-replication/syncdaemon/syncdutils.py ++++ b/geo-replication/syncdaemon/syncdutils.py +@@ -926,6 +926,14 @@ class Volinfo(object): + else: + return int(self.get('disperseCount')[0].text) + ++ def distribution_count(self, tier, hot): ++ if (tier and hot): ++ return int(self.get('hotBricks/hotdistCount')[0].text) ++ elif (tier and not hot): ++ return int(self.get('coldBricks/colddistCount')[0].text) ++ else: ++ return int(self.get('distCount')[0].text) ++ + @property + @memoize + def hot_bricks(self): +@@ -994,6 +1002,9 @@ class VolinfoFromGconf(object): + def disperse_count(self, tier, hot): + return gconf.get("master-disperse-count") + ++ def distribution_count(self, tier, hot): ++ return gconf.get("master-distribution-count") ++ + @property + @memoize + def hot_bricks(self): +-- +1.8.3.1 + diff --git a/SOURCES/0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch b/SOURCES/0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch new file mode 100644 index 0000000..62bac41 --- /dev/null +++ b/SOURCES/0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch @@ -0,0 +1,70 @@ +From 039a3f81209706261fc809eac94564e81a3377da Mon Sep 17 00:00:00 2001 +From: Sanju Rakonde +Date: Wed, 25 Sep 2019 14:55:19 +0530 +Subject: [PATCH 300/302] cli: remove the warning displayed when remove brick + start issued + +remove-brick start command gives displays below error: + +It is recommended that remove-brick be run with cluster.force-migration +option disabled to prevent possible data corruption. Doing so will ensure +that files that receive writes during migration will not be migrated and +will need to be manually copied after the remove-brick commit operation. +Please check the value of the option and update accordingly. +Do you want to continue with your current cluster.force-migration settings? (y/n) + +As we are not qualifying cluster.force-migration for 3.5.0, +we should not display this message. So, removing it. 
+ +Label: DOWNSTREAM ONLY + +BUG: 1755227 +Change-Id: I409f2059d43c5e867788f19d2ccb8d6d839520f7 +fixes: bz#1755227 +Signed-off-by: Sanju Rakonde +Reviewed-on: https://code.engineering.redhat.com/gerrit/182009 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + cli/src/cli-cmd-parser.c | 2 -- + cli/src/cli-cmd-volume.c | 11 ----------- + 2 files changed, 13 deletions(-) + +diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c +index 92ceb8e..4456a7b 100644 +--- a/cli/src/cli-cmd-parser.c ++++ b/cli/src/cli-cmd-parser.c +@@ -2101,8 +2101,6 @@ cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words, + wordcount--; + if (!strcmp("start", w)) { + command = GF_OP_CMD_START; +- if (question) +- *question = 1; + } else if (!strcmp("commit", w)) { + command = GF_OP_CMD_COMMIT; + } else if (!strcmp("stop", w)) { +diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c +index a42e663..6b958bd 100644 +--- a/cli/src/cli-cmd-volume.c ++++ b/cli/src/cli-cmd-volume.c +@@ -2088,17 +2088,6 @@ cli_cmd_volume_remove_brick_cbk(struct cli_state *state, + "Remove-brick force will not migrate files from the " + "removed bricks, so they will no longer be available" + " on the volume.\nDo you want to continue?"; +- } else if (command == GF_OP_CMD_START) { +- question = +- "It is recommended that remove-brick be run with" +- " cluster.force-migration option disabled to prevent" +- " possible data corruption. Doing so will ensure that" +- " files that receive writes during migration will not" +- " be migrated and will need to be manually copied" +- " after the remove-brick commit operation. Please" +- " check the value of the option and update accordingly." +- " \nDo you want to continue with your current" +- " cluster.force-migration settings?"; + } + + if (!brick_count) { +-- +1.8.3.1 + diff --git a/SOURCES/0301-posix-Brick-is-going-down-unexpectedly.patch b/SOURCES/0301-posix-Brick-is-going-down-unexpectedly.patch new file mode 100644 index 0000000..270a0d7 --- /dev/null +++ b/SOURCES/0301-posix-Brick-is-going-down-unexpectedly.patch @@ -0,0 +1,61 @@ +From 913a0dc8f1eaa2fb18a6ebd6fcf66f46b48039f1 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Wed, 18 Sep 2019 19:11:33 +0530 +Subject: [PATCH 301/302] posix: Brick is going down unexpectedly + +Problem: In brick_mux environment, while multiple volumes are + created (1-1000) sometimes brick is going down due to + health_check thread failure + +Solution: Ignore EAGAIN error in health_check thread code to + avoid the issue + +> Change-Id: Id44c59f8e071a363a14d09d188813a6633855213 +> Fixes: bz#1751907 +> Signed-off-by: Mohit Agrawal +> (Cherry picked from commit c4d926900dc36f71c04b3f65ceca5150ce0e8c81) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23437/) + +Change-Id: Id44c59f8e071a363a14d09d188813a6633855213 +BUG: 1731826 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/182106 +Tested-by: Mohit Agrawal +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/storage/posix/src/posix-helpers.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c +index 6a1a35c..35dd3b6 100644 +--- a/xlators/storage/posix/src/posix-helpers.c ++++ b/xlators/storage/posix/src/posix-helpers.c +@@ -2108,14 +2108,20 @@ out: + if (fd != -1) { + sys_close(fd); + } ++ + if (ret && file_path[0]) { + gf_msg(this->name, 
GF_LOG_WARNING, errno, P_MSG_HEALTHCHECK_FAILED, + "%s() on %s returned ret is %d error is %s", op, file_path, ret, + ret != -1 ? strerror(ret) : strerror(op_errno)); +- gf_event(EVENT_POSIX_HEALTH_CHECK_FAILED, +- "op=%s;path=%s;error=%s;brick=%s:%s timeout is %d", op, +- file_path, strerror(op_errno), priv->hostname, priv->base_path, +- timeout); ++ ++ if ((op_errno == EAGAIN) || (ret == EAGAIN)) { ++ ret = 0; ++ } else { ++ gf_event(EVENT_POSIX_HEALTH_CHECK_FAILED, ++ "op=%s;path=%s;error=%s;brick=%s:%s timeout is %d", op, ++ file_path, strerror(op_errno), priv->hostname, ++ priv->base_path, timeout); ++ } + } + return ret; + } +-- +1.8.3.1 + diff --git a/SOURCES/0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch b/SOURCES/0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch new file mode 100644 index 0000000..7972767 --- /dev/null +++ b/SOURCES/0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch @@ -0,0 +1,67 @@ +From fb1d503791c874296afab0cd7be59b6865340d72 Mon Sep 17 00:00:00 2001 +From: Xavi Hernandez +Date: Wed, 25 Sep 2019 11:56:35 +0200 +Subject: [PATCH 302/302] cluster/ec: prevent filling shd log with "table not + found" messages + +When self-heal daemon receives an inodelk contention notification, it tries +to locate the related inode using inode_find() and the inode table owned by +top-most xlator, which in this case doesn't have any inode table. This causes +many messages to be logged by inode_find() function because the inode table +passed is NULL. + +This patch prevents this by making sure the inode table is not NULL before +calling inode_find(). + +Upstream patch: +> Change-Id: I8d001bd180aaaf1521ba40a536b097fcf70c991f +> Upstream patch link: https://review.gluster.org/c/glusterfs/+/23481 +> Fixes: bz#1755344 +> Signed-off-by: Xavi Hernandez + +Change-Id: I8d001bd180aaaf1521ba40a536b097fcf70c991f +BUG: 1754790 +Signed-off-by: Xavi Hernandez +Reviewed-on: https://code.engineering.redhat.com/gerrit/182207 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/ec/src/ec.c | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index 19094c4..3f31c74 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -463,6 +463,7 @@ ec_upcall(ec_t *ec, struct gf_upcall *upcall) + struct gf_upcall_cache_invalidation *ci = NULL; + struct gf_upcall_inodelk_contention *lc = NULL; + inode_t *inode; ++ inode_table_t *table; + + switch (upcall->event_type) { + case GF_UPCALL_CACHE_INVALIDATION: +@@ -476,8 +477,18 @@ ec_upcall(ec_t *ec, struct gf_upcall *upcall) + /* The lock is not owned by EC, ignore it. */ + return _gf_true; + } +- inode = inode_find(((xlator_t *)ec->xl->graph->top)->itable, +- upcall->gfid); ++ table = ((xlator_t *)ec->xl->graph->top)->itable; ++ if (table == NULL) { ++ /* Self-heal daemon doesn't have an inode table on the top ++ * xlator because it doesn't need it. In this case we should ++ * use the inode table managed by EC itself where all inodes ++ * being healed should be present. However self-heal doesn't ++ * use eager-locking and inodelk's are already released as ++ * soon as possible. In this case we can safely ignore these ++ * notifications. */ ++ return _gf_false; ++ } ++ inode = inode_find(table, upcall->gfid); + /* If inode is not found, it means that it's already released, + * so we can ignore it. 
Probably it has been released and
+ * destroyed while the contention notification was being sent.
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch b/SOURCES/0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
new file mode 100644
index 0000000..8641353
--- /dev/null
+++ b/SOURCES/0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
@@ -0,0 +1,45 @@
+From ae4f538065d26a277e38810c6eef18c0312cd1f3 Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Thu, 26 Sep 2019 17:52:30 +0530
+Subject: [PATCH 303/304] posix: heketidbstorage bricks go down during PVC
+ creation
+
+Problem: In an OCS environment, heketidbstorage is detached because
+         the health_check thread fails. Sometimes aio_write does not
+         finish within the default health-check-timeout limit and the
+         brick is detached.
+
+Solution: To avoid the issue, increase the default timeout to 20s
+
+> Change-Id: Idff283d5713da571f9d20a6b296274f69c3e5b7b
+> Fixes: bz#1755900
+> Signed-off-by: Mohit Agrawal
+> (Cherry picked from commit c6df9e962483bac5bfcd8916318b19040387ce81)
+> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23495/)
+
+Change-Id: Idff283d5713da571f9d20a6b296274f69c3e5b7b
+BUG: 1752713
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/182387
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/storage/posix/src/posix-common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
+index 69857d9..2cb58ba 100644
+--- a/xlators/storage/posix/src/posix-common.c
++++ b/xlators/storage/posix/src/posix-common.c
+@@ -1257,7 +1257,7 @@ struct volume_options posix_options[] = {
+     {.key = {"health-check-timeout"},
+      .type = GF_OPTION_TYPE_INT,
+      .min = 0,
+-     .default_value = "10",
++     .default_value = "20",
+      .validate = GF_OPT_VALIDATE_MIN,
+      .description =
+          "Interval in seconds to wait aio_write finish for health check, "
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0304-cluster-dht-Correct-fd-processing-loop.patch b/SOURCES/0304-cluster-dht-Correct-fd-processing-loop.patch
new file mode 100644
index 0000000..5f16e0a
--- /dev/null
+++ b/SOURCES/0304-cluster-dht-Correct-fd-processing-loop.patch
@@ -0,0 +1,194 @@
+From ad233c1b3abdfe2bdfd1eacc83b5f84b7afa6b46 Mon Sep 17 00:00:00 2001
+From: N Balachandran
+Date: Tue, 1 Oct 2019 17:37:15 +0530
+Subject: [PATCH 304/304] cluster/dht: Correct fd processing loop
+
+The fd processing loops in the
+dht_migration_complete_check_task and the
+dht_rebalance_inprogress_task functions were unsafe
+and could cause an open to be sent on an already freed
+fd. This has been fixed.
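
The pattern the fix adopts can be sketched as follows: pin the current fd with a reference before dropping the inode lock, and release the previous fd only after the lock is reacquired. This is a minimal stand-alone sketch using simplified stand-in types (a toy mutex, list and refcount), not the real libglusterfs fd_t, list_head or LOCK()/UNLOCK() primitives:

#include <pthread.h>
#include <stdlib.h>

/* Simplified stand-in for fd_t; the real code uses gluster's embedded
 * list and atomic reference counts. */
struct toy_fd {
    int refcount;
    struct toy_fd *next;
};

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static void toy_fd_ref(struct toy_fd *fd) { fd->refcount++; }
static void toy_fd_unref(struct toy_fd *fd)
{
    /* destruction at refcount zero is what makes unpinned iteration unsafe */
    if (--fd->refcount == 0)
        free(fd);
}

/* Stand-in for the syncop_open() that re-opens the fd on the new cached
 * subvol; it may block, so it must run without the inode lock held. */
static void reopen_on_dst(struct toy_fd *fd) { (void)fd; }

void safe_fd_walk(struct toy_fd *head)
{
    struct toy_fd *iter = NULL;
    struct toy_fd *prev = NULL;

    pthread_mutex_lock(&inode_lock);
    iter = head;
    while (iter != NULL) {
        toy_fd_ref(iter);                 /* pin iter before unlocking */
        pthread_mutex_unlock(&inode_lock);

        if (prev != NULL)
            toy_fd_unref(prev);           /* drop the previous pin */
        reopen_on_dst(iter);              /* slow work, lock not held */

        pthread_mutex_lock(&inode_lock);
        prev = iter;
        iter = iter->next;                /* next pointer read under lock */
    }
    pthread_mutex_unlock(&inode_lock);
    if (prev != NULL)
        toy_fd_unref(prev);
}

The invariant is that the walker never dereferences an fd it does not hold a reference to while the lock is down, which the list_for_each_entry_safe() loop being replaced could not guarantee once other threads were free to unref and destroy list members.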
+ +> Change-Id: I0a3c7d2fba314089e03dfd704f9dceb134749540 +> Fixes: bz#1757399 +> Signed-off-by: N Balachandran +> (Cherry picked from commit 9b15867070b0cc241ab165886292ecffc3bc0aed) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23506/) + +Change-Id: I0a3c7d2fba314089e03dfd704f9dceb134749540 +BUG: 1756325 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/182826 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/dht/src/dht-helper.c | 84 ++++++++++++++++++++++++++---------- + 1 file changed, 62 insertions(+), 22 deletions(-) + +diff --git a/xlators/cluster/dht/src/dht-helper.c b/xlators/cluster/dht/src/dht-helper.c +index 4c57e0d..1e9fee0 100644 +--- a/xlators/cluster/dht/src/dht-helper.c ++++ b/xlators/cluster/dht/src/dht-helper.c +@@ -1261,6 +1261,7 @@ dht_migration_complete_check_task(void *data) + fd_t *tmp = NULL; + uint64_t tmp_miginfo = 0; + dht_migrate_info_t *miginfo = NULL; ++ gf_boolean_t skip_open = _gf_false; + int open_failed = 0; + + this = THIS; +@@ -1399,24 +1400,34 @@ dht_migration_complete_check_task(void *data) + * the loop will cause the destruction of the fd. So we need to + * iterate the list safely because iter_fd cannot be trusted. + */ +- list_for_each_entry_safe(iter_fd, tmp, &inode->fd_list, inode_list) +- { +- if (fd_is_anonymous(iter_fd)) +- continue; +- +- if (dht_fd_open_on_dst(this, iter_fd, dst_node)) +- continue; +- ++ iter_fd = list_entry((&inode->fd_list)->next, typeof(*iter_fd), inode_list); ++ while (&iter_fd->inode_list != (&inode->fd_list)) { ++ if (fd_is_anonymous(iter_fd) || ++ (dht_fd_open_on_dst(this, iter_fd, dst_node))) { ++ if (!tmp) { ++ iter_fd = list_entry(iter_fd->inode_list.next, typeof(*iter_fd), ++ inode_list); ++ continue; ++ } ++ skip_open = _gf_true; ++ } + /* We need to release the inode->lock before calling + * syncop_open() to avoid possible deadlocks. However this + * can cause the iter_fd to be released by other threads. + * To avoid this, we take a reference before releasing the + * lock. + */ +- __fd_ref(iter_fd); ++ fd_ref(iter_fd); + + UNLOCK(&inode->lock); + ++ if (tmp) { ++ fd_unref(tmp); ++ tmp = NULL; ++ } ++ if (skip_open) ++ goto next; ++ + /* flags for open are stripped down to allow following the + * new location of the file, otherwise we can get EEXIST or + * truncate the file again as rebalance is moving the data */ +@@ -1438,9 +1449,11 @@ dht_migration_complete_check_task(void *data) + dht_fd_ctx_set(this, iter_fd, dst_node); + } + +- fd_unref(iter_fd); +- ++ next: + LOCK(&inode->lock); ++ skip_open = _gf_false; ++ tmp = iter_fd; ++ iter_fd = list_entry(tmp->inode_list.next, typeof(*tmp), inode_list); + } + + SYNCTASK_SETID(frame->root->uid, frame->root->gid); +@@ -1453,6 +1466,10 @@ dht_migration_complete_check_task(void *data) + + unlock: + UNLOCK(&inode->lock); ++ if (tmp) { ++ fd_unref(tmp); ++ tmp = NULL; ++ } + + out: + if (dict) { +@@ -1534,6 +1551,7 @@ dht_rebalance_inprogress_task(void *data) + int open_failed = 0; + uint64_t tmp_miginfo = 0; + dht_migrate_info_t *miginfo = NULL; ++ gf_boolean_t skip_open = _gf_false; + + this = THIS; + frame = data; +@@ -1654,24 +1672,40 @@ dht_rebalance_inprogress_task(void *data) + * the loop will cause the destruction of the fd. So we need to + * iterate the list safely because iter_fd cannot be trusted. 
+ */ +- list_for_each_entry_safe(iter_fd, tmp, &inode->fd_list, inode_list) +- { +- if (fd_is_anonymous(iter_fd)) +- continue; +- +- if (dht_fd_open_on_dst(this, iter_fd, dst_node)) +- continue; +- ++ iter_fd = list_entry((&inode->fd_list)->next, typeof(*iter_fd), inode_list); ++ while (&iter_fd->inode_list != (&inode->fd_list)) { + /* We need to release the inode->lock before calling + * syncop_open() to avoid possible deadlocks. However this + * can cause the iter_fd to be released by other threads. + * To avoid this, we take a reference before releasing the + * lock. + */ +- __fd_ref(iter_fd); + ++ if (fd_is_anonymous(iter_fd) || ++ (dht_fd_open_on_dst(this, iter_fd, dst_node))) { ++ if (!tmp) { ++ iter_fd = list_entry(iter_fd->inode_list.next, typeof(*iter_fd), ++ inode_list); ++ continue; ++ } ++ skip_open = _gf_true; ++ } ++ ++ /* Yes, this is ugly but there isn't a cleaner way to do this ++ * the fd_ref is an atomic increment so not too bad. We want to ++ * reduce the number of inode locks and unlocks. ++ */ ++ ++ fd_ref(iter_fd); + UNLOCK(&inode->lock); + ++ if (tmp) { ++ fd_unref(tmp); ++ tmp = NULL; ++ } ++ if (skip_open) ++ goto next; ++ + /* flags for open are stripped down to allow following the + * new location of the file, otherwise we can get EEXIST or + * truncate the file again as rebalance is moving the data */ +@@ -1692,9 +1726,11 @@ dht_rebalance_inprogress_task(void *data) + dht_fd_ctx_set(this, iter_fd, dst_node); + } + +- fd_unref(iter_fd); +- ++ next: + LOCK(&inode->lock); ++ skip_open = _gf_false; ++ tmp = iter_fd; ++ iter_fd = list_entry(tmp->inode_list.next, typeof(*tmp), inode_list); + } + + SYNCTASK_SETID(frame->root->uid, frame->root->gid); +@@ -1702,6 +1738,10 @@ dht_rebalance_inprogress_task(void *data) + unlock: + UNLOCK(&inode->lock); + ++ if (tmp) { ++ fd_unref(tmp); ++ tmp = NULL; ++ } + if (open_failed) { + ret = -1; + goto out; +-- +1.8.3.1 + diff --git a/SOURCES/0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch b/SOURCES/0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch new file mode 100644 index 0000000..a1e77c6 --- /dev/null +++ b/SOURCES/0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch @@ -0,0 +1,56 @@ +From 90e52f3b44da0ed05e35ebd474e284d45794b0d6 Mon Sep 17 00:00:00 2001 +From: Sanju Rakonde +Date: Thu, 10 Oct 2019 20:40:49 +0530 +Subject: [PATCH 305/307] glusterd: rebalance start should fail when quorum is + not met + +rebalance start should not succeed if quorum is not met. +this patch adds a condition to check whether quorum is met +in pre-validation stage. + +> fixes: bz#1760467 +> Change-Id: Ic7d0d08f69e4bc6d5e7abae713ec1881531c8ad4 +> Signed-off-by: Sanju Rakonde + +upstream patch: https://review.gluster.org/#/c/glusterfs/+/23536/ +BUG: 1760261 +Change-Id: Ic7d0d08f69e4bc6d5e7abae713ec1881531c8ad4 +Signed-off-by: Sanju Rakonde +Reviewed-on: https://code.engineering.redhat.com/gerrit/183146 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/bugs/glusterd/quorum-validation.t | 2 ++ + xlators/mgmt/glusterd/src/glusterd-mgmt.c | 3 ++- + 2 files changed, 4 insertions(+), 1 deletion(-) + +diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t +index ff46729..3cc3351 100644 +--- a/tests/bugs/glusterd/quorum-validation.t ++++ b/tests/bugs/glusterd/quorum-validation.t +@@ -34,6 +34,8 @@ TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2 + TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start + TEST ! 
$CLI_1 volume set $V0 barrier enable
+ 
++#quorum is not met, rebalance/profile start should fail
++TEST ! $CLI_1 volume rebalance $V0 start
+ TEST ! $CLI_1 volume profile $V0 start
+ 
+ #bug-1690753 - Volume stop when quorum not met is successful
+diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+index ec78913..a4915f3 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
++++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+@@ -1059,7 +1059,8 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
+         goto out;
+     }
+ 
+-    if (op == GD_OP_PROFILE_VOLUME || op == GD_OP_STOP_VOLUME) {
++    if (op == GD_OP_PROFILE_VOLUME || op == GD_OP_STOP_VOLUME ||
++        op == GD_OP_REBALANCE) {
+         ret = glusterd_validate_quorum(this, op, req_dict, op_errstr);
+         if (ret) {
+             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0306-cli-fix-distCount-value.patch b/SOURCES/0306-cli-fix-distCount-value.patch
new file mode 100644
index 0000000..0e8b9f2
--- /dev/null
+++ b/SOURCES/0306-cli-fix-distCount-value.patch
@@ -0,0 +1,43 @@
+From 167980565e1ab56989b25fe6aa0203aeb7970c8b Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde
+Date: Sun, 6 Oct 2019 19:05:28 +0530
+Subject: [PATCH 306/307] cli: fix distCount value
+
+gluster volume info --xml is displaying the wrong distCount
+value. This patch addresses it.
+
+> fixes: bz#1758878
+> Change-Id: I64081597e06018361e6524587b433b0c4b2a0260
+> Signed-off-by: Sanju Rakonde
+
+upstream patch: https://review.gluster.org/#/c/glusterfs/+/23521/
+
+BUG: 1758618
+Change-Id: I64081597e06018361e6524587b433b0c4b2a0260
+Signed-off-by: Sanju Rakonde
+Reviewed-on: https://code.engineering.redhat.com/gerrit/183147
+Tested-by: RHGS Build Bot
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ cli/src/cli-xml-output.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/cli/src/cli-xml-output.c b/cli/src/cli-xml-output.c
+index b417bb8..006e2fb 100644
+--- a/cli/src/cli-xml-output.c
++++ b/cli/src/cli-xml-output.c
+@@ -2548,8 +2548,9 @@ cli_xml_output_vol_info(cli_local_t *local, dict_t *dict)
+         ret = dict_get_int32(dict, key, &dist_count);
+         if (ret)
+             goto out;
+-        ret = xmlTextWriterWriteFormatElement(
+-            local->writer, (xmlChar *)"distCount", "%d", dist_count);
++        ret = xmlTextWriterWriteFormatElement(local->writer,
++                                              (xmlChar *)"distCount", "%d",
++                                              (brick_count / dist_count));
+         XML_RET_CHECK_AND_GOTO(ret, out);
+ 
+         snprintf(key, sizeof(key), "volume%d.stripe_count", i);
+--
+1.8.3.1
+ 
diff --git a/SOURCES/0307-ssl-fix-RHEL8-regression-failure.patch b/SOURCES/0307-ssl-fix-RHEL8-regression-failure.patch
new file mode 100644
index 0000000..7a85b50
--- /dev/null
+++ b/SOURCES/0307-ssl-fix-RHEL8-regression-failure.patch
@@ -0,0 +1,42 @@
+From be9695391f39fe6eb1d157f6bfd018116d1ee42b Mon Sep 17 00:00:00 2001
+From: Sanju Rakonde
+Date: Mon, 30 Sep 2019 13:14:06 +0530
+Subject: [PATCH 307/307] ssl: fix RHEL8 regression failure
+
+This test is failing with
+"SSL routines:SSL_CTX_use_certificate:ee key too small"
+in RHEL8.
This change is made according to +https://access.redhat.com/solutions/4157431 + +> updates: bz#1756900 +> Change-Id: Ib436372c3bd94bcf7324976337add7da4088b3d5 +> Signed-off-by: Sanju Rakonde + +upstream patch: https://review.gluster.org/#/c/glusterfs/+/23501/ + +BUG: 1704562 +Change-Id: Ib436372c3bd94bcf7324976337add7da4088b3d5 +Signed-off-by: Sanju Rakonde +Reviewed-on: https://code.engineering.redhat.com/gerrit/183148 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/bugs/cli/bug-1320388.t | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/bugs/cli/bug-1320388.t b/tests/bugs/cli/bug-1320388.t +index f5ffcbe..8e5d77b 100755 +--- a/tests/bugs/cli/bug-1320388.t ++++ b/tests/bugs/cli/bug-1320388.t +@@ -21,7 +21,7 @@ cleanup; + rm -f $SSL_BASE/glusterfs.* + touch "$GLUSTERD_WORKDIR"/secure-access + +-TEST openssl genrsa -out $SSL_KEY 1024 ++TEST openssl genrsa -out $SSL_KEY 3072 + TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT + ln $SSL_CERT $SSL_CA + +-- +1.8.3.1 + diff --git a/SOURCES/0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch b/SOURCES/0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch new file mode 100644 index 0000000..adbeb43 --- /dev/null +++ b/SOURCES/0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch @@ -0,0 +1,347 @@ +From 27f799563c1c2c1986662ed4a3a83d834c04fd98 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Mon, 14 Oct 2019 15:42:31 +0530 +Subject: [PATCH 308/308] dht: Rebalance causing IO Error - File descriptor in + bad state + +Problem : When a file is migrated, dht attempts to re-open all open + fds on the new cached subvol. Earlier, if dht had not opened the fd, + the client xlator would be unable to find the remote fd and would + fall back to using an anon fd for the fop. That behavior changed with + https://review.gluster.org/#/c/glusterfs/+/15804, causing fops to fail + with EBADFD if the fd was not available on the cached subvol. + The client xlator returns EBADFD if the remote fd is not found but + dht only checks for EBADF before re-opening fds on the new cached subvol. 
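+
+In outline, the extra condition this patch introduces (see the new
+helper dht_check_remote_fd_failed_error() in the diff below) behaves
+like the following Python sketch; fd_checked is the existing
+"only retry the open once" guard kept in dht_local_t:
+
+    import errno
+
+    def remote_fd_failed(op_ret, op_errno, fd_checked):
+        # EBADF was already handled; EBADFD is what the client xlator
+        # returns when the remote fd is missing on the cached subvol.
+        return (op_ret == -1 and
+                op_errno in (errno.EBADF, errno.EBADFD) and
+                not fd_checked)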
+ +Solution: Handle EBADFD at dht code path to avoid the issue + +> Change-Id: I43c51995cdd48d05b12e4b2889c8dbe2bb2a72d8 +> Fixes: bz#1758579 +> Signed-off-by: Mohit Agrawal +> (Cherry pick from commit 9314a9fbf487614c736cf6c4c1b93078d37bb9df) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23518/) + +Change-Id: I43c51995cdd48d05b12e4b2889c8dbe2bb2a72d8 +BUG: 1758432 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/183370 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/cluster/dht/src/dht-common.c | 27 +++++++++++++++++--- + xlators/cluster/dht/src/dht-common.h | 19 ++++++++++++++ + xlators/cluster/dht/src/dht-helper.c | 29 +++++++++++++++++++++ + xlators/cluster/dht/src/dht-inode-read.c | 42 +++++++++++++++++++++++++++---- + xlators/cluster/dht/src/dht-inode-write.c | 16 ++++++------ + 5 files changed, 116 insertions(+), 17 deletions(-) + +diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c +index 99cccd6..37952ba 100644 +--- a/xlators/cluster/dht/src/dht-common.c ++++ b/xlators/cluster/dht/src/dht-common.c +@@ -53,6 +53,17 @@ dht_set_dir_xattr_req(xlator_t *this, loc_t *loc, dict_t *xattr_req); + int + dht_do_fresh_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc); + ++/* Check the xdata to make sure EBADF has been set by client xlator */ ++int32_t ++dht_check_remote_fd_failed_error(dht_local_t *local, int op_ret, int op_errno) ++{ ++ if (op_ret == -1 && (op_errno == EBADF || op_errno == EBADFD) && ++ !(local->fd_checked)) { ++ return 1; ++ } ++ return 0; ++} ++ + /* Sets the blocks and size values to fixed values. This is to be called + * only for dirs. The caller is responsible for checking the type + */ +@@ -4529,6 +4540,7 @@ dht_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + int this_call_cnt = 0; + dht_local_t *local = NULL; + dht_conf_t *conf = NULL; ++ int ret = 0; + + VALIDATE_OR_GOTO(frame, err); + VALIDATE_OR_GOTO(frame->local, err); +@@ -4537,6 +4549,13 @@ dht_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + conf = this->private; + local = frame->local; + ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { ++ ret = dht_check_and_open_fd_on_subvol(this, frame); ++ if (ret) ++ goto err; ++ return 0; ++ } ++ + LOCK(&frame->lock); + { + if (!xattr || (op_ret == -1)) { +@@ -5204,8 +5223,8 @@ dht_file_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + + local->op_errno = op_errno; + +- if ((local->fop == GF_FOP_FSETXATTR) && op_ret == -1 && +- (op_errno == EBADF) && !(local->fd_checked)) { ++ if ((local->fop == GF_FOP_FSETXATTR) && ++ dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -5929,8 +5948,8 @@ dht_file_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + + local->op_errno = op_errno; + +- if ((local->fop == GF_FOP_FREMOVEXATTR) && (op_ret == -1) && +- (op_errno == EBADF) && !(local->fd_checked)) { ++ if ((local->fop == GF_FOP_FREMOVEXATTR) && ++ dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h +index c516271..ce11f02 100644 +--- a/xlators/cluster/dht/src/dht-common.h ++++ b/xlators/cluster/dht/src/dht-common.h +@@ -1230,6 +1230,22 @@ 
dht_newfile_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + struct iatt *preparent, struct iatt *postparent, dict_t *xdata); + + int ++dht_finodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, ++ int32_t op_ret, int32_t op_errno, dict_t *xdata); ++ ++int ++dht_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, ++ int op_errno, dict_t *xattr, dict_t *xdata); ++ ++int ++dht_common_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this, ++ int32_t op_ret, int32_t op_errno, dict_t *dict, ++ dict_t *xdata); ++int ++dht_fxattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this, ++ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata); ++ ++int + gf_defrag_status_get(dht_conf_t *conf, dict_t *dict); + + void +@@ -1525,4 +1541,7 @@ int + dht_pt_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc, + dict_t *xdata); + ++int32_t ++dht_check_remote_fd_failed_error(dht_local_t *local, int op_ret, int op_errno); ++ + #endif /* _DHT_H */ +diff --git a/xlators/cluster/dht/src/dht-helper.c b/xlators/cluster/dht/src/dht-helper.c +index 1e9fee0..4f7370d 100644 +--- a/xlators/cluster/dht/src/dht-helper.c ++++ b/xlators/cluster/dht/src/dht-helper.c +@@ -366,6 +366,23 @@ dht_check_and_open_fd_on_subvol_complete(int ret, call_frame_t *frame, + + break; + ++ case GF_FOP_FXATTROP: ++ STACK_WIND(frame, dht_common_xattrop_cbk, subvol, ++ subvol->fops->fxattrop, local->fd, ++ local->rebalance.flags, local->rebalance.xattr, ++ local->xattr_req); ++ break; ++ ++ case GF_FOP_FGETXATTR: ++ STACK_WIND(frame, dht_getxattr_cbk, subvol, subvol->fops->fgetxattr, ++ local->fd, local->key, NULL); ++ break; ++ ++ case GF_FOP_FINODELK: ++ STACK_WIND(frame, dht_finodelk_cbk, subvol, subvol->fops->finodelk, ++ local->key, local->fd, local->rebalance.lock_cmd, ++ &local->rebalance.flock, local->xattr_req); ++ break; + default: + gf_msg(this->name, GF_LOG_ERROR, 0, DHT_MSG_UNKNOWN_FOP, + "Unknown FOP on fd (%p) on file %s @ %s", fd, +@@ -429,6 +446,18 @@ handle_err: + DHT_STACK_UNWIND(fremovexattr, frame, -1, op_errno, NULL); + break; + ++ case GF_FOP_FXATTROP: ++ DHT_STACK_UNWIND(fxattrop, frame, -1, op_errno, NULL, NULL); ++ break; ++ ++ case GF_FOP_FGETXATTR: ++ DHT_STACK_UNWIND(fgetxattr, frame, -1, op_errno, NULL, NULL); ++ break; ++ ++ case GF_FOP_FINODELK: ++ DHT_STACK_UNWIND(finodelk, frame, -1, op_errno, NULL); ++ break; ++ + default: + gf_msg(this->name, GF_LOG_ERROR, 0, DHT_MSG_UNKNOWN_FOP, + "Unknown FOP on fd (%p) on file %s @ %s", fd, +diff --git a/xlators/cluster/dht/src/dht-inode-read.c b/xlators/cluster/dht/src/dht-inode-read.c +index cacfe35..0c209a5 100644 +--- a/xlators/cluster/dht/src/dht-inode-read.c ++++ b/xlators/cluster/dht/src/dht-inode-read.c +@@ -162,8 +162,8 @@ dht_file_attr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + local = frame->local; + prev = cookie; + +- if ((local->fop == GF_FOP_FSTAT) && (op_ret == -1) && (op_errno == EBADF) && +- !(local->fd_checked)) { ++ if ((local->fop == GF_FOP_FSTAT) && ++ dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -431,7 +431,7 @@ dht_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + if (local->call_cnt != 1) + goto out; + +- if (op_ret == -1 && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto 
out; +@@ -703,7 +703,7 @@ dht_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + if (local->call_cnt != 1) + goto out; + +- if (op_ret == -1 && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -820,7 +820,7 @@ dht_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + + local->op_errno = op_errno; + +- if (op_ret == -1 && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -1223,6 +1223,13 @@ dht_common_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + if (local->call_cnt != 1) + goto out; + ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { ++ ret = dht_check_and_open_fd_on_subvol(this, frame); ++ if (ret) ++ goto out; ++ return 0; ++ } ++ + ret = dht_read_iatt_from_xdata(this, xdata, &stbuf); + + if ((!op_ret) && (ret)) { +@@ -1535,8 +1542,26 @@ dht_finodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + int32_t op_ret, int32_t op_errno, dict_t *xdata) + + { ++ dht_local_t *local = NULL; ++ int ret = 0; ++ ++ GF_VALIDATE_OR_GOTO("dht", frame, out); ++ GF_VALIDATE_OR_GOTO("dht", this, out); ++ GF_VALIDATE_OR_GOTO("dht", frame->local, out); ++ ++ local = frame->local; ++ ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { ++ ret = dht_check_and_open_fd_on_subvol(this, frame); ++ if (ret) ++ goto out; ++ return 0; ++ } ++ ++out: + dht_lk_inode_unref(frame, op_ret); + DHT_STACK_UNWIND(finodelk, frame, op_ret, op_errno, xdata); ++ + return 0; + } + +@@ -1574,6 +1599,13 @@ dht_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd, + if (ret) + goto err; + */ ++ local->rebalance.flock = *lock; ++ local->rebalance.lock_cmd = cmd; ++ local->key = gf_strdup(volume); ++ ++ if (xdata) ++ local->xattr_req = dict_ref(xdata); ++ + STACK_WIND(frame, dht_finodelk_cbk, lock_subvol, + lock_subvol->fops->finodelk, volume, fd, cmd, lock, xdata); + +diff --git a/xlators/cluster/dht/src/dht-inode-write.c b/xlators/cluster/dht/src/dht-inode-write.c +index b26b705..b6b349d 100644 +--- a/xlators/cluster/dht/src/dht-inode-write.c ++++ b/xlators/cluster/dht/src/dht-inode-write.c +@@ -49,7 +49,7 @@ dht_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + * We only check once as this could be a valid bad fd error. + */ + +- if (op_ret == -1 && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -262,8 +262,8 @@ dht_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + * We only check once as this could actually be a valid error. + */ + +- if ((local->fop == GF_FOP_FTRUNCATE) && (op_ret == -1) && +- ((op_errno == EBADF) || (op_errno == EINVAL)) && !(local->fd_checked)) { ++ if ((local->fop == GF_FOP_FTRUNCATE) && ++ dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -489,7 +489,7 @@ dht_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + * We only check once as this could actually be a valid error. 
+ */ + +- if ((op_ret == -1) && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -666,7 +666,7 @@ dht_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + * and a lookup updated the cached subvol in the inode ctx. + * We only check once as this could actually be a valid error. + */ +- if ((op_ret == -1) && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -838,7 +838,7 @@ dht_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret, + * and a lookup updated the cached subvol in the inode ctx. + * We only check once as this could actually be a valid error. + */ +- if ((op_ret == -1) && (op_errno == EBADF) && !(local->fd_checked)) { ++ if (dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +@@ -1005,8 +1005,8 @@ dht_file_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this, + + local->op_errno = op_errno; + +- if ((local->fop == GF_FOP_FSETATTR) && (op_ret == -1) && +- (op_errno == EBADF) && !(local->fd_checked)) { ++ if ((local->fop == GF_FOP_FSETATTR) && ++ dht_check_remote_fd_failed_error(local, op_ret, op_errno)) { + ret = dht_check_and_open_fd_on_subvol(this, frame); + if (ret) + goto out; +-- +1.8.3.1 + diff --git a/SOURCES/0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch b/SOURCES/0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch new file mode 100644 index 0000000..6ae359e --- /dev/null +++ b/SOURCES/0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch @@ -0,0 +1,240 @@ +From 2b1738402276f43d7cb64542b74cb50145e46d77 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Wed, 16 Oct 2019 14:25:47 +0530 +Subject: [PATCH 309/309] geo-rep: Fix config upgrade on non-participating node + +After upgrade, if the config files are of old format, it +gets migrated to new format. Monitor process migrates it. +Since monitor doesn't run on nodes where bricks are not +hosted, it doesn't get migrated there. So this patch fixes +the config upgrade on nodes which doesn't host bricks. +This happens during config either on get/set/reset. 
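+
+A rough Python sketch of the resulting gating logic in gsyncd.py (the
+helpers are the existing gconf.is_config_file_old() and
+gconf.config_upgrade(); argument plumbing is elided):
+
+    UPGRADE_SUBCMDS = ("monitor", "config-get", "config-set", "config-reset")
+
+    def maybe_upgrade(config_file, args, slavevol):
+        # Before this fix only "monitor" reached the upgrade path, so
+        # nodes that host no bricks (and hence run no monitor process)
+        # kept old-format config files forever.
+        if config_file is not None and args.subcmd in UPGRADE_SUBCMDS:
+            ret = gconf.is_config_file_old(config_file, args.master, slavevol)
+            if ret is not None:
+                gconf.config_upgrade(config_file, ret)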
+ +Backport of: + > Patch: https://review.gluster.org/23555 + > Change-Id: Ibade2f2310b0f3affea21a3baa1ae0eb71162cba + > Signed-off-by: Kotresh HR + > fixes: bz#1762220 + +Change-Id: Ibade2f2310b0f3affea21a3baa1ae0eb71162cba +Signed-off-by: Kotresh HR +BUG: 1760939 +Reviewed-on: https://code.engineering.redhat.com/gerrit/183461 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/gsyncd.py | 3 +- + tests/00-geo-rep/georep-config-upgrade.t | 132 +++++++++++++++++++++++++++++++ + tests/00-geo-rep/gsyncd.conf.old | 47 +++++++++++ + 3 files changed, 181 insertions(+), 1 deletion(-) + create mode 100644 tests/00-geo-rep/georep-config-upgrade.t + create mode 100644 tests/00-geo-rep/gsyncd.conf.old + +diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py +index 6ae5269..7b48d82 100644 +--- a/geo-replication/syncdaemon/gsyncd.py ++++ b/geo-replication/syncdaemon/gsyncd.py +@@ -255,7 +255,8 @@ def main(): + if args.subcmd == "slave": + override_from_args = True + +- if args.subcmd == "monitor": ++ if config_file is not None and \ ++ args.subcmd in ["monitor", "config-get", "config-set", "config-reset"]: + ret = gconf.is_config_file_old(config_file, args.master, extra_tmpl_args["slavevol"]) + if ret is not None: + gconf.config_upgrade(config_file, ret) +diff --git a/tests/00-geo-rep/georep-config-upgrade.t b/tests/00-geo-rep/georep-config-upgrade.t +new file mode 100644 +index 0000000..557461c +--- /dev/null ++++ b/tests/00-geo-rep/georep-config-upgrade.t +@@ -0,0 +1,132 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../include.rc ++. $(dirname $0)/../volume.rc ++. $(dirname $0)/../geo-rep.rc ++. $(dirname $0)/../env.rc ++ ++SCRIPT_TIMEOUT=300 ++OLD_CONFIG_PATH=$(dirname $0)/gsyncd.conf.old ++WORKING_DIR=/var/lib/glusterd/geo-replication/master_127.0.0.1_slave ++ ++##Cleanup and start glusterd ++cleanup; ++TEST glusterd; ++TEST pidof glusterd ++ ++##Variables ++GEOREP_CLI="$CLI volume geo-replication" ++master=$GMV0 ++SH0="127.0.0.1" ++slave=${SH0}::${GSV0} ++num_active=2 ++num_passive=2 ++master_mnt=$M0 ++slave_mnt=$M1 ++ ++############################################################ ++#SETUP VOLUMES AND GEO-REPLICATION ++############################################################ ++ ++##create_and_start_master_volume ++TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4}; ++TEST $CLI volume start $GMV0 ++ ++##create_and_start_slave_volume ++TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4}; ++TEST $CLI volume start $GSV0 ++ ++##Create, start and mount meta_volume ++TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3}; ++TEST $CLI volume start $META_VOL ++TEST mkdir -p $META_MNT ++TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT ++ ++##Mount master ++TEST glusterfs -s $H0 --volfile-id $GMV0 $M0 ++ ++##Mount slave ++TEST glusterfs -s $H0 --volfile-id $GSV0 $M1 ++ ++############################################################ ++#BASIC GEO-REPLICATION TESTS ++############################################################ ++ ++#Create geo-rep session ++TEST create_georep_session $master $slave ++ ++#Config gluster-command-dir ++TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR} ++ ++#Config gluster-command-dir ++TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR} ++ ++#Enable_metavolume ++TEST $GEOREP_CLI $master $slave config use_meta_volume true ++ ++#Wait for common secret pem file to be created ++EXPECT_WITHIN 
$GEO_REP_TIMEOUT 0 check_common_secret_file ++ ++#Verify the keys are distributed ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed ++ ++#Start_georep ++TEST $GEOREP_CLI $master $slave start ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active" ++EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive" ++ ++TEST $GEOREP_CLI $master $slave config sync-method tarssh ++ ++#Stop Geo-rep ++TEST $GEOREP_CLI $master $slave stop ++ ++#Copy old config file ++mv -f $WORKING_DIR/gsyncd.conf $WORKING_DIR/gsyncd.conf.org ++cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf ++ ++#Check if config get all updates config_file ++TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf ++TEST $GEOREP_CLI $master $slave config ++TEST grep "sync-method" $WORKING_DIR/gsyncd.conf ++ ++#Check if config get updates config_file ++rm -f $WORKING_DIR/gsyncd.conf ++cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf ++TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf ++TEST $GEOREP_CLI $master $slave config sync-method ++TEST grep "sync-method" $WORKING_DIR/gsyncd.conf ++ ++#Check if config set updates config_file ++rm -f $WORKING_DIR/gsyncd.conf ++cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf ++TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf ++TEST $GEOREP_CLI $master $slave config sync-xattrs false ++TEST grep "sync-method" $WORKING_DIR/gsyncd.conf ++ ++#Check if config reset updates config_file ++rm -f $WORKING_DIR/gsyncd.conf ++cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf ++TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf ++TEST $GEOREP_CLI $master $slave config \!sync-xattrs ++TEST grep "sync-method" $WORKING_DIR/gsyncd.conf ++ ++#Check if geo-rep start updates config_file ++rm -f $WORKING_DIR/gsyncd.conf ++cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf ++TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf ++TEST $GEOREP_CLI $master $slave start ++TEST grep "sync-method" $WORKING_DIR/gsyncd.conf ++ ++#Stop geo-rep ++TEST $GEOREP_CLI $master $slave stop ++ ++#Delete Geo-rep ++TEST $GEOREP_CLI $master $slave delete ++ ++#Cleanup authorized keys ++sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys ++sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys ++ ++cleanup; ++#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000 +diff --git a/tests/00-geo-rep/gsyncd.conf.old b/tests/00-geo-rep/gsyncd.conf.old +new file mode 100644 +index 0000000..519acaf +--- /dev/null ++++ b/tests/00-geo-rep/gsyncd.conf.old +@@ -0,0 +1,47 @@ ++[__meta__] ++version = 2.0 ++ ++[peersrx . .] 
++remote_gsyncd = /usr/local/libexec/glusterfs/gsyncd ++georep_session_working_dir = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/ ++ssh_command_tar = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/tar_ssh.pem ++changelog_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}-changes.log ++working_dir = /var/lib/misc/glusterfsd/${mastervol}/${eSlave} ++ignore_deletes = false ++pid_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid ++state_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status ++gluster_command_dir = /usr/local/sbin/ ++gluster_params = aux-gfid-mount acl ++ssh_command = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/secret.pem ++state_detail_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status ++state_socket_unencoded = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket ++socketdir = /var/run/gluster ++log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}.log ++gluster_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}.gluster.log ++special_sync_mode = partial ++change_detector = changelog ++pid-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid ++state-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status ++ ++[__section_order__] ++peersrx . . = 0 ++peersrx . %5essh%3a = 2 ++peersrx . = 3 ++peers master slave = 4 ++ ++[peersrx . %5Essh%3A] ++remote_gsyncd = /nonexistent/gsyncd ++ ++[peersrx .] ++gluster_command_dir = /usr/local/sbin/ ++gluster_params = aux-gfid-mount acl ++log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log ++log_file_mbr = /var/log/glusterfs/geo-replication-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log ++gluster_log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log ++ ++[peers master slave] ++session_owner = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214 ++master.stime_xattr_name = trusted.glusterfs.0732cbd1-3ec5-4920-ab0d-aa5a896d5214.07a9005c-ace4-4f67-b3c0-73938fb236c4.stime ++volume_id = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214 ++use_tarssh = true ++ +-- +1.8.3.1 + diff --git a/SOURCES/0310-tests-test-case-for-non-root-geo-rep-setup.patch b/SOURCES/0310-tests-test-case-for-non-root-geo-rep-setup.patch new file mode 100644 index 0000000..a38a4aa --- /dev/null +++ b/SOURCES/0310-tests-test-case-for-non-root-geo-rep-setup.patch @@ -0,0 +1,284 @@ +From c2decfb59bd1be7cd2b0d792fd2ca2627913638a Mon Sep 17 00:00:00 2001 +From: Sunny Kumar +Date: Tue, 24 Sep 2019 18:22:13 +0530 +Subject: [PATCH 310/313] tests : test case for non-root geo-rep setup + +Added test case for non-root geo-rep setup. 
+ +Backport of: + > Patch: https://review.gluster.org/22902 + > Change-Id: Ib6ebee79949a9f61bdc5c7b5e11b51b262750e98 + > fixes: bz#1717827 + > Signed-off-by: Sunny Kumar + +Change-Id: Ib6ebee79949a9f61bdc5c7b5e11b51b262750e98 +BUG: 1763412 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/183664 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + tests/00-geo-rep/00-georep-verify-non-root-setup.t | 251 +++++++++++++++++++++ + 1 file changed, 251 insertions(+) + create mode 100644 tests/00-geo-rep/00-georep-verify-non-root-setup.t + +diff --git a/tests/00-geo-rep/00-georep-verify-non-root-setup.t b/tests/00-geo-rep/00-georep-verify-non-root-setup.t +new file mode 100644 +index 0000000..e753c1f +--- /dev/null ++++ b/tests/00-geo-rep/00-georep-verify-non-root-setup.t +@@ -0,0 +1,251 @@ ++#!/bin/bash ++ ++. $(dirname $0)/../include.rc ++. $(dirname $0)/../volume.rc ++. $(dirname $0)/../geo-rep.rc ++. $(dirname $0)/../env.rc ++ ++SCRIPT_TIMEOUT=500 ++ ++### Basic Non-root geo-rep setup test with Distribute Replicate volumes ++ ++##Cleanup and start glusterd ++cleanup; ++TEST glusterd; ++TEST pidof glusterd ++ ++ ++##Variables ++GEOREP_CLI="$CLI volume geo-replication" ++master=$GMV0 ++SH0="127.0.0.1" ++slave=${SH0}::${GSV0} ++num_active=2 ++num_passive=2 ++master_mnt=$M0 ++slave_mnt=$M1 ++ ++##User and group to be used for non-root geo-rep setup ++usr="nroot" ++grp="ggroup" ++ ++slave_url=$usr@$slave ++slave_vol=$GSV0 ++ssh_url=$usr@$SH0 ++ ++############################################################ ++#SETUP VOLUMES AND VARIABLES ++ ++##create_and_start_master_volume ++TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4}; ++TEST $CLI volume start $GMV0 ++ ++##create_and_start_slave_volume ++TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4}; ++TEST $CLI volume start $GSV0 ++ ++##Mount master ++#TEST glusterfs -s $H0 --volfile-id $GMV0 $M0 ++ ++##Mount slave ++#TEST glusterfs -s $H0 --volfile-id $GSV0 $M1 ++ ++ ++########################################################## ++#TEST FUNCTIONS ++ ++function distribute_key_non_root() ++{ ++ ${GLUSTER_LIBEXECDIR}/set_geo_rep_pem_keys.sh $usr $master $slave_vol ++ echo $? ++} ++ ++ ++function check_status_non_root() ++{ ++ local search_key=$1 ++ $GEOREP_CLI $master $slave_url status | grep -F "$search_key" | wc -l ++} ++ ++ ++function check_and_clean_group() ++{ ++ if [ $(getent group $grp) ] ++ then ++ groupdel $grp; ++ echo $? ++ else ++ echo 0 ++ fi ++} ++ ++function clean_lock_files() ++{ ++ if [ ! -f /etc/passwd.lock ]; ++ then ++ rm -rf /etc/passwd.lock; ++ fi ++ ++ if [ ! -f /etc/group.lock ]; ++ then ++ rm -rf /etc/group.lock; ++ fi ++ ++ if [ ! -f /etc/shadow.lock ]; ++ then ++ rm -rf /etc/shadow.lock; ++ fi ++ ++ if [ ! 
-f /etc/gshadow.lock ]; ++ then ++ rm -rf /etc/gshadow.lock; ++ fi ++} ++ ++ ++########################################################### ++#SETUP NON-ROOT GEO REPLICATION ++ ++##Create ggroup group ++##First test if group exists and then create new one ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_and_clean_group ++ ++##cleanup *.lock files ++ ++clean_lock_files ++ ++TEST /usr/sbin/groupadd $grp ++ ++clean_lock_files ++##Create non-root user and assign it to newly created group ++ ++TEST /usr/sbin/useradd -G $grp $usr ++ ++##Modify password for non-root user to have control over distributing ssh-key ++echo "$usr:pass" | chpasswd ++ ++##Set up mountbroker root ++TEST gluster-mountbroker setup /var/mountbroker-root $grp ++ ++##Associate volume and non-root user to the mountbroker ++TEST gluster-mountbroker add $slave_vol $usr ++ ++##Check ssh setting for clear text passwords ++sed '/^PasswordAuthentication /{s/no/yes/}' -i /etc/ssh/sshd_config && grep '^PasswordAuthentication ' /etc/ssh/sshd_config && service sshd restart ++ ++ ++##Restart glusterd to reflect mountbroker changages ++TEST killall_gluster; ++TEST glusterd; ++TEST pidof glusterd; ++ ++ ++ ++##Create, start and mount meta_volume ++TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3}; ++TEST $CLI volume start $META_VOL ++TEST mkdir -p $META_MNT ++TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT ++ ++##Mount master ++TEST glusterfs -s $H0 --volfile-id $GMV0 $M0 ++ ++##Mount slave ++TEST glusterfs -s $H0 --volfile-id $GSV0 $M1 ++ ++## Check status of mount-broker ++TEST gluster-mountbroker status ++ ++ ++##Setup password-less ssh for non-root user ++#sshpass -p "pass" ssh-copy-id -i ~/.ssh/id_rsa.pub $ssh_url ++##Run ssh agent ++eval "$(ssh-agent -s)" ++PASS="pass" ++ ++ ++##Create a temp script to echo the SSH password, used by SSH_ASKPASS ++ ++SSH_ASKPASS_SCRIPT=/tmp/ssh-askpass-script ++cat > ${SSH_ASKPASS_SCRIPT} <> ~/.bashrc" ++ ++##Creating secret pem pub file ++TEST gluster-georep-sshkey generate ++ ++##Create geo-rep non-root setup ++ ++TEST $GEOREP_CLI $master $slave_url create push-pem ++ ++#Config gluster-command-dir ++TEST $GEOREP_CLI $master $slave_url config gluster-command-dir ${GLUSTER_CMD_DIR} ++ ++#Config gluster-command-dir ++TEST $GEOREP_CLI $master $slave_url config slave-gluster-command-dir ${GLUSTER_CMD_DIR} ++ ++## Test for key distribution ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 distribute_key_non_root ++ ++##Wait for common secret pem file to be created ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file ++ ++#Enable_metavolume ++TEST $GEOREP_CLI $master $slave config use_meta_volume true ++ ++#Start_georep ++TEST $GEOREP_CLI $master $slave_url start ++ ++## Meta volume is enabled so looking for 2 Active and 2 Passive sessions ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_non_root "Active" ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_non_root "Passive" ++ ++#Pause geo-replication session ++TEST $GEOREP_CLI $master $slave_url pause ++ ++#Resume geo-replication session ++TEST $GEOREP_CLI $master $slave_url resume ++ ++#Validate failure of volume stop when geo-rep is running ++TEST ! 
$CLI volume stop $GMV0 ++ ++#Stop Geo-rep ++TEST $GEOREP_CLI $master $slave_url stop ++ ++#Delete Geo-rep ++TEST $GEOREP_CLI $master $slave_url delete ++ ++#Cleanup authorized_keys ++sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys ++sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys ++ ++#clear mountbroker ++gluster-mountbroker remove --user $usr ++gluster-mountbroker remove --volume $slave_vol ++ ++#delete group and user created for non-root setup ++TEST userdel -r -f $usr ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_and_clean_group ++ ++##password script cleanup ++rm -rf /tmp/ssh-askpass-script ++ ++ ++cleanup; ++ +-- +1.8.3.1 + diff --git a/SOURCES/0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch b/SOURCES/0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch new file mode 100644 index 0000000..af0206a --- /dev/null +++ b/SOURCES/0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch @@ -0,0 +1,186 @@ +From 4a2441e76f4240568093080769ede07bb7fb2016 Mon Sep 17 00:00:00 2001 +From: Kotresh HR +Date: Sun, 20 Oct 2019 01:01:39 +0530 +Subject: [PATCH 311/313] geo-rep: Fix Permission denied traceback on non root + setup + +Problem: +While syncing rename of directory in hybrid crawl, geo-rep +crashes as below. + +Traceback (most recent call last): + File "/usr/local/libexec/glusterfs/python/syncdaemon/repce.py", line 118, in worker + res = getattr(self.obj, rmeth)(*in_data[2:]) + File "/usr/local/libexec/glusterfs/python/syncdaemon/resource.py", line 588, in entry_ops + src_entry = get_slv_dir_path(slv_host, slv_volume, gfid) + File "/usr/local/libexec/glusterfs/python/syncdaemon/syncdutils.py", line 687, in get_slv_dir_path + [ENOENT], [ESTALE]) + File "/usr/local/libexec/glusterfs/python/syncdaemon/syncdutils.py", line 546, in errno_wrap + return call(*arg) +PermissionError: [Errno 13] Permission denied: '/bricks/brick1/b1/.glusterfs/8e/c0/8ec0fcd4-d50f-4a6e-b473-a7943ab66640' + +Cause: +Conversion of gfid to path for a directory uses readlink on backend +.glusterfs gfid path. But this fails for non root user with +permission denied. + +Fix: +Use gfid2path interface to get the path from gfid + +Backport of: + > Patch: https://review.gluster.org/23570 + > Change-Id: I9d40c713a1b32cea95144cbc0f384ada82972222 + > fixes: bz#1763439 + > Signed-off-by: Kotresh HR + +Change-Id: I9d40c713a1b32cea95144cbc0f384ada82972222 +BUG: 1763412 +Signed-off-by: Kotresh HR +Reviewed-on: https://code.engineering.redhat.com/gerrit/183665 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + geo-replication/syncdaemon/gsyncd.py | 3 +- + geo-replication/syncdaemon/syncdutils.py | 35 ++++++++++++++++------ + tests/00-geo-rep/00-georep-verify-non-root-setup.t | 30 +++++++++++++++---- + 3 files changed, 52 insertions(+), 16 deletions(-) + +diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py +index 7b48d82..8940384 100644 +--- a/geo-replication/syncdaemon/gsyncd.py ++++ b/geo-replication/syncdaemon/gsyncd.py +@@ -231,7 +231,8 @@ def main(): + # Set default path for config file in that case + # If an subcmd accepts config file then it also accepts + # master and Slave arguments. 
+- if config_file is None and hasattr(args, "config_file"): ++ if config_file is None and hasattr(args, "config_file") \ ++ and args.subcmd != "slave": + config_file = "%s/geo-replication/%s_%s_%s/gsyncd.conf" % ( + GLUSTERD_WORKDIR, + args.master, +diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py +index aadaebd..b08098e 100644 +--- a/geo-replication/syncdaemon/syncdutils.py ++++ b/geo-replication/syncdaemon/syncdutils.py +@@ -57,6 +57,7 @@ from hashlib import sha256 as sha256 + + # auxiliary gfid based access prefix + _CL_AUX_GFID_PFX = ".gfid/" ++ROOT_GFID = "00000000-0000-0000-0000-000000000001" + GF_OP_RETRIES = 10 + + GX_GFID_CANONICAL_LEN = 37 # canonical gfid len + '\0' +@@ -670,6 +671,7 @@ def get_slv_dir_path(slv_host, slv_volume, gfid): + global slv_bricks + + dir_path = ENOENT ++ pfx = gauxpfx() + + if not slv_bricks: + slv_info = Volinfo(slv_volume, slv_host, master=False) +@@ -683,15 +685,30 @@ def get_slv_dir_path(slv_host, slv_volume, gfid): + gfid[2:4], + gfid], [ENOENT], [ESTALE]) + if dir_path != ENOENT: +- realpath = errno_wrap(os.readlink, [dir_path], +- [ENOENT], [ESTALE]) +- if not isinstance(realpath, int): +- realpath_parts = realpath.split('/') +- pargfid = realpath_parts[-2] +- basename = realpath_parts[-1] +- pfx = gauxpfx() +- dir_entry = os.path.join(pfx, pargfid, basename) +- return dir_entry ++ try: ++ realpath = errno_wrap(os.readlink, [dir_path], ++ [ENOENT], [ESTALE]) ++ if not isinstance(realpath, int): ++ realpath_parts = realpath.split('/') ++ pargfid = realpath_parts[-2] ++ basename = realpath_parts[-1] ++ dir_entry = os.path.join(pfx, pargfid, basename) ++ return dir_entry ++ except OSError: ++ # .gfid/GFID ++ gfidpath = unescape_space_newline(os.path.join(pfx, gfid)) ++ realpath = errno_wrap(Xattr.lgetxattr_buf, ++ [gfidpath, 'glusterfs.gfid2path'], [ENOENT], [ESTALE]) ++ if not isinstance(realpath, int): ++ basename = os.path.basename(realpath).rstrip('\x00') ++ dirpath = os.path.dirname(realpath) ++ if dirpath is "/": ++ pargfid = ROOT_GFID ++ else: ++ dirpath = dirpath.strip("/") ++ pargfid = get_gfid_from_mnt(dirpath) ++ dir_entry = os.path.join(pfx, pargfid, basename) ++ return dir_entry + + return None + +diff --git a/tests/00-geo-rep/00-georep-verify-non-root-setup.t b/tests/00-geo-rep/00-georep-verify-non-root-setup.t +index e753c1f..c9fd8b2 100644 +--- a/tests/00-geo-rep/00-georep-verify-non-root-setup.t ++++ b/tests/00-geo-rep/00-georep-verify-non-root-setup.t +@@ -118,8 +118,8 @@ clean_lock_files + TEST /usr/sbin/groupadd $grp + + clean_lock_files +-##Create non-root user and assign it to newly created group +- ++##Del if exists and create non-root user and assign it to newly created group ++userdel -r -f $usr + TEST /usr/sbin/useradd -G $grp $usr + + ##Modify password for non-root user to have control over distributing ssh-key +@@ -140,8 +140,6 @@ TEST killall_gluster; + TEST glusterd; + TEST pidof glusterd; + +- +- + ##Create, start and mount meta_volume + TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3}; + TEST $CLI volume start $META_VOL +@@ -225,6 +223,26 @@ TEST $GEOREP_CLI $master $slave_url resume + #Validate failure of volume stop when geo-rep is running + TEST ! 
$CLI volume stop $GMV0 + ++#Hybrid directory rename test BZ#1763439 ++TEST $GEOREP_CLI $master $slave_url config change_detector xsync ++mkdir ${master_mnt}/dir1 ++mkdir ${master_mnt}/dir1/dir2 ++mkdir ${master_mnt}/dir1/dir3 ++mkdir ${master_mnt}/hybrid_d1 ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_d1 ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1 ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/dir2 ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/dir3 ++ ++mv ${master_mnt}/hybrid_d1 ${master_mnt}/hybrid_rn_d1 ++mv ${master_mnt}/dir1/dir2 ${master_mnt}/rn_dir2 ++mv ${master_mnt}/dir1/dir3 ${master_mnt}/dir1/rn_dir3 ++ ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_rn_d1 ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/rn_dir2 ++EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/rn_dir3 ++ + #Stop Geo-rep + TEST $GEOREP_CLI $master $slave_url stop + +@@ -232,8 +250,8 @@ TEST $GEOREP_CLI $master $slave_url stop + TEST $GEOREP_CLI $master $slave_url delete + + #Cleanup authorized_keys +-sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys +-sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys ++sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' /home/$usr/.ssh/authorized_keys ++sed -i '/^command=.*gsyncd.*/d' /home/$usr/.ssh/authorized_keys + + #clear mountbroker + gluster-mountbroker remove --user $usr +-- +1.8.3.1 + diff --git a/SOURCES/0312-Scripts-quota_fsck-script-KeyError-contri_size.patch b/SOURCES/0312-Scripts-quota_fsck-script-KeyError-contri_size.patch new file mode 100644 index 0000000..bf8c820 --- /dev/null +++ b/SOURCES/0312-Scripts-quota_fsck-script-KeyError-contri_size.patch @@ -0,0 +1,59 @@ +From b1d8a5ee8b2e320aaaf9b2a145fbc285178d07bb Mon Sep 17 00:00:00 2001 +From: hari gowtham +Date: Tue, 22 Oct 2019 15:11:03 +0530 +Subject: [PATCH 312/313] Scripts: quota_fsck script KeyError: 'contri_size' + + back-port of: https://review.gluster.org/#/c/glusterfs/+/23586/ + +Problem: In a certain code flow, we weren't handling the +unavailability of the contri value in the dict. Trying to print +without the value resulted in erroring out. 
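+
+Both pitfalls are easy to demonstrate in isolation (hypothetical
+xattr_dict contents, mirroring the print_msg() code paths below):
+
+    xattr_dict = {"parents": 1}       # no 'contri_size' in this flow
+
+    # Old code indexed a possibly-absent key:
+    #     xattr_dict['contri_size']   ->  KeyError: 'contri_size'
+    # and used a {...} set literal where % needs a (...) tuple:
+    #     '%-24s %-60s' % {"Size Mismatch", "/path"}  ->  TypeError
+
+    # Fixed form: a proper tuple, printing the whole dict:
+    print('%-24s %-60s' % ("Size Mismatch", xattr_dict))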
+ +Fix: Have printed the whole of dictionary as the values will be +helpful in understanding the state of the file/dir + +>Fixes: bz#1764129 +>Change-Id: I99c538adb712f281ca10e4e0088f404f515b9725 +>Signed-off-by: hari gowtham + +BUG: 1719171 +Change-Id: I99c538adb712f281ca10e4e0088f404f515b9725 +Signed-off-by: hari gowtham +Reviewed-on: https://code.engineering.redhat.com/gerrit/183720 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + extras/quota/quota_fsck.py | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py +index f03895d..485a37a 100755 +--- a/extras/quota/quota_fsck.py ++++ b/extras/quota/quota_fsck.py +@@ -52,17 +52,17 @@ epilog_msg=''' + + def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None): + if log_type == QUOTA_VERBOSE: +- print('%-24s %-60s\nxattr_values: %s\n%s\n' % {"Verbose", path, xattr_dict, stbuf}) ++ print('%-24s %-60s\nxattr_values: %s\n%s\n' % ("Verbose", path, xattr_dict, stbuf)) + elif log_type == QUOTA_META_ABSENT: +- print('%-24s %-60s\n%s\n' % {"Quota-Meta Absent", path, xattr_dict}) ++ print('%-24s %-60s\n%s\n' % ("Quota-Meta Absent", path, xattr_dict)) + elif log_type == QUOTA_SIZE_MISMATCH: + print("mismatch") + if dir_size is not None: +- print('%24s %60s %12s %12s' % {"Size Mismatch", path, xattr_dict['contri_size'], +- dir_size}) ++ print('%24s %60s %12s %12s' % ("Size Mismatch", path, ++ xattr_dict, dir_size)) + else: +- print('%-24s %-60s %-12i %-12i' % {"Size Mismatch", path, xattr_dict['contri_size'], +- stbuf.st_size}) ++ print('%-24s %-60s %-12i %-12i' % ("Size Mismatch", path, xattr_dict, ++ stbuf.st_size)) + + def size_differs_lot(s1, s2): + ''' +-- +1.8.3.1 + diff --git a/SOURCES/0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch b/SOURCES/0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch new file mode 100644 index 0000000..e4887b8 --- /dev/null +++ b/SOURCES/0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch @@ -0,0 +1,60 @@ +From 23091d24d34102c7938ae2890930b73c89c5a8e7 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Tue, 22 Oct 2019 18:52:25 +0530 +Subject: [PATCH 313/313] extras: Cgroup(CPU/Mem) restriction are not working + on gluster process + +Problem: After Configure the Cgroup(CPU/MEM) limit to a gluster processes + resource(CPU/MEM) limits are not applicable to the gluster + processes.Cgroup limits are not applicable because all threads are + not moved into a newly created cgroup to apply restriction. 
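+
+Conceptually, what has to happen is captured by this small Python
+sketch (cgroup v1 paths assumed; the pid and cgroup name are
+placeholders, the shipped shell scripts do the equivalent with ps/awk):
+
+    import os
+
+    def move_all_threads(pid, tasks_file):
+        # /proc/<pid>/task lists every thread id of the process,
+        # whatever the thread's name, so nothing is skipped by a
+        # name-based filter such as "grep gluster".
+        for tid in os.listdir("/proc/%d/task" % pid):
+            with open(tasks_file, "a") as f:
+                f.write(tid + "\n")
+
+    move_all_threads(1234, "/sys/fs/cgroup/cpu/gluster_cgroup/tasks")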
+ +Solution: To move a gluster thread to newly created cgroup change the + condition in script + +> Change-Id: I8ad81c69200e4ec43a74f6052481551cf835354c +> Fixes: bz#1764208 +> (Cherry pick from commit 38de02012948013a88597545cf49380ce97f6fa7) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23599/) +> Signed-off-by: Mohit Agrawal + +Change-Id: I8ad81c69200e4ec43a74f6052481551cf835354c +BUG: 1764202 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/183730 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + extras/control-cpu-load.sh | 2 +- + extras/control-mem.sh | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/extras/control-cpu-load.sh b/extras/control-cpu-load.sh +index b739c82..52dcf62 100755 +--- a/extras/control-cpu-load.sh ++++ b/extras/control-cpu-load.sh +@@ -104,7 +104,7 @@ echo "Setting $quota_value to cpu.cfs_quota_us for gluster_cgroup." + echo ${quota_value} > ${LOC}/${cgroup_name}/cpu.cfs_quota_us + + if ps -T -p ${daemon_pid} | grep gluster > /dev/null; then +- for thid in `ps -T -p ${daemon_pid} | grep gluster | awk -F " " '{print $2}'`; ++ for thid in `ps -T -p ${daemon_pid} | grep -v SPID | awk -F " " '{print $2}'`; + do + echo ${thid} > ${LOC}/${cgroup_name}/tasks ; + done +diff --git a/extras/control-mem.sh b/extras/control-mem.sh +index 38aa2a0..91b36f8 100755 +--- a/extras/control-mem.sh ++++ b/extras/control-mem.sh +@@ -116,7 +116,7 @@ else + fi + + if ps -T -p ${daemon_pid} | grep gluster > /dev/null; then +- for thid in `ps -T -p ${daemon_pid} | grep gluster | awk -F " " '{print $2}'`; ++ for thid in `ps -T -p ${daemon_pid} | grep -v SPID | awk -F " " '{print $2}'`; + do + echo ${thid} > ${LOC}/${cgroup_name}/tasks ; + done +-- +1.8.3.1 + diff --git a/SPECS/glusterfs.spec b/SPECS/glusterfs.spec index 6516e97..5f03864 100644 --- a/SPECS/glusterfs.spec +++ b/SPECS/glusterfs.spec @@ -231,7 +231,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist} %else Name: glusterfs Version: 6.0 -Release: 6%{?dist} +Release: 20%{?dist} ExcludeArch: i686 %endif License: GPLv2 or LGPLv3+ @@ -303,6 +303,9 @@ Obsoletes: %{name}-ufo %if ( 0%{!?_with_gnfs:1} ) Obsoletes: %{name}-gnfs %endif +%if ( 0%{?rhel} < 7 ) +Obsoletes: %{name}-ganesha +%endif Provides: %{name}-common = %{version}-%{release} Provides: %{name}-core = %{version}-%{release} @@ -498,6 +501,127 @@ Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch +Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch +Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch +Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch +Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch +Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch +Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch +Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch +Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch +Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch +Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch +Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch +Patch0204: 
0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch +Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch +Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch +Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch +Patch0208: 0208-mem-pool-remove-dead-code.patch +Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch +Patch0210: 0210-mem-pool.-c-h-minor-changes.patch +Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch +Patch0212: 0212-core-fix-memory-allocation-issues.patch +Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch +Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch +Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch +Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch +Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch +Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch +Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch +Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch +Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch +Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch +Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch +Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch +Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch +Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch +Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch +Patch0228: 0228-locks-enable-notify-contention-by-default.patch +Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch +Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch +Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch +Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch +Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch +Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch +Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch +Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch +Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch +Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch +Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch +Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch +Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch +Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch +Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch +Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch +Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch +Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch +Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch +Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch +Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch +Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch +Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch +Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch +Patch0253: 
0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch +Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch +Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch +Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch +Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch +Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch +Patch0259: 0259-gluster-block-tuning-perf-options.patch +Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch +Patch0261: 0261-features-utime-Fix-mem_put-crash.patch +Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch +Patch0263: 0263-tests-fix-ctime-related-tests.patch +Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch +Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch +Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch +Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch +Patch0268: 0268-rpc-transport-have-default-listen-port.patch +Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch +Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch +Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch +Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch +Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch +Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch +Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch +Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch +Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch +Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch +Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch +Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch +Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch +Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch +Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch +Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch +Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch +Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch +Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch +Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch +Patch0289: 0289-cluster-ec-quorum-count-implementation.patch +Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch +Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch +Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch +Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch +Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch +Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch +Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch +Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch +Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch +Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch +Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch +Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch +Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch +Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch +Patch0304: 
0304-cluster-dht-Correct-fd-processing-loop.patch +Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch +Patch0306: 0306-cli-fix-distCount-value.patch +Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch +Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch +Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch +Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch +Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch +Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch +Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch %description GlusterFS is a distributed file-system capable of scaling to several @@ -646,7 +770,7 @@ is in user space and easily manageable. This package provides support to FUSE based clients and inlcudes the glusterfs(d) binary. -%if ( 0%{!?_without_server:1} ) +%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 ) %package ganesha Summary: NFS-Ganesha configuration Group: Applications/File @@ -694,7 +818,11 @@ Summary: GlusterFS Geo-replication Requires: %{name}%{?_isa} = %{version}-%{release} Requires: %{name}-server%{?_isa} = %{version}-%{release} Requires: python%{_pythonver} +%if ( 0%{?rhel} && 0%{?rhel} < 7 ) +Requires: python-prettytable +%else Requires: python%{_pythonver}-prettytable +%endif Requires: python%{_pythonver}-gluster = %{version}-%{release} Requires: rsync @@ -1118,7 +1246,7 @@ install -D -p -m 0644 extras/glusterfs-logrotate \ %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs # ganesha ghosts -%if ( 0%{!?_without_server:1} ) +%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 ) mkdir -p %{buildroot}%{_sysconfdir}/ganesha touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ @@ -1367,6 +1495,9 @@ exit 0 %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh %endif +%{_datadir}/glusterfs/scripts/identify-hangs.sh +%{_datadir}/glusterfs/scripts/collect-system-stats.sh +%{_datadir}/glusterfs/scripts/log_accounting.sh # xlators that are needed on the client- and on the server-side %dir %{_libdir}/glusterfs %dir %{_libdir}/glusterfs/%{version}%{?prereltag} @@ -1428,11 +1559,14 @@ exit 0 %endif %endif -%if ( 0%{?_without_server:1} ) -#exclude ganesha related files +%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 ) +#exclude ganesha related files for rhel 6 and client builds %exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample %exclude %{_libexecdir}/ganesha/* %exclude %{_prefix}/lib/ocf/resource.d/heartbeat/* +%if ( 0%{!?_without_server:1} ) +%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh +%endif %endif %exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh @@ -1587,7 +1721,7 @@ exit 0 %exclude %{_datadir}/glusterfs/tests/vagrant %endif -%if ( 0%{!?_without_server:1} ) +%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 ) %files ganesha %dir %{_libexecdir}/ganesha %{_sysconfdir}/ganesha/ganesha-ha.conf.sample @@ -1609,6 +1743,8 @@ exit 0 %if ( 0%{!?_without_server:1} ) %files server %doc extras/clear_xattrs.sh +%{_datadir}/glusterfs/scripts/xattr_analysis.py* +%{_datadir}/glusterfs/scripts/quota_fsck.py* # sysconf %config(noreplace) %{_sysconfdir}/glusterfs %exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol @@ -2199,8 +2335,54 @@ fi %endif %changelog -* Fri Aug 02 2019 CentOS Sources - 6.0-6.el8.centos -- remove vendor and/or packager lines +* Wed Oct 23 
2019 Rinku Kothiya - 6.0-20 +- fixes bugs bz#1719171 bz#1763412 bz#1764202 + +* Thu Oct 17 2019 Rinku Kothiya - 6.0-19 +- fixes bugs bz#1760939 + +* Wed Oct 16 2019 Rinku Kothiya - 6.0-18 +- fixes bugs bz#1758432 + +* Fri Oct 11 2019 Rinku Kothiya - 6.0-17 +- fixes bugs bz#1704562 bz#1758618 bz#1760261 + +* Wed Oct 09 2019 Rinku Kothiya - 6.0-16 +- fixes bugs bz#1752713 bz#1756325 + +* Fri Sep 27 2019 Rinku Kothiya - 6.0-15 +- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227 + +* Fri Sep 20 2019 Sunil Kumar Acharya - 6.0-14 +- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970 + bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163 + +* Fri Aug 23 2019 Rinku Kothiya - 6.0-13 +- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518 + +* Fri Aug 09 2019 Sunil Kumar Acharya - 6.0-12 +- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531 + bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774 + bz#1732793 + +* Tue Aug 06 2019 Sunil Kumar Acharya - 6.0-11 +- fixes bugs bz#1733520 bz#1734423 + +* Fri Aug 02 2019 Sunil Kumar Acharya - 6.0-10 +- fixes bugs bz#1713890 + +* Tue Jul 23 2019 Sunil Kumar Acharya - 6.0-9 +- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757 + +* Tue Jul 16 2019 Sunil Kumar Acharya - 6.0-8 +- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209 + bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108 + +* Fri Jun 28 2019 Sunil Kumar Acharya - 6.0-7 +- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064 + bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192 + bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331 + bz#1722509 bz#1722801 bz#1720248 * Fri Jun 14 2019 Sunil Kumar Acharya - 6.0-6 - fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701