From db15e8fe12b7148b2da975d915573cb24c4ee1c9 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Thu, 22 Nov 2018 09:58:52 +0530
Subject: [PATCH 475/493] glusterd: perform store operation in cleanup lock

All glusterd store operations and the cleanup thread should run under a
critical section to avoid any partial store writes.
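
As a minimal standalone sketch of the serialization pattern this patch
introduces (do_store, do_cleanup and the driver below are illustrative
stand-ins, not glusterfs functions):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *do_store (void *arg)
    {
            (void) arg;
            /* store path: hold cleanup_lock across the whole write so
             * a concurrent cleanup cannot interrupt it midway */
            pthread_mutex_lock (&cleanup_lock);
            printf ("writing volinfo to disk\n");
            pthread_mutex_unlock (&cleanup_lock);
            return NULL;
    }

    static void *do_cleanup (void *arg)
    {
            (void) arg;
            /* cleanup path: blocks until any in-flight store completes */
            pthread_mutex_lock (&cleanup_lock);
            printf ("tearing down and exiting\n");
            pthread_mutex_unlock (&cleanup_lock);
            return NULL;
    }

    int main (void)
    {
            pthread_t store, cleanup;

            pthread_create (&store, NULL, do_store, NULL);
            pthread_create (&cleanup, NULL, do_cleanup, NULL);
            pthread_join (store, NULL);
            pthread_join (cleanup, NULL);
            return 0;
    }

Note the resulting lock order in glusterd_store_volinfo(): cleanup_lock
is acquired before store_volinfo_lock and released after it, so a
cleanup can never interleave with an in-progress volinfo store.
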
> Change-Id: I4f12e738f597a1f925c87ea2f42565dcf9ecdb9d
> Fixes: bz#1652430
> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>

upstream patch: https://review.gluster.org/#/c/glusterfs/+/21702/

Change-Id: I4f12e738f597a1f925c87ea2f42565dcf9ecdb9d
BUG: 1654161
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158804
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
glusterfsd/src/glusterfsd.c | 73 ++++++++++++++++--------------
libglusterfs/src/glusterfs.h | 1 +
xlators/mgmt/glusterd/src/glusterd-store.c | 10 +++-
 3 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 03bca24..57effbd 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1395,43 +1395,46 @@ cleanup_and_exit (int signum)
if (ctx->cleanup_started)
return;
- ctx->cleanup_started = 1;
+ pthread_mutex_lock(&ctx->cleanup_lock);
+ {
+ ctx->cleanup_started = 1;
- /* signout should be sent to all the bricks in case brick mux is enabled
- * and multiple brick instances are attached to this process
- */
- if (ctx->active) {
- top = ctx->active->first;
- for (trav_p = &top->children; *trav_p;
- trav_p = &(*trav_p)->next) {
- victim = (*trav_p)->xlator;
- glusterfs_mgmt_pmap_signout (ctx, victim->name);
+ /* signout should be sent to all the bricks in case brick mux
+ * is enabled and multiple brick instances are attached to this
+ * process
+ */
+ if (ctx->active) {
+ top = ctx->active->first;
+ for (trav_p = &top->children; *trav_p;
+ trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ glusterfs_mgmt_pmap_signout (ctx, victim->name);
+ }
+ } else {
+ glusterfs_mgmt_pmap_signout (ctx, NULL);
}
- } else {
- glusterfs_mgmt_pmap_signout (ctx, NULL);
- }
- /* below part is a racy code where the rpcsvc object is freed.
- * But in another thread (epoll thread), upon poll error in the
- * socket the transports are cleaned up where again rpcsvc object
- * is accessed (which is already freed by the below function).
- * Since the process is about to be killed dont execute the function
- * below.
- */
- /* if (ctx->listener) { */
- /* (void) glusterfs_listener_stop (ctx); */
- /* } */
+ /* The part below is racy code where the rpcsvc object is freed.
+ * But in another thread (the epoll thread), upon a poll error on
+ * the socket, the transports are cleaned up and the rpcsvc object
+ * (already freed by the function below) is accessed again. Since
+ * the process is about to be killed, don't execute the function
+ * below.
+ */
+ /* if (ctx->listener) { */
+ /* (void) glusterfs_listener_stop (ctx); */
+ /* } */
- /* Call fini() of FUSE xlator first:
- * so there are no more requests coming and
- * 'umount' of mount point is done properly */
- trav = ctx->master;
- if (trav && trav->fini) {
- THIS = trav;
- trav->fini (trav);
- }
+ /* Call fini() of FUSE xlator first:
+ * so there are no more requests coming and
+ * 'umount' of mount point is done properly */
+ trav = ctx->master;
+ if (trav && trav->fini) {
+ THIS = trav;
+ trav->fini (trav);
+ }
- glusterfs_pidfile_cleanup (ctx);
+ glusterfs_pidfile_cleanup (ctx);
#if 0
/* TODO: Properly do cleanup_and_exit(), with synchronization */
@@ -1442,8 +1445,9 @@ cleanup_and_exit (int signum)
}
#endif
- trav = NULL;
-
+ trav = NULL;
+ }
+ pthread_mutex_unlock(&ctx->cleanup_lock);
/* NOTE: Only the least significant 8 bits i.e (signum & 255)
will be available to parent process on calling exit() */
exit(abs(signum));
@@ -1598,6 +1602,7 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
goto out;
pthread_mutex_init (&ctx->notify_lock, NULL);
+ pthread_mutex_init(&ctx->cleanup_lock, NULL);
pthread_cond_init (&ctx->notify_cond, NULL);
ctx->clienttable = gf_clienttable_alloc();
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index d06d8cf..c12e94e 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -574,6 +574,7 @@ struct _glusterfs_ctx {
char btbuf[GF_BACKTRACE_LEN];
pthread_mutex_t notify_lock;
+ pthread_mutex_t cleanup_lock;
pthread_cond_t notify_cond;
int notifying;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index f276fef..b3c4d9a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -1792,10 +1792,17 @@ out:
int32_t
glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t ac)
{
- int32_t ret = -1;
+ int32_t ret = -1;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+ ctx = this->ctx;
+ GF_ASSERT(ctx);
GF_ASSERT (volinfo);
+ pthread_mutex_lock(&ctx->cleanup_lock);
pthread_mutex_lock(&volinfo->store_volinfo_lock);
{
glusterd_perform_volinfo_version_action(volinfo, ac);
@@ -1837,6 +1844,7 @@ glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t a
}
unlock:
pthread_mutex_unlock(&volinfo->store_volinfo_lock);
+ pthread_mutex_unlock(&ctx->cleanup_lock);
if (ret)
glusterd_store_volume_cleanup_tmp(volinfo);
--
1.8.3.1