From b609a9c72d335a0c32a1cec8fd9a0400745228ea Mon Sep 17 00:00:00 2001
From: Poornima G <pgurusid@redhat.com>
Date: Mon, 9 Jan 2017 09:55:26 +0530
Subject: [PATCH 357/361] readdir-ahead: Perform STACK_UNWIND outside of mutex
 locks

Currently, STACK_UNWIND is performed within ctx->lock.
If readdir-ahead is loaded as a child of dht, then there
can be scenarios where the function calling STACK_UNWIND
becomes re-entrant. It is good practice not to call
STACK_WIND/UNWIND while holding local mutexes.
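The general pattern applied here is sketched below in a minimal,
self-contained C program. This is not GlusterFS code: demo_ctx,
demo_cbk_t and serve_request are hypothetical stand-ins for
rda_fd_ctx, the unwind path and the fop handler, used only to
illustrate "collect under the lock, call back after unlocking".

    /*
     * Minimal sketch (plain pthreads, illustrative names only):
     * copy out whatever the callback needs while the lock is held,
     * drop the lock, and only then invoke the callback, so that a
     * re-entrant callback cannot deadlock on the same lock.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct demo_ctx {                        /* stand-in for rda_fd_ctx */
            pthread_mutex_t lock;
            int             ready;
            int             result;
    };

    typedef void (*demo_cbk_t)(int result);  /* stand-in for STACK_UNWIND */

    static void
    serve_request(struct demo_ctx *ctx, demo_cbk_t cbk)
    {
            int serve  = 0;
            int result = 0;

            pthread_mutex_lock(&ctx->lock);
            if (ctx->ready) {
                    result = ctx->result;    /* copy out under the lock */
                    serve  = 1;
            }
            pthread_mutex_unlock(&ctx->lock);

            if (serve)
                    cbk(result);             /* "unwind" outside the lock */
    }

    static void
    demo_cbk(int result)
    {
            printf("served result %d\n", result);
    }

    int
    main(void)
    {
            struct demo_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 1, 42 };

            serve_request(&ctx, demo_cbk);
            return 0;
    }

The patch below follows the same idea: rda_readdirp() and
rda_fill_fd_cbk() fill the entry list and record whether to serve
while holding ctx->lock, and call STACK_UNWIND_STRICT and
gf_dirent_free() only after UNLOCK(&ctx->lock).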

mainline:
> BUG: 1401812
> Reviewed-on: http://review.gluster.org/16068
> Smoke: Gluster Build System <jenkins@build.gluster.org>
> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
> CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
> Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
> Reviewed-by: Raghavendra G <rgowdapp@redhat.com>
(cherry picked from commit c89a065af2adc11d5aca3a4500d2e8c1ea02ed28)

BUG: 1427096
Change-Id: If4e869849d99ce233014a8aad7c4d5eef8dc2e98
Signed-off-by: Poornima G <pgurusid@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/101417
Tested-by: Milind Changire <mchangir@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/afr/client-side-heal.t                 |   0
 .../performance/readdir-ahead/src/readdir-ahead.c  | 115 ++++++++++++---------
 2 files changed, 67 insertions(+), 48 deletions(-)
 mode change 100644 => 100755 tests/basic/afr/client-side-heal.t

diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
old mode 100644
new mode 100755
diff --git a/xlators/performance/readdir-ahead/src/readdir-ahead.c b/xlators/performance/readdir-ahead/src/readdir-ahead.c
index 38507a1..dcbab53 100644
--- a/xlators/performance/readdir-ahead/src/readdir-ahead.c
+++ b/xlators/performance/readdir-ahead/src/readdir-ahead.c
@@ -109,8 +109,8 @@ rda_can_serve_readdirp(struct rda_fd_ctx *ctx, size_t request_size)
  * buffer. ctx must be locked.
  */
 static int32_t
-__rda_serve_readdirp(xlator_t *this, gf_dirent_t *entries, size_t request_size,
-		   struct rda_fd_ctx *ctx)
+__rda_fill_readdirp (xlator_t *this, gf_dirent_t *entries, size_t request_size,
+		     struct rda_fd_ctx *ctx)
 {
 	gf_dirent_t     *dirent, *tmp;
 	size_t           dirent_size, size = 0, inodectx_size = 0;
@@ -146,48 +146,42 @@ __rda_serve_readdirp(xlator_t *this, gf_dirent_t *entries, size_t request_size,
 }
 
 static int32_t
-rda_readdirp_stub(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
-		  off_t offset, dict_t *xdata)
+__rda_serve_readdirp (xlator_t *this, struct rda_fd_ctx *ctx, size_t size,
+                      gf_dirent_t *entries, int *op_errno)
 {
-	gf_dirent_t entries;
-	int32_t ret;
-	struct rda_fd_ctx *ctx;
-        int op_errno = 0;
-
-	ctx = get_rda_fd_ctx(fd, this);
-	INIT_LIST_HEAD(&entries.list);
-	ret = __rda_serve_readdirp(this, &entries, size, ctx);
+        int32_t      ret     = 0;
 
-	if (!ret && (ctx->state & RDA_FD_ERROR)) {
-		ret = -1;
-		ctx->state &= ~RDA_FD_ERROR;
+        ret = __rda_fill_readdirp (this, entries, size, ctx);
 
-		/*
-		 * the preload has stopped running in the event of an error, so
-		 * pass all future requests along
-		 */
-		ctx->state |= RDA_FD_BYPASS;
-	}
+        if (!ret && (ctx->state & RDA_FD_ERROR)) {
+                ret = -1;
+                ctx->state &= ~RDA_FD_ERROR;
 
+                /*
+                 * the preload has stopped running in the event of an error, so
+                 * pass all future requests along
+                 */
+                ctx->state |= RDA_FD_BYPASS;
+        }
         /*
          * Use the op_errno sent by lower layers as xlators above will check
          * the op_errno for identifying whether readdir is completed or not.
          */
-        op_errno = ctx->op_errno;
-
-	STACK_UNWIND_STRICT(readdirp, frame, ret, op_errno, &entries, xdata);
-	gf_dirent_free(&entries);
+        *op_errno = ctx->op_errno;
 
-	return 0;
+        return ret;
 }
 
 static int32_t
 rda_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 	     off_t off, dict_t *xdata)
 {
-	struct rda_fd_ctx *ctx;
-	call_stub_t *stub;
-	int fill = 0;
+        struct rda_fd_ctx   *ctx      = NULL;
+        int                  fill     = 0;
+        gf_dirent_t          entries;
+        int                  ret      = 0;
+        int                  op_errno = 0;
+        gf_boolean_t         serve    = _gf_false;
 
 	ctx = get_rda_fd_ctx(fd, this);
 	if (!ctx)
@@ -196,6 +190,7 @@ rda_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 	if (ctx->state & RDA_FD_BYPASS)
 		goto bypass;
 
+        INIT_LIST_HEAD (&entries.list);
 	LOCK(&ctx->lock);
 
 	/* recheck now that we have the lock */
@@ -232,21 +227,22 @@ rda_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 		goto bypass;
 	}
 
-	stub = fop_readdirp_stub(frame, rda_readdirp_stub, fd, size, off, xdata);
-	if (!stub) {
-		UNLOCK(&ctx->lock);
-		goto err;
-	}
-
 	/*
 	 * If we haven't bypassed the preload, this means we can either serve
 	 * the request out of the preload or the request that enables us to do
 	 * so is in flight...
 	 */
-	if (rda_can_serve_readdirp(ctx, size)) {
-		call_resume(stub);
+	if (rda_can_serve_readdirp (ctx, size)) {
+                ret = __rda_serve_readdirp (this, ctx, size, &entries,
+                                            &op_errno);
+                serve = _gf_true;
         } else {
-		ctx->stub = stub;
+                ctx->stub = fop_readdirp_stub (frame, NULL, fd, size, off,
+                                               xdata);
+                if (!ctx->stub) {
+                        UNLOCK(&ctx->lock);
+                        goto err;
+                }
 
                 if (!(ctx->state & RDA_FD_RUNNING)) {
                         fill = 1;
@@ -256,6 +252,12 @@ rda_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 
 	UNLOCK(&ctx->lock);
 
+        if (serve) {
+                STACK_UNWIND_STRICT (readdirp, frame, ret, op_errno, &entries,
+                                     xdata);
+                gf_dirent_free(&entries);
+        }
+
 	if (fill)
 		rda_fill_fd(frame, this, fd);
 
@@ -272,17 +274,24 @@ err:
 }
 
 static int32_t
-rda_fill_fd_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+rda_fill_fd_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
 		 int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
 		 dict_t *xdata)
 {
-	gf_dirent_t *dirent, *tmp;
-	struct rda_local *local = frame->local;
-	struct rda_fd_ctx *ctx = local->ctx;
-	struct rda_priv *priv = this->private;
-	int fill = 1;
-        size_t inodectx_size = 0, dirent_size = 0;
-
+        gf_dirent_t       *dirent        = NULL;
+        gf_dirent_t       *tmp           = NULL;
+        gf_dirent_t        serve_entries;
+        struct rda_local  *local         = frame->local;
+        struct rda_fd_ctx *ctx           = local->ctx;
+        struct rda_priv   *priv          = this->private;
+        int                fill          = 1;
+        size_t             inodectx_size = 0;
+        size_t             dirent_size   = 0;
+        int                ret           = 0;
+        gf_boolean_t       serve         = _gf_false;
+        call_stub_t       *stub          = NULL;
+
+        INIT_LIST_HEAD (&serve_entries.list);
 	LOCK(&ctx->lock);
 
 	/* Verify that the preload buffer is still pending on this data. */
@@ -339,8 +348,11 @@ rda_fill_fd_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
 	 * is always based on ctx->cur_offset.
 	 */
 	if (ctx->stub &&
-	    rda_can_serve_readdirp(ctx, ctx->stub->args.size)) {
-		call_resume(ctx->stub);
+	    rda_can_serve_readdirp (ctx, ctx->stub->args.size)) {
+                ret = __rda_serve_readdirp (this, ctx, ctx->stub->args.size,
+                                            &serve_entries, &op_errno);
+                serve = _gf_true;
+                stub = ctx->stub;
 		ctx->stub = NULL;
 	}
 
@@ -370,6 +382,13 @@ out:
 
 	UNLOCK(&ctx->lock);
 
+        if (serve) {
+                STACK_UNWIND_STRICT (readdirp, stub->frame, ret, op_errno,
+                                     &serve_entries, xdata);
+                gf_dirent_free (&serve_entries);
+                call_stub_destroy (stub);
+        }
+
 	if (fill)
 		rda_fill_fd(frame, this, local->fd);
 
-- 
1.8.3.1