From 5b57b686984498eee09ce1a5f27eef6a14e4387e Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Wed, 6 Mar 2019 12:55:56 +0530
Subject: [PATCH 533/534] glusterd: glusterd memory leak while running "gluster
 v profile" in a loop

Problem: glusterd has memory leak while running "gluster v profile"
         in a loop

Solution: Resolve leak code path to avoid leak

> Change-Id: Id608703ff6d0ad34ed8f921a5d25544e24cfadcd
> fixes: bz#1685414
> Cherry pick from commit 9374484917466dff4688d96ff7faa0de1c804a6c
> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22301/

Change-Id: I874a0e9947913c201c67b78aaaa982d1fae78b46
BUG: 1684648
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/164609
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 xlators/mgmt/glusterd/src/glusterd-mgmt.c   | 5 +++--
 xlators/mgmt/glusterd/src/glusterd-syncop.c | 4 +++-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 275059c..e176288 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -1380,8 +1380,9 @@ out:
         if (rsp.op_errstr)
                 free (rsp.op_errstr);
 
-        if (rsp.dict.dict_val)
-                free (rsp.dict.dict_val);
+        if (rsp_dict)
+                dict_unref (rsp_dict);
+
         GF_FREE (peerid);
         /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
          * the caller function.
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 7baef64..3b7fa8b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1707,6 +1707,7 @@ gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
                    char **op_errstr)
 {
         glusterd_pending_node_t *pending_node = NULL;
+        glusterd_pending_node_t *tmp = NULL;
         struct cds_list_head    selected = {0,};
         xlator_t                *this = NULL;
         int                     brick_count = 0;
@@ -1742,7 +1743,7 @@ gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
         rsp_dict = NULL;
 
         brick_count = 0;
-        cds_list_for_each_entry (pending_node, &selected, list) {
+        cds_list_for_each_entry_safe (pending_node, tmp, &selected, list) {
                 rpc = glusterd_pending_node_get_rpc (pending_node);
                 if (!rpc) {
                         if (pending_node->type == GD_NODE_REBALANCE) {
@@ -1792,6 +1793,7 @@ gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
 
                 brick_count++;
                 glusterd_pending_node_put_rpc (pending_node);
+                GF_FREE(pending_node);
         }
 
         pending_node = NULL;
-- 
1.8.3.1