From faaaa3452ceec6afcc18cffc9beca3fe19841cce Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 3 Jan 2019 17:44:18 +0530
Subject: [PATCH 104/124] afr/shd: Cleanup self heal daemon resources during
 afr fini

We were not properly cleaning up self-heal daemon resources
during afr fini. This patch cleans them up.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22151/

>Change-Id: I597860be6f781b195449e695d871b8667a418d5a
>updates: bz#1659708
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I7be981b9c2476c8cacadea6b14d74234f67b714f
BUG: 1471742
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167845
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
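A minimal standalone sketch (note region, ignored by git am) of the teardown
order the new afr_selfheal_daemon_fini()/afr_destroy_healer_object() below
rely on: stop and join the healer thread first, and only then destroy its
condition variable and mutex. Plain pthreads only; struct worker, worker_loop
and worker_fini are made-up illustrative names, and a stop flag stands in for
gf_thread_cleanup_xint().

/* Build: cc -pthread worker_fini_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct worker {
    pthread_t thread;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int running;
    int stop;
};

static void *
worker_loop(void *arg)
{
    struct worker *w = arg;

    pthread_mutex_lock(&w->mutex);
    while (!w->stop)
        pthread_cond_wait(&w->cond, &w->mutex);  /* wait for work or stop */
    pthread_mutex_unlock(&w->mutex);
    return NULL;
}

static void
worker_fini(struct worker *w)
{
    if (w->running) {
        /* Wake the thread and wait for it to exit before freeing anything
         * it might still touch (the patch uses gf_thread_cleanup_xint()
         * for this step). */
        pthread_mutex_lock(&w->mutex);
        w->stop = 1;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->mutex);
        pthread_join(w->thread, NULL);
        w->running = 0;
    }
    /* Only safe once the thread is gone. */
    pthread_cond_destroy(&w->cond);
    pthread_mutex_destroy(&w->mutex);
}

int
main(void)
{
    struct worker w = {0};

    pthread_mutex_init(&w.mutex, NULL);
    pthread_cond_init(&w.cond, NULL);
    pthread_create(&w.thread, NULL, worker_loop, &w);
    w.running = 1;

    sleep(1);          /* pretend the daemon did some work */
    worker_fini(&w);   /* orderly teardown */
    printf("healer-style worker cleaned up\n");
    return 0;
}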
 libglusterfs/src/syncop-utils.c           |  8 +++++
 xlators/cluster/afr/src/afr-self-heald.c  |  2 ++
 xlators/cluster/afr/src/afr.c             | 57 ++++++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)
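A small sketch of the cancellation-masking idea behind the
_mask_cancellation()/_unmask_cancellation() calls added to
afr_shd_sweep_prepare()/afr_shd_sweep_done(). Assumption (not shown in this
patch): those helpers amount to pthread_setcancelstate(), so a sweep cannot
be cancelled halfway through and fini-time cancellation lands between sweeps.

/* Build: cc -pthread sweep_mask_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
sweeper(void *arg)
{
    int old;

    (void)arg;
    for (;;) {
        /* Block cancellation while a sweep is in progress ... */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
        sleep(1);                       /* the "sweep" */
        /* ... and allow it again between sweeps, so teardown can stop us. */
        pthread_setcancelstate(old, NULL);
        pthread_testcancel();           /* explicit cancellation point */
    }
    return NULL;
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, sweeper, NULL);
    sleep(2);                 /* let a couple of sweeps run */
    pthread_cancel(t);        /* delivered only where cancellation is enabled */
    pthread_join(t, NULL);
    printf("sweeper stopped between sweeps\n");
    return 0;
}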
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index be03527..b842142 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -350,6 +350,11 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
     gf_boolean_t cond_init = _gf_false;
     gf_boolean_t mut_init = _gf_false;
     gf_dirent_t entries;
+    xlator_t *this = NULL;
+
+    if (frame) {
+        this = frame->this;
+    }
 
     /*For this functionality to be implemented in general, we need
      * synccond_t infra which doesn't block the executing thread. Until then
@@ -397,6 +402,9 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
 
         list_for_each_entry_safe(entry, tmp, &entries.list, list)
         {
+            if (this && this->cleanup_starting)
+                goto out;
+
             list_del_init(&entry->list);
             if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) {
                 gf_dirent_entry_free(entry);
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 7eb1207..8bc4720 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -373,6 +373,7 @@ afr_shd_sweep_prepare(struct subvol_healer *healer)
 
     time(&event->start_time);
     event->end_time = 0;
+    _mask_cancellation();
 }
 
 void
@@ -394,6 +395,7 @@ afr_shd_sweep_done(struct subvol_healer *healer)
 
     if (eh_save_history(shd->statistics[healer->subvol], history) < 0)
         GF_FREE(history);
+    _unmask_cancellation();
 }
 
 int
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 33258a0..a0a7551 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -611,13 +611,70 @@ init(xlator_t *this)
 out:
     return ret;
 }
+void
+afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
+{
+    int ret = -1;
+
+    if (!healer)
+        return;
+
+    if (healer->running) {
+        /*
+         * If there are any resources to clean up, we need
+         * to do that gracefully using pthread_cleanup_push
+         */
+        ret = gf_thread_cleanup_xint(healer->thread);
+        if (ret)
+            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
+                   "Failed to clean up healer threads.");
+        healer->thread = 0;
+    }
+    pthread_cond_destroy(&healer->cond);
+    pthread_mutex_destroy(&healer->mutex);
+}
+
+void
+afr_selfheal_daemon_fini(xlator_t *this)
+{
+    struct subvol_healer *healer = NULL;
+    afr_self_heald_t *shd = NULL;
+    afr_private_t *priv = NULL;
+    int i = 0;
+
+    priv = this->private;
+    if (!priv)
+        return;
+
+    shd = &priv->shd;
+    if (!shd->iamshd)
+        return;
+
+    for (i = 0; i < priv->child_count; i++) {
+        healer = &shd->index_healers[i];
+        afr_destroy_healer_object(this, healer);
 
+        healer = &shd->full_healers[i];
+        afr_destroy_healer_object(this, healer);
+
+        if (shd->statistics[i])
+            eh_destroy(shd->statistics[i]);
+    }
+    GF_FREE(shd->index_healers);
+    GF_FREE(shd->full_healers);
+    GF_FREE(shd->statistics);
+    if (shd->split_brain)
+        eh_destroy(shd->split_brain);
+}
 void
 fini(xlator_t *this)
 {
     afr_private_t *priv = NULL;
 
     priv = this->private;
+
+    afr_selfheal_daemon_fini(this);
+
     LOCK(&priv->lock);
     if (priv->timer != NULL) {
         gf_timer_call_cancel(this->ctx, priv->timer);
-- 
1.8.3.1