From dca6f06c6b49ba92c1f2278e806c615a1a6b4b16 Mon Sep 17 00:00:00 2001
From: Avra Sengupta <asengupt@redhat.com>
Date: Fri, 26 Aug 2016 14:05:07 +0530
Subject: [PATCH 67/79] snapshot/eventsapi: Integrate snapshot events with eventsapi

Backport of http://review.gluster.org/#/c/15329/

The following snapshot events are generated via the eventsapi; a sample
payload is shown for each, and a hedged consumer sketch follows the list.

1. EVENT_SNAPSHOT_CREATED : snapshot_name=snap1 volume_name=test_vol
                            snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
2. EVENT_SNAPSHOT_CREATE_FAILED : snapshot_name=snap1 volume_name=test_vol
                                  error=Snapshot snap1 already exists
3. EVENT_SNAPSHOT_ACTIVATED : snapshot_name=snap1
                              snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
4. EVENT_SNAPSHOT_ACTIVATE_FAILED : snapshot_name=snap1
                                    error=Snapshot snap1 is already activated.
5. EVENT_SNAPSHOT_DEACTIVATED : snapshot_name=snap1
                                snapshot_uuid=26dd6c52-6021-40b1-a507-001a80401d70
6. EVENT_SNAPSHOT_DEACTIVATE_FAILED : snapshot_name=snap3
                                      error=Snapshot (snap3) does not exist.
7. EVENT_SNAPSHOT_SOFT_LIMIT_REACHED : volume_name=test_vol
                                       volume_id=2ace2616-5591-4b9b-be2a-38592dda5758
8. EVENT_SNAPSHOT_HARD_LIMIT_REACHED : volume_name=test_vol
                                       volume_id=2ace2616-5591-4b9b-be2a-38592dda5758
9. EVENT_SNAPSHOT_RESTORED : snapshot_name=snap1 volume_name=test_vol
                             snapshot_uuid=3a840ec5-08da-4f2b-850d-1d5539a5d14d
10. EVENT_SNAPSHOT_RESTORE_FAILED : snapshot_name=snap10
                                    error=Snapshot (snap10) does not exist
11. EVENT_SNAPSHOT_DELETED : snapshot_name=snap1
                             snapshot_uuid=d9ff3d4f-f579-4345-a4da-4f9353f0950c
12. EVENT_SNAPSHOT_DELETE_FAILED : snapshot_name=snap2
                                   error=Snapshot (snap2) does not exist
13. EVENT_SNAPSHOT_CLONED : clone_uuid=93ba9f06-cb9c-4ace-aa52-2616e7f31022
                            snapshot_name=snap1 clone_name=clone2
14. EVENT_SNAPSHOT_CLONE_FAILED : snapshot_name=snap1 clone_name=clone2
                                  error=Volume with name:clone2 already exists
15. EVENT_SNAPSHOT_CONFIG_UPDATED : auto-delete=enable config_type=system_config
                                    config_type=volume_config hard_limit=100
16. EVENT_SNAPSHOT_CONFIG_UPDATE_FAILED :
                   error=Invalid snap-max-soft-limit 110. Expected range 1 - 100
17. EVENT_SNAPSHOT_SCHEDULER_INITIALISED : status=Success
18. EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED : error=Initialisation failed.
19. EVENT_SNAPSHOT_SCHEDULER_ENABLED : status=Successfuly Enabled
20. EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED :
                                   error=Snapshot scheduler is already enabled.
21. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED : status=Successfuly added job job1
22. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED :
                    status=Failed to add job job1 error=The job already exists.
23. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED :
                                             status=Successfuly edited job job1
24. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED :
                                                 status=Failed to edit job job2
                                                 error=The job cannot be found.
25. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED :
                                            status=Successfuly deleted job job1
26. EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED :
                                               status=Failed to delete job job1
                                               error=The job cannot be found.
27. EVENT_SNAPSHOT_SCHEDULER_DISABLED : status=Successfuly Disabled
28. EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED :
                                   error=Snapshot scheduler is already disabled.
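
All of the above are delivered as JSON to any webhook registered with the
eventsapi (e.g. via gluster-eventsapi webhook-add <url>). The sketch below
is illustrative only: the payload shape (nodeid, ts, event, message)
follows eventsapi conventions, and the port, filter, and script itself are
assumptions for the example, not part of this patch.

    #!/usr/bin/env python
    # Hedged sketch of a webhook sink for the snapshot events listed above.
    # Assumed payload shape (not defined by this patch):
    #   {"nodeid": "...", "ts": 1472193307, "event": "SNAPSHOT_CREATED",
    #    "message": {"snapshot_name": "snap1", ...}}
    import json
    from http.server import BaseHTTPRequestHandler, HTTPServer

    class SnapEventSink(BaseHTTPRequestHandler):
        def do_POST(self):
            body = self.rfile.read(int(self.headers.get("Content-Length", 0)))
            ev = json.loads(body)
            # React only to the snapshot/scheduler events listed above;
            # names carry no EVENT_ prefix on the wire (compare the
            # eventtypes usage in snap_scheduler.py further down).
            if ev.get("event", "").startswith("SNAPSHOT_"):
                print("%s from %s: %s"
                      % (ev["event"], ev.get("nodeid"), ev.get("message")))
            self.send_response(200)
            self.end_headers()

    if __name__ == "__main__":
        # Port 9000 is arbitrary for the example.
        HTTPServer(("0.0.0.0", 9000), SnapEventSink).serve_forever()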

Also excluded conf.py from the client build.
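
conf.py is generated from conf.py.in at configure time so that
snap_scheduler.py can locate the events package. Roughly (the substituted
path below is illustrative, not the value configure will produce):

    # What the generated conf.py boils down to after substitution:
    #     GLUSTERFS_LIBEXECDIR = '/usr/libexec/glusterfs'   # illustrative
    # and how snap_scheduler.py consumes it (mirrors the diff below):
    import sys
    from conf import GLUSTERFS_LIBEXECDIR
    sys.path.insert(1, GLUSTERFS_LIBEXECDIR)
    from events.gf_event import gf_event  # now importable from libexec dir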

> Reviewed-on: http://review.gluster.org/15329
> Tested-by: Aravinda VK <avishwan@redhat.com>
> Smoke: Gluster Build System <jenkins@build.gluster.org>
> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
> CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
> Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>

(cherry picked from commit c1278de9a5fb6a64455f42b8b17a8d05b74c2420)

Change-Id: I3479cc3fb7af3c76ded67cf289f99547d0a55d21
BUG: 1361184
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/84811
Reviewed-by: Milind Changire <mchangir@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 cli/src/cli-rpc-ops.c                              |  384 ++++++++++++++++++--
 configure.ac                                       |    1 +
 extras/snap_scheduler/Makefile.am                  |    4 +-
 extras/snap_scheduler/conf.py.in                   |   11 +
 extras/snap_scheduler/snap_scheduler.py            |   77 ++++-
 glusterfs.spec.in                                  |    5 +
 .../mgmt/glusterd/src/glusterd-snapshot-utils.c    |    1 +
 xlators/mgmt/glusterd/src/glusterd-snapshot.c      |  105 +++++-
 9 files changed, 550 insertions(+), 39 deletions(-)
 create mode 100644 extras/snap_scheduler/conf.py.in

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index dcf2a12..e8fc658 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -39,6 +39,7 @@
 #include "cli-quotad-client.h"
 #include "run.h"
 #include "quota-common-utils.h"
+#include "events.h"
 
 enum gf_task_types {
         GF_TASK_TYPE_REBALANCE,
@@ -10316,6 +10317,334 @@ out:
 }
 
 int
+gf_cli_generate_snapshot_event (gf_cli_rsp *rsp, dict_t *dict,
+                                int32_t type, char *snap_name,
+                                char *volname, char *snap_uuid,
+                                char *clone_name)
+{
+        int         ret               = -1;
+        int         config_command    = 0;
+        int32_t     delete_cmd        = -1;
+        uint64_t    hard_limit        = 0;
+        uint64_t    soft_limit        = 0;
+        char       *auto_delete       = NULL;
+        char       *snap_activate     = NULL;
+        char        msg[PATH_MAX]     = {0, };
+        char        option[PATH_MAX]  = {0, };
+
+        GF_VALIDATE_OR_GOTO ("cli", dict, out);
+        GF_VALIDATE_OR_GOTO ("cli", rsp, out);
+
+        switch (type) {
+        case GF_SNAP_OPTION_TYPE_CREATE:
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snap name");
+                        goto out;
+                }
+
+                if (!volname) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get volume name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_CREATE_FAILED,
+                                  "snapshot_name=%s;volume_name=%s;error=%s",
+                                  snap_name, volname,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_CREATED, "snapshot_name=%s;"
+                          "volume_name=%s;snapshot_uuid=%s", snap_name,
+                          volname, snap_uuid);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_ACTIVATE:
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snap name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_ACTIVATE_FAILED,
+                                  "snapshot_name=%s;error=%s", snap_name,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_ACTIVATED, "snapshot_name=%s;"
+                          "snapshot_uuid=%s", snap_name, snap_uuid);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_DEACTIVATE:
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snap name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_DEACTIVATE_FAILED,
+                                  "snapshot_name=%s;error=%s", snap_name,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_DEACTIVATED, "snapshot_name=%s;"
+                          "snapshot_uuid=%s", snap_name, snap_uuid);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_RESTORE:
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snap name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_RESTORE_FAILED,
+                                  "snapshot_name=%s;error=%s", snap_name,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                if (!volname) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get volname");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_RESTORED, "snapshot_name=%s;"
+                          "snapshot_uuid=%s;volume_name=%s",
+                          snap_name, snap_uuid, volname);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_DELETE:
+                ret = dict_get_int32 (dict, "sub-cmd", &delete_cmd);
+                if (ret) {
+                        gf_log ("cli", GF_LOG_ERROR, "Could not get sub-cmd");
+                        goto out;
+                }
+
+                /*
+                 * Need not generate any event (success or failure) for delete *
+                 * all, as it will trigger individual delete for all snapshots *
+                 */
+                if (delete_cmd == GF_SNAP_DELETE_TYPE_ALL) {
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snap name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_DELETE_FAILED,
+                                  "snapshot_name=%s;error=%s", snap_name,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_DELETED, "snapshot_name=%s;"
+                          "snapshot_uuid=%s", snap_name, snap_uuid);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_CLONE:
+                if (!clone_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get clone name");
+                        goto out;
+                }
+
+                if (!snap_name) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Failed to get snapname name");
+                        goto out;
+                }
+
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_CLONE_FAILED,
+                                  "snapshot_name=%s;clone_name=%s;"
+                                  "error=%s", snap_name, clone_name,
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                if (!snap_uuid) {
+                        gf_log ("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+                        goto out;
+                }
+
+                gf_event (EVENT_SNAPSHOT_CLONED, "snapshot_name=%s;"
+                          "clone_name=%s;clone_uuid=%s",
+                          snap_name, clone_name, snap_uuid);
+
+                ret = 0;
+                break;
+
+        case GF_SNAP_OPTION_TYPE_CONFIG:
+                if (rsp->op_ret != 0) {
+                        gf_event (EVENT_SNAPSHOT_CONFIG_UPDATE_FAILED,
+                                  "error=%s",
+                                  rsp->op_errstr ? rsp->op_errstr :
+                                  "Please check log file for details");
+                        ret = 0;
+                        break;
+                }
+
+                ret = dict_get_int32 (dict, "config-command", &config_command);
+                if (ret) {
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Could not fetch config type");
+                        goto out;
+                }
+
+                if (config_command == GF_SNAP_CONFIG_DISPLAY) {
+                        ret = 0;
+                        break;
+                }
+
+                /* These are optional parameters therefore ignore the error */
+                ret = dict_get_uint64 (dict, "snap-max-hard-limit",
+                                       &hard_limit);
+                ret = dict_get_uint64 (dict, "snap-max-soft-limit",
+                                       &soft_limit);
+                ret = dict_get_str (dict, "auto-delete",
+                                    &auto_delete);
+                ret = dict_get_str (dict, "snap-activate-on-create",
+                                    &snap_activate);
+
+                if (!hard_limit && !soft_limit &&
+                    !auto_delete && !snap_activate) {
+                        ret = -1;
+                        gf_log ("cli", GF_LOG_ERROR, "At least one option from "
+                                "snap-max-hard-limit, snap-max-soft-limit, "
+                                "auto-delete and snap-activate-on-create "
+                                "should be set");
+                        goto out;
+                }
+
+                volname = NULL;
+                ret = dict_get_str (dict, "volname", &volname);
+
+                if (hard_limit || soft_limit) {
+                        snprintf (option, sizeof(option), "%s=%"PRIu64,
+                                  hard_limit ? "hard_limit" : "soft_limit",
+                                  hard_limit ? hard_limit:soft_limit);
+                } else if (auto_delete || snap_activate) {
+                        snprintf (option, sizeof(option), "%s=%s",
+                                  auto_delete ? "auto-delete" : "snap-activate",
+                                  auto_delete ? auto_delete:snap_activate);
+                }
+
+                snprintf (msg, sizeof(msg), "config_type=%s;%s",
+                          volname?"volume_config":"system_config", option);
+
+                gf_event (EVENT_SNAPSHOT_CONFIG_UPDATED, "%s", msg);
+
+                ret = 0;
+                break;
+
+        default:
+                gf_log ("cli", GF_LOG_WARNING,
+                        "Cannot generate event for unknown type.");
+                ret = 0;
+                goto out;
+        }
+
+out:
+        return ret;
+}
+
+/*
+ * Fetch necessary data from dict at one place instead of *
+ * repeating the same code again and again.               *
+ */
+int
+gf_cli_snapshot_get_data_from_dict (dict_t *dict, char **snap_name,
+                                    char **volname, char **snap_uuid,
+                                    int8_t *soft_limit_flag,
+                                    char **clone_name)
+{
+        int     ret = -1;
+
+        GF_VALIDATE_OR_GOTO ("cli", dict, out);
+
+        if (snap_name)
+                ret = dict_get_str (dict, "snapname", snap_name);
+
+        if (volname)
+                ret = dict_get_str (dict, "volname1", volname);
+
+        if (snap_uuid)
+                ret = dict_get_str (dict, "snapuuid", snap_uuid);
+
+        if (soft_limit_flag)
+                ret = dict_get_int8 (dict, "soft-limit-reach",
+                                     soft_limit_flag);
+
+        if (clone_name)
+                ret = dict_get_str (dict, "clonename", clone_name);
+
+        ret = 0;
+out:
+        return ret;
+}
+
+int
 gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                      int count, void *myframe)
 {
@@ -10329,6 +10658,7 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
         gf_boolean_t         snap_driven               = _gf_false;
         int8_t               soft_limit_flag           = -1;
         char                 *volname                  = NULL;
+        char                 *snap_uuid                = NULL;
 
         GF_ASSERT (myframe);
 
@@ -10363,6 +10693,24 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                 goto out;
         }
 
+        ret = gf_cli_snapshot_get_data_from_dict (dict, &snap_name, &volname,
+                                                  &snap_uuid, &soft_limit_flag,
+                                                  &clone_name);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Failed to fetch data from dict.");
+                goto out;
+        }
+
+#if (USE_EVENTS)
+        ret = gf_cli_generate_snapshot_event (&rsp, dict, type, snap_name,
+                                              volname, snap_uuid, clone_name);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR,
+                        "Failed to generate snapshot event");
+                goto out;
+        }
+#endif
+
         /* Snapshot status and delete command is handled separately */
         if (global_state->mode & GLUSTER_MODE_XML &&
             GF_SNAP_OPTION_TYPE_STATUS != type &&
@@ -10386,19 +10734,13 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                                  goto out;
                 }
 
-                ret = dict_get_str (dict, "snapname", &snap_name);
-                if (ret) {
+                if (!snap_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get snap name");
                         goto out;
                 }
 
-                /* TODO : Instead of using volname1 directly use
-                 * volname$i in loop once snapshot of multiple
-                 * volumes are supported
-                 */
-                ret = dict_get_str (dict, "volname1", &volname);
-                if (ret) {
+                if (!volname) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get volume name");
                         goto out;
@@ -10407,8 +10749,6 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                 cli_out ("snapshot create: success: Snap %s created "
                                         "successfully", snap_name);
 
-                ret = dict_get_int8 (dict, "soft-limit-reach",
-                                    &soft_limit_flag);
                 if (soft_limit_flag == 1) {
                         cli_out ("Warning: Soft-limit of volume (%s) is "
                                 "reached. Snapshot creation is not possible "
@@ -10426,15 +10766,13 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                                  goto out;
                 }
 
-                ret = dict_get_str (dict, "clonename", &clone_name);
-                if (ret) {
+                if (!clone_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get clone name");
                         goto out;
                 }
 
-                ret = dict_get_str (dict, "snapname", &snap_name);
-                if (ret) {
+                if (!snap_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get snapname name");
                         goto out;
@@ -10447,9 +10785,6 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                 break;
 
         case GF_SNAP_OPTION_TYPE_RESTORE:
-                /* TODO: Check if rsp.op_ret needs to be checked here. Or is
-                 * it ok to check this in the start of the function where we
-                 * get rsp.*/
                 if (rsp.op_ret) {
                         cli_err("snapshot restore: failed: %s",
                                  rsp.op_errstr ? rsp.op_errstr :
@@ -10458,8 +10793,7 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                                  goto out;
                 }
 
-                ret = dict_get_str (dict, "snapname", &snap_name);
-                if (ret) {
+                if (!snap_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get snap name");
                         goto out;
@@ -10471,9 +10805,6 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                 ret = 0;
                 break;
         case GF_SNAP_OPTION_TYPE_ACTIVATE:
-                /* TODO: Check if rsp.op_ret needs to be checked here. Or is
-                 * it ok to check this in the start of the function where we
-                 * get rsp.*/
                 if (rsp.op_ret) {
                         cli_err("snapshot activate: failed: %s",
                                  rsp.op_errstr ? rsp.op_errstr :
@@ -10482,8 +10813,7 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                                  goto out;
                 }
 
-                ret = dict_get_str (dict, "snapname", &snap_name);
-                if (ret) {
+                if (!snap_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get snap name");
                         goto out;
@@ -10496,9 +10826,6 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                 break;
 
         case GF_SNAP_OPTION_TYPE_DEACTIVATE:
-                /* TODO: Check if rsp.op_ret needs to be checked here. Or is
-                 * it ok to check this in the start of the function where we
-                 * get rsp.*/
                 if (rsp.op_ret) {
                         cli_err("snapshot deactivate: failed: %s",
                                  rsp.op_errstr ? rsp.op_errstr :
@@ -10507,8 +10834,7 @@ gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
                                  goto out;
                 }
 
-                ret = dict_get_str (dict, "snapname", &snap_name);
-                if (ret) {
+                if (!snap_name) {
                         gf_log ("cli", GF_LOG_ERROR,
                                 "Failed to get snap name");
                         goto out;
diff --git a/configure.ac b/configure.ac
index 94e195c..011cf14 100644
--- a/configure.ac
+++ b/configure.ac
@@ -43,6 +43,7 @@ AC_CONFIG_FILES([Makefile
                 geo-replication/src/peer_mountbroker
                 extras/peer_add_secret_pub
                 geo-replication/syncdaemon/configinterface.py
+                extras/snap_scheduler/conf.py
                 glusterfsd/Makefile
                 glusterfsd/src/Makefile
                 rpc/Makefile
diff --git a/extras/snap_scheduler/Makefile.am b/extras/snap_scheduler/Makefile.am
index 896595f..ffc1579 100644
--- a/extras/snap_scheduler/Makefile.am
+++ b/extras/snap_scheduler/Makefile.am
@@ -1,7 +1,7 @@
 snap_schedulerdir = $(sbindir)/
 
-snap_scheduler_SCRIPTS = gcron.py snap_scheduler.py
+snap_scheduler_SCRIPTS = gcron.py snap_scheduler.py conf.py
 
-EXTRA_DIST = gcron.py snap_scheduler.py
+EXTRA_DIST = gcron.py snap_scheduler.py conf.py
 
 CLEANFILES =
diff --git a/extras/snap_scheduler/conf.py.in b/extras/snap_scheduler/conf.py.in
new file mode 100644
index 0000000..6dcca05
--- /dev/null
+++ b/extras/snap_scheduler/conf.py.in
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+
+GLUSTERFS_LIBEXECDIR = '@GLUSTERFS_LIBEXECDIR@'
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index af092e2..23d5aa3 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -19,7 +19,10 @@ import logging.handlers
 import sys
 import shutil
 from errno import EEXIST
-
+from conf import GLUSTERFS_LIBEXECDIR
+sys.path.insert(1, GLUSTERFS_LIBEXECDIR)
+from events.gf_event import gf_event
+from events import eventtypes
 
 SCRIPT_NAME = "snap_scheduler"
 scheduler_enabled = False
@@ -55,6 +58,42 @@ INVALID_SCHEDULE = 15
 INVALID_ARG = 16
 VOLUME_DOES_NOT_EXIST = 17
 
+def print_error (error_num):
+    if error_num == INTERNAL_ERROR:
+        return "Internal Error"
+    elif error_num == SHARED_STORAGE_DIR_DOESNT_EXIST:
+        return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+               " does not exist."
+    elif error_num == SHARED_STORAGE_NOT_MOUNTED:
+        return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+               " is not mounted."
+    elif error_num == ANOTHER_TRANSACTION_IN_PROGRESS:
+        return "Another transaction is in progress."
+    elif error_num == INIT_FAILED:
+        return "Initialisation failed."
+    elif error_num == SCHEDULING_ALREADY_DISABLED:
+        return "Snapshot scheduler is already disabled."
+    elif error_num == SCHEDULING_ALREADY_ENABLED:
+        return "Snapshot scheduler is already enabled."
+    elif error_num == NODE_NOT_INITIALISED:
+        return "The node is not initialised."
+    elif error_num == ANOTHER_SCHEDULER_ACTIVE:
+        return "Another scheduler is active."
+    elif error_num == JOB_ALREADY_EXISTS:
+        return "The job already exists."
+    elif error_num == JOB_NOT_FOUND:
+        return "The job cannot be found."
+    elif error_num == INVALID_JOBNAME:
+        return "The job name is invalid."
+    elif error_num == INVALID_VOLNAME:
+        return "The volume name is invalid."
+    elif error_num == INVALID_SCHEDULE:
+        return "The schedule is invalid."
+    elif error_num == INVALID_ARG:
+        return "The argument is invalid."
+    elif error_num == VOLUME_DOES_NOT_EXIST:
+        return "The volume does not exist."
+
 def output(msg):
     print("%s: %s" % (SCRIPT_NAME, msg))
 
@@ -499,6 +538,7 @@ def initialise_scheduler():
 
     log.info("Successfully initialised snapshot scheduler for this node")
     output("Successfully initialised snapshot scheduler for this node")
+    gf_event (eventtypes.SNAPSHOT_SCHEDULER_INITIALISED, status="Success")
 
     ret = 0
     return ret
@@ -545,6 +585,8 @@ def perform_operation(args):
         ret = initialise_scheduler()
         if ret != 0:
             output("Failed to initialise snapshot scheduling")
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_INIT_FAILED,
+                      error=print_error(ret))
         return ret
 
     # Disable snapshot scheduler
@@ -552,6 +594,11 @@ def perform_operation(args):
         ret = disable_scheduler()
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_DISABLED,
+                      status="Successfuly Disabled")
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+                      error=print_error(ret))
         return ret
 
     # Check if the symlink to GCRON_TASKS is properly set in the shared storage
@@ -582,6 +629,11 @@ def perform_operation(args):
         ret = enable_scheduler()
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_ENABLED,
+                      status="Successfuly Enabled")
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_ENABLE_FAILED,
+                      error=print_error(ret))
         return ret
 
     # Disable snapshot scheduler
@@ -589,6 +641,11 @@ def perform_operation(args):
         ret = disable_scheduler()
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_DISABLED,
+                      status="Successfuly Disabled")
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+                      error=print_error(ret))
         return ret
 
     # List snapshot schedules
@@ -604,6 +661,12 @@ def perform_operation(args):
         ret = add_schedules(args.jobname, args.schedule, args.volname)
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_ADDED,
+                      status="Successfuly added job "+args.jobname)
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED,
+                      status="Failed to add job "+args.jobname,
+                      error=print_error(ret))
         return ret
 
     # Delete snapshot schedules
@@ -614,6 +677,12 @@ def perform_operation(args):
         ret = delete_schedules(args.jobname)
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_DELETED,
+                      status="Successfuly deleted job "+args.jobname)
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED,
+                      status="Failed to delete job "+args.jobname,
+                      error=print_error(ret))
         return ret
 
     # Edit snapshot schedules
@@ -624,6 +693,12 @@ def perform_operation(args):
         ret = edit_schedules(args.jobname, args.schedule, args.volname)
         if ret == 0:
             subprocess.Popen(["touch", "-h", GCRON_TASKS])
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_EDITED,
+                      status="Successfuly edited job "+args.jobname)
+        else:
+            gf_event (eventtypes.SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED,
+                      status="Failed to edit job "+args.jobname,
+                      error=print_error(ret))
         return ret
 
     ret = INVALID_ARG
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 7d3f2da..5982b1f 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1053,6 +1053,7 @@ exit 0
 %exclude %{_sbindir}/glusterd
 %exclude %{_sbindir}/snap_scheduler.py
 %exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+%exclude %{_sbindir}/conf.py
 %if 0%{?_tmpfilesdir:1}
 %exclude %{_tmpfilesdir}/gluster.conf
 %endif
@@ -1325,6 +1326,7 @@ exit 0
 # snap_scheduler
 %{_sbindir}/snap_scheduler.py
 %{_sbindir}/gcron.py
+%{_sbindir}/conf.py
 
 # /var/lib/glusterd, e.g. hookscripts, etc.
 %ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
@@ -2007,6 +2009,9 @@ end
 %endif
 
 %changelog
+* Fri Sep 16 2016 Avra Sengupta <asengupt@redhat.com>
+- Added conf.py for snap scheduler
+
 * Fri Sep 16 2016 Jiffin Tony Thottan <jthottan@redhat.com>
 - Remove ganesha.so from client xlators
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 2fa9a59..81b9aa0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -4041,6 +4041,7 @@ glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo, dict_t *dict)
                                 "set soft limit exceed flag in "
                                 "response dictionary");
                 }
+
                 goto out;
         }
         ret = 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 85f2a3f..70595ef 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -61,6 +61,7 @@
 #include "xdr-generic.h"
 
 #include "lvm-defaults.h"
+#include "events.h"
 
 char snap_mount_dir[PATH_MAX];
 struct snap_create_args_ {
@@ -7961,6 +7962,7 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
         int                 i                   = 0;
         char               *volname             = NULL;
         char                key[PATH_MAX]       = {0, };
+        char                msg[PATH_MAX]       = {0, };
         glusterd_volinfo_t *volinfo             = NULL;
         uint64_t            limit               = 0;
         int64_t             count               = 0;
@@ -8035,6 +8037,10 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
                         "Deleting snapshot %s.", limit, volinfo->volname,
                         snap->snapname);
 
+                snprintf (msg, sizeof(msg), "snapshot_name=%s;"
+                          "snapshot_uuid=%s", snap->snapname,
+                          uuid_utoa(snap->snap_id));
+
                 LOCK (&snap->lock);
                 {
                         snap->snap_status = GD_SNAP_STATUS_DECOMMISSION;
@@ -8057,6 +8063,13 @@ glusterd_handle_snap_limit (dict_t *dict, dict_t *rsp_dict)
                                         snap->snapname);
                 }
         unlock: UNLOCK (&snap->lock);
+                if (is_origin_glusterd (dict) == _gf_true) {
+                        if (ret)
+                                gf_event (EVENT_SNAPSHOT_DELETE_FAILED,
+                                          "%s", msg);
+                        else
+                                gf_event (EVENT_SNAPSHOT_DELETED, "%s", msg);
+                }
         }
 
 out:
@@ -8142,13 +8155,20 @@ int32_t
 glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
                                        char **op_errstr, dict_t *rsp_dict)
 {
-        xlator_t        *this           = NULL;
-        glusterd_conf_t *priv           = NULL;
-        int              ret            = -1;
-        int32_t          cleanup        = 0;
-        glusterd_snap_t *snap           = NULL;
-        char            *snapname       = NULL;
-        char            *auto_delete    = NULL;
+        xlator_t             *this                = NULL;
+        glusterd_conf_t      *priv                = NULL;
+        int                   ret                 = -1;
+        int32_t               cleanup             = 0;
+        glusterd_snap_t      *snap                = NULL;
+        char                 *snapname            = NULL;
+        char                 *auto_delete         = NULL;
+        char                 *volname             = NULL;
+        glusterd_volinfo_t   *volinfo             = NULL;
+        uint64_t              opt_hard_max        = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+        uint64_t              opt_max_soft        = GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT;
+        int64_t               effective_max_limit = 0;
+        int64_t               soft_limit          = 0;
+        int32_t               snap_activate       = _gf_false;
 
         this = THIS;
 
@@ -8215,6 +8235,77 @@ glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
                 goto out;
         }
 
+        /*
+         * If activate_on_create was enabled, and we have reached this  *
+         * section of the code, that means, that after successfully     *
+         * creating the snapshot, we have also successfully started the *
+         * snapshot bricks on all nodes. So from originator node we can *
+         * send EVENT_SNAPSHOT_ACTIVATED event.                         *
+         *                                                              *
+         * Also check, if hard limit and soft limit is reached in case  *
+         * of successfuly creating the snapshot, and generate the event *
+         */
+        if (is_origin_glusterd (dict) == _gf_true) {
+                snap_activate = dict_get_str_boolean (priv->opts,
+                                              GLUSTERD_STORE_KEY_SNAP_ACTIVATE,
+                                              _gf_false);
+
+                if (snap_activate == _gf_true) {
+                        gf_event (EVENT_SNAPSHOT_ACTIVATED, "snapshot_name=%s;"
+                                  "snapshot_uuid=%s", snap->snapname,
+                                  uuid_utoa(snap->snap_id));
+                }
+
+                ret = dict_get_str (dict, "volname1", &volname);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_DICT_GET_FAILED,
+                                "Failed to get volname.");
+                        goto out;
+                }
+
+                ret = glusterd_volinfo_find (volname, &volinfo);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_VOL_NOT_FOUND,
+                                "Failed to get volinfo.");
+                        goto out;
+                }
+
+                /* config values snap-max-hard-limit and snap-max-soft-limit are
+                 * optional and hence we are not erroring out if values are not
+                 * present
+                 */
+                gd_get_snap_conf_values_if_present (priv->opts, &opt_hard_max,
+                                                    &opt_max_soft);
+
+                if (volinfo->snap_max_hard_limit < opt_hard_max)
+                        effective_max_limit = volinfo->snap_max_hard_limit;
+                else
+                        effective_max_limit = opt_hard_max;
+
+                /*
+                 * Check for hard limit. If it is reached after taking *
+                 * this snapshot, then generate event for the same. If *
+                 * it is not reached, then check for the soft limit,   *
+                 * and generate event accordingly.                     *
+                 */
+                if (volinfo->snap_count >= effective_max_limit) {
+                        gf_event (EVENT_SNAPSHOT_HARD_LIMIT_REACHED,
+                                  "volume_name=%s;volume_id=%s",
+                                  volname,
+                                  uuid_utoa(volinfo->volume_id));
+                } else {
+                        soft_limit = (opt_max_soft * effective_max_limit)/100;
+                        if (volinfo->snap_count >= soft_limit) {
+                                gf_event (EVENT_SNAPSHOT_SOFT_LIMIT_REACHED,
+                                          "volume_name=%s;volume_id=%s",
+                                          volname,
+                                          uuid_utoa(volinfo->volume_id));
+                        }
+                }
+        }
+
         /* "auto-delete" might not be set by user explicitly,
          * in that case it's better to consider the default value.
          * Hence not erroring out if Key is not found.
-- 
1.7.1