From 1405cc08f2d14639f0d777760c5e45987e0ccff5 Mon Sep 17 00:00:00 2001
From: Aravinda VK <avishwan@redhat.com>
Date: Thu, 26 May 2016 15:52:53 +0530
Subject: [PATCH 75/79] geo-rep: Simplify Non root user(mountbroker) Setup
Setting up non-root Geo-replication is troublesome. It involves many
steps: user setup, directory creation, setting up proper permissions,
editing the glusterd.vol file, etc.

This patch introduces the `gluster-mountbroker` command. With this
tool, the non-root setup steps are as follows (run the commands on any
one Slave node):
gluster-mountbroker setup <MOUNT ROOT> <GROUP>
For example,
gluster-mountbroker setup /var/mountbroker-root geogroup
Add a user using:
gluster-mountbroker add <VOLUME> <USER>
For example,
gluster-mountbroker add slavevol geoaccount
Remove a user or volume using:
gluster-mountbroker remove [--volume <VOLUME>] [--user <USER>]
For example,
gluster-mountbroker remove --volume slavevol --user geoaccount
gluster-mountbroker remove --user geoaccount
gluster-mountbroker remove --volume slavevol
Check the status of the setup using:
gluster-mountbroker status
Once the changes are complete, restart glusterd on all Slave nodes and
then follow the usual Geo-replication setup steps.
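For reference, once the mountbroker setup is complete, the session is
created with the non-root user in the Slave URL. A sketch, run from a
Master node (the Master volume "mastervol" and Slave host "snode1"
below are illustrative):

gluster volume geo-replication mastervol geoaccount@snode1::slavevol \
    create push-pem

Then, as root on a Slave node, set up the pem keys for the mountbroker
user:

/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh geoaccount mastervol slavevol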
> Reviewed-on: http://review.gluster.org/14544
> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
> Smoke: Gluster Build System <jenkins@build.gluster.org>
> Reviewed-by: Saravanakumar Arumugam <sarumuga@redhat.com>
> CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
> Reviewed-by: Kotresh HR <khiremat@redhat.com>
Change-Id: Ied3fa009df6a886b24ddd86d39283fcfcff68c82
BUG: 1359605
Signed-off-by: Aravinda VK <avishwan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/84801
Reviewed-by: Milind Changire <mchangir@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
configure.ac | 1 +
geo-replication/src/Makefile.am | 14 +-
geo-replication/src/peer_mountbroker.py.in | 387 ++++++++++++++++++++++++++++
glusterfs.spec.in | 5 +
5 files changed, 406 insertions(+), 2 deletions(-)
create mode 100644 geo-replication/src/peer_mountbroker.py.in
diff --git a/configure.ac b/configure.ac
index e4bf48e..e77ca9e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -41,6 +41,7 @@ AC_CONFIG_FILES([Makefile
libglusterfs/src/gfdb/Makefile
geo-replication/src/peer_gsec_create
geo-replication/src/peer_mountbroker
+ geo-replication/src/peer_mountbroker.py
extras/peer_add_secret_pub
extras/snap_scheduler/conf.py
geo-replication/syncdaemon/conf.py
diff --git a/geo-replication/src/Makefile.am b/geo-replication/src/Makefile.am
index 1572698..f70f23e 100644
--- a/geo-replication/src/Makefile.am
+++ b/geo-replication/src/Makefile.am
@@ -1,11 +1,11 @@
gsyncddir = $(libexecdir)/glusterfs
gsyncd_SCRIPTS = gverify.sh peer_gsec_create \
- set_geo_rep_pem_keys.sh peer_mountbroker
+ set_geo_rep_pem_keys.sh peer_mountbroker peer_mountbroker.py
# peer_gsec_create and peer_add_secret_pub are not added to
# EXTRA_DIST as it's derived from a .in file
-EXTRA_DIST = gverify.sh set_geo_rep_pem_keys.sh
+EXTRA_DIST = gverify.sh set_geo_rep_pem_keys.sh peer_mountbroker.py.in
gsyncd_PROGRAMS = gsyncd
@@ -30,3 +30,13 @@ CLEANFILES =
$(top_builddir)/libglusterfs/src/libglusterfs.la:
$(MAKE) -C $(top_builddir)/libglusterfs/src/ all
+
+
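+# Expose the tool as gluster-mountbroker in $(sbindir) via a symlink
+# to the installed libexec script.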
+install-exec-hook:
+ $(mkdir_p) $(DESTDIR)$(sbindir)
+ rm -f $(DESTDIR)$(sbindir)/gluster-mountbroker
+ ln -s $(libexecdir)/glusterfs/peer_mountbroker.py \
+ $(DESTDIR)$(sbindir)/gluster-mountbroker
+
+uninstall-hook:
+ rm -f $(DESTDIR)$(sbindir)/gluster-mountbroker
diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in
new file mode 100644
index 0000000..be182c5
--- /dev/null
+++ b/geo-replication/src/peer_mountbroker.py.in
@@ -0,0 +1,387 @@
+#!/usr/bin/env python
+import os
+from errno import EEXIST, ENOENT
+
+from gluster.cliutils import (execute, Cmd, node_output_ok,
+ node_output_notok, execute_in_peers,
+ runcli, oknotok)
+from prettytable import PrettyTable
+
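+# The @...@ placeholders below are substituted by configure when it
+# generates peer_mountbroker.py from this template.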
+LOG_DIR = "@localstatedir@/log/glusterfs/geo-replication-slaves"
+GEOREP_DIR = "@GLUSTERD_WORKDIR@/geo-replication"
+GLUSTERD_VOLFILE = "@GLUSTERD_VOLFILE@"
+
+
+class MountbrokerUserMgmt(object):
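+    # Parses glusterd.vol and manages the mountbroker options; the
+    # user-to-volumes mapping is kept as {user: set(volumes)}.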
+ def __init__(self, volfile):
+ self.volfile = volfile
+ self._options = {}
+ self.commented_lines = []
+ self.user_volumes = {}
+ self._parse()
+
+ def _parse(self):
+ """ Example glusterd.vol
+ volume management
+ type mgmt/glusterd
+ option working-directory /var/lib/glusterd
+ option transport-type socket,rdma
+ option transport.socket.keepalive-time 10
+ option transport.socket.keepalive-interval 2
+ option transport.socket.read-fail-log off
+ option rpc-auth-allow-insecure on
+ option ping-timeout 0
+ option event-threads 1
+ # option base-port 49152
+ option mountbroker-root /var/mountbroker-root
+ option mountbroker-geo-replication.user1 vol1,vol2,vol3
+ option geo-replication-log-group geogroup
+ option rpc-auth-allow-insecure on
+ end-volume
+ """
+ with open(self.volfile, "r") as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("option "):
+ key, value = line.split(" ")[1:]
+ self._options[key] = value
+ if line.startswith("#"):
+ self.commented_lines.append(line)
+
+        for k, v in self._options.items():
+ if k.startswith("mountbroker-geo-replication."):
+ user = k.split(".")[-1]
+ self.user_volumes[user] = set(v.split(","))
+
+ def get_group(self):
+ return self._options.get("geo-replication-log-group", None)
+
+ def _get_write_data(self):
+ op = "volume management\n"
+ op += " type mgmt/glusterd\n"
+        for k, v in self._options.items():
+            if k.startswith("mountbroker-geo-replication."):
+                # Users will be added separately
+ continue
+
+ op += " option %s %s\n" % (k, v)
+
+ for k, v in self.user_volumes.items():
+ if v:
+ op += (" option mountbroker-geo-replication."
+ "%s %s\n" % (k, ",".join(v)))
+
+ for line in self.commented_lines:
+ op += " %s\n" % line
+
+ op += "end-volume"
+ return op
+
+ def save(self):
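+        # Write to a temp file, fsync it, then rename() over the
+        # original so the volfile is replaced atomically.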
+ with open(self.volfile + "_tmp", "w") as f:
+ f.write(self._get_write_data())
+ f.flush()
+ os.fsync(f.fileno())
+ os.rename(self.volfile + "_tmp", self.volfile)
+
+ def set_mount_root_and_group(self, mnt_root, group):
+ self._options["mountbroker-root"] = mnt_root
+ self._options["geo-replication-log-group"] = group
+
+ def add(self, volume, user):
+ user_volumes = self.user_volumes.get(user, None)
+
+ if user_volumes is not None and volume in user_volumes:
+            # User and volume combination already exists
+ return
+
+ if user_volumes is None:
+            # User does not exist yet
+ self.user_volumes[user] = set()
+
+ self.user_volumes[user].add(volume)
+
+    def remove(self, volume=None, user=None):
+        if user is not None:
+            if volume is None:
+                self.user_volumes[user] = set()
+            else:
+                self.user_volumes.get(user, set()).discard(volume)
+        else:
+            if volume is None:
+                return
+
+            for k in self.user_volumes:
+                self.user_volumes[k].discard(volume)
+
+ def info(self):
+        # Convert each user's volume set into a list
+ users = {}
+ for k, v in self.user_volumes.items():
+ users[k] = list(v)
+
+ data = {
+ "mountbroker-root": self._options.get("mountbroker-root", "None"),
+ "geo-replication-log-group": self._options.get(
+ "geo-replication-log-group", ""),
+ "users": users
+ }
+
+ return data
+
+
+class NodeSetup(Cmd):
+ # Test if group exists using `getent group <grp>`
+ # and then group add using `groupadd <grp>`
+ # chgrp -R <grp> /var/log/glusterfs/geo-replication-slaves
+ # chgrp -R <grp> /var/lib/glusterd/geo-replication
+ # chmod -R 770 /var/log/glusterfs/geo-replication-slaves
+ # chmod -R 770 /var/lib/glusterd/geo-replication
+ # mkdir -p <mnt_root>
+ # chmod 0711 <mnt_root>
+    # If SELinux is enabled,
+ # semanage fcontext -a -e /home /var/mountbroker-root
+ # restorecon -Rv /var/mountbroker-root
+ name = "node-setup"
+
+ def args(self, parser):
+ parser.add_argument("mount_root")
+ parser.add_argument("group")
+
+ def run(self, args):
+ m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
+
+ try:
+ os.makedirs(args.mount_root)
+ except OSError as e:
+ if e.errno == EEXIST:
+ pass
+ else:
+                node_output_notok("Unable to create {0}".format(
+ args.mount_root))
+
+ execute(["chmod", "0711", args.mount_root])
+ try:
+ execute(["semanage", "fcontext", "-a", "-e",
+ "/home", args.mount_root])
+ except OSError as e:
+ if e.errno == ENOENT:
+ pass
+ else:
+ node_output_notok(
+ "Unable to run semanage: {0}".format(e))
+
+ try:
+ execute(["restorecon", "-Rv", args.mount_root])
+ except OSError as e:
+ if e.errno == ENOENT:
+ pass
+ else:
+ node_output_notok(
+ "Unable to run restorecon: {0}".format(e))
+
+ rc, out, err = execute(["getent", "group", args.group])
+ if rc != 0:
+            node_output_notok("Group does not exist")
+
+ execute(["chgrp", "-R", args.group, GEOREP_DIR])
+ execute(["chgrp", "-R", args.group, LOG_DIR])
+ execute(["chmod", "-R", "770", GEOREP_DIR])
+        execute(["chmod", "-R", "770", LOG_DIR])
+
+ m.set_mount_root_and_group(args.mount_root, args.group)
+ m.save()
+
+ node_output_ok()
+
+
+def color_status(value):
+ if value.lower() in ("up", "ok", "yes"):
+ return "green"
+ else:
+ return "red"
+
+
+class CliSetup(Cmd):
+ # gluster-mountbroker setup <MOUNT_ROOT> <GROUP>
+ name = "setup"
+
+ def args(self, parser):
+ parser.add_argument("mount_root")
+ parser.add_argument("group")
+
+ def run(self, args):
+ out = execute_in_peers("node-setup", [args.mount_root,
+ args.group])
+ table = PrettyTable(["NODE", "NODE STATUS", "SETUP STATUS"])
+ table.align["NODE STATUS"] = "r"
+ table.align["SETUP STATUS"] = "r"
+ for p in out:
+ table.add_row([p.hostname,
+ "UP" if p.node_up else "DOWN",
+ "OK" if p.ok else "NOT OK: {0}".format(
+ p.error)])
+
+ print(table)
+
+
+class NodeStatus(Cmd):
+ # Check if Group exists
+ # Check if user exists
+ # Check directory permission /var/log/glusterfs/geo-replication-slaves
+ # and /var/lib/glusterd/geo-replication
+ # Check mount root and its permissions
+    # Check glusterd.vol file for user, group and dir existence
+ name = "node-status"
+
+ def run(self, args):
+ m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
+ data = m.info()
+ data["group_exists"] = False
+ data["path_exists"] = False
+
+ rc, out, err = execute(["getent", "group",
+ data["geo-replication-log-group"]])
+
+ if rc == 0:
+ data["group_exists"] = True
+
+ if os.path.exists(data["mountbroker-root"]):
+ data["path_exists"] = True
+
+ node_output_ok(data)
+
+
+class CliStatus(Cmd):
+ # gluster-mountbroker status
+ name = "status"
+
+ def run(self, args):
+ out = execute_in_peers("node-status")
+ table = PrettyTable(["NODE", "NODE STATUS", "MOUNT ROOT",
+ "GROUP", "USERS"])
+ table.align["NODE STATUS"] = "r"
+
+ for p in out:
+ node_data = p.output
+ if node_data == "":
+ node_data = {}
+
+ users_row_data = ""
+ for k, v in node_data.get("users", {}).items():
+ users_row_data += "{0}({1}) ".format(k, ", ".join(v))
+
+ if not users_row_data:
+ users_row_data = "None"
+
+ mount_root = node_data.get("mountbroker-root", "None")
+ if mount_root != "None":
+ mount_root += "({0})".format(oknotok(
+ node_data.get("path_exists", False)))
+
+ grp = node_data.get("geo-replication-log-group", "None")
+ if grp != "None":
+ grp += "({0})".format(oknotok(
+ node_data.get("group_exists", False)))
+
+ table.add_row([p.hostname,
+ "UP" if p.node_up else "DOWN",
+ mount_root,
+ grp,
+ users_row_data])
+
+ print(table)
+
+
+class NodeAdd(Cmd):
+ # useradd -m -g <grp> <usr>
+    # add user entry to glusterd.vol
+ name = "node-add"
+
+ def args(self, parser):
+ parser.add_argument("volume")
+ parser.add_argument("user")
+
+ def run(self, args):
+ m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
+ grp = m.get_group()
+ if grp is None:
+ node_output_notok("Group is not available")
+
+ m.add(args.volume, args.user)
+ m.save()
+ node_output_ok()
+
+
+class CliAdd(Cmd):
+ # gluster-mountbroker add <VOLUME> <USER>
+ name = "add"
+
+ def args(self, parser):
+ parser.add_argument("volume")
+ parser.add_argument("user")
+
+ def run(self, args):
+ out = execute_in_peers("node-add", [args.volume,
+ args.user])
+ table = PrettyTable(["NODE", "NODE STATUS", "ADD STATUS"])
+ table.align["NODE STATUS"] = "r"
+ table.align["ADD STATUS"] = "r"
+
+ for p in out:
+ table.add_row([p.hostname,
+ "UP" if p.node_up else "DOWN",
+ "OK" if p.ok else "NOT OK: {0}".format(
+ p.error)])
+
+ print(table)
+
+
+class NodeRemove(Cmd):
+    # remove user/volume entry from the glusterd.vol file
+ name = "node-remove"
+
+ def args(self, parser):
+ parser.add_argument("volume")
+ parser.add_argument("user")
+
+ def run(self, args):
+ m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
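+        # "." is the sentinel for "not specified" (see CliRemove)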
+ volume = None if args.volume == "." else args.volume
+ user = None if args.user == "." else args.user
+ m.remove(volume=volume, user=user)
+ m.save()
+ node_output_ok()
+
+
+class CliRemove(Cmd):
+ # gluster-mountbroker remove --volume <VOLUME> --user <USER>
+ name = "remove"
+
+ def args(self, parser):
+ parser.add_argument("--volume", default=".")
+ parser.add_argument("--user", default=".")
+
+ def run(self, args):
+ out = execute_in_peers("node-remove", [args.volume,
+ args.user])
+ table = PrettyTable(["NODE", "NODE STATUS", "REMOVE STATUS"])
+ table.align["NODE STATUS"] = "r"
+ table.align["REMOVE STATUS"] = "r"
+
+ for p in out:
+ table.add_row([p.hostname,
+ "UP" if p.node_up else "DOWN",
+ "OK" if p.ok else "NOT OK: {0}".format(
+ p.error)])
+
+ print(table)
+
+if __name__ == "__main__":
+ runcli()
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 5982b1f..984624c 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1213,12 +1213,14 @@ exit 0
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
%{_sbindir}/gfind_missing_files
+%{_sbindir}/gluster-mountbroker
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
%{_libexecdir}/glusterfs/peer_mountbroker
+%{_libexecdir}/glusterfs/peer_mountbroker.py*
%{_libexecdir}/glusterfs/gfind_missing_files
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
@@ -2016,6 +2018,9 @@ end
 - Remove ganesha.so from client xlators
 
 * Fri Sep 16 2016 Aravinda VK <avishwan@redhat.com>
+- Added gluster-mountbroker utility for geo-rep mountbroker setup (#1343333)
+
+* Fri Sep 16 2016 Aravinda VK <avishwan@redhat.com>
 - Added Python subpackage "cliutils" under gluster (#1342356)
 
 * Fri Sep 16 2016 Aravinda VK <avishwan@redhat.com>
--
1.7.1