diff -uNr a/configure.ac b/configure.ac
--- a/configure.ac	2020-04-16 11:54:08.466619607 +0200
+++ b/configure.ac	2020-04-16 12:05:17.241352586 +0200
@@ -30,6 +30,8 @@
 PKG_FEATURES=""

 AC_CONFIG_AUX_DIR(.)
+AC_CONFIG_MACRO_DIR([m4])
+
 AC_CANONICAL_HOST

 dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
@@ -72,6 +74,11 @@
 [AC_MSG_ERROR([systemd support requested but pkg-config unable to query systemd package])])
 with_systemdsystemunitdir=no],
 [with_systemdsystemunitdir="$def_systemdsystemunitdir"])])
+if test "x$with_systemdsystemunitdir" != "xno" && \
+ test "x${prefix}" != "xNONE" && \
+ test "x${prefix}" != "x/usr"; then
+ with_systemdsystemunitdir="${prefix}/$with_systemdsystemunitdir"
+fi
 AS_IF([test "x$with_systemdsystemunitdir" != "xno"],
 [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])])
 AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"])
@@ -79,6 +86,11 @@
 AC_ARG_WITH([systemdtmpfilesdir],
 AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]),
 [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)])
+ if test "x$with_systemdtmpfilesdir" != xno && \
+ test "x${prefix}" != "xNONE" && \
+ test "x${prefix}" != "x/usr"; then
+ with_systemdtmpfilesdir="${prefix}/$with_systemdtmpfilesdir"
+ fi
 if test "x$with_systemdtmpfilesdir" != xno; then
 AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir])
 fi
@@ -501,12 +513,35 @@
 AC_SUBST(RM)
 AC_SUBST(TEST)

+dnl Ensure PYTHON is an absolute path
+AC_PATH_PROG([PYTHON], [$PYTHON])
+
 AM_PATH_PYTHON
 if test -z "$PYTHON"; then
 echo "*** Essential program python not found" 1>&2
- exit 1
 fi

+AC_PYTHON_MODULE(googleapiclient)
+AC_PYTHON_MODULE(pyroute2)
+
+AS_VERSION_COMPARE([$PYTHON_VERSION], [2.7], [BUILD_OCF_PY=0], [BUILD_OCF_PY=1], [BUILD_OCF_PY=1])
+
+BUILD_AZURE_EVENTS=1
+if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then
+ BUILD_AZURE_EVENTS=0
+ AC_MSG_WARN("Not building azure-events")
+fi
+AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1)
+
+BUILD_GCP_PD_MOVE=1
+AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1)
+
+BUILD_GCP_VPC_MOVE_ROUTE=1
+AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1)
+
+BUILD_GCP_VPC_MOVE_VIP=1
+AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1)
+
 AC_PATH_PROGS(ROUTE, route)
 AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command)

@@ -541,6 +576,12 @@
 if test x"${STYLESHEET_PREFIX}" = x""; then
 DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
 -type d | LC_ALL=C sort)
+ if test x"${DIRS}" = x""; then
+ # when datadir is not standard OS path, we cannot find docbook.xsl
+ # use standard OS path as backup
+ DIRS=$(find "/usr/share" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
+ -type d | LC_ALL=C sort)
+ fi
 XSLT=$(basename ${DOCBOOK_XSL_PATH})
 for d in ${DIRS}; do
 if test -f "${d}/${XSLT}"; then
@@ -948,6 +989,7 @@
 )

 dnl Files we output that need to be executable
+AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events])
 AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget])
 AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID])
 AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE])
@@ -1021,7 +1063,7 @@
 AC_MSG_RESULT([])
 AC_MSG_RESULT([$PACKAGE configuration:])
 AC_MSG_RESULT([ Version = ${VERSION}])
-AC_MSG_RESULT([ Build Version = e711383fd5c7bef9c24ff6bc85465e59f91080f9])
+AC_MSG_RESULT([ Build Version = $Format:%H$])
 AC_MSG_RESULT([ Features =${PKG_FEATURES}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([ Prefix = ${prefix}])
diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
--- a/doc/man/Makefile.am	2020-04-16 11:54:08.466619607 +0200
+++ b/doc/man/Makefile.am	2020-04-16 12:08:34.913726440 +0200
@@ -55,7 +55,7 @@
 # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was
 # first reported in 1995 and added to Savannah in in 2005...
 if BUILD_DOC
-man_MANS = ocf_heartbeat_AoEtarget.7 \
+man_MANS = ocf_heartbeat_AoEtarget.7 \
 ocf_heartbeat_AudibleAlarm.7 \
 ocf_heartbeat_ClusterMon.7 \
 ocf_heartbeat_CTDB.7 \
@@ -183,6 +183,22 @@
 man_MANS += ocf_heartbeat_IPv6addr.7
 endif

+if BUILD_AZURE_EVENTS
+man_MANS += ocf_heartbeat_azure-events.7
+endif
+
+if BUILD_GCP_PD_MOVE
+man_MANS += ocf_heartbeat_gcp-pd-move.7
+endif
+
+if BUILD_GCP_VPC_MOVE_ROUTE
+man_MANS += ocf_heartbeat_gcp-vpc-move-route.7
+endif
+
+if BUILD_GCP_VPC_MOVE_VIP
+man_MANS += ocf_heartbeat_gcp-vpc-move-vip.7
+endif
+
 xmlfiles = $(man_MANS:.7=.xml)

 %.1 %.5 %.7 %.8: %.xml
diff -uNr a/heartbeat/azure-events.in b/heartbeat/azure-events.in
--- a/heartbeat/azure-events.in	1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/azure-events.in	2020-04-16 12:02:15.114693551 +0200
@@ -0,0 +1,824 @@
+#!@PYTHON@ -tt
+#
+# Resource agent for monitoring Azure Scheduled Events
+#
+# License: GNU General Public License (GPL)
+# (c) 2018 Tobias Niekamp, Microsoft Corp.
+# and Linux-HA contributors
+
+import os
+import sys
+import time
+import subprocess
+import json
+try:
+    import urllib2
+except ImportError:
+    import urllib.request as urllib2
+import socket
+from collections import defaultdict
+
+OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT"))
+sys.path.append(OCF_FUNCTIONS_DIR)
+import ocf
+
+##############################################################################
+
+
+VERSION = "0.10"
+USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro())
+
+attr_globalPullState = "azure-events_globalPullState"
+attr_lastDocVersion = "azure-events_lastDocVersion"
+attr_curNodeState = "azure-events_curNodeState"
+attr_pendingEventIDs = "azure-events_pendingEventIDs"
+
+default_loglevel = ocf.logging.INFO
+default_relevantEventTypes = set(["Reboot", "Redeploy"])
+
+global_pullMaxAttempts = 3
+global_pullDelaySecs = 1
+
+##############################################################################
+
+class attrDict(defaultdict):
+    """
+    A wrapper for accessing dict keys like an attribute
+    """
+    def __init__(self, data):
+        super(attrDict, self).__init__(attrDict)
+        for d in data.keys():
+            self.__setattr__(d, data[d])
+
+    def __getattr__(self, key):
+        try:
+            return self[key]
+        except KeyError:
+            raise AttributeError(key)
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+##############################################################################
+
+class azHelper:
+    """
+    Helper class for Azure's metadata API (including Scheduled Events)
+    """
+    metadata_host = "http://169.254.169.254/metadata"
+    instance_api = "instance"
+    events_api = "scheduledevents"
+    api_version = "2017-08-01"
+
+    @staticmethod
+    def _sendMetadataRequest(endpoint, postData=None):
+        """
+        Send a request to the Azure Metadata Service API
+        """
+        url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version)
+        ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData))
+        ocf.logger.debug("_sendMetadataRequest: url = %s" % url)
+
+        req = urllib2.Request(url, postData)
+        req.add_header("Metadata", "true")
+        req.add_header("User-Agent", USER_AGENT)
+        resp = urllib2.urlopen(req)
+        data = resp.read()
+        ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
+        if data:
+            data = json.loads(data)
+
+        ocf.logger.debug("_sendMetadataRequest: finished")
+        return data
+
+    @staticmethod
+    def getInstanceInfo():
+        """
+        Fetch details about the current VM from the Azure Metadata Service API
+        """
+        ocf.logger.debug("getInstanceInfo: begin")
+
+        jsondata = azHelper._sendMetadataRequest(azHelper.instance_api)
+        ocf.logger.debug("getInstanceInfo: json = %s" % jsondata)
+
+        ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"]))
+        return attrDict(jsondata["compute"])
+
+    @staticmethod
+    def pullScheduledEvents():
+        """
+        Retrieve all currently scheduled events via Azure Metadata Service API
+        """
+        ocf.logger.debug("pullScheduledEvents: begin")
+
+        jsondata = azHelper._sendMetadataRequest(azHelper.events_api)
+        ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata)
+
+        ocf.logger.debug("pullScheduledEvents: finished")
+        return attrDict(jsondata)
+
+    @staticmethod
+    def forceEvents(eventIDs):
+        """
+        Force a set of events to start immediately
+        """
+        ocf.logger.debug("forceEvents: begin")
+
+        events = []
+        for e in eventIDs:
+            events.append({
+                "EventId": e,
+            })
+        postData = {
+            "StartRequests" : events
+        }
+        ocf.logger.info("forceEvents: postData = %s" % postData)
+        resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData))
+
+        ocf.logger.debug("forceEvents: finished")
+        return
+
+##############################################################################
+
+class clusterHelper:
+    """
+    Helper functions for Pacemaker control via crm
+    """
+    @staticmethod
+    def _getLocation(node):
+        """
+        Helper function to retrieve local/global attributes
+        """
+        if node:
+            return ["--node", node]
+        else:
+            return ["--type", "crm_config"]
+
+    @staticmethod
+    def _exec(command, *args):
+        """
+        Helper function to execute a UNIX command
+        """
+        args = list(args)
+        ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args)))
+
+        def flatten(*n):
+            return (str(e) for a in n
+                for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),)))
+        command = list(flatten([command] + args))
+        ocf.logger.debug("_exec: cmd = %s" % " ".join(command))
+        try:
+            ret = subprocess.check_output(command)
+            ocf.logger.debug("_exec: return = %s" % ret)
+            return ret.rstrip()
+        except Exception as err:
+            ocf.logger.exception(err)
+            return None
+
+    @staticmethod
+    def setAttr(key, value, node=None):
+        """
+        Set the value of a specific global/local attribute in the Pacemaker cluster
+        """
+        ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node))
+
+        if value:
+            ret = clusterHelper._exec("crm_attribute",
+                "--name", key,
+                "--update", value,
+                clusterHelper._getLocation(node))
+        else:
+            ret = clusterHelper._exec("crm_attribute",
+                "--name", key,
+                "--delete",
+                clusterHelper._getLocation(node))
+
+        ocf.logger.debug("setAttr: finished")
+        return len(ret) == 0
+
+    @staticmethod
+    def getAttr(key, node=None):
+        """
+        Retrieve a global/local attribute from the Pacemaker cluster
+        """
+        ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node))
+
+        val = clusterHelper._exec("crm_attribute",
+            "--name", key,
+            "--query", "--quiet",
+            "--default", "",
+            clusterHelper._getLocation(node))
+        ocf.logger.debug("getAttr: finished")
+        if not val:
+            return None
+        return val if not val.isdigit() else int(val)
+
+    @staticmethod
+    def getAllNodes():
+        """
+        Get a list of hostnames for all nodes in the Pacemaker cluster
+        """
+        ocf.logger.debug("getAllNodes: begin")
+
+        nodes = []
+        nodeList = clusterHelper._exec("crm_node", "--list")
+        for n in nodeList.decode().split("\n"):
+            nodes.append(n.split()[1])
+        ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes))
+
+        return nodes
+
+    @staticmethod
+    def getHostNameFromAzName(azName):
+        """
+        Helper function to get the actual host name from an Azure node name
+        """
+        return clusterHelper.getAttr("hostName_%s" % azName)
+
+    @staticmethod
+    def removeHoldFromNodes():
+        """
+        Remove the ON_HOLD state from all nodes in the Pacemaker cluster
+        """
+        ocf.logger.debug("removeHoldFromNodes: begin")
+
+        for n in clusterHelper.getAllNodes():
+            if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD":
+                clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n)
+                ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n)
+
+        ocf.logger.debug("removeHoldFromNodes: finished")
+        return False
+
+    @staticmethod
+    def otherNodesAvailable(exceptNode):
+        """
+        Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE
+        """
+        ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode)
+
+        for n in clusterHelper.getAllNodes():
+            state = clusterHelper.getAttr(attr_curNodeState, node=n)
+            state = stringToNodeState(state) if state else AVAILABLE
+            if state == AVAILABLE and n != exceptNode.hostName:
+                ocf.logger.info("otherNodesAvailable: at least %s is available" % n)
+                ocf.logger.debug("otherNodesAvailable: finished")
+                return True
+        ocf.logger.info("otherNodesAvailable: no other nodes are available")
+        ocf.logger.debug("otherNodesAvailable: finished")
+
+        return False
+
+    @staticmethod
+    def transitionSummary():
+        """
+        Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby)
+        """
+        # <tniek> Is a global crm_simulate "too much"? Or would it be sufficient if there are no planned transitions for a particular node?
+        # # crm_simulate -Ls
+        # Transition Summary:
+        # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
+        # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
+        # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1)
+        # * Start rsc_nc_HN1_HDB03 (hsr3-db1)
+        # # Expected result when there are no pending actions:
+        # Transition Summary:
+        ocf.logger.debug("transitionSummary: begin")
+
+        summary = clusterHelper._exec("crm_simulate", "-Ls")
+        if not summary:
+            ocf.logger.warning("transitionSummary: could not load transition summary")
+            return False
+        if summary.find("Transition Summary:") < 0:
+            ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
+            return False
+        summary = summary.split("Transition Summary:")[1]
+        ret = summary.decode().split("\n").pop(0)
+
+        ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
+        return ret
+
+    @staticmethod
+    def listOperationsOnNode(node):
+        """
+        Get a list of all current operations for a given node (used to check if any resources are pending)
+        """
+        # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0
+        # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete
+        # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete
+        # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending
+        # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete
+        ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node)
+
+        resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node)
+        if len(resources) == 0:
+            ret = []
+        else:
+            ret = resources.decode().split("\n")
+
+        ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret))
+        return ret
+
+    @staticmethod
+    def noPendingResourcesOnNode(node):
+        """
+        Check that there are no pending resources on a given node
+        """
+        ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node)
+
+        for r in clusterHelper.listOperationsOnNode(node):
+            ocf.logger.debug("noPendingResourcesOnNode: * %s" % r)
+            resource = r.split()[-1]
+            if resource == "pending":
+                ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource)
+                ocf.logger.debug("noPendingResourcesOnNode: finished; return = False")
+                return False
+        ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node)
+        ocf.logger.debug("noPendingResourcesOnNode: finished; return = True")
+
+        return True
+
+    @staticmethod
+    def allResourcesStoppedOnNode(node):
+        """
+        Check that all resources on a given node are stopped
+        """
+        ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node)
+
+        if clusterHelper.noPendingResourcesOnNode(node):
+            if len(clusterHelper.transitionSummary()) == 0:
+                ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node)
+                ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True")
+                return True
+            ocf.logger.info("allResourcesStoppedOnNode: transition summary is not empty")
+            ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
+            return False
+
+        ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node)
+        ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
+        return False
+
+##############################################################################
+
+AVAILABLE = 0  # Node is online and ready to handle events
+STOPPING = 1   # Standby has been triggered, but some resources are still running
+IN_EVENT = 2   # All resources are stopped, and event has been initiated via Azure Metadata Service
+ON_HOLD = 3    # Node has a pending event that cannot be started because there are no other nodes available
+
+def stringToNodeState(name):
+    if type(name) == int: return name
+    if name == "STOPPING": return STOPPING
+    if name == "IN_EVENT": return IN_EVENT
+    if name == "ON_HOLD": return ON_HOLD
+    return AVAILABLE
+
+def nodeStateToString(state):
+    if state == STOPPING: return "STOPPING"
+    if state == IN_EVENT: return "IN_EVENT"
+    if state == ON_HOLD: return "ON_HOLD"
+    return "AVAILABLE"
+
+##############################################################################
+
+class Node:
+    """
+    Core class implementing logic for a cluster node
+    """
+    def __init__(self, ra):
+        self.raOwner = ra
+        self.azInfo = azHelper.getInstanceInfo()
+        self.azName = self.azInfo.name
+        self.hostName = socket.gethostname()
+        self.setAttr("azName", self.azName)
+        clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName)
+
+    def getAttr(self, key):
+        """
+        Get a local attribute
+        """
+        return clusterHelper.getAttr(key, node=self.hostName)
+
+    def setAttr(self, key, value):
+        """
+        Set a local attribute
+        """
+        return clusterHelper.setAttr(key, value, node=self.hostName)
+
+    def selfOrOtherNode(self, node):
+        """
+        Helper function to distinguish self/other node
+        """
+        return node if node else self.hostName
+
+    def setState(self, state, node=None):
+        """
+        Set the state for a given node (or self)
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state)))
+
+        clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node)
+
+        ocf.logger.debug("setState: finished")
+
+    def getState(self, node=None):
+        """
+        Get the state for a given node (or self)
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("getState: begin; node = %s" % node)
+
+        state = clusterHelper.getAttr(attr_curNodeState, node=node)
+        ocf.logger.debug("getState: state = %s" % state)
+        ocf.logger.debug("getState: finished")
+        if not state:
+            return AVAILABLE
+        return stringToNodeState(state)
+
+    def setEventIDs(self, eventIDs, node=None):
+        """
+        Set pending EventIDs for a given node (or self)
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs)))
+
+        if eventIDs:
+            eventIDStr = ",".join(eventIDs)
+        else:
+            eventIDStr = None
+        clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node)
+
+        ocf.logger.debug("setEventIDs: finished")
+        return
+
+    def getEventIDs(self, node=None):
+        """
+        Get pending EventIDs for a given node (or self)
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("getEventIDs: begin; node = %s" % node)
+
+        eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node)
+        if eventIDStr:
+            eventIDs = eventIDStr.decode().split(",")
+        else:
+            eventIDs = None
+
+        ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs))
+        return eventIDs
+
+    def updateNodeStateAndEvents(self, state, eventIDs, node=None):
+        """
+        Set the state and pending EventIDs for a given node (or self)
+        """
+        ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs)))
+
+        self.setState(state, node=node)
+        self.setEventIDs(eventIDs, node=node)
+
+        ocf.logger.debug("updateNodeStateAndEvents: finished")
+        return state
+
+    def putNodeStandby(self, node=None):
+        """
+        Put self to standby
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("putNodeStandby: begin; node = %s" % node)
+
+        clusterHelper._exec("crm_attribute",
+            "-t", "nodes",
+            "-N", node,
+            "-n", "standby",
+            "-v", "on",
+            "--lifetime=forever")
+
+        ocf.logger.debug("putNodeStandby: finished")
+
+    def putNodeOnline(self, node=None):
+        """
+        Put self back online
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("putNodeOnline: begin; node = %s" % node)
+
+        clusterHelper._exec("crm_attribute",
+            "-t", "nodes",
+            "-N", node,
+            "-n", "standby",
+            "-v", "off",
+            "--lifetime=forever")
+
+        ocf.logger.debug("putNodeOnline: finished")
+
+    def separateEvents(self, events):
+        """
+        Split own/other nodes' events
+        """
+        ocf.logger.debug("separateEvents: begin; events = %s" % str(events))
+
+        localEvents = []
+        remoteEvents = []
+        for e in events:
+            e = attrDict(e)
+            if e.EventType not in self.raOwner.relevantEventTypes:
+                continue
+            if self.azName in e.Resources:
+                localEvents.append(e)
+            else:
+                remoteEvents.append(e)
+        ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents)))
+        return (localEvents, remoteEvents)
+
+    def removeOrphanedEvents(self, azEvents):
+        """
+        Remove remote events that are already finished
+        """
+        ocf.logger.debug("removeOrphanedEvents: begin; azEvents = %s" % str(azEvents))
+
+        azEventIDs = set()
+        for e in azEvents:
+            azEventIDs.add(e.EventId)
+        # for all nodes except self ...
+        for n in clusterHelper.getAllNodes():
+            if n == self.hostName:
+                continue
+            curState = self.getState(node=n)
+            # ... that are still in an event or stopping resources ...
+            if curState in (STOPPING, IN_EVENT):
+                ocf.logger.info("removeOrphanedEvents: node %s has state %s" % (n, curState))
+                clusterEventIDs = self.getEventIDs(node=n)
+                stillActive = False
+                # ... but don't have any more events running according to Azure, ...
+                for p in clusterEventIDs:
+                    if p in azEventIDs:
+                        ocf.logger.info("removeOrphanedEvents: (at least) event %s on node %s has not yet finished" % (str(p), n))
+                        stillActive = True
+                        break
+                if not stillActive:
+                    # ... put them back online.
+                    ocf.logger.info("removeOrphanedEvents: clusterEvents %s on node %s are not in azEvents %s -> bring node back online" % (str(clusterEventIDs), n, str(azEventIDs)))
+                    self.putNodeOnline(node=n)
+
+        ocf.logger.debug("removeOrphanedEvents: finished")
+
+    def handleRemoteEvents(self, azEvents):
+        """
+        Handle a list of events (as provided by Azure Metadata Service) for other nodes
+        """
+        ocf.logger.debug("handleRemoteEvents: begin; hostName = %s, events = %s" % (self.hostName, str(azEvents)))
+
+        if len(azEvents) == 0:
+            ocf.logger.debug("handleRemoteEvents: no remote events to handle")
+            ocf.logger.debug("handleRemoteEvents: finished")
+            return
+        eventIDsForNode = {}
+
+        # iterate through all current events as per Azure
+        for e in azEvents:
+            ocf.logger.info("handleRemoteEvents: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources)))
+            # before we can force an event to start, we need to ensure all nodes involved have stopped their resources
+            if e.EventStatus == "Scheduled":
+                allNodesStopped = True
+                for azName in e.Resources:
+                    hostName = clusterHelper.getHostNameFromAzName(azName)
+                    state = self.getState(node=hostName)
+                    if state == STOPPING:
+                        # the only way we can continue is when node state is STOPPING, but all resources have been stopped
+                        if not clusterHelper.allResourcesStoppedOnNode(hostName):
+                            ocf.logger.info("handleRemoteEvents: (at least) node %s has still resources running -> wait" % hostName)
+                            allNodesStopped = False
+                            break
+                    elif state in (AVAILABLE, IN_EVENT, ON_HOLD):
+                        ocf.logger.info("handleRemoteEvents: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state)))
+                        allNodesStopped = False
+                        break
+                if allNodesStopped:
+                    ocf.logger.info("handleRemoteEvents: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId))
+                    for n in e.Resources:
+                        hostName = clusterHelper.getHostNameFromAzName(n)
+                        if hostName in eventIDsForNode:
+                            eventIDsForNode[hostName].append(e.EventId)
+                        else:
+                            eventIDsForNode[hostName] = [e.EventId]
+            elif e.EventStatus == "Started":
+                ocf.logger.info("handleRemoteEvents: remote event already started")
+
+        # force the start of all events whose nodes are ready (i.e. have no more resources running)
+        if len(eventIDsForNode.keys()) > 0:
+            eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist])
+            ocf.logger.info("handleRemoteEvents: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce)))
+            for node, eventId in eventIDsForNode.items():
+                self.updateNodeStateAndEvents(IN_EVENT, eventId, node=node)
+            azHelper.forceEvents(eventIDsToForce)
+
+        ocf.logger.debug("handleRemoteEvents: finished")
+
+    def handleLocalEvents(self, azEvents):
+        """
+        Handle a list of own events (as provided by Azure Metadata Service)
+        """
+        ocf.logger.debug("handleLocalEvents: begin; hostName = %s, azEvents = %s" % (self.hostName, str(azEvents)))
+
+        azEventIDs = set()
+        for e in azEvents:
+            azEventIDs.add(e.EventId)
+
+        curState = self.getState()
+        clusterEventIDs = self.getEventIDs()
+        mayUpdateDocVersion = False
+        ocf.logger.info("handleLocalEvents: current state = %s; pending local clusterEvents = %s" % (nodeStateToString(curState), str(clusterEventIDs)))
+
+        # check if there are currently/still events set for the node
+        if clusterEventIDs:
+            # there are pending events set, so our state must be STOPPING or IN_EVENT
+            i = 0; touchedEventIDs = False
+            while i < len(clusterEventIDs):
+                # clean up pending events that are already finished according to AZ
+                if clusterEventIDs[i] not in azEventIDs:
+                    ocf.logger.info("handleLocalEvents: remove finished local clusterEvent %s" % (clusterEventIDs[i]))
+                    clusterEventIDs.pop(i)
+                    touchedEventIDs = True
+                else:
+                    i += 1
+            if len(clusterEventIDs) > 0:
+                # there are still pending events (either because we're still stopping, or because the event is still in place)
+                # either way, we need to wait
+                if touchedEventIDs:
+                    ocf.logger.info("handleLocalEvents: added new local clusterEvent %s" % str(clusterEventIDs))
+                    self.setEventIDs(clusterEventIDs)
+                else:
+                    ocf.logger.info("handleLocalEvents: no local clusterEvents were updated")
+            else:
+                # there are no more pending events left after cleanup
+                if clusterHelper.noPendingResourcesOnNode(self.hostName):
+                    # and no pending resources on the node -> set it back online
+                    ocf.logger.info("handleLocalEvents: all local events finished -> clean up, put node online and AVAILABLE")
+                    curState = self.updateNodeStateAndEvents(AVAILABLE, None)
+                    self.putNodeOnline()
+                    clusterHelper.removeHoldFromNodes()
+                    # repeat handleLocalEvents() since we changed status to AVAILABLE
+                else:
+                    ocf.logger.info("handleLocalEvents: all local events finished, but some resources have not completed startup yet -> wait")
+        else:
+            # there are no pending events set for us (yet)
+            if curState == AVAILABLE:
+                if len(azEventIDs) > 0:
+                    if clusterHelper.otherNodesAvailable(self):
+                        ocf.logger.info("handleLocalEvents: can handle local events %s -> set state STOPPING" % (str(azEventIDs)))
+                        # this will also set mayUpdateDocVersion = True
+                        curState = self.updateNodeStateAndEvents(STOPPING, azEventIDs)
+                    else:
+                        ocf.logger.info("handleLocalEvents: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(azEventIDs))
+                        self.setState(ON_HOLD)
+                else:
+                    ocf.logger.debug("handleLocalEvents: no local azEvents to handle")
+            if curState == STOPPING:
+                if clusterHelper.noPendingResourcesOnNode(self.hostName):
+                    ocf.logger.info("handleLocalEvents: all local resources are started properly -> put node standby")
+                    self.putNodeStandby()
+                    mayUpdateDocVersion = True
+                else:
+                    ocf.logger.info("handleLocalEvents: some local resources are not clean yet -> wait")
+
+        ocf.logger.debug("handleLocalEvents: finished; mayUpdateDocVersion = %s" % str(mayUpdateDocVersion))
+        return mayUpdateDocVersion
+
+##############################################################################
+
+class raAzEvents:
+    """
+    Main class for resource agent
+    """
+    def __init__(self, relevantEventTypes):
+        self.node = Node(self)
+        self.relevantEventTypes = relevantEventTypes
+
+    def monitor(self):
+        ocf.logger.debug("monitor: begin")
+
+        pullFailedAttemps = 0
+        while True:
+            # check if another node is pulling at the same time;
+            # this should only be a concern for the first pull, as setting up Scheduled Events may take up to 2 minutes.
+            if clusterHelper.getAttr(attr_globalPullState) == "PULLING":
+                pullFailedAttemps += 1
+                if pullFailedAttemps == global_pullMaxAttempts:
+                    ocf.logger.warning("monitor: exceeded maximum number of attempts (%d) to pull events" % global_pullMaxAttempts)
+                    ocf.logger.debug("monitor: finished")
+                    return ocf.OCF_SUCCESS
+                else:
+                    ocf.logger.info("monitor: another node is pulling; retry in %d seconds" % global_pullDelaySecs)
+                    time.sleep(global_pullDelaySecs)
+                    continue
+
+            # we can pull safely from Azure Metadata Service
+            clusterHelper.setAttr(attr_globalPullState, "PULLING")
+            events = azHelper.pullScheduledEvents()
+            clusterHelper.setAttr(attr_globalPullState, "IDLE")
+
+            # get current document version
+            curDocVersion = events.DocumentIncarnation
+            lastDocVersion = self.node.getAttr(attr_lastDocVersion)
+            ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion))
+
+            # split events local/remote
+            (localEvents, remoteEvents) = self.node.separateEvents(events.Events)
+
+            # ensure local events are only executing once
+            if curDocVersion != lastDocVersion:
+                ocf.logger.debug("monitor: curDocVersion has not been handled yet")
+                # handleLocalEvents() returns True if mayUpdateDocVersion is True;
+                # this is only the case if we can ensure there are no pending events
+                if self.node.handleLocalEvents(localEvents):
+                    ocf.logger.info("monitor: handleLocalEvents completed successfully -> update curDocVersion")
+                    self.node.setAttr(attr_lastDocVersion, curDocVersion)
+                else:
+                    ocf.logger.debug("monitor: handleLocalEvents still waiting -> keep curDocVersion")
+            else:
+                ocf.logger.info("monitor: already handled curDocVersion, skip")
+
+            # remove orphaned remote events and then handle the remaining remote events
+            self.node.removeOrphanedEvents(remoteEvents)
+            self.node.handleRemoteEvents(remoteEvents)
+            break
+
+        ocf.logger.debug("monitor: finished")
+        return ocf.OCF_SUCCESS
+
+##############################################################################
+
+def setLoglevel(verbose):
+    # set up writing into syslog
+    loglevel = default_loglevel
+    if verbose:
+        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))
+        urllib2.install_opener(opener)
+        loglevel = ocf.logging.DEBUG
+    ocf.log.setLevel(loglevel)
+
+description = (
+    "Microsoft Azure Scheduled Events monitoring agent",
+    """This resource agent implements a monitor for scheduled
+(maintenance) events for a Microsoft Azure VM.
+
+If any relevant events are found, it moves all Pacemaker resources
+away from the affected node to allow for a graceful shutdown.
+
+    Usage:
+        [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events ACTION
+
+        action (required): Supported values: monitor, help, meta-data
+        eventTypes (optional): List of event types to be considered
+                relevant by the resource agent (comma-separated).
+                Supported values: Freeze,Reboot,Redeploy
+                Default = Reboot,Redeploy
+        verbose (optional): If set to true, displays debug info.
+                Default = false
+
+    Deployment:
+        crm configure primitive rsc_azure-events ocf:heartbeat:azure-events \
+            op monitor interval=10s
+        crm configure clone cln_azure-events rsc_azure-events
+
+For further information on Microsoft Azure Scheduled Events, please
+refer to the following documentation:
+https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events
+""")
+
+def monitor_action(eventTypes):
+    relevantEventTypes = set(eventTypes.split(",") if eventTypes else [])
+    ra = raAzEvents(relevantEventTypes)
+    return ra.monitor()
+
+def validate_action(eventTypes):
+    if eventTypes:
+        for event in eventTypes.split(","):
+            if event not in ("Freeze", "Reboot", "Redeploy"):
+                ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes)
+                return ocf.OCF_ERR_CONFIGURED
+    return ocf.OCF_SUCCESS
+
+def main():
+    agent = ocf.Agent("azure-events", shortdesc=description[0], longdesc=description[1])
+    agent.add_parameter(
+        "eventTypes",
+        shortdesc="List of resources to be considered",
+        longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)",
+        content_type="string",
+        default="Reboot,Redeploy")
+    agent.add_parameter(
+        "verbose",
+        shortdesc="Enable verbose agent logging",
+        longdesc="Set to true to enable verbose logging",
+        content_type="boolean",
+        default="false")
+    agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
+    agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
+    agent.add_action("validate-all", timeout=20, handler=validate_action)
+    agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action)
+    setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false")))
+    agent.run()
+
+if __name__ == '__main__':
+    main()
diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
--- a/heartbeat/Makefile.am	2020-04-16 11:54:08.467619588 +0200
+++ b/heartbeat/Makefile.am	2020-04-16 12:08:07.788224036 +0200
@@ -55,7 +55,7 @@
 osp_SCRIPTS = nova-compute-wait \
 NovaEvacuate

-ocf_SCRIPTS = AoEtarget \
+ocf_SCRIPTS = AoEtarget \
 AudibleAlarm \
 ClusterMon \
 CTDB \
@@ -116,10 +116,7 @@
 fio \
 galera \
 garbd \
- gcp-pd-move \
 gcp-vpc-move-ip \
- gcp-vpc-move-vip \
- gcp-vpc-move-route \
 iSCSILogicalUnit \
 iSCSITarget \
 ids \
@@ -177,6 +174,22 @@
 vsftpd \
 zabbixserver

+if BUILD_AZURE_EVENTS
+ocf_SCRIPTS += azure-events
+endif
+
+if BUILD_GCP_PD_MOVE
+ocf_SCRIPTS += gcp-pd-move
+endif
+
+if BUILD_GCP_VPC_MOVE_ROUTE
+ocf_SCRIPTS += gcp-vpc-move-route
+endif
+
+if BUILD_GCP_VPC_MOVE_VIP
+ocf_SCRIPTS += gcp-vpc-move-vip
+endif
+
 ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat
 ocfcommon_DATA = ocf-shellfuncs \
 ocf-binaries \
@@ -205,3 +218,13 @@

 %.check: %
 	OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng -
+
+do_spellcheck = printf '[%s]\n' "$(agent)"; \
+	OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) \
+	./$(agent) meta-data 2>/dev/null \
+	| xsltproc $(top_srcdir)/make/extract_text.xsl - \
+	| aspell pipe list -d en_US --ignore-case \
+	--home-dir=$(top_srcdir)/make -p spellcheck-ignore \
+	| sed -n 's|^&\([^:]*\):.*|\1|p';
+spellcheck:
+	@$(foreach agent,$(ocf_SCRIPTS), $(do_spellcheck))
diff -uNr a/m4/ac_python_module.m4 b/m4/ac_python_module.m4
--- a/m4/ac_python_module.m4	1970-01-01 01:00:00.000000000 +0100
+++ b/m4/ac_python_module.m4	2020-04-14 11:11:26.325806378 +0200
@@ -0,0 +1,30 @@
+dnl @synopsis AC_PYTHON_MODULE(modname[, fatal])
+dnl
+dnl Checks for Python module.
+dnl
+dnl If fatal is non-empty then absence of a module will trigger an
+dnl error.
+dnl
+dnl @category InstalledPackages
+dnl @author Andrew Collier <colliera@nu.ac.za>.
+dnl @version 2004-07-14
+dnl @license AllPermissive
+
+AC_DEFUN([AC_PYTHON_MODULE],[
+    AC_MSG_CHECKING(python module: $1)
+    $PYTHON -c "import $1" 2>/dev/null
+    if test $? -eq 0;
+    then
+        AC_MSG_RESULT(yes)
+        eval AS_TR_CPP(HAVE_PYMOD_$1)=yes
+    else
+        AC_MSG_RESULT(no)
+        eval AS_TR_CPP(HAVE_PYMOD_$1)=no
+        #
+        if test -n "$2"
+        then
+            AC_MSG_ERROR(failed to find required module $1)
+            exit 1
+        fi
+    fi
+])