diff --git a/SOURCES/bz1296201-fence_amt_ws-new-fence-agent.patch b/SOURCES/bz1296201-fence_amt_ws-new-fence-agent.patch
new file mode 100644
index 0000000..b417261
--- /dev/null
+++ b/SOURCES/bz1296201-fence_amt_ws-new-fence-agent.patch
@@ -0,0 +1,440 @@
+diff -uNr a/configure.ac b/configure.ac
+--- a/configure.ac 2017-10-05 14:14:39.688675727 +0200
++++ b/configure.ac 2017-10-05 14:15:17.964291884 +0200
+@@ -265,7 +265,8 @@
+ fence/agents/alom/Makefile
+ fence/agents/apc/Makefile
+ fence/agents/apc_snmp/Makefile
+- fence/agents/amt/Makefile
++ fence/agents/amt/Makefile
++ fence/agents/amt_ws/Makefile
+ fence/agents/bladecenter/Makefile
+ fence/agents/brocade/Makefile
+ fence/agents/cisco_mds/Makefile
+diff -uNr a/fence/agents/amt_ws/fence_amt_ws.py b/fence/agents/amt_ws/fence_amt_ws.py
+--- a/fence/agents/amt_ws/fence_amt_ws.py 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/amt_ws/fence_amt_ws.py 2017-10-05 14:15:17.965291874 +0200
+@@ -0,0 +1,243 @@
++#!/usr/bin/python -tt
++
++#
++# Fence agent for Intel AMT (WS) based on code from the openstack/ironic project:
++# https://github.com/openstack/ironic/blob/master/ironic/drivers/modules/amt/power.py
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++#
++
++import sys
++import atexit
++import logging
++sys.path.append("@FENCEAGENTSLIBDIR@")
++from fencing import *
++from fencing import run_delay, fail_usage, fail, EC_STATUS
++
++import pywsman
++from xml.etree import ElementTree
++
++
++#BEGIN_VERSION_GENERATION
++RELEASE_VERSION="Fence agent for Intel AMT (WS)"
++REDHAT_COPYRIGHT=""
++BUILD_DATE=""
++#END_VERSION_GENERATION
++
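++# Power-state values from the DMTF CIM enumeration used by
++# CIM_PowerManagementService.RequestPowerStateChange:
++# 2 = Power On, 8 = Power Off (Soft), 10 = Master Bus Reset (reset)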
++POWER_ON='2'
++POWER_OFF='8'
++POWER_CYCLE='10'
++
++RET_SUCCESS = '0'
++
++CIM_PowerManagementService = ('http://schemas.dmtf.org/wbem/wscim/1/'
++ 'cim-schema/2/CIM_PowerManagementService')
++CIM_ComputerSystem = ('http://schemas.dmtf.org/wbem/wscim/'
++ '1/cim-schema/2/CIM_ComputerSystem')
++CIM_AssociatedPowerManagementService = ('http://schemas.dmtf.org/wbem/wscim/'
++ '1/cim-schema/2/'
++ 'CIM_AssociatedPowerManagementService')
++
++CIM_BootConfigSetting = ('http://schemas.dmtf.org/wbem/wscim/'
++ '1/cim-schema/2/CIM_BootConfigSetting')
++CIM_BootSourceSetting = ('http://schemas.dmtf.org/wbem/wscim/'
++ '1/cim-schema/2/CIM_BootSourceSetting')
++
++
++def xml_find(doc, namespace, item):
++ if doc is None:
++ return
++ tree = ElementTree.fromstring(doc.root().string())
++ query = ('.//{%(namespace)s}%(item)s' % {'namespace': namespace,
++ 'item': item})
++ return tree.find(query)
++
++def _generate_power_action_input(action):
++ method_input = "RequestPowerStateChange_INPUT"
++ address = 'http://schemas.xmlsoap.org/ws/2004/08/addressing'
++ anonymous = ('http://schemas.xmlsoap.org/ws/2004/08/addressing/'
++ 'role/anonymous')
++ wsman = 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd'
++ namespace = CIM_PowerManagementService
++
++ doc = pywsman.XmlDoc(method_input)
++ root = doc.root()
++ root.set_ns(namespace)
++ root.add(namespace, 'PowerState', action)
++
++ child = root.add(namespace, 'ManagedElement', None)
++ child.add(address, 'Address', anonymous)
++
++ grand_child = child.add(address, 'ReferenceParameters', None)
++ grand_child.add(wsman, 'ResourceURI', CIM_ComputerSystem)
++
++ g_grand_child = grand_child.add(wsman, 'SelectorSet', None)
++ g_g_grand_child = g_grand_child.add(wsman, 'Selector', 'ManagedSystem')
++ g_g_grand_child.attr_add(wsman, 'Name', 'Name')
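++ # Schematically, the assembled SOAP body is a RequestPowerStateChange_INPUT
++ # element carrying the requested PowerState plus a WS-Addressing reference
++ # to the CIM_ComputerSystem selected by Name="ManagedSystem".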
++ return doc
++
++def get_power_status(_, options):
++ client = pywsman.Client(options["--ip"], int(options["--ipport"]), \
++ '/wsman', 'http', 'admin', options["--password"])
++ namespace = CIM_AssociatedPowerManagementService
++ client_options = pywsman.ClientOptions()
++ doc = client.get(client_options, namespace)
++ _SOAP_ENVELOPE = 'http://www.w3.org/2003/05/soap-envelope'
++ item = 'Fault'
++ fault = xml_find(doc, _SOAP_ENVELOPE, item)
++ if fault is not None:
++ logging.error("Failed to get power state for: %s port:%s", \
++ options["--ip"], options["--ipport"])
++ fail(EC_STATUS)
++
++ item = "PowerState"
++ try: power_state = xml_find(doc, namespace, item).text
++ except AttributeError:
++ logging.error("Failed to get power state for: %s port:%s", \
++ options["--ip"], options["--ipport"])
++ fail(EC_STATUS)
++ if power_state == POWER_ON:
++ return "on"
++ elif power_state == POWER_OFF:
++ return "off"
++ else:
++ fail(EC_STATUS)
++
++def set_power_status(_, options):
++ client = pywsman.Client(options["--ip"], int(options["--ipport"]), \
++ '/wsman', 'http', 'admin', options["--password"])
++
++ method = 'RequestPowerStateChange'
++ client_options = pywsman.ClientOptions()
++ client_options.add_selector('Name', 'Intel(r) AMT Power Management Service')
++
++ if options["--action"] == "on":
++ target_state = POWER_ON
++ elif options["--action"] == "off":
++ target_state = POWER_OFF
++ elif options["--action"] == "reboot":
++ target_state = POWER_CYCLE
++ if options["--action"] in ["on", "off", "reboot"] \
++ and options.has_key("--boot-option"):
++ set_boot_order(_, client, options)
++
++ doc = _generate_power_action_input(target_state)
++ client_doc = client.invoke(client_options, CIM_PowerManagementService, \
++ method, doc)
++ item = "ReturnValue"
++ return_value = xml_find(client_doc, CIM_PowerManagementService, item).text
++ if return_value != RET_SUCCESS:
++ logging.error("Failed to set power state: %s for: %s", \
++ options["--action"], options["--ip"])
++ fail(EC_STATUS)
++
++def set_boot_order(_, client, options):
++ method_input = "ChangeBootOrder_INPUT"
++ address = 'http://schemas.xmlsoap.org/ws/2004/08/addressing'
++ anonymous = ('http://schemas.xmlsoap.org/ws/2004/08/addressing/'
++ 'role/anonymous')
++ wsman = 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd'
++ namespace = CIM_BootConfigSetting
++
++ if options["--boot-option"] == "pxe":
++ device = "Intel(r) AMT: Force PXE Boot"
++ elif options["--boot-option"] in ["hd", "hdsafe"]:
++ device = "Intel(r) AMT: Force Hard-drive Boot"
++ elif options["--boot-option"] == "cd":
++ device = "Intel(r) AMT: Force CD/DVD Boot"
++ elif options["--boot-option"] == "diag":
++ device = "Intel(r) AMT: Force Diagnostic Boot"
++ else:
++ logging.error('Boot device: %s not supported.', \
++ options["--boot-option"])
++ return
++
++ method = 'ChangeBootOrder'
++ client_options = pywsman.ClientOptions()
++ client_options.add_selector('InstanceID', \
++ 'Intel(r) AMT: Boot Configuration 0')
++
++ doc = pywsman.XmlDoc(method_input)
++ root = doc.root()
++ root.set_ns(namespace)
++
++ child = root.add(namespace, 'Source', None)
++ child.add(address, 'Address', anonymous)
++
++ grand_child = child.add(address, 'ReferenceParameters', None)
++ grand_child.add(wsman, 'ResourceURI', CIM_BootSourceSetting)
++
++ g_grand_child = grand_child.add(wsman, 'SelectorSet', None)
++ g_g_grand_child = g_grand_child.add(wsman, 'Selector', device)
++ g_g_grand_child.attr_add(wsman, 'Name', 'InstanceID')
++ if options["--boot-option"] == "hdsafe":
++ g_g_grand_child = g_grand_child.add(wsman, 'Selector', 'True')
++ g_g_grand_child.attr_add(wsman, 'Name', 'UseSafeMode')
++
++ client_doc = client.invoke(client_options, CIM_BootConfigSetting, \
++ method, doc)
++ item = "ReturnValue"
++ return_value = xml_find(client_doc, CIM_BootConfigSetting, item).text
++ if return_value != RET_SUCCESS:
++ logging.error("Failed to set boot device to: %s for: %s", \
++ options["--boot-option"], options["--ip"])
++ fail(EC_STATUS)
++
++def reboot_cycle(_, options):
++ status = set_power_status(_, options)
++ return not bool(status)
++
++def define_new_opts():
++ all_opt["boot_option"] = {
++ "getopt" : "b:",
++ "longopt" : "boot-option",
++ "help" : "-b, --boot-option=[option] "
++ "Change the default boot behavior of the\n"
++ " machine."
++ " (pxe|hd|hdsafe|cd|diag)",
++ "required" : "0",
++ "shortdesc" : "Change the default boot behavior of the machine.",
++ "choices" : ["pxe", "hd", "hdsafe", "cd", "diag"],
++ "order" : 1
++ }
++
++def main():
++ atexit.register(atexit_handler)
++
++ device_opt = ["ipaddr", "no_login", "passwd", "boot_option", "no_port",
++ "method"]
++
++ define_new_opts()
++
++ all_opt["ipport"]["default"] = "16992"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for AMT (WS)"
++ docs["longdesc"] = "fence_amt_ws is an I/O Fencing agent \
++which can be used with Intel AMT (WS). This agent requires \
++the pywsman Python library which is included in OpenWSMAN. \
++(http://openwsman.github.io/)."
++ docs["vendorurl"] = "http://www.intel.com/"
++ show_docs(options, docs)
++
++ run_delay(options)
++
++ result = fence_action(None, options, set_power_status, get_power_status, \
++ None, reboot_cycle)
++
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
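+
+Example invocation (illustrative address and password; the agent defaults
+--ipport to 16992, the standard Intel AMT port):
+
+    fence_amt_ws --ip=192.0.2.10 --password=secret --action=status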
+diff -uNr a/fence/agents/amt_ws/Makefile.am b/fence/agents/amt_ws/Makefile.am
+--- a/fence/agents/amt_ws/Makefile.am 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/amt_ws/Makefile.am 2017-10-05 14:15:17.965291874 +0200
+@@ -0,0 +1,17 @@
++MAINTAINERCLEANFILES = Makefile.in
++
++TARGET = fence_amt_ws
++
++SRC = $(TARGET).py
++
++EXTRA_DIST = $(SRC)
++
++sbin_SCRIPTS = $(TARGET)
++
++man_MANS = $(TARGET).8
++
++FENCE_TEST_ARGS = -p test -a test
++
++include $(top_srcdir)/make/fencebuild.mk
++include $(top_srcdir)/make/fenceman.mk
++include $(top_srcdir)/make/agentpycheck.mk
+diff -uNr a/tests/data/metadata/fence_amt_ws.xml b/tests/data/metadata/fence_amt_ws.xml
+--- a/tests/data/metadata/fence_amt_ws.xml 1970-01-01 01:00:00.000000000 +0100
++++ b/tests/data/metadata/fence_amt_ws.xml 2017-10-05 14:10:49.145987710 +0200
+@@ -0,0 +1,155 @@
++
++
++fence_amt_ws is an I/O Fencing agent which can be used with Intel AMT (WS). This agent requires the pywsman Python library which is included in OpenWSMAN. (http://openwsman.github.io/).
++http://www.intel.com/
++
++
++
++
++ TCP/UDP port to use for connection with device
++
++
++
++
++ IP address or hostname of fencing device (together with --port-as-ip)
++
++
++
++
++ Forces agent to use IPv6 addresses only
++
++
++
++
++ IP Address or Hostname
++
++
++
++
++ Forces agent to use IPv4 addresses only
++
++
++
++
++
++
++
++ Method to fence (onoff|cycle)
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Login password or passphrase
++
++
++
++
++
++
++
++
++
++
++ Change the default boot behavior of the machine.
++
++
++
++
++ Fencing Action
++
++
++
++
++ IP address or hostname of fencing device (together with --port-as-ip)
++
++
++
++
++ IP Address or Hostname
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Make "port/plug" to be an alias to IP address
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
++
diff --git a/SOURCES/bz1396050-fence_vmware_rest-new-fence-agent.patch b/SOURCES/bz1396050-fence_vmware_rest-new-fence-agent.patch
new file mode 100644
index 0000000..0612a3b
--- /dev/null
+++ b/SOURCES/bz1396050-fence_vmware_rest-new-fence-agent.patch
@@ -0,0 +1,404 @@
+diff -uNr a/configure.ac b/configure.ac
+--- a/configure.ac 2017-10-24 11:38:01.084912422 +0200
++++ b/configure.ac 2017-10-24 11:34:55.939413105 +0200
+@@ -309,6 +309,7 @@
+ fence/agents/virsh/Makefile
+ fence/agents/vmware/Makefile
+ fence/agents/vmware_soap/Makefile
++ fence/agents/vmware_rest/Makefile
+ fence/agents/wti/Makefile
+ fence/agents/xenapi/Makefile
+ fence/agents/hds_cb/Makefile
+diff -uNr a/fence/agents/vmware_rest/fence_vmware_rest.py b/fence/agents/vmware_rest/fence_vmware_rest.py
+--- a/fence/agents/vmware_rest/fence_vmware_rest.py 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/vmware_rest/fence_vmware_rest.py 2017-10-24 12:17:10.919982326 +0200
+@@ -0,0 +1,189 @@
++#!/usr/bin/python -tt
++
++import sys
++import pycurl, io, json
++import logging
++import atexit
++sys.path.append("@FENCEAGENTSLIBDIR@")
++from fencing import *
++from fencing import fail, run_delay, EC_LOGIN_DENIED, EC_STATUS
++
++#BEGIN_VERSION_GENERATION
++RELEASE_VERSION=""
++REDHAT_COPYRIGHT=""
++BUILD_DATE=""
++#END_VERSION_GENERATION
++
++state = {"POWERED_ON": "on", 'POWERED_OFF': "off"}
++
++def get_power_status(conn, options):
++ res = send_command(conn, "vcenter/vm?filter.names={}".format(options["--plug"]))["value"]
++
++ if len(res) == 0:
++ fail(EC_STATUS)
++
++ options["id"] = res[0]["vm"]
++
++ result = res[0]["power_state"]
++
++ return state[result]
++
++def set_power_status(conn, options):
++ action = {
++ "on" : "start",
++ "off" : "stop"
++ }[options["--action"]]
++
++ send_command(conn, "vcenter/vm/{}/power/{}".format(options["id"], action), "POST")
++
++def get_list(conn, options):
++ outlets = {}
++
++ res = send_command(conn, "vcenter/vm")
++
++ for r in res["value"]:
++ outlets[r["name"]] = ("", state[r["power_state"]])
++
++ return outlets
++
++def connect(opt):
++ conn = pycurl.Curl()
++
++ ## setup correct URL
++ if "--ssl" in opt or "--ssl-secure" in opt or "--ssl-insecure" in opt:
++ conn.base_url = "https:"
++ else:
++ conn.base_url = "http:"
++ if "--api-path" in opt:
++ api_path = opt["--api-path"]
++ else:
++ api_path = "/rest"
++
++ conn.base_url += "//" + opt["--ip"] + ":" + str(opt["--ipport"]) + api_path + "/"
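++ # e.g. "https://vcenter.example.com:443/rest/" (host illustrative; the
++ # scheme follows the --ssl* options above)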
++
++ ## send command through pycurl
++ conn.setopt(pycurl.HTTPHEADER, [
++ "Accept: application/json",
++ ])
++
++ conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
++ conn.setopt(pycurl.USERPWD, opt["--username"] + ":" + opt["--password"])
++
++ conn.setopt(pycurl.TIMEOUT, int(opt["--shell-timeout"]))
++ if "--ssl" in opt or "--ssl-secure" in opt:
++ conn.setopt(pycurl.SSL_VERIFYPEER, 1)
++ conn.setopt(pycurl.SSL_VERIFYHOST, 2)
++
++ if "--ssl-insecure" in opt:
++ conn.setopt(pycurl.SSL_VERIFYPEER, 0)
++ conn.setopt(pycurl.SSL_VERIFYHOST, 0)
++
++ try:
++ result = send_command(conn, "com/vmware/cis/session", "POST")
++ except Exception as e:
++ logging.debug("Failed: {}".format(e))
++ fail(EC_LOGIN_DENIED)
++
++ # set session id for later requests
++ conn.setopt(pycurl.HTTPHEADER, [
++ "Accept: application/json",
++ "vmware-api-session-id: {}".format(result["value"]),
++ ])
++
++ return conn
++
++def disconnect(conn):
++ send_command(conn, "com/vmware/cis/session", "DELETE")
++ conn.close()
++
++def send_command(conn, command, method="GET"):
++ url = conn.base_url + command
++
++ conn.setopt(pycurl.URL, url.encode("ascii"))
++
++ web_buffer = io.BytesIO()
++
++ if method == "GET":
++ conn.setopt(pycurl.POST, 0)
++ if method == "POST":
++ conn.setopt(pycurl.POSTFIELDS, "")
++ if method == "DELETE":
++ conn.setopt(pycurl.CUSTOMREQUEST, "DELETE")
++
++ conn.setopt(pycurl.WRITEFUNCTION, web_buffer.write)
++
++ try:
++ conn.perform()
++ except Exception as e:
++ raise Exception(e[1])
++
++ rc = conn.getinfo(pycurl.HTTP_CODE)
++ result = web_buffer.getvalue().decode()
++
++ web_buffer.close()
++
++ if len(result) > 0:
++ result = json.loads(result)
++
++ if rc != 200:
++ raise Exception("{}: {}".format(rc, result["value"]["messages"][0]["default_message"]))
++
++ logging.debug("url: {}".format(url))
++ logging.debug("method: {}".format(method))
++ logging.debug("response code: {}".format(rc))
++ logging.debug("result: {}\n".format(result))
++
++ return result
++
++def define_new_opts():
++ all_opt["api_path"] = {
++ "getopt" : ":",
++ "longopt" : "api-path",
++ "help" : "--api-path=[path] The path part of the API URL",
++ "default" : "/rest",
++ "required" : "0",
++ "shortdesc" : "The path part of the API URL",
++ "order" : 2}
++
++
++def main():
++ device_opt = [
++ "ipaddr",
++ "api_path",
++ "login",
++ "passwd",
++ "ssl",
++ "notls",
++ "web",
++ "port",
++ ]
++
++ atexit.register(atexit_handler)
++ define_new_opts()
++
++ all_opt["shell_timeout"]["default"] = "5"
++ all_opt["power_wait"]["default"] = "1"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for VMware REST API"
++ docs["longdesc"] = "fence_vmware_rest is an I/O Fencing agent which can be \
++used with VMware API to fence virtual machines."
++ docs["vendorurl"] = "https://www.vmware.com"
++ show_docs(options, docs)
++
++ ####
++ ## Fence operations
++ ####
++ run_delay(options)
++
++ conn = connect(options)
++ atexit.register(disconnect, conn)
++
++ result = fence_action(conn, options, set_power_status, get_power_status, get_list)
++
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
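+
+Example invocation (illustrative host, credentials and VM name):
+
+    fence_vmware_rest --ip=vcenter.example.com --ssl-insecure --username=admin \
+        --password=secret --plug=vm01 --action=status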
+diff -uNr a/fence/agents/vmware_rest/Makefile.am b/fence/agents/vmware_rest/Makefile.am
+--- a/fence/agents/vmware_rest/Makefile.am 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/vmware_rest/Makefile.am 2017-10-24 11:32:17.369693405 +0200
+@@ -0,0 +1,20 @@
++MAINTAINERCLEANFILES = Makefile.in
++
++TARGET = fence_vmware_rest
++
++SRC = $(TARGET).py
++
++EXTRA_DIST = $(SRC)
++
++sbin_SCRIPTS = $(TARGET)
++
++man_MANS = $(TARGET).8
++
++FENCE_TEST_ARGS = -l test -p test -a test -n 1
++
++include $(top_srcdir)/make/fencebuild.mk
++include $(top_srcdir)/make/fenceman.mk
++include $(top_srcdir)/make/agentpycheck.mk
++
++clean-local: clean-man
++ rm -f $(TARGET)
+diff -uNr a/tests/data/metadata/fence_vmware_rest.xml b/tests/data/metadata/fence_vmware_rest.xml
+--- a/tests/data/metadata/fence_vmware_rest.xml 1970-01-01 01:00:00.000000000 +0100
++++ b/tests/data/metadata/fence_vmware_rest.xml 2017-10-24 11:35:43.501027721 +0200
+@@ -0,0 +1,172 @@
++
++
++fence_vmware_rest is an I/O Fencing agent which can be used with VMware API to fence virtual machines.
++https://www.vmware.com
++
++
++
++
++ TCP/UDP port to use for connection with device
++
++
++
++
++ Disable TLS negotiation, force SSL 3.0
++
++
++
++
++ SSL connection with verifying fence device's certificate
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Forces agent to use IPv6 addresses only
++
++
++
++
++ IP Address or Hostname
++
++
++
++
++ Forces agent to use IPv4 addresses only
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ SSL connection
++
++
++
++
++ SSL connection without verifying fence device's certificate
++
++
++
++
++ Fencing Action
++
++
++
++
++ Login Name
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Login Name
++
++
++
++
++ IP Address or Hostname
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Script to retrieve password
++
++
++
++ The path part of the API URL
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Separator for CSV created by operation list
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
++
++
++
diff --git a/SOURCES/bz1449183-fence_ipmilan-hexadecimal-key-auth.patch b/SOURCES/bz1449183-fence_ipmilan-hexadecimal-key-auth.patch
new file mode 100644
index 0000000..e9dc2a7
--- /dev/null
+++ b/SOURCES/bz1449183-fence_ipmilan-hexadecimal-key-auth.patch
@@ -0,0 +1,112 @@
+diff -uNr a/fence/agents/ipmilan/fence_ipmilan.py b/fence/agents/ipmilan/fence_ipmilan.py
+--- a/fence/agents/ipmilan/fence_ipmilan.py 2017-09-29 13:16:33.097610593 +0200
++++ b/fence/agents/ipmilan/fence_ipmilan.py 2017-09-29 13:30:15.340293733 +0200
+@@ -72,6 +72,9 @@
+ if options.has_key("--privlvl"):
+ cmd += " -L " + options["--privlvl"]
+
++ if "--hexadecimal-kg" in options:
++ cmd += " -y " + options["--hexadecimal-kg"]
++
+ # --action / -o
+ cmd += " chassis power " + action
+
+@@ -136,6 +139,14 @@
+ "shortdesc" : "Bridge IPMI requests to the remote target address",
+ "order": 1
+ }
++ all_opt["hexadecimal_kg"] = {
++ "getopt" : ":",
++ "longopt" : "hexadecimal-kg",
++ "help" : "--hexadecimal-kg=[key] Hexadecimal-encoded Kg key for IPMIv2 authentication",
++ "required" : "0",
++ "shortdesc" : "Hexadecimal-encoded Kg key for IPMIv2 authentication",
++ "order": 1
++ }
+ all_opt["obsolete_ip"] = {
+ "getopt" : "i:",
+ "longopt" : "obsolete-ip",
+@@ -156,7 +167,7 @@
+
+ device_opt = ["ipaddr", "login", "no_login", "no_password", "passwd",
+ "diag", "lanplus", "auth", "cipher", "privlvl", "sudo",
+- "ipmitool_path", "method", "target", "obsolete_ip", "timeout"]
++ "ipmitool_path", "method", "target", "hexadecimal_kg", "obsolete_ip", "timeout"]
+ define_new_opts()
+
+ all_opt["power_wait"]["default"] = 2
+diff -uNr a/tests/data/metadata/fence_idrac.xml b/tests/data/metadata/fence_idrac.xml
+--- a/tests/data/metadata/fence_idrac.xml 2017-09-29 13:16:33.133610272 +0200
++++ b/tests/data/metadata/fence_idrac.xml 2017-09-29 13:59:22.124806409 +0200
+@@ -12,6 +12,11 @@
+
+ TCP/UDP port to use for connection with device
+
++ <parameter name="hexadecimal_kg" unique="0" required="0">
++ <getopt mixed="--hexadecimal-kg=[key]" />
++ <content type="string" />
++ <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
++ </parameter>
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo3.xml b/tests/data/metadata/fence_ilo3.xml
+--- a/tests/data/metadata/fence_ilo3.xml 2017-09-29 13:16:33.133610272 +0200
++++ b/tests/data/metadata/fence_ilo3.xml 2017-09-29 13:59:22.123806418 +0200
+@@ -12,6 +12,11 @@
+
+ TCP/UDP port to use for connection with device
+
++ <parameter name="hexadecimal_kg" unique="0" required="0">
++ <getopt mixed="--hexadecimal-kg=[key]" />
++ <content type="string" />
++ <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
++ </parameter>
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo4.xml b/tests/data/metadata/fence_ilo4.xml
+--- a/tests/data/metadata/fence_ilo4.xml 2017-09-29 13:16:33.134610263 +0200
++++ b/tests/data/metadata/fence_ilo4.xml 2017-09-29 13:59:22.123806418 +0200
+@@ -12,6 +12,11 @@
+
+ TCP/UDP port to use for connection with device
+
++ <parameter name="hexadecimal_kg" unique="0" required="0">
++ <getopt mixed="--hexadecimal-kg=[key]" />
++ <content type="string" />
++ <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
++ </parameter>
+
+
+
+diff -uNr a/tests/data/metadata/fence_imm.xml b/tests/data/metadata/fence_imm.xml
+--- a/tests/data/metadata/fence_imm.xml 2017-09-29 13:16:33.134610263 +0200
++++ b/tests/data/metadata/fence_imm.xml 2017-09-29 13:59:22.123806418 +0200
+@@ -12,6 +12,11 @@
+
+ TCP/UDP port to use for connection with device
+
++ <parameter name="hexadecimal_kg" unique="0" required="0">
++ <getopt mixed="--hexadecimal-kg=[key]" />
++ <content type="string" />
++ <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
++ </parameter>
+
+
+
+diff -uNr a/tests/data/metadata/fence_ipmilan.xml b/tests/data/metadata/fence_ipmilan.xml
+--- a/tests/data/metadata/fence_ipmilan.xml 2017-09-29 13:16:33.134610263 +0200
++++ b/tests/data/metadata/fence_ipmilan.xml 2017-09-29 13:35:55.835265303 +0200
+@@ -12,6 +12,11 @@
+
+ TCP/UDP port to use for connection with device
+
++ <parameter name="hexadecimal_kg" unique="0" required="0">
++ <getopt mixed="--hexadecimal-kg=[key]" />
++ <content type="string" />
++ <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
++ </parameter>
+
+
+
diff --git a/SOURCES/bz1451776-1-fence_aws-new-fence-agent.patch b/SOURCES/bz1451776-1-fence_aws-new-fence-agent.patch
new file mode 100644
index 0000000..4982b78
--- /dev/null
+++ b/SOURCES/bz1451776-1-fence_aws-new-fence-agent.patch
@@ -0,0 +1,290 @@
+diff -uNr a/configure.ac b/configure.ac
+--- a/configure.ac 2017-10-05 10:21:12.966801280 +0200
++++ b/configure.ac 2017-10-05 10:22:01.993319558 +0200
+@@ -267,6 +267,7 @@
+ fence/agents/apc_snmp/Makefile
+ fence/agents/amt/Makefile
+ fence/agents/amt_ws/Makefile
++ fence/agents/aws/Makefile
+ fence/agents/bladecenter/Makefile
+ fence/agents/brocade/Makefile
+ fence/agents/cisco_mds/Makefile
+diff -uNr a/fence/agents/aws/fence_aws.py b/fence/agents/aws/fence_aws.py
+--- a/fence/agents/aws/fence_aws.py 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/aws/fence_aws.py 2017-08-29 12:01:39.187348926 +0200
+@@ -0,0 +1,139 @@
++#!/usr/bin/python -tt
++
++import sys, re
++import logging
++import atexit
++sys.path.append("/usr/share/fence")
++from fencing import *
++from fencing import fail, fail_usage, EC_TIMED_OUT, run_delay
++
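++# boto3/botocore may be missing at import time; main() reports that cleanly
++# through the NameError path when boto3.resource() is first called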
++try:
++ import boto3
++ from botocore.exceptions import ClientError, EndpointConnectionError, NoRegionError
++except:
++ pass
++
++#BEGIN_VERSION_GENERATION
++RELEASE_VERSION="Fence agent for AWS (Amazon Web Services)"
++REDHAT_COPYRIGHT=""
++BUILD_DATE=""
++#END_VERSION_GENERATION
++
++def get_nodes_list(conn, options):
++ result = {}
++ try:
++ for instance in conn.instances.all():
++ result[instance.id] = ("", None)
++ except ClientError:
++ fail_usage("Failed: Incorrect Access Key or Secret Key.")
++ except EndpointConnectionError:
++ fail_usage("Failed: Incorrect Region.")
++
++ return result
++
++def get_power_status(conn, options):
++ try:
++ instance = conn.instances.filter(Filters=[{"Name": "instance-id", "Values": [options["--plug"]]}])
++ state = list(instance)[0].state["Name"]
++ if state == "running":
++ return "on"
++ elif state == "stopped":
++ return "off"
++ else:
++ return "unknown"
++
++ except ClientError:
++ fail_usage("Failed: Incorrect Access Key or Secret Key.")
++ except EndpointConnectionError:
++ fail_usage("Failed: Incorrect Region.")
++ except IndexError:
++ return "fail"
++
++def set_power_status(conn, options):
++ if (options["--action"]=="off"):
++ conn.instances.filter(InstanceIds=[options["--plug"]]).stop(Force=True)
++ elif (options["--action"]=="on"):
++ conn.instances.filter(InstanceIds=[options["--plug"]]).start()
++
++
++def define_new_opts():
++ all_opt["region"] = {
++ "getopt" : "r:",
++ "longopt" : "region",
++ "help" : "-r, --region=[name] Region, e.g. us-east-1",
++ "shortdesc" : "Region.",
++ "required" : "0",
++ "order" : 2
++ }
++ all_opt["access_key"] = {
++ "getopt" : "a:",
++ "longopt" : "access-key",
++ "help" : "-a, --access-key=[name] Access Key",
++ "shortdesc" : "Access Key.",
++ "required" : "0",
++ "order" : 3
++ }
++ all_opt["secret_key"] = {
++ "getopt" : "s:",
++ "longopt" : "secret-key",
++ "help" : "-s, --secret-key=[name] Secret Key",
++ "shortdesc" : "Secret Key.",
++ "required" : "0",
++ "order" : 4
++ }
++
++# Main agent method
++def main():
++ conn = None
++
++ device_opt = ["port", "no_password", "region", "access_key", "secret_key"]
++
++ atexit.register(atexit_handler)
++
++ define_new_opts()
++
++ all_opt["power_timeout"]["default"] = "60"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for AWS (Amazon Web Services)"
++ docs["longdesc"] = "fence_aws is an I/O Fencing agent for AWS (Amazon Web\
++Services). It uses the boto3 library to connect to AWS.\
++\n.P\n\
++boto3 can be configured with AWS CLI or by creating ~/.aws/credentials.\n\
++For instructions see: https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration"
++ docs["vendorurl"] = "http://www.amazon.com"
++ show_docs(options, docs)
++
++ run_delay(options)
++
++ if "--region" in options and "--access-key" in options and "--secret-key" in options:
++ region = options["--region"]
++ access_key = options["--access-key"]
++ secret_key = options["--secret-key"]
++ try:
++ conn = boto3.resource('ec2', region_name=region,
++ aws_access_key_id=access_key,
++ aws_secret_access_key=secret_key)
++ except NameError:
++ fail_usage("Failed: boto3 Python library not available")
++ except:
++ fail_usage("Failed: Unable to connect to AWS. Check your configuration.")
++ else:
++ # If setup with "aws configure" or manually in
++ # ~/.aws/credentials
++ try:
++ conn = boto3.resource('ec2')
++ except NameError:
++ fail_usage("Failed: boto3 Python library not available")
++ except:
++ # If any of region/access/secret are missing
++ fail_usage("Failed: Unable to connect to AWS. Check your configuration.")
++
++ # Operate the fencing device
++ result = fence_action(conn, options, set_power_status, get_power_status, get_nodes_list)
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
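+
+Example invocation (illustrative region, credentials and instance ID):
+
+    fence_aws --region=us-east-1 --access-key=<key> --secret-key=<secret> \
+        --plug=i-0123456789abcdef0 --action=status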
+diff -uNr a/fence/agents/aws/Makefile.am b/fence/agents/aws/Makefile.am
+--- a/fence/agents/aws/Makefile.am 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/aws/Makefile.am 2017-08-29 10:57:41.315547575 +0200
+@@ -0,0 +1,17 @@
++MAINTAINERCLEANFILES = Makefile.in
++
++TARGET = fence_aws
++
++SRC = $(TARGET).py
++
++EXTRA_DIST = $(SRC)
++
++sbin_SCRIPTS = $(TARGET)
++
++man_MANS = $(TARGET).8
++
++FENCE_TEST_ARGS = -r test -a test -s test -n 1
++
++include $(top_srcdir)/make/fencebuild.mk
++include $(top_srcdir)/make/fenceman.mk
++include $(top_srcdir)/make/agentpycheck.mk
+diff -uNr a/tests/data/metadata/fence_aws.xml b/tests/data/metadata/fence_aws.xml
+--- a/tests/data/metadata/fence_aws.xml 1970-01-01 01:00:00.000000000 +0100
++++ b/tests/data/metadata/fence_aws.xml 2017-08-29 10:52:48.250543883 +0200
+@@ -0,0 +1,111 @@
++
++
++fence_aws is an I/O Fencing agent for AWS (Amazon Web Services). It uses the boto3 library to connect to AWS.
++.P
++boto3 can be configured with AWS CLI or by creating ~/.aws/credentials.
++For instructions see: https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration
++http://www.amazon.com
++
++
++
++
++ Fencing Action
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Region.
++
++
++
++
++ Access Key.
++
++
++
++
++ Secret Key.
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Separator for CSV created by operation list
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
++
++
++
diff --git a/SOURCES/bz1451776-2-fence_aws-bundled-python-botocore.patch b/SOURCES/bz1451776-2-fence_aws-bundled-python-botocore.patch
new file mode 100644
index 0000000..06d5a3d
--- /dev/null
+++ b/SOURCES/bz1451776-2-fence_aws-bundled-python-botocore.patch
@@ -0,0 +1,11 @@
+diff -uNr a/fence/agents/aws/fence_aws.py b/fence/agents/aws/fence_aws.py
+--- a/fence/agents/aws/fence_aws.py 2018-01-26 13:17:44.049566236 +0100
++++ b/fence/agents/aws/fence_aws.py 2018-01-26 13:21:05.808661868 +0100
+@@ -9,6 +9,7 @@
+
+ try:
+ import boto3
++ sys.path.insert(0, '/usr/lib/fence-agents/bundled')
+ from botocore.exceptions import ClientError, EndpointConnectionError, NoRegionError
+ except:
+ pass
diff --git a/SOURCES/bz1455383-fence_scsi-FIPS-support.patch b/SOURCES/bz1455383-fence_scsi-FIPS-support.patch
new file mode 100644
index 0000000..dbfc8ff
--- /dev/null
+++ b/SOURCES/bz1455383-fence_scsi-FIPS-support.patch
@@ -0,0 +1,31 @@
+From 81b8370844f5aecaee5e7178d82670c70399d824 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Mon, 24 Jul 2017 16:12:15 +0200
+Subject: [PATCH] fence_scsi: add FIPS support
+
+---
+ fence/agents/scsi/fence_scsi.py | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/fence/agents/scsi/fence_scsi.py b/fence/agents/scsi/fence_scsi.py
+index 37ff1d38..3ebe6513 100644
+--- a/fence/agents/scsi/fence_scsi.py
++++ b/fence/agents/scsi/fence_scsi.py
+@@ -182,7 +182,16 @@ def get_cluster_id(options):
+ cmd = options["--corosync-cmap-path"] + " totem.cluster_name"
+
+ match = re.search(r"\(str\) = (\S+)\n", run_cmd(options, cmd)["out"])
+- return hashlib.md5(match.group(1)).hexdigest() if match else fail_usage("Failed: cannot get cluster name")
++
++ if not match:
++ fail_usage("Failed: cannot get cluster name")
++
++ try:
++ return hashlib.md5(match.group(1)).hexdigest()
++ except ValueError:
++ # FIPS requires usedforsecurity=False and might not be
++ # available on all distros: https://bugs.python.org/issue9216
++ return hashlib.md5(match.group(1), usedforsecurity=False).hexdigest()
+
+
+ def get_node_id(options):
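+
+Background: on FIPS-enabled systems a plain hashlib.md5(data) call raises
+ValueError because MD5 is not an approved digest; the usedforsecurity=False
+keyword (a distribution extension tracked at https://bugs.python.org/issue9216)
+marks the call as non-cryptographic, which fits here because the digest only
+derives a cluster ID from the cluster name.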
diff --git a/SOURCES/bz1461854-remove-list-when-not-supported.patch b/SOURCES/bz1461854-remove-list-when-not-supported.patch
new file mode 100644
index 0000000..334c75b
--- /dev/null
+++ b/SOURCES/bz1461854-remove-list-when-not-supported.patch
@@ -0,0 +1,441 @@
+diff -uNr a/fence/agents/lib/fencing.py.py b/fence/agents/lib/fencing.py.py
+--- a/fence/agents/lib/fencing.py.py 2017-07-25 13:09:37.611338735 +0200
++++ b/fence/agents/lib/fencing.py.py 2017-07-25 14:45:55.177947410 +0200
+@@ -615,8 +615,10 @@
+
+ if avail_opt.count("no_status") == 0:
+ print "\t"
+- print "\t"
+- print "\t"
++
++ if avail_opt.count("separator"):
++ print "\t"
++ print "\t"
+ print "\t"
+ print "\t"
+ print "\t"
+@@ -820,6 +822,10 @@
+ if 1 == device_opt.count("no_status"):
+ acceptable_actions.remove("status")
+
++ if not device_opt.count("separator"):
++ acceptable_actions.remove("list")
++ acceptable_actions.remove("list-status")
++
+ if 1 == device_opt.count("diag"):
+ acceptable_actions.extend(["diag"])
+
+diff -uNr a/tests/data/metadata/fence_alom.xml b/tests/data/metadata/fence_alom.xml
+--- a/tests/data/metadata/fence_alom.xml 2017-07-25 13:09:37.611338735 +0200
++++ b/tests/data/metadata/fence_alom.xml 2017-07-25 14:49:00.542377375 +0200
+@@ -169,8 +169,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_amt.xml b/tests/data/metadata/fence_amt.xml
+--- a/tests/data/metadata/fence_amt.xml 2017-07-25 13:09:37.611338735 +0200
++++ b/tests/data/metadata/fence_amt.xml 2017-07-25 14:49:01.818359686 +0200
+@@ -162,8 +162,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_compute.xml b/tests/data/metadata/fence_compute.xml
+--- a/tests/data/metadata/fence_compute.xml 2017-07-25 13:09:37.620338610 +0200
++++ b/tests/data/metadata/fence_compute.xml 2017-07-25 14:49:02.821345781 +0200
+@@ -3,10 +3,15 @@
+ Used to tell Nova that compute nodes are down and to reschedule flagged instances
+
+
+-
+-
+-
+- Region Name
++
++
++
++ Keystone Admin Tenant
++
++
++
++
++ Keystone Admin Auth URL
+
+
+
+@@ -18,26 +23,21 @@
+
+ Script to retrieve password
+
+-
+-
+-
+- Keystone Admin Auth URL
++
++
++
++ Region Name
+
+
+
+
+ Login password or passphrase
+
+-
++
+
+
+ Nova Endpoint type
+
+-
+-
+-
+- Keystone Admin Tenant
+-
+
+
+
+@@ -53,31 +53,11 @@
+
+ Physical plug number, name of virtual machine or UUID
+
+-
+-
+-
+- Region Name
+-
+-
+-
+-
+- Keystone Admin Tenant
+-
+
+
+
+ Login Name
+
+-
+-
+-
+- Nova Endpoint type
+-
+-
+-
+-
+- Keystone Admin Auth URL
+-
+
+
+
+@@ -98,36 +78,21 @@
+
+ DNS domain in which hosts live
+
+-
++
+
+
+ Allow instances to be evacuated
+
+-
++
+
+
+ Disable functionality for dealing with shared storage
+
+-
+-
+-
+- Only record the target as needing evacuation
+-
+-
+-
+-
+- Allow instances to be evacuated
+-
+-
++
+
+
+ Only record the target as needing evacuation
+
+-
+-
+-
+- Disable functionality for dealing with shared storage
+-
+
+
+
+@@ -168,16 +133,16 @@
+
+ Wait X seconds for cmd prompt after login
+
+-
+-
+-
+- Test X seconds for status change after ON/OFF
+-
+
+
+
+ Wait X seconds before fencing is started
+
++
++
++
++ Test X seconds for status change after ON/OFF
++
+
+
+
+diff -uNr a/tests/data/metadata/fence_drac.xml b/tests/data/metadata/fence_drac.xml
+--- a/tests/data/metadata/fence_drac.xml 2017-07-25 13:09:37.612338721 +0200
++++ b/tests/data/metadata/fence_drac.xml 2017-07-25 14:49:01.670361738 +0200
+@@ -149,8 +149,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_dummy.xml b/tests/data/metadata/fence_dummy.xml
+--- a/tests/data/metadata/fence_dummy.xml 2017-07-25 13:09:37.612338721 +0200
++++ b/tests/data/metadata/fence_dummy.xml 2017-07-25 14:49:00.886372607 +0200
+@@ -84,8 +84,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_idrac.xml b/tests/data/metadata/fence_idrac.xml
+--- a/tests/data/metadata/fence_idrac.xml 2017-07-25 13:09:37.613338707 +0200
++++ b/tests/data/metadata/fence_idrac.xml 2017-07-25 14:49:01.364365980 +0200
+@@ -204,8 +204,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo2.xml b/tests/data/metadata/fence_ilo2.xml
+--- a/tests/data/metadata/fence_ilo2.xml 2017-07-25 13:09:37.613338707 +0200
++++ b/tests/data/metadata/fence_ilo2.xml 2017-07-25 14:49:01.520363817 +0200
+@@ -175,8 +175,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo3_ssh.xml b/tests/data/metadata/fence_ilo3_ssh.xml
+--- a/tests/data/metadata/fence_ilo3_ssh.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_ilo3_ssh.xml 2017-07-25 14:49:00.997371068 +0200
+@@ -179,8 +179,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo3.xml b/tests/data/metadata/fence_ilo3.xml
+--- a/tests/data/metadata/fence_ilo3.xml 2017-07-25 13:09:37.613338707 +0200
++++ b/tests/data/metadata/fence_ilo3.xml 2017-07-25 14:49:01.260367421 +0200
+@@ -204,8 +204,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo4_ssh.xml b/tests/data/metadata/fence_ilo4_ssh.xml
+--- a/tests/data/metadata/fence_ilo4_ssh.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_ilo4_ssh.xml 2017-07-25 14:49:01.028370638 +0200
+@@ -179,8 +179,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo4.xml b/tests/data/metadata/fence_ilo4.xml
+--- a/tests/data/metadata/fence_ilo4.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_ilo4.xml 2017-07-25 14:49:01.295366936 +0200
+@@ -204,8 +204,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo_mp.xml b/tests/data/metadata/fence_ilo_mp.xml
+--- a/tests/data/metadata/fence_ilo_mp.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_ilo_mp.xml 2017-07-25 14:49:02.048356497 +0200
+@@ -169,8 +169,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo_ssh.xml b/tests/data/metadata/fence_ilo_ssh.xml
+--- a/tests/data/metadata/fence_ilo_ssh.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_ilo_ssh.xml 2017-07-25 14:49:00.964371525 +0200
+@@ -179,8 +179,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ilo.xml b/tests/data/metadata/fence_ilo.xml
+--- a/tests/data/metadata/fence_ilo.xml 2017-07-25 13:09:37.613338707 +0200
++++ b/tests/data/metadata/fence_ilo.xml 2017-07-25 14:49:01.482364344 +0200
+@@ -175,8 +175,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_imm.xml b/tests/data/metadata/fence_imm.xml
+--- a/tests/data/metadata/fence_imm.xml 2017-07-25 13:09:37.614338694 +0200
++++ b/tests/data/metadata/fence_imm.xml 2017-07-25 14:49:01.329366465 +0200
+@@ -204,8 +204,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_ipmilan.xml b/tests/data/metadata/fence_ipmilan.xml
+--- a/tests/data/metadata/fence_ipmilan.xml 2017-07-25 13:09:37.615338680 +0200
++++ b/tests/data/metadata/fence_ipmilan.xml 2017-07-25 14:49:01.226367893 +0200
+@@ -204,8 +204,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_mpath.xml b/tests/data/metadata/fence_mpath.xml
+--- a/tests/data/metadata/fence_mpath.xml 2017-07-25 13:09:37.615338680 +0200
++++ b/tests/data/metadata/fence_mpath.xml 2017-07-25 14:49:02.420351340 +0200
+@@ -97,8 +97,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_rsa.xml b/tests/data/metadata/fence_rsa.xml
+--- a/tests/data/metadata/fence_rsa.xml 2017-07-25 13:09:37.615338680 +0200
++++ b/tests/data/metadata/fence_rsa.xml 2017-07-25 14:49:02.121355485 +0200
+@@ -169,8 +169,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_rsb.xml b/tests/data/metadata/fence_rsb.xml
+--- a/tests/data/metadata/fence_rsb.xml 2017-07-25 13:09:37.615338680 +0200
++++ b/tests/data/metadata/fence_rsb.xml 2017-07-25 14:49:02.494350314 +0200
+@@ -169,8 +169,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
+diff -uNr a/tests/data/metadata/fence_scsi.xml b/tests/data/metadata/fence_scsi.xml
+--- a/tests/data/metadata/fence_scsi.xml 2017-07-25 13:09:37.616338666 +0200
++++ b/tests/data/metadata/fence_scsi.xml 2017-07-25 14:49:01.104369584 +0200
+@@ -4,11 +4,6 @@
+ The fence_scsi agent works by having each node in the cluster register a unique key with the SCSI device(s). Once registered, a single node will become the reservation holder by creating a "write exclusive, registrants only" reservation on the device(s). The result is that only registered nodes may write to the device(s). When a node failure occurs, the fence_scsi agent will remove the key belonging to the failed node from the device(s). The failed node will no longer be able to write to the device(s). A manual reboot is required.
+
+
+-
+-
+-
+- Use the APTPL flag for registrations. This option is only used for the 'on' action.
+-
+
+
+
+@@ -24,6 +19,11 @@
+
+ Key to use for the current operation. This key should be unique to a node. For the "on" action, the key specifies the key use to register the local node. For the "off" action, this key specifies the key to be removed from the device(s).
+
++
++
++
++ Use the APTPL flag for registrations. This option is only used for the 'on' action.
++
+
+
+
+@@ -96,11 +96,7 @@
+
+ Path to sg_turs binary
+
+-
+-
+- Path to corosync-cmapctl binary
+-
+-
++
+
+ Path to corosync-cmapctl binary
+
+@@ -114,8 +110,6 @@
+
+
+
+- <action name="list" />
+- <action name="list-status" />
+
+
+
diff --git a/SOURCES/bz1465436-fence_ipmilan-fix-default-method-inconsistency.patch b/SOURCES/bz1465436-fence_ipmilan-fix-default-method-inconsistency.patch
new file mode 100644
index 0000000..af6661f
--- /dev/null
+++ b/SOURCES/bz1465436-fence_ipmilan-fix-default-method-inconsistency.patch
@@ -0,0 +1,24 @@
+From 1a6d06adad10e2f3733433617fb0c80409551055 Mon Sep 17 00:00:00 2001
+From: Marek 'marx' Grac
+Date: Mon, 3 Jul 2017 10:29:24 +0200
+Subject: [PATCH] fence_ipmilan: Fix inconsistency between help and manual page
+ (default for --method)
+
+---
+ fence/agents/ipmilan/fence_ipmilan.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fence/agents/ipmilan/fence_ipmilan.py b/fence/agents/ipmilan/fence_ipmilan.py
+index 3f334713..6c43b185 100644
+--- a/fence/agents/ipmilan/fence_ipmilan.py
++++ b/fence/agents/ipmilan/fence_ipmilan.py
+@@ -178,7 +178,8 @@ def main():
+ all_opt["lanplus"]["default"] = "1"
+
+ all_opt["ipport"]["default"] = "623"
+- all_opt["method"]["help"] = "-m, --method=[method] Method to fence (onoff|cycle) (Default: cycle)\n" \
++ if all_opt["method"]["default"] == "cycle":
++ all_opt["method"]["help"] = "-m, --method=[method] Method to fence (onoff|cycle) (Default: cycle)\n" \
+ "WARNING! This fence agent might report success before the node is powered off. " \
+ "You should use -m/method onoff if your fence device works correctly with that option."
+
diff --git a/SOURCES/bz1473860-1-fence_compute-fence_scsi-fix-parameters.patch b/SOURCES/bz1473860-1-fence_compute-fence_scsi-fix-parameters.patch
new file mode 100644
index 0000000..e1b8b80
--- /dev/null
+++ b/SOURCES/bz1473860-1-fence_compute-fence_scsi-fix-parameters.patch
@@ -0,0 +1,100 @@
+diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
+--- a/fence/agents/compute/fence_compute.py 2017-07-24 12:16:11.193996848 +0200
++++ b/fence/agents/compute/fence_compute.py 2017-07-24 12:18:18.012271280 +0200
+@@ -367,7 +367,7 @@
+ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
+
+ def define_new_opts():
+- all_opt["endpoint-type"] = {
++ all_opt["endpoint_type"] = {
+ "getopt" : "e:",
+ "longopt" : "endpoint-type",
+ "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)",
+@@ -376,7 +376,7 @@
+ "default" : "internalURL",
+ "order": 1,
+ }
+- all_opt["tenant-name"] = {
++ all_opt["tenant_name"] = {
+ "getopt" : "t:",
+ "longopt" : "tenant-name",
+ "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
+@@ -385,7 +385,7 @@
+ "default" : "",
+ "order": 1,
+ }
+- all_opt["auth-url"] = {
++ all_opt["auth_url"] = {
+ "getopt" : "k:",
+ "longopt" : "auth-url",
+ "help" : "-k, --auth-url=[url] Keystone Admin Auth URL",
+@@ -394,7 +394,7 @@
+ "default" : "",
+ "order": 1,
+ }
+- all_opt["region-name"] = {
++ all_opt["region_name"] = {
+ "getopt" : "",
+ "longopt" : "region-name",
+ "help" : "--region-name=[region] Region Name",
+@@ -420,7 +420,7 @@
+ "shortdesc" : "DNS domain in which hosts live",
+ "order": 5,
+ }
+- all_opt["record-only"] = {
++ all_opt["record_only"] = {
+ "getopt" : "r:",
+ "longopt" : "record-only",
+ "help" : "--record-only Record the target as needing evacuation but as yet do not intiate it",
+@@ -429,7 +429,7 @@
+ "default" : "False",
+ "order": 5,
+ }
+- all_opt["instance-filtering"] = {
++ all_opt["instance_filtering"] = {
+ "getopt" : "",
+ "longopt" : "instance-filtering",
+ "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)",
+@@ -438,7 +438,7 @@
+ "default" : "True",
+ "order": 5,
+ }
+- all_opt["no-shared-storage"] = {
++ all_opt["no_shared_storage"] = {
+ "getopt" : "",
+ "longopt" : "no-shared-storage",
+ "help" : "--no-shared-storage Disable functionality for shared storage",
+@@ -452,9 +452,9 @@
+ global override_status
+ atexit.register(atexit_handler)
+
+- device_opt = ["login", "passwd", "tenant-name", "auth-url", "fabric_fencing",
+- "no_login", "no_password", "port", "domain", "no-shared-storage", "endpoint-type",
+- "record-only", "instance-filtering", "insecure", "region-name"]
++ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
++ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
++ "record_only", "instance_filtering", "insecure", "region_name"]
+ define_new_opts()
+ all_opt["shell_timeout"]["default"] = "180"
+
+diff -uNr a/fence/agents/scsi/fence_scsi.py b/fence/agents/scsi/fence_scsi.py
+--- a/fence/agents/scsi/fence_scsi.py 2017-07-24 12:16:11.135997637 +0200
++++ b/fence/agents/scsi/fence_scsi.py 2017-07-24 12:20:21.987584397 +0200
+@@ -332,7 +332,7 @@
+ "shortdesc" : "Log output (stdout and stderr) to file",
+ "order": 5
+ }
+- all_opt["corosync-cmap_path"] = {
++ all_opt["corosync_cmap_path"] = {
+ "getopt" : "Z:",
+ "longopt" : "corosync-cmap-path",
+ "help" : "--corosync-cmap-path=[path] Path to corosync-cmapctl binary",
+@@ -416,7 +416,7 @@
+ atexit.register(atexit_handler)
+
+ device_opt = ["no_login", "no_password", "devices", "nodename", "key",\
+- "aptpl", "fabric_fencing", "on_target", "corosync-cmap_path",\
++ "aptpl", "fabric_fencing", "on_target", "corosync_cmap_path",\
+ "sg_persist_path", "sg_turs_path", "logfile", "vgs_path", "force_on"]
+
+ define_new_opts()
diff --git a/SOURCES/bz1473860-2-fence_compute-fence_scsi-fix-parameters.patch b/SOURCES/bz1473860-2-fence_compute-fence_scsi-fix-parameters.patch
new file mode 100644
index 0000000..2d24cf4
--- /dev/null
+++ b/SOURCES/bz1473860-2-fence_compute-fence_scsi-fix-parameters.patch
@@ -0,0 +1,18 @@
+--- a/fence/agents/lib/fencing.py.py 2017-09-19 12:29:04.158438532 +0200
++++ b/fence/agents/lib/fencing.py.py 2017-09-19 12:48:22.252509114 +0200
+@@ -705,11 +705,12 @@
+ continue
+
+ (name, value) = (line + "=").split("=", 1)
+- name = name.replace("-", "_");
+ value = value[:-1]
+
+- if name in mapping_longopt_names:
+- name = mapping_longopt_names[name]
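++ # stdin option names may be spelled with dashes or underscores; map either
++ # form to the canonical longopt (e.g. "tenant-name" and "tenant_name")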
++ if name.replace("-", "_") in mapping_longopt_names:
++ name = mapping_longopt_names[name.replace("-", "_")]
++ elif name.replace("_", "-") in mapping_longopt_names:
++ name = mapping_longopt_names[name.replace("_", "-")]
+
+ if avail_opt.count(name) == 0 and name in ["nodename"]:
+ continue
diff --git a/SOURCES/bz1476009-fence_azure_arm-new-fence-agent.patch b/SOURCES/bz1476009-fence_azure_arm-new-fence-agent.patch
new file mode 100644
index 0000000..78de0e0
--- /dev/null
+++ b/SOURCES/bz1476009-fence_azure_arm-new-fence-agent.patch
@@ -0,0 +1,331 @@
+diff -uNr a/configure.ac b/configure.ac
+--- a/configure.ac 2017-10-05 13:09:31.369561411 +0200
++++ b/configure.ac 2017-10-05 13:16:02.860680521 +0200
+@@ -268,6 +268,7 @@
+ fence/agents/amt/Makefile
+ fence/agents/amt_ws/Makefile
+ fence/agents/aws/Makefile
++ fence/agents/azure_arm/Makefile
+ fence/agents/bladecenter/Makefile
+ fence/agents/brocade/Makefile
+ fence/agents/cisco_mds/Makefile
+diff -uNr a/fence/agents/azure_arm/fence_azure_arm.py b/fence/agents/azure_arm/fence_azure_arm.py
+--- a/fence/agents/azure_arm/fence_azure_arm.py 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/azure_arm/fence_azure_arm.py 2017-10-05 13:14:46.755434886 +0200
+@@ -0,0 +1,149 @@
++#!/usr/bin/python -tt
++
++import sys, re, pexpect
++import logging
++import atexit
++sys.path.append("/usr/share/fence")
++from fencing import *
++from fencing import fail, fail_usage, EC_TIMED_OUT, run_delay
++
++#BEGIN_VERSION_GENERATION
++RELEASE_VERSION="4.0.25.34-695e-dirty"
++BUILD_DATE="(built Wed Jun 28 08:13:44 UTC 2017)"
++REDHAT_COPYRIGHT="Copyright (C) Red Hat, Inc. 2004-2010 All rights reserved."
++#END_VERSION_GENERATION
++
++def get_nodes_list(compute_client, options):
++ result = {}
++ if compute_client:
++ rgName = options["--resourceGroup"]
++ vms = compute_client.virtual_machines.list(rgName)
++ try:
++ for vm in vms:
++ result[vm.name] = ("", None)
++ except Exception as e:
++ fail_usage("Failed: %s" % e)
++
++ return result
++
++def get_power_status(compute_client, options):
++ logging.info("getting power status for VM " + options["--plug"])
++
++ if compute_client:
++ rgName = options["--resourceGroup"]
++ vmName = options["--plug"]
++
++ powerState = "unknown"
++ try:
++ vmStatus = compute_client.virtual_machines.get(rgName, vmName, "instanceView")
++ except Exception as e:
++ fail_usage("Failed: %s" % e)
++ for status in vmStatus.instance_view.statuses:
++ if status.code.startswith("PowerState"):
++ powerState = status.code
++ break
++
++ logging.info("Found power state of VM: " + powerState)
++ if powerState == "PowerState/running":
++ return "on"
++
++ return "off"
++
++def set_power_status(compute_client, options):
++ logging.info("setting power status for VM " + options["--plug"] + " to " + options["--action"])
++
++ if compute_client:
++ rgName = options["--resourceGroup"]
++ vmName = options["--plug"]
++
++ if (options["--action"]=="off"):
++ logging.info("Deallocating " + vmName + "in resource group " + rgName)
++ compute_client.virtual_machines.deallocate(rgName, vmName)
++ elif (options["--action"]=="on"):
++ logging.info("Starting " + vmName + "in resource group " + rgName)
++ compute_client.virtual_machines.start(rgName, vmName)
++
++
++def define_new_opts():
++ all_opt["resourceGroup"] = {
++ "getopt" : ":",
++ "longopt" : "resourceGroup",
++ "help" : "--resourceGroup=[name] Name of the resource group",
++ "shortdesc" : "Name of resource group.",
++ "required" : "1",
++ "order" : 2
++ }
++ all_opt["tenantId"] = {
++ "getopt" : ":",
++ "longopt" : "tenantId",
++ "help" : "--tenantId=[name] Id of the Azure Active Directory tenant",
++ "shortdesc" : "Id of Azure Active Directory tenant.",
++ "required" : "1",
++ "order" : 3
++ }
++ all_opt["subscriptionId"] = {
++ "getopt" : ":",
++ "longopt" : "subscriptionId",
++ "help" : "--subscriptionId=[name] Id of the Azure subscription",
++ "shortdesc" : "Id of the Azure subscription.",
++ "required" : "1",
++ "order" : 4
++ }
++
++# Main agent method
++def main():
++ compute_client = None
++
++ device_opt = ["resourceGroup", "login", "passwd", "tenantId", "subscriptionId","port"]
++
++ atexit.register(atexit_handler)
++
++ define_new_opts()
++
++ all_opt["power_timeout"]["default"] = "150"
++
++ all_opt["login"]["help"] = "-l, --username=[appid] Application ID"
++ all_opt["passwd"]["help"] = "-p, --password=[authkey] Authentication key"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for Azure Resource Manager"
++ docs["longdesc"] = "Used to deallocate virtual machines and to report power state of virtual machines running in Azure. It uses Azure SDK for Python to connect to Azure.\
++\n.P\n\
++For instructions to setup credentials see: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal\
++\n.P\n\
++Username and password are application ID and authentication key from \"App registrations\"."
++ docs["vendorurl"] = "http://www.microsoft.com"
++ show_docs(options, docs)
++
++ run_delay(options)
++
++ try:
++ from azure.common.credentials import ServicePrincipalCredentials
++ from azure.mgmt.compute import ComputeManagementClient
++
++ tenantid = options["--tenantId"]
++ servicePrincipal = options["--username"]
++ spPassword = options["--password"]
++ subscriptionId = options["--subscriptionId"]
++ credentials = ServicePrincipalCredentials(
++ client_id = servicePrincipal,
++ secret = spPassword,
++ tenant = tenantid
++ )
++ compute_client = ComputeManagementClient(
++ credentials,
++ subscriptionId
++ )
++ except ImportError:
++ fail_usage("Azure Resource Manager Python SDK not found or not accessible")
++ except Exception as e:
++ fail_usage("Failed: %s" % re.sub("^, ", "", str(e)))
++
++ # Operate the fencing device
++ result = fence_action(compute_client, options, set_power_status, get_power_status, get_nodes_list)
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
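+
+Example invocation (illustrative IDs; the username and password are the
+application ID and authentication key from "App registrations"):
+
+    fence_azure_arm -l <appId> -p <authKey> --resourceGroup=myResourceGroup \
+        --tenantId=<tenantId> --subscriptionId=<subscriptionId> --action=list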
+diff -uNr a/fence/agents/azure_arm/Makefile.am b/fence/agents/azure_arm/Makefile.am
+--- a/fence/agents/azure_arm/Makefile.am 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/azure_arm/Makefile.am 2017-10-05 13:55:41.206064062 +0200
+@@ -0,0 +1,17 @@
++MAINTAINERCLEANFILES = Makefile.in
++
++TARGET = fence_azure_arm
++
++SRC = $(TARGET).py
++
++EXTRA_DIST = $(SRC)
++
++sbin_SCRIPTS = $(TARGET)
++
++man_MANS = $(TARGET).8
++
++FENCE_TEST_ARGS = -l test -p test -n 1
++
++include $(top_srcdir)/make/fencebuild.mk
++include $(top_srcdir)/make/fenceman.mk
++include $(top_srcdir)/make/agentpycheck.mk
+diff -uNr a/tests/data/metadata/fence_azure_arm.xml b/tests/data/metadata/fence_azure_arm.xml
+--- a/tests/data/metadata/fence_azure_arm.xml 1970-01-01 01:00:00.000000000 +0100
++++ b/tests/data/metadata/fence_azure_arm.xml 2017-10-05 13:18:35.373168796 +0200
+@@ -0,0 +1,142 @@
++
++
++Used to deallocate virtual machines and to report power state of virtual machines running in Azure. It uses Azure SDK for Python to connect to Azure.
++.P
++For instructions to setup credentials see: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal
++.P
++Username and password are application ID and authentication key from "App registrations".
++http://www.microsoft.com
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Fencing Action
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Login Name
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Login Name
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Name of resource group.
++
++
++
++
++ Id of Azure Active Directory tenant.
++
++
++
++
++ Id of the Azure subscription.
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Separator for CSV created by operation list
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
++
++
++
diff --git a/SOURCES/bz1476401-1-adhere-no_status-in-fence_action.patch b/SOURCES/bz1476401-1-adhere-no_status-in-fence_action.patch
new file mode 100644
index 0000000..e9741b2
--- /dev/null
+++ b/SOURCES/bz1476401-1-adhere-no_status-in-fence_action.patch
@@ -0,0 +1,22 @@
+--- a/fence/agents/lib/fencing.py.py 2017-09-28 13:23:38.300085104 +0200
++++ b/fence/agents/lib/fencing.py.py 2017-09-28 13:43:36.007103410 +0200
+@@ -1025,10 +1028,15 @@
+ print outlet_id + options["--separator"] + alias + options["--separator"] + status
+ return
+
+- status = get_multi_power_fn(tn, options, get_power_fn)
+-
+- if status != "on" and status != "off":
+- fail(EC_STATUS)
++ if options["--action"] == "monitor" and not "port" in options["device_opt"] and "no_status" in options["device_opt"]:
++ # Unable to do standard monitoring because 'status' action is not available
++ return 0
++
++ status = None
++ if not "no_status" in options["device_opt"]:
++ status = get_multi_power_fn(tn, options, get_power_fn)
++ if status != "on" and status != "off":
++ fail(EC_STATUS)
+
+ if options["--action"] == status:
+ if not (status == "on" and "force_on" in options["device_opt"]):
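The hunk above reads more easily outside the diff; a condensed model of the new monitor/status dispatch (simplified from fencing.py, with get_multi_power_fn stubbed as a plain callback):

    # Sketch: agents declaring "no_status" cannot answer a status query,
    # so "monitor" succeeds trivially and the on/off check is skipped
    def monitor_or_status(options, get_multi_power_fn):
        opts = options["device_opt"]
        if options["--action"] == "monitor" \
                and "port" not in opts and "no_status" in opts:
            return 0  # nothing to probe, report success

        status = None
        if "no_status" not in opts:
            status = get_multi_power_fn()
            if status not in ("on", "off"):
                raise RuntimeError("unexpected power status: %r" % status)
        return status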
diff --git a/SOURCES/bz1476401-2-enhance-run_delay.patch b/SOURCES/bz1476401-2-enhance-run_delay.patch
new file mode 100644
index 0000000..8cfda40
--- /dev/null
+++ b/SOURCES/bz1476401-2-enhance-run_delay.patch
@@ -0,0 +1,39 @@
+--- a/fence/agents/lib/fencing.py.py 2017-09-28 13:45:20.920463487 +0200
++++ b/fence/agents/lib/fencing.py.py 2017-09-28 13:47:56.844961761 +0200
+@@ -1075,6 +1075,9 @@
+ # fence action was completed successfully even in that case
+ logging.warning("%s", str(ex))
+
++ # switch back to the original action in case it is used later on
++ options["--action"] = "reboot"
++
+ if power_on == False:
+ # this should not fail as node was fenced successfully
+ logging.error('Timed out waiting to power ON\n')
+@@ -1278,11 +1281,21 @@
+
+ return (status, pipe_stdout, pipe_stderr)
+
+-def run_delay(options):
+- ## Delay is important for two-node clusters fencing but we do not need to delay 'status' operations
+- if options["--action"] in ["off", "reboot"]:
+- logging.info("Delay %s second(s) before logging in to the fence device", options["--delay"])
+- time.sleep(int(options["--delay"]))
++def run_delay(options, reserve=0, result=0):
++ ## Delay is important for two-node clusters fencing
++ ## but we do not need to delay 'status' operations
++ ## and we can bail out quickly if we already know the action is going to fail;
++ ## callers that still need to do something right before fencing can reserve time
++ if options["--action"] in ["off", "reboot"] \
++ and options["--delay"] != "0" \
++ and result == 0 \
++ and reserve >= 0:
++ time_left = 1 + int(options["--delay"]) - (time.time() - run_delay.time_start) - reserve
++ if time_left > 0:
++ logging.info("Delay %d second(s) before logging in to the fence device", time_left)
++ time.sleep(time_left)
++# mark time when fence-agent is started
++run_delay.time_start = time.time()
+
+ def fence_logout(conn, logout_string, sleep=0):
+ # Logout is not required part of fencing but we should attempt to do it properly
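The timing arithmetic added above is easy to misread in diff form; the same formula as a small standalone sketch (names simplified):

    import time

    START = time.time()  # corresponds to run_delay.time_start

    def remaining_delay(delay, reserve=0):
        # The delay window is measured from agent start-up, not from the
        # moment run_delay() is called, so time already spent (e.g. on a
        # ping heuristic) counts against it; 'reserve' holds time back
        # for work that must happen right before fencing.
        elapsed = time.time() - START
        return max(0, 1 + delay - elapsed - reserve)

    # e.g. --delay=10 with 3s already elapsed and 4s reserved
    # sleeps for at most 4 more seconds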
diff --git a/SOURCES/bz1476401-3-add-sync_set_power-to-fence_action.patch b/SOURCES/bz1476401-3-add-sync_set_power-to-fence_action.patch
new file mode 100644
index 0000000..c63a78c
--- /dev/null
+++ b/SOURCES/bz1476401-3-add-sync_set_power-to-fence_action.patch
@@ -0,0 +1,91 @@
+--- a/fence/agents/lib/fencing.py.py.orig 2017-10-16 13:51:44.994248524 +0200
++++ b/fence/agents/lib/fencing.py.py 2017-10-16 14:09:05.576468918 +0200
+@@ -938,7 +938,7 @@
+
+ return status
+
+-def set_multi_power_fn(tn, options, set_power_fn, get_power_fn, retry_attempts = 1):
++def async_set_multi_power_fn(tn, options, set_power_fn, get_power_fn, retry_attempts):
+ plugs = options["--plugs"] if options.has_key("--plugs") else [""]
+
+ for _ in range(retry_attempts):
+@@ -961,6 +961,39 @@
+ return True
+ return False
+
++def sync_set_multi_power_fn(tn, options, sync_set_power_fn, retry_attempts):
++ success = True
++ plugs = options["--plugs"] if options.has_key("--plugs") else [""]
++
++ for plug in plugs:
++ try:
++ options["--uuid"] = str(uuid.UUID(plug))
++ except ValueError:
++ pass
++ except KeyError:
++ pass
++
++ options["--plug"] = plug
++ for retry in range(retry_attempts):
++ if sync_set_power_fn(tn, options):
++ break
++ if retry == retry_attempts-1:
++ success = False
++ time.sleep(int(options["--power-wait"]))
++
++ return success
++
++def set_multi_power_fn(tn, options, set_power_fn, get_power_fn, sync_set_power_fn, retry_attempts=1):
++
++ if set_power_fn != None:
++ if get_power_fn != None:
++ return async_set_multi_power_fn(tn, options, set_power_fn, get_power_fn, retry_attempts)
++ elif sync_set_power_fn != None:
++ return sync_set_multi_power_fn(tn, options, sync_set_power_fn, retry_attempts)
++
++ return False
++
++
+ def show_docs(options, docs=None):
+ device_opt = options["device_opt"]
+
+@@ -986,7 +1019,7 @@
+ print __main__.REDHAT_COPYRIGHT
+ sys.exit(0)
+
+-def fence_action(tn, options, set_power_fn, get_power_fn, get_outlet_list=None, reboot_cycle_fn=None):
++def fence_action(tn, options, set_power_fn, get_power_fn, get_outlet_list=None, reboot_cycle_fn=None, sync_set_power_fn=None):
+ result = 0
+
+ try:
+@@ -1042,12 +1075,12 @@
+ return 0
+
+ if options["--action"] == "on":
+- if set_multi_power_fn(tn, options, set_power_fn, get_power_fn, 1 + int(options["--retry-on"])):
++ if set_multi_power_fn(tn, options, set_power_fn, get_power_fn, sync_set_power_fn, 1 + int(options["--retry-on"])):
+ print "Success: Powered ON"
+ else:
+ fail(EC_WAITING_ON)
+ elif options["--action"] == "off":
+- if set_multi_power_fn(tn, options, set_power_fn, get_power_fn):
++ if set_multi_power_fn(tn, options, set_power_fn, get_power_fn, sync_set_power_fn):
+ print "Success: Powered OFF"
+ else:
+ fail(EC_WAITING_OFF)
+@@ -1065,13 +1098,13 @@
+ else:
+ if status != "off":
+ options["--action"] = "off"
+- if not set_multi_power_fn(tn, options, set_power_fn, get_power_fn):
++ if not set_multi_power_fn(tn, options, set_power_fn, get_power_fn, sync_set_power_fn):
+ fail(EC_WAITING_OFF)
+
+ options["--action"] = "on"
+
+ try:
+- power_on = set_multi_power_fn(tn, options, set_power_fn, get_power_fn, int(options["--retry-on"]))
++ power_on = set_multi_power_fn(tn, options, set_power_fn, get_power_fn, sync_set_power_fn, int(options["--retry-on"]))
+ except Exception, ex:
+ # an error occurred during the power ON phase of reboot
+ # fence action was completed successfully even in that case
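A minimal model of the dispatch this patch introduces: asynchronous agents supply set+get callbacks and are polled until the status matches, synchronous agents supply one callback whose return value already signals success (a simplified sketch, not the library code):

    import time

    def set_multi_power(options, set_fn=None, get_fn=None, sync_fn=None,
                        retries=1):
        if set_fn is not None and get_fn is not None:
            # async path: issue the action, then poll for the new status
            for _ in range(retries):
                set_fn(options)
                time.sleep(int(options["--power-wait"]))
                for _ in range(int(options["--power-timeout"])):
                    if get_fn(options) == options["--action"]:
                        return True
                    time.sleep(1)
            return False
        if sync_fn is not None:
            # sync path: the callback itself reports success or failure
            return any(sync_fn(options) for _ in range(retries))
        return False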
diff --git a/SOURCES/bz1476401-4-add-fence_heuristics_ping.patch b/SOURCES/bz1476401-4-add-fence_heuristics_ping.patch
new file mode 100644
index 0000000..81456f6
--- /dev/null
+++ b/SOURCES/bz1476401-4-add-fence_heuristics_ping.patch
@@ -0,0 +1,414 @@
+diff --git a/configure.ac b/configure.ac
+index 8acfef9..f9f29cf 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -214,6 +214,27 @@ AC_PATH_PROG([SNMPGET_PATH], [snmpget], [/usr/bin/snmpget])
+ AC_PATH_PROG([MPATH_PATH], [mpathpersist], [/usr/sbin/mpathpersist])
+ AC_PATH_PROG([SUDO_PATH], [mpathpersist], [/usr/bin/sudo])
+
++AC_PATH_PROG([PING_CMD], [ping])
++AC_PATH_PROG([PING6_CMD], [ping6])
++AC_PATH_PROG([PING4_CMD], [ping4])
++
++if test "x${ac_cv_path_PING_CMD}" = x; then
++ # assume multicall-ping just not available in build-environment
++ PING_CMD="/bin/ping"
++ PING4_CMD="/bin/ping -4"
++ PING6_CMD="/bin/ping -6"
++elif test "x${ac_cv_path_PING6_CMD}" = x; then
++ # just IPv4
++ PING4_CMD="${ac_cv_path_PING_CMD}"
++elif test -L ${ac_cv_path_PING6_CMD}; then
++ # assume multicall-ping
++ PING4_CMD="${ac_cv_path_PING_CMD} -4"
++else
++ # ping is just IPv4
++ PING4_CMD="${ac_cv_path_PING_CMD}"
++fi
++
++
+ ## do subst
+
+ AC_SUBST([DEFAULT_CONFIG_DIR])
+@@ -278,6 +278,7 @@
+ fence/agents/eaton_snmp/Makefile
+ fence/agents/emerson/Makefile
+ fence/agents/eps/Makefile
++ fence/agents/heuristics_ping/Makefile
+ fence/agents/hpblade/Makefile
+ fence/agents/ibmblade/Makefile
+ fence/agents/ipdu/Makefile
+--- /dev/null 2017-09-27 08:35:37.286500265 +0200
++++ b/fence/agents/heuristics_ping/Makefile.am 2017-09-28 15:27:42.605317632 +0200
+@@ -0,0 +1,20 @@
++MAINTAINERCLEANFILES = Makefile.in
++
++TARGET = fence_heuristics_ping
++
++SRC = $(TARGET).py
++
++EXTRA_DIST = $(SRC)
++
++sbin_SCRIPTS = $(TARGET)
++
++man_MANS = $(TARGET).8
++
++FENCE_TEST_ARGS = --ping-targets=localhost
++
++include $(top_srcdir)/make/fencebuild.mk
++include $(top_srcdir)/make/fenceman.mk
++include $(top_srcdir)/make/agentpycheck.mk
++
++clean-local: clean-man
++ rm -f $(TARGET)
+diff --git a/doc/COPYRIGHT b/doc/COPYRIGHT
+index 8124c53..49f88c6 100644
+--- a/doc/COPYRIGHT
++++ b/doc/COPYRIGHT
+@@ -58,6 +58,7 @@ Joel Becker
+ Jonathan Brassow
+ jparsons
+ Ken Preslan
++Klaus Wenninger
+ Lon Hohberger
+ Marc - A. Dahlhaus
+ Marek 'marx' Grac
+diff --git a/fence/agents/heuristics_ping/fence_heuristics_ping.py b/fence/agents/heuristics_ping/fence_heuristics_ping.py
+new file mode 100644
+index 0000000..b21d6a4
+--- /dev/null
++++ b/fence/agents/heuristics_ping/fence_heuristics_ping.py
+@@ -0,0 +1,200 @@
++#!/usr/bin/python -tt
++
++# The Following Agent Has Been Tested On:
++#
++# RHEL 7.4
++#
++
++import io
++import re
++import subprocess
++import shlex
++import sys, stat
++import logging
++import os
++import atexit
++sys.path.append("@FENCEAGENTSLIBDIR@")
++from fencing import fail_usage, run_command, fence_action, all_opt
++from fencing import atexit_handler, check_input, process_input, show_docs
++from fencing import run_delay
++
++def ping_test(con, options):
++ # Send pings to the targets
++
++ if options["--action"] == "on":
++ # we want unfencing to always succeed
++ return True
++
++ if not "--ping-targets" in options or options["--ping-targets"] == "":
++ # "off" was requested so fake "on" to provoke failure
++ logging.error("ping target required")
++ return False
++
++ timeout = int(options["--ping-timeout"])
++ count = int(options["--ping-count"])
++ interval = int(options["--ping-interval"])
++ good_required = int(options["--ping-good-count"])
++ maxfail = int(options["--ping-maxfail"])
++ targets = options["--ping-targets"].split(",")
++ exitcode = True
++ p = {}
++ failcount = 0
++ # search string for parsing the results of the ping-executable
++ packet_count = re.compile(r".*transmitted, ([0-9]*) received.*")
++
++ # start a ping-process per target
++ for target in targets:
++ ping_path = '@PING_CMD@'
++ target_mangled = target
++ if target.startswith('inet6:'):
++ if '@PING6_CMD@' == '':
++ p[target] = None
++ continue
++ ping_path = '@PING6_CMD@'
++ target_mangled = target[len('inet6:'):]  # strip the prefix; lstrip() would remove a character set
++ elif target.startswith('inet:'):
++ ping_path = '@PING4_CMD@'
++ target_mangled = target[len('inet:'):]
++
++ ping_cmd = "%s -n -q -W %d -c %d -i %d %s" % (
++ ping_path, timeout, count, interval, target_mangled)
++ logging.info("Running command: %s", ping_cmd)
++ try:
++ p[target] = subprocess.Popen(shlex.split(ping_cmd),
++ stdout=subprocess.PIPE)
++ except OSError:
++ p[target] = None
++
++ # collect the results of the ping-processes
++ for target in targets:
++ good = 0
++ if p[target] != None:
++ p[target].wait()
++ if p[target].returncode == 0:
++ for line in p[target].stdout:
++ searchres = packet_count.search(line)
++ if searchres:
++ good = int(searchres.group(1))
++ break
++ if good >= good_required:
++ logging.info("ping target %s received %d of %d" \
++ % (target, good, count))
++ continue
++ failcount += 1
++ logging.info("ping target %s received %d of %d and thus failed"
++ % (target, good, count))
++ else:
++ failcount += 1
++ logging.error("ping target %s failed on OS level" % target)
++
++ if failcount > maxfail:
++ exitcode = False
++
++ return exitcode
++
++
++def define_new_opts():
++ all_opt["ping_count"] = {
++ "getopt" : ":",
++ "longopt" : "ping-count",
++ "required" : "0",
++ "help" : "--ping-count=[number] Number of ping-probes to send",
++ "shortdesc" : "The number of ping-probes that is being sent per target",
++ "default" : "10",
++ "order" : 1
++ }
++
++ all_opt["ping_good_count"] = {
++ "getopt" : ":",
++ "longopt" : "ping-good-count",
++ "required" : "0",
++ "help" : "--ping-good-count=[number] Number of positive ping-probes required",
++ "shortdesc" : "The number of positive ping-probes required to account a target as available",
++ "default" : "8",
++ "order" : 1
++ }
++
++ all_opt["ping_interval"] = {
++ "getopt" : ":",
++ "longopt" : "ping-interval",
++ "required" : "0",
++ "help" : "--ping-interval=[seconds] Seconds between ping-probes",
++ "shortdesc" : "The interval in seconds between ping-probes",
++ "default" : "1",
++ "order" : 1
++ }
++
++ all_opt["ping_timeout"] = {
++ "getopt" : ":",
++ "longopt" : "ping-timeout",
++ "required" : "0",
++ "help" : "--ping-timeout=[seconds] Timeout for individual ping-probes",
++ "shortdesc" : "The timeout in seconds till an individual ping-probe is accounted as lost",
++ "default" : "2",
++ "order" : 1
++ }
++
++ all_opt["ping_maxfail"] = {
++ "getopt" : ":",
++ "longopt" : "ping-maxfail",
++ "required" : "0",
++ "help" : "--ping-maxfail=[number] Number of failed ping-targets allowed",
++ "shortdesc" : "The number of failed ping-targets to still account as overall success",
++ "default" : "0",
++ "order" : 1
++ }
++
++ all_opt["ping_targets"] = {
++ "getopt" : ":",
++ "longopt" : "ping-targets",
++ "required" : "1",
++ "help" : "--ping-targets=tgt1,[inet6:]tgt2 Comma separated list of ping-targets",
++ "shortdesc" : "A comma separated list of ping-targets (optionally prepended by 'inet:' or 'inet6:') to be probed",
++ "default" : "",
++ "order" : 1
++ }
++
++
++def main():
++ device_opt = ["no_status", "no_password", "ping_count", "ping_good_count",
++ "ping_interval", "ping_timeout", "ping_maxfail", "ping_targets", "method"]
++ define_new_opts()
++ atexit.register(atexit_handler)
++
++ all_opt["method"]["default"] = "cycle"
++ all_opt["method"]["help"] = "-m, --method=[method] Method to fence (cycle|onoff) (Default: cycle)"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for ping-heuristic based fencing"
++ docs["longdesc"] = "fence_heuristics_ping uses ping-heuristics to control execution of another fence agent on the same fencing level.\
++\n.P\n\
++This is not a fence agent by itself! \
++Its only purpose is to enable/disable another fence agent that lives on the same fencing level but after fence_heuristics_ping.\
++\n.P\n\
++fence_heuristics_ping is currently provided as tech preview in RHEL-7.5."
++ docs["vendorurl"] = ""
++ show_docs(options, docs)
++
++ # move the ping-test to the end of the time-window set via --delay,
++ # so as to give the network time to settle after the incident that
++ # caused fencing, and to keep the results as current as possible
++ max_pingcheck = (int(options["--ping-count"]) - 1) * \
++ int(options["--ping-interval"]) + int(options["--ping-timeout"])
++ run_delay(options, reserve=max_pingcheck)
++
++ result = fence_action(\
++ None, \
++ options, \
++ None, \
++ None, \
++ reboot_cycle_fn = ping_test,
++ sync_set_power_fn = ping_test)
++
++ # execute the remaining delay
++ run_delay(options, result=result)
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
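The core of ping_test() is one ping process per target plus parsing of the "transmitted, ... received" summary line; a trimmed, runnable sketch of that step, assuming a Linux iputils ping (defaults mirror the agent's options):

    import re
    import shlex
    import subprocess

    PACKET_COUNT = re.compile(r".*transmitted, ([0-9]*) received.*")

    def probe(target, count=10, interval=1, timeout=2):
        # -n numeric, -q summary only, -W per-probe timeout in seconds
        cmd = "ping -n -q -W %d -c %d -i %d %s" % (
            timeout, count, interval, target)
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
                                universal_newlines=True)
        out, _ = proc.communicate()
        if proc.returncode != 0:
            return 0
        for line in out.splitlines():
            match = PACKET_COUNT.search(line)
            if match:
                return int(match.group(1))  # packets received
        return 0

    # probe("localhost") >= 8 reproduces the default
    # --ping-count=10 / --ping-good-count=8 decision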
+diff --git a/make/fencebuild.mk b/make/fencebuild.mk
+index 0a1f2bc..25bb0f1 100644
+--- a/make/fencebuild.mk
++++ b/make/fencebuild.mk
+@@ -28,5 +28,8 @@ define gen_agent_from_py
+ -e 's#@''STORE_PATH@#${CLUSTERVARRUN}#g' \
+ -e 's#@''SUDO_PATH@#${SUDO_PATH}#g' \
++ -e 's#@''PING_CMD@#${PING_CMD}#g' \
++ -e 's#@''PING6_CMD@#${PING6_CMD}#g' \
++ -e 's#@''PING4_CMD@#${PING4_CMD}#g' \
+ > $@
+
+ if [ 0 -eq `echo "$(SRC)" | grep fence_ &> /dev/null; echo $$?` ]; then \
+--- /dev/null 2017-10-08 13:42:59.634387493 +0200
++++ fence-agents-4.0.11/tests/data/metadata/fence_heuristics_ping.xml 2017-10-18 20:55:23.978815450 +0200
+@@ -0,0 +1,117 @@
++
++
++fence_heuristics_ping uses ping-heuristics to control execution of another fence agent on the same fencing level.
++.P
++This is not a fence agent by itself! Its only purpose is to enable/disable another fence agent that lives on the same fencing level but after fence_heuristics_ping.
++.P
++fence_heuristics_ping is currently provided as tech preview in RHEL-7.5.
++
++
++
++
++
++ The interval in seconds between ping-probes
++
++
++
++
++ The maximum number of failed ping-targets that still counts as an overall success
++
++
++
++
++ A comma-separated list of ping-targets (optionally prefixed with 'inet:' or 'inet6:') to be probed
++
++
++
++
++ Fencing Action
++
++
++
++
++ The number of positive ping-probes required to count a target as available
++
++
++
++
++ The timeout in seconds until an individual ping-probe is counted as lost
++
++
++
++
++ The number of ping-probes that are sent per target
++
++
++
++
++
++
++
++ Method to fence (onoff|cycle)
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
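The two bz1476401 pieces compose in the agent's main() above: it reserves the worst-case duration of the ping check inside the --delay window. That worst case is simple arithmetic:

    # Worst case for one target: (count - 1) inter-probe gaps plus the
    # timeout of the final probe; targets are probed concurrently
    def max_pingcheck(count=10, interval=1, timeout=2):
        return (count - 1) * interval + timeout

    assert max_pingcheck() == 11  # default options reserve 11 seconds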
diff --git a/SOURCES/bz1479851-fence_compute-fence_scsi-fix-parameters.patch b/SOURCES/bz1479851-fence_compute-fence_scsi-fix-parameters.patch
deleted file mode 100644
index e1b8b80..0000000
--- a/SOURCES/bz1479851-fence_compute-fence_scsi-fix-parameters.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
---- a/fence/agents/compute/fence_compute.py 2017-07-24 12:16:11.193996848 +0200
-+++ b/fence/agents/compute/fence_compute.py 2017-07-24 12:18:18.012271280 +0200
-@@ -367,7 +367,7 @@
- logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
-
- def define_new_opts():
-- all_opt["endpoint-type"] = {
-+ all_opt["endpoint_type"] = {
- "getopt" : "e:",
- "longopt" : "endpoint-type",
- "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)",
-@@ -376,7 +376,7 @@
- "default" : "internalURL",
- "order": 1,
- }
-- all_opt["tenant-name"] = {
-+ all_opt["tenant_name"] = {
- "getopt" : "t:",
- "longopt" : "tenant-name",
- "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
-@@ -385,7 +385,7 @@
- "default" : "",
- "order": 1,
- }
-- all_opt["auth-url"] = {
-+ all_opt["auth_url"] = {
- "getopt" : "k:",
- "longopt" : "auth-url",
- "help" : "-k, --auth-url=[url] Keystone Admin Auth URL",
-@@ -394,7 +394,7 @@
- "default" : "",
- "order": 1,
- }
-- all_opt["region-name"] = {
-+ all_opt["region_name"] = {
- "getopt" : "",
- "longopt" : "region-name",
- "help" : "--region-name=[region] Region Name",
-@@ -420,7 +420,7 @@
- "shortdesc" : "DNS domain in which hosts live",
- "order": 5,
- }
-- all_opt["record-only"] = {
-+ all_opt["record_only"] = {
- "getopt" : "r:",
- "longopt" : "record-only",
- "help" : "--record-only Record the target as needing evacuation but as yet do not intiate it",
-@@ -429,7 +429,7 @@
- "default" : "False",
- "order": 5,
- }
-- all_opt["instance-filtering"] = {
-+ all_opt["instance_filtering"] = {
- "getopt" : "",
- "longopt" : "instance-filtering",
- "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)",
-@@ -438,7 +438,7 @@
- "default" : "True",
- "order": 5,
- }
-- all_opt["no-shared-storage"] = {
-+ all_opt["no_shared_storage"] = {
- "getopt" : "",
- "longopt" : "no-shared-storage",
- "help" : "--no-shared-storage Disable functionality for shared storage",
-@@ -452,9 +452,9 @@
- global override_status
- atexit.register(atexit_handler)
-
-- device_opt = ["login", "passwd", "tenant-name", "auth-url", "fabric_fencing",
-- "no_login", "no_password", "port", "domain", "no-shared-storage", "endpoint-type",
-- "record-only", "instance-filtering", "insecure", "region-name"]
-+ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
-+ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
-+ "record_only", "instance_filtering", "insecure", "region_name"]
- define_new_opts()
- all_opt["shell_timeout"]["default"] = "180"
-
-diff -uNr a/fence/agents/scsi/fence_scsi.py b/fence/agents/scsi/fence_scsi.py
---- a/fence/agents/scsi/fence_scsi.py 2017-07-24 12:16:11.135997637 +0200
-+++ b/fence/agents/scsi/fence_scsi.py 2017-07-24 12:20:21.987584397 +0200
-@@ -332,7 +332,7 @@
- "shortdesc" : "Log output (stdout and stderr) to file",
- "order": 5
- }
-- all_opt["corosync-cmap_path"] = {
-+ all_opt["corosync_cmap_path"] = {
- "getopt" : "Z:",
- "longopt" : "corosync-cmap-path",
- "help" : "--corosync-cmap-path=[path] Path to corosync-cmapctl binary",
-@@ -416,7 +416,7 @@
- atexit.register(atexit_handler)
-
- device_opt = ["no_login", "no_password", "devices", "nodename", "key",\
-- "aptpl", "fabric_fencing", "on_target", "corosync-cmap_path",\
-+ "aptpl", "fabric_fencing", "on_target", "corosync_cmap_path",\
- "sg_persist_path", "sg_turs_path", "logfile", "vgs_path", "force_on"]
-
- define_new_opts()
diff --git a/SOURCES/bz1490475-fence_ilo_ssh-fix-hard-reset.patch b/SOURCES/bz1490475-fence_ilo_ssh-fix-hard-reset.patch
new file mode 100644
index 0000000..6209591
--- /dev/null
+++ b/SOURCES/bz1490475-fence_ilo_ssh-fix-hard-reset.patch
@@ -0,0 +1,78 @@
+From d41299f89086277d291695f0c16538b2a0f54fd9 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 12 Sep 2017 16:00:46 +0200
+Subject: [PATCH 1/3] fence_ilo_ssh: fix hard reset
+
+---
+ fence/agents/ilo_ssh/fence_ilo_ssh.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fence/agents/ilo_ssh/fence_ilo_ssh.py b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+index b0366157..fc44f98b 100644
+--- a/fence/agents/ilo_ssh/fence_ilo_ssh.py
++++ b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+@@ -29,7 +29,7 @@ def set_power_status(conn, options):
+ return
+
+ def reboot_cycle(conn, options):
+- conn.send_eol("reset hard /system1")
++ conn.send_eol("reset /system1 hard")
+ conn.log_expect(options, options["--command-prompt"], int(options["--power-timeout"]))
+ return
+
+
+From 8a9252657455a850ae2389ad6532d01dd3d8c5a4 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Wed, 25 Oct 2017 11:21:06 +0200
+Subject: [PATCH 2/3] fence_ilo_ssh: return True in reboot_cycle()
+
+---
+ fence/agents/ilo_ssh/fence_ilo_ssh.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fence/agents/ilo_ssh/fence_ilo_ssh.py b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+index fc44f98b..3445f846 100644
+--- a/fence/agents/ilo_ssh/fence_ilo_ssh.py
++++ b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+@@ -31,7 +31,7 @@ def set_power_status(conn, options):
+ def reboot_cycle(conn, options):
+ conn.send_eol("reset /system1 hard")
+ conn.log_expect(options, options["--command-prompt"], int(options["--power-timeout"]))
+- return
++ return True
+
+ def main():
+ device_opt = ["ipaddr", "login", "passwd", "secure", "cmd_prompt", "method", "telnet"]
+
+From 70712afaa1f37be09cc793a84bfdf5c2e0d7c07b Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Thu, 26 Oct 2017 12:22:18 +0200
+Subject: [PATCH 3/3] fence_ilo_ssh: warn if power is OFF in reboot_cycle()
+
+---
+ fence/agents/ilo_ssh/fence_ilo_ssh.py | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/fence/agents/ilo_ssh/fence_ilo_ssh.py b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+index 3445f846..f2c1f98d 100644
+--- a/fence/agents/ilo_ssh/fence_ilo_ssh.py
++++ b/fence/agents/ilo_ssh/fence_ilo_ssh.py
+@@ -2,6 +2,7 @@
+
+ import sys, re
+ import atexit
++import logging
+ sys.path.append("@FENCEAGENTSLIBDIR@")
+ from fencing import *
+
+@@ -31,6 +32,10 @@ def set_power_status(conn, options):
+ def reboot_cycle(conn, options):
+ conn.send_eol("reset /system1 hard")
+ conn.log_expect(options, options["--command-prompt"], int(options["--power-timeout"]))
++
++ if get_power_status(conn, options) == "off":
++ logging.error("Timed out waiting to power ON\n")
++
+ return True
+
+ def main():
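Taken together, the three patches leave reboot_cycle() in roughly this shape (conn and get_power_status come from the fencing library and the agent itself; shown only to consolidate the diffs):

    import logging

    def reboot_cycle(conn, options):
        # net effect of the fixes: corrected SMASH CLP argument order,
        # a warning if the node is still off, and an explicit True return
        conn.send_eol("reset /system1 hard")
        conn.log_expect(options, options["--command-prompt"],
                        int(options["--power-timeout"]))
        if get_power_status(conn, options) == "off":
            logging.error("Timed out waiting to power ON\n")
        return True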
diff --git a/SOURCES/bz1496390-fence_compute-fence_evacuate-Instance-HA-OSP12.patch b/SOURCES/bz1496390-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
new file mode 100644
index 0000000..6ca484d
--- /dev/null
+++ b/SOURCES/bz1496390-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
@@ -0,0 +1,1119 @@
+diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
+--- a/fence/agents/compute/fence_compute.py 2017-09-27 15:01:34.974642469 +0200
++++ b/fence/agents/compute/fence_compute.py 2017-09-27 15:24:57.482819900 +0200
+@@ -18,173 +18,115 @@
+ #END_VERSION_GENERATION
+
+ override_status = ""
+-nova = None
+
+ EVACUABLE_TAG = "evacuable"
+ TRUE_TAGS = ['true']
+
+-def get_power_status(_, options):
+- global override_status
+-
+- status = "unknown"
+- logging.debug("get action: " + options["--action"])
++def get_power_status(connection, options):
+
+ if len(override_status):
+ logging.debug("Pretending we're " + override_status)
+ return override_status
+
+- if nova:
++ status = "unknown"
++ logging.debug("get action: " + options["--action"])
++
++ if connection:
+ try:
+- services = nova.services.list(host=options["--plug"])
++ services = connection.services.list(host=options["--plug"], binary="nova-compute")
+ for service in services:
+- logging.debug("Status of %s is %s" % (service.binary, service.state))
+- if service.binary == "nova-compute":
+- if service.state == "up":
+- status = "on"
+- elif service.state == "down":
+- status = "off"
+- else:
+- logging.debug("Unknown status detected from nova: " + service.state)
+- break
++ logging.debug("Status of %s on %s is %s, %s" % (service.binary, options["--plug"], service.state, service.status))
++ if service.state == "up" and service.status == "enabled":
++ # Up and operational
++ status = "on"
++
++ elif service.state == "down" and service.status == "disabled":
++ # Down and fenced
++ status = "off"
++
++ elif service.state == "down":
++ # Down and requires fencing
++ status = "failed"
++
++ elif service.state == "up":
++ # Up and requires unfencing
++ status = "running"
++ else:
++ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
++ status = "%s %s" % (service.state, service.status)
++ break
+ except requests.exception.ConnectionError as err:
+ logging.warning("Nova connection failed: " + str(err))
++ logging.debug("Final status of %s is %s" % (options["--plug"], status))
+ return status
+
+-# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
+-# module which is not stable
+-def _server_evacuate(server, on_shared_storage):
+- success = False
+- error_message = ""
+- try:
+- logging.debug("Resurrecting instance: %s" % server)
+- (response, dictionary) = nova.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
+-
+- if response == None:
+- error_message = "No response while evacuating instance"
+- elif response.status_code == 200:
+- success = True
+- error_message = response.reason
+- else:
+- error_message = response.reason
+-
+- except Exception as e:
+- error_message = "Error while evacuating instance: %s" % e
+-
+- return {
+- "uuid": server,
+- "accepted": success,
+- "reason": error_message,
+- }
+-
+-def _is_server_evacuable(server, evac_flavors, evac_images):
+- if server.flavor.get('id') in evac_flavors:
+- return True
+- if server.image.get('id') in evac_images:
+- return True
+- logging.debug("Instance %s is not evacuable" % server.image.get('id'))
+- return False
+-
+-def _get_evacuable_flavors():
+- result = []
+- flavors = nova.flavors.list()
+- # Since the detailed view for all flavors doesn't provide the extra specs,
+- # we need to call each of the flavor to get them.
+- for flavor in flavors:
+- tag = flavor.get_keys().get(EVACUABLE_TAG)
+- if tag and tag.strip().lower() in TRUE_TAGS:
+- result.append(flavor.id)
+- return result
+-
+-def _get_evacuable_images():
+- result = []
+- images = nova.images.list(detailed=True)
+- for image in images:
+- if hasattr(image, 'metadata'):
+- tag = image.metadata.get(EVACUABLE_TAG)
+- if tag and tag.strip().lower() in TRUE_TAGS:
+- result.append(image.id)
+- return result
+-
+-def _host_evacuate(options):
+- result = True
+- images = _get_evacuable_images()
+- flavors = _get_evacuable_flavors()
+- servers = nova.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
+-
+- if options["--instance-filtering"] == "False":
+- logging.debug("Not evacuating anything")
+- evacuables = []
+- elif len(flavors) or len(images):
+- logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
+- # Identify all evacuable servers
+- logging.debug("Checking %s" % repr(servers))
+- evacuables = [server for server in servers
+- if _is_server_evacuable(server, flavors, images)]
+- logging.debug("Evacuating %s" % repr(evacuables))
+- else:
+- logging.debug("Evacuating all images and flavors")
+- evacuables = servers
+-
+- if options["--no-shared-storage"] != "False":
+- on_shared_storage = False
+- else:
+- on_shared_storage = True
+-
+- for server in evacuables:
+- logging.debug("Processing %s" % server)
+- if hasattr(server, 'id'):
+- response = _server_evacuate(server.id, on_shared_storage)
+- if response["accepted"]:
+- logging.debug("Evacuated %s from %s: %s" %
+- (response["uuid"], options["--plug"], response["reason"]))
+- else:
+- logging.error("Evacuation of %s on %s failed: %s" %
+- (response["uuid"], options["--plug"], response["reason"]))
+- result = False
+- else:
+- logging.error("Could not evacuate instance: %s" % server.to_dict())
+- # Should a malformed instance result in a failed evacuation?
+- # result = False
+- return result
++def get_power_status_simple(connection, options):
++ status = get_power_status(connection, options)
++ if status in [ "off" ]:
++ return status
++ return "on"
+
+ def set_attrd_status(host, status, options):
+ logging.debug("Setting fencing status for %s to %s" % (host, status))
+ run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
+
+-def set_power_status(_, options):
+- global override_status
+-
+- override_status = ""
+- logging.debug("set action: " + options["--action"])
++def get_attrd_status(host, options):
++ (status, pipe_stdout, pipe_stderr) = run_command(options, "attrd_updater -p -n evacuate -Q -N %s" % (host))
++ fields = pipe_stdout.split('"')
++ if len(fields) > 6:
++ return fields[5]
++ logging.debug("Got %s: o:%s e:%s n:%d" % (status, pipe_stdout, pipe_stderr, len(fields)))
++ return ""
++
++def set_power_status_on(connection, options):
++ # Wait for any evacuations to complete
++ while True:
++ current = get_attrd_status(options["--plug"], options)
++ if current in ["no", ""]:
++ logging.info("Evacuation complete for: %s '%s'" % (options["--plug"], current))
++ break
++ else:
++ logging.info("Waiting for %s to complete evacuations: %s" % (options["--plug"], current))
++ time.sleep(2)
+
+- if not nova:
+- return
++ status = get_power_status(connection, options)
++ # Should we do it for 'failed' too?
++ if status in [ "off", "running", "failed" ]:
++ try:
++ # Forcing the host back up
++ logging.info("Forcing nova-compute back up on "+options["--plug"])
++ connection.services.force_down(options["--plug"], "nova-compute", force_down=False)
++ logging.info("Forced nova-compute back up on "+options["--plug"])
++ except Exception as e:
++ # In theory, if force_down=False fails, that's for the exact
++ # same possible reasons that below with force_down=True
++ # eg. either an incompatible version or an old client.
++ # Since it's about forcing back to a default value, there is
++ # no real worries to just consider it's still okay even if the
++ # command failed
++ logging.warn("Exception from attempt to force "
++ "host back up via nova API: "
++ "%s: %s" % (e.__class__.__name__, e))
++
++ # Forcing the service back up in case it was disabled
++ logging.info("Enabling nova-compute on "+options["--plug"])
++ connection.services.enable(options["--plug"], 'nova-compute')
+
+- if options["--action"] == "on":
+- if get_power_status(_, options) != "on":
+- # Forcing the service back up in case it was disabled
+- nova.services.enable(options["--plug"], 'nova-compute')
+- try:
+- # Forcing the host back up
+- nova.services.force_down(
+- options["--plug"], "nova-compute", force_down=False)
+- except Exception as e:
+- # In theory, if force_down=False fails, that's for the exact
+- # same possible reasons that below with force_down=True
+- # eg. either an incompatible version or an old client.
+- # Since it's about forcing back to a default value, there is
+- # no real worries to just consider it's still okay even if the
+- # command failed
+- logging.info("Exception from attempt to force "
+- "host back up via nova API: "
+- "%s: %s" % (e.__class__.__name__, e))
+- else:
+- # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
+- override_status = "on"
++ # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
++ override_status = "on"
++ elif status not in ["on"]:
++ # Not safe to unfence, don't waste time looping to see if the status changes to "on"
++ options["--power-timeout"] = "0"
++
++def set_power_status_off(connection, options):
++ status = get_power_status(connection, options)
++ if status in [ "off" ]:
+ return
+
++ connection.services.disable(options["--plug"], 'nova-compute')
+ try:
+- nova.services.force_down(
++ # Until 2.53
++ connection.services.force_down(
+ options["--plug"], "nova-compute", force_down=True)
+ except Exception as e:
+ # Something went wrong when we tried to force the host down.
+@@ -198,7 +140,7 @@
+ "%s: %s" % (e.__class__.__name__, e))
+ # need to wait for nova to update its internal status or we
+ # cannot call host-evacuate
+- while get_power_status(_, options) != "off":
++ while get_power_status(connection, options) not in ["off"]:
+ # Loop forever if need be.
+ #
+ # Some callers (such as Pacemaker) will have a timer
+@@ -206,47 +148,55 @@
+ logging.debug("Waiting for nova to update its internal state for %s" % options["--plug"])
+ time.sleep(1)
+
+- if not _host_evacuate(options):
+- sys.exit(1)
++ set_attrd_status(options["--plug"], "yes", options)
++
++def set_power_status(connection, options):
++ global override_status
+
+- return
++ override_status = ""
++ logging.debug("set action: " + options["--action"])
++
++ if not connection:
++ return
+
++ if options["--action"] in ["off", "reboot"]:
++ set_power_status_off(connection, options)
++ else:
++ set_power_status_on(connection, options)
++ logging.debug("set action passed: " + options["--action"])
++ sys.exit(0)
+
+-def fix_domain(options):
++def fix_domain(connection, options):
+ domains = {}
+ last_domain = None
+
+- if nova:
++ if connection:
+ # Find it in nova
+
+- hypervisors = nova.hypervisors.list()
+- for hypervisor in hypervisors:
+- shorthost = hypervisor.hypervisor_hostname.split('.')[0]
++ services = connection.services.list(binary="nova-compute")
++ for service in services:
++ shorthost = service.host.split('.')[0]
+
+- if shorthost == hypervisor.hypervisor_hostname:
++ if shorthost == service.host:
+ # Nova is not using FQDN
+ calculated = ""
+ else:
+ # Compute nodes are named as FQDN, strip off the hostname
+- calculated = hypervisor.hypervisor_hostname.replace(shorthost+".", "")
+-
+- domains[calculated] = shorthost
++ calculated = service.host.replace(shorthost+".", "")
+
+ if calculated == last_domain:
+ # Avoid complaining for each compute node with the same name
+ # One hopes they don't appear interleaved as A.com B.com A.com B.com
+- logging.debug("Calculated the same domain from: %s" % hypervisor.hypervisor_hostname)
++ logging.debug("Calculated the same domain from: %s" % service.host)
++ continue
+
+- elif "--domain" in options and options["--domain"] == calculated:
+- # Supplied domain name is valid
+- return
++ domains[calculated] = service.host
++ last_domain = calculated
+
+- elif "--domain" in options:
++ if "--domain" in options and options["--domain"] != calculated:
+ # Warn in case nova isn't available at some point
+ logging.warning("Supplied domain '%s' does not match the one calculated from: %s"
+- % (options["--domain"], hypervisor.hypervisor_hostname))
+-
+- last_domain = calculated
++ % (options["--domain"], service.host))
+
+ if len(domains) == 0 and "--domain" not in options:
+ logging.error("Could not calculate the domain names used by compute nodes in nova")
+@@ -254,9 +204,9 @@
+ elif len(domains) == 1 and "--domain" not in options:
+ options["--domain"] = last_domain
+
+- elif len(domains) == 1:
+- logging.error("Overriding supplied domain '%s' does not match the one calculated from: %s"
+- % (options["--domain"], hypervisor.hypervisor_hostname))
++ elif len(domains) == 1 and options["--domain"] != last_domain:
++ logging.error("Overriding supplied domain '%s' as it does not match the one calculated from: %s"
++ % (options["--domain"], domains[last_domain]))
+ options["--domain"] = last_domain
+
+ elif len(domains) > 1:
+@@ -264,47 +214,49 @@
+ % (options["--domain"], repr(domains)))
+ sys.exit(1)
+
+-def fix_plug_name(options):
++ return last_domain
++
++def fix_plug_name(connection, options):
+ if options["--action"] == "list":
+ return
+
+ if "--plug" not in options:
+ return
+
+- fix_domain(options)
+- short_plug = options["--plug"].split('.')[0]
+- logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], options["--domain"]))
+-
+- if "--domain" not in options:
++ calculated = fix_domain(connection, options)
++ if calculated is None or "--domain" not in options:
+ # Nothing supplied and nova not available... what to do... nothing
+ return
+
+- elif options["--domain"] == "":
++ short_plug = options["--plug"].split('.')[0]
++ logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], calculated))
++
++ if options["--domain"] == "":
+ # Ensure any domain is stripped off since nova isn't using FQDN
+ options["--plug"] = short_plug
+
+- elif options["--domain"] in options["--plug"]:
+- # Plug already contains the domain, don't re-add
++ elif options["--plug"].endswith(options["--domain"]):
++ # Plug already uses the domain, don't re-add
+ return
+
+ else:
+ # Add the domain to the plug
+ options["--plug"] = short_plug + "." + options["--domain"]
+
+-def get_plugs_list(_, options):
++def get_plugs_list(connection, options):
+ result = {}
+
+- if nova:
+- hypervisors = nova.hypervisors.list()
+- for hypervisor in hypervisors:
+- longhost = hypervisor.hypervisor_hostname
++ if connection:
++ services = connection.services.list(binary="nova-compute")
++ for service in services:
++ longhost = service.host
+ shorthost = longhost.split('.')[0]
+ result[longhost] = ("", None)
+ result[shorthost] = ("", None)
+ return result
+
+ def create_nova_connection(options):
+- global nova
++ nova = None
+
+ try:
+ from novaclient import client
+@@ -330,41 +282,42 @@
+ if clientargs:
+ # OSP < 11
+ # ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
+- # varargs=None,
+- # keywords='kwargs', defaults=(None, None, None, None))
++ # varargs=None,
++ # keywords='kwargs', defaults=(None, None, None, None))
+ nova = client.Client(version,
+- options["--username"],
+- options["--password"],
+- options["--tenant-name"],
+- options["--auth-url"],
+- insecure=options["--insecure"],
+- region_name=options["--region-name"],
+- endpoint_type=options["--endpoint-type"],
+- http_log_debug=options.has_key("--verbose"))
++ options["--username"],
++ options["--password"],
++ options["--tenant-name"],
++ options["--auth-url"],
++ insecure=options["--insecure"],
++ region_name=options["--region-name"],
++ endpoint_type=options["--endpoint-type"],
++ http_log_debug=options.has_key("--verbose"))
+ else:
+ # OSP >= 11
+ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
+ nova = client.Client(version,
+- username=options["--username"],
+- password=options["--password"],
+- tenant_name=options["--tenant-name"],
+- auth_url=options["--auth-url"],
+- insecure=options["--insecure"],
+- region_name=options["--region-name"],
+- endpoint_type=options["--endpoint-type"],
+- http_log_debug=options.has_key("--verbose"))
++ username=options["--username"],
++ password=options["--password"],
++ tenant_name=options["--tenant-name"],
++ auth_url=options["--auth-url"],
++ insecure=options["--insecure"],
++ region_name=options["--region-name"],
++ endpoint_type=options["--endpoint-type"],
++ http_log_debug=options.has_key("--verbose"))
+
+ try:
+ nova.hypervisors.list()
+- return
++ return nova
+
+ except NotAcceptable as e:
+ logging.warning(e)
+
+ except Exception as e:
+ logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
+-
++
+ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
++ return None
+
+ def define_new_opts():
+ all_opt["endpoint_type"] = {
+@@ -448,11 +401,23 @@
+ "order": 5,
+ }
+
++def set_multi_power_fn(connection, options, set_power_fn, get_power_fn, retry_attempts=1):
++ for _ in range(retry_attempts):
++ set_power_fn(connection, options)
++ time.sleep(int(options["--power-wait"]))
++
++ for _ in range(int(options["--power-timeout"])):
++ if get_power_fn(connection, options) != options["--action"]:
++ time.sleep(1)
++ else:
++ return True
++ return False
++
+ def main():
+ global override_status
+ atexit.register(atexit_handler)
+
+- device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
++ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
+ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
+ "record_only", "instance_filtering", "insecure", "region_name"]
+ define_new_opts()
+@@ -472,30 +437,28 @@
+
+ run_delay(options)
+
+- create_nova_connection(options)
++ logging.debug("Running "+options["--action"])
++ connection = create_nova_connection(options)
+
+- fix_plug_name(options)
+- if options["--record-only"] in [ "1", "True", "true", "Yes", "yes"]:
+- if options["--action"] == "on":
+- set_attrd_status(options["--plug"], "no", options)
+- sys.exit(0)
+-
+- elif options["--action"] in ["off", "reboot"]:
+- set_attrd_status(options["--plug"], "yes", options)
+- sys.exit(0)
++ if options["--action"] in ["off", "on", "reboot", "status"]:
++ fix_plug_name(connection, options)
+
+- elif options["--action"] in ["monitor", "status"]:
+- sys.exit(0)
+
+- if options["--action"] in ["off", "reboot"]:
+- # Pretend we're 'on' so that the fencing library will always call set_power_status(off)
+- override_status = "on"
+-
+- if options["--action"] == "on":
+- # Pretend we're 'off' so that the fencing library will always call set_power_status(on)
+- override_status = "off"
++ if options["--action"] in ["reboot"]:
++ options["--action"]="off"
++
++ if options["--action"] in ["off", "on"]:
++ # No status first, call our own version
++ result = not set_multi_power_fn(connection, options, set_power_status, get_power_status_simple,
++ 1 + int(options["--retry-on"]))
++ elif options["--action"] in ["monitor"]:
++ result = 0
++ else:
++ result = fence_action(connection, options, set_power_status, get_power_status_simple, get_plugs_list, None)
+
+- result = fence_action(None, options, set_power_status, get_power_status, get_plugs_list, None)
++ logging.debug("Result for "+options["--action"]+": "+repr(result))
++ if result == None:
++ result = 0
+ sys.exit(result)
+
+ if __name__ == "__main__":
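The crux of the fence_compute rework above is the four-way mapping from a nova-compute service's (state, status) pair to a fencing status; restated as a standalone sketch with the service object reduced to two strings:

    def map_nova_status(state, status):
        # up/enabled    -> operational          ("on")
        # down/disabled -> already fenced       ("off")
        # down/*        -> requires fencing     ("failed")
        # up/*          -> requires unfencing   ("running")
        if state == "up" and status == "enabled":
            return "on"
        if state == "down" and status == "disabled":
            return "off"
        if state == "down":
            return "failed"
        if state == "up":
            return "running"
        return "%s %s" % (state, status)

    assert map_nova_status("down", "enabled") == "failed"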
+diff -uNr a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py
+--- a/fence/agents/compute/fence_evacuate.py 1970-01-01 01:00:00.000000000 +0100
++++ b/fence/agents/compute/fence_evacuate.py 2017-09-27 15:25:54.234304769 +0200
+@@ -0,0 +1,366 @@
++#!/usr/bin/python -tt
++
++import sys
++import time
++import atexit
++import logging
++import inspect
++import requests.exceptions
++
++sys.path.append("@FENCEAGENTSLIBDIR@")
++from fencing import *
++from fencing import fail_usage, is_executable, run_command, run_delay
++
++EVACUABLE_TAG = "evacuable"
++TRUE_TAGS = ['true']
++
++def get_power_status(connection, options):
++
++ status = "unknown"
++ logging.debug("get action: " + options["--action"])
++
++ if connection:
++ try:
++ services = connection.services.list(host=options["--plug"], binary="nova-compute")
++ for service in services:
++ logging.debug("Status of %s is %s, %s" % (service.binary, service.state, service.status))
++ if service.state == "up" and service.status == "enabled":
++ # Up and operational
++ status = "on"
++
++ elif service.state == "down" and service.status == "disabled":
++ # Down and fenced
++ status = "off"
++
++ elif service.state == "down":
++ # Down and requires fencing
++ status = "failed"
++
++ elif service.state == "up":
++ # Up and requires unfencing
++ status = "running"
++ else:
++ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
++ status = "%s %s" % (service.state, service.status)
++ break
++ except requests.exception.ConnectionError as err:
++ logging.warning("Nova connection failed: " + str(err))
++ return status
++
++# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
++# module which is not stable
++def _server_evacuate(connection, server, on_shared_storage):
++ success = False
++ error_message = ""
++ try:
++ logging.debug("Resurrecting instance: %s" % server)
++ (response, dictionary) = connection.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
++
++ if response == None:
++ error_message = "No response while evacuating instance"
++ elif response.status_code == 200:
++ success = True
++ error_message = response.reason
++ else:
++ error_message = response.reason
++
++ except Exception as e:
++ error_message = "Error while evacuating instance: %s" % e
++
++ return {
++ "uuid": server,
++ "accepted": success,
++ "reason": error_message,
++ }
++
++def _is_server_evacuable(server, evac_flavors, evac_images):
++ if server.flavor.get('id') in evac_flavors:
++ return True
++ if hasattr(server.image, 'get'):
++ if server.image.get('id') in evac_images:
++ return True
++ logging.debug("Instance %s is not evacuable" % server.image.get('id'))
++ return False
++
++def _get_evacuable_flavors(connection):
++ result = []
++ flavors = connection.flavors.list()
++ # Since the detailed view for all flavors doesn't provide the extra specs,
++ # we need to call each of the flavor to get them.
++ for flavor in flavors:
++ tag = flavor.get_keys().get(EVACUABLE_TAG)
++ if tag and tag.strip().lower() in TRUE_TAGS:
++ result.append(flavor.id)
++ return result
++
++def _get_evacuable_images(connection):
++ result = []
++ images = []
++ if hasattr(connection, "images"):
++ images = connection.images.list(detailed=True)
++ elif hasattr(connection, "glance"):
++ # OSP12+
++ images = connection.glance.list()
++
++ for image in images:
++ if hasattr(image, 'metadata'):
++ tag = image.metadata.get(EVACUABLE_TAG)
++ if tag and tag.strip().lower() in TRUE_TAGS:
++ result.append(image.id)
++ elif hasattr(image, 'tags'):
++ # OSP12+
++ if EVACUABLE_TAG in image.tags:
++ result.append(image.id)
++ return result
++
++def _host_evacuate(connection, options):
++ result = True
++ images = _get_evacuable_images(connection)
++ flavors = _get_evacuable_flavors(connection)
++ servers = connection.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
++
++ if options["--instance-filtering"] == "False":
++ logging.debug("Not evacuating anything")
++ evacuables = []
++ elif len(flavors) or len(images):
++ logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
++ # Identify all evacuable servers
++ logging.debug("Checking %s" % repr(servers))
++ evacuables = [server for server in servers
++ if _is_server_evacuable(server, flavors, images)]
++ logging.debug("Evacuating %s" % repr(evacuables))
++ else:
++ logging.debug("Evacuating all images and flavors")
++ evacuables = servers
++
++ if options["--no-shared-storage"] != "False":
++ on_shared_storage = False
++ else:
++ on_shared_storage = True
++
++ for server in evacuables:
++ logging.debug("Processing %s" % server)
++ if hasattr(server, 'id'):
++ response = _server_evacuate(connection, server.id, on_shared_storage)
++ if response["accepted"]:
++ logging.debug("Evacuated %s from %s: %s" %
++ (response["uuid"], options["--plug"], response["reason"]))
++ else:
++ logging.error("Evacuation of %s on %s failed: %s" %
++ (response["uuid"], options["--plug"], response["reason"]))
++ result = False
++ else:
++ logging.error("Could not evacuate instance: %s" % server.to_dict())
++ # Should a malformed instance result in a failed evacuation?
++ # result = False
++ return result
++
++def set_attrd_status(host, status, options):
++ logging.debug("Setting fencing status for %s to %s" % (host, status))
++ run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
++
++def set_power_status(connection, options):
++ logging.debug("set action: " + options["--action"])
++
++ if not connection:
++ return
++
++ if options["--action"] == "off" and not _host_evacuate(options):
++ sys.exit(1)
++
++ sys.exit(0)
++
++def get_plugs_list(connection, options):
++ result = {}
++
++ if connection:
++ services = connection.services.list(binary="nova-compute")
++ for service in services:
++ longhost = service.host
++ shorthost = longhost.split('.')[0]
++ result[longhost] = ("", None)
++ result[shorthost] = ("", None)
++ return result
++
++def create_nova_connection(options):
++ nova = None
++
++ try:
++ from novaclient import client
++ from novaclient.exceptions import NotAcceptable
++ except ImportError:
++ fail_usage("Nova not found or not accessible")
++
++ versions = [ "2.11", "2" ]
++ for version in versions:
++ clientargs = inspect.getargspec(client.Client).varargs
++
++ # Some versions of Openstack prior to Ocata only
++ # supported positional arguments for username,
++ # password and tenant.
++ #
++ # Versions since Ocata only support named arguments.
++ #
++ # So we need to use introspection to figure out how to
++ # create a Nova client.
++ #
++ # Happy days
++ #
++ if clientargs:
++ # OSP < 11
++ # ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
++ # varargs=None,
++ # keywords='kwargs', defaults=(None, None, None, None))
++ nova = client.Client(version,
++ options["--username"],
++ options["--password"],
++ options["--tenant-name"],
++ options["--auth-url"],
++ insecure=options["--insecure"],
++ region_name=options["--region-name"],
++ endpoint_type=options["--endpoint-type"],
++ http_log_debug=options.has_key("--verbose"))
++ else:
++ # OSP >= 11
++ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
++ nova = client.Client(version,
++ username=options["--username"],
++ password=options["--password"],
++ tenant_name=options["--tenant-name"],
++ auth_url=options["--auth-url"],
++ insecure=options["--insecure"],
++ region_name=options["--region-name"],
++ endpoint_type=options["--endpoint-type"],
++ http_log_debug=options.has_key("--verbose"))
++
++ try:
++ nova.hypervisors.list()
++ return nova
++
++ except NotAcceptable as e:
++ logging.warning(e)
++
++ except Exception as e:
++ logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
++
++ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
++ return None
++
++def define_new_opts():
++ all_opt["endpoint_type"] = {
++ "getopt" : "e:",
++ "longopt" : "endpoint-type",
++ "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)",
++ "required" : "0",
++ "shortdesc" : "Nova Endpoint type",
++ "default" : "internalURL",
++ "order": 1,
++ }
++ all_opt["tenant_name"] = {
++ "getopt" : "t:",
++ "longopt" : "tenant-name",
++ "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
++ "required" : "0",
++ "shortdesc" : "Keystone Admin Tenant",
++ "default" : "",
++ "order": 1,
++ }
++ all_opt["auth_url"] = {
++ "getopt" : "k:",
++ "longopt" : "auth-url",
++ "help" : "-k, --auth-url=[url] Keystone Admin Auth URL",
++ "required" : "0",
++ "shortdesc" : "Keystone Admin Auth URL",
++ "default" : "",
++ "order": 1,
++ }
++ all_opt["region_name"] = {
++ "getopt" : "",
++ "longopt" : "region-name",
++ "help" : "--region-name=[region] Region Name",
++ "required" : "0",
++ "shortdesc" : "Region Name",
++ "default" : "",
++ "order": 1,
++ }
++ all_opt["insecure"] = {
++ "getopt" : "",
++ "longopt" : "insecure",
++ "help" : "--insecure Explicitly allow agent to perform \"insecure\" TLS (https) requests",
++ "required" : "0",
++ "shortdesc" : "Allow Insecure TLS Requests",
++ "default" : "False",
++ "order": 2,
++ }
++ all_opt["domain"] = {
++ "getopt" : "d:",
++ "longopt" : "domain",
++ "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
++ "required" : "0",
++ "shortdesc" : "DNS domain in which hosts live",
++ "order": 5,
++ }
++ all_opt["instance_filtering"] = {
++ "getopt" : "",
++ "longopt" : "instance-filtering",
++ "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)",
++ "required" : "0",
++ "shortdesc" : "Allow instances to be evacuated",
++ "default" : "True",
++ "order": 5,
++ }
++ all_opt["no_shared_storage"] = {
++ "getopt" : "",
++ "longopt" : "no-shared-storage",
++ "help" : "--no-shared-storage Disable functionality for shared storage",
++ "required" : "0",
++ "shortdesc" : "Disable functionality for dealing with shared storage",
++ "default" : "False",
++ "order": 5,
++ }
++
++def main():
++ atexit.register(atexit_handler)
++
++ device_opt = ["login", "passwd", "tenant_name", "auth_url",
++ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
++ "instance_filtering", "insecure", "region_name"]
++ define_new_opts()
++ all_opt["shell_timeout"]["default"] = "180"
++
++ options = check_input(device_opt, process_input(device_opt))
++
++ docs = {}
++ docs["shortdesc"] = "Fence agent for the automatic resurrection of OpenStack compute instances"
++ docs["longdesc"] = "Used to reschedule flagged instances"
++ docs["vendorurl"] = ""
++
++ show_docs(options, docs)
++
++ run_delay(options)
++
++ connection = create_nova_connection(options)
++
++ # Un-evacuating a server doesn't make sense
++ if options["--action"] in ["on"]:
++ logging.error("Action %s is not supported by this agent" % (options["--action"]))
++ sys.exit(1)
++
++ if options["--action"] in ["off", "reboot"]:
++ status = get_power_status(connection, options)
++ if status != "off":
++ logging.error("Cannot resurrect instances from %s in state '%s'" % (options["--plug"], status))
++ sys.exit(1)
++
++ elif not _host_evacuate(connection, options):
++ logging.error("Resurrection of instances from %s failed" % (options["--plug"]))
++ sys.exit(1)
++
++ logging.info("Resurrection of instances from %s complete" % (options["--plug"]))
++ sys.exit(0)
++
++ result = fence_action(connection, options, set_power_status, get_power_status, get_plugs_list, None)
++ sys.exit(result)
++
++if __name__ == "__main__":
++ main()
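fence_evacuate's filtering policy above fits in a few lines; a compact model with server, flavor, and image objects reduced to plain IDs (illustrative, not the agent's API):

    def select_evacuables(servers, evac_flavors, evac_images,
                          instance_filtering=True):
        if not instance_filtering:
            return []          # filtering disabled: evacuate nothing
        if evac_flavors or evac_images:
            # only instances built from tagged flavors/images move
            return [s for s in servers
                    if s["flavor"] in evac_flavors
                    or s["image"] in evac_images]
        return list(servers)   # nothing tagged: evacuate everything

    servers = [{"flavor": "f1", "image": "i1"},
               {"flavor": "f2", "image": "i2"}]
    assert select_evacuables(servers, ["f2"], []) == [servers[1]]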
+diff -uNr a/fence/agents/compute/Makefile.am b/fence/agents/compute/Makefile.am
+--- a/fence/agents/compute/Makefile.am 2017-09-27 15:01:34.844643650 +0200
++++ b/fence/agents/compute/Makefile.am 2017-09-27 15:57:50.963839738 +0200
+@@ -1,14 +1,14 @@
+ MAINTAINERCLEANFILES = Makefile.in
+
+-TARGET = fence_compute
++TARGET = fence_compute fence_evacuate
+
+-SRC = $(TARGET).py
++SRC = $(TARGET:=.py)
+
+ EXTRA_DIST = $(SRC)
+
+ sbin_SCRIPTS = $(TARGET)
+
+-man_MANS = $(TARGET).8
++man_MANS = $(TARGET:=.8)
+
+ FENCE_TEST_ARGS = -l test -p test -n 1
+
+diff -uNr a/tests/data/metadata/fence_evacuate.xml b/tests/data/metadata/fence_evacuate.xml
+--- a/tests/data/metadata/fence_evacuate.xml 1970-01-01 01:00:00.000000000 +0100
++++ b/tests/data/metadata/fence_evacuate.xml 2017-09-27 15:28:10.978063549 +0200
+@@ -0,0 +1,163 @@
++
++
++Used to reschedule flagged instances
++
++
++
++
++
++ Keystone Admin Tenant
++
++
++
++
++ Keystone Admin Auth URL
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Region Name
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Nova Endpoint type
++
++
++
++
++ Fencing Action
++
++
++
++
++ Login Name
++
++
++
++
++ Physical plug number, name of virtual machine or UUID
++
++
++
++
++ Login Name
++
++
++
++
++ Login password or passphrase
++
++
++
++
++ Script to retrieve password
++
++
++
++
++ Allow Insecure TLS Requests
++
++
++
++
++ DNS domain in which hosts live
++
++
++
++
++ Allow instances to be evacuated
++
++
++
++
++ Disable functionality for dealing with shared storage
++
++
++
++
++ Verbose mode
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Write debug information to given file
++
++
++
++
++ Display version information and exit
++
++
++
++
++ Display help and exit
++
++
++
++
++ Separator for CSV created by operation list
++
++
++
++
++ Wait X seconds after issuing ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after login
++
++
++
++
++ Wait X seconds before fencing is started
++
++
++
++
++ Test X seconds for status change after ON/OFF
++
++
++
++
++ Wait X seconds for cmd prompt after issuing command
++
++
++
++
++ Count of attempts to retry power on
++
++
++
++
++
++
++
++
++
++
++
++
++
++
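The fence_evacuate.xml content above survives only as its text nodes: one shortdesc per exposed option, originally bracketed by standard resource-agent markup whose tags were lost in extraction. A sketch of the presumed shape of a single entry, built with the standard library; the attribute values are assumptions, not recovered patch content:

    # Presumed resource-agent markup around one shortdesc above; attribute
    # values here are illustrative assumptions, not recovered patch content.
    from xml.etree import ElementTree as ET

    param = ET.Element("parameter", name="tenant_name", unique="0", required="0")
    ET.SubElement(param, "getopt", mixed="-t, --tenant-name=[tenant]")
    ET.SubElement(param, "content", type="string", default="")
    ET.SubElement(param, "shortdesc", lang="en").text = "Keystone Admin Tenant"
    print(ET.tostring(param, encoding="unicode"))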
diff --git a/SOURCES/bz1497072-fence_compute-fence_evacuate-Instance-HA-OSP12.patch b/SOURCES/bz1497072-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
deleted file mode 100644
index 6ca484d..0000000
--- a/SOURCES/bz1497072-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
+++ /dev/null
@@ -1,1119 +0,0 @@
-diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
---- a/fence/agents/compute/fence_compute.py 2017-09-27 15:01:34.974642469 +0200
-+++ b/fence/agents/compute/fence_compute.py 2017-09-27 15:24:57.482819900 +0200
-@@ -18,173 +18,115 @@
- #END_VERSION_GENERATION
-
- override_status = ""
--nova = None
-
- EVACUABLE_TAG = "evacuable"
- TRUE_TAGS = ['true']
-
--def get_power_status(_, options):
-- global override_status
--
-- status = "unknown"
-- logging.debug("get action: " + options["--action"])
-+def get_power_status(connection, options):
-
- if len(override_status):
- logging.debug("Pretending we're " + override_status)
- return override_status
-
-- if nova:
-+ status = "unknown"
-+ logging.debug("get action: " + options["--action"])
-+
-+ if connection:
- try:
-- services = nova.services.list(host=options["--plug"])
-+ services = connection.services.list(host=options["--plug"], binary="nova-compute")
- for service in services:
-- logging.debug("Status of %s is %s" % (service.binary, service.state))
-- if service.binary == "nova-compute":
-- if service.state == "up":
-- status = "on"
-- elif service.state == "down":
-- status = "off"
-- else:
-- logging.debug("Unknown status detected from nova: " + service.state)
-- break
-+ logging.debug("Status of %s on %s is %s, %s" % (service.binary, options["--plug"], service.state, service.status))
-+ if service.state == "up" and service.status == "enabled":
-+ # Up and operational
-+ status = "on"
-+
-+ elif service.state == "down" and service.status == "disabled":
-+ # Down and fenced
-+ status = "off"
-+
-+ elif service.state == "down":
-+ # Down and requires fencing
-+ status = "failed"
-+
-+ elif service.state == "up":
-+ # Up and requires unfencing
-+ status = "running"
-+ else:
-+ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
-+ status = "%s %s" % (service.state, service.status)
-+ break
- except requests.exception.ConnectionError as err:
- logging.warning("Nova connection failed: " + str(err))
-+ logging.debug("Final status of %s is %s" % (options["--plug"], status))
- return status
-
--# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
--# module which is not stable
--def _server_evacuate(server, on_shared_storage):
-- success = False
-- error_message = ""
-- try:
-- logging.debug("Resurrecting instance: %s" % server)
-- (response, dictionary) = nova.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
--
-- if response == None:
-- error_message = "No response while evacuating instance"
-- elif response.status_code == 200:
-- success = True
-- error_message = response.reason
-- else:
-- error_message = response.reason
--
-- except Exception as e:
-- error_message = "Error while evacuating instance: %s" % e
--
-- return {
-- "uuid": server,
-- "accepted": success,
-- "reason": error_message,
-- }
--
--def _is_server_evacuable(server, evac_flavors, evac_images):
-- if server.flavor.get('id') in evac_flavors:
-- return True
-- if server.image.get('id') in evac_images:
-- return True
-- logging.debug("Instance %s is not evacuable" % server.image.get('id'))
-- return False
--
--def _get_evacuable_flavors():
-- result = []
-- flavors = nova.flavors.list()
-- # Since the detailed view for all flavors doesn't provide the extra specs,
-- # we need to call each of the flavor to get them.
-- for flavor in flavors:
-- tag = flavor.get_keys().get(EVACUABLE_TAG)
-- if tag and tag.strip().lower() in TRUE_TAGS:
-- result.append(flavor.id)
-- return result
--
--def _get_evacuable_images():
-- result = []
-- images = nova.images.list(detailed=True)
-- for image in images:
-- if hasattr(image, 'metadata'):
-- tag = image.metadata.get(EVACUABLE_TAG)
-- if tag and tag.strip().lower() in TRUE_TAGS:
-- result.append(image.id)
-- return result
--
--def _host_evacuate(options):
-- result = True
-- images = _get_evacuable_images()
-- flavors = _get_evacuable_flavors()
-- servers = nova.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
--
-- if options["--instance-filtering"] == "False":
-- logging.debug("Not evacuating anything")
-- evacuables = []
-- elif len(flavors) or len(images):
-- logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
-- # Identify all evacuable servers
-- logging.debug("Checking %s" % repr(servers))
-- evacuables = [server for server in servers
-- if _is_server_evacuable(server, flavors, images)]
-- logging.debug("Evacuating %s" % repr(evacuables))
-- else:
-- logging.debug("Evacuating all images and flavors")
-- evacuables = servers
--
-- if options["--no-shared-storage"] != "False":
-- on_shared_storage = False
-- else:
-- on_shared_storage = True
--
-- for server in evacuables:
-- logging.debug("Processing %s" % server)
-- if hasattr(server, 'id'):
-- response = _server_evacuate(server.id, on_shared_storage)
-- if response["accepted"]:
-- logging.debug("Evacuated %s from %s: %s" %
-- (response["uuid"], options["--plug"], response["reason"]))
-- else:
-- logging.error("Evacuation of %s on %s failed: %s" %
-- (response["uuid"], options["--plug"], response["reason"]))
-- result = False
-- else:
-- logging.error("Could not evacuate instance: %s" % server.to_dict())
-- # Should a malformed instance result in a failed evacuation?
-- # result = False
-- return result
-+def get_power_status_simple(connection, options):
-+ status = get_power_status(connection, options)
-+ if status in [ "off" ]:
-+ return status
-+ return "on"
-
- def set_attrd_status(host, status, options):
- logging.debug("Setting fencing status for %s to %s" % (host, status))
- run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
-
--def set_power_status(_, options):
-- global override_status
--
-- override_status = ""
-- logging.debug("set action: " + options["--action"])
-+def get_attrd_status(host, options):
-+ (status, pipe_stdout, pipe_stderr) = run_command(options, "attrd_updater -p -n evacuate -Q -N %s" % (host))
-+ fields = pipe_stdout.split('"')
-+ if len(fields) > 6:
-+ return fields[5]
-+ logging.debug("Got %s: o:%s e:%s n:%d" % (status, pipe_stdout, pipe_stderr, len(fields)))
-+ return ""
-+
-+def set_power_status_on(connection, options):
-+ # Wait for any evacuations to complete
-+ while True:
-+ current = get_attrd_status(options["--plug"], options)
-+ if current in ["no", ""]:
-+ logging.info("Evacuation complete for: %s '%s'" % (options["--plug"], current))
-+ break
-+ else:
-+ logging.info("Waiting for %s to complete evacuations: %s" % (options["--plug"], current))
-+ time.sleep(2)
-
-- if not nova:
-- return
-+ status = get_power_status(connection, options)
-+ # Should we do it for 'failed' too?
-+ if status in [ "off", "running", "failed" ]:
-+ try:
-+ # Forcing the host back up
-+ logging.info("Forcing nova-compute back up on "+options["--plug"])
-+ connection.services.force_down(options["--plug"], "nova-compute", force_down=False)
-+ logging.info("Forced nova-compute back up on "+options["--plug"])
-+ except Exception as e:
-+ # In theory, if force_down=False fails, that's for the exact
-+ # same possible reasons that below with force_down=True
-+ # eg. either an incompatible version or an old client.
-+ # Since it's about forcing back to a default value, there is
-+ # no real worries to just consider it's still okay even if the
-+ # command failed
-+ logging.warn("Exception from attempt to force "
-+ "host back up via nova API: "
-+ "%s: %s" % (e.__class__.__name__, e))
-+
-+ # Forcing the service back up in case it was disabled
-+ logging.info("Enabling nova-compute on "+options["--plug"])
-+ connection.services.enable(options["--plug"], 'nova-compute')
-
-- if options["--action"] == "on":
-- if get_power_status(_, options) != "on":
-- # Forcing the service back up in case it was disabled
-- nova.services.enable(options["--plug"], 'nova-compute')
-- try:
-- # Forcing the host back up
-- nova.services.force_down(
-- options["--plug"], "nova-compute", force_down=False)
-- except Exception as e:
-- # In theory, if force_down=False fails, that's for the exact
-- # same possible reasons that below with force_down=True
-- # eg. either an incompatible version or an old client.
-- # Since it's about forcing back to a default value, there is
-- # no real worries to just consider it's still okay even if the
-- # command failed
-- logging.info("Exception from attempt to force "
-- "host back up via nova API: "
-- "%s: %s" % (e.__class__.__name__, e))
-- else:
-- # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
-- override_status = "on"
-+ # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
-+ override_status = "on"
-+ elif status not in ["on"]:
-+ # Not safe to unfence, don't waste time looping to see if the status changes to "on"
-+ options["--power-timeout"] = "0"
-+
-+def set_power_status_off(connection, options):
-+ status = get_power_status(connection, options)
-+ if status in [ "off" ]:
- return
-
-+ connection.services.disable(options["--plug"], 'nova-compute')
- try:
-- nova.services.force_down(
-+ # Until 2.53
-+ connection.services.force_down(
- options["--plug"], "nova-compute", force_down=True)
- except Exception as e:
- # Something went wrong when we tried to force the host down.
-@@ -198,7 +140,7 @@
- "%s: %s" % (e.__class__.__name__, e))
- # need to wait for nova to update its internal status or we
- # cannot call host-evacuate
-- while get_power_status(_, options) != "off":
-+ while get_power_status(connection, options) not in ["off"]:
- # Loop forever if need be.
- #
- # Some callers (such as Pacemaker) will have a timer
-@@ -206,47 +148,55 @@
- logging.debug("Waiting for nova to update its internal state for %s" % options["--plug"])
- time.sleep(1)
-
-- if not _host_evacuate(options):
-- sys.exit(1)
-+ set_attrd_status(options["--plug"], "yes", options)
-+
-+def set_power_status(connection, options):
-+ global override_status
-
-- return
-+ override_status = ""
-+ logging.debug("set action: " + options["--action"])
-+
-+ if not connection:
-+ return
-
-+ if options["--action"] in ["off", "reboot"]:
-+ set_power_status_off(connection, options)
-+ else:
-+ set_power_status_on(connection, options)
-+ logging.debug("set action passed: " + options["--action"])
-+ sys.exit(0)
-
--def fix_domain(options):
-+def fix_domain(connection, options):
- domains = {}
- last_domain = None
-
-- if nova:
-+ if connection:
- # Find it in nova
-
-- hypervisors = nova.hypervisors.list()
-- for hypervisor in hypervisors:
-- shorthost = hypervisor.hypervisor_hostname.split('.')[0]
-+ services = connection.services.list(binary="nova-compute")
-+ for service in services:
-+ shorthost = service.host.split('.')[0]
-
-- if shorthost == hypervisor.hypervisor_hostname:
-+ if shorthost == service.host:
- # Nova is not using FQDN
- calculated = ""
- else:
- # Compute nodes are named as FQDN, strip off the hostname
-- calculated = hypervisor.hypervisor_hostname.replace(shorthost+".", "")
--
-- domains[calculated] = shorthost
-+ calculated = service.host.replace(shorthost+".", "")
-
- if calculated == last_domain:
- # Avoid complaining for each compute node with the same name
- # One hopes they don't appear interleaved as A.com B.com A.com B.com
-- logging.debug("Calculated the same domain from: %s" % hypervisor.hypervisor_hostname)
-+ logging.debug("Calculated the same domain from: %s" % service.host)
-+ continue
-
-- elif "--domain" in options and options["--domain"] == calculated:
-- # Supplied domain name is valid
-- return
-+ domains[calculated] = service.host
-+ last_domain = calculated
-
-- elif "--domain" in options:
-+ if "--domain" in options and options["--domain"] != calculated:
- # Warn in case nova isn't available at some point
- logging.warning("Supplied domain '%s' does not match the one calculated from: %s"
-- % (options["--domain"], hypervisor.hypervisor_hostname))
--
-- last_domain = calculated
-+ % (options["--domain"], service.host))
-
- if len(domains) == 0 and "--domain" not in options:
- logging.error("Could not calculate the domain names used by compute nodes in nova")
-@@ -254,9 +204,9 @@
- elif len(domains) == 1 and "--domain" not in options:
- options["--domain"] = last_domain
-
-- elif len(domains) == 1:
-- logging.error("Overriding supplied domain '%s' does not match the one calculated from: %s"
-- % (options["--domain"], hypervisor.hypervisor_hostname))
-+ elif len(domains) == 1 and options["--domain"] != last_domain:
-+ logging.error("Overriding supplied domain '%s' as it does not match the one calculated from: %s"
-+ % (options["--domain"], domains[last_domain]))
- options["--domain"] = last_domain
-
- elif len(domains) > 1:
-@@ -264,47 +214,49 @@
- % (options["--domain"], repr(domains)))
- sys.exit(1)
-
--def fix_plug_name(options):
-+ return last_domain
-+
-+def fix_plug_name(connection, options):
- if options["--action"] == "list":
- return
-
- if "--plug" not in options:
- return
-
-- fix_domain(options)
-- short_plug = options["--plug"].split('.')[0]
-- logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], options["--domain"]))
--
-- if "--domain" not in options:
-+ calculated = fix_domain(connection, options)
-+ if calculated is None or "--domain" not in options:
- # Nothing supplied and nova not available... what to do... nothing
- return
-
-- elif options["--domain"] == "":
-+ short_plug = options["--plug"].split('.')[0]
-+ logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], calculated))
-+
-+ if options["--domain"] == "":
- # Ensure any domain is stripped off since nova isn't using FQDN
- options["--plug"] = short_plug
-
-- elif options["--domain"] in options["--plug"]:
-- # Plug already contains the domain, don't re-add
-+ elif options["--plug"].endswith(options["--domain"]):
-+ # Plug already uses the domain, don't re-add
- return
-
- else:
- # Add the domain to the plug
- options["--plug"] = short_plug + "." + options["--domain"]
-
--def get_plugs_list(_, options):
-+def get_plugs_list(connection, options):
- result = {}
-
-- if nova:
-- hypervisors = nova.hypervisors.list()
-- for hypervisor in hypervisors:
-- longhost = hypervisor.hypervisor_hostname
-+ if connection:
-+ services = connection.services.list(binary="nova-compute")
-+ for service in services:
-+ longhost = service.host
- shorthost = longhost.split('.')[0]
- result[longhost] = ("", None)
- result[shorthost] = ("", None)
- return result
-
- def create_nova_connection(options):
-- global nova
-+ nova = None
-
- try:
- from novaclient import client
-@@ -330,41 +282,42 @@
- if clientargs:
- # OSP < 11
- # ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
-- # varargs=None,
-- # keywords='kwargs', defaults=(None, None, None, None))
-+ # varargs=None,
-+ # keywords='kwargs', defaults=(None, None, None, None))
- nova = client.Client(version,
-- options["--username"],
-- options["--password"],
-- options["--tenant-name"],
-- options["--auth-url"],
-- insecure=options["--insecure"],
-- region_name=options["--region-name"],
-- endpoint_type=options["--endpoint-type"],
-- http_log_debug=options.has_key("--verbose"))
-+ options["--username"],
-+ options["--password"],
-+ options["--tenant-name"],
-+ options["--auth-url"],
-+ insecure=options["--insecure"],
-+ region_name=options["--region-name"],
-+ endpoint_type=options["--endpoint-type"],
-+ http_log_debug=options.has_key("--verbose"))
- else:
- # OSP >= 11
- # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
- nova = client.Client(version,
-- username=options["--username"],
-- password=options["--password"],
-- tenant_name=options["--tenant-name"],
-- auth_url=options["--auth-url"],
-- insecure=options["--insecure"],
-- region_name=options["--region-name"],
-- endpoint_type=options["--endpoint-type"],
-- http_log_debug=options.has_key("--verbose"))
-+ username=options["--username"],
-+ password=options["--password"],
-+ tenant_name=options["--tenant-name"],
-+ auth_url=options["--auth-url"],
-+ insecure=options["--insecure"],
-+ region_name=options["--region-name"],
-+ endpoint_type=options["--endpoint-type"],
-+ http_log_debug=options.has_key("--verbose"))
-
- try:
- nova.hypervisors.list()
-- return
-+ return nova
-
- except NotAcceptable as e:
- logging.warning(e)
-
- except Exception as e:
- logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
--
-+
- logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
-+ return None
-
- def define_new_opts():
- all_opt["endpoint_type"] = {
-@@ -448,11 +401,23 @@
- "order": 5,
- }
-
-+def set_multi_power_fn(connection, options, set_power_fn, get_power_fn, retry_attempts=1):
-+ for _ in range(retry_attempts):
-+ set_power_fn(connection, options)
-+ time.sleep(int(options["--power-wait"]))
-+
-+ for _ in range(int(options["--power-timeout"])):
-+ if get_power_fn(connection, options) != options["--action"]:
-+ time.sleep(1)
-+ else:
-+ return True
-+ return False
-+
- def main():
- global override_status
- atexit.register(atexit_handler)
-
-- device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
-+ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
- "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
- "record_only", "instance_filtering", "insecure", "region_name"]
- define_new_opts()
-@@ -472,30 +437,28 @@
-
- run_delay(options)
-
-- create_nova_connection(options)
-+ logging.debug("Running "+options["--action"])
-+ connection = create_nova_connection(options)
-
-- fix_plug_name(options)
-- if options["--record-only"] in [ "1", "True", "true", "Yes", "yes"]:
-- if options["--action"] == "on":
-- set_attrd_status(options["--plug"], "no", options)
-- sys.exit(0)
--
-- elif options["--action"] in ["off", "reboot"]:
-- set_attrd_status(options["--plug"], "yes", options)
-- sys.exit(0)
-+ if options["--action"] in ["off", "on", "reboot", "status"]:
-+ fix_plug_name(connection, options)
-
-- elif options["--action"] in ["monitor", "status"]:
-- sys.exit(0)
-
-- if options["--action"] in ["off", "reboot"]:
-- # Pretend we're 'on' so that the fencing library will always call set_power_status(off)
-- override_status = "on"
--
-- if options["--action"] == "on":
-- # Pretend we're 'off' so that the fencing library will always call set_power_status(on)
-- override_status = "off"
-+ if options["--action"] in ["reboot"]:
-+ options["--action"]="off"
-+
-+ if options["--action"] in ["off", "on"]:
-+ # No status first, call our own version
-+ result = not set_multi_power_fn(connection, options, set_power_status, get_power_status_simple,
-+ 1 + int(options["--retry-on"]))
-+ elif options["--action"] in ["monitor"]:
-+ result = 0
-+ else:
-+ result = fence_action(connection, options, set_power_status, get_power_status_simple, get_plugs_list, None)
-
-- result = fence_action(None, options, set_power_status, get_power_status, get_plugs_list, None)
-+ logging.debug("Result for "+options["--action"]+": "+repr(result))
-+ if result == None:
-+ result = 0
- sys.exit(result)
-
- if __name__ == "__main__":
-diff -uNr a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py
---- a/fence/agents/compute/fence_evacuate.py 1970-01-01 01:00:00.000000000 +0100
-+++ b/fence/agents/compute/fence_evacuate.py 2017-09-27 15:25:54.234304769 +0200
-@@ -0,0 +1,366 @@
-+#!/usr/bin/python -tt
-+
-+import sys
-+import time
-+import atexit
-+import logging
-+import inspect
-+import requests.exceptions
-+
-+sys.path.append("@FENCEAGENTSLIBDIR@")
-+from fencing import *
-+from fencing import fail_usage, is_executable, run_command, run_delay
-+
-+EVACUABLE_TAG = "evacuable"
-+TRUE_TAGS = ['true']
-+
-+def get_power_status(connection, options):
-+
-+ status = "unknown"
-+ logging.debug("get action: " + options["--action"])
-+
-+ if connection:
-+ try:
-+ services = connection.services.list(host=options["--plug"], binary="nova-compute")
-+ for service in services:
-+ logging.debug("Status of %s is %s, %s" % (service.binary, service.state, service.status))
-+ if service.state == "up" and service.status == "enabled":
-+ # Up and operational
-+ status = "on"
-+
-+ elif service.state == "down" and service.status == "disabled":
-+ # Down and fenced
-+ status = "off"
-+
-+ elif service.state == "down":
-+ # Down and requires fencing
-+ status = "failed"
-+
-+ elif service.state == "up":
-+ # Up and requires unfencing
-+ status = "running"
-+ else:
-+ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
-+ status = "%s %s" % (service.state, service.status)
-+ break
-+ except requests.exception.ConnectionError as err:
-+ logging.warning("Nova connection failed: " + str(err))
-+ return status
-+
-+# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
-+# module which is not stable
-+def _server_evacuate(connection, server, on_shared_storage):
-+ success = False
-+ error_message = ""
-+ try:
-+ logging.debug("Resurrecting instance: %s" % server)
-+ (response, dictionary) = connection.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
-+
-+ if response == None:
-+ error_message = "No response while evacuating instance"
-+ elif response.status_code == 200:
-+ success = True
-+ error_message = response.reason
-+ else:
-+ error_message = response.reason
-+
-+ except Exception as e:
-+ error_message = "Error while evacuating instance: %s" % e
-+
-+ return {
-+ "uuid": server,
-+ "accepted": success,
-+ "reason": error_message,
-+ }
-+
-+def _is_server_evacuable(server, evac_flavors, evac_images):
-+ if server.flavor.get('id') in evac_flavors:
-+ return True
-+ if hasattr(server.image, 'get'):
-+ if server.image.get('id') in evac_images:
-+ return True
-+ logging.debug("Instance %s is not evacuable" % server.image.get('id'))
-+ return False
-+
-+def _get_evacuable_flavors(connection):
-+ result = []
-+ flavors = connection.flavors.list()
-+ # Since the detailed view for all flavors doesn't provide the extra specs,
-+ # we need to call each of the flavor to get them.
-+ for flavor in flavors:
-+ tag = flavor.get_keys().get(EVACUABLE_TAG)
-+ if tag and tag.strip().lower() in TRUE_TAGS:
-+ result.append(flavor.id)
-+ return result
-+
-+def _get_evacuable_images(connection):
-+ result = []
-+ images = []
-+ if hasattr(connection, "images"):
-+ images = connection.images.list(detailed=True)
-+ elif hasattr(connection, "glance"):
-+ # OSP12+
-+ images = connection.glance.list()
-+
-+ for image in images:
-+ if hasattr(image, 'metadata'):
-+ tag = image.metadata.get(EVACUABLE_TAG)
-+ if tag and tag.strip().lower() in TRUE_TAGS:
-+ result.append(image.id)
-+ elif hasattr(image, 'tags'):
-+ # OSP12+
-+ if EVACUABLE_TAG in image.tags:
-+ result.append(image.id)
-+ return result
-+
-+def _host_evacuate(connection, options):
-+ result = True
-+ images = _get_evacuable_images(connection)
-+ flavors = _get_evacuable_flavors(connection)
-+ servers = connection.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
-+
-+ if options["--instance-filtering"] == "False":
-+ logging.debug("Not evacuating anything")
-+ evacuables = []
-+ elif len(flavors) or len(images):
-+ logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
-+ # Identify all evacuable servers
-+ logging.debug("Checking %s" % repr(servers))
-+ evacuables = [server for server in servers
-+ if _is_server_evacuable(server, flavors, images)]
-+ logging.debug("Evacuating %s" % repr(evacuables))
-+ else:
-+ logging.debug("Evacuating all images and flavors")
-+ evacuables = servers
-+
-+ if options["--no-shared-storage"] != "False":
-+ on_shared_storage = False
-+ else:
-+ on_shared_storage = True
-+
-+ for server in evacuables:
-+ logging.debug("Processing %s" % server)
-+ if hasattr(server, 'id'):
-+ response = _server_evacuate(connection, server.id, on_shared_storage)
-+ if response["accepted"]:
-+ logging.debug("Evacuated %s from %s: %s" %
-+ (response["uuid"], options["--plug"], response["reason"]))
-+ else:
-+ logging.error("Evacuation of %s on %s failed: %s" %
-+ (response["uuid"], options["--plug"], response["reason"]))
-+ result = False
-+ else:
-+ logging.error("Could not evacuate instance: %s" % server.to_dict())
-+ # Should a malformed instance result in a failed evacuation?
-+ # result = False
-+ return result
-+
-+def set_attrd_status(host, status, options):
-+ logging.debug("Setting fencing status for %s to %s" % (host, status))
-+ run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
-+
-+def set_power_status(connection, options):
-+ logging.debug("set action: " + options["--action"])
-+
-+ if not connection:
-+ return
-+
-+ if options["--action"] == "off" and not _host_evacuate(options):
-+ sys.exit(1)
-+
-+ sys.exit(0)
-+
-+def get_plugs_list(connection, options):
-+ result = {}
-+
-+ if connection:
-+ services = connection.services.list(binary="nova-compute")
-+ for service in services:
-+ longhost = service.host
-+ shorthost = longhost.split('.')[0]
-+ result[longhost] = ("", None)
-+ result[shorthost] = ("", None)
-+ return result
-+
-+def create_nova_connection(options):
-+ nova = None
-+
-+ try:
-+ from novaclient import client
-+ from novaclient.exceptions import NotAcceptable
-+ except ImportError:
-+ fail_usage("Nova not found or not accessible")
-+
-+ versions = [ "2.11", "2" ]
-+ for version in versions:
-+ clientargs = inspect.getargspec(client.Client).varargs
-+
-+ # Some versions of Openstack prior to Ocata only
-+ # supported positional arguments for username,
-+ # password and tenant.
-+ #
-+ # Versions since Ocata only support named arguments.
-+ #
-+ # So we need to use introspection to figure out how to
-+ # create a Nova client.
-+ #
-+ # Happy days
-+ #
-+ if clientargs:
-+ # OSP < 11
-+ # ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
-+ # varargs=None,
-+ # keywords='kwargs', defaults=(None, None, None, None))
-+ nova = client.Client(version,
-+ options["--username"],
-+ options["--password"],
-+ options["--tenant-name"],
-+ options["--auth-url"],
-+ insecure=options["--insecure"],
-+ region_name=options["--region-name"],
-+ endpoint_type=options["--endpoint-type"],
-+ http_log_debug=options.has_key("--verbose"))
-+ else:
-+ # OSP >= 11
-+ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
-+ nova = client.Client(version,
-+ username=options["--username"],
-+ password=options["--password"],
-+ tenant_name=options["--tenant-name"],
-+ auth_url=options["--auth-url"],
-+ insecure=options["--insecure"],
-+ region_name=options["--region-name"],
-+ endpoint_type=options["--endpoint-type"],
-+ http_log_debug=options.has_key("--verbose"))
-+
-+ try:
-+ nova.hypervisors.list()
-+ return nova
-+
-+ except NotAcceptable as e:
-+ logging.warning(e)
-+
-+ except Exception as e:
-+ logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
-+
-+ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
-+ return None
-+
-+def define_new_opts():
-+ all_opt["endpoint_type"] = {
-+ "getopt" : "e:",
-+ "longopt" : "endpoint-type",
-+ "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)",
-+ "required" : "0",
-+ "shortdesc" : "Nova Endpoint type",
-+ "default" : "internalURL",
-+ "order": 1,
-+ }
-+ all_opt["tenant_name"] = {
-+ "getopt" : "t:",
-+ "longopt" : "tenant-name",
-+ "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
-+ "required" : "0",
-+ "shortdesc" : "Keystone Admin Tenant",
-+ "default" : "",
-+ "order": 1,
-+ }
-+ all_opt["auth_url"] = {
-+ "getopt" : "k:",
-+ "longopt" : "auth-url",
-+ "help" : "-k, --auth-url=[url] Keystone Admin Auth URL",
-+ "required" : "0",
-+ "shortdesc" : "Keystone Admin Auth URL",
-+ "default" : "",
-+ "order": 1,
-+ }
-+ all_opt["region_name"] = {
-+ "getopt" : "",
-+ "longopt" : "region-name",
-+ "help" : "--region-name=[region] Region Name",
-+ "required" : "0",
-+ "shortdesc" : "Region Name",
-+ "default" : "",
-+ "order": 1,
-+ }
-+ all_opt["insecure"] = {
-+ "getopt" : "",
-+ "longopt" : "insecure",
-+ "help" : "--insecure Explicitly allow agent to perform \"insecure\" TLS (https) requests",
-+ "required" : "0",
-+ "shortdesc" : "Allow Insecure TLS Requests",
-+ "default" : "False",
-+ "order": 2,
-+ }
-+ all_opt["domain"] = {
-+ "getopt" : "d:",
-+ "longopt" : "domain",
-+ "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
-+ "required" : "0",
-+ "shortdesc" : "DNS domain in which hosts live",
-+ "order": 5,
-+ }
-+ all_opt["instance_filtering"] = {
-+ "getopt" : "",
-+ "longopt" : "instance-filtering",
-+ "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)",
-+ "required" : "0",
-+ "shortdesc" : "Allow instances to be evacuated",
-+ "default" : "True",
-+ "order": 5,
-+ }
-+ all_opt["no_shared_storage"] = {
-+ "getopt" : "",
-+ "longopt" : "no-shared-storage",
-+ "help" : "--no-shared-storage Disable functionality for shared storage",
-+ "required" : "0",
-+ "shortdesc" : "Disable functionality for dealing with shared storage",
-+ "default" : "False",
-+ "order": 5,
-+ }
-+
-+def main():
-+ atexit.register(atexit_handler)
-+
-+ device_opt = ["login", "passwd", "tenant_name", "auth_url",
-+ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
-+ "instance_filtering", "insecure", "region_name"]
-+ define_new_opts()
-+ all_opt["shell_timeout"]["default"] = "180"
-+
-+ options = check_input(device_opt, process_input(device_opt))
-+
-+ docs = {}
-+ docs["shortdesc"] = "Fence agent for the automatic resurrection of OpenStack compute instances"
-+ docs["longdesc"] = "Used to reschedule flagged instances"
-+ docs["vendorurl"] = ""
-+
-+ show_docs(options, docs)
-+
-+ run_delay(options)
-+
-+ connection = create_nova_connection(options)
-+
-+ # Un-evacuating a server doesn't make sense
-+ if options["--action"] in ["on"]:
-+ logging.error("Action %s is not supported by this agent" % (options["--action"]))
-+ sys.exit(1)
-+
-+ if options["--action"] in ["off", "reboot"]:
-+ status = get_power_status(connection, options)
-+ if status != "off":
-+ logging.error("Cannot resurrect instances from %s in state '%s'" % (options["--plug"], status))
-+ sys.exit(1)
-+
-+ elif not _host_evacuate(connection, options):
-+ logging.error("Resurrection of instances from %s failed" % (options["--plug"]))
-+ sys.exit(1)
-+
-+ logging.info("Resurrection of instances from %s complete" % (options["--plug"]))
-+ sys.exit(0)
-+
-+ result = fence_action(connection, options, set_power_status, get_power_status, get_plugs_list, None)
-+ sys.exit(result)
-+
-+if __name__ == "__main__":
-+ main()
-diff -uNr a/fence/agents/compute/Makefile.am b/fence/agents/compute/Makefile.am
---- a/fence/agents/compute/Makefile.am 2017-09-27 15:01:34.844643650 +0200
-+++ b/fence/agents/compute/Makefile.am 2017-09-27 15:57:50.963839738 +0200
-@@ -1,14 +1,14 @@
- MAINTAINERCLEANFILES = Makefile.in
-
--TARGET = fence_compute
-+TARGET = fence_compute fence_evacuate
-
--SRC = $(TARGET).py
-+SRC = $(TARGET:=.py)
-
- EXTRA_DIST = $(SRC)
-
- sbin_SCRIPTS = $(TARGET)
-
--man_MANS = $(TARGET).8
-+man_MANS = $(TARGET:=.8)
-
- FENCE_TEST_ARGS = -l test -p test -n 1
-
-diff -uNr a/tests/data/metadata/fence_evacuate.xml b/tests/data/metadata/fence_evacuate.xml
---- a/tests/data/metadata/fence_evacuate.xml 1970-01-01 01:00:00.000000000 +0100
-+++ b/tests/data/metadata/fence_evacuate.xml 2017-09-27 15:28:10.978063549 +0200
-@@ -0,0 +1,163 @@
-+
-+
-+Used to reschedule flagged instances
-+
-+
-+
-+
-+
-+ Keystone Admin Tenant
-+
-+
-+
-+
-+ Keystone Admin Auth URL
-+
-+
-+
-+
-+ Physical plug number, name of virtual machine or UUID
-+
-+
-+
-+
-+ Script to retrieve password
-+
-+
-+
-+
-+ Region Name
-+
-+
-+
-+
-+ Login password or passphrase
-+
-+
-+
-+
-+ Nova Endpoint type
-+
-+
-+
-+
-+ Fencing Action
-+
-+
-+
-+
-+ Login Name
-+
-+
-+
-+
-+ Physical plug number, name of virtual machine or UUID
-+
-+
-+
-+
-+ Login Name
-+
-+
-+
-+
-+ Login password or passphrase
-+
-+
-+
-+
-+ Script to retrieve password
-+
-+
-+
-+
-+ Allow Insecure TLS Requests
-+
-+
-+
-+
-+ DNS domain in which hosts live
-+
-+
-+
-+
-+ Allow instances to be evacuated
-+
-+
-+
-+
-+ Disable functionality for dealing with shared storage
-+
-+
-+
-+
-+ Verbose mode
-+
-+
-+
-+
-+ Write debug information to given file
-+
-+
-+
-+
-+ Write debug information to given file
-+
-+
-+
-+
-+ Display version information and exit
-+
-+
-+
-+
-+ Display help and exit
-+
-+
-+
-+
-+ Separator for CSV created by operation list
-+
-+
-+
-+
-+ Wait X seconds after issuing ON/OFF
-+
-+
-+
-+
-+ Wait X seconds for cmd prompt after login
-+
-+
-+
-+
-+ Wait X seconds before fencing is started
-+
-+
-+
-+
-+ Test X seconds for status change after ON/OFF
-+
-+
-+
-+
-+ Wait X seconds for cmd prompt after issuing command
-+
-+
-+
-+
-+ Count of attempts to retry power on
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
diff --git a/SOURCES/bz1497241-fence_compute-fence_scsi-fix-parameters.patch b/SOURCES/bz1497241-fence_compute-fence_scsi-fix-parameters.patch
deleted file mode 100644
index 2d24cf4..0000000
--- a/SOURCES/bz1497241-fence_compute-fence_scsi-fix-parameters.patch
+++ /dev/null
@@ -1,18 +0,0 @@
---- a/fence/agents/lib/fencing.py.py 2017-09-19 12:29:04.158438532 +0200
-+++ b/fence/agents/lib/fencing.py.py 2017-09-19 12:48:22.252509114 +0200
-@@ -705,11 +705,12 @@
- continue
-
- (name, value) = (line + "=").split("=", 1)
-- name = name.replace("-", "_");
- value = value[:-1]
-
-- if name in mapping_longopt_names:
-- name = mapping_longopt_names[name]
-+ if name.replace("-", "_") in mapping_longopt_names:
-+ name = mapping_longopt_names[name.replace("-", "_")]
-+ elif name.replace("_", "-") in mapping_longopt_names:
-+ name = mapping_longopt_names[name.replace("_", "-")]
-
- if avail_opt.count(name) == 0 and name in ["nodename"]:
- continue
diff --git a/SOURCES/bz1519370-fence_ilo3-default-to-onoff.patch b/SOURCES/bz1519370-fence_ilo3-default-to-onoff.patch
new file mode 100644
index 0000000..e937747
--- /dev/null
+++ b/SOURCES/bz1519370-fence_ilo3-default-to-onoff.patch
@@ -0,0 +1,29 @@
+From 1dd5cba2056fb7f378f65cc9e109378a3e2e6032 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Fri, 5 Jan 2018 17:02:19 +0100
+Subject: [PATCH] fence_ilo3: default to onoff
+
+---
+ fence/agents/ipmilan/fence_ipmilan.py | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/fence/agents/ipmilan/fence_ipmilan.py b/fence/agents/ipmilan/fence_ipmilan.py
+index 86caf070..453d7365 100644
+--- a/fence/agents/ipmilan/fence_ipmilan.py
++++ b/fence/agents/ipmilan/fence_ipmilan.py
+@@ -166,14 +166,12 @@ def main():
+ all_opt["power_wait"]["default"] = 2
+ if os.path.basename(sys.argv[0]) == "fence_ilo3":
+ all_opt["power_wait"]["default"] = "4"
+- all_opt["method"]["default"] = "cycle"
+ all_opt["lanplus"]["default"] = "1"
+ elif os.path.basename(sys.argv[0]) == "fence_ilo4":
+ all_opt["lanplus"]["default"] = "1"
+
+ all_opt["ipport"]["default"] = "623"
+- if all_opt["method"]["default"] == "cycle":
+- all_opt["method"]["help"] = "-m, --method=[method] Method to fence (onoff|cycle) (Default: cycle)\n" \
++ all_opt["method"]["help"] = "-m, --method=[method] Method to fence (onoff|cycle) (Default: cycle)\n" \
+ "WARNING! This fence agent might report success before the node is powered off. " \
+ "You should use -m/method onoff if your fence device works correctly with that option."
+
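The hunk above relies on fence_ipmilan being installed under several names and branching on its own basename: after the patch, fence_ilo3 keeps its longer power_wait and the lanplus default, but no longer overrides the library's onoff method with cycle. A reduced sketch of the dispatch pattern:

    # Reduced sketch of fence_ipmilan's basename dispatch after this patch.
    import os
    import sys

    all_opt = {"method":     {"default": "onoff"},
               "power_wait": {"default": "2"},
               "lanplus":    {"default": "0"}}

    agent = os.path.basename(sys.argv[0])
    if agent == "fence_ilo3":
        all_opt["power_wait"]["default"] = "4"  # iLO3 needs extra settle time
        all_opt["lanplus"]["default"] = "1"
        # no method override any more: onoff is the safer default
    elif agent == "fence_ilo4":
        all_opt["lanplus"]["default"] = "1"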
diff --git a/SOURCES/bz1533170-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch b/SOURCES/bz1533170-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
new file mode 100644
index 0000000..f653339
--- /dev/null
+++ b/SOURCES/bz1533170-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
@@ -0,0 +1,281 @@
+diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
+--- a/fence/agents/compute/fence_compute.py 2018-01-10 13:46:17.965287100 +0100
++++ b/fence/agents/compute/fence_compute.py 2018-01-11 10:01:12.288043619 +0100
+@@ -11,12 +11,6 @@
+ from fencing import *
+ from fencing import fail_usage, is_executable, run_command, run_delay
+
+-#BEGIN_VERSION_GENERATION
+-RELEASE_VERSION="4.0.11"
+-BUILD_DATE="(built Wed Nov 12 06:33:38 EST 2014)"
+-REDHAT_COPYRIGHT="Copyright (C) Red Hat, Inc. 2004-2010 All rights reserved."
+-#END_VERSION_GENERATION
+-
+ override_status = ""
+
+ EVACUABLE_TAG = "evacuable"
+@@ -264,13 +258,37 @@
+ except ImportError:
+ fail_usage("Nova not found or not accessible")
+
+- versions = [ "2.11", "2" ]
+- for version in versions:
+- clientargs = inspect.getargspec(client.Client).varargs
++ from keystoneauth1 import loading
++ from keystoneauth1 import session
++ from keystoneclient import discover
++
++ # Prefer the oldest and strip the leading 'v'
++ keystone_versions = discover.available_versions(options["--auth-url"])
++ keystone_version = keystone_versions[0]['id'][1:]
++ kwargs = dict(
++ auth_url=options["--auth-url"],
++ username=options["--username"],
++ password=options["--password"]
++ )
++
++ if discover.version_match("2", keystone_version):
++ kwargs["tenant_name"] = options["--tenant-name"]
++
++ elif discover.version_match("3", keystone_version):
++ kwargs["project_name"] = options["--tenant-name"]
++ kwargs["user_domain_name"] = options["--user-domain"]
++ kwargs["project_domain_name"] = options["--project-domain"]
++
++ loader = loading.get_plugin_loader('password')
++ keystone_auth = loader.load_from_options(**kwargs)
++ keystone_session = session.Session(auth=keystone_auth, verify=(not options["--insecure"]))
+
++ nova_versions = [ "2.11", "2" ]
++ for version in nova_versions:
++ clientargs = inspect.getargspec(client.Client).varargs
+ # Some versions of Openstack prior to Ocata only
+ # supported positional arguments for username,
+- # password and tenant.
++ # password, and tenant.
+ #
+ # Versions since Ocata only support named arguments.
+ #
+@@ -285,25 +303,22 @@
+ # varargs=None,
+ # keywords='kwargs', defaults=(None, None, None, None))
+ nova = client.Client(version,
+- options["--username"],
+- options["--password"],
+- options["--tenant-name"],
+- options["--auth-url"],
++ None, # User
++ None, # Password
++ None, # Tenant
++ None, # Auth URL
+ insecure=options["--insecure"],
+ region_name=options["--region-name"],
+ endpoint_type=options["--endpoint-type"],
++ session=keystone_session, auth=keystone_auth,
+ http_log_debug=options.has_key("--verbose"))
+ else:
+ # OSP >= 11
+ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
+ nova = client.Client(version,
+- username=options["--username"],
+- password=options["--password"],
+- tenant_name=options["--tenant-name"],
+- auth_url=options["--auth-url"],
+- insecure=options["--insecure"],
+ region_name=options["--region-name"],
+ endpoint_type=options["--endpoint-type"],
++ session=keystone_session, auth=keystone_auth,
+ http_log_debug=options.has_key("--verbose"))
+
+ try:
+@@ -316,7 +331,7 @@
+ except Exception as e:
+ logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
+
+- logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
++ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(nova_versions))
+ return None
+
+ def define_new_opts():
+@@ -332,12 +347,30 @@
+ all_opt["tenant_name"] = {
+ "getopt" : "t:",
+ "longopt" : "tenant-name",
+- "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
++ "help" : "-t, --tenant-name=[name] Keystone v2 Tenant or v3 Project Name",
+ "required" : "0",
+- "shortdesc" : "Keystone Admin Tenant",
++ "shortdesc" : "Keystone Admin Tenant or v3 Project",
+ "default" : "",
+ "order": 1,
+ }
++ all_opt["user-domain"] = {
++ "getopt" : "u:",
++ "longopt" : "user-domain",
++ "help" : "-u, --user-domain=[name] Keystone v3 User Domain",
++ "required" : "0",
++ "shortdesc" : "Keystone v3 User Domain",
++ "default" : "Default",
++ "order": 2,
++ }
++ all_opt["project-domain"] = {
++ "getopt" : "P:",
++ "longopt" : "project-domain",
++ "help" : "-d, --project-domain=[name] Keystone v3 Project Domain",
++ "required" : "0",
++ "shortdesc" : "Keystone v3 Project Domain",
++ "default" : "Default",
++ "order": 2,
++ }
+ all_opt["auth_url"] = {
+ "getopt" : "k:",
+ "longopt" : "auth-url",
+@@ -365,7 +398,7 @@
+ "default" : "False",
+ "order": 2,
+ }
+- all_opt["domain"] = {
++ all_opt["compute-domain"] = {
+ "getopt" : "d:",
+ "longopt" : "domain",
+ "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
+@@ -418,8 +451,8 @@
+ atexit.register(atexit_handler)
+
+ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
+- "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
+- "record_only", "instance_filtering", "insecure", "region_name"]
++ "no_login", "no_password", "port", "compute-domain", "project-domain", "user-domain",
++ "no_shared_storage", "endpoint_type", "record_only", "instance_filtering", "insecure", "region_name"]
+ define_new_opts()
+ all_opt["shell_timeout"]["default"] = "180"
+
+diff -uNr a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py
+--- a/fence/agents/compute/fence_evacuate.py 2018-01-10 13:46:17.966287090 +0100
++++ b/fence/agents/compute/fence_evacuate.py 2018-01-10 13:48:19.375158060 +0100
+@@ -191,13 +191,38 @@
+ except ImportError:
+ fail_usage("Nova not found or not accessible")
+
++ from keystoneauth1 import loading
++ from keystoneauth1 import session
++ from keystoneclient import discover
++
++ # Prefer the oldest and strip the leading 'v'
++ keystone_versions = discover.available_versions(options["--auth-url"])
++ keystone_version = keystone_versions[0]['id'][1:]
++ kwargs = dict(
++ auth_url=options["--auth-url"],
++ username=options["--username"],
++ password=options["--password"]
++ )
++
++ if discover.version_match("2", keystone_version):
++ kwargs["tenant_name"] = options["--tenant-name"]
++
++ elif discover.version_match("3", keystone_version):
++ kwargs["project_name"] = options["--tenant-name"]
++ kwargs["user_domain_name"] = options["--user-domain"]
++ kwargs["project_domain_name"] = options["--project-domain"]
++
++ loader = loading.get_plugin_loader('password')
++ keystone_auth = loader.load_from_options(**kwargs)
++ keystone_session = session.Session(auth=keystone_auth, verify=(not options["--insecure"]))
++
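The block just added negotiates Keystone before novaclient ever sees a credential: the oldest advertised API version wins, and v2 and v3 differ only in which project/domain kwargs the password plugin receives. A condensed, hedged sketch of the same negotiation; the endpoint and credentials are placeholders:

    # Condensed sketch of the negotiation above; URL/credentials are placeholders.
    from keystoneauth1 import loading, session
    from keystoneclient import discover

    auth_url = "http://keystone.example.com:5000"
    ks_version = discover.available_versions(auth_url)[0]["id"][1:]  # strip 'v'

    kwargs = dict(auth_url=auth_url, username="admin", password="secret")
    if discover.version_match("2", ks_version):
        kwargs["tenant_name"] = "admin"
    elif discover.version_match("3", ks_version):
        kwargs["project_name"] = "admin"
        kwargs["user_domain_name"] = "Default"
        kwargs["project_domain_name"] = "Default"

    auth = loading.get_plugin_loader("password").load_from_options(**kwargs)
    sess = session.Session(auth=auth, verify=True)
    # sess/auth are then handed to novaclient via session= and auth= below.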
+ versions = [ "2.11", "2" ]
+ for version in versions:
+ clientargs = inspect.getargspec(client.Client).varargs
+
+ # Some versions of Openstack prior to Ocata only
+ # supported positional arguments for username,
+- # password and tenant.
++ # password, and tenant.
+ #
+ # Versions since Ocata only support named arguments.
+ #
+@@ -212,25 +237,22 @@
+ # varargs=None,
+ # keywords='kwargs', defaults=(None, None, None, None))
+ nova = client.Client(version,
+- options["--username"],
+- options["--password"],
+- options["--tenant-name"],
+- options["--auth-url"],
++ None, # User
++ None, # Password
++ None, # Tenant
++ None, # Auth URL
+ insecure=options["--insecure"],
+ region_name=options["--region-name"],
+ endpoint_type=options["--endpoint-type"],
++ session=keystone_session, auth=keystone_auth,
+ http_log_debug=options.has_key("--verbose"))
+ else:
+ # OSP >= 11
+ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
+ nova = client.Client(version,
+- username=options["--username"],
+- password=options["--password"],
+- tenant_name=options["--tenant-name"],
+- auth_url=options["--auth-url"],
+- insecure=options["--insecure"],
+ region_name=options["--region-name"],
+ endpoint_type=options["--endpoint-type"],
++ session=keystone_session, auth=keystone_auth,
+ http_log_debug=options.has_key("--verbose"))
+
+ try:
+@@ -259,12 +281,30 @@
+ all_opt["tenant_name"] = {
+ "getopt" : "t:",
+ "longopt" : "tenant-name",
+- "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
++ "help" : "-t, --tenant-name=[name] Keystone v2 Tenant or v3 Project Name",
+ "required" : "0",
+- "shortdesc" : "Keystone Admin Tenant",
++ "shortdesc" : "Keystone Admin Tenant or v3 Project",
+ "default" : "",
+ "order": 1,
+ }
++ all_opt["user-domain"] = {
++ "getopt" : "u:",
++ "longopt" : "user-domain",
++ "help" : "-u, --user-domain=[name] Keystone v3 User Domain",
++ "required" : "0",
++ "shortdesc" : "Keystone v3 User Domain",
++ "default" : "Default",
++ "order": 2,
++ }
++ all_opt["project-domain"] = {
++ "getopt" : "P:",
++ "longopt" : "project-domain",
++ "help" : "-d, --project-domain=[name] Keystone v3 Project Domain",
++ "required" : "0",
++ "shortdesc" : "Keystone v3 Project Domain",
++ "default" : "Default",
++ "order": 2,
++ }
+ all_opt["auth_url"] = {
+ "getopt" : "k:",
+ "longopt" : "auth-url",
+@@ -292,7 +332,7 @@
+ "default" : "False",
+ "order": 2,
+ }
+- all_opt["domain"] = {
++ all_opt["compute-domain"] = {
+ "getopt" : "d:",
+ "longopt" : "domain",
+ "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
+@@ -323,8 +363,9 @@
+ atexit.register(atexit_handler)
+
+ device_opt = ["login", "passwd", "tenant_name", "auth_url",
+- "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
+- "instance_filtering", "insecure", "region_name"]
++ "no_login", "no_password", "port", "compute-domain", "project-domain",
++ "user-domain", "no_shared_storage", "endpoint_type",
++ "instance_filtering", "insecure", "region_name"]
+ define_new_opts()
+ all_opt["shell_timeout"]["default"] = "180"
+
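One subtlety in the option renames above: only the all_opt dictionary keys change (domain becomes compute-domain, alongside the new user-domain and project-domain entries); each entry's longopt still decides what the user types, so --domain keeps working while the keys stop colliding internally. A toy illustration; parse_flag() is hypothetical, not a fencing-library function:

    # Toy illustration: the dict key is internal, "longopt" is the CLI surface.
    all_opt = {
        "compute-domain": {"getopt": "d:", "longopt": "domain"},
        "user-domain":    {"getopt": "u:", "longopt": "user-domain"},
        "project-domain": {"getopt": "P:", "longopt": "project-domain"},
    }

    def parse_flag(flag):
        for key, spec in all_opt.items():
            if flag in ("--" + spec["longopt"], "-" + spec["getopt"][0]):
                return key
        return None

    print(parse_flag("--domain"))  # -> 'compute-domain'
    print(parse_flag("-P"))        # -> 'project-domain'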
diff --git a/SOURCES/bz1535415-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch b/SOURCES/bz1535415-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
deleted file mode 100644
index f653339..0000000
--- a/SOURCES/bz1535415-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
+++ /dev/null
@@ -1,281 +0,0 @@
-diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
---- a/fence/agents/compute/fence_compute.py 2018-01-10 13:46:17.965287100 +0100
-+++ b/fence/agents/compute/fence_compute.py 2018-01-11 10:01:12.288043619 +0100
-@@ -11,12 +11,6 @@
- from fencing import *
- from fencing import fail_usage, is_executable, run_command, run_delay
-
--#BEGIN_VERSION_GENERATION
--RELEASE_VERSION="4.0.11"
--BUILD_DATE="(built Wed Nov 12 06:33:38 EST 2014)"
--REDHAT_COPYRIGHT="Copyright (C) Red Hat, Inc. 2004-2010 All rights reserved."
--#END_VERSION_GENERATION
--
- override_status = ""
-
- EVACUABLE_TAG = "evacuable"
-@@ -264,13 +258,37 @@
- except ImportError:
- fail_usage("Nova not found or not accessible")
-
-- versions = [ "2.11", "2" ]
-- for version in versions:
-- clientargs = inspect.getargspec(client.Client).varargs
-+ from keystoneauth1 import loading
-+ from keystoneauth1 import session
-+ from keystoneclient import discover
-+
-+ # Prefer the oldest and strip the leading 'v'
-+ keystone_versions = discover.available_versions(options["--auth-url"])
-+ keystone_version = keystone_versions[0]['id'][1:]
-+ kwargs = dict(
-+ auth_url=options["--auth-url"],
-+ username=options["--username"],
-+ password=options["--password"]
-+ )
-+
-+ if discover.version_match("2", keystone_version):
-+ kwargs["tenant_name"] = options["--tenant-name"]
-+
-+ elif discover.version_match("3", keystone_version):
-+ kwargs["project_name"] = options["--tenant-name"]
-+ kwargs["user_domain_name"] = options["--user-domain"]
-+ kwargs["project_domain_name"] = options["--project-domain"]
-+
-+ loader = loading.get_plugin_loader('password')
-+ keystone_auth = loader.load_from_options(**kwargs)
-+ keystone_session = session.Session(auth=keystone_auth, verify=(not options["--insecure"]))
-
-+ nova_versions = [ "2.11", "2" ]
-+ for version in nova_versions:
-+ clientargs = inspect.getargspec(client.Client).varargs
- # Some versions of Openstack prior to Ocata only
- # supported positional arguments for username,
-- # password and tenant.
-+ # password, and tenant.
- #
- # Versions since Ocata only support named arguments.
- #
-@@ -285,25 +303,22 @@
- # varargs=None,
- # keywords='kwargs', defaults=(None, None, None, None))
- nova = client.Client(version,
-- options["--username"],
-- options["--password"],
-- options["--tenant-name"],
-- options["--auth-url"],
-+ None, # User
-+ None, # Password
-+ None, # Tenant
-+ None, # Auth URL
- insecure=options["--insecure"],
- region_name=options["--region-name"],
- endpoint_type=options["--endpoint-type"],
-+ session=keystone_session, auth=keystone_auth,
- http_log_debug=options.has_key("--verbose"))
- else:
- # OSP >= 11
- # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
- nova = client.Client(version,
-- username=options["--username"],
-- password=options["--password"],
-- tenant_name=options["--tenant-name"],
-- auth_url=options["--auth-url"],
-- insecure=options["--insecure"],
- region_name=options["--region-name"],
- endpoint_type=options["--endpoint-type"],
-+ session=keystone_session, auth=keystone_auth,
- http_log_debug=options.has_key("--verbose"))
-
- try:
-@@ -316,7 +331,7 @@
- except Exception as e:
- logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
-
-- logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
-+ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(nova_versions))
- return None
-
- def define_new_opts():
-@@ -332,12 +347,30 @@
- all_opt["tenant_name"] = {
- "getopt" : "t:",
- "longopt" : "tenant-name",
-- "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
-+ "help" : "-t, --tenant-name=[name] Keystone v2 Tenant or v3 Project Name",
- "required" : "0",
-- "shortdesc" : "Keystone Admin Tenant",
-+ "shortdesc" : "Keystone Admin Tenant or v3 Project",
- "default" : "",
- "order": 1,
- }
-+ all_opt["user-domain"] = {
-+ "getopt" : "u:",
-+ "longopt" : "user-domain",
-+ "help" : "-u, --user-domain=[name] Keystone v3 User Domain",
-+ "required" : "0",
-+ "shortdesc" : "Keystone v3 User Domain",
-+ "default" : "Default",
-+ "order": 2,
-+ }
-+ all_opt["project-domain"] = {
-+ "getopt" : "P:",
-+ "longopt" : "project-domain",
-+ "help" : "-d, --project-domain=[name] Keystone v3 Project Domain",
-+ "required" : "0",
-+ "shortdesc" : "Keystone v3 Project Domain",
-+ "default" : "Default",
-+ "order": 2,
-+ }
- all_opt["auth_url"] = {
- "getopt" : "k:",
- "longopt" : "auth-url",
-@@ -365,7 +398,7 @@
- "default" : "False",
- "order": 2,
- }
-- all_opt["domain"] = {
-+ all_opt["compute-domain"] = {
- "getopt" : "d:",
- "longopt" : "domain",
- "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
-@@ -418,8 +451,8 @@
- atexit.register(atexit_handler)
-
- device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
-- "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
-- "record_only", "instance_filtering", "insecure", "region_name"]
-+ "no_login", "no_password", "port", "compute-domain", "project-domain", "user-domain",
-+ "no_shared_storage", "endpoint_type", "record_only", "instance_filtering", "insecure", "region_name"]
- define_new_opts()
- all_opt["shell_timeout"]["default"] = "180"
-
-diff -uNr a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py
---- a/fence/agents/compute/fence_evacuate.py 2018-01-10 13:46:17.966287090 +0100
-+++ b/fence/agents/compute/fence_evacuate.py 2018-01-10 13:48:19.375158060 +0100
-@@ -191,13 +191,38 @@
- except ImportError:
- fail_usage("Nova not found or not accessible")
-
-+ from keystoneauth1 import loading
-+ from keystoneauth1 import session
-+ from keystoneclient import discover
-+
-+ # Prefer the oldest and strip the leading 'v'
-+ keystone_versions = discover.available_versions(options["--auth-url"])
-+ keystone_version = keystone_versions[0]['id'][1:]
-+ kwargs = dict(
-+ auth_url=options["--auth-url"],
-+ username=options["--username"],
-+ password=options["--password"]
-+ )
-+
-+ if discover.version_match("2", keystone_version):
-+ kwargs["tenant_name"] = options["--tenant-name"]
-+
-+ elif discover.version_match("3", keystone_version):
-+ kwargs["project_name"] = options["--tenant-name"]
-+ kwargs["user_domain_name"] = options["--user-domain"]
-+ kwargs["project_domain_name"] = options["--project-domain"]
-+
-+ loader = loading.get_plugin_loader('password')
-+ keystone_auth = loader.load_from_options(**kwargs)
-+ keystone_session = session.Session(auth=keystone_auth, verify=(not options["--insecure"]))
-+
- versions = [ "2.11", "2" ]
- for version in versions:
- clientargs = inspect.getargspec(client.Client).varargs
-
- # Some versions of Openstack prior to Ocata only
- # supported positional arguments for username,
-- # password and tenant.
-+ # password, and tenant.
- #
- # Versions since Ocata only support named arguments.
- #
-@@ -212,25 +237,22 @@
- # varargs=None,
- # keywords='kwargs', defaults=(None, None, None, None))
- nova = client.Client(version,
-- options["--username"],
-- options["--password"],
-- options["--tenant-name"],
-- options["--auth-url"],
-+ None, # User
-+ None, # Password
-+ None, # Tenant
-+ None, # Auth URL
- insecure=options["--insecure"],
- region_name=options["--region-name"],
- endpoint_type=options["--endpoint-type"],
-+ session=keystone_session, auth=keystone_auth,
- http_log_debug=options.has_key("--verbose"))
- else:
- # OSP >= 11
- # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
- nova = client.Client(version,
-- username=options["--username"],
-- password=options["--password"],
-- tenant_name=options["--tenant-name"],
-- auth_url=options["--auth-url"],
-- insecure=options["--insecure"],
- region_name=options["--region-name"],
- endpoint_type=options["--endpoint-type"],
-+ session=keystone_session, auth=keystone_auth,
- http_log_debug=options.has_key("--verbose"))
-
- try:
-@@ -259,12 +281,30 @@
- all_opt["tenant_name"] = {
- "getopt" : "t:",
- "longopt" : "tenant-name",
-- "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
-+ "help" : "-t, --tenant-name=[name] Keystone v2 Tenant or v3 Project Name",
- "required" : "0",
-- "shortdesc" : "Keystone Admin Tenant",
-+ "shortdesc" : "Keystone Admin Tenant or v3 Project",
- "default" : "",
- "order": 1,
- }
-+ all_opt["user-domain"] = {
-+ "getopt" : "u:",
-+ "longopt" : "user-domain",
-+ "help" : "-u, --user-domain=[name] Keystone v3 User Domain",
-+ "required" : "0",
-+ "shortdesc" : "Keystone v3 User Domain",
-+ "default" : "Default",
-+ "order": 2,
-+ }
-+ all_opt["project-domain"] = {
-+ "getopt" : "P:",
-+ "longopt" : "project-domain",
-+ "help" : "-d, --project-domain=[name] Keystone v3 Project Domain",
-+ "required" : "0",
-+ "shortdesc" : "Keystone v3 Project Domain",
-+ "default" : "Default",
-+ "order": 2,
-+ }
- all_opt["auth_url"] = {
- "getopt" : "k:",
- "longopt" : "auth-url",
-@@ -292,7 +332,7 @@
- "default" : "False",
- "order": 2,
- }
-- all_opt["domain"] = {
-+ all_opt["compute-domain"] = {
- "getopt" : "d:",
- "longopt" : "domain",
- "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
-@@ -323,8 +363,9 @@
- atexit.register(atexit_handler)
-
- device_opt = ["login", "passwd", "tenant_name", "auth_url",
-- "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
-- "instance_filtering", "insecure", "region_name"]
-+ "no_login", "no_password", "port", "compute-domain", "project-domain",
-+ "user-domain", "no_shared_storage", "endpoint_type",
-+ "instance_filtering", "insecure", "region_name"]
- define_new_opts()
- all_opt["shell_timeout"]["default"] = "180"
-
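The all_opt entries added above follow the fencing library's option schema: "getopt" is the short option (a trailing colon means it takes an argument), "longopt" the long form, and only keys listed in device_opt are accepted by the agent, which is why "domain" is renamed to "compute-domain" once "user-domain" and "project-domain" join the list. One detail worth flagging: the project-domain entry declares getopt "P:" but its help string still advertises "-d", a leftover from copying the user-domain entry. A consistent entry would look like:

# Sketch of a fencing-library option definition (not part of the patch)
all_opt = {}
all_opt["project-domain"] = {
    "getopt"    : "P:",              # short option, takes an argument
    "longopt"   : "project-domain",  # accepted as --project-domain=[name]
    "help"      : "-P, --project-domain=[name] Keystone v3 Project Domain",
    "required"  : "0",
    "shortdesc" : "Keystone v3 Project Domain",
    "default"   : "Default",
    "order"     : 2,                 # position in help and metadata output
}

# The agent opts in to the options it understands:
device_opt = ["login", "passwd", "tenant_name", "auth_url",
              "user-domain", "project-domain", "compute-domain"]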
diff --git a/SPECS/fence-agents.spec b/SPECS/fence-agents.spec
index d489267..df6ef54 100644
--- a/SPECS/fence-agents.spec
+++ b/SPECS/fence-agents.spec
@@ -16,10 +16,10 @@
Name: fence-agents
Summary: Fence Agents for Red Hat Cluster
Version: 4.0.11
-Release: 66%{?alphatag:.%{alphatag}}%{?dist}.4
+Release: 86%{?alphatag:.%{alphatag}}%{?dist}
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
-URL: http://sourceware.org/cluster/wiki/
+URL: https://github.com/ClusterLabs/fence-agents
Source0: https://fedorahosted.org/releases/f/e/fence-agents/%{name}-%{version}.tar.xz
Patch0: bz1072564-1-add_ssl_secure_and_ssl_insecure.patch
Patch1: bz1072564-2-add_ssl_secure_and_ssl_insecure.patch
@@ -108,7 +108,7 @@ Patch83: bz1280139-fence_scsi-fix-persistentl-typo-in-short-desc.patch
Patch84: bz1280151-1-fence_scsi-remove-dev-dm-X-reference.patch
Patch85: bz1280151-2-fence_scsi-remove-dev-dm-X-reference.patch
Patch86: bz1287059-1-fence_rhevm-add-filter-header.patch
-#Patch87: bz1296201-fence_amt_ws-new-fence-agent.patch
+Patch87: bz1296201-fence_amt_ws-new-fence-agent.patch
Patch88: bz1313561-fence_compute-locate-all-instances-to-be-evacuated.patch
Patch89: bz1285523-1-fence_compute-taggable-instance-support.patch
Patch90: bz1285523-2-fence_compute-taggable-instance-support.patch
@@ -136,22 +136,36 @@ Patch111: bz1377972-2-CI-dont-test-paths-in-metadata.patch
Patch112: bz1426693-1-fence_compute-project_id-to-project_name.patch
Patch113: bz1426693-2-fence_compute-project_id-to-project_name.patch
Patch114: bz1459199-fence_vmware_soap-fix-for-selfsigned-certificate.patch
-Patch115: bz1479851-fence_compute-fence_scsi-fix-parameters.patch
-Patch116: bz1497072-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
-Patch117: bz1497241-fence_compute-fence_scsi-fix-parameters.patch
-Patch118: bz1535415-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
+Patch115: bz1473860-1-fence_compute-fence_scsi-fix-parameters.patch
+Patch116: bz1473860-2-fence_compute-fence_scsi-fix-parameters.patch
+Patch117: bz1461854-remove-list-when-not-supported.patch
+Patch118: bz1451776-1-fence_aws-new-fence-agent.patch
+Patch119: bz1496390-fence_compute-fence_evacuate-Instance-HA-OSP12.patch
+Patch120: bz1490475-fence_ilo_ssh-fix-hard-reset.patch
+Patch121: bz1455383-fence_scsi-FIPS-support.patch
+Patch122: bz1449183-fence_ipmilan-hexadecimal-key-auth.patch
+Patch123: bz1476009-fence_azure_arm-new-fence-agent.patch
+Patch124: bz1396050-fence_vmware_rest-new-fence-agent.patch
+Patch125: bz1476401-1-adhere-no_status-in-fence_action.patch
+Patch126: bz1476401-2-enhance-run_delay.patch
+Patch127: bz1476401-3-add-sync_set_power-to-fence_action.patch
+Patch128: bz1476401-4-add-fence_heuristics_ping.patch
+Patch129: bz1465436-fence_ipmilan-fix-default-method-inconsistency.patch
+Patch130: bz1533170-fence_compute-fence_evacuate-add-support-for-keystone-v3-authentication.patch
+Patch131: bz1519370-fence_ilo3-default-to-onoff.patch
+Patch132: bz1451776-2-fence_aws-bundled-python-botocore.patch
%if 0%{?rhel}
-%global supportedagents apc apc_snmp bladecenter brocade cisco_mds cisco_ucs compute drac5 eaton_snmp emerson eps hpblade ibmblade ifmib ilo ilo_moonshot ilo_mp ilo_ssh intelmodular ipdu ipmilan mpath kdump rhevm rsa rsb sbd scsi vmware_soap wti
-%global allfenceagents fence-agents-apc fence-agents-apc-snmp fence-agents-bladecenter fence-agents-brocade fence-agents-cisco-mds fence-agents-cisco-ucs fence-agents-compute fence-agents-drac5 fence-agents-eaton-snmp fence-agents-emerson fence-agents-eps fence-agents-hpblade fence-agents-ibmblade fence-agents-ifmib fence-agents-ilo2 fence-agents-ilo-moonshot fence-agents-ilo-mp fence-agents-ilo-ssh fence-agents-intelmodular fence-agents-ipdu fence-agents-ipmilan fence-agents-mpath fence-agents-kdump fence-agents-rhevm fence-agents-rsa fence-agents-rsb fence-agents-sbd fence-agents-scsi fence-agents-vmware-soap fence-agents-wti
+%global supportedagents amt_ws apc apc_snmp aws azure_arm bladecenter brocade cisco_mds cisco_ucs compute drac5 eaton_snmp emerson eps hpblade ibmblade ifmib ilo ilo_moonshot ilo_mp ilo_ssh intelmodular ipdu ipmilan mpath kdump rhevm rsa rsb sbd scsi vmware_rest vmware_soap wti
+%global allfenceagents fence-agents-amt-ws fence-agents-apc fence-agents-apc-snmp fence-agents-bladecenter fence-agents-brocade fence-agents-cisco-mds fence-agents-cisco-ucs fence-agents-compute fence-agents-drac5 fence-agents-eaton-snmp fence-agents-emerson fence-agents-eps fence-agents-heuristics-ping fence-agents-hpblade fence-agents-ibmblade fence-agents-ifmib fence-agents-ilo2 fence-agents-ilo-moonshot fence-agents-ilo-mp fence-agents-ilo-ssh fence-agents-intelmodular fence-agents-ipdu fence-agents-ipmilan fence-agents-mpath fence-agents-kdump fence-agents-rhevm fence-agents-rsa fence-agents-rsb fence-agents-sbd fence-agents-scsi fence-agents-vmware-rest fence-agents-vmware-soap fence-agents-wti
%ifarch ppc64le
-%global testagents virsh lpar
+%global testagents virsh lpar heuristics_ping
%endif
%ifarch s390x
-%global testagents virsh zvm
+%global testagents virsh zvm heuristics_ping
%endif
%ifnarch ppc64le s390x
-%global testagents virsh
+%global testagents virsh heuristics_ping
%endif
%endif
@@ -166,6 +180,7 @@ BuildRequires: libxslt
BuildRequires: python pexpect python-pycurl python-suds python-requests openwsman-python
BuildRequires: net-snmp-utils
BuildRequires: autoconf automake libtool
+BuildRequires: iputils
%prep
%setup -q -n %{name}-%{version}
@@ -256,7 +271,7 @@ BuildRequires: autoconf automake libtool
%patch84 -p1 -b .bz1280151-1
%patch85 -p1 -F1 -b .bz1280151-2
%patch86 -p1 -b .bz1287059-1
-#%patch87 -p1 -b .bz1296201
+%patch87 -p1 -b .bz1296201
%patch88 -p1 -b .bz1313561
%patch89 -p1 -b .bz1285523-1
%patch90 -p1 -b .bz1285523-2
@@ -284,10 +299,24 @@ BuildRequires: autoconf automake libtool
%patch112 -p1 -b .bz1426693-1
%patch113 -p1 -b .bz1426693-2
%patch114 -p1 -b .bz1459199
-%patch115 -p1 -b .bz1479851
-%patch116 -p1 -b .bz1497072
-%patch117 -p1 -b .bz1497241
-%patch118 -p1 -b .bz1535415
+%patch115 -p1 -b .bz1473860-1
+%patch116 -p1 -b .bz1473860-2
+%patch117 -p1 -b .bz1461854
+%patch118 -p1 -b .bz1451776-1
+%patch119 -p1 -b .bz1496390
+%patch120 -p1 -F1 -b .bz1490475
+%patch121 -p1 -b .bz1455383
+%patch122 -p1 -b .bz1449183
+%patch123 -p1 -b .bz1476009
+%patch124 -p1 -b .bz1396050
+%patch125 -p1 -b .bz1476401-1
+%patch126 -p1 -b .bz1476401-2
+%patch127 -p1 -b .bz1476401-3
+%patch128 -p1 -b .bz1476401-4
+%patch129 -p1 -b .bz1465436
+%patch130 -p1 -b .bz1533170
+%patch131 -p1 -b .bz1519370
+%patch132 -p1 -b .bz1451776-2
%build
./autogen.sh
@@ -315,7 +344,7 @@ power management for several devices.
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
Summary: Common utilities for fence agents
-Requires: python pexpect python-pycurl policycoreutils-python
+Requires: python pexpect python-pycurl policycoreutils-python selinux-policy-targeted
%description common
Red Hat Fence Agents is a collection of scripts and libraries to handle remote power management for various devices.
%post common
@@ -328,20 +357,21 @@ Red Hat Fence Agents is a collection of scripts and libraries to handle remote p
%{_datadir}/cluster
%{_datadir}/fence/fencing.py
%{_datadir}/fence/fencing_snmp.py
+%exclude %{_datadir}/cluster/fence_scsi_check*
%package all
License: GPLv2+, LGPLv2+ and ASL 2.0
Group: System Environment/Base
Summary: Fence agents
-Requires: %{allfenceagents}
+Requires: %(echo "%{allfenceagents}" | sed "s/\( \|$\)/ >= %{version}-%{release}\1/g")
%ifarch i686 x86_64
Requires: fence-virt
%endif
%ifarch ppc64le
-Requires: fence-agents-lpar
+Requires: fence-agents-lpar >= %{version}-%{release}
%endif
%ifarch s390x
-Requires: fence-agents-zvm
+Requires: fence-agents-zvm >= %{version}-%{release}
%endif
Provides: fence-agents = %{version}-%{release}
Obsoletes: fence-agents < 3.1.13
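The %(...) expansion above runs at build time and rewrites %allfenceagents so every agent is required at exactly this build's version-release (the rhbz#1484128 "require agents to be the same version" change in the changelog). Replicated in Python for readability, with example values:

import re

allfenceagents = "fence-agents-apc fence-agents-wti"   # example value
version_release = "4.0.11-86.el7"                      # example %{version}-%{release}

requires = re.sub(r'( |$)',
                  lambda m: ' >= ' + version_release + m.group(1),
                  allfenceagents)
print(requires)
# fence-agents-apc >= 4.0.11-86.el7 fence-agents-wti >= 4.0.11-86.el7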
@@ -365,6 +395,20 @@ Red Hat Fence Agents
%{_mandir}/man8/fence_alom.8*
%endif
+%package amt-ws
+License: ASL 2.0
+Group: System Environment/Base
+Summary: Fence agent for AMT (WS-Man) devices
+Requires: fence-agents-common >= %{version}-%{release}
+Requires: openwsman-python >= 2.6.3-1.git4391e5c.el7
+Obsoletes: fence-agents
+%description amt-ws
+The fence-agents-amt-ws package contains a fence agent for AMT (WS-Man) devices.
+%files amt-ws
+%defattr(-,root,root,-)
+%{_sbindir}/fence_amt_ws
+%{_mandir}/man8/fence_amt_ws.8*
+
%package apc
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
@@ -393,6 +437,34 @@ The fence-agents-apc-snmp package contains a fence agent for APC devices that ar
%{_sbindir}/fence_apc_snmp
%{_mandir}/man8/fence_apc_snmp.8*
+%package aws
+License: GPLv2+ and LGPLv2+
+Group: System Environment/Base
+Summary: Fence agent for Amazon AWS
+Requires: fence-agents-common >= %{version}-%{release}
+Requires: python-boto3
+Obsoletes: fence-agents
+%description aws
+The fence-agents-aws package contains a fence agent for Amazon AWS instances.
+%files aws
+%defattr(-,root,root,-)
+%{_sbindir}/fence_aws
+%{_mandir}/man8/fence_aws.8*
+
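fence_aws talks to EC2 through the python-boto3 dependency declared above. This is not the agent's actual code, but the kind of primitives it builds on looks roughly like the following (region and instance IDs are placeholders):

import boto3

def power_status(region, instance_id):
    ec2 = boto3.client("ec2", region_name=region)
    res = ec2.describe_instances(InstanceIds=[instance_id])
    state = res["Reservations"][0]["Instances"][0]["State"]["Name"]
    return "on" if state == "running" else "off"

def power_off(region, instance_id):
    # Force corresponds to a hard power-off rather than a clean shutdown
    ec2 = boto3.client("ec2", region_name=region)
    ec2.stop_instances(InstanceIds=[instance_id], Force=True)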
+%package azure-arm
+License: GPLv2+ and LGPLv2+
+Group: System Environment/Base
+Summary: Fence agent for Azure Resource Manager
+Requires: fence-agents-common >= %{version}-%{release}
+Requires: python-azure-sdk
+Obsoletes: fence-agents
+%description azure-arm
+The fence-agents-azure-arm package contains a fence agent for Azure instances.
+%files azure-arm
+%defattr(-,root,root,-)
+%{_sbindir}/fence_azure_arm
+%{_mandir}/man8/fence_azure_arm.8*
+
%package bladecenter
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
@@ -440,7 +512,6 @@ License: GPLv2+ and LGPLv2+
Group: System Environment/Base
Summary: Fence agent for Cisco UCS series
Requires: fence-agents-common >= %{version}-%{release}
-Requires: pycurl
Obsoletes: fence-agents
%description cisco-ucs
The fence-agents-cisco-ucs package contains a fence agent for Cisco UCS series devices that are accessed via the SNMP protocol.
@@ -519,6 +590,20 @@ The fence-agents-eps package contains a fence agent for ePowerSwitch 8M+ power s
%{_sbindir}/fence_eps
%{_mandir}/man8/fence_eps.8*
+%package heuristics-ping
+License: GPLv2+ and LGPLv2+
+Group: System Environment/Base
+Summary: Fence agent used to control other fence agents based on ping heuristics
+Requires: fence-agents-common >= %{version}-%{release}
+Obsoletes: fence-agents
+%description heuristics-ping
+The fence-agents-heuristics-ping package contains a fence agent used to control other fence agents based on ping heuristics.
+%files heuristics-ping
+%defattr(-,root,root,-)
+%{_sbindir}/fence_heuristics_ping
+%{_mandir}/man8/fence_heuristics_ping.8*
+
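fence_heuristics_ping does no fencing itself: placed as the first device of a fencing level, it succeeds only when its ping targets answer, so the real fence agent configured after it only runs from a node that still has network connectivity (this is also why iputils was added to BuildRequires above). The idea, reduced to a sketch with a placeholder target:

import os
import subprocess
import sys

def ping_ok(target, count=3, timeout=2):
    # Relies on the iputils ping binary
    devnull = open(os.devnull, "w")
    rc = subprocess.call(["ping", "-c", str(count), "-W", str(timeout), target],
                         stdout=devnull, stderr=devnull)
    devnull.close()
    return rc == 0

if __name__ == "__main__":
    # Exit 0 lets the next device in the fencing level run; 1 blocks it.
    sys.exit(0 if ping_ok("192.0.2.1") else 1)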
%package hpblade
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
@@ -834,6 +919,19 @@ The fence-agents-virsh package contains a fence agent for virtual machines that
%{_sbindir}/fence_virsh
%{_mandir}/man8/fence_virsh.8*
+%package vmware-rest
+License: GPLv2+ and LGPLv2+
+Group: System Environment/Base
+Summary: Fence agent for VMware with REST API
+Requires: fence-agents-common >= %{version}-%{release}
+Obsoletes: fence-agents
+%description vmware-rest
+The fence-agents-vmware-rest package contains a fence agent for VMware with REST API.
+%files vmware-rest
+%defattr(-,root,root,-)
+%{_sbindir}/fence_vmware_rest
+%{_mandir}/man8/fence_vmware_rest.8*
+
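fence_vmware_rest drives the vSphere Automation (REST) API introduced with vCenter 6.5. The endpoints below exist in that API, but the wiring is only an illustration, not the agent's code; host, credentials, and the VM identifier are placeholders:

import requests

HOST = "https://vcenter.example.com"

def login(user, password):
    r = requests.post(HOST + "/rest/com/vmware/cis/session",
                      auth=(user, password))
    r.raise_for_status()
    # Subsequent calls authenticate with the returned session id
    return {"vmware-api-session-id": r.json()["value"]}

def power_state(headers, vm):
    r = requests.get(HOST + "/rest/vcenter/vm/%s/power" % vm, headers=headers)
    r.raise_for_status()
    return r.json()["value"]["state"]   # "POWERED_ON" / "POWERED_OFF"

def power_off(headers, vm):
    requests.post(HOST + "/rest/vcenter/vm/%s/power/stop" % vm,
                  headers=headers).raise_for_status()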
%package vmware-soap
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
@@ -879,22 +977,77 @@ The fence-agents-zvm package contains a fence agent for z/VM hypervisors
%endif
%changelog
-* Wed Jan 17 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-66.4
+* Wed Feb 7 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-86
+- fence-agents-all: remove fence-agents-aws and fence-agents-azure-arm
+ dependencies
+ Resolves: rhbz#1476009
+
+* Tue Feb 6 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-85
+- fence_aws: add python-boto3 dependency
+ Resolves: rhbz#1540700
+
+* Mon Jan 22 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-84
+- fence_azure_arm: new fence agent
+ Resolves: rhbz#1476009
+
+* Thu Jan 11 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-83
- fence_compute/fence_evacuate: add support for keystone v3 authentication
- Resolves: rhbz#1535415
+ Resolves: rhbz#1533170
+- fence_ilo3: default to onoff
+ Resolves: rhbz#1519370
-* Fri Sep 29 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-66.3
-- fence_compute/fence_scsi: fix issue with some parameters
- (for ABI compatibility)
- Resolves: rhbz#1497241
+* Tue Nov 28 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-82
+- fence_vmware_rest: new fence agent
+ Resolves: rhbz#1396050
-* Fri Sep 29 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-66.2
-- fence_compute/fence_evacuate: changes to support Instance HA on OSP12
- Resolves: rhbz#1497072
+* Tue Nov 7 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-81
+- common: add selinux-policy-targeted dependency
+ Resolves: rhbz#1509327
+
+* Fri Nov 3 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-80
+- fence_ipmilan: fix default method inconsistency (help/man page)
+ Resolves: rhbz#1465436
+
+* Wed Nov 1 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-78
+- fence_heuristics_ping: new fence agent
+ Resolves: rhbz#1476401
-* Thu Aug 10 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-66.1
+* Thu Oct 26 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-77
+- fence_ilo_ssh: fix "hard reset"
+ Resolves: rhbz#1490475
+
+* Wed Oct 25 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-76
+- fence_ipmilan: add support for hexadecimal key authentication
+ Resolves: rhbz#1449183
+
+* Tue Oct 24 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-75
+- fence_aws: new fence agent
+ Resolves: rhbz#1451776
+
+* Fri Oct 6 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-72
+- fence_amt_ws: new fence agent
+ Resolves: rhbz#1296201
+
+* Fri Sep 29 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-70
+- fence-agents-all: require agents to be the same version
+ Resolves: rhbz#1484128
+
+* Fri Sep 29 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-69
+- fence_scsi: add FIPS support
+ Resolves: rhbz#1455383
+
+* Thu Sep 28 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-68
- fence_compute/fence_scsi: fix issue with some parameters
- Resolves: rhbz#1479851
+ Resolves: rhbz#1473860
+- fence_compute/fence_evacuate: changes to support Instance HA on OSP12
+ Resolves: rhbz#1496390
+- fence-agents-common: remove fence_scsi_check-files that should only be in
+ the scsi subpackage
+ Resolves: rhbz#1484128
+
+* Wed Aug 2 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.11-67
+- Remove "list" when not supported
+ Resolves: rhbz#1461854
* Fri Jun 16 2017 Marek Grac <mgrac@redhat.com> - 4.0.11-66
- Set SELinux booleans even when SELinux is disabled
@@ -1035,8 +1188,6 @@ The fence-agents-zvm package contains a fence agent for z/VM hypervisors
Resolves: rhbz#1280151
- fence_rhevm: add Filter header
Resolves: rhbz#1287059
-- fence_amt_ws: new fence agent
- Resolves: rhbz#1296201
- fence_compute: fix to locate all instances to be evacuated
Resolves: rhbz#1313561