|
|
08923a |
diff -uNr a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
|
|
|
08923a |
--- a/fence/agents/compute/fence_compute.py 2017-09-27 15:01:34.974642469 +0200
|
|
|
08923a |
+++ b/fence/agents/compute/fence_compute.py 2017-09-27 15:24:57.482819900 +0200
|
|
|
08923a |
@@ -18,173 +18,115 @@
|
|
|
08923a |
#END_VERSION_GENERATION
|
|
|
08923a |
|
|
|
08923a |
override_status = ""
|
|
|
08923a |
-nova = None
|
|
|
08923a |
|
|
|
08923a |
EVACUABLE_TAG = "evacuable"
|
|
|
08923a |
TRUE_TAGS = ['true']
|
|
|
08923a |
|
|
|
08923a |
-def get_power_status(_, options):
|
|
|
08923a |
- global override_status
|
|
|
08923a |
-
|
|
|
08923a |
- status = "unknown"
|
|
|
08923a |
- logging.debug("get action: " + options["--action"])
|
|
|
08923a |
+def get_power_status(connection, options):
|
|
|
08923a |
|
|
|
08923a |
if len(override_status):
|
|
|
08923a |
logging.debug("Pretending we're " + override_status)
|
|
|
08923a |
return override_status
|
|
|
08923a |
|
|
|
08923a |
- if nova:
|
|
|
08923a |
+ status = "unknown"
|
|
|
08923a |
+ logging.debug("get action: " + options["--action"])
|
|
|
08923a |
+
|
|
|
08923a |
+ if connection:
|
|
|
08923a |
try:
|
|
|
08923a |
- services = nova.services.list(host=options["--plug"])
|
|
|
08923a |
+ services = connection.services.list(host=options["--plug"], binary="nova-compute")
|
|
|
08923a |
for service in services:
|
|
|
08923a |
- logging.debug("Status of %s is %s" % (service.binary, service.state))
|
|
|
08923a |
- if service.binary == "nova-compute":
|
|
|
08923a |
- if service.state == "up":
|
|
|
08923a |
- status = "on"
|
|
|
08923a |
- elif service.state == "down":
|
|
|
08923a |
- status = "off"
|
|
|
08923a |
- else:
|
|
|
08923a |
- logging.debug("Unknown status detected from nova: " + service.state)
|
|
|
08923a |
- break
|
|
|
08923a |
+ logging.debug("Status of %s on %s is %s, %s" % (service.binary, options["--plug"], service.state, service.status))
|
|
|
08923a |
+ if service.state == "up" and service.status == "enabled":
|
|
|
08923a |
+ # Up and operational
|
|
|
08923a |
+ status = "on"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "down" and service.status == "disabled":
|
|
|
08923a |
+ # Down and fenced
|
|
|
08923a |
+ status = "off"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "down":
|
|
|
08923a |
+ # Down and requires fencing
|
|
|
08923a |
+ status = "failed"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "up":
|
|
|
08923a |
+ # Up and requires unfencing
|
|
|
08923a |
+ status = "running"
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
|
|
|
08923a |
+ status = "%s %s" % (service.state, service.status)
|
|
|
08923a |
+ break
|
|
|
08923a |
except requests.exception.ConnectionError as err:
|
|
|
08923a |
logging.warning("Nova connection failed: " + str(err))
|
|
|
08923a |
+ logging.debug("Final status of %s is %s" % (options["--plug"], status))
|
|
|
08923a |
return status
|
|
|
08923a |
|
|
|
08923a |
-# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
|
|
|
08923a |
-# module which is not stable
|
|
|
08923a |
-def _server_evacuate(server, on_shared_storage):
|
|
|
08923a |
- success = False
|
|
|
08923a |
- error_message = ""
|
|
|
08923a |
- try:
|
|
|
08923a |
- logging.debug("Resurrecting instance: %s" % server)
|
|
|
08923a |
- (response, dictionary) = nova.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
|
|
|
08923a |
-
|
|
|
08923a |
- if response == None:
|
|
|
08923a |
- error_message = "No response while evacuating instance"
|
|
|
08923a |
- elif response.status_code == 200:
|
|
|
08923a |
- success = True
|
|
|
08923a |
- error_message = response.reason
|
|
|
08923a |
- else:
|
|
|
08923a |
- error_message = response.reason
|
|
|
08923a |
-
|
|
|
08923a |
- except Exception as e:
|
|
|
08923a |
- error_message = "Error while evacuating instance: %s" % e
|
|
|
08923a |
-
|
|
|
08923a |
- return {
|
|
|
08923a |
- "uuid": server,
|
|
|
08923a |
- "accepted": success,
|
|
|
08923a |
- "reason": error_message,
|
|
|
08923a |
- }
|
|
|
08923a |
-
|
|
|
08923a |
-def _is_server_evacuable(server, evac_flavors, evac_images):
|
|
|
08923a |
- if server.flavor.get('id') in evac_flavors:
|
|
|
08923a |
- return True
|
|
|
08923a |
- if server.image.get('id') in evac_images:
|
|
|
08923a |
- return True
|
|
|
08923a |
- logging.debug("Instance %s is not evacuable" % server.image.get('id'))
|
|
|
08923a |
- return False
|
|
|
08923a |
-
|
|
|
08923a |
-def _get_evacuable_flavors():
|
|
|
08923a |
- result = []
|
|
|
08923a |
- flavors = nova.flavors.list()
|
|
|
08923a |
- # Since the detailed view for all flavors doesn't provide the extra specs,
|
|
|
08923a |
- # we need to call each of the flavor to get them.
|
|
|
08923a |
- for flavor in flavors:
|
|
|
08923a |
- tag = flavor.get_keys().get(EVACUABLE_TAG)
|
|
|
08923a |
- if tag and tag.strip().lower() in TRUE_TAGS:
|
|
|
08923a |
- result.append(flavor.id)
|
|
|
08923a |
- return result
|
|
|
08923a |
-
|
|
|
08923a |
-def _get_evacuable_images():
|
|
|
08923a |
- result = []
|
|
|
08923a |
- images = nova.images.list(detailed=True)
|
|
|
08923a |
- for image in images:
|
|
|
08923a |
- if hasattr(image, 'metadata'):
|
|
|
08923a |
- tag = image.metadata.get(EVACUABLE_TAG)
|
|
|
08923a |
- if tag and tag.strip().lower() in TRUE_TAGS:
|
|
|
08923a |
- result.append(image.id)
|
|
|
08923a |
- return result
|
|
|
08923a |
-
|
|
|
08923a |
-def _host_evacuate(options):
|
|
|
08923a |
- result = True
|
|
|
08923a |
- images = _get_evacuable_images()
|
|
|
08923a |
- flavors = _get_evacuable_flavors()
|
|
|
08923a |
- servers = nova.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
|
|
|
08923a |
-
|
|
|
08923a |
- if options["--instance-filtering"] == "False":
|
|
|
08923a |
- logging.debug("Not evacuating anything")
|
|
|
08923a |
- evacuables = []
|
|
|
08923a |
- elif len(flavors) or len(images):
|
|
|
08923a |
- logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
|
|
|
08923a |
- # Identify all evacuable servers
|
|
|
08923a |
- logging.debug("Checking %s" % repr(servers))
|
|
|
08923a |
- evacuables = [server for server in servers
|
|
|
08923a |
- if _is_server_evacuable(server, flavors, images)]
|
|
|
08923a |
- logging.debug("Evacuating %s" % repr(evacuables))
|
|
|
08923a |
- else:
|
|
|
08923a |
- logging.debug("Evacuating all images and flavors")
|
|
|
08923a |
- evacuables = servers
|
|
|
08923a |
-
|
|
|
08923a |
- if options["--no-shared-storage"] != "False":
|
|
|
08923a |
- on_shared_storage = False
|
|
|
08923a |
- else:
|
|
|
08923a |
- on_shared_storage = True
|
|
|
08923a |
-
|
|
|
08923a |
- for server in evacuables:
|
|
|
08923a |
- logging.debug("Processing %s" % server)
|
|
|
08923a |
- if hasattr(server, 'id'):
|
|
|
08923a |
- response = _server_evacuate(server.id, on_shared_storage)
|
|
|
08923a |
- if response["accepted"]:
|
|
|
08923a |
- logging.debug("Evacuated %s from %s: %s" %
|
|
|
08923a |
- (response["uuid"], options["--plug"], response["reason"]))
|
|
|
08923a |
- else:
|
|
|
08923a |
- logging.error("Evacuation of %s on %s failed: %s" %
|
|
|
08923a |
- (response["uuid"], options["--plug"], response["reason"]))
|
|
|
08923a |
- result = False
|
|
|
08923a |
- else:
|
|
|
08923a |
- logging.error("Could not evacuate instance: %s" % server.to_dict())
|
|
|
08923a |
- # Should a malformed instance result in a failed evacuation?
|
|
|
08923a |
- # result = False
|
|
|
08923a |
- return result
|
|
|
08923a |
+def get_power_status_simple(connection, options):
|
|
|
08923a |
+ status = get_power_status(connection, options)
|
|
|
08923a |
+ if status in [ "off" ]:
|
|
|
08923a |
+ return status
|
|
|
08923a |
+ return "on"
|
|
|
08923a |
|
|
|
08923a |
def set_attrd_status(host, status, options):
|
|
|
08923a |
logging.debug("Setting fencing status for %s to %s" % (host, status))
|
|
|
08923a |
run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
|
|
|
08923a |
|
|
|
08923a |
-def set_power_status(_, options):
|
|
|
08923a |
- global override_status
|
|
|
08923a |
-
|
|
|
08923a |
- override_status = ""
|
|
|
08923a |
- logging.debug("set action: " + options["--action"])
|
|
|
08923a |
+def get_attrd_status(host, options):
|
|
|
08923a |
+ (status, pipe_stdout, pipe_stderr) = run_command(options, "attrd_updater -p -n evacuate -Q -N %s" % (host))
|
|
|
08923a |
+ fields = pipe_stdout.split('"')
|
|
|
08923a |
+ if len(fields) > 6:
|
|
|
08923a |
+ return fields[5]
|
|
|
08923a |
+ logging.debug("Got %s: o:%s e:%s n:%d" % (status, pipe_stdout, pipe_stderr, len(fields)))
|
|
|
08923a |
+ return ""
|
|
|
08923a |
+
|
|
|
08923a |
+def set_power_status_on(connection, options):
|
|
|
08923a |
+ # Wait for any evacuations to complete
|
|
|
08923a |
+ while True:
|
|
|
08923a |
+ current = get_attrd_status(options["--plug"], options)
|
|
|
08923a |
+ if current in ["no", ""]:
|
|
|
08923a |
+ logging.info("Evacuation complete for: %s '%s'" % (options["--plug"], current))
|
|
|
08923a |
+ break
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.info("Waiting for %s to complete evacuations: %s" % (options["--plug"], current))
|
|
|
08923a |
+ time.sleep(2)
|
|
|
08923a |
|
|
|
08923a |
- if not nova:
|
|
|
08923a |
- return
|
|
|
08923a |
+ status = get_power_status(connection, options)
|
|
|
08923a |
+ # Should we do it for 'failed' too?
|
|
|
08923a |
+ if status in [ "off", "running", "failed" ]:
|
|
|
08923a |
+ try:
|
|
|
08923a |
+ # Forcing the host back up
|
|
|
08923a |
+ logging.info("Forcing nova-compute back up on "+options["--plug"])
|
|
|
08923a |
+ connection.services.force_down(options["--plug"], "nova-compute", force_down=False)
|
|
|
08923a |
+ logging.info("Forced nova-compute back up on "+options["--plug"])
|
|
|
08923a |
+ except Exception as e:
|
|
|
08923a |
+ # In theory, if force_down=False fails, that's for the exact
|
|
|
08923a |
+ # same possible reasons that below with force_down=True
|
|
|
08923a |
+ # eg. either an incompatible version or an old client.
|
|
|
08923a |
+ # Since it's about forcing back to a default value, there is
|
|
|
08923a |
+ # no real worries to just consider it's still okay even if the
|
|
|
08923a |
+ # command failed
|
|
|
08923a |
+ logging.warn("Exception from attempt to force "
|
|
|
08923a |
+ "host back up via nova API: "
|
|
|
08923a |
+ "%s: %s" % (e.__class__.__name__, e))
|
|
|
08923a |
+
|
|
|
08923a |
+ # Forcing the service back up in case it was disabled
|
|
|
08923a |
+ logging.info("Enabling nova-compute on "+options["--plug"])
|
|
|
08923a |
+ connection.services.enable(options["--plug"], 'nova-compute')
|
|
|
08923a |
|
|
|
08923a |
- if options["--action"] == "on":
|
|
|
08923a |
- if get_power_status(_, options) != "on":
|
|
|
08923a |
- # Forcing the service back up in case it was disabled
|
|
|
08923a |
- nova.services.enable(options["--plug"], 'nova-compute')
|
|
|
08923a |
- try:
|
|
|
08923a |
- # Forcing the host back up
|
|
|
08923a |
- nova.services.force_down(
|
|
|
08923a |
- options["--plug"], "nova-compute", force_down=False)
|
|
|
08923a |
- except Exception as e:
|
|
|
08923a |
- # In theory, if force_down=False fails, that's for the exact
|
|
|
08923a |
- # same possible reasons that below with force_down=True
|
|
|
08923a |
- # eg. either an incompatible version or an old client.
|
|
|
08923a |
- # Since it's about forcing back to a default value, there is
|
|
|
08923a |
- # no real worries to just consider it's still okay even if the
|
|
|
08923a |
- # command failed
|
|
|
08923a |
- logging.info("Exception from attempt to force "
|
|
|
08923a |
- "host back up via nova API: "
|
|
|
08923a |
- "%s: %s" % (e.__class__.__name__, e))
|
|
|
08923a |
- else:
|
|
|
08923a |
- # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
|
|
|
08923a |
- override_status = "on"
|
|
|
08923a |
+ # Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
|
|
|
08923a |
+ override_status = "on"
|
|
|
08923a |
+ elif status not in ["on"]:
|
|
|
08923a |
+ # Not safe to unfence, don't waste time looping to see if the status changes to "on"
|
|
|
08923a |
+ options["--power-timeout"] = "0"
|
|
|
08923a |
+
|
|
|
08923a |
+def set_power_status_off(connection, options):
|
|
|
08923a |
+ status = get_power_status(connection, options)
|
|
|
08923a |
+ if status in [ "off" ]:
|
|
|
08923a |
return
|
|
|
08923a |
|
|
|
08923a |
+ connection.services.disable(options["--plug"], 'nova-compute')
|
|
|
08923a |
try:
|
|
|
08923a |
- nova.services.force_down(
|
|
|
08923a |
+ # Until 2.53
|
|
|
08923a |
+ connection.services.force_down(
|
|
|
08923a |
options["--plug"], "nova-compute", force_down=True)
|
|
|
08923a |
except Exception as e:
|
|
|
08923a |
# Something went wrong when we tried to force the host down.
|
|
|
08923a |
@@ -198,7 +140,7 @@
|
|
|
08923a |
"%s: %s" % (e.__class__.__name__, e))
|
|
|
08923a |
# need to wait for nova to update its internal status or we
|
|
|
08923a |
# cannot call host-evacuate
|
|
|
08923a |
- while get_power_status(_, options) != "off":
|
|
|
08923a |
+ while get_power_status(connection, options) not in ["off"]:
|
|
|
08923a |
# Loop forever if need be.
|
|
|
08923a |
#
|
|
|
08923a |
# Some callers (such as Pacemaker) will have a timer
|
|
|
08923a |
@@ -206,47 +148,55 @@
|
|
|
08923a |
logging.debug("Waiting for nova to update its internal state for %s" % options["--plug"])
|
|
|
08923a |
time.sleep(1)
|
|
|
08923a |
|
|
|
08923a |
- if not _host_evacuate(options):
|
|
|
08923a |
- sys.exit(1)
|
|
|
08923a |
+ set_attrd_status(options["--plug"], "yes", options)
|
|
|
08923a |
+
|
|
|
08923a |
+def set_power_status(connection, options):
|
|
|
08923a |
+ global override_status
|
|
|
08923a |
|
|
|
08923a |
- return
|
|
|
08923a |
+ override_status = ""
|
|
|
08923a |
+ logging.debug("set action: " + options["--action"])
|
|
|
08923a |
+
|
|
|
08923a |
+ if not connection:
|
|
|
08923a |
+ return
|
|
|
08923a |
|
|
|
08923a |
+ if options["--action"] in ["off", "reboot"]:
|
|
|
08923a |
+ set_power_status_off(connection, options)
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ set_power_status_on(connection, options)
|
|
|
08923a |
+ logging.debug("set action passed: " + options["--action"])
|
|
|
08923a |
+ sys.exit(0)
|
|
|
08923a |
|
|
|
08923a |
-def fix_domain(options):
|
|
|
08923a |
+def fix_domain(connection, options):
|
|
|
08923a |
domains = {}
|
|
|
08923a |
last_domain = None
|
|
|
08923a |
|
|
|
08923a |
- if nova:
|
|
|
08923a |
+ if connection:
|
|
|
08923a |
# Find it in nova
|
|
|
08923a |
|
|
|
08923a |
- hypervisors = nova.hypervisors.list()
|
|
|
08923a |
- for hypervisor in hypervisors:
|
|
|
08923a |
- shorthost = hypervisor.hypervisor_hostname.split('.')[0]
|
|
|
08923a |
+ services = connection.services.list(binary="nova-compute")
|
|
|
08923a |
+ for service in services:
|
|
|
08923a |
+ shorthost = service.host.split('.')[0]
|
|
|
08923a |
|
|
|
08923a |
- if shorthost == hypervisor.hypervisor_hostname:
|
|
|
08923a |
+ if shorthost == service.host:
|
|
|
08923a |
# Nova is not using FQDN
|
|
|
08923a |
calculated = ""
|
|
|
08923a |
else:
|
|
|
08923a |
# Compute nodes are named as FQDN, strip off the hostname
|
|
|
08923a |
- calculated = hypervisor.hypervisor_hostname.replace(shorthost+".", "")
|
|
|
08923a |
-
|
|
|
08923a |
- domains[calculated] = shorthost
|
|
|
08923a |
+ calculated = service.host.replace(shorthost+".", "")
|
|
|
08923a |
|
|
|
08923a |
if calculated == last_domain:
|
|
|
08923a |
# Avoid complaining for each compute node with the same name
|
|
|
08923a |
# One hopes they don't appear interleaved as A.com B.com A.com B.com
|
|
|
08923a |
- logging.debug("Calculated the same domain from: %s" % hypervisor.hypervisor_hostname)
|
|
|
08923a |
+ logging.debug("Calculated the same domain from: %s" % service.host)
|
|
|
08923a |
+ continue
|
|
|
08923a |
|
|
|
08923a |
- elif "--domain" in options and options["--domain"] == calculated:
|
|
|
08923a |
- # Supplied domain name is valid
|
|
|
08923a |
- return
|
|
|
08923a |
+ domains[calculated] = service.host
|
|
|
08923a |
+ last_domain = calculated
|
|
|
08923a |
|
|
|
08923a |
- elif "--domain" in options:
|
|
|
08923a |
+ if "--domain" in options and options["--domain"] != calculated:
|
|
|
08923a |
# Warn in case nova isn't available at some point
|
|
|
08923a |
logging.warning("Supplied domain '%s' does not match the one calculated from: %s"
|
|
|
08923a |
- % (options["--domain"], hypervisor.hypervisor_hostname))
|
|
|
08923a |
-
|
|
|
08923a |
- last_domain = calculated
|
|
|
08923a |
+ % (options["--domain"], service.host))
|
|
|
08923a |
|
|
|
08923a |
if len(domains) == 0 and "--domain" not in options:
|
|
|
08923a |
logging.error("Could not calculate the domain names used by compute nodes in nova")
|
|
|
08923a |
@@ -254,9 +204,9 @@
|
|
|
08923a |
elif len(domains) == 1 and "--domain" not in options:
|
|
|
08923a |
options["--domain"] = last_domain
|
|
|
08923a |
|
|
|
08923a |
- elif len(domains) == 1:
|
|
|
08923a |
- logging.error("Overriding supplied domain '%s' does not match the one calculated from: %s"
|
|
|
08923a |
- % (options["--domain"], hypervisor.hypervisor_hostname))
|
|
|
08923a |
+ elif len(domains) == 1 and options["--domain"] != last_domain:
|
|
|
08923a |
+ logging.error("Overriding supplied domain '%s' as it does not match the one calculated from: %s"
|
|
|
08923a |
+ % (options["--domain"], domains[last_domain]))
|
|
|
08923a |
options["--domain"] = last_domain
|
|
|
08923a |
|
|
|
08923a |
elif len(domains) > 1:
|
|
|
08923a |
@@ -264,47 +214,49 @@
|
|
|
08923a |
% (options["--domain"], repr(domains)))
|
|
|
08923a |
sys.exit(1)
|
|
|
08923a |
|
|
|
08923a |
-def fix_plug_name(options):
|
|
|
08923a |
+ return last_domain
|
|
|
08923a |
+
|
|
|
08923a |
+def fix_plug_name(connection, options):
|
|
|
08923a |
if options["--action"] == "list":
|
|
|
08923a |
return
|
|
|
08923a |
|
|
|
08923a |
if "--plug" not in options:
|
|
|
08923a |
return
|
|
|
08923a |
|
|
|
08923a |
- fix_domain(options)
|
|
|
08923a |
- short_plug = options["--plug"].split('.')[0]
|
|
|
08923a |
- logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], options["--domain"]))
|
|
|
08923a |
-
|
|
|
08923a |
- if "--domain" not in options:
|
|
|
08923a |
+ calculated = fix_domain(connection, options)
|
|
|
08923a |
+ if calculated is None or "--domain" not in options:
|
|
|
08923a |
# Nothing supplied and nova not available... what to do... nothing
|
|
|
08923a |
return
|
|
|
08923a |
|
|
|
08923a |
- elif options["--domain"] == "":
|
|
|
08923a |
+ short_plug = options["--plug"].split('.')[0]
|
|
|
08923a |
+ logging.debug("Checking target '%s' against calculated domain '%s'"% (options["--plug"], calculated))
|
|
|
08923a |
+
|
|
|
08923a |
+ if options["--domain"] == "":
|
|
|
08923a |
# Ensure any domain is stripped off since nova isn't using FQDN
|
|
|
08923a |
options["--plug"] = short_plug
|
|
|
08923a |
|
|
|
08923a |
- elif options["--domain"] in options["--plug"]:
|
|
|
08923a |
- # Plug already contains the domain, don't re-add
|
|
|
08923a |
+ elif options["--plug"].endswith(options["--domain"]):
|
|
|
08923a |
+ # Plug already uses the domain, don't re-add
|
|
|
08923a |
return
|
|
|
08923a |
|
|
|
08923a |
else:
|
|
|
08923a |
# Add the domain to the plug
|
|
|
08923a |
options["--plug"] = short_plug + "." + options["--domain"]
|
|
|
08923a |
|
|
|
08923a |
-def get_plugs_list(_, options):
|
|
|
08923a |
+def get_plugs_list(connection, options):
|
|
|
08923a |
result = {}
|
|
|
08923a |
|
|
|
08923a |
- if nova:
|
|
|
08923a |
- hypervisors = nova.hypervisors.list()
|
|
|
08923a |
- for hypervisor in hypervisors:
|
|
|
08923a |
- longhost = hypervisor.hypervisor_hostname
|
|
|
08923a |
+ if connection:
|
|
|
08923a |
+ services = connection.services.list(binary="nova-compute")
|
|
|
08923a |
+ for service in services:
|
|
|
08923a |
+ longhost = service.host
|
|
|
08923a |
shorthost = longhost.split('.')[0]
|
|
|
08923a |
result[longhost] = ("", None)
|
|
|
08923a |
result[shorthost] = ("", None)
|
|
|
08923a |
return result
|
|
|
08923a |
|
|
|
08923a |
def create_nova_connection(options):
|
|
|
08923a |
- global nova
|
|
|
08923a |
+ nova = None
|
|
|
08923a |
|
|
|
08923a |
try:
|
|
|
08923a |
from novaclient import client
|
|
|
08923a |
@@ -330,41 +282,42 @@
|
|
|
08923a |
if clientargs:
|
|
|
08923a |
# OSP < 11
|
|
|
08923a |
# ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
|
|
|
08923a |
- # varargs=None,
|
|
|
08923a |
- # keywords='kwargs', defaults=(None, None, None, None))
|
|
|
08923a |
+ # varargs=None,
|
|
|
08923a |
+ # keywords='kwargs', defaults=(None, None, None, None))
|
|
|
08923a |
nova = client.Client(version,
|
|
|
08923a |
- options["--username"],
|
|
|
08923a |
- options["--password"],
|
|
|
08923a |
- options["--tenant-name"],
|
|
|
08923a |
- options["--auth-url"],
|
|
|
08923a |
- insecure=options["--insecure"],
|
|
|
08923a |
- region_name=options["--region-name"],
|
|
|
08923a |
- endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
- http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
+ options["--username"],
|
|
|
08923a |
+ options["--password"],
|
|
|
08923a |
+ options["--tenant-name"],
|
|
|
08923a |
+ options["--auth-url"],
|
|
|
08923a |
+ insecure=options["--insecure"],
|
|
|
08923a |
+ region_name=options["--region-name"],
|
|
|
08923a |
+ endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
+ http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
else:
|
|
|
08923a |
# OSP >= 11
|
|
|
08923a |
# ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
|
|
|
08923a |
nova = client.Client(version,
|
|
|
08923a |
- username=options["--username"],
|
|
|
08923a |
- password=options["--password"],
|
|
|
08923a |
- tenant_name=options["--tenant-name"],
|
|
|
08923a |
- auth_url=options["--auth-url"],
|
|
|
08923a |
- insecure=options["--insecure"],
|
|
|
08923a |
- region_name=options["--region-name"],
|
|
|
08923a |
- endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
- http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
+ username=options["--username"],
|
|
|
08923a |
+ password=options["--password"],
|
|
|
08923a |
+ tenant_name=options["--tenant-name"],
|
|
|
08923a |
+ auth_url=options["--auth-url"],
|
|
|
08923a |
+ insecure=options["--insecure"],
|
|
|
08923a |
+ region_name=options["--region-name"],
|
|
|
08923a |
+ endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
+ http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
|
|
|
08923a |
try:
|
|
|
08923a |
nova.hypervisors.list()
|
|
|
08923a |
- return
|
|
|
08923a |
+ return nova
|
|
|
08923a |
|
|
|
08923a |
except NotAcceptable as e:
|
|
|
08923a |
logging.warning(e)
|
|
|
08923a |
|
|
|
08923a |
except Exception as e:
|
|
|
08923a |
logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
|
|
|
08923a |
-
|
|
|
08923a |
+
|
|
|
08923a |
logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
|
|
|
08923a |
+ return None
|
|
|
08923a |
|
|
|
08923a |
def define_new_opts():
|
|
|
08923a |
all_opt["endpoint_type"] = {
|
|
|
08923a |
@@ -448,11 +401,23 @@
|
|
|
08923a |
"order": 5,
|
|
|
08923a |
}
|
|
|
08923a |
|
|
|
08923a |
+def set_multi_power_fn(connection, options, set_power_fn, get_power_fn, retry_attempts=1):
|
|
|
08923a |
+ for _ in range(retry_attempts):
|
|
|
08923a |
+ set_power_fn(connection, options)
|
|
|
08923a |
+ time.sleep(int(options["--power-wait"]))
|
|
|
08923a |
+
|
|
|
08923a |
+ for _ in range(int(options["--power-timeout"])):
|
|
|
08923a |
+ if get_power_fn(connection, options) != options["--action"]:
|
|
|
08923a |
+ time.sleep(1)
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ return True
|
|
|
08923a |
+ return False
|
|
|
08923a |
+
|
|
|
08923a |
def main():
|
|
|
08923a |
global override_status
|
|
|
08923a |
atexit.register(atexit_handler)
|
|
|
08923a |
|
|
|
08923a |
- device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
|
|
|
08923a |
+ device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing",
|
|
|
08923a |
"no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
|
|
|
08923a |
"record_only", "instance_filtering", "insecure", "region_name"]
|
|
|
08923a |
define_new_opts()
|
|
|
08923a |
@@ -472,30 +437,28 @@
|
|
|
08923a |
|
|
|
08923a |
run_delay(options)
|
|
|
08923a |
|
|
|
08923a |
- create_nova_connection(options)
|
|
|
08923a |
+ logging.debug("Running "+options["--action"])
|
|
|
08923a |
+ connection = create_nova_connection(options)
|
|
|
08923a |
|
|
|
08923a |
- fix_plug_name(options)
|
|
|
08923a |
- if options["--record-only"] in [ "1", "True", "true", "Yes", "yes"]:
|
|
|
08923a |
- if options["--action"] == "on":
|
|
|
08923a |
- set_attrd_status(options["--plug"], "no", options)
|
|
|
08923a |
- sys.exit(0)
|
|
|
08923a |
-
|
|
|
08923a |
- elif options["--action"] in ["off", "reboot"]:
|
|
|
08923a |
- set_attrd_status(options["--plug"], "yes", options)
|
|
|
08923a |
- sys.exit(0)
|
|
|
08923a |
+ if options["--action"] in ["off", "on", "reboot", "status"]:
|
|
|
08923a |
+ fix_plug_name(connection, options)
|
|
|
08923a |
|
|
|
08923a |
- elif options["--action"] in ["monitor", "status"]:
|
|
|
08923a |
- sys.exit(0)
|
|
|
08923a |
|
|
|
08923a |
- if options["--action"] in ["off", "reboot"]:
|
|
|
08923a |
- # Pretend we're 'on' so that the fencing library will always call set_power_status(off)
|
|
|
08923a |
- override_status = "on"
|
|
|
08923a |
-
|
|
|
08923a |
- if options["--action"] == "on":
|
|
|
08923a |
- # Pretend we're 'off' so that the fencing library will always call set_power_status(on)
|
|
|
08923a |
- override_status = "off"
|
|
|
08923a |
+ if options["--action"] in ["reboot"]:
|
|
|
08923a |
+ options["--action"]="off"
|
|
|
08923a |
+
|
|
|
08923a |
+ if options["--action"] in ["off", "on"]:
|
|
|
08923a |
+ # No status first, call our own version
|
|
|
08923a |
+ result = not set_multi_power_fn(connection, options, set_power_status, get_power_status_simple,
|
|
|
08923a |
+ 1 + int(options["--retry-on"]))
|
|
|
08923a |
+ elif options["--action"] in ["monitor"]:
|
|
|
08923a |
+ result = 0
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ result = fence_action(connection, options, set_power_status, get_power_status_simple, get_plugs_list, None)
|
|
|
08923a |
|
|
|
08923a |
- result = fence_action(None, options, set_power_status, get_power_status, get_plugs_list, None)
|
|
|
08923a |
+ logging.debug("Result for "+options["--action"]+": "+repr(result))
|
|
|
08923a |
+ if result == None:
|
|
|
08923a |
+ result = 0
|
|
|
08923a |
sys.exit(result)
|
|
|
08923a |
|
|
|
08923a |
if __name__ == "__main__":
|
|
|
08923a |
diff -uNr a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py
|
|
|
08923a |
--- a/fence/agents/compute/fence_evacuate.py 1970-01-01 01:00:00.000000000 +0100
|
|
|
08923a |
+++ b/fence/agents/compute/fence_evacuate.py 2017-09-27 15:25:54.234304769 +0200
|
|
|
08923a |
@@ -0,0 +1,366 @@
|
|
|
08923a |
+#!/usr/bin/python -tt
|
|
|
08923a |
+
|
|
|
08923a |
+import sys
|
|
|
08923a |
+import time
|
|
|
08923a |
+import atexit
|
|
|
08923a |
+import logging
|
|
|
08923a |
+import inspect
|
|
|
08923a |
+import requests.exceptions
|
|
|
08923a |
+
|
|
|
08923a |
+sys.path.append("@FENCEAGENTSLIBDIR@")
|
|
|
08923a |
+from fencing import *
|
|
|
08923a |
+from fencing import fail_usage, is_executable, run_command, run_delay
|
|
|
08923a |
+
|
|
|
08923a |
+EVACUABLE_TAG = "evacuable"
|
|
|
08923a |
+TRUE_TAGS = ['true']
|
|
|
08923a |
+
|
|
|
08923a |
+def get_power_status(connection, options):
|
|
|
08923a |
+
|
|
|
08923a |
+ status = "unknown"
|
|
|
08923a |
+ logging.debug("get action: " + options["--action"])
|
|
|
08923a |
+
|
|
|
08923a |
+ if connection:
|
|
|
08923a |
+ try:
|
|
|
08923a |
+ services = connection.services.list(host=options["--plug"], binary="nova-compute")
|
|
|
08923a |
+ for service in services:
|
|
|
08923a |
+ logging.debug("Status of %s is %s, %s" % (service.binary, service.state, service.status))
|
|
|
08923a |
+ if service.state == "up" and service.status == "enabled":
|
|
|
08923a |
+ # Up and operational
|
|
|
08923a |
+ status = "on"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "down" and service.status == "disabled":
|
|
|
08923a |
+ # Down and fenced
|
|
|
08923a |
+ status = "off"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "down":
|
|
|
08923a |
+ # Down and requires fencing
|
|
|
08923a |
+ status = "failed"
|
|
|
08923a |
+
|
|
|
08923a |
+ elif service.state == "up":
|
|
|
08923a |
+ # Up and requires unfencing
|
|
|
08923a |
+ status = "running"
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status))
|
|
|
08923a |
+ status = "%s %s" % (service.state, service.status)
|
|
|
08923a |
+ break
|
|
|
08923a |
+            except requests.exceptions.ConnectionError as err:
|
|
|
08923a |
+ logging.warning("Nova connection failed: " + str(err))
|
|
|
08923a |
+ return status
|
|
|
08923a |
+
|
|
|
08923a |
+# NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib
|
|
|
08923a |
+# module which is not stable
|
|
|
08923a |
+def _server_evacuate(connection, server, on_shared_storage):
|
|
|
08923a |
+ success = False
|
|
|
08923a |
+ error_message = ""
|
|
|
08923a |
+ try:
|
|
|
08923a |
+ logging.debug("Resurrecting instance: %s" % server)
|
|
|
08923a |
+ (response, dictionary) = connection.servers.evacuate(server=server, on_shared_storage=on_shared_storage)
|
|
|
08923a |
+
|
|
|
08923a |
+ if response == None:
|
|
|
08923a |
+ error_message = "No response while evacuating instance"
|
|
|
08923a |
+ elif response.status_code == 200:
|
|
|
08923a |
+ success = True
|
|
|
08923a |
+ error_message = response.reason
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ error_message = response.reason
|
|
|
08923a |
+
|
|
|
08923a |
+ except Exception as e:
|
|
|
08923a |
+ error_message = "Error while evacuating instance: %s" % e
|
|
|
08923a |
+
|
|
|
08923a |
+ return {
|
|
|
08923a |
+ "uuid": server,
|
|
|
08923a |
+ "accepted": success,
|
|
|
08923a |
+ "reason": error_message,
|
|
|
08923a |
+ }
|
|
|
08923a |
+
|
|
|
08923a |
+def _is_server_evacuable(server, evac_flavors, evac_images):
|
|
|
08923a |
+ if server.flavor.get('id') in evac_flavors:
|
|
|
08923a |
+ return True
|
|
|
08923a |
+ if hasattr(server.image, 'get'):
|
|
|
08923a |
+ if server.image.get('id') in evac_images:
|
|
|
08923a |
+ return True
|
|
|
08923a |
+ logging.debug("Instance %s is not evacuable" % server.image.get('id'))
|
|
|
08923a |
+ return False
|
|
|
08923a |
+
|
|
|
08923a |
+def _get_evacuable_flavors(connection):
|
|
|
08923a |
+ result = []
|
|
|
08923a |
+ flavors = connection.flavors.list()
|
|
|
08923a |
+ # Since the detailed view for all flavors doesn't provide the extra specs,
|
|
|
08923a |
+ # we need to call each of the flavor to get them.
|
|
|
08923a |
+ for flavor in flavors:
|
|
|
08923a |
+ tag = flavor.get_keys().get(EVACUABLE_TAG)
|
|
|
08923a |
+ if tag and tag.strip().lower() in TRUE_TAGS:
|
|
|
08923a |
+ result.append(flavor.id)
|
|
|
08923a |
+ return result
|
|
|
08923a |
+
|
|
|
08923a |
+def _get_evacuable_images(connection):
|
|
|
08923a |
+ result = []
|
|
|
08923a |
+ images = []
|
|
|
08923a |
+ if hasattr(connection, "images"):
|
|
|
08923a |
+ images = connection.images.list(detailed=True)
|
|
|
08923a |
+ elif hasattr(connection, "glance"):
|
|
|
08923a |
+ # OSP12+
|
|
|
08923a |
+ images = connection.glance.list()
|
|
|
08923a |
+
|
|
|
08923a |
+ for image in images:
|
|
|
08923a |
+ if hasattr(image, 'metadata'):
|
|
|
08923a |
+ tag = image.metadata.get(EVACUABLE_TAG)
|
|
|
08923a |
+ if tag and tag.strip().lower() in TRUE_TAGS:
|
|
|
08923a |
+ result.append(image.id)
|
|
|
08923a |
+ elif hasattr(image, 'tags'):
|
|
|
08923a |
+ # OSP12+
|
|
|
08923a |
+ if EVACUABLE_TAG in image.tags:
|
|
|
08923a |
+ result.append(image.id)
|
|
|
08923a |
+ return result
|
|
|
08923a |
+
|
|
|
08923a |
+def _host_evacuate(connection, options):
|
|
|
08923a |
+ result = True
|
|
|
08923a |
+ images = _get_evacuable_images(connection)
|
|
|
08923a |
+ flavors = _get_evacuable_flavors(connection)
|
|
|
08923a |
+ servers = connection.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1 })
|
|
|
08923a |
+
|
|
|
08923a |
+ if options["--instance-filtering"] == "False":
|
|
|
08923a |
+ logging.debug("Not evacuating anything")
|
|
|
08923a |
+ evacuables = []
|
|
|
08923a |
+ elif len(flavors) or len(images):
|
|
|
08923a |
+ logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))
|
|
|
08923a |
+ # Identify all evacuable servers
|
|
|
08923a |
+ logging.debug("Checking %s" % repr(servers))
|
|
|
08923a |
+ evacuables = [server for server in servers
|
|
|
08923a |
+ if _is_server_evacuable(server, flavors, images)]
|
|
|
08923a |
+ logging.debug("Evacuating %s" % repr(evacuables))
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.debug("Evacuating all images and flavors")
|
|
|
08923a |
+ evacuables = servers
|
|
|
08923a |
+
|
|
|
08923a |
+ if options["--no-shared-storage"] != "False":
|
|
|
08923a |
+ on_shared_storage = False
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ on_shared_storage = True
|
|
|
08923a |
+
|
|
|
08923a |
+ for server in evacuables:
|
|
|
08923a |
+ logging.debug("Processing %s" % server)
|
|
|
08923a |
+ if hasattr(server, 'id'):
|
|
|
08923a |
+ response = _server_evacuate(connection, server.id, on_shared_storage)
|
|
|
08923a |
+ if response["accepted"]:
|
|
|
08923a |
+ logging.debug("Evacuated %s from %s: %s" %
|
|
|
08923a |
+ (response["uuid"], options["--plug"], response["reason"]))
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.error("Evacuation of %s on %s failed: %s" %
|
|
|
08923a |
+ (response["uuid"], options["--plug"], response["reason"]))
|
|
|
08923a |
+ result = False
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ logging.error("Could not evacuate instance: %s" % server.to_dict())
|
|
|
08923a |
+ # Should a malformed instance result in a failed evacuation?
|
|
|
08923a |
+ # result = False
|
|
|
08923a |
+ return result
|
|
|
08923a |
+
|
|
|
08923a |
+def set_attrd_status(host, status, options):
|
|
|
08923a |
+ logging.debug("Setting fencing status for %s to %s" % (host, status))
|
|
|
08923a |
+ run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))
|
|
|
08923a |
+
|
|
|
08923a |
+def set_power_status(connection, options):
|
|
|
08923a |
+ logging.debug("set action: " + options["--action"])
|
|
|
08923a |
+
|
|
|
08923a |
+ if not connection:
|
|
|
08923a |
+ return
|
|
|
08923a |
+
|
|
|
08923a |
+    if options["--action"] == "off" and not _host_evacuate(connection, options):
|
|
|
08923a |
+ sys.exit(1)
|
|
|
08923a |
+
|
|
|
08923a |
+ sys.exit(0)
|
|
|
08923a |
+
|
|
|
08923a |
+def get_plugs_list(connection, options):
|
|
|
08923a |
+ result = {}
|
|
|
08923a |
+
|
|
|
08923a |
+ if connection:
|
|
|
08923a |
+ services = connection.services.list(binary="nova-compute")
|
|
|
08923a |
+ for service in services:
|
|
|
08923a |
+ longhost = service.host
|
|
|
08923a |
+ shorthost = longhost.split('.')[0]
|
|
|
08923a |
+ result[longhost] = ("", None)
|
|
|
08923a |
+ result[shorthost] = ("", None)
|
|
|
08923a |
+ return result
|
|
|
08923a |
+
|
|
|
08923a |
+def create_nova_connection(options):
|
|
|
08923a |
+ nova = None
|
|
|
08923a |
+
|
|
|
08923a |
+ try:
|
|
|
08923a |
+ from novaclient import client
|
|
|
08923a |
+ from novaclient.exceptions import NotAcceptable
|
|
|
08923a |
+ except ImportError:
|
|
|
08923a |
+ fail_usage("Nova not found or not accessible")
|
|
|
08923a |
+
|
|
|
08923a |
+ versions = [ "2.11", "2" ]
|
|
|
08923a |
+ for version in versions:
|
|
|
08923a |
+ clientargs = inspect.getargspec(client.Client).varargs
|
|
|
08923a |
+
|
|
|
08923a |
+ # Some versions of Openstack prior to Ocata only
|
|
|
08923a |
+ # supported positional arguments for username,
|
|
|
08923a |
+ # password and tenant.
|
|
|
08923a |
+ #
|
|
|
08923a |
+ # Versions since Ocata only support named arguments.
|
|
|
08923a |
+ #
|
|
|
08923a |
+ # So we need to use introspection to figure out how to
|
|
|
08923a |
+ # create a Nova client.
|
|
|
08923a |
+ #
|
|
|
08923a |
+ # Happy days
|
|
|
08923a |
+ #
|
|
|
08923a |
+ if clientargs:
|
|
|
08923a |
+ # OSP < 11
|
|
|
08923a |
+ # ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
|
|
|
08923a |
+ # varargs=None,
|
|
|
08923a |
+ # keywords='kwargs', defaults=(None, None, None, None))
|
|
|
08923a |
+ nova = client.Client(version,
|
|
|
08923a |
+ options["--username"],
|
|
|
08923a |
+ options["--password"],
|
|
|
08923a |
+ options["--tenant-name"],
|
|
|
08923a |
+ options["--auth-url"],
|
|
|
08923a |
+ insecure=options["--insecure"],
|
|
|
08923a |
+ region_name=options["--region-name"],
|
|
|
08923a |
+ endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
+ http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
+ else:
|
|
|
08923a |
+ # OSP >= 11
|
|
|
08923a |
+ # ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
|
|
|
08923a |
+ nova = client.Client(version,
|
|
|
08923a |
+ username=options["--username"],
|
|
|
08923a |
+ password=options["--password"],
|
|
|
08923a |
+ tenant_name=options["--tenant-name"],
|
|
|
08923a |
+ auth_url=options["--auth-url"],
|
|
|
08923a |
+ insecure=options["--insecure"],
|
|
|
08923a |
+ region_name=options["--region-name"],
|
|
|
08923a |
+ endpoint_type=options["--endpoint-type"],
|
|
|
08923a |
+ http_log_debug=options.has_key("--verbose"))
|
|
|
08923a |
+
|
|
|
08923a |
+ try:
|
|
|
08923a |
+ nova.hypervisors.list()
|
|
|
08923a |
+ return nova
|
|
|
08923a |
+
|
|
|
08923a |
+ except NotAcceptable as e:
|
|
|
08923a |
+ logging.warning(e)
|
|
|
08923a |
+
|
|
|
08923a |
+ except Exception as e:
|
|
|
08923a |
+ logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))
|
|
|
08923a |
+
|
|
|
08923a |
+ logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
|
|
|
08923a |
+ return None
|
|
|
08923a |
+
|
|
|
08923a |
+def define_new_opts():
|
|
|
08923a |
+ all_opt["endpoint_type"] = {
|
|
|
08923a |
+ "getopt" : "e:",
|
|
|
08923a |
+ "longopt" : "endpoint-type",
|
|
|
08923a |
+ "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Nova Endpoint type",
|
|
|
08923a |
+ "default" : "internalURL",
|
|
|
08923a |
+ "order": 1,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["tenant_name"] = {
|
|
|
08923a |
+ "getopt" : "t:",
|
|
|
08923a |
+ "longopt" : "tenant-name",
|
|
|
08923a |
+ "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Keystone Admin Tenant",
|
|
|
08923a |
+ "default" : "",
|
|
|
08923a |
+ "order": 1,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["auth_url"] = {
|
|
|
08923a |
+ "getopt" : "k:",
|
|
|
08923a |
+ "longopt" : "auth-url",
|
|
|
08923a |
+ "help" : "-k, --auth-url=[url] Keystone Admin Auth URL",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Keystone Admin Auth URL",
|
|
|
08923a |
+ "default" : "",
|
|
|
08923a |
+ "order": 1,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["region_name"] = {
|
|
|
08923a |
+ "getopt" : "",
|
|
|
08923a |
+ "longopt" : "region-name",
|
|
|
08923a |
+ "help" : "--region-name=[region] Region Name",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Region Name",
|
|
|
08923a |
+ "default" : "",
|
|
|
08923a |
+ "order": 1,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["insecure"] = {
|
|
|
08923a |
+ "getopt" : "",
|
|
|
08923a |
+ "longopt" : "insecure",
|
|
|
08923a |
+ "help" : "--insecure Explicitly allow agent to perform \"insecure\" TLS (https) requests",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Allow Insecure TLS Requests",
|
|
|
08923a |
+ "default" : "False",
|
|
|
08923a |
+ "order": 2,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["domain"] = {
|
|
|
08923a |
+ "getopt" : "d:",
|
|
|
08923a |
+ "longopt" : "domain",
|
|
|
08923a |
+ "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "DNS domain in which hosts live",
|
|
|
08923a |
+ "order": 5,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["instance_filtering"] = {
|
|
|
08923a |
+ "getopt" : "",
|
|
|
08923a |
+ "longopt" : "instance-filtering",
|
|
|
08923a |
+ "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Allow instances to be evacuated",
|
|
|
08923a |
+ "default" : "True",
|
|
|
08923a |
+ "order": 5,
|
|
|
08923a |
+ }
|
|
|
08923a |
+ all_opt["no_shared_storage"] = {
|
|
|
08923a |
+ "getopt" : "",
|
|
|
08923a |
+ "longopt" : "no-shared-storage",
|
|
|
08923a |
+ "help" : "--no-shared-storage Disable functionality for shared storage",
|
|
|
08923a |
+ "required" : "0",
|
|
|
08923a |
+ "shortdesc" : "Disable functionality for dealing with shared storage",
|
|
|
08923a |
+ "default" : "False",
|
|
|
08923a |
+ "order": 5,
|
|
|
08923a |
+ }
|
|
|
08923a |
+
|
|
|
08923a |
+def main():
|
|
|
08923a |
+ atexit.register(atexit_handler)
|
|
|
08923a |
+
|
|
|
08923a |
+ device_opt = ["login", "passwd", "tenant_name", "auth_url",
|
|
|
08923a |
+ "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type",
|
|
|
08923a |
+ "instance_filtering", "insecure", "region_name"]
|
|
|
08923a |
+ define_new_opts()
|
|
|
08923a |
+ all_opt["shell_timeout"]["default"] = "180"
|
|
|
08923a |
+
|
|
|
08923a |
+ options = check_input(device_opt, process_input(device_opt))
|
|
|
08923a |
+
|
|
|
08923a |
+ docs = {}
|
|
|
08923a |
+ docs["shortdesc"] = "Fence agent for the automatic resurrection of OpenStack compute instances"
|
|
|
08923a |
+ docs["longdesc"] = "Used to reschedule flagged instances"
|
|
|
08923a |
+ docs["vendorurl"] = ""
|
|
|
08923a |
+
|
|
|
08923a |
+ show_docs(options, docs)
|
|
|
08923a |
+
|
|
|
08923a |
+ run_delay(options)
|
|
|
08923a |
+
|
|
|
08923a |
+ connection = create_nova_connection(options)
|
|
|
08923a |
+
|
|
|
08923a |
+ # Un-evacuating a server doesn't make sense
|
|
|
08923a |
+ if options["--action"] in ["on"]:
|
|
|
08923a |
+ logging.error("Action %s is not supported by this agent" % (options["--action"]))
|
|
|
08923a |
+ sys.exit(1)
|
|
|
08923a |
+
|
|
|
08923a |
+ if options["--action"] in ["off", "reboot"]:
|
|
|
08923a |
+ status = get_power_status(connection, options)
|
|
|
08923a |
+ if status != "off":
|
|
|
08923a |
+ logging.error("Cannot resurrect instances from %s in state '%s'" % (options["--plug"], status))
|
|
|
08923a |
+ sys.exit(1)
|
|
|
08923a |
+
|
|
|
08923a |
+ elif not _host_evacuate(connection, options):
|
|
|
08923a |
+ logging.error("Resurrection of instances from %s failed" % (options["--plug"]))
|
|
|
08923a |
+ sys.exit(1)
|
|
|
08923a |
+
|
|
|
08923a |
+ logging.info("Resurrection of instances from %s complete" % (options["--plug"]))
|
|
|
08923a |
+ sys.exit(0)
|
|
|
08923a |
+
|
|
|
08923a |
+ result = fence_action(connection, options, set_power_status, get_power_status, get_plugs_list, None)
|
|
|
08923a |
+ sys.exit(result)
|
|
|
08923a |
+
|
|
|
08923a |
+if __name__ == "__main__":
|
|
|
08923a |
+ main()
|
|
|
08923a |
diff -uNr a/fence/agents/compute/Makefile.am b/fence/agents/compute/Makefile.am
|
|
|
08923a |
--- a/fence/agents/compute/Makefile.am 2017-09-27 15:01:34.844643650 +0200
|
|
|
08923a |
+++ b/fence/agents/compute/Makefile.am 2017-09-27 15:57:50.963839738 +0200
|
|
|
08923a |
@@ -1,14 +1,14 @@
|
|
|
08923a |
MAINTAINERCLEANFILES = Makefile.in
|
|
|
08923a |
|
|
|
08923a |
-TARGET = fence_compute
|
|
|
08923a |
+TARGET = fence_compute fence_evacuate
|
|
|
08923a |
|
|
|
08923a |
-SRC = $(TARGET).py
|
|
|
08923a |
+SRC = $(TARGET:=.py)
|
|
|
08923a |
|
|
|
08923a |
EXTRA_DIST = $(SRC)
|
|
|
08923a |
|
|
|
08923a |
sbin_SCRIPTS = $(TARGET)
|
|
|
08923a |
|
|
|
08923a |
-man_MANS = $(TARGET).8
|
|
|
08923a |
+man_MANS = $(TARGET:=.8)
|
|
|
08923a |
|
|
|
08923a |
FENCE_TEST_ARGS = -l test -p test -n 1
|
|
|
08923a |
|
|
|
08923a |
diff -uNr a/tests/data/metadata/fence_evacuate.xml b/tests/data/metadata/fence_evacuate.xml
|
|
|
08923a |
--- a/tests/data/metadata/fence_evacuate.xml 1970-01-01 01:00:00.000000000 +0100
|
|
|
08923a |
+++ b/tests/data/metadata/fence_evacuate.xml 2017-09-27 15:28:10.978063549 +0200
|
|
|
08923a |
@@ -0,0 +1,163 @@
|
|
|
08923a |
+
|
|
|
08923a |
+<resource-agent name="fence_evacuate" shortdesc="Fence agent for the automatic resurrection of OpenStack compute instances" >
|
|
|
08923a |
+<longdesc>Used to reschedule flagged instances</longdesc>
|
|
|
08923a |
+<vendor-url></vendor-url>
|
|
|
08923a |
+<parameters>
|
|
|
08923a |
+ <parameter name="tenant_name" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-t, --tenant-name=[tenant]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Keystone Admin Tenant</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="auth_url" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-k, --auth-url=[url]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Keystone Admin Auth URL</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="port" unique="0" required="1" deprecated="1">
|
|
|
08923a |
+ <getopt mixed="-n, --plug=[id]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Physical plug number, name of virtual machine or UUID</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="passwd_script" unique="0" required="0" deprecated="1">
|
|
|
08923a |
+ <getopt mixed="-S, --password-script=[script]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Script to retrieve password</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="region_name" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--region-name=[region]" />
|
|
|
08923a |
+ <content type="boolean" />
|
|
|
08923a |
+ <shortdesc lang="en">Region Name</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="passwd" unique="0" required="0" deprecated="1">
|
|
|
08923a |
+ <getopt mixed="-p, --password=[password]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Login password or passphrase</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="endpoint_type" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-e, --endpoint-type=[endpoint]" />
|
|
|
08923a |
+ <content type="string" default="internalURL" />
|
|
|
08923a |
+ <shortdesc lang="en">Nova Endpoint type</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="action" unique="0" required="1">
|
|
|
08923a |
+ <getopt mixed="-o, --action=[action]" />
|
|
|
08923a |
+ <content type="string" default="reboot" />
|
|
|
08923a |
+ <shortdesc lang="en">Fencing Action</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="login" unique="0" required="0" deprecated="1">
|
|
|
08923a |
+ <getopt mixed="-l, --username=[name]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Login Name</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="plug" unique="0" required="1" obsoletes="port">
|
|
|
08923a |
+ <getopt mixed="-n, --plug=[id]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Physical plug number, name of virtual machine or UUID</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="username" unique="0" required="0" obsoletes="login">
|
|
|
08923a |
+ <getopt mixed="-l, --username=[name]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Login Name</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="password" unique="0" required="0" obsoletes="passwd">
|
|
|
08923a |
+ <getopt mixed="-p, --password=[password]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Login password or passphrase</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="password_script" unique="0" required="0" obsoletes="passwd_script">
|
|
|
08923a |
+ <getopt mixed="-S, --password-script=[script]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Script to retrieve password</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="insecure" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--insecure" />
|
|
|
08923a |
+ <content type="boolean" default="False" />
|
|
|
08923a |
+ <shortdesc lang="en">Allow Insecure TLS Requests</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="domain" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-d, --domain=[string]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">DNS domain in which hosts live</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="instance_filtering" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--instance-filtering" />
|
|
|
08923a |
+ <content type="boolean" default="True" />
|
|
|
08923a |
+ <shortdesc lang="en">Allow instances to be evacuated</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="no_shared_storage" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--no-shared-storage" />
|
|
|
08923a |
+ <content type="boolean" default="False" />
|
|
|
08923a |
+ <shortdesc lang="en">Disable functionality for dealing with shared storage</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="verbose" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-v, --verbose" />
|
|
|
08923a |
+ <content type="boolean" />
|
|
|
08923a |
+ <shortdesc lang="en">Verbose mode</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="debug" unique="0" required="0" deprecated="1">
|
|
|
08923a |
+ <getopt mixed="-D, --debug-file=[debugfile]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Write debug information to given file</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="debug_file" unique="0" required="0" obsoletes="debug">
|
|
|
08923a |
+ <getopt mixed="-D, --debug-file=[debugfile]" />
|
|
|
08923a |
+ <content type="string" />
|
|
|
08923a |
+ <shortdesc lang="en">Write debug information to given file</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="version" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-V, --version" />
|
|
|
08923a |
+ <content type="boolean" />
|
|
|
08923a |
+ <shortdesc lang="en">Display version information and exit</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="help" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-h, --help" />
|
|
|
08923a |
+ <content type="boolean" />
|
|
|
08923a |
+ <shortdesc lang="en">Display help and exit</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="separator" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="-C, --separator=[char]" />
|
|
|
08923a |
+ <content type="string" default="," />
|
|
|
08923a |
+ <shortdesc lang="en">Separator for CSV created by operation list</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="power_wait" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--power-wait=[seconds]" />
|
|
|
08923a |
+ <content type="second" default="0" />
|
|
|
08923a |
+ <shortdesc lang="en">Wait X seconds after issuing ON/OFF</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="login_timeout" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--login-timeout=[seconds]" />
|
|
|
08923a |
+ <content type="second" default="5" />
|
|
|
08923a |
+ <shortdesc lang="en">Wait X seconds for cmd prompt after login</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="delay" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--delay=[seconds]" />
|
|
|
08923a |
+ <content type="second" default="0" />
|
|
|
08923a |
+ <shortdesc lang="en">Wait X seconds before fencing is started</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="power_timeout" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--power-timeout=[seconds]" />
|
|
|
08923a |
+ <content type="second" default="20" />
|
|
|
08923a |
+ <shortdesc lang="en">Test X seconds for status change after ON/OFF</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="shell_timeout" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--shell-timeout=[seconds]" />
|
|
|
08923a |
+ <content type="second" default="180" />
|
|
|
08923a |
+ <shortdesc lang="en">Wait X seconds for cmd prompt after issuing command</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+ <parameter name="retry_on" unique="0" required="0">
|
|
|
08923a |
+ <getopt mixed="--retry-on=[attempts]" />
|
|
|
08923a |
+ <content type="integer" default="1" />
|
|
|
08923a |
+ <shortdesc lang="en">Count of attempts to retry power on</shortdesc>
|
|
|
08923a |
+ </parameter>
|
|
|
08923a |
+</parameters>
|
|
|
08923a |
+<actions>
|
|
|
08923a |
+ <action name="on" automatic="0"/>
|
|
|
08923a |
+ <action name="off" />
|
|
|
08923a |
+ <action name="reboot" />
|
|
|
08923a |
+ <action name="status" />
|
|
|
08923a |
+ <action name="list" />
|
|
|
08923a |
+ <action name="list-status" />
|
|
|
08923a |
+ <action name="monitor" />
|
|
|
08923a |
+ <action name="metadata" />
|
|
|
08923a |
+ <action name="validate-all" />
|
|
|
08923a |
+</actions>
|
|
|
08923a |
+</resource-agent>
|