diff --git a/.gitignore b/.gitignore index a31cf26..2bc7aa6 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz SOURCES/colorama-0.3.3.tar.gz SOURCES/google-cloud-sdk-206.0.0-linux-x86_64.tar.gz +SOURCES/httplib2-0.18.1.tar.gz SOURCES/jmespath-0.7.1.tar.gz SOURCES/pycryptodome-3.6.4.tar.gz SOURCES/pyroute2-0.4.13.tar.gz
diff --git a/.resource-agents.metadata b/.resource-agents.metadata index ba66161..f182bfe 100644 --- a/.resource-agents.metadata +++ b/.resource-agents.metadata @@ -7,6 +7,7 @@ c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz 0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz 182a102b36701e8c27b31e9751b6716a377fc3ef SOURCES/google-cloud-sdk-206.0.0-linux-x86_64.tar.gz +c5d22ce6660999633154927684eb9b799123e569 SOURCES/httplib2-0.18.1.tar.gz f8fbedae53dd8f8b2717a32de9a9ae0e15cba566 SOURCES/jmespath-0.7.1.tar.gz 326a73f58a62ebee00c11a12cfdd838b196e0e8e SOURCES/pycryptodome-3.6.4.tar.gz 147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
diff --git a/SOURCES/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch b/SOURCES/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch new file mode 100644 index 0000000..53f3f2f --- /dev/null +++ b/SOURCES/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch @@ -0,0 +1,318 @@ +--- a/heartbeat/gcp-vpc-move-vip.in 2020-08-03 15:48:28.497845842 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-08-03 15:49:24.129888908 +0200 +@@ -22,7 +22,8 @@ + import sys + import time + +-OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" ++ % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + from ocf import * +@@ -43,6 +44,10 @@ + import urllib2 as urlrequest + + ++# Constants for alias add/remove modes ++ADD = 0 ++REMOVE = 1 ++ + CONN = None + THIS_VM = None + ALIAS = None +@@ -53,27 +58,27 @@ + + + 1.0 +- Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance +- Floating IP Address on Google Cloud Platform ++ Floating IP Address or Range on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP range to a running instance ++ Floating IP Address or Range on Google Cloud Platform + + +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) + + +- ++ + Subnet name for the Alias IP + Subnet name for the Alias IP + + +- +- List of hosts in the cluster ++ ++ List of hosts in the cluster, separated by spaces + Host list + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). +- Stackdriver-logging support. Requires additional libraries (google-cloud-logging).
++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support + + + +@@ -107,7 +112,8 @@ + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open( ++ request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -134,17 +140,21 @@ + time.sleep(1) + + +-def set_alias(project, zone, instance, alias, alias_range_name=None): +- fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++def set_aliases(project, zone, instance, aliases, fingerprint): ++ """Sets the alias IP ranges for an instance. ++ ++ Args: ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ aliases: list, the list of dictionaries containing alias IP ranges ++ to be added to or removed from the instance. ++ fingerprint: string, the fingerprint of the network interface. ++ """ + body = { +- 'aliasIpRanges': [], +- 'fingerprint': fingerprint ++ 'aliasIpRanges': aliases, ++ 'fingerprint': fingerprint + } +- if alias: +- obj = {'ipCidrRange': alias} +- if alias_range_name: +- obj['subnetworkRangeName'] = alias_range_name +- body['aliasIpRanges'].append(obj) + + request = CONN.instances().updateNetworkInterface( + instance=instance, networkInterface='nic0', project=project, zone=zone, +@@ -153,21 +163,75 @@ + wait_for_operation(project, zone, operation) + + +-def get_alias(project, zone, instance): +- iface = get_network_ifaces(project, zone, instance) ++def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): ++ """Adds or removes an alias IP range for a GCE instance. ++ ++ Args: ++ mode: int, a constant (ADD (0) or REMOVE (1)) indicating the ++ operation type. ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ alias: string, the alias IP range to be added to or removed from ++ the instance. ++ alias_range_name: string, the subnet name for the alias IP range. ++ ++ Returns: ++ True if the existing list of alias IP ranges was modified, or False ++ otherwise.
++ """ ++ ifaces = get_network_ifaces(project, zone, instance) ++ fingerprint = ifaces[0]['fingerprint'] ++ ++ try: ++ old_aliases = ifaces[0]['aliasIpRanges'] ++ except KeyError: ++ old_aliases = [] ++ ++ new_aliases = [a for a in old_aliases if a['ipCidrRange'] != alias] ++ ++ if alias: ++ if mode == ADD: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ new_aliases.append(obj) ++ elif mode == REMOVE: ++ pass # already removed during new_aliases build ++ else: ++ raise ValueError('Invalid value for mode: {}'.format(mode)) ++ ++ if (sorted(new_aliases) != sorted(old_aliases)): ++ set_aliases(project, zone, instance, new_aliases, fingerprint) ++ return True ++ else: ++ return False ++ ++ ++def add_alias(project, zone, instance, alias, alias_range_name=None): ++ return add_rm_alias(ADD, project, zone, instance, alias, alias_range_name) ++ ++ ++def remove_alias(project, zone, instance, alias): ++ return add_rm_alias(REMOVE, project, zone, instance, alias) ++ ++ ++def get_aliases(project, zone, instance): ++ ifaces = get_network_ifaces(project, zone, instance) + try: +- return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ aliases = ifaces[0]['aliasIpRanges'] ++ return [a['ipCidrRange'] for a in aliases] + except KeyError: +- return '' ++ return [] + + +-def get_localhost_alias(): ++def get_localhost_aliases(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) +- net_iface = json.loads(net_iface.decode('utf-8')) ++ net_iface = json.loads(net_iface) + try: +- return net_iface[0]['ipAliases'][0] ++ return net_iface[0]['ipAliases'] + except (KeyError, IndexError): +- return '' ++ return [] + + + def get_zone(project, instance): +@@ -201,21 +265,17 @@ + + + def gcp_alias_start(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- # If I already have the IP, exit. If it has an alias IP that isn't the VIP, +- # then remove it +- if my_alias == alias: ++ if alias in my_aliases: ++ # TODO: Do we need to check alias_range_name? + logger.info( + '%s already has %s attached. No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) +- elif my_alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') + +- # Loops through all hosts & remove the alias IP from the host that has it ++ # If the alias is currently attached to another host, detach it. 
+ hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: + hostlist = hostlist.replace(THIS_VM, '').split() +@@ -223,47 +283,53 @@ + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: + host_zone = get_zone(project, host) +- host_alias = get_alias(project, host_zone, host) +- if alias == host_alias: ++ host_aliases = get_aliases(project, host_zone, host) ++ if alias in host_aliases: + logger.info( +- '%s is attached to %s - Removing all alias IP addresses from %s' % +- (alias, host, host)) +- set_alias(project, host_zone, host, '') ++ '%s is attached to %s - Removing %s from %s' % ++ (alias, host, alias, host)) ++ remove_alias(project, host_zone, host, alias) + break + +- # add alias IP to localhost +- set_alias( ++ # Add alias IP range to localhost ++ add_alias( + project, my_zone, THIS_VM, alias, + os.environ.get('OCF_RESKEY_alias_range_name')) + +- # Check the IP has been added +- my_alias = get_localhost_alias() +- if alias == my_alias: ++ # Verify that the IP range has been added ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: + logger.info('Finished adding %s to %s' % (alias, THIS_VM)) +- elif my_alias: +- logger.error( +- 'Failed to add IP. %s has an IP attached but it isn\'t %s' % +- (THIS_VM, alias)) +- sys.exit(OCF_ERR_GENERIC) + else: +- logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ if my_aliases: ++ logger.error( ++ 'Failed to add alias IP range %s. %s has alias IP ranges attached but' ++ + ' they don\'t include %s' % (alias, THIS_VM, alias)) ++ else: ++ logger.error( ++ 'Failed to add IP range %s. %s has no alias IP ranges attached' ++ % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + + def gcp_alias_stop(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- if my_alias == alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') ++ if alias in my_aliases: ++ logger.info('Removing %s from %s' % (alias, THIS_VM)) ++ remove_alias(project, my_zone, THIS_VM, alias) ++ else: ++ logger.info( ++ '%s is not attached to %s. No action required' ++ % (alias, THIS_VM)) + + + def gcp_alias_status(alias): +- my_alias = get_localhost_alias() +- if alias == my_alias: +- logger.info('%s has the correct IP address attached' % THIS_VM) ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: ++ logger.info('%s has the correct IP range attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,7 +341,8 @@ + + # Populate global vars + try: +- CONN = googleapiclient.discovery.build('compute', 'v1') ++ CONN = googleapiclient.discovery.build('compute', 'v1', ++ cache_discovery=False) + except Exception as e: + logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -283,7 +350,8 @@ + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' ++ + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') +@@ -309,7 +377,8 @@ + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) + log.addHandler(handler) +- logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': ++ OCF_RESOURCE_INSTANCE}) + except ImportError: + logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support')
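The core of the gcp-vpc-move-vip change above is pure list bookkeeping, so it can be exercised without any GCE credentials. The following is a minimal standalone sketch (illustration only, not part of the patch) mirroring add_rm_alias(): filter the target range out of the old list, re-append it on ADD, and report a change only when the list actually differs; the sort key (str) is an assumption added here so the dict comparison also works under Python 3, where dicts are not orderable.

    # Illustration only -- standalone rework of the add/remove bookkeeping.
    ADD = 0
    REMOVE = 1

    def updated_aliases(old_aliases, mode, alias, alias_range_name=None):
        # Drop any existing entry for this CIDR range first, as the patch does.
        new_aliases = [a for a in old_aliases if a['ipCidrRange'] != alias]
        if mode == ADD:
            obj = {'ipCidrRange': alias}
            if alias_range_name:
                obj['subnetworkRangeName'] = alias_range_name
            new_aliases.append(obj)
        elif mode != REMOVE:
            raise ValueError('Invalid value for mode: {}'.format(mode))
        # Only a real change should trigger updateNetworkInterface().
        changed = sorted(new_aliases, key=str) != sorted(old_aliases, key=str)
        return new_aliases, changed

    ranges = [{'ipCidrRange': '10.0.0.5/32'}, {'ipCidrRange': '10.0.1.0/24'}]
    print(updated_aliases(ranges, REMOVE, '10.0.0.5/32'))
    # -> ([{'ipCidrRange': '10.0.1.0/24'}], True)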
diff --git a/SOURCES/bz1848673-sybaseASE-verify-start-action-only.patch b/SOURCES/bz1848673-sybaseASE-verify-start-action-only.patch new file mode 100644 index 0000000..402bbd6 --- /dev/null +++ b/SOURCES/bz1848673-sybaseASE-verify-start-action-only.patch @@ -0,0 +1,41 @@ +From 953f689cb2a37606b6d4b2250ebec23f129f5095 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Thu, 9 Jul 2020 23:32:22 -0700 +Subject: [PATCH] sybaseASE: Run verify_all() for start operation only + +The `sybaseASE` resource agent runs the `verify_all()` function at the +beginning of start, stop, and monitor operations. + +When `verify_all()` is run for a probe (monitor) operation and +`sybase_home` resides on a cluster-managed filesystem, the probe often +fails with `$OCF_ERR_GENERIC` because the filesystem isn't mounted yet. +This prevents the resource from starting on that node. + +For the stop operation, there's simply no reason to run `verify_all()`. + +This patch removes `verify_all()` for the stop and monitor operations. +It is now only run for the start operation. + +Resolves: RHBZ#1848673 +Resolves: RHBZ#1848025 +--- + heartbeat/sybaseASE.in | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index 9ddd429be..7ff30bd31 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -864,12 +864,10 @@ case $__OCF_ACTION in + exit $? + ;; + stop) +- verify_all || exit $OCF_ERR_GENERIC + ase_stop + exit $? + ;; + status | monitor) +- verify_all || exit $OCF_ERR_GENERIC + ase_status $OCF_CHECK_LEVEL + exit $? + ;;
diff --git a/SOURCES/bz1850779-azure-lb-fix-redirect-issue.patch b/SOURCES/bz1850779-azure-lb-fix-redirect-issue.patch new file mode 100644 index 0000000..b171613 --- /dev/null +++ b/SOURCES/bz1850779-azure-lb-fix-redirect-issue.patch @@ -0,0 +1,54 @@ +From d22700fc5d5098c683b465ea0fede43803fd4d6b Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Tue, 7 Jul 2020 02:18:09 -0700 +Subject: [PATCH] azure-lb: Don't redirect nc listener output to pidfile + +The `lb_start()` function spawns an `nc` listener background process +and echoes the resulting pid to `$pidfile`. Due to a bug in the +redirection, all future data received by the `nc` process is also +appended to `$pidfile`. + +If binary data is received later and appended to `$pidfile`, the +monitor operation fails when `grep` searches the now-binary file. + +``` +line 97: kill: Binary: arguments must be process or job IDs ] +line 97: kill: file: arguments must be process or job IDs ] +line 97: kill: /var/run/nc_PF2_02.pid: arguments must be process or job + IDs ] +line 97: kill: matches: arguments must be process or job IDs ] +``` + +Then the start operation fails during recovery. `lb_start()` spawns a +new `nc` process, but the old process is still running and using the +configured port. + +``` +nc_PF2_02_start_0:777:stderr [ Ncat: bind to :::62502: Address + already in use. QUITTING. ] +``` + +This patch fixes the issue by removing the `nc &` command from the +section whose output gets redirected to `$pidfile`. Now, only the `nc` +PID is echoed to `$pidfile`. + +Resolves: RHBZ#1850778 +Resolves: RHBZ#1850779 +--- + heartbeat/azure-lb | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/azure-lb +index 05c134514..05755d778 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -113,7 +113,8 @@ lb_start() { + if ! lb_monitor; then + ocf_log debug "Starting $process: $cmd" + # Execute the command as created above +- eval "$cmd & echo \$!" > $pidfile ++ $cmd & ++ echo $! > $pidfile + if lb_monitor; then + ocf_log debug "$process: $cmd started successfully, calling monitor" + lb_monitor
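The azure-lb fix above comes down to one rule: only the listener's PID may ever reach $pidfile; the listener's own output must not. As an illustration of that separation (written in Python rather than the agent's shell, with a sleep process standing in for the nc listener and a hypothetical pidfile path):

    # Illustration only: start the background process with its own
    # stdout/stderr, then write just the PID to the pidfile.
    import subprocess

    pidfile = "/tmp/demo_listener.pid"   # hypothetical path
    cmd = ["sleep", "300"]               # stand-in for the nc listener

    proc = subprocess.Popen(cmd)         # output is NOT tied to the pidfile
    with open(pidfile, "w") as f:
        f.write(str(proc.pid))           # the pidfile holds only the PID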
+ +> azure-events: ERROR: [Errno 104] Connection reset by peer +> lrmd[2734]: notice: rsc_azure-events_monitor_10000:113088:stderr [ ocf-exit-reason:[Errno 104] Connection reset by peer ] +--- + heartbeat/azure-events.in | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index 8709d97e3..bd812f4b2 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -82,9 +82,19 @@ class azHelper: + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) +- resp = urllib2.urlopen(req) +- data = resp.read() +- ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ try: ++ resp = urllib2.urlopen(req) ++ except URLError as e: ++ if hasattr(e, 'reason'): ++ print('We failed to reach a server. Reason: '), e.reason ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ elif hasattr(e, 'code'): ++ print('The server couldn\'t fulfill the request. Error code: '), e.code ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ else: ++ data = resp.read() ++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ + if data: + data = json.loads(data) + + +From c4071ec4a82fcb831f170f341e0790633e4b904f Mon Sep 17 00:00:00 2001 +From: Gustavo Figueira +Date: Tue, 19 May 2020 12:53:22 +0200 +Subject: [PATCH 2/2] azure-events: use ocf.logger.warning instead of print + +--- + heartbeat/azure-events.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index bd812f4b2..a48a86309 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -86,10 +86,10 @@ class azHelper: + resp = urllib2.urlopen(req) + except URLError as e: + if hasattr(e, 'reason'): +- print('We failed to reach a server. Reason: '), e.reason ++ ocf.logger.warning("Failed to reach the server: %s" % e.reason) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + elif hasattr(e, 'code'): +- print('The server couldn\'t fulfill the request. Error code: '), e.code ++ ocf.logger.warning("The server couldn\'t fulfill the request. 
Error code: %s" % e.code) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + else: + data = resp.read() diff --git a/SOURCES/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch b/SOURCES/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch new file mode 100644 index 0000000..7795e78 --- /dev/null +++ b/SOURCES/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch @@ -0,0 +1,68 @@ +From f2bf1d8a07ea810099b03469883cb7f485ab9ac1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:09:43 +0200 +Subject: [PATCH 1/2] azure-events: import URLError and encode postData when + necessary + +--- + heartbeat/azure-events.in | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index d4a166d9f..a7f359468 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -13,8 +13,10 @@ import subprocess + import json + try: + import urllib2 ++ from urllib2 import URLError + except ImportError: + import urllib.request as urllib2 ++ from urllib.error import URLError + import socket + from collections import defaultdict + +@@ -76,9 +78,13 @@ class azHelper: + Send a request to Azure's Azure Metadata Service API + """ + url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ data = "" + ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) + ocf.logger.debug("_sendMetadataRequest: url = %s" % url) + ++ if postData and type(postData) != bytes: ++ postData = postData.encode() ++ + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) + +From 1ab5d71bff95eb271f1e1bbc401961dc313219d9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 Jul 2020 21:25:43 +0200 +Subject: [PATCH 2/2] azure-events: report error if jsondata not received + +--- + heartbeat/azure-events.in | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a7f359468..3a24d6358 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -117,8 +117,12 @@ class azHelper: + jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) + ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) + +- ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) +- return attrDict(jsondata["compute"]) ++ if jsondata: ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ else: ++ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") ++ sys.exit(ocf.OCF_ERR_GENERIC) + + @staticmethod + def pullScheduledEvents(): diff --git a/SOURCES/bz1862121-azure-events-3-decode-improvement.patch b/SOURCES/bz1862121-azure-events-3-decode-improvement.patch new file mode 100644 index 0000000..437aca5 --- /dev/null +++ b/SOURCES/bz1862121-azure-events-3-decode-improvement.patch @@ -0,0 +1,47 @@ +--- a/heartbeat/azure-events.in 2020-08-03 16:55:42.336402080 +0200 ++++ b/heartbeat/azure-events.in 2020-07-31 14:27:32.336656383 +0200 +@@ -189,6 +189,8 @@ + ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) + try: + ret = subprocess.check_output(command) ++ if type(ret) != str: ++ ret = ret.decode() + ocf.logger.debug("_exec: return = %s" % ret) + return ret.rstrip() + except Exception as err: +@@ -242,7 +244,7 @@ + + nodes = [] + nodeList = 
clusterHelper._exec("crm_node", "--list") +- for n in nodeList.decode().split("\n"): ++ for n in nodeList.split("\n"): + nodes.append(n.split()[1]) + ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) + +@@ -313,7 +315,7 @@ + ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) + return False + summary = summary.split("Transition Summary:")[1] +- ret = summary.decode().split("\n").pop(0) ++ ret = summary.split("\n").pop(0) + + ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) + return ret +@@ -334,7 +336,7 @@ + if len(resources) == 0: + ret = [] + else: +- ret = resources.decode().split("\n") ++ ret = resources.split("\n") + + ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) + return ret +@@ -480,7 +482,7 @@ + + eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) + if eventIDStr: +- eventIDs = eventIDStr.decode().split(",") ++ eventIDs = eventIDStr.split(",") + else: + eventIDs = None + diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec index c80bda0..84adbf9 100644 --- a/SPECS/resource-agents.spec +++ b/SPECS/resource-agents.spec @@ -55,6 +55,10 @@ %global googlecloudsdk google-cloud-sdk %global googlecloudsdk_version 206.0.0 %global googlecloudsdk_dir %{bundled_lib_dir}/%{googlecloudsdk} +# python-httplib2 bundle +%global httplib2 httplib2 +%global httplib2_version 0.18.1 +%global httplib2_dir %{bundled_lib_dir}/%{httplib2} # python-pyroute2 bundle %global pyroute2 pyroute2 %global pyroute2_version 0.4.13 @@ -95,7 +99,7 @@ Name: resource-agents Summary: Open Source HA Reusable Cluster Resource Scripts Version: 4.1.1 -Release: 61%{?dist} +Release: 61%{?dist}.4 License: GPLv2+ and LGPLv2+ and ASL 2.0 URL: https://github.com/ClusterLabs/resource-agents %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} @@ -109,14 +113,15 @@ Source2: %{sap_script_package_prefix}-%{sap_script_package_hash}.tar.gz Source3: %{saphana_prefix}-%{saphana_version}.tar.gz Source4: %{saphana_scaleout_prefix}-%{saphana_scaleout_hash}.tar.gz Source5: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz -Source6: %{pyroute2}-%{pyroute2_version}.tar.gz -Source7: %{colorama}-%{colorama_version}.tar.gz -Source8: %{jmespath}-%{jmespath_version}.tar.gz -Source9: %{pycryptodome}-%{pycryptodome_version}.tar.gz -Source10: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz -Source11: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz -Source12: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz -Source13: %{aliyuncli}-%{aliyuncli_version}.tar.gz +Source6: %{httplib2}-%{httplib2_version}.tar.gz +Source7: %{pyroute2}-%{pyroute2_version}.tar.gz +Source8: %{colorama}-%{colorama_version}.tar.gz +Source9: %{jmespath}-%{jmespath_version}.tar.gz +Source10: %{pycryptodome}-%{pycryptodome_version}.tar.gz +Source11: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz +Source12: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz +Source13: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz +Source14: %{aliyuncli}-%{aliyuncli_version}.tar.gz Patch0: bz1596139-1-nova-compute-wait-NovaEvacuate.patch Patch1: bz1470840-LVM-volume_group_check_only.patch Patch2: bz1538689-vdo-vol.patch @@ -235,6 +240,12 @@ Patch114: bz1633249-gcp-pd-move-4-fixes-and-improvements.patch Patch115: bz1633249-gcp-pd-move-5-bundle.patch Patch116: bz1840750-nfsserver-fix-nfsv4-only-support.patch Patch117: bz1601950-exportfs-2-fix-monitor-action.patch +Patch118: bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch +Patch119: 
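Taken together, the three azure-events patches above harden a single code path: the metadata request. A condensed standalone sketch of the resulting behavior (illustration only; the real agent logs through ocf.logger and resets attr_globalPullState instead of printing):

    # Illustration only: Python 2/3-safe imports, bytes-encoded POST data,
    # decoded response, and URLError handled instead of escaping the agent.
    try:
        import urllib2
        from urllib2 import URLError
    except ImportError:
        import urllib.request as urllib2
        from urllib.error import URLError

    def send_metadata_request(url, post_data=None):
        if post_data and not isinstance(post_data, bytes):
            post_data = post_data.encode()           # urlopen needs bytes on py3
        req = urllib2.Request(url, post_data)
        req.add_header("Metadata", "true")
        try:
            resp = urllib2.urlopen(req)
        except URLError as e:
            print("metadata request failed: %s" % e)  # agent: ocf.logger.warning
            return None                               # caller must handle no data
        data = resp.read()
        if not isinstance(data, str):
            data = data.decode()                      # keep str on both 2 and 3
        return data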
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec index c80bda0..84adbf9 100644 --- a/SPECS/resource-agents.spec +++ b/SPECS/resource-agents.spec @@ -55,6 +55,10 @@ %global googlecloudsdk google-cloud-sdk %global googlecloudsdk_version 206.0.0 %global googlecloudsdk_dir %{bundled_lib_dir}/%{googlecloudsdk} +# python-httplib2 bundle +%global httplib2 httplib2 +%global httplib2_version 0.18.1 +%global httplib2_dir %{bundled_lib_dir}/%{httplib2} # python-pyroute2 bundle %global pyroute2 pyroute2 %global pyroute2_version 0.4.13 %global pyroute2_dir %{bundled_lib_dir}/%{pyroute2} @@ -95,7 +99,7 @@ Name: resource-agents Summary: Open Source HA Reusable Cluster Resource Scripts Version: 4.1.1 -Release: 61%{?dist} +Release: 61%{?dist}.4 License: GPLv2+ and LGPLv2+ and ASL 2.0 URL: https://github.com/ClusterLabs/resource-agents %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} @@ -109,14 +113,15 @@ Source2: %{sap_script_package_prefix}-%{sap_script_package_hash}.tar.gz Source3: %{saphana_prefix}-%{saphana_version}.tar.gz Source4: %{saphana_scaleout_prefix}-%{saphana_scaleout_hash}.tar.gz Source5: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz -Source6: %{pyroute2}-%{pyroute2_version}.tar.gz -Source7: %{colorama}-%{colorama_version}.tar.gz -Source8: %{jmespath}-%{jmespath_version}.tar.gz -Source9: %{pycryptodome}-%{pycryptodome_version}.tar.gz -Source10: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz -Source11: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz -Source12: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz -Source13: %{aliyuncli}-%{aliyuncli_version}.tar.gz +Source6: %{httplib2}-%{httplib2_version}.tar.gz +Source7: %{pyroute2}-%{pyroute2_version}.tar.gz +Source8: %{colorama}-%{colorama_version}.tar.gz +Source9: %{jmespath}-%{jmespath_version}.tar.gz +Source10: %{pycryptodome}-%{pycryptodome_version}.tar.gz +Source11: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz +Source12: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz +Source13: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz +Source14: %{aliyuncli}-%{aliyuncli_version}.tar.gz Patch0: bz1596139-1-nova-compute-wait-NovaEvacuate.patch Patch1: bz1470840-LVM-volume_group_check_only.patch Patch2: bz1538689-vdo-vol.patch @@ -235,6 +240,12 @@ Patch114: bz1633249-gcp-pd-move-4-fixes-and-improvements.patch Patch115: bz1633249-gcp-pd-move-5-bundle.patch Patch116: bz1840750-nfsserver-fix-nfsv4-only-support.patch Patch117: bz1601950-exportfs-2-fix-monitor-action.patch +Patch118: bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch +Patch119: bz1848673-sybaseASE-verify-start-action-only.patch +Patch120: bz1862121-azure-events-1-handle-exceptions-in-urlopen.patch +Patch121: bz1862121-azure-events-2-import-urlerror-encode-postdata.patch +Patch122: bz1862121-azure-events-3-decode-improvement.patch +Patch123: bz1850779-azure-lb-fix-redirect-issue.patch # bundle patches Patch1000: bz1568588-7-gcp-bundled.patch @@ -429,7 +440,7 @@ SAP instances to be managed in a cluster environment. License: GPLv2+ Summary: SAP HANA Scale-Out cluster resource agents Version: 0.164.0 -Release: 6%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +Release: 6%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.4 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} Group: System Environment/Base %else @@ -449,7 +460,7 @@ environment. License: GPLv2+ Summary: SAP cluster connector script Version: 3.0.1 -Release: 37%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +Release: 37%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.4 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} Group: System Environment/Base %else @@ -591,6 +602,12 @@ exit 1 %patch115 -p1 %patch116 -p1 %patch117 -p1 +%patch118 -p1 +%patch119 -p1 +%patch120 -p1 +%patch121 -p1 +%patch122 -p1 +%patch123 -p1 # add SAPHana agents to Makefile.am mv %{saphana_prefix}-%{saphana_version}/ra/SAPHana* heartbeat @@ -609,6 +626,16 @@ mkdir -p %{bundled_lib_dir} # google-cloud-sdk bundle %ifarch x86_64 tar -xzf %SOURCE5 -C %{bundled_lib_dir} + +## upgrade httplib2 to fix CVE-2020-11078 +pushd %{googlecloudsdk_dir} +rm -rf lib/third_party/httplib2 +popd + +# python-httplib2 bundle +tar -xzf %SOURCE6 -C %{bundled_lib_dir} +mv %{bundled_lib_dir}/%{httplib2}-%{httplib2_version} %{httplib2_dir} + # gcp*: append bundled-directory to search path, gcloud-ra %patch1000 -p1 # google-cloud-sdk fixes @@ -633,7 +660,7 @@ rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE -cp %{googlecloudsdk_dir}/lib/third_party/httplib2/LICENSE %{googlecloudsdk}_httplib2_LICENSE +cp %{httplib2_dir}/LICENSE %{googlecloudsdk}_httplib2_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE @@ -679,7 +706,7 @@ cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloud cp %{googlecloudsdk_dir}/lib/third_party/prompt_toolkit/LICENSE %{googlecloudsdk}_prompt_toolkit_LICENSE # python-pyroute2 bundle -tar -xzf %SOURCE6 -C %{bundled_lib_dir} +tar -xzf %SOURCE7 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir} cp %{pyroute2_dir}/README.md %{pyroute2}_README.md cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2 cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2 # python-colorama bundle -tar -xzf %SOURCE7 -C %{bundled_lib_dir} +tar -xzf %SOURCE8 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{colorama}-%{colorama_version} %{colorama_dir} cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt cp %{colorama_dir}/README.rst %{colorama}_README.rst @@ -698,7 +725,7 @@ rm -rf *.egg-info popd # python-jmespath bundle -tar -xzf %SOURCE8 -C %{bundled_lib_dir} +tar -xzf %SOURCE9 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/jmespath.py-%{jmespath_version} %{jmespath_dir} cp %{jmespath_dir}/LICENSE.txt %{jmespath}_LICENSE.txt cp %{jmespath_dir}/README.rst %{jmespath}_README.rst @@ -708,28 +735,28 @@ rm -rf jmespath.egg-info popd # python-pycryptodome bundle -tar -xzf %SOURCE9 -C %{bundled_lib_dir} +tar -xzf %SOURCE10 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir} cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst # python-aliyun-sdk-core bundle -tar -xzf %SOURCE10 -C %{bundled_lib_dir} +tar -xzf %SOURCE11 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir} cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst # python-aliyun-sdk-ecs bundle -tar -xzf %SOURCE11 -C %{bundled_lib_dir} +tar -xzf %SOURCE12 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir} cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst # python-aliyun-sdk-vpc bundle -tar -xzf %SOURCE12 -C %{bundled_lib_dir} +tar -xzf %SOURCE13 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir} cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst # aliyuncli bundle -tar -xzf %SOURCE13 -C %{bundled_lib_dir} +tar -xzf %SOURCE14 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir} cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE @@ -786,8 +813,13 @@ JFLAGS="$(echo '%{_smp_mflags}')" make $JFLAGS -# python-pyroute2 bundle %ifarch x86_64 +# python-httplib2 bundle +pushd %{httplib2_dir} +%{__python2} setup.py build +popd + +# python-pyroute2 bundle pushd %{pyroute2_dir} %{__python2} setup.py build popd @@ -866,6 +898,11 @@ test -d %{buildroot}/%{_bindir} || mkdir %{buildroot}/%{_bindir} ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir} popd +# python-httplib2 bundle +pushd %{httplib2_dir} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{googlecloudsdk_dir}/lib/third_party +popd + # python-pyroute2 bundle pushd %{pyroute2_dir} %{__python2} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir} @@ -1231,6 +1268,25 @@ ccs_update_schema > /dev/null 2>&1 ||: %endif %changelog +* Fri Oct 2 2020 Oyvind Albrigtsen - 4.1.1-61.4 +- Upgrade bundled python-httplib2 to fix CVE-2020-11078 + + Resolves: rhbz#1850992 + +* Wed Aug 26 2020 Oyvind Albrigtsen - 4.1.1-61.2 +- azure-lb: fix redirect issue + + Resolves: rhbz#1850779 + +* Mon Aug 3 2020 Oyvind Albrigtsen - 4.1.1-61.1 +- gcp-vpc-move-vip: add support for multiple alias IPs +- sybaseASE: run verify action during start action only +- azure-events: handle exceptions in urlopen + + Resolves: rhbz#1846732 + Resolves: rhbz#1848673 + Resolves: rhbz#1862121 + * Tue Jun 9 2020 Oyvind Albrigtsen - 4.1.1-61 - exportfs: add symlink support
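Since the only functional change in this rebuild is the bundled httplib2 upgrade (CVE-2020-11078), a quick post-install smoke test is to import the copy that gcloud-ra actually uses and check its version. The path below is an assumption based on the spec's install-lib for the httplib2 bundle (it depends on how %{bundled_lib_dir} expands); adjust it to the built package:

    # Hypothetical smoke test for the rebuilt package; the bundled path is an
    # assumption derived from the %install section above.
    import sys

    sys.path.insert(0, "/usr/lib/resource-agents/bundled/google-cloud-sdk/lib/third_party")
    import httplib2

    print(httplib2.__version__)   # expect 0.18.1, the CVE-2020-11078 fix
    assert tuple(int(x) for x in httplib2.__version__.split(".")) >= (0, 18, 1)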