diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f79624a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz
+SOURCES/aliyun-cli-2.1.10.tar.gz
+SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+SOURCES/colorama-0.3.3.tar.gz
+SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz
+SOURCES/pycryptodome-3.6.4.tar.gz
+SOURCES/pyroute2-0.4.13.tar.gz
diff --git a/.resource-agents.metadata b/.resource-agents.metadata
new file mode 100644
index 0000000..e21f8a9
--- /dev/null
+++ b/.resource-agents.metadata
@@ -0,0 +1,9 @@
+0358e1cb7fe86b2105bd2646cbe86f3c0273844a SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz
+306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz
+0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz
+876e2b0c0e3031c6e6101745acd08e4e9f53d6a9 SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz
+326a73f58a62ebee00c11a12cfdd838b196e0e8e SOURCES/pycryptodome-3.6.4.tar.gz
+147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
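Each line of .resource-agents.metadata pairs a SHA-1 digest with one of the tarballs ignored above, the usual dist-git convention for sources kept in a lookaside cache instead of in git. A minimal sketch of the integrity check this implies, assuming the tarballs have been fetched into SOURCES/ (verify_source is a hypothetical helper, not part of this repo):

    import hashlib

    def verify_source(path, expected_sha1):
        # Stream the tarball so large archives need not fit in memory.
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 16), b''):
                h.update(chunk)
        return h.hexdigest() == expected_sha1

    # e.g. verify_source('SOURCES/pyroute2-0.4.13.tar.gz',
    #                    '147149db11104c06d405fd077dcd2aa1c345f109')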
diff --git a/SOURCES/1-configure-add-python-path-detection.patch b/SOURCES/1-configure-add-python-path-detection.patch
new file mode 100644
index 0000000..f1ed530
--- /dev/null
+++ b/SOURCES/1-configure-add-python-path-detection.patch
@@ -0,0 +1,29 @@
+From 266e10a719a396a3a522e4b0ce4271a372e4f6f1 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 13 Jul 2018 08:59:45 +0200
+Subject: [PATCH 1/3] configure: add Python path detection
+
+---
+ configure.ac | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index 90ed2453..bdf057d3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -501,6 +501,12 @@ AC_SUBST(PING)
+ AC_SUBST(RM)
+ AC_SUBST(TEST)
+ 
++AM_PATH_PYTHON
++if test -z "$PYTHON"; then
++    echo "*** Essential program python not found" 1>&2
++    exit 1
++fi
++
+ AC_PATH_PROGS(ROUTE, route)
+ AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command)
+ 
+-- 
+2.17.1
+
diff --git a/SOURCES/10-gcloud-support-info.patch b/SOURCES/10-gcloud-support-info.patch
new file mode 100644
index 0000000..ef96ca5
--- /dev/null
+++ b/SOURCES/10-gcloud-support-info.patch
@@ -0,0 +1,25 @@
+diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py
+--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py	1980-01-01 09:00:00.000000000 +0100
++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py	2019-04-04 11:59:47.592768577 +0200
+@@ -900,6 +900,9 @@
+   return """\
+ For detailed information on this command and its flags, run:
+   {command_path} --help
++
++WARNING: {command_path} is only supported for "{command_path} init" and for use
++with the agents in resource-agents.
+ """.format(command_path=' '.join(command.GetPath()))
+ 
+ 
+diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
+--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py	1980-01-01 09:00:00.000000000 +0100
++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py	2019-04-04 12:00:23.991142694 +0200
+@@ -84,7 +84,7 @@
+ 
+   pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
+   loader = cli.CLILoader(
+-      name='gcloud',
++      name='gcloud-ra',
+       command_root_directory=os.path.join(pkg_root, 'surface'),
+       allow_non_existing_modules=True,
+       version_func=VersionFunc,
diff --git a/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch
new file mode 100644
index 0000000..fd891e0
--- /dev/null
+++ b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch
@@ -0,0 +1,24 @@
+From 059effc058758c1294d80f03741bf5c078f1498d Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 13 Jul 2018 13:22:56 +0200
+Subject: [PATCH 2/3] CI: skip Python agents in shellcheck
+
+---
+ ci/build.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/ci/build.sh b/ci/build.sh
+index 608387ad..c331e9ab 100755
+--- a/ci/build.sh
++++ b/ci/build.sh
+@@ -58,6 +58,7 @@ check_all_executables() {
+ 	echo "Checking executables and .sh files..."
+ 	while read -r script; do
+ 		file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue
++		file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue
+ 		head=$(head -n1 "$script")
+ 		[[ "$head" =~ .*ruby.* ]] && continue
+ 		[[ "$head" =~ .*zsh.* ]] && continue
+-- 
+2.17.1
+
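The one-line change above mirrors the existing binary-file filter in ci/build.sh: anything that "file --mime" reports as text/x-python is skipped before the shell checks run, since the new GCP agents are Python rather than shell. A rough Python sketch of the same filter, assuming file(1) is on PATH (should_skip is illustrative, not part of the patch):

    import subprocess

    def should_skip(script):
        # Matches the two 'continue' conditions in check_all_executables():
        # binary files and Python sources are not shell-checked.
        mime = subprocess.check_output(['file', '--mime', script]).decode()
        return 'charset=binary' in mime or 'text/x-python' in mime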
diff --git a/SOURCES/3-gcp-vpc-move-vip.patch b/SOURCES/3-gcp-vpc-move-vip.patch
new file mode 100644
index 0000000..75beb19
--- /dev/null
+++ b/SOURCES/3-gcp-vpc-move-vip.patch
@@ -0,0 +1,646 @@
+From 92da4155d881e9ac2dce3a51c6953817349d164a Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 25 Jun 2018 11:03:51 -0300
+Subject: [PATCH 1/4] gcp-vpc-move-vip.in: manage ip alias
+
+Add a resource agent to manage ip alias in the cluster.
+
+start:
+	Check if any machine in hostlist has the alias_ip assigned and
+	disassociate it.
+	Assign alias_ip to the current machine.
+
+stop:
+	Disassociate the alias_ip from the current machine.
+
+status/monitor:
+	Check if alias_ip is assigned to the current machine.
+
+---
+
+This is a port of the following bash script to Python:
+https://storage.googleapis.com/sapdeploy/pacemaker-gcp/alias
+
+The problem with the bash script is the use of gcloud whose command line
+API is not stable.
+
+ocf-tester.in results:
+
+	> sudo ./tools/ocf-tester.in -o alias_ip='10.128.1.0/32' -o stackdriver_logging=yes -n gcp-vpc-move-vip.in heartbeat/gcp-vpc-move-vip.in
+	Beginning tests for heartbeat/gcp-vpc-move-vip.in...
+	./tools/ocf-tester.in: line 226: cd: @datadir@/resource-agents: No such file or directory
+	close failed in file object destructor:
+	sys.excepthook is missing
+	lost sys.stderr
+	* rc=1: Your agent produces meta-data which does not conform to ra-api-1.dtd
+	Tests failed: heartbeat/gcp-vpc-move-vip.in failed 1 tests
+
+The only test failing is the meta-data one, but all the agents that I tried
+also fail on this. If this is a concern, could you please point me to
+a test which succeeds so I can check what I am doing differently?
+
+This commit can also be viewed at:
+	https://github.com/collabora-gce/resource-agents/tree/alias
+
+Thanks
+---
+ configure.ac                  |   1 +
+ doc/man/Makefile.am           |   1 +
+ heartbeat/Makefile.am         |   1 +
+ heartbeat/gcp-vpc-move-vip.in | 299 ++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 302 insertions(+)
+ create mode 100755 heartbeat/gcp-vpc-move-vip.in
+
+diff --git a/configure.ac b/configure.ac
+index bdf057d33..3d8f9ca74 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -959,6 +959,7 @@ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate])
+ AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88])
+ AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip])
++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip])
+ AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit])
+ AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget])
+ AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira])
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index c59126d13..e9eaf369f 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -114,6 +114,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_galera.7 \
+                           ocf_heartbeat_garbd.7 \
+                           ocf_heartbeat_gcp-vpc-move-ip.7 \
++                          ocf_heartbeat_gcp-vpc-move-vip.7 \
+                           ocf_heartbeat_iSCSILogicalUnit.7 \
+                           ocf_heartbeat_iSCSITarget.7 \
+                           ocf_heartbeat_iface-bridge.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index 4f5059e27..36b271956 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -111,6 +111,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
+ 			galera			\
+ 			garbd			\
+ 			gcp-vpc-move-ip		\
++			gcp-vpc-move-vip	\
+ 			iSCSILogicalUnit	\
+ 			iSCSITarget		\
+ 			ids			\
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+new file mode 100755
+index 000000000..4954e11df
+--- /dev/null
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -0,0 +1,299 @@
++#!/usr/bin/env python
++# ---------------------------------------------------------------------
++# Copyright 2016 Google Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# ---------------------------------------------------------------------
++# Description:	Google Cloud Platform - Floating IP Address (Alias)
++# ---------------------------------------------------------------------
++
++import json
++import logging
++import os
++import sys
++import time
++
++import googleapiclient.discovery
++
++if sys.version_info >= (3, 0):
++  # Python 3 imports.
++  import urllib.parse as urlparse
++  import urllib.request as urlrequest
++else:
++  # Python 2 imports.
++  import urllib as urlparse
++  import urllib2 as urlrequest
++
++
++CONN = None
++THIS_VM = None
++OCF_SUCCESS = 0
++OCF_ERR_GENERIC = 1
++OCF_ERR_CONFIGURED = 6
++OCF_NOT_RUNNING = 7
++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
++METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
++METADATA = \
++'''<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="gcp-vpc-move-vip">
++  <version>1.0</version>
++  <longdesc lang="en">Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance</longdesc>
++  <shortdesc lang="en">Floating IP Address on Google Cloud Platform</shortdesc>
++  <parameters>
++    <parameter name="hostlist" unique="1" required="1">
++      <longdesc lang="en">List of hosts in the cluster</longdesc>
++      <shortdesc lang="en">Host list</shortdesc>
++      <content type="string" default="" />
++    </parameter>
++    <parameter name="stackdriver-logging" unique="0" required="0">
++      <longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
++      <shortdesc lang="en">Stackdriver-logging support</shortdesc>
++      <content type="boolean" default="" />
++    </parameter>
++    <parameter name="alias_ip" unique="1" required="1">
++      <longdesc lang="en">IP Address to be added including CIDR, e.g. 192.168.0.1/32</longdesc>
++      <shortdesc lang="en">IP Address to be added including CIDR, e.g. 192.168.0.1/32</shortdesc>
++      <content type="string" default="" />
++    </parameter>
++    <parameter name="alias_range_name" unique="1" required="0">
++      <longdesc lang="en">Subnet name for the Alias IP</longdesc>
++      <shortdesc lang="en">Subnet name for the Alias IP</shortdesc>
++      <content type="string" default="" />
++    </parameter>
++  </parameters>
++  <actions>
++    <action name="start" timeout="300" />
++    <action name="stop" timeout="15" />
++    <action name="monitor" timeout="15" interval="60" depth="0" />
++    <action name="meta-data" timeout="15" />
++  </actions>
++</resource-agent>'''
++
++
++def get_metadata(metadata_key, params=None, timeout=None):
++  """Performs a GET request with the metadata headers.
++
++  Args:
++    metadata_key: string, the metadata to perform a GET request on.
++    params: dictionary, the query parameters in the GET request.
++    timeout: int, timeout in seconds for metadata requests.
++
++  Returns:
++    HTTP response from the GET request.
++
++  Raises:
++    urlerror.HTTPError: raises when the GET request fails.
++  """
++  timeout = timeout or 60
++  metadata_url = os.path.join(METADATA_SERVER, metadata_key)
++  params = urlparse.urlencode(params or {})
++  url = '%s?%s' % (metadata_url, params)
++  request = urlrequest.Request(url, headers=METADATA_HEADERS)
++  request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
++  return request_opener.open(request, timeout=timeout * 1.1).read()
++
++
++def get_instance(project, zone, instance):
++  request = CONN.instances().get(
++      project=project, zone=zone, instance=instance)
++  return request.execute()
++
++
++def get_network_ifaces(project, zone, instance):
++  return get_instance(project, zone, instance)['networkInterfaces']
++
++
++def wait_for_operation(project, zone, operation):
++  while True:
++    result = CONN.zoneOperations().get(
++        project=project,
++        zone=zone,
++        operation=operation['name']).execute()
++
++    if result['status'] == 'DONE':
++      if 'error' in result:
++        raise Exception(result['error'])
++      return
++    time.sleep(1)
++
++
++def set_alias(project, zone, instance, alias, alias_range_name=None):
++  fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint']
++  body = {
++      'aliasIpRanges': [],
++      'fingerprint': fingerprint
++  }
++  if alias:
++    obj = {'ipCidrRange': alias}
++    if alias_range_name:
++      obj['subnetworkRangeName'] = alias_range_name
++    body['aliasIpRanges'].append(obj)
++
++  request = CONN.instances().updateNetworkInterface(
++      instance=instance, networkInterface='nic0', project=project, zone=zone,
++      body=body)
++  operation = request.execute()
++  wait_for_operation(project, zone, operation)
++
++
++def get_alias(project, zone, instance):
++  iface = get_network_ifaces(project, zone, instance)
++  try:
++    return iface[0]['aliasIpRanges'][0]['ipCidrRange']
++  except KeyError:
++    return ''
++
++
++def get_localhost_alias():
++  net_iface = get_metadata('instance/network-interfaces', {'recursive': True})
++  net_iface = json.loads(net_iface.decode('utf-8'))
++  try:
++    return net_iface[0]['ipAliases'][0]
++  except (KeyError, IndexError):
++    return ''
++
++
++def get_zone(project, instance):
++  request = CONN.instances().aggregatedList(project=project)
++  while request is not None:
++    response = request.execute()
++    zones = response.get('items', {})
++    for zone in zones.values():
++      for inst in zone.get('instances', []):
++        if inst['name'] == instance:
++          return inst['zone'].split("/")[-1]
++    request = CONN.instances().aggregatedList_next(
++        previous_request=request, previous_response=response)
++  raise Exception("Unable to find instance %s" % (instance))
++
++
++def gcp_alias_start(alias):
++  if not alias:
++    sys.exit(OCF_ERR_CONFIGURED)
++  my_alias = get_localhost_alias()
++  my_zone = get_metadata('instance/zone').split('/')[-1]
++  project = get_metadata('project/project-id')
++
++  # If I already have the IP, exit. If it has an alias IP that isn't the VIP,
++  # then remove it
++  if my_alias == alias:
++    logging.info(
++        '%s already has %s attached. No action required' % (THIS_VM, alias))
++    sys.exit(OCF_SUCCESS)
++  elif my_alias:
++    logging.info('Removing %s from %s' % (my_alias, THIS_VM))
++    set_alias(project, my_zone, THIS_VM, '')
++
++  # Loops through all hosts & remove the alias IP from the host that has it
++  hostlist = os.environ.get('OCF_RESKEY_hostlist', '')
++  hostlist.replace(THIS_VM, '')
++  for host in hostlist.split():
++    host_zone = get_zone(project, host)
++    host_alias = get_alias(project, host_zone, host)
++    if alias == host_alias:
++      logging.info(
++          '%s is attached to %s - Removing all alias IP addresses from %s' %
++          (alias, host, host))
++      set_alias(project, host_zone, host, '')
++      break
++
++  # add alias IP to localhost
++  set_alias(
++      project, my_zone, THIS_VM, alias,
++      os.environ.get('OCF_RESKEY_alias_range_name'))
++
++  # Check the IP has been added
++  my_alias = get_localhost_alias()
++  if alias == my_alias:
++    logging.info('Finished adding %s to %s' % (alias, THIS_VM))
++  elif my_alias:
++    logging.error(
++        'Failed to add IP. %s has an IP attached but it isn\'t %s' %
++        (THIS_VM, alias))
++    sys.exit(OCF_ERR_GENERIC)
++  else:
++    logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM))
++    sys.exit(OCF_ERR_GENERIC)
++
++
++def gcp_alias_stop(alias):
++  if not alias:
++    sys.exit(OCF_ERR_CONFIGURED)
++  my_alias = get_localhost_alias()
++  my_zone = get_metadata('instance/zone').split('/')[-1]
++  project = get_metadata('project/project-id')
++
++  if my_alias == alias:
++    logging.info('Removing %s from %s' % (my_alias, THIS_VM))
++    set_alias(project, my_zone, THIS_VM, '')
++
++
++def gcp_alias_status(alias):
++  if not alias:
++    sys.exit(OCF_ERR_CONFIGURED)
++  my_alias = get_localhost_alias()
++  if alias == my_alias:
++    logging.info('%s has the correct IP address attached' % THIS_VM)
++  else:
++    sys.exit(OCF_NOT_RUNNING)
++
++
++def configure():
++  global CONN
++  global THIS_VM
++
++  # Populate global vars
++  CONN = googleapiclient.discovery.build('compute', 'v1')
++  THIS_VM = get_metadata('instance/name')
++
++  # Prepare logging
++  logging.basicConfig(
++      format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO)
++  logging.getLogger('googleapiclient').setLevel(logging.WARN)
++  logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging')
++  if logging_env:
++    logging_env = logging_env.lower()
++    if any(x in logging_env for x in ['yes', 'true', 'enabled']):
++      try:
++        import google.cloud.logging.handlers
++        client = google.cloud.logging.Client()
++        handler = google.cloud.logging.handlers.CloudLoggingHandler(
++            client, name=THIS_VM)
++        handler.setLevel(logging.INFO)
++        formatter = logging.Formatter('gcp:alias "%(message)s"')
++        handler.setFormatter(formatter)
++        root_logger = logging.getLogger()
++        root_logger.addHandler(handler)
++      except ImportError:
++        logging.error('Couldn\'t import google.cloud.logging, '
++            'disabling Stackdriver-logging support')
++
++
++def main():
++  configure()
++
++  alias = os.environ.get('OCF_RESKEY_alias_ip')
++  if 'start' in sys.argv[1]:
++    gcp_alias_start(alias)
++  elif 'stop' in sys.argv[1]:
++    gcp_alias_stop(alias)
++  elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
++    gcp_alias_status(alias)
++  elif 'meta-data' in sys.argv[1]:
++    print(METADATA)
++  else:
++    logging.error('gcp:alias - no such function %s' % str(sys.argv[1]))
++
++
++if __name__ == "__main__":
++  main()
+
+From 0e6ba4894a748664ac1d8ff5b9e8c271f0b04d93 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Thu, 12 Jul 2018 09:01:22 -0300
+Subject: [PATCH 2/4] gcp-vpc-move-vip.in: minor fixes
+
+- Get hostlist from the project if the parameter is not given
+- Verify that the alias is present outside of each action function
+- Don't call configure if 'meta-data' action is given
+---
+ heartbeat/gcp-vpc-move-vip.in | 40 ++++++++++++++++++++++++++++------------
+ 1 file changed, 28 insertions(+), 12 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index 4954e11df..f3d117bda 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -50,7 +50,7 @@ METADATA = \
+   <longdesc lang="en">Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance</longdesc>
+   <shortdesc lang="en">Floating IP Address on Google Cloud Platform</shortdesc>
+   <parameters>
+-    <parameter name="hostlist" unique="1" required="1">
++    <parameter name="hostlist" unique="1" required="0">
+       <longdesc lang="en">List of hosts in the cluster</longdesc>
+       <shortdesc lang="en">Host list</shortdesc>
+       <content type="string" default="" />
+@@ -177,9 +177,22 @@ def get_zone(project, instance):
+   raise Exception("Unable to find instance %s" % (instance))
+ 
+ 
++def get_instances_list(project, exclude):
++  hostlist = []
++  request = CONN.instances().aggregatedList(project=project)
++  while request is not None:
++    response = request.execute()
++    zones = response.get('items', {})
++    for zone in zones.values():
++      for inst in zone.get('instances', []):
++        if inst['name'] != exclude:
++          hostlist.append(inst['name'])
++    request = CONN.instances().aggregatedList_next(
++        previous_request=request, previous_response=response)
++  return hostlist
++
++
+ def gcp_alias_start(alias):
+-  if not alias:
+-    sys.exit(OCF_ERR_CONFIGURED)
+   my_alias = get_localhost_alias()
+   my_zone = get_metadata('instance/zone').split('/')[-1]
+   project = get_metadata('project/project-id')
+@@ -196,8 +209,11 @@ def gcp_alias_start(alias):
+ 
+   # Loops through all hosts & remove the alias IP from the host that has it
+   hostlist = os.environ.get('OCF_RESKEY_hostlist', '')
+-  hostlist.replace(THIS_VM, '')
+-  for host in hostlist.split():
++  if hostlist:
++    hostlist.replace(THIS_VM, '').split()
++  else:
++    hostlist = get_instances_list(project, THIS_VM)
++  for host in hostlist:
+     host_zone = get_zone(project, host)
+     host_alias = get_alias(project, host_zone, host)
+     if alias == host_alias:
+@@ -227,8 +243,6 @@ def gcp_alias_start(alias):
+ 
+ 
+ def gcp_alias_stop(alias):
+-  if not alias:
+-    sys.exit(OCF_ERR_CONFIGURED)
+   my_alias = get_localhost_alias()
+   my_zone = get_metadata('instance/zone').split('/')[-1]
+   project = get_metadata('project/project-id')
+@@ -239,8 +253,6 @@ def gcp_alias_stop(alias):
+ 
+ 
+ def gcp_alias_status(alias):
+-  if not alias:
+-    sys.exit(OCF_ERR_CONFIGURED)
+   my_alias = get_localhost_alias()
+   if alias == my_alias:
+     logging.info('%s has the correct IP address attached' % THIS_VM)
+@@ -280,17 +292,21 @@ def configure():
+ 
+ 
+ def main():
+-  configure()
++  if 'meta-data' in sys.argv[1]:
++    print(METADATA)
++    return
+ 
+   alias = os.environ.get('OCF_RESKEY_alias_ip')
++  if not alias:
++    sys.exit(OCF_ERR_CONFIGURED)
++
++  configure()
+   if 'start' in sys.argv[1]:
+     gcp_alias_start(alias)
+   elif 'stop' in sys.argv[1]:
+     gcp_alias_stop(alias)
+   elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
+     gcp_alias_status(alias)
+-  elif 'meta-data' in sys.argv[1]:
+-    print(METADATA)
+   else:
+     logging.error('gcp:alias - no such function %s' % str(sys.argv[1]))
+ 
+
+From 1f50c4bc80f23f561a8630c12076707366525899 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Thu, 12 Jul 2018 13:02:16 -0300
+Subject: [PATCH 3/4] gcp-vpc-move-vip.in: implement validate-all
+
+Also fix some return errors
+---
+ heartbeat/gcp-vpc-move-vip.in | 47 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 34 insertions(+), 13 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index f3d117bda..a90c2de8d 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -22,7 +22,10 @@ import os
+ import sys
+ import time
+ 
+-import googleapiclient.discovery
++try:
++  import googleapiclient.discovery
++except ImportError:
++  pass
+ 
+ if sys.version_info >= (3, 0):
+   # Python 3 imports.
+@@ -36,6 +39,7 @@ else:
+ 
+ CONN = None
+ THIS_VM = None
++ALIAS = None
+ OCF_SUCCESS = 0
+ OCF_ERR_GENERIC = 1
+ OCF_ERR_CONFIGURED = 6
+@@ -210,7 +214,7 @@ def gcp_alias_start(alias):
+   # Loops through all hosts & remove the alias IP from the host that has it
+   hostlist = os.environ.get('OCF_RESKEY_hostlist', '')
+   if hostlist:
+-    hostlist.replace(THIS_VM, '').split()
++    hostlist = hostlist.replace(THIS_VM, '').split()
+   else:
+     hostlist = get_instances_list(project, THIS_VM)
+   for host in hostlist:
+@@ -260,14 +264,31 @@ def gcp_alias_status(alias):
+     sys.exit(OCF_NOT_RUNNING)
+ 
+ 
+-def configure():
++def validate():
++  global ALIAS
+   global CONN
+   global THIS_VM
+ 
+   # Populate global vars
+-  CONN = googleapiclient.discovery.build('compute', 'v1')
+-  THIS_VM = get_metadata('instance/name')
++  try:
++    CONN = googleapiclient.discovery.build('compute', 'v1')
++  except Exception as e:
++    logging.error('Couldn\'t connect with google api: ' + str(e))
++    sys.exit(OCF_ERR_CONFIGURED)
++
++  try:
++    THIS_VM = get_metadata('instance/name')
++  except Exception as e:
++    logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
++    sys.exit(OCF_ERR_CONFIGURED)
+ 
++  ALIAS = os.environ.get('OCF_RESKEY_alias_ip')
++  if not ALIAS:
++    logging.error('Missing alias_ip parameter')
++    sys.exit(OCF_ERR_CONFIGURED)
++
++
++def configure_logs():
+   # Prepare logging
+   logging.basicConfig(
+       format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO)
+@@ -296,19 +317,19 @@ def main():
+     print(METADATA)
+     return
+ 
+-  alias = os.environ.get('OCF_RESKEY_alias_ip')
+-  if not alias:
+-    sys.exit(OCF_ERR_CONFIGURED)
++  validate()
++  if 'validate-all' in sys.argv[1]:
++    return
+ 
+-  configure()
++  configure_logs()
+   if 'start' in sys.argv[1]:
+-    gcp_alias_start(alias)
++    gcp_alias_start(ALIAS)
+   elif 'stop' in sys.argv[1]:
+-    gcp_alias_stop(alias)
++    gcp_alias_stop(ALIAS)
+   elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
+-    gcp_alias_status(alias)
++    gcp_alias_status(ALIAS)
+   else:
+-    logging.error('gcp:alias - no such function %s' % str(sys.argv[1]))
++    logging.error('no such function %s' % str(sys.argv[1]))
+ 
+ 
+ if __name__ == "__main__":
+
+From f11cb236bb348ebee74e962d0ded1cb2fc97bd5f Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Fri, 13 Jul 2018 08:01:02 -0300
+Subject: [PATCH 4/4] gcp-vpc-move-vip.in: minor fixes
+
+---
+ heartbeat/gcp-vpc-move-vip.in | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index a90c2de8d..9fc87242f 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!@PYTHON@ -tt
+ # ---------------------------------------------------------------------
+ # Copyright 2016 Google Inc.
+ #
+@@ -59,7 +59,7 @@ METADATA = \
+       <shortdesc lang="en">Host list</shortdesc>
+       <content type="string" default="" />
+     </parameter>
+-    <parameter name="stackdriver-logging" unique="0" required="0">
++    <parameter name="stackdriver_logging" unique="0" required="0">
+       <longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
+       <shortdesc lang="en">Stackdriver-logging support</shortdesc>
+       <content type="boolean" default="" />
+@@ -80,6 +80,7 @@ METADATA = \
+     <action name="stop" timeout="15" />
+     <action name="monitor" timeout="15" interval="60" depth="0" />
+     <action name="meta-data" timeout="15" />
++    <action name="validate-all" timeout="15" />
+   </actions>
+ </resource-agent>'''
+ 
diff --git a/SOURCES/4-gcp-vpc-move-route.patch b/SOURCES/4-gcp-vpc-move-route.patch
new file mode 100644
index 0000000..ccd221e
--- /dev/null
+++ b/SOURCES/4-gcp-vpc-move-route.patch
@@ -0,0 +1,632 @@
+From 0ee4c62105ee8f90a43fe0bf8a65bc9b9da2e7e0 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Wed, 18 Jul 2018 11:54:40 -0300
+Subject: [PATCH 1/4] gcp-vpc-move-route.in: python implementation of
+ gcp-vpc-move-ip.in
+
+The gcloud API is not reliable and it is slow, so add a Python version of
+gcp-vpc-move-ip.in
+---
+ configure.ac                    |   1 +
+ doc/man/Makefile.am             |   1 +
+ heartbeat/Makefile.am           |   1 +
+ heartbeat/gcp-vpc-move-route.in | 441 ++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 444 insertions(+)
+ create mode 100644 heartbeat/gcp-vpc-move-route.in
+
+diff --git a/configure.ac b/configure.ac
+index 3d8f9ca74..039b4942c 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -960,6 +960,7 @@ AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88])
+ AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip])
++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route])
+ AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit])
+ AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget])
+ AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira])
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index e9eaf369f..3ac0569de 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -115,6 +115,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_garbd.7 \
+                           ocf_heartbeat_gcp-vpc-move-ip.7 \
+                           ocf_heartbeat_gcp-vpc-move-vip.7 \
++                          ocf_heartbeat_gcp-vpc-move-route.7 \
+                           ocf_heartbeat_iSCSILogicalUnit.7 \
+                           ocf_heartbeat_iSCSITarget.7 \
+                           ocf_heartbeat_iface-bridge.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index 36b271956..d4750bf09 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -112,6 +112,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
+ 			garbd			\
+ 			gcp-vpc-move-ip		\
+ 			gcp-vpc-move-vip	\
++			gcp-vpc-move-route	\
+ 			iSCSILogicalUnit	\
+ 			iSCSITarget		\
+ 			ids			\
+diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+new file mode 100644
+index 000000000..5f4569baa
+--- /dev/null
++++ b/heartbeat/gcp-vpc-move-route.in
+@@ -0,0 +1,441 @@
++#!@PYTHON@ -tt
++# -*- coding: utf-8 -*-
++#
++#
++# OCF resource agent to move an IP address within a VPC in GCP
++#
++# License: GNU General Public License (GPL)
++# Copyright (c) 2018 Hervé Werner (MFG Labs)
++# Copyright 2018 Google Inc.
++# Based on code from Markus Guertler (aws-vpc-move-ip)
++# All Rights Reserved.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of version 2 of the GNU General Public License as
++# published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it would be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++#
++# Further, this software is distributed without any warranty that it is
++# free of the rightful claim of any third person regarding infringement
++# or the like.  Any license provided herein, whether implied or
++# otherwise, applies only to this software file.  Patent licenses, if
++# any, provided herein do not apply to combinations of this program with
++# other software, or any other product whatsoever.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write the Free Software Foundation,
++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++#
++
++
++#######################################################################
++
++import atexit
++import logging
++import os
++import sys
++import time
++
++try:
++  import googleapiclient.discovery
++  import pyroute2
++except ImportError:
++  pass
++
++if sys.version_info >= (3, 0):
++  # Python 3 imports.
++  import urllib.parse as urlparse
++  import urllib.request as urlrequest
++else:
++  # Python 2 imports.
++  import urllib as urlparse
++  import urllib2 as urlrequest
++
++
++OCF_SUCCESS = 0
++OCF_ERR_GENERIC = 1
++OCF_ERR_UNIMPLEMENTED = 3
++OCF_ERR_PERM = 4
++OCF_ERR_CONFIGURED = 6
++OCF_NOT_RUNNING = 7
++GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1'
++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
++METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
++METADATA = \
++'''<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="gcp-vpc-move-route">
++<version>1.0</version>
++<longdesc lang="en">
++Resource Agent that can move a floating IP address within a GCP VPC by changing an
++entry in the routing table. This agent also configures the floating IP locally
++on the instance OS.
++Requirements:
++- IP forwarding must be enabled on all instances in order to be able to
++terminate the route
++- The floating IP address must be chosen so that it is outside all existing
++subnets in the VPC network
++- IAM permissions
++(see https://cloud.google.com/compute/docs/access/iam-permissions):
++1) compute.routes.delete, compute.routes.get and compute.routes.update on the
++route
++2) compute.networks.updatePolicy on the network (to add a new route)
++3) compute.networks.get on the network (to check the VPC network existence)
++4) compute.routes.list on the project (to check conflicting routes)
++</longdesc>
++<shortdesc lang="en">Move IP within a GCP VPC</shortdesc>
++
++<parameters>
++
++<parameter name="stackdriver_logging" unique="0" required="0">
++<longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
++<shortdesc lang="en">Stackdriver-logging support</shortdesc>
++<content type="boolean" default="" />
++</parameter>
++
++<parameter name="ip" unique="1" required="1">
++<longdesc lang="en">
++Floating IP address. Note that this IP must be chosen outside of all existing
++subnet ranges
++</longdesc>
++<shortdesc lang="en">Floating IP</shortdesc>
++<content type="string" />
++</parameter>
++
++<parameter name="vpc_network" required="1">
++<longdesc lang="en">
++Name of the VPC network
++</longdesc>
++<shortdesc lang="en">VPC network</shortdesc>
++<content type="string" default="${OCF_RESKEY_vpc_network_default}" />
++</parameter>
++
++<parameter name="interface">
++<longdesc lang="en">
++Name of the network interface
++</longdesc>
++<shortdesc lang="en">Network interface name</shortdesc>
++<content type="string" default="${OCF_RESKEY_interface_default}" />
++</parameter>
++
++<parameter name="route_name" unique="1">
++<longdesc lang="en">
++Route name
++</longdesc>
++<shortdesc lang="en">Route name</shortdesc>
++<content type="string" default="${OCF_RESKEY_route_name_default}" />
++</parameter>
++</parameters>
++
++<actions>
++<action name="start" timeout="180s" />
++<action name="stop" timeout="180s" />
++<action name="monitor" depth="0" timeout="30s" interval="60s" />
++<action name="validate-all" timeout="5s" />
++<action name="meta-data" timeout="5s" />
++</actions>
++</resource-agent>
++'''
++
++
++class Context(object):
++  __slots__ = 'conn', 'iface_idx', 'instance', 'instance_url', 'interface', \
++      'ip', 'iproute', 'project', 'route_name', 'vpc_network', \
++      'vpc_network_url', 'zone'
++
++
++def wait_for_operation(ctx, response):
++  """Blocks until operation completes.
++  Code from GitHub's GoogleCloudPlatform/python-docs-samples
++
++  Args:
++    response: dict, a request's response
++  """
++  def _OperationGetter(response):
++    operation = response[u'name']
++    if response.get(u'zone'):
++      return ctx.conn.zoneOperations().get(
++          project=ctx.project, zone=ctx.zone, operation=operation)
++    else:
++      return ctx.conn.globalOperations().get(
++          project=ctx.project, operation=operation)
++
++  while True:
++    result = _OperationGetter(response).execute()
++
++    if result['status'] == 'DONE':
++      if 'error' in result:
++        raise Exception(result['error'])
++      return result
++
++    time.sleep(1)
++
++
++def get_metadata(metadata_key, params=None, timeout=None):
++  """Performs a GET request with the metadata headers.
++
++  Args:
++    metadata_key: string, the metadata to perform a GET request on.
++    params: dictionary, the query parameters in the GET request.
++    timeout: int, timeout in seconds for metadata requests.
++
++  Returns:
++    HTTP response from the GET request.
++
++  Raises:
++    urlerror.HTTPError: raises when the GET request fails.
++  """
++  timeout = timeout or 60
++  metadata_url = os.path.join(METADATA_SERVER, metadata_key)
++  params = urlparse.urlencode(params or {})
++  url = '%s?%s' % (metadata_url, params)
++  request = urlrequest.Request(url, headers=METADATA_HEADERS)
++  request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
++  return request_opener.open(request, timeout=timeout * 1.1).read()
++
++
++def validate(ctx):
++  if os.geteuid() != 0:
++    logging.error('You must run this agent as root')
++    sys.exit(OCF_ERR_PERM)
++
++  try:
++    ctx.conn = googleapiclient.discovery.build('compute', 'v1')
++  except Exception as e:
++    logging.error('Couldn\'t connect with google api: ' + str(e))
++    sys.exit(OCF_ERR_CONFIGURED)
++
++  ctx.ip = os.environ.get('OCF_RESKEY_ip')
++  if not ctx.ip:
++    logging.error('Missing ip parameter')
++    sys.exit(OCF_ERR_CONFIGURED)
++
++  try:
++    ctx.instance = get_metadata('instance/name')
++    ctx.zone = get_metadata('instance/zone').split('/')[-1]
++    ctx.project = get_metadata('project/project-id')
++  except Exception as e:
++    logging.error(
++        'Instance information not found. Is this a GCE instance?: %s', str(e))
++    sys.exit(OCF_ERR_CONFIGURED)
++
++  ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
++      GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
++  ctx.vpc_network = os.environ.get('OCF_RESKEY_vpc_network', 'default')
++  ctx.vpc_network_url = '%s/projects/%s/global/networks/%s' % (
++      GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network)
++  ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0')
++  ctx.route_name = os.environ.get(
++      'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME'])
++  ctx.iproute = pyroute2.IPRoute()
++  atexit.register(ctx.iproute.close)
++  idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
++  if not idxs:
++    logging.error('Network interface not found')
++    sys.exit(OCF_ERR_CONFIGURED)
++  ctx.iface_idx = idxs[0]
++
++
++def check_conflicting_routes(ctx):
++  fl = '(destRange = "%s*") AND (network = "%s") AND (name != "%s")' % (
++      ctx.ip, ctx.vpc_network_url, ctx.route_name)
++  request = ctx.conn.routes().list(project=ctx.project, filter=fl)
++  response = request.execute()
++  route_list = response.get('items', None)
++  if route_list:
++    logging.error(
++        'Conflicting unmanaged routes for destination %s/32 in VPC %s found: %s',
++        ctx.ip, ctx.vpc_network, str(route_list))
++    sys.exit(OCF_ERR_CONFIGURED)
++
++
++def route_release(ctx):
++  request = ctx.conn.routes().delete(project=ctx.project, route=ctx.route_name)
++  wait_for_operation(ctx, request.execute())
++
++
++def ip_monitor(ctx):
++  logging.info('IP monitor: checking local network configuration')
++
++  def address_filter(addr):
++    for attr in addr['attrs']:
++      if attr[0] == 'IFA_LOCAL':
++        if attr[1] == ctx.ip:
++          return True
++        else:
++          return False
++
++  route = ctx.iproute.get_addr(
++      index=ctx.iface_idx, match=address_filter)
++  if not route:
++    logging.warn(
++        'The floating IP %s is not locally configured on this instance (%s)',
++        ctx.ip, ctx.instance)
++    return OCF_NOT_RUNNING
++
++  logging.debug(
++      'The floating IP %s is correctly configured on this instance (%s)',
++      ctx.ip, ctx.instance)
++  return OCF_SUCCESS
++
++
++def ip_release(ctx):
++  ctx.iproute.addr('del', index=ctx.iface_idx, address=ctx.ip, mask=32)
++
++
++def ip_and_route_start(ctx):
++  logging.info('Bringing up the floating IP %s', ctx.ip)
++
++  # Add a new entry in the routing table
++  # If the route entry exists and is pointing to another instance, take it over
++
++  # Ensure that there is no route that we are not aware of that is also handling our IP
++  check_conflicting_routes(ctx)
++
++  # There is no replace API, We need to first delete the existing route if any
++  try:
++    request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name)
++    request.execute()
++  # TODO: check specific exception for 404
++  except googleapiclient.errors.HttpError as e:
++    if e.resp.status != 404:
++      raise
++  else:
++    route_release(ctx)
++
++  route_body = {
++      'name': ctx.route_name,
++      'network': ctx.vpc_network_url,
++      'destRange': '%s/32' % ctx.ip,
++      'nextHopInstance': ctx.instance_url,
++  }
++  try:
++    request = ctx.conn.routes().insert(project=ctx.project, body=route_body)
++    wait_for_operation(ctx, request.execute())
++  except googleapiclient.errors.HttpError:
++    try:
++      request = ctx.conn.networks().get(
++          project=ctx.project, network=ctx.vpc_network)
++      request.execute()
++    except googleapiclient.errors.HttpError as e:
++      if e.resp.status == 404:
++        logging.error('VPC network not found')
++        sys.exit(OCF_ERR_CONFIGURED)
++      else:
++        raise
++    else:
++      raise
++
++  # Configure the IP address locally
++  # We need to release the IP first
++  if ip_monitor(ctx) == OCF_SUCCESS:
++    ip_release(ctx)
++
++  ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32)
++  ctx.iproute.link('set', index=ctx.iface_idx, state='up')
++  logging.info('Successfully brought up the floating IP %s', ctx.ip)
++
++
++def route_monitor(ctx):
++  logging.info('GCP route monitor: checking route table')
++
++  # Ensure that there is no route that we are not aware of that is also handling our IP
++  check_conflicting_routes(ctx)
++
++  try:
++    request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name)
++    response = request.execute()
++  except googleapiclient.errors.HttpError as e:
++    if 'Insufficient Permission' in e.content:
++      return OCF_ERR_PERM
++    elif e.resp.status == 404:
++      return OCF_NOT_RUNNING
++    else:
++      raise
++
++  routed_to_instance = response.get('nextHopInstance', '<unknown>')
++  instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
++      GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
++  if routed_to_instance != instance_url:
++    logging.warn(
++        'The floating IP %s is not routed to this instance (%s) but to instance %s',
++        ctx.ip, ctx.instance, routed_to_instance.split('/')[-1])
++    return OCF_NOT_RUNNING
++
++  logging.debug(
++      'The floating IP %s is correctly routed to this instance (%s)',
++      ctx.ip, ctx.instance)
++  return OCF_SUCCESS
++
++
++def ip_and_route_stop(ctx):
++  logging.info('Bringing down the floating IP %s', ctx.ip)
++
++  # Delete the route entry
++  # If the route entry exists and is pointing to another instance, don't touch it
++  if route_monitor(ctx) == OCF_NOT_RUNNING:
++    logging.info(
++        'The floating IP %s is already not routed to this instance (%s)',
++        ctx.ip, ctx.instance)
++  else:
++    route_release(ctx)
++
++  if ip_monitor(ctx) == OCF_NOT_RUNNING:
++    logging.info('The floating IP %s is already down', ctx.ip)
++  else:
++    ip_release(ctx)
++
++
++def configure_logs(ctx):
++  # Prepare logging
++  logging.basicConfig(
++      format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO)
++  logging.getLogger('googleapiclient').setLevel(logging.WARN)
++  logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging')
++  if logging_env:
++    logging_env = logging_env.lower()
++    if any(x in logging_env for x in ['yes', 'true', 'enabled']):
++      try:
++        import google.cloud.logging.handlers
++        client = google.cloud.logging.Client()
++        handler = google.cloud.logging.handlers.CloudLoggingHandler(
++            client, name=ctx.instance)
++        handler.setLevel(logging.INFO)
++        formatter = logging.Formatter('gcp:route "%(message)s"')
++        handler.setFormatter(formatter)
++        root_logger = logging.getLogger()
++        root_logger.addHandler(handler)
++      except ImportError:
++        logging.error('Couldn\'t import google.cloud.logging, '
++            'disabling Stackdriver-logging support')
++
++
++def main():
++  if 'meta-data' in sys.argv[1]:
++    print(METADATA)
++    return
++
++  ctx = Context()
++
++  validate(ctx)
++  if 'validate-all' in sys.argv[1]:
++    return
++
++  configure_logs(ctx)
++  if 'start' in sys.argv[1]:
++    ip_and_route_start(ctx)
++  elif 'stop' in sys.argv[1]:
++    ip_and_route_stop(ctx)
++  elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
++    sys.exit(ip_monitor(ctx))
++  else:
++    usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}'
++    logging.error(usage)
++    sys.exit(OCF_ERR_UNIMPLEMENTED)
++
++
++if __name__ == "__main__":
++  main()
+
+From 6590c99f462403808854114ec1031755e5ce6b36 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Thu, 19 Jul 2018 12:33:44 -0300
+Subject: [PATCH 2/4] gcp-vpc-move-ip.in: add deprecation message
+
+---
+ heartbeat/gcp-vpc-move-ip.in | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
+index 4a6c343a8..3b8d998b3 100755
+--- a/heartbeat/gcp-vpc-move-ip.in
++++ b/heartbeat/gcp-vpc-move-ip.in
+@@ -348,6 +348,8 @@ ip_and_route_stop() {
+ #
+ ###############################################################################
+ 
++ocf_log warn "gcp-vpc-move-ip is deprecated, prefer to use gcp-vpc-move-route instead"
++
+ case $__OCF_ACTION in
+   meta-data)  metadata
+               exit $OCF_SUCCESS
+
+From 73608196d21068c6c2d5fb9f77e3d40179c85fee Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Fri, 20 Jul 2018 08:26:17 -0300
+Subject: [PATCH 3/4] gcp-vpc-move-route.in: move stackdriver parameter
+
+Move the stackdriver parameter to the bottom of the metadata list
+---
+ heartbeat/gcp-vpc-move-route.in | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+index 5f4569baa..8d5bfff36 100644
+--- a/heartbeat/gcp-vpc-move-route.in
++++ b/heartbeat/gcp-vpc-move-route.in
+@@ -90,12 +90,6 @@ route
+ 
+ <parameters>
+ 
+-<parameter name="stackdriver_logging" unique="0" required="0">
+-<longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
+-<shortdesc lang="en">Stackdriver-logging support</shortdesc>
+-<content type="boolean" default="" />
+-</parameter>
+-
+ <parameter name="ip" unique="1" required="1">
+ <longdesc lang="en">
+ Floating IP address. Note that this IP must be chosen outside of all existing
+@@ -128,6 +122,12 @@ Route name
+ <shortdesc lang="en">Route name</shortdesc>
+ <content type="string" default="${OCF_RESKEY_route_name_default}" />
+ </parameter>
++
++<parameter name="stackdriver_logging" unique="0" required="0">
++<longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
++<shortdesc lang="en">Stackdriver-logging support</shortdesc>
++<content type="boolean" default="" />
++</parameter>
+ </parameters>
+ 
+ <actions>
+
+From e54565ec69f809b28337c0471ad0a9b26a64f8bf Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Fri, 20 Jul 2018 08:45:53 -0300
+Subject: [PATCH 4/4] gcp-vpc-move-route.in: minor fixes
+
+---
+ heartbeat/gcp-vpc-move-route.in | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+index 8d5bfff36..566a70f86 100644
+--- a/heartbeat/gcp-vpc-move-route.in
++++ b/heartbeat/gcp-vpc-move-route.in
+@@ -104,7 +104,7 @@ subnet ranges
+ Name of the VPC network
+ </longdesc>
+ <shortdesc lang="en">VPC network</shortdesc>
+-<content type="string" default="${OCF_RESKEY_vpc_network_default}" />
++<content type="string" default="default" />
+ </parameter>
+ 
+ <parameter name="interface">
+@@ -112,7 +112,7 @@ Name of the VPC network
+ Name of the network interface
+ </longdesc>
+ <shortdesc lang="en">Network interface name</shortdesc>
+-<content type="string" default="${OCF_RESKEY_interface_default}" />
++<content type="string" default="eth0" />
+ </parameter>
+ 
+ <parameter name="route_name" unique="1">
+@@ -120,7 +120,7 @@ Name of the network interface
+ Route name
+ </longdesc>
+ <shortdesc lang="en">Route name</shortdesc>
+-<content type="string" default="${OCF_RESKEY_route_name_default}" />
++<content type="string" default="ra-%s" />
+ </parameter>
+ 
+ <parameter name="stackdriver_logging" unique="0" required="0">
+@@ -138,7 +138,7 @@ Route name
+ <action name="meta-data" timeout="5s" />
+ </actions>
+ </resource-agent>
+-'''
++''' % os.path.basename(sys.argv[0])
+ 
+ 
+ class Context(object):
+@@ -229,7 +229,7 @@ def validate(ctx):
+       GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network)
+   ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0')
+   ctx.route_name = os.environ.get(
+-      'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME'])
++      'OCF_RESKEY_route_name', 'ra-%s' % os.path.basename(sys.argv[0]))
+   ctx.iproute = pyroute2.IPRoute()
+   atexit.register(ctx.iproute.close)
+   idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
+@@ -432,7 +432,8 @@ def main():
+   elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
+     sys.exit(ip_monitor(ctx))
+   else:
+-    usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}'
++    usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \
++        os.path.basename(sys.argv[0])
+     logging.error(usage)
+     sys.exit(OCF_ERR_UNIMPLEMENTED)
+ 
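Unlike gcp-vpc-move-vip, this agent also has a local half: besides repointing the VPC route through the compute API, ip_and_route_start() plumbs the floating IP onto the instance's interface with pyroute2. A minimal sketch of just that local step, assuming eth0 and a hypothetical /32 address:

    import pyroute2

    FLOATING_IP = '10.99.0.1'  # hypothetical; must lie outside all subnet ranges
    # Requires root, like the agent itself (see validate()).
    with pyroute2.IPRoute() as ipr:
        idx = ipr.link_lookup(ifname='eth0')[0]
        ipr.addr('add', index=idx, address=FLOATING_IP, mask=32)
        ipr.link('set', index=idx, state='up')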
diff --git a/SOURCES/5-python-library.patch b/SOURCES/5-python-library.patch
new file mode 100644
index 0000000..0066119
--- /dev/null
+++ b/SOURCES/5-python-library.patch
@@ -0,0 +1,600 @@
+From 13ae97dec5754642af4d0d0edc03d9290e792e7f Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 19 Jul 2018 16:12:35 +0200
+Subject: [PATCH 1/5] Add Python library
+
+---
+ heartbeat/Makefile.am |   3 +-
+ heartbeat/ocf.py      | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 138 insertions(+), 1 deletion(-)
+ create mode 100644 heartbeat/ocf.py
+
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index d4750bf09..1333f8feb 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -185,7 +185,8 @@ ocfcommon_DATA		= ocf-shellfuncs 	\
+ 			  ora-common.sh		\
+ 			  mysql-common.sh	\
+ 			  nfsserver-redhat.sh	\
+-			  findif.sh
++			  findif.sh		\
++			  ocf.py
+ 
+ # Legacy locations
+ hbdir			= $(sysconfdir)/ha.d
+diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py
+new file mode 100644
+index 000000000..12be7a2a4
+--- /dev/null
++++ b/heartbeat/ocf.py
+@@ -0,0 +1,136 @@
++#
++# Copyright (c) 2016 Red Hat, Inc, Oyvind Albrigtsen
++#                    All Rights Reserved.
++#
++#
++# This library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# This library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with this library; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++# 
++
++import sys, os, logging, syslog
++
++argv=sys.argv
++env=os.environ
++
++#
++# 	Common variables for the OCF Resource Agents supplied by
++# 	heartbeat.
++#
++
++OCF_SUCCESS=0
++OCF_ERR_GENERIC=1
++OCF_ERR_ARGS=2
++OCF_ERR_UNIMPLEMENTED=3
++OCF_ERR_PERM=4
++OCF_ERR_INSTALLED=5
++OCF_ERR_CONFIGURED=6
++OCF_NOT_RUNNING=7
++
++# Non-standard values.
++#
++# OCF does not include the concept of master/slave resources so we
++#   need to extend it so we can discover a resource's complete state.
++#
++# OCF_RUNNING_MASTER:  
++#    The resource is in "master" mode and fully operational
++# OCF_FAILED_MASTER:
++#    The resource is in "master" mode but in a failed state
++# 
++# The extra two values should only be used during a probe.
++#
++# Probes are used to discover resources that were started outside of
++#    the CRM and/or left behind if the LRM fails.
++# 
++# They can be identified in RA scripts by checking for:
++#   [ "${__OCF_ACTION}" = "monitor" -a "${OCF_RESKEY_CRM_meta_interval}" = "0" ]
++# 
++# Failed "slaves" should continue to use: OCF_ERR_GENERIC
++# Fully operational "slaves" should continue to use: OCF_SUCCESS
++#
++OCF_RUNNING_MASTER=8
++OCF_FAILED_MASTER=9
++
++
++## Own logger handler that uses old-style syslog handler as otherwise
++## everything is sourced from /dev/syslog
++class SyslogLibHandler(logging.StreamHandler):
++	"""
++	A handler class that correctly push messages into syslog
++	"""
++	def emit(self, record):
++		syslog_level = {
++			logging.CRITICAL:syslog.LOG_CRIT,
++			logging.ERROR:syslog.LOG_ERR,
++			logging.WARNING:syslog.LOG_WARNING,
++			logging.INFO:syslog.LOG_INFO,
++			logging.DEBUG:syslog.LOG_DEBUG,
++			logging.NOTSET:syslog.LOG_DEBUG,
++		}[record.levelno]
++
++		msg = self.format(record)
++
++		# syslog.syslog cannot accept a 0x00 character in the message,
++		# or an exception is thrown
++		syslog.syslog(syslog_level, msg.replace("\x00","\n"))
++		return
++
++
++OCF_RESOURCE_INSTANCE = env.get("OCF_RESOURCE_INSTANCE")
++
++HA_DEBUG = env.get("HA_debug", 0)
++HA_DATEFMT = env.get("HA_DATEFMT", "%b %d %T ")
++HA_LOGFACILITY = env.get("HA_LOGFACILITY")
++HA_LOGFILE = env.get("HA_LOGFILE")
++HA_DEBUGLOG = env.get("HA_DEBUGLOG")
++
++log = logging.getLogger(os.path.basename(argv[0]))
++log.setLevel(logging.DEBUG)
++
++## add logging to stderr
++if sys.stdout.isatty():
++	seh = logging.StreamHandler(stream=sys.stderr)
++	if HA_DEBUG == 0:
++		seh.setLevel(logging.WARNING)
++	sehformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT)
++	seh.setFormatter(sehformatter)
++	log.addHandler(seh)
++
++## add logging to syslog
++if HA_LOGFACILITY:
++	slh = SyslogLibHandler()
++	if HA_DEBUG == 0:
++		slh.setLevel(logging.WARNING)
++	slhformatter = logging.Formatter('%(levelname)s: %(message)s')
++	slh.setFormatter(slhformatter)
++	log.addHandler(slh)
++
++## add logging to file
++if HA_LOGFILE:
++	lfh = logging.FileHandler(HA_LOGFILE)
++	if HA_DEBUG == 0:
++		lfh.setLevel(logging.WARNING)
++	lfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT)
++	lfh.setFormatter(lfhformatter)
++	log.addHandler(lfh)
++
++## add debug logging to file
++if HA_DEBUGLOG and HA_LOGFILE != HA_DEBUGLOG:
++	dfh = logging.FileHandler(HA_DEBUGLOG)
++	if HA_DEBUG == 0:
++		dfh.setLevel(logging.WARNING)
++	dfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT)
++	dfh.setFormatter(dfhformatter)
++	log.addHandler(dfh)
++
++logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE})
+
+From 2ade8dbf1f6f6d3889dd1ddbf40858edf10fbdc7 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 19 Jul 2018 16:20:39 +0200
+Subject: [PATCH 2/5] gcp-vpc-move-vip: use Python library
+
+---
+ heartbeat/gcp-vpc-move-vip.in | 42 +++++++++++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index af2080502..eb5bce6a8 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -22,6 +22,11 @@ import os
+ import sys
+ import time
+ 
++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT")
++sys.path.append(OCF_FUNCTIONS_DIR)
++
++from ocf import *
++
+ try:
+   import googleapiclient.discovery
+ except ImportError:
+@@ -40,10 +45,6 @@ else:
+ CONN = None
+ THIS_VM = None
+ ALIAS = None
+-OCF_SUCCESS = 0
+-OCF_ERR_GENERIC = 1
+-OCF_ERR_CONFIGURED = 6
+-OCF_NOT_RUNNING = 7
+ METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
+ METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
+ METADATA = \
+@@ -206,11 +207,11 @@ def gcp_alias_start(alias):
+   # If I already have the IP, exit. If it has an alias IP that isn't the VIP,
+   # then remove it
+   if my_alias == alias:
+-    logging.info(
++    logger.info(
+         '%s already has %s attached. No action required' % (THIS_VM, alias))
+     sys.exit(OCF_SUCCESS)
+   elif my_alias:
+-    logging.info('Removing %s from %s' % (my_alias, THIS_VM))
++    logger.info('Removing %s from %s' % (my_alias, THIS_VM))
+     set_alias(project, my_zone, THIS_VM, '')
+ 
+   # Loops through all hosts & remove the alias IP from the host that has it
+@@ -223,7 +224,7 @@ def gcp_alias_start(alias):
+     host_zone = get_zone(project, host)
+     host_alias = get_alias(project, host_zone, host)
+     if alias == host_alias:
+-      logging.info(
++      logger.info(
+           '%s is attached to %s - Removing all alias IP addresses from %s' %
+           (alias, host, host))
+       set_alias(project, host_zone, host, '')
+@@ -237,14 +238,14 @@ def gcp_alias_start(alias):
+   # Check the IP has been added
+   my_alias = get_localhost_alias()
+   if alias == my_alias:
+-    logging.info('Finished adding %s to %s' % (alias, THIS_VM))
++    logger.info('Finished adding %s to %s' % (alias, THIS_VM))
+   elif my_alias:
+-    logging.error(
++    logger.error(
+         'Failed to add IP. %s has an IP attached but it isn\'t %s' %
+         (THIS_VM, alias))
+     sys.exit(OCF_ERR_GENERIC)
+   else:
+-    logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM))
++    logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM))
+     sys.exit(OCF_ERR_GENERIC)
+ 
+ 
+@@ -254,14 +255,14 @@ def gcp_alias_stop(alias):
+   project = get_metadata('project/project-id')
+ 
+   if my_alias == alias:
+-    logging.info('Removing %s from %s' % (my_alias, THIS_VM))
++    logger.info('Removing %s from %s' % (my_alias, THIS_VM))
+     set_alias(project, my_zone, THIS_VM, '')
+ 
+ 
+ def gcp_alias_status(alias):
+   my_alias = get_localhost_alias()
+   if alias == my_alias:
+-    logging.info('%s has the correct IP address attached' % THIS_VM)
++    logger.info('%s has the correct IP address attached' % THIS_VM)
+   else:
+     sys.exit(OCF_NOT_RUNNING)
+ 
+@@ -275,25 +276,24 @@ def validate():
+   try:
+     CONN = googleapiclient.discovery.build('compute', 'v1')
+   except Exception as e:
+-    logging.error('Couldn\'t connect with google api: ' + str(e))
++    logger.error('Couldn\'t connect with google api: ' + str(e))
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+   try:
+     THIS_VM = get_metadata('instance/name')
+   except Exception as e:
+-    logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
++    logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+   ALIAS = os.environ.get('OCF_RESKEY_alias_ip')
+   if not ALIAS:
+-    logging.error('Missing alias_ip parameter')
++    logger.error('Missing alias_ip parameter')
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+ 
+ def configure_logs():
+   # Prepare logging
+-  logging.basicConfig(
+-      format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO)
++  global logger
+   logging.getLogger('googleapiclient').setLevel(logging.WARN)
+   logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging')
+   if logging_env:
+@@ -307,10 +307,10 @@ def configure_logs():
+         handler.setLevel(logging.INFO)
+         formatter = logging.Formatter('gcp:alias "%(message)s"')
+         handler.setFormatter(formatter)
+-        root_logger = logging.getLogger()
+-        root_logger.addHandler(handler)
++        log.addHandler(handler)
++        logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE})
+       except ImportError:
+-        logging.error('Couldn\'t import google.cloud.logging, '
++        logger.error('Couldn\'t import google.cloud.logging, '
+             'disabling Stackdriver-logging support')
+ 
+ 
+@@ -331,7 +331,7 @@ def main():
+   elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]:
+     gcp_alias_status(ALIAS)
+   else:
+-    logging.error('no such function %s' % str(sys.argv[1]))
++    logger.error('no such function %s' % str(sys.argv[1]))
+ 
+ 
+ if __name__ == "__main__":
+
+From 9e9ea17c42df27d4c13fed9badba295df48437f2 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 20 Jul 2018 13:27:42 +0200
+Subject: [PATCH 3/5] gcp-vpc-move-vip: moved alias-parameters to top of
+ metadata
+
+---
+ heartbeat/gcp-vpc-move-vip.in | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index eb5bce6a8..ba61193b6 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -55,6 +55,16 @@ METADATA = \
+   <longdesc lang="en">Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance</longdesc>
+   <shortdesc lang="en">Floating IP Address on Google Cloud Platform</shortdesc>
+   <parameters>
++    <parameter name="alias_ip" unique="1" required="1">
++      <longdesc lang="en">IP address to be added, including CIDR. E.g. 192.168.0.1/32</longdesc>
++      <shortdesc lang="en">IP address to be added, including CIDR. E.g. 192.168.0.1/32</shortdesc>
++      <content type="string" default="" />
++    </parameter>
++    <parameter name="alias_range_name" unique="1" required="0">
++      <longdesc lang="en">Subnet name for the Alias IP</longdesc>
++      <shortdesc lang="en">Subnet name for the Alias IP</shortdesc>
++      <content type="string" default="" />
++    </parameter>
+     <parameter name="hostlist" unique="1" required="0">
+       <longdesc lang="en">List of hosts in the cluster</longdesc>
+       <shortdesc lang="en">Host list</shortdesc>
+@@ -65,16 +75,6 @@ METADATA = \
+       <shortdesc lang="en">Stackdriver-logging support</shortdesc>
+       <content type="boolean" default="" />
+     </parameter>
+-    <parameter name="alias_ip" unique="1" required="1">
+-      <longdesc lang="en">IP Address to be added including CIDR. E.g 192.168.0.1/32</longdesc>
+-      <shortdesc lang="en">IP Address to be added including CIDR. E.g 192.168.0.1/32</shortdesc>
+-      <content type="string" default="" />
+-    </parameter>
+-    <parameter name="alias_range_name" unique="1" required="0">
+-      <longdesc lang="en">Subnet name for the Alias IP2</longdesc>
+-      <shortdesc lang="en">Subnet name for the Alias IP</shortdesc>
+-      <content type="string" default="" />
+-    </parameter>
+   </parameters>
+   <actions>
+     <action name="start" timeout="300" />
+
+From 716d69040dba7a769efb5a60eca934fdd65585f2 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 23 Jul 2018 11:17:00 +0200
+Subject: [PATCH 4/5] gcp-vpc-move-route: use Python library
+
+---
+ heartbeat/gcp-vpc-move-route.in | 58 ++++++++++++++++++++---------------------
+ 1 file changed, 28 insertions(+), 30 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+index 566a70f86..125289d86 100644
+--- a/heartbeat/gcp-vpc-move-route.in
++++ b/heartbeat/gcp-vpc-move-route.in
+@@ -39,6 +39,11 @@ import os
+ import sys
+ import time
+ 
++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT")
++sys.path.append(OCF_FUNCTIONS_DIR)
++
++from ocf import *
++
+ try:
+   import googleapiclient.discovery
+   import pyroute2
+@@ -55,12 +60,6 @@ else:
+   import urllib2 as urlrequest
+ 
+ 
+-OCF_SUCCESS = 0
+-OCF_ERR_GENERIC = 1
+-OCF_ERR_UNIMPLEMENTED = 3
+-OCF_ERR_PERM = 4
+-OCF_ERR_CONFIGURED = 6
+-OCF_NOT_RUNNING = 7
+ GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1'
+ METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
+ METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
+@@ -199,18 +198,18 @@ def get_metadata(metadata_key, params=None, timeout=None):
+ 
+ def validate(ctx):
+   if os.geteuid() != 0:
+-    logging.error('You must run this agent as root')
++    logger.error('You must run this agent as root')
+     sys.exit(OCF_ERR_PERM)
+ 
+   try:
+     ctx.conn = googleapiclient.discovery.build('compute', 'v1')
+   except Exception as e:
+-    logging.error('Couldn\'t connect with google api: ' + str(e))
++    logger.error('Couldn\'t connect with google api: ' + str(e))
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+   ctx.ip = os.environ.get('OCF_RESKEY_ip')
+   if not ctx.ip:
+-    logging.error('Missing ip parameter')
++    logger.error('Missing ip parameter')
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+   try:
+@@ -218,7 +217,7 @@ def validate(ctx):
+     ctx.zone = get_metadata('instance/zone').split('/')[-1]
+     ctx.project = get_metadata('project/project-id')
+   except Exception as e:
+-    logging.error(
++    logger.error(
+         'Instance information not found. Is this a GCE instance ?: %s', str(e))
+     sys.exit(OCF_ERR_CONFIGURED)
+ 
+@@ -234,7 +233,7 @@ def validate(ctx):
+   atexit.register(ctx.iproute.close)
+   idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
+   if not idxs:
+-    logging.error('Network interface not found')
++    logger.error('Network interface not found')
+     sys.exit(OCF_ERR_CONFIGURED)
+   ctx.iface_idx = idxs[0]
+ 
+@@ -246,7 +245,7 @@ def check_conflicting_routes(ctx):
+   response = request.execute()
+   route_list = response.get('items', None)
+   if route_list:
+-    logging.error(
++    logger.error(
+         'Conflicting unnmanaged routes for destination %s/32 in VPC %s found : %s',
+         ctx.ip, ctx.vpc_network, str(route_list))
+     sys.exit(OCF_ERR_CONFIGURED)
+@@ -258,7 +257,7 @@ def route_release(ctx):
+ 
+ 
+ def ip_monitor(ctx):
+-  logging.info('IP monitor: checking local network configuration')
++  logger.info('IP monitor: checking local network configuration')
+ 
+   def address_filter(addr):
+     for attr in addr['attrs']:
+@@ -271,12 +270,12 @@ def ip_monitor(ctx):
+   route = ctx.iproute.get_addr(
+       index=ctx.iface_idx, match=address_filter)
+   if not route:
+-    logging.warn(
++    logger.warning(
+         'The floating IP %s is not locally configured on this instance (%s)',
+         ctx.ip, ctx.instance)
+     return OCF_NOT_RUNNING
+ 
+-  logging.debug(
++  logger.debug(
+       'The floating IP %s is correctly configured on this instance (%s)',
+       ctx.ip, ctx.instance)
+   return OCF_SUCCESS
+@@ -287,7 +286,7 @@ def ip_release(ctx):
+ 
+ 
+ def ip_and_route_start(ctx):
+-  logging.info('Bringing up the floating IP %s', ctx.ip)
++  logger.info('Bringing up the floating IP %s', ctx.ip)
+ 
+   # Add a new entry in the routing table
+   # If the route entry exists and is pointing to another instance, take it over
+@@ -322,7 +321,7 @@ def ip_and_route_start(ctx):
+       request.execute()
+     except googleapiclient.errors.HttpError as e:
+       if e.resp.status == 404:
+-        logging.error('VPC network not found')
++        logger.error('VPC network not found')
+         sys.exit(OCF_ERR_CONFIGURED)
+       else:
+         raise
+@@ -336,11 +335,11 @@ def ip_and_route_start(ctx):
+ 
+   ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32)
+   ctx.iproute.link('set', index=ctx.iface_idx, state='up')
+-  logging.info('Successfully brought up the floating IP %s', ctx.ip)
++  logger.info('Successfully brought up the floating IP %s', ctx.ip)
+ 
+ 
+ def route_monitor(ctx):
+-  logging.info('GCP route monitor: checking route table')
++  logger.info('GCP route monitor: checking route table')
+ 
+   # Ensure that there is no route that we are not aware of that is also handling our IP
+   check_conflicting_routes
+@@ -360,39 +359,38 @@ def route_monitor(ctx):
+   instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
+       GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
+   if routed_to_instance != instance_url:
+-    logging.warn(
++    logger.warning(
+         'The floating IP %s is not routed to this instance (%s) but to instance %s',
+         ctx.ip, ctx.instance, routed_to_instance.split('/')[-1])
+     return OCF_NOT_RUNNING
+ 
+-  logging.debug(
++  logger.debug(
+       'The floating IP %s is correctly routed to this instance (%s)',
+       ctx.ip, ctx.instance)
+   return OCF_SUCCESS
+ 
+ 
+ def ip_and_route_stop(ctx):
+-  logging.info('Bringing down the floating IP %s', ctx.ip)
++  logger.info('Bringing down the floating IP %s', ctx.ip)
+ 
+   # Delete the route entry
+   # If the route entry exists and is pointing to another instance, don't touch it
+   if route_monitor(ctx) == OCF_NOT_RUNNING:
+-    logging.info(
++    logger.info(
+         'The floating IP %s is already not routed to this instance (%s)',
+         ctx.ip, ctx.instance)
+   else:
+     route_release(ctx)
+ 
+   if ip_monitor(ctx) == OCF_NOT_RUNNING:
+-    logging.info('The floating IP %s is already down', ctx.ip)
++    logger.info('The floating IP %s is already down', ctx.ip)
+   else:
+     ip_release(ctx)
+ 
+ 
+ def configure_logs(ctx):
+   # Prepare logging
+-  logging.basicConfig(
+-      format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO)
++  global logger
+   logging.getLogger('googleapiclient').setLevel(logging.WARN)
+   logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging')
+   if logging_env:
+@@ -406,10 +404,10 @@ def configure_logs(ctx):
+         handler.setLevel(logging.INFO)
+         formatter = logging.Formatter('gcp:route "%(message)s"')
+         handler.setFormatter(formatter)
+-        root_logger = logging.getLogger()
+-        root_logger.addHandler(handler)
++        log.addHandler(handler)
++        logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE})
+       except ImportError:
+-        logging.error('Couldn\'t import google.cloud.logging, '
++        logger.error('Couldn\'t import google.cloud.logging, '
+             'disabling Stackdriver-logging support')
+ 
+ 
+@@ -434,7 +432,7 @@ def main():
+   else:
+     usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \
+         os.path.basename(sys.argv[0])
+-    logging.error(usage)
++    logger.error(usage)
+     sys.exit(OCF_ERR_UNIMPLEMENTED)
+ 
+ 
+
+From 6ec7e87693a51cbb16a1822e6d15f1dbfc11f8e6 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 23 Jul 2018 15:55:48 +0200
+Subject: [PATCH 5/5] Python: add logging.basicConfig() to support background
+ logging
+
+---
+ heartbeat/ocf.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py
+index 12be7a2a4..36e7ccccd 100644
+--- a/heartbeat/ocf.py
++++ b/heartbeat/ocf.py
+@@ -94,6 +94,7 @@ def emit(self, record):
+ HA_LOGFILE = env.get("HA_LOGFILE")
+ HA_DEBUGLOG = env.get("HA_DEBUGLOG")
+ 
++logging.basicConfig()
+ log = logging.getLogger(os.path.basename(argv[0]))
+ log.setLevel(logging.DEBUG)
+ 
diff --git a/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch
new file mode 100644
index 0000000..69ac757
--- /dev/null
+++ b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch
@@ -0,0 +1,25 @@
+From 6bd66e337922403cb2dd3a8715ae401def8c0437 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Thu, 19 Jul 2018 13:00:58 -0300
+Subject: [PATCH] gcp-vpc-move-vip.in: filter call to aggregatedList
+
+Don't list all the instances in the project; filter for only the one we are
+interested in.
+---
+ heartbeat/gcp-vpc-move-vip.in | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index 9fc87242f..af2080502 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -169,7 +169,8 @@ def get_localhost_alias():
+ 
+ 
+ def get_zone(project, instance):
+-  request = CONN.instances().aggregatedList(project=project)
++  fl = 'name="%s"' % instance
++  request = CONN.instances().aggregatedList(project=project, filter=fl)
+   while request is not None:
+     response = request.execute()
+     zones = response.get('items', {})
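The server-side filter above generalizes to any paginated Compute API
listing. A minimal standalone sketch of the same pattern (the project and
instance names are placeholders, not values from the patch):

    import googleapiclient.discovery

    conn = googleapiclient.discovery.build('compute', 'v1')
    # Filter server-side instead of paging through every instance.
    request = conn.instances().aggregatedList(
        project='example-project', filter='name="example-instance"')
    while request is not None:
        response = request.execute()
        for zone, scoped in response.get('items', {}).items():
            for instance in scoped.get('instances', []):
                print(zone, instance['name'])
        request = conn.instances().aggregatedList_next(request, response)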
diff --git a/SOURCES/7-gcp-bundled.patch b/SOURCES/7-gcp-bundled.patch
new file mode 100644
index 0000000..b341dac
--- /dev/null
+++ b/SOURCES/7-gcp-bundled.patch
@@ -0,0 +1,23 @@
+diff -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
+--- a/heartbeat/gcp-vpc-move-ip.in	2019-04-05 09:20:26.164739897 +0200
++++ b/heartbeat/gcp-vpc-move-ip.in	2019-04-05 09:21:01.331139742 +0200
+@@ -36,7 +36,7 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ 
+ # Defaults
+-OCF_RESKEY_gcloud_default="/usr/bin/gcloud"
++OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra"
+ OCF_RESKEY_configuration_default="default"
+ OCF_RESKEY_vpc_network_default="default"
+ OCF_RESKEY_interface_default="eth0"
+diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+--- a/heartbeat/gcp-vpc-move-route.in	2019-04-05 09:20:26.180739624 +0200
++++ b/heartbeat/gcp-vpc-move-route.in	2019-04-05 09:22:28.648649593 +0200
+@@ -45,6 +45,7 @@
+ from ocf import *
+ 
+ try:
++  sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
+   import googleapiclient.discovery
+   import pyroute2
+ except ImportError:
diff --git a/SOURCES/7-gcp-stackdriver-logging-note.patch b/SOURCES/7-gcp-stackdriver-logging-note.patch
new file mode 100644
index 0000000..b714513
--- /dev/null
+++ b/SOURCES/7-gcp-stackdriver-logging-note.patch
@@ -0,0 +1,28 @@
+diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+--- a/heartbeat/gcp-vpc-move-route.in	2018-07-30 16:56:23.486368292 +0200
++++ b/heartbeat/gcp-vpc-move-route.in	2018-07-30 17:11:54.189715666 +0200
+@@ -125,8 +125,8 @@
+ </parameter>
+ 
+ <parameter name="stackdriver_logging" unique="0" required="0">
+-<longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
+-<shortdesc lang="en">Stackdriver-logging support</shortdesc>
++<longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging).</longdesc>
++<shortdesc lang="en">Stackdriver-logging support. Requires additional libraries (google-cloud-logging).</shortdesc>
+ <content type="boolean" default="" />
+ </parameter>
+ </parameters>
+diff -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+--- a/heartbeat/gcp-vpc-move-vip.in	2018-07-30 16:56:23.486368292 +0200
++++ b/heartbeat/gcp-vpc-move-vip.in	2018-07-30 17:06:17.260686483 +0200
+@@ -72,8 +72,8 @@
+       <content type="string" default="" />
+     </parameter>
+     <parameter name="stackdriver_logging" unique="0" required="0">
+-      <longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging</longdesc>
+-      <shortdesc lang="en">Stackdriver-logging support</shortdesc>
++      <longdesc lang="en">If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging).</longdesc>
++      <shortdesc lang="en">Stackdriver-logging support. Requires additional libraries (google-cloud-logging).</shortdesc>
+       <content type="boolean" default="" />
+     </parameter>
+   </parameters>
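The additional library both descriptions point to is the google-cloud-logging
client. A hedged sketch of the kind of handler setup the agents perform when
stackdriver_logging is enabled (the handler name is illustrative, and the
client assumes application-default credentials):

    import logging
    import google.cloud.logging
    from google.cloud.logging.handlers import CloudLoggingHandler

    client = google.cloud.logging.Client()  # needs GCP credentials
    handler = CloudLoggingHandler(client, name='gcp-agent')
    handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(handler)
    logging.getLogger().warning('forwarded to Stackdriver')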
diff --git a/SOURCES/8-google-cloud-sdk-fixes.patch b/SOURCES/8-google-cloud-sdk-fixes.patch
new file mode 100644
index 0000000..d734d82
--- /dev/null
+++ b/SOURCES/8-google-cloud-sdk-fixes.patch
@@ -0,0 +1,12 @@
+diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud
+--- a/bundled/gcp/google-cloud-sdk/bin/gcloud	2018-06-18 14:30:10.000000000 +0200
++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud	2018-06-25 13:12:56.057000620 +0200
+@@ -64,6 +64,8 @@
+ }
+ CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
+ 
++CLOUDSDK_PYTHON_SITEPACKAGES=1
++
+ # if CLOUDSDK_PYTHON is empty
+ if [ -z "$CLOUDSDK_PYTHON" ]; then
+   # if python2 exists then plain python may point to a version != 2
diff --git a/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch
new file mode 100644
index 0000000..de378c4
--- /dev/null
+++ b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch
@@ -0,0 +1,129 @@
+diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py
+--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py	1980-01-01 09:00:00.000000000 +0100
++++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py	2019-04-04 11:56:00.292677044 +0200
+@@ -19,8 +19,14 @@
+ certificates.
+ """
+ 
++from pyasn1.codec.der import decoder
+ from pyasn1_modules import pem
+-import rsa
++from pyasn1_modules.rfc2459 import Certificate
++from pyasn1_modules.rfc5208 import PrivateKeyInfo
++from cryptography.hazmat.primitives import serialization, hashes
++from cryptography.hazmat.primitives.asymmetric import padding
++from cryptography import x509
++from cryptography.hazmat.backends import default_backend
++from cryptography.exceptions import InvalidSignature
+ import six
+ 
+ from oauth2client import _helpers
+@@ -40,7 +46,7 @@
+                  '-----END RSA PRIVATE KEY-----')
+ _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
+                  '-----END PRIVATE KEY-----')
+-_PKCS8_SPEC = None
++_PKCS8_SPEC = PrivateKeyInfo()
+ 
+ 
+ def _bit_list_to_bytes(bit_list):
+@@ -67,7 +73,8 @@
+     """
+ 
+     def __init__(self, pubkey):
+-        self._pubkey = pubkey
++        # from_string() below passes an already-loaded key object
++        self._pubkey = pubkey
+ 
+     def verify(self, message, signature):
+         """Verifies a message against a signature.
+@@ -84,8 +91,9 @@
+         """
+         message = _helpers._to_bytes(message, encoding='utf-8')
+         try:
+-            return rsa.pkcs1.verify(message, signature, self._pubkey)
+-        except (ValueError, rsa.pkcs1.VerificationError):
++            self._pubkey.verify(signature, message, padding.PKCS1v15(),
++                hashes.SHA256())
++            return True
++        except (ValueError, TypeError, InvalidSignature):
+             return False
+ 
+     @classmethod
+@@ -109,19 +117,18 @@
+         """
+         key_pem = _helpers._to_bytes(key_pem)
+         if is_x509_cert:
+-            from pyasn1.codec.der import decoder
+-            from pyasn1_modules import rfc2459
+-
+-            der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
+-            asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate())
++            der = x509.load_pem_x509_certificate(
++                key_pem, default_backend()).public_bytes(serialization.Encoding.DER)
++            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
+             if remaining != b'':
+                 raise ValueError('Unused bytes', remaining)
+ 
+             cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
+             key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
+-            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
++            pubkey = serialization.load_der_public_key(key_bytes,
++                backend=default_backend())
+         else:
+-            pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
++            pubkey = serialization.load_pem_public_key(key_pem,
++                backend=default_backend())
+         return cls(pubkey)
+ 
+ 
+@@ -134,6 +141,8 @@
+ 
+     def __init__(self, pkey):
+         self._key = pkey
++        self._key = serialization.load_pem_private_key(pkey,
++            password=None, backend=default_backend())
+ 
+     def sign(self, message):
+         """Signs a message.
+@@ -145,7 +154,7 @@
+             string, The signature of the message for the given key.
+         """
+         message = _helpers._to_bytes(message, encoding='utf-8')
+-        return rsa.pkcs1.sign(message, self._key, 'SHA-256')
++        return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256())
+ 
+     @classmethod
+     def from_string(cls, key, password='notasecret'):
+@@ -163,27 +172,24 @@
+             ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
+             PEM format.
+         """
+-        global _PKCS8_SPEC
+         key = _helpers._from_bytes(key)  # pem expects str in Py3
+         marker_id, key_bytes = pem.readPemBlocksFromFile(
+             six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
+ 
+         if marker_id == 0:
+-            pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
+-                                                 format='DER')
+-        elif marker_id == 1:
+-            from pyasn1.codec.der import decoder
+-            from pyasn1_modules import rfc5208
++            pkey = serialization.load_der_private_key(
++                key_bytes, password=None,
++                backend=default_backend())
+ 
+-            if _PKCS8_SPEC is None:
+-              _PKCS8_SPEC = rfc5208.PrivateKeyInfo()
++        elif marker_id == 1:
+             key_info, remaining = decoder.decode(
+                 key_bytes, asn1Spec=_PKCS8_SPEC)
+             if remaining != b'':
+                 raise ValueError('Unused bytes', remaining)
+             pkey_info = key_info.getComponentByName('privateKey')
+-            pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
+-                                                 format='DER')
++            pkey = serialization.load_der_private_key(
++                pkey_info.asOctets(), password=None,
++                backend=default_backend())
+         else:
+             raise ValueError('No key could be detected.')
+ 
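The rsa-to-cryptography translation above comes down to PKCS#1 v1.5
signatures over SHA-256. A minimal self-contained sketch of the equivalent
sign/verify round trip (using a freshly generated key rather than the PEM
parsing paths handled by the patch):

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding, rsa

    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend())
    message = b'to-be-signed'
    signature = key.sign(message, padding.PKCS1v15(), hashes.SHA256())
    try:
        key.public_key().verify(
            signature, message, padding.PKCS1v15(), hashes.SHA256())
        print('signature verified')
    except InvalidSignature:
        print('signature mismatch')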
diff --git a/SOURCES/IPaddr2-monitor_retries.patch b/SOURCES/IPaddr2-monitor_retries.patch
new file mode 100644
index 0000000..6f2629a
--- /dev/null
+++ b/SOURCES/IPaddr2-monitor_retries.patch
@@ -0,0 +1,66 @@
+diff -uNr a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
+--- a/heartbeat/IPaddr2	2018-06-27 10:29:08.000000000 +0200
++++ b/heartbeat/IPaddr2	2018-06-29 16:01:50.538797379 +0200
+@@ -80,6 +80,7 @@
+ OCF_RESKEY_arp_bg_default=true
+ OCF_RESKEY_run_arping_default=false
+ OCF_RESKEY_preferred_lft_default="forever"
++OCF_RESKEY_monitor_retries_default="1"
+ 
+ : ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}}
+ : ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}}
+@@ -92,6 +93,7 @@
+ : ${OCF_RESKEY_arp_bg=${OCF_RESKEY_arp_bg_default}}
+ : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}}
+ : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}}
++: ${OCF_RESKEY_monitor_retries=${OCF_RESKEY_monitor_retries_default}}
+ #######################################################################
+ 
+ SENDARP=$HA_BIN/send_arp
+@@ -368,6 +370,18 @@
+ <content type="string" default="${OCF_RESKEY_preferred_lft_default}"/>
+ </parameter>
+ 
++<parameter name="monitor_retries">
++<longdesc lang="en">
++Set number of retries to find interface in monitor-action.
++
++ONLY INCREASE IF THE AGENT HAS ISSUES FINDING YOUR NIC DURING THE
++MONITOR-ACTION. A HIGHER SETTING MAY LEAD TO DELAYS IN DETECTING
++A FAILURE.
++</longdesc>
++<shortdesc lang="en">Number of retries to find interface in monitor-action</shortdesc>
++<content type="string" default="${OCF_RESKEY_monitor_retries_default}"/>
++</parameter>
++
+ </parameters>
+ <actions>
+ <action name="start"   timeout="20s" />
+@@ -536,15 +550,26 @@
+ find_interface() {
+ 	local ipaddr="$1"
+ 	local netmask="$2"
++	local iface=""
+ 
+ 	#
+ 	# List interfaces but exclude FreeS/WAN ipsecN virtual interfaces
+ 	#
+-	local iface="`$IP2UTIL -o -f $FAMILY addr show \
++	for i in $(seq 1 $OCF_RESKEY_monitor_retries); do
++	    iface="`$IP2UTIL -o -f $FAMILY addr show \
+ 		| grep "\ $ipaddr/$netmask" \
+ 		| cut -d ' ' -f2 \
+ 		| grep -v '^ipsec[0-9][0-9]*$'`"
+ 
++	    if [ -n "$iface" ]; then
++		    break
++	    fi
++
++	    if [ $OCF_RESKEY_monitor_retries -gt 1 ]; then
++		sleep 1
++	    fi
++	done
++
+ 	echo "$iface"
+ 	return 0
+ }
diff --git a/SOURCES/LVM-activate-1-warn-vg_access_mode.patch b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch
new file mode 100644
index 0000000..3471524
--- /dev/null
+++ b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch
@@ -0,0 +1,42 @@
+From 12ef5a343158bbfaa5233468a0506074fceaac81 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Tue, 21 Aug 2018 12:14:49 +0200
+Subject: [PATCH] LVM-activate: return OCF_ERR_CONFIGURED for incorrect
+ vg_access_mode
+
+---
+ heartbeat/LVM-activate | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index fbd058288..55e36a2d2 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -448,7 +448,7 @@ lvm_validate() {
+ 		;;
+ 	*)
+ 		ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode"
+-		exit $OCF_ERR_ARGS
++		exit $OCF_ERR_CONFIGURED
+ 		;;
+ 	esac
+ 
+@@ -771,7 +771,6 @@ lvm_stop() {
+ 		return $OCF_SUCCESS
+ 	fi
+ 
+-	lvm_validate
+ 	ocf_log info "Deactivating ${vol}"
+ 
+ 	case ${VG_access_mode} in
+@@ -788,8 +787,8 @@ lvm_stop() {
+ 		tagging_deactivate
+ 		;;
+ 	*)
+-		ocf_exit_reason "VG [${VG}] is not properly configured in cluster. It's unsafe!"
+-		exit $OCF_ERR_CONFIGURED
++		ocf_log err "VG [${VG}] is not properly configured in cluster. It's unsafe!"
++		exit $OCF_SUCCESS
+ 		;;
+ 	esac
+ 
diff --git a/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch
new file mode 100644
index 0000000..ae1fe65
--- /dev/null
+++ b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch
@@ -0,0 +1,137 @@
+From 792077bf2994e2e582ccfb0768f3186517de9025 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 21 Sep 2018 12:00:07 +0200
+Subject: [PATCH] LVM-activate: fixes
+
+- read parameters for start/stop/monitor-actions
+- fail during monitor-action when run with incorrect access_mode
+---
+ heartbeat/LVM-activate | 44 ++++++++++++++++++++++++++----------------
+ 1 file changed, 27 insertions(+), 17 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index 55e36a2d2..f46932c1c 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -56,6 +56,7 @@ LV=${OCF_RESKEY_lvname}
+ # 3: vg has system_id (new)
+ # 4: vg has tagging (old)
+ VG_access_mode=${OCF_RESKEY_vg_access_mode}
++VG_access_mode_num=0
+ 
+ # Activate LV(s) with "shared" lock for cluster fs
+ # or "exclusive" lock for local fs
+@@ -176,7 +177,9 @@ END
+ # 2: vg is clustered - clvmd (old)
+ # 3: vg has system_id (new)
+ # 4: vg has tagging (old)
+-get_VG_access_mode() {
++
++get_VG_access_mode_num()
++{
+ 	local access_mode
+ 	local vg_locktype
+ 	local vg_clustered
+@@ -415,11 +418,8 @@ tagging_check()
+         return $OCF_SUCCESS
+ }
+ 
+-lvm_validate() {
+-	local lv_count
+-	local mode
+-
+-	# Parameters checking
++read_parameters()
++{
+ 	if [ -z "$VG" ]
+ 	then
+ 		ocf_exit_reason "You must identify the volume group name!"
+@@ -435,22 +435,30 @@ lvm_validate() {
+ 	# Convert VG_access_mode from string to index
+ 	case ${VG_access_mode} in
+ 	lvmlockd)
+-		VG_access_mode=1
++		VG_access_mode_num=1
+ 		;;
+ 	clvmd)
+-		VG_access_mode=2
++		VG_access_mode_num=2
+ 		;;
+ 	system_id)
+-		VG_access_mode=3
++		VG_access_mode_num=3
+ 		;;
+ 	tagging)
+-		VG_access_mode=4
++		VG_access_mode_num=4
+ 		;;
+ 	*)
++		# don't exit with an error code here, or nodes will get fenced
++		# on e.g. "pcs resource create"
+ 		ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode"
+-		exit $OCF_ERR_CONFIGURED
+ 		;;
+ 	esac
++}
++
++lvm_validate() {
++	local lv_count
++	local mode
++
++	read_parameters
+ 
+ 	check_binary pgrep
+ 	# Every LVM command is just symlink to lvm binary
+@@ -471,9 +479,9 @@ lvm_validate() {
+ 	# Get the access mode from VG metadata and check if it matches the input
+ 	# value. Skip to check "tagging" mode because there's no reliable way to
+ 	# automatically check if "tagging" mode is being used.
+-	get_VG_access_mode
++	get_VG_access_mode_num
+ 	mode=$?
+-	if [ $VG_access_mode -ne 4 ] && [ $mode -ne $VG_access_mode ]; then
++	if [ $VG_access_mode_num -ne 4 ] && [ $mode -ne $VG_access_mode_num ]; then
+ 		ocf_exit_reason "The specified vg_access_mode doesn't match the lock_type on VG metadata!"
+ 		exit $OCF_ERR_ARGS
+ 	fi
+@@ -495,8 +503,8 @@ lvm_validate() {
+ 		fi
+ 	fi
+ 
+-	# VG_access_mode specific checking goes here
+-	case ${VG_access_mode} in
++	# VG_access_mode_num specific checking goes here
++	case ${VG_access_mode_num} in
+ 	1)
+ 		lvmlockd_check
+ 		;;
+@@ -731,7 +739,7 @@ lvm_start() {
+ 	[ -z ${LV} ] && vol=${VG} || vol=${VG}/${LV}
+ 	ocf_log info "Activating ${vol}"
+ 
+-	case ${VG_access_mode} in
++	case ${VG_access_mode_num} in
+ 	1)
+ 		lvmlockd_activate
+ 		;;
+@@ -773,7 +781,7 @@ lvm_stop() {
+ 
+ 	ocf_log info "Deactivating ${vol}"
+ 
+-	case ${VG_access_mode} in
++	case ${VG_access_mode_num} in
+ 	1)
+ 		lvmlockd_deactivate
+ 		;;
+@@ -811,9 +819,11 @@ start)
+ 	lvm_start
+ 	;;
+ stop)
++	read_parameters
+ 	lvm_stop
+ 	;;
+ monitor)
++	lvm_validate
+ 	lvm_status
+ 	;;
+ validate-all)
diff --git a/SOURCES/LVM-activate-fix-issue-with-dashes.patch b/SOURCES/LVM-activate-fix-issue-with-dashes.patch
new file mode 100644
index 0000000..7f3b996
--- /dev/null
+++ b/SOURCES/LVM-activate-fix-issue-with-dashes.patch
@@ -0,0 +1,54 @@
+From 2c219dd000d7f5edd3765a1c6bc5f3e6efb7208b Mon Sep 17 00:00:00 2001
+From: Paul Mezzanini <pfmeec@rit.edu>
+Date: Fri, 1 Jun 2018 11:58:06 -0400
+Subject: [PATCH] Volume groups and logical volumes with "-" in their name
+ get mangled with double dashes in dmsetup.  Switching to wc and just counting
+ lines while depending on the vgname + lvname filter in the dmsetup call gets
+ around the issue with dmsetup outputting correctly but grep failing due to
+ the name mangle.
+
+Logic for both test cases and dmsetup calls changed so they match too.  No reason
+not to have matching tests.
+
+This is AllBad but there isn't a better way that I'm aware of yet.
+---
+ heartbeat/LVM-activate | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index 60e656178..fbd058288 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -692,20 +692,27 @@ tagging_deactivate() {
+ # lvs/vgs when the metadata is somehow inconsistent.
+ #
+ # So, we have to make compromise that the VG is assumably active if any LV of the VG is active.
++#
++# Paul:
++# VGS + LVS with "-" in their name get mangled with double dashes in dmsetup.
++# Switching to wc and just counting lines while depending on the vgname + lvname filter
++# in dmsetup gets around the issue with dmsetup reporting correctly but grep failing.
++#
++# Logic for both test cases and dmsetup calls changed so they match too.
++#
++# This is AllBad but there isn't a better way that I'm aware of yet.
+ lvm_status() {
+ 	local dm_count
+ 
+ 	if [ -n "${LV}" ]; then
+ 		# dmsetup ls? It cannot accept device name. It's
+ 		# too heavy to list all DM devices.
+-		dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" \
+-			| grep -Eq "${VG}-+${LV}"
++		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" |  wc -l )
+ 	else
+-		dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-")
+-		test $dm_count -gt 0
++		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l )
+ 	fi
+ 
+-	if [ $? -ne 0 ]; then
++	if [ $dm_count -eq 0 ]; then
+ 		return $OCF_NOT_RUNNING
+ 	fi
+ 
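Device-mapper escapes each dash inside a VG or LV name by doubling it and
joins the two names with a single dash, which is why grepping dmsetup
output for the unescaped names is unreliable. A standalone sketch of the
mangling rule (the names are examples):

    def dm_name(vg, lv):
        """Map an LVM vg/lv pair to its device-mapper device name."""
        escape = lambda name: name.replace('-', '--')
        return '%s-%s' % (escape(vg), escape(lv))

    print(dm_name('my-vg', 'my-lv'))  # prints: my--vg-my--lv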
diff --git a/SOURCES/LVM-fix-missing-dash.patch b/SOURCES/LVM-fix-missing-dash.patch
new file mode 100644
index 0000000..0e24f5f
--- /dev/null
+++ b/SOURCES/LVM-fix-missing-dash.patch
@@ -0,0 +1,22 @@
+From 5a664525a20d3d5094912322be4faac668e4920e Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 13 Aug 2018 14:30:50 +0200
+Subject: [PATCH] LVM: fix missing dash
+
+---
+ heartbeat/lvm-tag.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh
+index 71f53b20b..fe17e0f21 100644
+--- a/heartbeat/lvm-tag.sh
++++ b/heartbeat/lvm-tag.sh
+@@ -147,7 +147,7 @@ lvm_init() {
+ 	if [ -n "$OCF_RESKEY_tag" ]; then
+ 		OUR_TAG=$OCF_RESKEY_tag
+ 	fi
+-	vgchange_activate_options="aly --config activation{volume_list=[\"@${OUR_TAG}\"]}"
++	vgchange_activate_options="-aly --config activation{volume_list=[\"@${OUR_TAG}\"]}"
+ 	vgchange_deactivate_options="-aln"
+ }
+ 
diff --git a/SOURCES/LVM-volume_group_check_only.patch b/SOURCES/LVM-volume_group_check_only.patch
new file mode 100644
index 0000000..505c66a
--- /dev/null
+++ b/SOURCES/LVM-volume_group_check_only.patch
@@ -0,0 +1,72 @@
+From c414259728610f95243d9e34289fefd596b0ac8b Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 22 Jun 2018 15:37:36 +0200
+Subject: [PATCH] LVM: add "volume_group_check_only" parameter to avoid
+ timeouts in some cases
+
+---
+ heartbeat/LVM        | 10 ++++++++++
+ heartbeat/lvm-tag.sh | 24 +++++++++++++-----------
+ 2 files changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/heartbeat/LVM b/heartbeat/LVM
+index d3cd1a14..10f7186d 100755
+--- a/heartbeat/LVM
++++ b/heartbeat/LVM
+@@ -103,6 +103,16 @@ logical volumes.
+ <content type="string" default="false" />
+ </parameter>
+ 
++<parameter name="volume_group_check_only" unique="0" required="0">
++<longdesc lang="en">
++If set, only the volume group will be checked during monitoring.
++
++WARNING: ONLY USE IF YOU HAVE ISSUES WITH TIMEOUTS!
++</longdesc>
++<shortdesc lang="en">Only check volume group during monitoring</shortdesc>
++<content type="string" default="false" />
++</parameter>
++
+ </parameters>
+ 
+ <actions>
+diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh
+index 71f53b20..170426e8 100644
+--- a/heartbeat/lvm-tag.sh
++++ b/heartbeat/lvm-tag.sh
+@@ -160,19 +160,21 @@ lvm_validate_all() {
+ lvm_status() {
+ 	local rc=0
+ 
+-	# If vg is running, make sure the correct tag is present. Otherwise we
+-	# can not guarantee exclusive activation.
+-	if ! check_tags; then
+-		ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\""
+-		rc=$OCF_ERR_GENERIC
+-	fi
++	if ! ocf_is_true "$OCF_RESKEY_volume_group_check_only"; then
++		# If vg is running, make sure the correct tag is present. Otherwise we
++		# can not guarantee exclusive activation.
++		if ! check_tags; then
++			ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\""
++			rc=$OCF_ERR_GENERIC
++		fi
+ 
+-	# make sure the environment for tags activation is still valid
+-	if ! verify_tags_environment; then
+-		rc=$OCF_ERR_GENERIC
++		# make sure the environment for tags activation is still valid
++		if ! verify_tags_environment; then
++			rc=$OCF_ERR_GENERIC
++		fi
++		# let the user know if their initrd is older than lvm.conf.
++		check_initrd_warning
+ 	fi
+-	# let the user know if their initrd is older than lvm.conf.
+-	check_initrd_warning
+ 
+ 	return $rc
+ }
+-- 
+2.17.1
+
diff --git a/SOURCES/VirtualDomain-stateless-support.patch b/SOURCES/VirtualDomain-stateless-support.patch
new file mode 100644
index 0000000..9d79622
--- /dev/null
+++ b/SOURCES/VirtualDomain-stateless-support.patch
@@ -0,0 +1,126 @@
+diff -uNr a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain
+--- a/heartbeat/VirtualDomain	2018-06-29 14:05:02.000000000 +0200
++++ b/heartbeat/VirtualDomain	2018-07-03 14:01:25.892705351 +0200
+@@ -26,6 +26,9 @@
+ OCF_RESKEY_CRM_meta_timeout_default=90000
+ OCF_RESKEY_save_config_on_stop_default=false
+ OCF_RESKEY_sync_config_on_stop_default=false
++OCF_RESKEY_backingfile_default=""
++OCF_RESKEY_stateless_default="false"
++OCF_RESKEY_copyindirs_default=""
+ 
+ : ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}}
+ : ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}}
+@@ -36,6 +39,9 @@
+ : ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}}
+ : ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}}
+ : ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}}
++: ${OCF_RESKEY_backingfile=${OCF_RESKEY_backingfile_default}}
++: ${OCF_RESKEY_stateless=${OCF_RESKEY_stateless_default}}
++: ${OCF_RESKEY_copyindirs=${OCF_RESKEY_copyindirs_default}}
+ 
+ if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then
+ 	OCF_RESKEY_save_config_on_stop="true"
+@@ -271,6 +277,35 @@
+ <content type="string" default=""/>
+ </parameter>
+ 
++<parameter name="backingfile" unique="0" required="0">
++<longdesc lang="en">
++When the VM is used in Copy-On-Write mode, this is the backing file to use (with its full path).
++The VM's image will be created based on this backing file.
++This backing file will never be changed during the life of the VM.
++</longdesc>
++<shortdesc lang="en">If the VM is wanted to work with Copy-On-Write mode, this is the backing file to use (with its full path)</shortdesc>
++<content type="string" default="${OCF_RESKEY_backingfile_default}" />
++</parameter>
++
++<parameter name="stateless" unique="0" required="0">
++<longdesc lang="en">
++If set to true and backingfile is defined, starting the VM will always create a new qcow2 based on
++the backing file, so the VM is always stateless.  If set to false, starting the VM will reuse the
++COW (&lt;vmname&gt;.qcow2) file if it exists; otherwise the first start will create a new qcow2 based on the backing
++file given as backingfile.
++</longdesc>
++<shortdesc lang="en">If set to true, the (&lt;vmname&gt;.qcow2) file will be re-created at each start, based on the backing file (if defined)</shortdesc>
++<content type="boolean" default="${OCF_RESKEY_stateless_default}" />
++</parameter>
++
++<parameter name="copyindirs" unique="0" required="0">
++<longdesc lang="en">
++List of directories to copy in with virt-copy-in before booting the VM. Used only in stateless mode.
++</longdesc>
++<shortdesc lang="en">List of directories to copy in with virt-copy-in before booting the VM in stateless mode.</shortdesc>
++<content type="string" default="${OCF_RESKEY_copyindirs_default}" />
++</parameter>
++
+ <parameter name="shutdown_mode">
+ <longdesc lang="en">
+ virsh shutdown method to use. Please verify that it is supported by your virsh toolsed with 'virsh help shutdown'
+@@ -545,11 +580,49 @@
+ 	# is restored to an 'undefined' state before creating.
+ 	verify_undefined
+ 
+-	virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config}
+-	rc=$?
+-	if [ $rc -ne 0 ]; then
+-		ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}."
+-		return $OCF_ERR_GENERIC
++	if [ -z "${OCF_RESKEY_backingfile}" ]; then
++		virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config}
++		if [ $? -ne 0 ]; then
++			ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}."
++			return $OCF_ERR_GENERIC
++		fi
++	else
++		if ocf_is_true "${OCF_RESKEY_stateless}" || [ ! -s "${OCF_RESKEY_config%%.*}.qcow2" ]; then
++			# Create the Stateless image
++			dirconfig=`dirname ${OCF_RESKEY_config}`
++			qemu-img create -f qcow2 -b ${OCF_RESKEY_backingfile} ${OCF_RESKEY_config%%.*}.qcow2
++			if [ $? -ne 0 ]; then
++				ocf_exit_reason "Failed qemu-img create ${DOMAIN_NAME} with backing file ${OCF_RESKEY_backingfile}."
++				return $OCF_ERR_GENERIC
++			fi
++
++			virsh define ${OCF_RESKEY_config}
++			if [ $? -ne 0 ]; then
++				ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}."
++				return $OCF_ERR_GENERIC
++			fi
++
++			if [ -n "${OCF_RESKEY_copyindirs}" ]; then
++				# Inject copyindirs directories and files
++				virt-copy-in -d ${DOMAIN_NAME}  ${OCF_RESKEY_copyindirs}  /
++				if [ $? -ne 0 ]; then
++					ocf_exit_reason "Failed on virt-copy-in command ${DOMAIN_NAME}."
++					return $OCF_ERR_GENERIC
++				fi
++			fi
++		else
++			virsh define ${OCF_RESKEY_config}
++			if [ $? -ne 0 ]; then
++				ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}."
++				return $OCF_ERR_GENERIC
++			fi
++		fi
++
++		virsh $VIRSH_OPTIONS start ${DOMAIN_NAME}
++		if [ $? -ne 0 ]; then
++			ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}."
++			return $OCF_ERR_GENERIC
++		fi
+ 	fi
+ 
+ 	while ! VirtualDomain_monitor; do
+@@ -926,6 +999,11 @@
+ 		ocf_exit_reason "migration_downtime has to be a decimal value"
+ 		return $OCF_ERR_CONFIGURED
+ 	fi
++
++	if ocf_is_true "${OCF_RESKEY_stateless}" && [ -z "${OCF_RESKEY_backingfile}" ]; then
++		ocf_exit_reason "Stateless functionality can't be achieved without a backing file."
++		return $OCF_ERR_CONFIGURED
++	fi
+ }
+ 
+ VirtualDomain_getconfig() {
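The Copy-On-Write scheme described in the parameters above rests on qcow2
overlay images: the overlay records changes while the backing file stays
read-only. A hedged sketch of the overlay creation the agent drives through
qemu-img (the paths are placeholders, not values from the patch):

    import subprocess

    # Create a fresh overlay on top of an immutable backing file; discarding
    # the overlay and recreating it returns the VM to a pristine state.
    subprocess.check_call([
        'qemu-img', 'create', '-f', 'qcow2',
        '-b', '/var/lib/libvirt/images/base.qcow2',
        '/var/lib/libvirt/images/vm1.qcow2'])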
diff --git a/SOURCES/aliyun-vpc-move-ip-1.patch b/SOURCES/aliyun-vpc-move-ip-1.patch
new file mode 100644
index 0000000..ab948dc
--- /dev/null
+++ b/SOURCES/aliyun-vpc-move-ip-1.patch
@@ -0,0 +1,275 @@
+From e45d0ca9ccc3d5fbe94372f40bedb7559dc9530a Mon Sep 17 00:00:00 2001
+From: "feng.changf1" <feng.changf1@alibaba-inc.com>
+Date: Tue, 24 Jul 2018 15:08:45 +0800
+Subject: [PATCH] Add Aliyun vpc-move-ip agent.
+
+---
+ heartbeat/aliyun-vpc-move-ip | 258 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 258 insertions(+)
+ create mode 100644 heartbeat/aliyun-vpc-move-ip
+
+diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+new file mode 100644
+index 000000000..bc97822a8
+--- /dev/null
++++ b/heartbeat/aliyun-vpc-move-ip
+@@ -0,0 +1,258 @@
++#!/bin/bash
++#
++# OCF resource agent to move an IP address within a VPC in the Aliyun
++# Based on code of Markus Guertler (GitHub AWS-VPC-move-IP)
++# Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip)
++#
++
++###############################################################################
++# For testing purposes delete OCF_ROOT after testing
++OCF_ROOT=/usr/lib/ocf/
++#
++# INIT
++#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat}
++#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then
++#  . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs
++#fi
++
++#######################################################################
++# Initialization:
++
++: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
++. ${OCF_FUNCTIONS}
++: ${__OCF_ACTION=$1}
++export HOME=/root
++#######################################################################
++
++ 
++USAGE="usage: $0 {start|stop|status|meta-data}";
++###############################################################################
++
++
++###############################################################################
++#
++# Functions
++#
++###############################################################################
++
++
++metadata() {
++cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="vpc-move-ip">
++<version>2.0</version>
++<longdesc lang="en">
++Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS
++by changing an entry in a specific routing table
++</longdesc>
++<shortdesc lang="en">Move IP within a VPC of the Aliyun ECS</shortdesc>
++<parameters>
++<parameter name="address" required="1">
++<longdesc lang="en">
++VPC private IP address
++</longdesc>
++<shortdesc lang="en">vpc ip</shortdesc>
++<content type="string" default="" />
++</parameter>
++<parameter name="routing_table" required="1">
++<longdesc lang="en">
++Name of the routing table where the route for the IP address should be changed, e.g. rtb-...
++</longdesc>
++<shortdesc lang="en">routing table name</shortdesc>
++<content type="string" default="" />
++</parameter>
++<parameter name="interface" required="1">
++<longdesc lang="en">
++Name of the network interface, e.g. eth0
++</longdesc>
++<shortdesc lang="en">network interface name</shortdesc>
++<content type="string" default="eth0" />
++</parameter>
++<parameter name="profile" required="0">
++<longdesc lang="en">
++Valid Aliyun CLI profile name
++</longdesc>
++<shortdesc lang="en">profile name</shortdesc>
++<content type="string" default="default" /> 
++</parameter> 
++</parameters>
++<actions>
++<action name="start" timeout="180" />
++<action name="stop" timeout="180" />
++<action name="monitor" depth="0" timeout="30" interval="30" />
++<action name="validate-all" timeout="5" />
++<action name="meta-data" timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++debugger() {
++	ocf_log info "DEBUG: $1"
++}
++
++ecs_ip_validate() {
++	debugger "function: validate"
++	
++	# IP address
++	[[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED
++	
++	# Network Interface
++	[[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED
++	
++	# Routing Table
++	[[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED
++	
++	ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)"
++
++	if [ -z "${ECS_INSTANCE_ID}" ]; then
++		ocf_exit_reason "Instance ID not found. Is this an ECS instance?"
++		return $OCF_ERR_GENERIC
++	fi
++	
++	return $OCF_SUCCESS
++}
++
++ecs_ip_monitor() {
++	ecs_ip_validate
++	debugger "function: ecsip_monitor: check routing table"
++	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	debugger "executing command: $cmd"
++	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++	if [ -z "$ROUTE_TO_INSTANCE" ]; then 
++		ROUTE_TO_INSTANCE="<unknown>"
++	fi
++	
++	[[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING 
++	cmd="ping -W 1 -c 1 $OCF_RESKEY_address"
++	debugger "executing command: $cmd"
++	$cmd > /dev/null
++	[[ $? -gt 0 ]]  && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING
++	debugger "routed in VPC and locally reachable"
++	return $OCF_SUCCESS	
++}
++
++
++ecs_ip_drop() {
++	debugger "function: ecsip_drop"
++	cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface"
++	debugger "executing command: $cmd"
++	$cmd
++	rc=$?
++	[[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC
++	debugger "command succeeded"
++	return $OCF_SUCCESS
++}
++
++wait_for_deleted() {
++  while [ ! -z "$ROUTE_TO_INSTANCE" ]; do
++		sleep 1
++		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		debugger "executing command: $cmd"
++		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++  done
++	sleep 5
++}
++
++wait_for_started() {
++	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	debugger "executing command: $cmd"
++	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++		
++  while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do
++		sleep 1
++		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		debugger "executing command: $cmd"
++		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++  done
++	sleep 5
++}
++
++ecs_ip_get_and_configure() {
++	debugger "function: ecsip_get_and_configure"
++  
++ if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then 
++ 
++     if [ $ROUTE_TO_INSTANCE != "<unknown>" ]; then
++      # Adjusting the routing table
++        cmd="aliyuncli  vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text"
++        debugger "executing command: $cmd"
++        $cmd
++        rc=$?
++        [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
++        #wait_for_deleted
++        sleep 3
++      fi
++      
++      cmd="aliyuncli  vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++      debugger "executing command: $cmd"
++      $cmd
++      rc=$?
++      #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
++		  while [ $rc != 0 ]; do
++				sleep 2
++				cmd="aliyuncli  vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++				debugger "executing command: $cmd"
++				$cmd
++				rc=$?
++			done
++      wait_for_started
++	fi
++  
++  
++	# Reconfigure the local ip address
++	ecs_ip_drop
++	ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface
++	rc=$?
++	[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
++	debugger "-success"
++	return $OCF_SUCCESS
++}
++
++ecs_ip_stop() {
++	ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address"
++	ecs_ip_validate 
++	ecs_ip_monitor
++	[[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS
++	ecs_ip_drop
++	[[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC
++	ecs_ip_monitor
++	[[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS
++	ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." 
++	return $OCF_ERR_GENERIC
++}
++
++ecs_ip_start() {
++	ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table"
++	ecs_ip_validate
++	ecs_ip_monitor
++	[[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS
++	ocf_log info "ECS: Adjusting routing table and locally configuring IP address"
++	ecs_ip_get_and_configure 
++	[[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC
++  return $OCF_SUCCESS
++	ecs_ip_monitor
++	[[ $? == $OCF_SUCCESS ]] &&  return $?
++	ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)"
++	return $OCF_ERR_GENERIC
++}
++
++###############################################################################
++#
++# MAIN
++#
++###############################################################################
++
++case $__OCF_ACTION in 
++	meta-data) metadata
++		   exit $OCF_SUCCESS;;
++	monitor)
++		ecs_ip_monitor;;
++	stop)
++		ecs_ip_stop;;
++	validate-all) ecs_ip_validate;;
++	start)
++		ecs_ip_start;;
++	*)	exit $OCF_ERR_UNIMPLEMENTED;;
++esac
+\ No newline at end of file
diff --git a/SOURCES/aliyun-vpc-move-ip-2-fixes.patch b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch
new file mode 100644
index 0000000..7c5db4c
--- /dev/null
+++ b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch
@@ -0,0 +1,451 @@
+From db3df55a6f7097e1da7d77eb361e9e7560f13353 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Tue, 24 Jul 2018 13:57:08 +0200
+Subject: [PATCH] aliyun-vpc-move-ip: fixes
+
+---
+ doc/man/Makefile.am          |   1 +
+ heartbeat/Makefile.am        |   1 +
+ heartbeat/aliyun-vpc-move-ip | 336 ++++++++++++++++++++++++-------------------
+ 3 files changed, 189 insertions(+), 149 deletions(-)
+ mode change 100644 => 100755 heartbeat/aliyun-vpc-move-ip
+
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index 3ac0569de..fc9a67161 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -93,6 +93,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_WinPopup.7 \
+                           ocf_heartbeat_Xen.7 \
+                           ocf_heartbeat_Xinetd.7 \
++                          ocf_heartbeat_aliyun-vpc-move-ip.7 \
+                           ocf_heartbeat_anything.7 \
+                           ocf_heartbeat_apache.7 \
+                           ocf_heartbeat_asterisk.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index d4750bf09..6adc6bc3c 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -90,6 +90,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
+ 			Xen			\
+ 			Xinetd			\
+ 			ZFS			\
++			aliyun-vpc-move-ip	\
+ 			anything		\
+ 			apache			\
+ 			asterisk		\
+diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+old mode 100644
+new mode 100755
+index bc97822a8..108feb247
+--- a/heartbeat/aliyun-vpc-move-ip
++++ b/heartbeat/aliyun-vpc-move-ip
+@@ -1,30 +1,19 @@
+-#!/bin/bash
++#!/bin/sh
+ #
+ # OCF resource agent to move an IP address within a VPC in the Aliyun
+ # Based on code of Markus Guertler (GitHub AWS-VPC-move-IP)
+ # Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip)
+ #
+ 
+-###############################################################################
+-# For testing purposes delete OCF_ROOT after testing
+-OCF_ROOT=/usr/lib/ocf/
+-#
+-# INIT
+-#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat}
+-#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then
+-#  . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs
+-#fi
+-
+ #######################################################################
+ # Initialization:
+-
+-: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
+-. ${OCF_FUNCTIONS}
+-: ${__OCF_ACTION=$1}
+-export HOME=/root
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ #######################################################################
+ 
+- 
++# aliyuncli doesn't work without the HOME environment variable
++export HOME="/root"
++
+ USAGE="usage: $0 {start|stop|status|meta-data}";
+ ###############################################################################
+ 
+@@ -36,8 +25,96 @@ USAGE="usage: $0 {start|stop|status|meta-data}";
+ ###############################################################################
+ 
+ 
+-metadata() {
+-cat <<END
++
++ip_get_and_configure() {
++	ocf_log debug "function: ip_get_and_configure"
++
++	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++
++	if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then
++		if [ -n "$ROUTE_TO_INSTANCE" ]; then
++			ip_drop
++		fi
++
++		cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++		ocf_log debug "executing command: $cmd"
++		$cmd
++		rc=$?
++		while [ $rc -ne 0 ]; do
++			sleep 1
++			cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++			ocf_log debug "executing command: $cmd"
++			$cmd
++			rc=$?
++		done
++		wait_for_started
++	fi
++
++
++	# Reconfigure the local ip address
++	ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface
++	rc=$?
++	if [ $rc -ne 0 ]; then
++		ocf_log err "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++
++	ocf_log debug "IP added"
++
++	return $OCF_SUCCESS
++}
++
++ip_drop() {
++	ocf_log debug "function: ip_drop"
++	cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface"
++	ocf_log debug "executing command: $cmd"
++	$cmd
++	rc=$?
++	if [ $rc -ne 0 ] && [ $rc -ne 2 ]; then
++		ocf_log err "command failed, rc $rc"
++		return $OCF_ERR_GENERIC
++	fi
++
++	cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text"
++	ocf_log debug "executing command: $cmd"
++	$cmd
++	if [ $? -ne 0 ]; then
++		ocf_log err "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++	wait_for_deleted
++
++	ocf_log debug "IP dropped"
++
++	return $OCF_SUCCESS
++}
++
++wait_for_started() {
++	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	ocf_log debug "executing command: $cmd"
++	ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')"
++
++	while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do
++		sleep 3
++		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		ocf_log debug "executing command: $cmd"
++		ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')"
++	done
++}
++
++wait_for_deleted() {
++	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++
++	 while [ ! -z "$ROUTE_TO_INSTANCE" ]; do
++		sleep 1
++		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		ocf_log debug "executing command: $cmd"
++		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++	 done
++}
++
++ecs_ip_metadata() {
++	cat <<END
+ <?xml version="1.0"?>
+ <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+ <resource-agent name="vpc-move-ip">
+@@ -74,8 +151,8 @@ Name of the network interfacen, i.e. eth0
+ Valid Aliyun CLI profile name
+ </longdesc>
+ <shortdesc lang="en">profile name</shortdesc>
+-<content type="string" default="default" /> 
+-</parameter> 
++<content type="string" default="default" />
++</parameter>
+ </parameters>
+ <actions>
+ <action name="start" timeout="180" />
+@@ -88,171 +165,132 @@ Valid Aliyun CLI profile name
+ END
+ }
+ 
+-debugger() {
+-	ocf_log info "DEBUG: $1"
+-}
+-
+ ecs_ip_validate() {
+-	debugger "function: validate"
+-	
++	ocf_log debug "function: validate"
++
+ 	# IP address
+-	[[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED
+-	
++	if [ -z "$OCF_RESKEY_address" ]; then
++		ocf_log err "IP address parameter not set $OCF_RESKEY_ADDRESS!"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
+ 	# Network Interface
+-	[[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED
+-	
++	if [ -z "$OCF_RESKEY_interface" ]; then
++		ocf_log err "Network interface parameter not set $OCF_RESKEY_INTERFACE!"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
+ 	# Routing Table
+-	[[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED
+-	
+-	ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)"
++	if [ -z "$OCF_RESKEY_routing_table" ]; then
++		ocf_log err "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!"
++		exit $OCF_ERR_CONFIGURED
++	fi
+ 
+ 	if [ -z "${ECS_INSTANCE_ID}" ]; then
+ 		ocf_exit_reason "Instance ID not found. Is this a ECS instance?"
+ 		return $OCF_ERR_GENERIC
+ 	fi
+-	
+-	return $OCF_SUCCESS
+-}
+ 
+-ecs_ip_monitor() {
+-	ecs_ip_validate
+-	debugger "function: ecsip_monitor: check routing table"
+-	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+-	debugger "executing command: $cmd"
+-	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
+-	if [ -z "$ROUTE_TO_INSTANCE" ]; then 
+-		ROUTE_TO_INSTANCE="<unknown>"
+-	fi
+-	
+-	[[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING 
+-	cmd="ping -W 1 -c 1 $OCF_RESKEY_address"
+-	debugger "executing command: $cmd"
+-	$cmd > /dev/null
+-	[[ $? -gt 0 ]]  && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING
+-	debugger "routed in VPC and locally reachable"
+-	return $OCF_SUCCESS	
+-}
+-
+-
+-ecs_ip_drop() {
+-	debugger "function: ecsip_drop"
+-	cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface"
+-	debugger "executing command: $cmd"
+-	$cmd
+-	rc=$?
+-	[[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC
+-	debugger "command succeeded"
+ 	return $OCF_SUCCESS
+ }
+ 
+-wait_for_deleted() {
+-  while [ ! -z "$ROUTE_TO_INSTANCE" ]; do
+-		sleep 1
+-		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+-		debugger "executing command: $cmd"
+-		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
+-  done
+-	sleep 5
+-}
++ecs_ip_start() {
++	ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table"
+ 
+-wait_for_started() {
+-	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+-	debugger "executing command: $cmd"
+-	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
+-		
+-  while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do
+-		sleep 1
+-		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+-		debugger "executing command: $cmd"
+-		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
+-  done
+-	sleep 5
+-}
++	ecs_ip_monitor
++	if [ $? = $OCF_SUCCESS ]; then
++		ocf_log info "ECS: $OCF_RESKEY_address already started"
++		return $OCF_SUCCESS
++	fi
+ 
+-ecs_ip_get_and_configure() {
+-	debugger "function: ecsip_get_and_configure"
+-  
+- if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then 
+- 
+-     if [ $ROUTE_TO_INSTANCE != "<unknown>" ]; then
+-      # Adjusting the routing table
+-        cmd="aliyuncli  vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text"
+-        debugger "executing command: $cmd"
+-        $cmd
+-        rc=$?
+-        [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
+-        #wait_for_deleted
+-        sleep 3
+-      fi
+-      
+-      cmd="aliyuncli  vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
+-      debugger "executing command: $cmd"
+-      $cmd
+-      rc=$?
+-      #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
+-		  while [ $rc != 0 ]; do
+-				sleep 2
+-				cmd="aliyuncli  vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
+-				debugger "executing command: $cmd"
+-				$cmd
+-				rc=$?
+-			done
+-      wait_for_started
++	ocf_log info "ECS: Adjusting routing table and locally configuring IP address"
++	ip_get_and_configure
++	rc=$?
++	if [ $rc -ne 0 ]; then
++		ocf_log err "Received $rc from 'aliyun cli'"
++		return $OCF_ERR_GENERIC
+ 	fi
+-  
+-  
+-	# Reconfigure the local ip address
+-	ecs_ip_drop
+-	ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface
++
++	ecs_ip_monitor
+ 	rc=$?
+-	[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC
+-	debugger "-success"
++	if [ $rc -ne $OCF_SUCCESS ]; then
++		ocf_log err "IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)"
++		return $rc
++	fi
++
+ 	return $OCF_SUCCESS
+ }
+ 
+ ecs_ip_stop() {
+ 	ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address"
+-	ecs_ip_validate 
++
+ 	ecs_ip_monitor
+-	[[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS
+-	ecs_ip_drop
+-	[[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC
++	if [ $? = $OCF_NOT_RUNNING ]; then
++		ocf_log info "ECS: Address $OCF_RESKEY_address already down"
++		return $OCF_SUCCESS
++	fi
++
++	ip_drop
++	if [ $? -ne $OCF_SUCCESS ]; then
++		ocf_log err "ECS: Couldn't drop IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface."
++		return $OCF_ERR_GENERIC
++	fi
++
+ 	ecs_ip_monitor
+-	[[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS
+-	ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." 
++	if [ $? = $OCF_NOT_RUNNING ]; then
++		ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address"
++		return $OCF_SUCCESS
++	fi
++
++	ocf_log err "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface."
+ 	return $OCF_ERR_GENERIC
+ }
+ 
+-ecs_ip_start() {
+-	ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table"
+-	ecs_ip_validate
+-	ecs_ip_monitor
+-	[[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS
+-	ocf_log info "ECS: Adjusting routing table and locally configuring IP address"
+-	ecs_ip_get_and_configure 
+-	[[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC
+-  return $OCF_SUCCESS
+-	ecs_ip_monitor
+-	[[ $? == $OCF_SUCCESS ]] &&  return $?
+-	ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)"
+-	return $OCF_ERR_GENERIC
++ecs_ip_monitor() {
++	ocf_log debug "function: ecsip_monitor: check routing table"
++	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	ocf_log debug "executing command: $cmd"
++
++	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
++
++	if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then
++		ocf_log debug "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE"
++		return $OCF_NOT_RUNNING
++	fi
++
++	cmd="ping -W 1 -c 1 $OCF_RESKEY_address"
++	ocf_log debug "executing command: $cmd"
++	$cmd > /dev/null
++	if [ $? -ne 0 ]; then
++		ocf_log debug "IP $OCF_RESKEY_address not locally reachable via ping on this system"
++		return $OCF_NOT_RUNNING
++	fi
++	ocf_log debug "routed in VPC and locally reachable"
++	return $OCF_SUCCESS
+ }
+ 
++
+ ###############################################################################
+ #
+ # MAIN
+ #
+ ###############################################################################
+ 
+-case $__OCF_ACTION in 
+-	meta-data) metadata
++case $__OCF_ACTION in
++	meta-data) ecs_ip_metadata
+ 		   exit $OCF_SUCCESS;;
+-	monitor)
+-		ecs_ip_monitor;;
+-	stop)
+-		ecs_ip_stop;;
+ 	validate-all) ecs_ip_validate;;
++esac
++
++ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)"
++
++case $__OCF_ACTION in
+ 	start)
++		ecs_ip_validate
+ 		ecs_ip_start;;
++	stop)
++		ecs_ip_stop;;
++	monitor)
++		ecs_ip_monitor;;
+ 	*)	exit $OCF_ERR_UNIMPLEMENTED;;
+-esac
+\ No newline at end of file
++esac
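Note: a key change in this rewrite is that validate-all runs before the ECS metadata lookup, and the instance ID is fetched once in MAIN rather than inside every function. A sketch of that metadata probe, assuming the standard Aliyun metadata endpoint; the 5-second timeout cap is an illustrative addition, not part of the patch:

    ECS_INSTANCE_ID="$(curl -s --max-time 5 http://100.100.100.200/latest/meta-data/instance-id)"
    if [ -z "$ECS_INSTANCE_ID" ]; then
        # Mirrors the agent's ocf_exit_reason path: we are not on an ECS instance.
        echo "Instance ID not found. Is this an ECS instance?" >&2
        exit 1
    fi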
diff --git a/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch
new file mode 100644
index 0000000..619b721
--- /dev/null
+++ b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch
@@ -0,0 +1,22 @@
+From ee081df601f914079f111eec10cb81ab212130a9 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 25 Jul 2018 11:22:39 +0200
+Subject: [PATCH] aliyun-vpc-move-ip: fix manpage
+
+---
+ heartbeat/aliyun-vpc-move-ip | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+index 108feb247..e27952adb 100755
+--- a/heartbeat/aliyun-vpc-move-ip
++++ b/heartbeat/aliyun-vpc-move-ip
+@@ -117,7 +117,7 @@ ecs_ip_metadata() {
+ 	cat <<END
+ <?xml version="1.0"?>
+ <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+-<resource-agent name="vpc-move-ip">
++<resource-agent name="aliyun-vpc-move-ip">
+ <version>2.0</version>
+ <longdesc lang="en">
+ Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS
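Note: the rename matters because the ocf_heartbeat_*.7 man page is generated from the agent's metadata name attribute, which therefore has to match the installed agent filename. A quick check after a build, assuming the package is installed:

    # Prints the path of the generated man page if the names line up.
    man -w ocf_heartbeat_aliyun-vpc-move-ip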
diff --git a/SOURCES/aliyun-vpc-move-ip-4-bundled.patch b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch
new file mode 100644
index 0000000..b52e7c8
--- /dev/null
+++ b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch
@@ -0,0 +1,62 @@
+diff -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+--- a/heartbeat/aliyun-vpc-move-ip	2018-08-03 15:21:34.869664678 +0200
++++ b/heartbeat/aliyun-vpc-move-ip	2018-08-03 15:22:48.632185198 +0200
+@@ -36,13 +36,13 @@
+ 			ip_drop
+ 		fi
+ 
+-		cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++		cmd="aliyuncli-ra vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
+ 		ocf_log debug "executing command: $cmd"
+ 		$cmd
+ 		rc=$?
+ 		while [ $rc -ne 0 ]; do
+ 			sleep 1
+-			cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
++			cmd="aliyuncli-ra vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text"
+ 			ocf_log debug "executing command: $cmd"
+ 			$cmd
+ 			rc=$?
+@@ -75,7 +75,7 @@
+ 		return $OCF_ERR_GENERIC
+ 	fi
+ 
+-	cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text"
++	cmd="aliyuncli-ra vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text"
+ 	ocf_log debug "executing command: $cmd"
+ 	$cmd
+ 	if [ $? -ne 0 ]; then
+@@ -90,13 +90,13 @@
+ }
+ 
+ wait_for_started() {
+-	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	cmd="aliyuncli-ra vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+ 	ocf_log debug "executing command: $cmd"
+ 	ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')"
+ 
+ 	while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do
+ 		sleep 3
+-		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		cmd="aliyuncli-ra vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+ 		ocf_log debug "executing command: $cmd"
+ 		ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')"
+ 	done
+@@ -107,7 +107,7 @@
+ 
+ 	 while [ ! -z "$ROUTE_TO_INSTANCE" ]; do
+ 		sleep 1
+-		cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++		cmd="aliyuncli-ra vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+ 		ocf_log debug "executing command: $cmd"
+ 		ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
+ 	 done
+@@ -248,7 +248,7 @@
+ 
+ ecs_ip_monitor() {
+ 	ocf_log debug "function: ecsip_monitor: check routing table"
+-	cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
++	cmd="aliyuncli-ra vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text"
+ 	ocf_log debug "executing command: $cmd"
+ 
+ 	ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')"
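Note: this patch points the agent at the bundled CLI, shipped as aliyuncli-ra so it cannot collide with a separately installed aliyuncli. A hedged smoke test, with vtb-example standing in for a real routing table ID:

    command -v aliyuncli-ra >/dev/null || { echo "aliyuncli-ra not found" >&2; exit 1; }
    aliyuncli-ra vpc DescribeRouteTables --RouteTableId vtb-example --output text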
diff --git a/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch
new file mode 100644
index 0000000..872158c
--- /dev/null
+++ b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch
@@ -0,0 +1,49 @@
+From fc497e888afcb88babbc21a59883556335c070fa Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 31 Aug 2018 11:41:32 +0200
+Subject: [PATCH] aliyun-vpc-move-ip: improve metadata and manpage
+
+---
+ heartbeat/aliyun-vpc-move-ip | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+index e27952adb..c004d26fc 100755
+--- a/heartbeat/aliyun-vpc-move-ip
++++ b/heartbeat/aliyun-vpc-move-ip
+@@ -123,7 +123,7 @@ ecs_ip_metadata() {
+ Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS
+ by changing an entry in an specific routing table
+ </longdesc>
+-<shortdesc lang="en">Move IP within a APC of the Aliyun ECS</shortdesc>
++<shortdesc lang="en">Move IP within a VPC of the Aliyun ECS</shortdesc>
+ <parameters>
+ <parameter name="address" required="1">
+ <longdesc lang="en">
+@@ -134,21 +134,23 @@ VPC private IP address
+ </parameter>
+ <parameter name="routing_table" required="1">
+ <longdesc lang="en">
+-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-...
++Name of the routing table, where the route for the IP address should be changed, i.e. vtb-...
+ </longdesc>
+ <shortdesc lang="en">routing table name</shortdesc>
+ <content type="string" default="" />
+ </parameter>
+ <parameter name="interface" required="1">
+ <longdesc lang="en">
+-Name of the network interfacen, i.e. eth0
++Name of the network interface, i.e. eth0
+ </longdesc>
+ <shortdesc lang="en">network interface name</shortdesc>
+ <content type="string" default="eth0" />
+ </parameter>
+ <parameter name="profile" required="0">
+ <longdesc lang="en">
+-Valid Aliyun CLI profile name
++Valid Aliyun CLI profile name (see 'aliyuncli-ra configure').
++
++See https://www.alibabacloud.com/help/doc-detail/43039.htm?spm=a2c63.p38356.b99.16.38a914abRZtOU3 for more information about aliyuncli-ra.
+ </longdesc>
+ <shortdesc lang="en">profile name</shortdesc>
+ <content type="string" default="default" />
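Note: since every agent action shells out to the CLI as root, credentials must be in place before the first monitor runs. A sketch of the one-time interactive setup the updated longdesc refers to:

    # Run once as root on every cluster node; HOME=/root matches what the agent exports.
    aliyuncli-ra configure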
diff --git a/SOURCES/aliyuncli-python3-fixes.patch b/SOURCES/aliyuncli-python3-fixes.patch
new file mode 100644
index 0000000..22be4e1
--- /dev/null
+++ b/SOURCES/aliyuncli-python3-fixes.patch
@@ -0,0 +1,398 @@
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py	2019-02-19 12:08:17.331785393 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py	2019-02-19 14:40:39.656330971 +0100
+@@ -13,7 +13,7 @@
+     
+     def getFileName(self,keyValues):
+         filename = None
+-        if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
++        if '--filename' in keyValues and len(keyValues['--filename']) > 0:
+             filename = keyValues['--filename'][0]
+         else:
+             return filename, "A file name is needed! please use \'--filename\' and add the file name."
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py	2019-02-19 12:08:17.331785393 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py	2019-02-19 14:41:48.927128430 +0100
+@@ -13,7 +13,7 @@
+     
+     def getFileName(self,keyValues):
+         filename = None
+-        if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
++        if '--filename' in keyValues and len(keyValues['--filename']) > 0:
+             filename = keyValues['--filename'][0]
+         else:
+             print("A profile is needed! please use \'--filename\' and add the profile name.")
+@@ -21,7 +21,7 @@
+ 
+     def getInstanceCount(self,keyValues):
+         count = 1
+-        if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0:
++        if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0:
+             if  keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
+                 count = keyValues['--instancecount'][0]
+             else:
+@@ -113,7 +113,7 @@
+ 
+     def isAllocatePublicIpAddress(self,keyValues):
+         _publicIp = False
+-        if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0:
++        if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0:
+             if  keyValues['--allocatepublicip'][0] == "yes":
+                 _publicIp = True
+         return _publicIp
+@@ -125,7 +125,7 @@
+             '''
+             data = json.loads(jsonbody)
+             '''
+-            if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
++            if 'InstanceId' in data and len(data['InstanceId']) > 0:
+                 instanceId = data['InstanceId']
+         except Exception as e:
+             pass
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py	2019-02-19 12:08:17.331785393 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py	2019-02-19 14:42:11.772731833 +0100
+@@ -38,7 +38,7 @@
+ 
+     def getFileName(self,keyValues):
+         filename = None
+-        if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
++        if '--filename' in keyValues and len(keyValues['--filename']) > 0:
+             filename = keyValues['--filename'][0]
+         else:
+             return filename, "A file name is needed! please use \'--filename\' and add the file name."
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py	2019-02-19 12:08:17.331785393 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py	2019-02-19 14:39:09.247900469 +0100
+@@ -13,7 +13,7 @@
+     
+     def getFileName(self,keyValues):
+         filename = None
+-        if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
++        if '--filename' in keyValues and len(keyValues['--filename']) > 0:
+             filename = keyValues['--filename'][0]
+         else:
+             return filename, "A filename is needed! please use \'--filename\' and add the file name."
+@@ -21,7 +21,7 @@
+     def getInstanceCount(self,keyValues):
+         count = 1
+         import_count = "--count"
+-        if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0:
++        if import_count in keyValues and len(keyValues[import_count]) > 0:
+             if  keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0:
+                 count = keyValues[import_count][0]
+             else:
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py	2019-02-19 11:01:46.116653274 +0100
+@@ -17,37 +17,37 @@
+ 
+     def getConfigHandlerOptions(self):
+         return [ConfigCmd.name]
+-				
++
+     def showConfig(self):
+         _credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
+         _configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
+         config = dict()
+         configContent = dict() 
+-	credentialsContent = dict ()
+-	if os.path.exists(_configurePath):
++        credentialsContent = dict ()
++        if os.path.exists(_configurePath):
+             for line in open(_configurePath):
+                 line = line.strip('\n')
+                 if line.find('=') > 0:
+                     list = line.split("=",1)
+-		    configContent[list[0]] = list[1]
+-		else: 
+-		    pass
+-	config['configure'] = configContent
+-	if os.path.exists(_credentialsPath):
+-	    for line in open(_credentialsPath):
++                    configContent[list[0]] = list[1]
++                else: 
++                    pass
++        config['configure'] = configContent
++        if os.path.exists(_credentialsPath):
++            for line in open(_credentialsPath):
+                 line = line.strip('\n')
+                 if line.find('=') > 0:
+                     list = line.split("=",1)
+-		    credentialsContent[list[0]] = list[1]
+-		else: 
+-		    pass 
+-	config ['credentials'] = credentialsContent
+-	response.display_response("showConfigure",config,'table')
++                    credentialsContent[list[0]] = list[1]
++                else: 
++                    pass 
++        config ['credentials'] = credentialsContent
++        response.display_response("showConfigure",config,'table')
+     def importConfig():
+         pass
+     def exportConfig():
+         pass
+-	
++
+ 
+ 
+ if __name__ == "__main__":
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py	2019-02-19 12:08:17.332785376 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py	2019-02-19 14:40:12.267806439 +0100
+@@ -20,7 +20,7 @@
+     def handleProfileCmd(self, cmd, keyValues):
+         if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right
+             #check --name is valid
+-            if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
++            if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
+                 _value = keyValues[ProfileCmd.name][0] # use the first value
+                 self.extensionCliHandler.setUserProfile(_value)
+             else:
+@@ -34,7 +34,7 @@
+         newProfileName = ''
+         if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right
+             #check --name is valid
+-            if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
++            if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
+                 _value = keyValues[ProfileCmd.name][0] # check the first value
+                 # only input key and secret
+                 newProfileName = _value
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py	2019-02-19 12:08:17.332785376 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py	2019-02-19 14:35:32.009660989 +0100
+@@ -137,9 +137,9 @@
+                         values.append(self.args[index])
+                         index = index + 1
+                     keyValues[currentValue] = values
+-        if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0:
++        if keystr in keyValues and keyValues[keystr].__len__() > 0:
+             _key = keyValues[keystr][0]
+-        if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
++        if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
+             _secret = keyValues[secretstr][0]
+         #print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
+         return _key, _secret
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py	2019-02-19 13:35:35.738680413 +0100
+@@ -19,8 +19,9 @@
+ '''
+ 
+ import sys 
+-reload(sys)
+-sys.setdefaultencoding('utf-8')
++if sys.version_info[0] < 3:
++    reload(sys)
++    sys.setdefaultencoding('utf-8')
+ __author__ = 'xixi.xxx'
+ import aliyunCliMain
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py	2019-02-19 12:08:17.332785376 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py	2019-02-19 11:15:19.920089641 +0100
+@@ -18,7 +18,7 @@
+ '''
+ 
+ import aliyunCliConfiugre
+-import urllib2
++import urllib3
+ import re
+ import os
+ import platform
+@@ -151,7 +151,7 @@
+ # this functino will get the latest version
+     def _getLatestTimeFromServer(self):
+         try:
+-            f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5)
++            f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5)
+             s = f.read()
+             return s
+         except Exception as e:
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2019-02-19 12:08:17.332785376 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2019-02-19 14:37:28.221649497 +0100
+@@ -26,7 +26,7 @@
+ import aliyunSdkConfigure
+ import json
+ import cliError
+-import urllib2
++import urllib3
+ import handleEndPoint
+ 
+ from __init__ import  __version__
+@@ -259,7 +259,7 @@
+     def changeEndPoint(self, classname, keyValues):
+         endpoint = "Endpoint"
+         try:
+-            if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0:
++            if endpoint in keyValues and keyValues[endpoint].__len__() > 0:
+                 classname._RestApi__domain = keyValues[endpoint][0]
+         except Exception as e:
+             pass
+@@ -444,10 +444,10 @@
+ 
+     def getTempVersion(self,keyValues):
+         key='--version'
+-        if keyValues is not None and keyValues.has_key(key):
++        if keyValues is not None and key in keyValues:
+             return keyValues.get(key)
+         key = 'version'
+-        if keyValues is not None and keyValues.has_key(key):
++        if keyValues is not None and key in keyValues:
+             return keyValues.get(key)
+ 
+     def getVersionFromFile(self,cmd):
+@@ -513,7 +513,7 @@
+         self.checkForServer(response,cmd,operation)
+     def getRequestId(self,response):
+         try:
+-            if response.has_key('RequestId') and len(response['RequestId']) > 0:
++            if 'RequestId' in response and len(response['RequestId']) > 0:
+                 requestId = response['RequestId']
+                 return  requestId
+         except Exception:
+@@ -532,7 +532,7 @@
+             ua = ""
+         url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation
+         try:
+-            f = urllib2.urlopen(url,data=None,timeout=5)
++            f = urllib3.urlopen(url,data=None,timeout=5)
+             s = f.read()
+             return s
+         except Exception :
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py	2019-02-19 12:08:17.333785359 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py	2019-02-19 14:38:04.032029661 +0100
+@@ -39,7 +39,7 @@
+ 
+     def sdkConfigure(self,cmd,operation):
+         keyValues = self.parser._getKeyValues()
+-        if keyValues.has_key('--version') and len(keyValues['--version']) > 0:
++        if '--version' in keyValues and len(keyValues['--version']) > 0:
+             version=keyValues['--version'][0]
+             filename=self.fileName
+             self.writeCmdVersionToFile(cmd,version,filename)
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2019-02-19 12:08:17.333785359 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2019-02-19 11:12:58.670708353 +0100
+@@ -23,6 +23,8 @@
+ import aliyunCliParser
+ import platform
+ 
++if sys.version_info[0] > 2:
++    raw_input = input
+ 
+ OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
+ OSS_CONFIG_SECTION = 'OSSCredentials'
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2019-02-19 12:08:17.333785359 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2019-02-19 11:14:58.926181598 +0100
+@@ -19,7 +19,7 @@
+ #/usr/bin/env python
+ #!-*- coding:utf-8 -*-
+ import os
+-import urllib2
++import urllib3
+ import cliError
+ 
+ 
+@@ -64,9 +64,9 @@
+         print(e)
+ def _getParamFromUrl(prefix,value,mode):
+ 
+-    req = urllib2.Request(value)
++    req = urllib3.Request(value)
+     try:
+-        response=urllib2.urlopen(req)
++        response=urllib3.urlopen(req)
+         if response.getcode() == 200:
+             return response.read()
+         else:
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py	2019-02-19 11:14:40.505262286 +0100
+@@ -340,8 +340,8 @@
+ 
+ 
+ _urllib_error_moved_attributes = [
+-    MovedAttribute("URLError", "urllib2", "urllib.error"),
+-    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
++    MovedAttribute("URLError", "urllib3", "urllib.error"),
++    MovedAttribute("HTTPError", "urllib3", "urllib.error"),
+     MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+ ]
+ for attr in _urllib_error_moved_attributes:
+@@ -359,34 +359,34 @@
+ 
+ 
+ _urllib_request_moved_attributes = [
+-    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+-    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+-    MovedAttribute("build_opener", "urllib2", "urllib.request"),
++    MovedAttribute("urlopen", "urllib3", "urllib.request"),
++    MovedAttribute("install_opener", "urllib3", "urllib.request"),
++    MovedAttribute("build_opener", "urllib3", "urllib.request"),
+     MovedAttribute("pathname2url", "urllib", "urllib.request"),
+     MovedAttribute("url2pathname", "urllib", "urllib.request"),
+     MovedAttribute("getproxies", "urllib", "urllib.request"),
+-    MovedAttribute("Request", "urllib2", "urllib.request"),
+-    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+-    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+-    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+-    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
++    MovedAttribute("Request", "urllib3", "urllib.request"),
++    MovedAttribute("OpenerDirector", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"),
++    MovedAttribute("ProxyHandler", "urllib3", "urllib.request"),
++    MovedAttribute("BaseHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"),
++    MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"),
++    MovedAttribute("FileHandler", "urllib3", "urllib.request"),
++    MovedAttribute("FTPHandler", "urllib3", "urllib.request"),
++    MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"),
++    MovedAttribute("UnknownHandler", "urllib3", "urllib.request"),
++    MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"),
+     MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+     MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+     MovedAttribute("URLopener", "urllib", "urllib.request"),
+diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py
+--- a/bundled/aliyun/aliyun-cli/setup.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/setup.py	2019-02-19 13:33:29.069848394 +0100
+@@ -24,7 +24,7 @@
+ 
+ install_requires = [
+         'colorama>=0.2.5,<=0.3.3',
+-        'jmespath>=0.7.0,<=0.7.1',
++        'jmespath>=0.7.0',
+         ]
+ def main():
+     setup(
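Note: most of these hunks are the mechanical Python 2 to 3 moves: dict.has_key() becomes the in operator, reload(sys)/setdefaultencoding is gated to Python 2, and raw_input is aliased to input. One caveat: urllib3 is not a drop-in replacement for urllib2 (it exposes PoolManager rather than urlopen/Request), so the sketch below shows the idiomatic urllib3 API the renamed call sites would actually need; it assumes the urllib3 package is importable and uses a placeholder URL:

    python3 - <<'EOF'
    import urllib3

    # Membership test replacing keyValues.has_key('--filename'):
    keyValues = {'--filename': ['routes.json']}
    if '--filename' in keyValues and len(keyValues['--filename']) > 0:
        print(keyValues['--filename'][0])

    # urllib3 equivalent of the old urllib2.urlopen(url, timeout=5):
    http = urllib3.PoolManager()
    resp = http.request('GET', 'http://example.com', timeout=5.0)
    print(resp.status)
    EOF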
diff --git a/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch
new file mode 100644
index 0000000..9c23ffa
--- /dev/null
+++ b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch
@@ -0,0 +1,39 @@
+From 7632a85bcf642b484df52a25dbffbfa0031421bc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?= <mkoutny@suse.com>
+Date: Mon, 6 Aug 2018 18:04:52 +0200
+Subject: [PATCH] aws-vpc-move-ip: Use ip utility to check address
+
+When pinging the assigned address during initial monitor (probe) on one
+node we may actually ping the reachable address when the resource is
+running on another node. This yields false positive monitor result on
+the pinging node. Avoid this by merely checking the assignment of the
+address to an interface.
+---
+ heartbeat/aws-vpc-move-ip | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index cefa38e03..3bbbed474 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,15 +167,15 @@ ec2ip_monitor() {
+ 		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
+ 	fi
+ 
+-	cmd="ping -W 1 -c 1 $OCF_RESKEY_ip"
++	cmd="ip addr show to '$OCF_RESKEY_ip' up"
+ 	ocf_log debug "executing command: $cmd"
+-	$cmd > /dev/null
+-	if [ "$?" -gt 0 ]; then
+-		ocf_log warn "IP $OCF_RESKEY_ip not locally reachable via ping on this system"
++	RESULT=$($cmd | grep '$OCF_RESKEY_ip')
++	if [ -z "$RESULT" ]; then
++		ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface"
+ 		return $OCF_NOT_RUNNING
+ 	fi
+ 
+-	ocf_log debug "route in VPC and locally reachable"
++	ocf_log debug "route in VPC and address assigned"
+ 	return $OCF_SUCCESS
+ }
+ 
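Note: the switch from ping to ip addr show means the probe asks the kernel whether the address is assigned locally, rather than whether anything on the network answers it. A standalone sketch of the check; 192.0.2.10 is a documentation-range placeholder:

    OCF_RESKEY_ip=192.0.2.10
    # 'to' filters to interfaces carrying the address; 'up' skips down links.
    if ip addr show to "$OCF_RESKEY_ip" up | grep -q "$OCF_RESKEY_ip"; then
        echo "address assigned on this node"
    else
        echo "address not assigned here"
    fi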
diff --git a/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch
new file mode 100644
index 0000000..4d1cbf9
--- /dev/null
+++ b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch
@@ -0,0 +1,31 @@
+From 42dccdd20aff3ebf134c8041f79ab0a658975e69 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?= <mkoutny@suse.com>
+Date: Thu, 30 Aug 2018 18:02:51 +0200
+Subject: [PATCH] aws-vpc-move-ip: Fix broken shell quoting
+
+The argument 4th to `ip` is passed with single quotes around which
+cannot be parsed as valid IP address. Furthermore, we need to expand the
+$OCF_RESKEY_ip for grep. This breaks correct detection of the assigned
+address.
+
+Fixes 7632a85bcf642b484df52a25dbffbfa0031421bc.
+---
+ heartbeat/aws-vpc-move-ip | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 3bbbed474..ce3fc6b9a 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,9 +167,9 @@ ec2ip_monitor() {
+ 		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
+ 	fi
+ 
+-	cmd="ip addr show to '$OCF_RESKEY_ip' up"
++	cmd="ip addr show to $OCF_RESKEY_ip up"
+ 	ocf_log debug "executing command: $cmd"
+-	RESULT=$($cmd | grep '$OCF_RESKEY_ip')
++	RESULT=$($cmd | grep "$OCF_RESKEY_ip")
+ 	if [ -z "$RESULT" ]; then
+ 		ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface"
+ 		return $OCF_NOT_RUNNING
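Note: the bug and the fix come down to shell quoting: inside single quotes $OCF_RESKEY_ip never expands, so grep hunts for the literal variable name. A two-line demonstration with a placeholder address:

    OCF_RESKEY_ip=192.0.2.10
    echo "$OCF_RESKEY_ip" | grep '$OCF_RESKEY_ip' || echo "no match: pattern stayed literal"
    echo "$OCF_RESKEY_ip" | grep "$OCF_RESKEY_ip" && echo "match: variable expanded"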
diff --git a/SOURCES/build-add-missing-manpages.patch b/SOURCES/build-add-missing-manpages.patch
new file mode 100644
index 0000000..6ac1c2d
--- /dev/null
+++ b/SOURCES/build-add-missing-manpages.patch
@@ -0,0 +1,43 @@
+diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2018-07-27 13:05:09.611188363 +0200
++++ b/doc/man/Makefile.am	2018-07-27 13:05:43.981806873 +0200
+@@ -97,6 +97,7 @@
+                           ocf_heartbeat_WinPopup.7 \
+                           ocf_heartbeat_Xen.7 \
+                           ocf_heartbeat_Xinetd.7 \
++                          ocf_heartbeat_ZFS.7 \
+                           ocf_heartbeat_aliyun-vpc-move-ip.7 \
+                           ocf_heartbeat_anything.7 \
+                           ocf_heartbeat_apache.7 \
+@@ -136,6 +137,7 @@
+                           ocf_heartbeat_lxd-info.7 \
+                           ocf_heartbeat_machine-info.7 \
+                           ocf_heartbeat_mariadb.7 \
++                          ocf_heartbeat_minio.7 \
+                           ocf_heartbeat_mysql.7 \
+                           ocf_heartbeat_mysql-proxy.7 \
+                           ocf_heartbeat_nagios.7 \
+@@ -150,6 +152,7 @@
+                           ocf_heartbeat_oracle.7 \
+                           ocf_heartbeat_oralsnr.7 \
+                           ocf_heartbeat_ovsmonitor.7 \
++                          ocf_heartbeat_pgagent.7 \
+                           ocf_heartbeat_pgsql.7 \
+                           ocf_heartbeat_pingd.7 \
+                           ocf_heartbeat_portblock.7 \
+@@ -158,6 +161,7 @@
+                           ocf_heartbeat_proftpd.7 \
+                           ocf_heartbeat_rabbitmq-cluster.7 \
+                           ocf_heartbeat_redis.7 \
++                          ocf_heartbeat_rkt.7 \
+                           ocf_heartbeat_rsyncd.7 \
+                           ocf_heartbeat_rsyslog.7 \
+                           ocf_heartbeat_scsi2reservation.7 \
+@@ -172,6 +176,7 @@
+                           ocf_heartbeat_varnish.7 \
+                           ocf_heartbeat_vdo-vol.7 \
+                           ocf_heartbeat_vmware.7 \
++                          ocf_heartbeat_vsftpd.7 \
+                           ocf_heartbeat_zabbixserver.7
+ 
+ if USE_IPV6ADDR_AGENT
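Note: omissions like these creep in because each new agent needs a matching man_MANS entry by hand. A hedged one-liner to spot agents with no man page entry, assuming it is run from the top of a resource-agents source tree:

    for f in heartbeat/*; do
        b=$(basename "$f")
        grep -q "ocf_heartbeat_${b}\.7" doc/man/Makefile.am || echo "no man page entry: $b"
    done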
diff --git a/SOURCES/bz1552330-vdo-vol.patch b/SOURCES/bz1552330-vdo-vol.patch
new file mode 100644
index 0000000..2aa093d
--- /dev/null
+++ b/SOURCES/bz1552330-vdo-vol.patch
@@ -0,0 +1,285 @@
+From 8b07d095acbbb1069c1fb44142ccfdd0aeed075f Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 16 May 2018 14:10:49 +0200
+Subject: [PATCH] vdo-vol: new resource agent
+
+---
+ doc/man/Makefile.am   |   3 +-
+ heartbeat/Makefile.am |   1 +
+ heartbeat/vdo-vol     | 234 ++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 237 insertions(+), 1 deletion(-)
+ create mode 100755 heartbeat/vdo-vol
+
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index c59126d1..8d94c10c 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -158,11 +158,12 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_slapd.7 \
+                           ocf_heartbeat_sybaseASE.7 \
+                           ocf_heartbeat_sg_persist.7 \
+-			  ocf_heartbeat_mpathpersist.7 \
++                          ocf_heartbeat_mpathpersist.7 \
+                           ocf_heartbeat_symlink.7 \
+                           ocf_heartbeat_syslog-ng.7 \
+                           ocf_heartbeat_tomcat.7 \
+                           ocf_heartbeat_varnish.7 \
++                          ocf_heartbeat_vdo-vol.7 \
+                           ocf_heartbeat_vmware.7 \
+                           ocf_heartbeat_zabbixserver.7
+ 
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index 4f5059e2..a68fa978 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -162,6 +162,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
+ 			syslog-ng		\
+ 			tomcat			\
+ 			varnish			\
++			vdo-vol			\
+ 			vmware			\
+ 			vsftpd			\
+ 			zabbixserver
+diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol
+new file mode 100755
+index 00000000..074339db
+--- /dev/null
++++ b/heartbeat/vdo-vol
+@@ -0,0 +1,234 @@
++#!/bin/sh
++#
++#  License:      GNU General Public License (GPL)
++#  (c) 2018 O. Albrigtsen
++#           and Linux-HA contributors
++#
++# -----------------------------------------------------------------------------
++#      O C F    R E S O U R C E    S C R I P T   S P E C I F I C A T I O N
++# -----------------------------------------------------------------------------
++#
++# NAME
++#       vdo-vol : OCF resource agent script for VDO (Virtual Data Optimizer)
++#
++
++# Initialization:
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++# Defaults
++OCF_RESKEY_volume_default=""
++
++: ${OCF_RESKEY_volume=${OCF_RESKEY_volume_default}}
++
++
++vdo_usage() {
++  cat <<END
++    usage: $0 (start|stop|validate-all|meta-data|help|usage|monitor)
++    $0 manages VDO (Virtual Data Optimizer) volume(s) as an OCF HA resource.
++    The 'start' operation starts the instance.
++    The 'stop' operation stops the instance.
++    The 'status' operation reports whether the instance is running.
++    The 'monitor' operation reports whether the instance seems to be working.
++    The 'validate-all' operation reports whether the parameters are valid.
++END
++}
++
++vdo_meta_data() {
++        cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="vdo-vol">
++<version>0.75</version>
++
++<longdesc lang="en">
++OCF Resource script for VDO (Virtual Data Optimizer) volume(s). It manages VDO volume(s) as a HA resource.
++
++The configuration file needs to be synced to all nodes, and the systemd vdo service must be disabled when
++using this agent.
++</longdesc>
++<shortdesc lang="en">VDO resource agent</shortdesc>
++
++<parameters>
++
++<parameter name="config">
++    <longdesc lang="en">Configuration file</longdesc>
++    <shortdesc lang="en">Config file</shortdesc>
++    <content type="string" default="${OCF_RESKEY_config_default}" />
++</parameter>
++
++<parameter name="volume">
++    <longdesc lang="en">VDO Volume (leave empty for all)</longdesc>
++    <shortdesc lang="en">Volume (empty for all)</shortdesc>
++    <content type="string" default="${OCF_RESKEY_volume_default}" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start" timeout="60s" />
++<action name="stop" timeout="20s" />
++<action name="status" timeout="20s" />
++<action name="monitor" depth="0" timeout="20s" interval="10s" start-delay="10s" />
++<action name="validate-all" timeout="20s" />
++<action name="meta-data" timeout="20s" />
++</actions>
++</resource-agent>
++END
++}
++
++
++rebuild() {
++		ocf_log warn "${OCF_RESKEY_volume} is in $MODE mode, starting in rebuild mode"
++
++		vdo stop $OPTIONS
++
++		while vdo_monitor skiprocheck; do
++			sleep 1
++		done
++
++		vdo start $OPTIONS --forceRebuild
++
++		while ! vdo_monitor; do
++			sleep 1
++		done
++
++		return $?
++}
++
++vdo_start() {
++	# If the resource is already running, there is no need to continue.
++	if vdo_monitor; then
++		ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} is already active"
++		return $OCF_SUCCESS
++	fi
++
++	vdo activate $OPTIONS
++	vdo start $OPTIONS
++
++	while ! vdo_monitor skiprocheck; do
++		sleep 1
++	done
++
++	MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')
++	if [ $(echo "$MODE" | grep -v "normal" | wc -l) -gt 0 ]; then
++		rebuild
++	fi
++
++	if [ $? -eq $OCF_SUCCESS ]; then
++		ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} activated"
++		return ${OCF_SUCCESS}
++	fi
++
++	return $?
++}
++
++vdo_stop() {
++	vdo_monitor skiprocheck
++	if [ $? -ne $OCF_SUCCESS ]; then
++		# Currently not running. Nothing to do.
++		ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} already deactivated"
++
++		return $OCF_SUCCESS
++	fi
++
++	vdo stop $OPTIONS
++	vdo deactivate $OPTIONS
++
++	# Wait for process to stop
++	while vdo_monitor skiprocheck; do
++		sleep 1
++	done
++
++	return $OCF_SUCCESS
++}
++
++vdo_monitor(){
++	status=$(vdo status $OPTIONS 2>&1)
++	MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')
++
++	case "$status" in
++		*"Device mapper status: not available"*)
++			return $OCF_NOT_RUNNING
++			;;
++		*"Device mapper status: "*online*)
++			if [ "$MODE" = "read-only" ] && [ "$1" != "skiprocheck" ]; then
++				ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} is in $MODE mode."
++				return $OCF_ERR_GENERIC
++			else
++				return $OCF_SUCCESS
++			fi
++			;;
++		*)
++			ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} failed\n$status"
++			return $OCF_ERR_GENERIC;;
++	esac
++}
++
++vdo_validate_all(){
++	check_binary "vdo"
++
++	if systemctl is-enabled vdo > /dev/null 2>&1; then
++		ocf_exit_reason "systemd service vdo needs to be disabled"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
++	if [ -n "${OCF_RESKEY_config}" ] && [ ! -f "${OCF_RESKEY_config}" ]; then
++		ocf_exit_reason "Configuration file: ${OCF_RESKEY_config} not found"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
++	return $OCF_SUCCESS
++}
++
++
++# **************************** MAIN SCRIPT ************************************
++
++# Make sure meta-data and usage always succeed
++case $__OCF_ACTION in
++	meta-data)
++		vdo_meta_data
++		exit $OCF_SUCCESS
++		;;
++	usage|help)
++		vdo_usage
++		exit $OCF_SUCCESS
++		;;
++esac
++
++# This OCF agent script needs to be run as the root user.
++if ! ocf_is_root; then
++        echo "$0 agent script needs to be run as the root user."
++        ocf_log debug "$0 agent script needs to be run as the root user."
++        exit $OCF_ERR_GENERIC
++fi
++
++if [ -z "${OCF_RESKEY_volume}" ]; then
++	OPTIONS="-a"
++else
++	OPTIONS="-n ${OCF_RESKEY_volume}"
++fi
++
++if [ -n "${OCF_RESKEY_config}" ]; then
++	OPTIONS="$OPTIONS -f ${OCF_RESKEY_config}"
++fi
++
++# Translate each action into the appropriate function call
++case $__OCF_ACTION in
++	start)
++		vdo_validate_all
++		vdo_start;;
++	stop)
++		vdo_stop;;
++	status|monitor)
++		vdo_monitor;;
++	validate-all)
++		;;
++	*)
++		vdo_usage
++                exit $OCF_ERR_UNIMPLEMENTED;;
++esac
++
++exit $?
++
++# End of this script
+-- 
+2.17.1
+
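Note: the rebuild path in the agent above hinges on the "operating mode" field that vdostats reports per volume; anything other than normal triggers a stop followed by a --forceRebuild start. A sketch of the mode probe, with vdo_vol as an illustrative volume name:

    MODE=$(vdostats --verbose vdo_vol | grep "operating mode" | awk '{print $NF}')
    if [ "$MODE" != "normal" ]; then
        echo "volume is in $MODE mode; the agent would stop and force-rebuild"
    fi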
diff --git a/SOURCES/bz1607607-podman.patch b/SOURCES/bz1607607-podman.patch
new file mode 100644
index 0000000..572e761
--- /dev/null
+++ b/SOURCES/bz1607607-podman.patch
@@ -0,0 +1,538 @@
+From 07d283a6e20b8e559257c9694f7e36e155075014 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Sun, 22 Jul 2018 17:54:29 +0200
+Subject: [PATCH] Initial podman support
+
+Tested with the following container:
+     podman container set: test_bundle [docker.io/sdelrio/docker-minimal-nginx]
+       test_bundle-podman-0 (ocf::heartbeat:podman):        Started nodea
+       test_bundle-podman-1 (ocf::heartbeat:podman):        Started nodeb
+       test_bundle-podman-2 (ocf::heartbeat:podman):        Started nodec
+
+Tested a couple of stop/start cycles successfully. Needs the
+corresponding pacemaker support https://github.com/ClusterLabs/pacemaker/pull/1564
+---
+ doc/man/Makefile.am   |   1 +
+ heartbeat/Makefile.am |   1 +
+ heartbeat/podman      | 488 ++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 490 insertions(+)
+ create mode 100755 heartbeat/podman
+
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index 145e5fd50..0bef88740 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -151,6 +151,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_pgagent.7 \
+                           ocf_heartbeat_pgsql.7 \
+                           ocf_heartbeat_pingd.7 \
++                          ocf_heartbeat_podman.7 \
+                           ocf_heartbeat_portblock.7 \
+                           ocf_heartbeat_postfix.7 \
+                           ocf_heartbeat_pound.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index e7a3a4fac..993bff042 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -146,6 +146,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
+ 			pgagent			\
+ 			pgsql			\
+ 			pingd			\
++			podman			\
+ 			portblock		\
+ 			postfix			\
+ 			pound			\
+diff --git a/heartbeat/podman b/heartbeat/podman
+new file mode 100755
+index 000000000..88475f1df
+--- /dev/null
++++ b/heartbeat/podman
+@@ -0,0 +1,488 @@
++#!/bin/sh
++#
++# The podman HA resource agent creates and launches a podman container
++# based off a supplied podman image. Containers managed by this agent
++# are both created and removed upon the agent's start and stop actions.
++#
++# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
++#                    Michele Baldessari <michele@acksyn.org>
++#                    All Rights Reserved.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of version 2 of the GNU General Public License as
++# published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it would be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++#
++# Further, this software is distributed without any warranty that it is
++# free of the rightful claim of any third person regarding infringement
++# or the like.  Any license provided herein, whether implied or
++# otherwise, applies only to this software file.  Patent licenses, if
++# any, provided herein do not apply to combinations of this program with
++# other software, or any other product whatsoever.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write the Free Software Foundation,
++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++#
++
++#######################################################################
++# Initialization:
++
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++#######################################################################
++
++meta_data()
++{
++	cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="podman">
++<version>1.0</version>
++
++<longdesc lang="en">
++The podman HA resource agent creates and launches a podman container
++based off a supplied podman image. Containers managed by this agent
++are both created and removed upon the agent's start and stop actions.
++</longdesc>
++<shortdesc lang="en">Podman container resource agent.</shortdesc>
++
++<parameters>
++<parameter name="image" required="1" unique="0">
++<longdesc lang="en">
++The podman image to base this container off of.
++</longdesc>
++<shortdesc lang="en">podman image</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="name" required="0" unique="0">
++<longdesc lang="en">
++The name to give the created container. By default this will
++be that resource's instance name.
++</longdesc>
++<shortdesc lang="en">podman container name</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="allow_pull" unique="0">
++<longdesc lang="en">
++Allow the image to be pulled from the configured podman registry when
++the image does not exist locally. NOTE, this can drastically increase
++the time required to start the container if the image repository is
++pulled over the network.
++</longdesc>
++<shortdesc lang="en">Allow pulling non-local images</shortdesc>
++<content type="boolean"/>
++</parameter>
++
++<parameter name="run_opts" required="0" unique="0">
++<longdesc lang="en">
++Add options to be appended to the 'podman run' command which is used
++when creating the container during the start action. This option allows
++users to do things such as setting a custom entry point and injecting
++environment variables into the newly created container. Note the '-d'
++option is supplied regardless of this value to force containers to run
++in the background.
++
++NOTE: Do not explicitly specify the --name argument in the run_opts. This
++agent will set --name using either the resource's instance name or the name
++provided in the 'name' argument of this agent.
++
++</longdesc>
++<shortdesc lang="en">run options</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="run_cmd" required="0" unique="0">
++<longdesc lang="en">
++Specify a command to launch within the container once
++it has initialized.
++</longdesc>
++<shortdesc lang="en">run command</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="mount_points" required="0" unique="0">
++<longdesc lang="en">
++A comma separated list of directories that the container is expecting to use.
++The agent will ensure they exist by running 'mkdir -p'
++</longdesc>
++<shortdesc lang="en">Required mount points</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="monitor_cmd" required="0" unique="0">
++<longdesc lang="en">
++Specify the full path of a command to launch within the container to check
++the health of the container. This command must return 0 to indicate that
++the container is healthy. A non-zero return code will indicate that the
++container has failed and should be recovered.
++
++If 'podman exec' is supported, it is used to execute the command. If not,
++nsenter is used.
++
++Note: Using this method for monitoring processes inside a container
++is not recommended, as containerd tries to track processes running
++inside the container and does not deal well with many short-lived
++processes being spawned. Ensure that your container monitors its
++own processes and terminates on fatal error rather than invoking
++a command from the outside.
++</longdesc>
++<shortdesc lang="en">monitor command</shortdesc>
++<content type="string"/>
++</parameter>
++
++<parameter name="force_kill" required="0" unique="0">
++<longdesc lang="en">
++Kill a container immediately rather than waiting for it to gracefully
++shut down.
++</longdesc>
++<shortdesc lang="en">force kill</shortdesc>
++<content type="boolean"/>
++</parameter>
++
++<parameter name="reuse" required="0" unique="0">
++<longdesc lang="en">
++Allow the container to be reused after it stops. By default containers
++are removed after stop. With the reuse option enabled, containers will
++persist after they stop.
++</longdesc>
++<shortdesc lang="en">reuse container</shortdesc>
++<content type="boolean"/>
++</parameter>
++</parameters>
++
++<actions>
++<action name="start"        timeout="90s" />
++<action name="stop"         timeout="90s" />
++<action name="monitor"      timeout="30s" interval="30s" depth="0" />
++<action name="meta-data"    timeout="5s" />
++<action name="validate-all"   timeout="30s" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++REQUIRE_IMAGE_PULL=0
++
++podman_usage()
++{
++	cat <<END
++usage: $0 {start|stop|monitor|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++
++monitor_cmd_exec()
++{
++	local rc=$OCF_SUCCESS
++	local out
++
++	if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
++		return $rc
++	fi
++
++	if podman exec --help >/dev/null 2>&1; then
++		out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
++		rc=$?
++	else
++		out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1)
++		rc=$?
++	fi
++
++	if [ $rc -eq 127 ]; then
++		ocf_log err "monitor cmd failed (rc=$rc), output: $out"
++		ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
++		# there is no recovering from this, exit immediately
++		exit $OCF_ERR_ARGS
++	elif [ $rc -ne 0 ]; then
++		ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
++		rc=$OCF_ERR_GENERIC
++	else
++		ocf_log debug "monitor cmd passed: exit code = $rc"
++	fi
++
++	return $rc
++}
++
++container_exists()
++{
++	podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1
++}
++
++remove_container()
++{
++	if ocf_is_true "$OCF_RESKEY_reuse"; then
++		# never remove the container if we have reuse enabled.
++		return 0
++	fi
++
++	container_exists
++	if [ $? -ne 0 ]; then
++		# don't attempt to remove a container that doesn't exist
++		return 0
++	fi
++	ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
++	ocf_run podman rm $CONTAINER
++}
++
++podman_simple_status()
++{
++	local val
++
++	container_exists
++	if [ $? -ne 0 ]; then
++		return $OCF_NOT_RUNNING
++	fi
++
++	# retrieve the 'Running' attribute for the container
++	val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
++	if [ $? -ne 0 ]; then
++		#not running as a result of container not being found
++		return $OCF_NOT_RUNNING
++	fi
++
++	if ocf_is_true "$val"; then
++		# container exists and is running
++		return $OCF_SUCCESS
++	fi
++
++	return $OCF_NOT_RUNNING
++}
++
++podman_monitor()
++{
++	local rc=0
++
++	podman_simple_status
++	rc=$?
++
++	if [ $rc -ne 0 ]; then
++		return $rc
++	fi
++
++	monitor_cmd_exec
++}
++
++podman_create_mounts() {
++	oldIFS="$IFS"
++	IFS=","
++	for directory in $OCF_RESKEY_mount_points; do
++		mkdir -p "$directory"
++	done
++	IFS="$oldIFS"
++}
++
++podman_start()
++{
++	podman_create_mounts
++	local run_opts="-d --name=${CONTAINER}"
++	# check to see if the container has already started
++	podman_simple_status
++	if [ $? -eq $OCF_SUCCESS ]; then
++		return $OCF_SUCCESS
++	fi
++
++	if [ -n "$OCF_RESKEY_run_opts" ]; then
++		run_opts="$run_opts $OCF_RESKEY_run_opts"
++	fi
++
++	if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then
++		ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}"
++		podman pull "${OCF_RESKEY_image}"
++		if [ $? -ne 0 ]; then
++			ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}"
++			return $OCF_ERR_GENERIC
++		fi
++	fi
++
++	if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then
++		ocf_log info "starting existing container $CONTAINER."
++		ocf_run podman start $CONTAINER
++	else
++		# make sure any previous container matching our container name is cleaned up first.
++		# we already know at this point it wouldn't be running
++		remove_container
++		ocf_log info "running container $CONTAINER for the first time"
++		ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd
++	fi
++
++	if [ $? -ne 0 ]; then
++		ocf_exit_reason "podman failed to launch container"
++		return $OCF_ERR_GENERIC
++	fi
++
++
++	# wait for monitor to pass before declaring that the container is started
++	while true; do
++		podman_simple_status
++		if [ $? -ne $OCF_SUCCESS ]; then
++			ocf_exit_reason "Newly created podman container exited after start"
++			return $OCF_ERR_GENERIC
++		fi
++
++		monitor_cmd_exec
++		if [ $? -eq $OCF_SUCCESS ]; then
++			ocf_log notice "Container $CONTAINER started successfully"
++			return $OCF_SUCCESS
++		fi
++
++		ocf_exit_reason "waiting on monitor_cmd to pass after start"
++		sleep 1
++	done
++}
++
++podman_stop()
++{
++	local timeout=60
++	podman_simple_status
++	if [ $? -eq  $OCF_NOT_RUNNING ]; then
++		remove_container
++		return $OCF_SUCCESS
++	fi
++
++	if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
++		timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 ))
++		if [ $timeout -lt 10 ]; then
++			timeout=10
++		fi
++	fi
++
++	if ocf_is_true "$OCF_RESKEY_force_kill"; then
++		ocf_run podman kill $CONTAINER
++	else
++		ocf_log debug "waiting $timeout second[s] before killing container"
++		ocf_run podman stop -t=$timeout $CONTAINER
++	fi
++
++	if [ $? -ne 0 ]; then
++		ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
++		return $OCF_ERR_GENERIC
++	fi
++
++	remove_container
++	if [ $? -ne 0 ]; then
++		ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
++		return $OCF_ERR_GENERIC
++	fi
++
++	return $OCF_SUCCESS
++}
++
++image_exists()
++{
++	# if no tag was specified, use default "latest"
++	local COLON_FOUND=0
++	local SLASH_FOUND=0
++	local SERVER_NAME=""
++	local IMAGE_NAME="${OCF_RESKEY_image}"
++	local IMAGE_TAG="latest"
++
++	SLASH_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o '/' | grep -c .)"
++
++	if [ ${SLASH_FOUND} -ge 1 ]; then
++		SERVER_NAME="$(echo ${IMAGE_NAME} | cut -d / -f 1-${SLASH_FOUND})"
++		IMAGE_NAME="$(echo ${IMAGE_NAME} | awk -F'/' '{print $NF}')"
++	fi
++
++	COLON_FOUND="$(echo "${IMAGE_NAME}" | grep -o ':' | grep -c .)"
++	if [ ${COLON_FOUND} -ge 1 ]; then
++		IMAGE_TAG="$(echo ${IMAGE_NAME} | awk -F':' '{print $NF}')"
++		IMAGE_NAME="$(echo ${IMAGE_NAME} | cut -d : -f 1-${COLON_FOUND})"
++	fi
++
++	# IMAGE_NAME might be in one of the following formats:
++	# - image
++	# - repository:port/image
++	# - docker.io/image (some distros display the "docker.io/" prefix)
++	podman images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$"
++	if [ $? -eq 0 ]; then
++		# image found
++		return 0
++	fi
++
++	if ocf_is_true "$OCF_RESKEY_allow_pull"; then
++		REQUIRE_IMAGE_PULL=1
++		ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start"
++		return 0
++	fi
++	# image not found.
++	return 1
++}
++
++podman_validate()
++{
++	check_binary podman
++	if [ -z "$OCF_RESKEY_image" ]; then
++		ocf_exit_reason "'image' option is required"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
++	if [ -n "$OCF_RESKEY_monitor_cmd" ]; then
++		podman exec --help >/dev/null 2>&1
++		if [ $? -ne 0 ]; then
++			ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified"
++			check_binary nsenter
++		fi
++	fi
++
++	image_exists
++	if [ $? -ne 0 ]; then
++		ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found."
++		exit $OCF_ERR_CONFIGURED
++	fi
++
++	return $OCF_SUCCESS
++}
++
++# TODO:
++# When globally-unique clones are started on one node, the user cannot specify a separate name parameter for each clone.
++# When reuse is enabled, the agent cannot associate multiple clones with a single container.
++
++if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then
++	if [ -n "$OCF_RESKEY_name" ]; then
++		if [ -n "$OCF_RESKEY_CRM_meta_clone_node_max" ] && [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ]
++		then
++			ocf_exit_reason "Cannot run multiple clones with the same name parameter."
++			exit $OCF_ERR_CONFIGURED
++		fi
++		if [ -n "$OCF_RESKEY_CRM_meta_master_node_max" ] && [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ]
++		then
++			ocf_exit_reason "Cannot run multiple masters with the same name parameter."
++			exit $OCF_ERR_CONFIGURED
++		fi
++	fi
++	: ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`}
++else
++	: ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}}
++fi
++
++CONTAINER=$OCF_RESKEY_name
++
++case $__OCF_ACTION in
++meta-data) meta_data
++		exit $OCF_SUCCESS;;
++start)
++	podman_validate
++	podman_start;;
++stop)		podman_stop;;
++monitor)	podman_monitor;;
++validate-all)	podman_validate;;
++usage|help)	podman_usage
++		exit $OCF_SUCCESS
++		;;
++*)		podman_usage
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
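For orientation, the agent's monitor is a two-level check: `podman inspect` first confirms the container's Running state, then the optional monitor_cmd is run inside it. A standalone sketch under an assumed container name (web):

    # level 1: is the container running at all?
    CONTAINER=web
    if [ "$(podman inspect --format '{{.State.Running}}' "$CONTAINER" 2>/dev/null)" = "true" ]; then
        # level 2: optional in-container health command (stands in for monitor_cmd)
        podman exec "$CONTAINER" /bin/true
        echo "monitor rc=$?"
    else
        echo "not running"
    fi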
diff --git a/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch
new file mode 100644
index 0000000..850c318
--- /dev/null
+++ b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch
@@ -0,0 +1,48 @@
+From c70924b69af760ec3762b01594afb6ff82c3820c Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 19 Sep 2018 16:13:43 +0200
+Subject: [PATCH] systemd-tmpfiles: configure path with --with-rsctmpdir
+
+---
+ configure.ac                    | 3 ++-
+ systemd/resource-agents.conf    | 1 -
+ systemd/resource-agents.conf.in | 1 +
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+ delete mode 100644 systemd/resource-agents.conf
+ create mode 100644 systemd/resource-agents.conf.in
+
+diff --git a/configure.ac b/configure.ac
+index b7ffb99f3..e34d125e9 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -431,7 +431,7 @@ AC_SUBST(HA_VARRUNDIR)
+ 
+ # Expand $prefix
+ eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`"
+-AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resouce agents keep state files)
++AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files)
+ AC_SUBST(HA_RSCTMPDIR)
+ 
+ dnl Eventually move out of the heartbeat dir tree and create symlinks when needed
+@@ -911,6 +911,7 @@ heartbeat/Makefile						\
+    heartbeat/ocf-shellfuncs					\
+    heartbeat/shellfuncs						\
+ systemd/Makefile						\
++   systemd/resource-agents.conf					\
+ tools/Makefile							\
+    tools/ocf-tester						\
+    tools/ocft/Makefile						\
+diff --git a/systemd/resource-agents.conf b/systemd/resource-agents.conf
+deleted file mode 100644
+index 1cb129c18..000000000
+--- a/systemd/resource-agents.conf
++++ /dev/null
+@@ -1 +0,0 @@
+-d /var/run/resource-agents/ 1755 root root
+diff --git a/systemd/resource-agents.conf.in b/systemd/resource-agents.conf.in
+new file mode 100644
+index 000000000..7bd157ec0
+--- /dev/null
++++ b/systemd/resource-agents.conf.in
+@@ -0,0 +1 @@
++d @HA_RSCTMPDIR@ 1755 root root
diff --git a/SOURCES/bz1635785-redis-pidof-basename.patch b/SOURCES/bz1635785-redis-pidof-basename.patch
new file mode 100644
index 0000000..32c57eb
--- /dev/null
+++ b/SOURCES/bz1635785-redis-pidof-basename.patch
@@ -0,0 +1,61 @@
+From 2462caf264c487810805c40a546a4dc3f953c340 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 3 Oct 2018 18:07:31 +0200
+Subject: [PATCH] Do not use the absolute path in redis' pidof calls
+
+The reason for this is that newer kernels (we saw this on a 4.18 kernel)
+can limit access to /proc/<pid>/{cwd,exe,root} and so pidof will fail to
+identify the process when using the full path names.
+This access limitation happens even with the root user:
+()[root@ra1 /]$ ls -l /proc/32/ |grep redis-server
+ls: cannot read symbolic link '/proc/32/cwd': Permission denied
+ls: cannot read symbolic link '/proc/32/root': Permission denied
+ls: cannot read symbolic link '/proc/32/exe': Permission denied
+
+For this reason the 'pidof /usr/bin/redis-server' calls will fail
+when running inside containers that have this kernel protection
+mechanism.
+
+We tested this change and successfully obtained a running redis cluster:
+ podman container set: redis-bundle [192.168.222.1:5000/redis:latest]
+   Replica[0]
+      redis-bundle-podman-0     (ocf::heartbeat:podman):        Started ra1
+      redis-bundle-0    (ocf::pacemaker:remote):        Started ra1
+      redis     (ocf::heartbeat:redis): Master redis-bundle-0
+   Replica[1]
+      redis-bundle-podman-1     (ocf::heartbeat:podman):        Started ra2
+      redis-bundle-1    (ocf::pacemaker:remote):        Started ra2
+      redis     (ocf::heartbeat:redis): Slave redis-bundle-1
+   Replica[2]
+      redis-bundle-podman-2     (ocf::heartbeat:podman):        Started ra3
+      redis-bundle-2    (ocf::pacemaker:remote):        Started ra3
+      redis     (ocf::heartbeat:redis): Slave redis-bundle-2
+
+Signed-off-By: Damien Ciabrini <dciabrin@redhat.com>
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/redis.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/redis.in b/heartbeat/redis.in
+index ddc62d8a7..1dff067e9 100644
+--- a/heartbeat/redis.in
++++ b/heartbeat/redis.in
+@@ -316,7 +316,7 @@ simple_status() {
+ 	fi
+ 
+ 	pid="$(<"$REDIS_PIDFILE")"
+-	pidof "$REDIS_SERVER" | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING
++	pidof $(basename "$REDIS_SERVER") | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING
+ 
+ 	ocf_log debug "monitor: redis-server running under pid $pid"
+ 
+@@ -465,7 +465,7 @@ redis_start() {
+ 			break
+ 		elif (( info[loading] == 1 )); then
+ 			sleep "${info[loading_eta_seconds]}"
+-		elif pidof "$REDIS_SERVER" >/dev/null; then
++		elif pidof $(basename "$REDIS_SERVER") >/dev/null; then
+ 			# unknown error, but the process still exists.
+ 			# This check is mainly because redis daemonizes before it starts listening, causing `redis-cli` to fail
+ 			#   See https://github.com/antirez/redis/issues/2368
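The effect of the fix can be reproduced outside the agent: on a kernel that restricts /proc/<pid>/exe, the full-path form may find nothing while the basename form still matches (a one-off shell check, not part of the agent):

    pidof /usr/bin/redis-server    # resolves PIDs via /proc/<pid>/exe, may fail under the restriction
    pidof redis-server             # matches on the process name, unaffected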
diff --git a/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch
new file mode 100644
index 0000000..838a8aa
--- /dev/null
+++ b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch
@@ -0,0 +1,43 @@
+diff -uNr a/heartbeat/nfsserver b/heartbeat/nfsserver
+--- a/heartbeat/nfsserver	2018-10-10 17:02:47.873199077 +0200
++++ b/heartbeat/nfsserver	2018-10-11 15:24:41.782048475 +0200
+@@ -402,7 +402,6 @@
+ 		return
+ 	fi
+ 
+-	[ -d "$fp" ] || mkdir -p $fp
+ 	[ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir
+ 	[ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery
+ 
+@@ -437,10 +436,21 @@
+ 		return
+ 	fi
+ 
++	[ -d "$fp" ] || mkdir -p $fp
++
+ 	if is_bound /var/lib/nfs; then
+ 		ocf_log debug "$fp is already bound to /var/lib/nfs"
+ 		return 0
+ 	fi
++
++	case $EXEC_MODE in
++	  [23]) if nfs_exec status var-lib-nfs-rpc_pipefs.mount > /dev/null 2>&1; then
++			ocf_log debug "/var/lib/nfs/rpc_pipefs already mounted. Unmounting in preparation to bind mount nfs dir"
++			systemctl stop var-lib-nfs-rpc_pipefs.mount
++		fi
++		;;
++	esac
++
+ 	mount --bind $fp /var/lib/nfs
+ 	[ $SELINUX_ENABLED -eq 0 ] && restorecon /var/lib/nfs
+ }
+@@ -612,8 +622,8 @@
+ 	fi
+ 
+ 	is_redhat_based && set_env_args
+-	prepare_directory
+ 	bind_tree
++	prepare_directory
+ 
+ 	if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
+ 		mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
diff --git a/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch
new file mode 100644
index 0000000..0b7d485
--- /dev/null
+++ b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch
@@ -0,0 +1,24 @@
+From 848d62c32b355a03c2ad8d246eb3e34b04af07ca Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 9 Jan 2019 16:49:41 +0100
+Subject: [PATCH] LVM-activate: dont fail initial probe
+
+---
+ heartbeat/LVM-activate | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..49ab717a3 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -323,6 +323,10 @@ lvmlockd_check()
+ 
+ 	# Good: lvmlockd is running, and clvmd is not running
+ 	if ! pgrep lvmlockd >/dev/null 2>&1 ; then
++		if ocf_is_probe; then
++			exit $OCF_NOT_RUNNING
++		fi
++
+ 		ocf_exit_reason "lvmlockd daemon is not running!"
+ 		exit $OCF_ERR_CONFIGURED
+ 	fi
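The pattern applied here is the standard OCF probe convention: a probe (the initial monitor pacemaker issues before ever starting the resource) should report "not running" rather than a configuration error, since the daemons are legitimately absent at that point. The shape of the check, isolated:

    # inside a monitor-path helper: soften the error during probes only
    if ! pgrep lvmlockd >/dev/null 2>&1; then
        if ocf_is_probe; then
            exit $OCF_NOT_RUNNING      # probe: absence is expected
        fi
        ocf_exit_reason "lvmlockd daemon is not running!"
        exit $OCF_ERR_CONFIGURED       # real monitor: fatal
    fi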
diff --git a/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch
new file mode 100644
index 0000000..5b975a1
--- /dev/null
+++ b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch
@@ -0,0 +1,27 @@
+From 4f122cd0cf46c1fdc1badb22049607a6abf0c885 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 4 Feb 2019 17:04:59 +0100
+Subject: [PATCH] LVM-activate: only check locking_type when LVM < v2.03
+
+---
+ heartbeat/LVM-activate | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index c2239d881..3c462c75c 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -311,7 +311,12 @@ config_verify()
+ lvmlockd_check()
+ {
+ 	config_verify "global/use_lvmlockd" "1"
+-	config_verify "global/locking_type" "1"
++
++	# locking_type was removed from config in v2.03
++	ocf_version_cmp "$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')" "2.03"
++	if [ "$?" -eq 0 ]; then
++		config_verify "global/locking_type" "1"
++	fi
+ 
+ 	# We recommend to activate one LV at a time so that this specific volume
+ 	# binds to a proper filesystem to protect the data
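The version gate relies on ocf_version_cmp from ocf-shellfuncs, which returns 0 when the first version is lower than the second. A sketch of the comparison in isolation (assuming `lvmconfig --version` prints a line like "LVM version: 2.02.183(2) ..."):

    lvm_ver=$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')
    if ocf_version_cmp "$lvm_ver" "2.03"; then
        echo "$lvm_ver is older than 2.03: locking_type still exists, verify it"
    else
        echo "$lvm_ver is 2.03 or newer: locking_type was removed, skip the check"
    fi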
diff --git a/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch
new file mode 100644
index 0000000..58b13ce
--- /dev/null
+++ b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch
@@ -0,0 +1,12 @@
+diff -uNr a/heartbeat/vdo-vol b/heartbeat/vdo-vol
+--- a/heartbeat/vdo-vol	2018-11-07 09:11:23.037835110 +0100
++++ b/heartbeat/vdo-vol	2018-11-07 09:12:41.322373901 +0100
+@@ -145,7 +145,7 @@
+ 
+ vdo_monitor(){
+ 	status=$(vdo status $OPTIONS 2>&1)
+-	MODE=$(vdostats vdo_vol --verbose | grep "operating mode" | awk '{print $NF}')
++	MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')
+ 
+ 	case "$status" in
+ 		*"Device mapper status: not available"*)
diff --git a/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch
new file mode 100644
index 0000000..571196b
--- /dev/null
+++ b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch
@@ -0,0 +1,59 @@
+From b42ef7555de86cc29d165ae17682c223bfb23b6e Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 5 Nov 2018 16:38:01 +0100
+Subject: [PATCH 1/2] tomcat: use systemd on RHEL when catalina.sh is
+ unavailable
+
+---
+ heartbeat/tomcat | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/tomcat b/heartbeat/tomcat
+index 4812a0133..833870038 100755
+--- a/heartbeat/tomcat
++++ b/heartbeat/tomcat
+@@ -613,7 +613,6 @@ TOMCAT_NAME="${OCF_RESKEY_tomcat_name-tomcat}"
+ TOMCAT_CONSOLE="${OCF_RESKEY_script_log-/var/log/$TOMCAT_NAME.log}"
+ RESOURCE_TOMCAT_USER="${OCF_RESKEY_tomcat_user-root}"
+ RESOURCE_STATUSURL="${OCF_RESKEY_statusurl-http://127.0.0.1:8080}"
+-OCF_RESKEY_force_systemd_default=0
+ 
+ JAVA_HOME="${OCF_RESKEY_java_home}"
+ JAVA_OPTS="${OCF_RESKEY_java_opts}"
+@@ -630,6 +629,13 @@ if [ -z "$CATALINA_PID" ]; then
+ 	CATALINA_PID="${HA_RSCTMP}/${TOMCAT_NAME}_tomcatstate/catalina.pid"
+ fi
+ 
++# Only default to true for RedHat systems without catalina.sh
++if [ -e "$CATALINA_HOME/bin/catalina.sh" ] || ! is_redhat_based; then
++	OCF_RESKEY_force_systemd_default=0
++else
++	OCF_RESKEY_force_systemd_default=1
++fi
++
+ MAX_STOP_TIME="${OCF_RESKEY_max_stop_time}"
+ 
+ : ${OCF_RESKEY_force_systemd=${OCF_RESKEY_force_systemd_default}}
+
+From 9cb2b142a9ecb3a2d5a51cdd51b4005f08b9a97b Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 5 Nov 2018 17:09:43 +0100
+Subject: [PATCH 2/2] ocf-distro: add regex for RedHat version
+
+---
+ heartbeat/ocf-distro | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/ocf-distro b/heartbeat/ocf-distro
+index 530ee57ed..f69910c98 100644
+--- a/heartbeat/ocf-distro
++++ b/heartbeat/ocf-distro
+@@ -39,7 +39,7 @@ get_os_ver() {
+ 		VER=$(cat $_DEBIAN_VERSION_FILE)
+ 	elif [ -f $_REDHAT_RELEASE_FILE ]; then
+ 		OS=RedHat  # redhat or similar
+-		VER= # here some complex sed script
++		VER=$(sed "s/.* release \([^ ]\+\).*/\1/" $_REDHAT_RELEASE_FILE)
+ 	else
+ 		OS=$(uname -s)
+ 		VER=$(uname -r)
diff --git a/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch
new file mode 100644
index 0000000..af1974c
--- /dev/null
+++ b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch
@@ -0,0 +1,23 @@
+From 13511f843b2b0fa1b8b306beac041e0855be05a6 Mon Sep 17 00:00:00 2001
+From: Valentin Vidic <Valentin.Vidic@CARNet.hr>
+Date: Tue, 15 Jan 2019 15:45:03 +0100
+Subject: [PATCH] LVM-activate: make vgname not unique
+
+If activating one lvname at a time, vgname will not be unique.
+---
+ heartbeat/LVM-activate | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..bc448c9c1 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -102,7 +102,7 @@ because some DLM lockspaces might be in use and cannot be closed automatically.
+ <shortdesc lang="en">This agent activates/deactivates logical volumes.</shortdesc>
+ 
+ <parameters>
+-<parameter name="vgname" unique="1" required="1">
++<parameter name="vgname" unique="0" required="1">
+ <longdesc lang="en">
+ The volume group name.
+ </longdesc>
diff --git a/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch
new file mode 100644
index 0000000..5911e0e
--- /dev/null
+++ b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch
@@ -0,0 +1,29 @@
+From ee9a47f97dd8b0cb51033db7879a79588aab409c Mon Sep 17 00:00:00 2001
+From: Valentin Vidic <Valentin.Vidic@CARNet.hr>
+Date: Tue, 15 Jan 2019 15:40:01 +0100
+Subject: [PATCH] LVM-activate: fix dmsetup check
+
+When there are no devices in the system dmsetup outputs one line:
+
+  # dmsetup info -c
+  No devices found
+---
+ heartbeat/LVM-activate | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..c3225e1cb 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -715,9 +715,9 @@ lvm_status() {
+ 	if [ -n "${LV}" ]; then
+ 		# dmsetup ls? It cannot accept device name. It's
+ 		# too heavy to list all DM devices.
+-		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" |  wc -l )
++		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found')
+ 	else
+-		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l )
++		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found')
+ 	fi
+ 
+ 	if [ $dm_count -eq 0 ]; then
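The bug is easy to reproduce standalone: when nothing matches the selector, dmsetup prints the single line "No devices found", which `wc -l` dutifully counts as one device (vg0 is a placeholder name):

    dmsetup info --noheadings --noflush -c -S "vgname=vg0" | wc -l
    # -> 1 even with zero matching devices
    dmsetup info --noheadings --noflush -c -S "vgname=vg0" | grep -c -v '^No devices found'
    # -> 0 when nothing matches, which is the count the agent needs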
diff --git a/SOURCES/bz1669140-Route-make-family-parameter-optional.patch b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch
new file mode 100644
index 0000000..81ab09d
--- /dev/null
+++ b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch
@@ -0,0 +1,31 @@
+From d95765aba205ea59dcb99378bed4c6d0593ebdb4 Mon Sep 17 00:00:00 2001
+From: fpicot <francois.picot@homesend.com>
+Date: Fri, 11 Jan 2019 11:38:18 -0500
+Subject: [PATCH] Route: make family parameter optional
+
+---
+ heartbeat/Route | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/Route b/heartbeat/Route
+index 67bdf6bfc..2da58bce1 100755
+--- a/heartbeat/Route
++++ b/heartbeat/Route
+@@ -124,7 +124,7 @@ The routing table to be configured for the route.
+ <content type="string" default="" />
+ </parameter>
+ 
+-<parameter name="family" unique="0" required="1">
++<parameter name="family" unique="0">
+ <longdesc lang="en">
+ The address family to be used for the route
+ ip4      IP version 4
+@@ -132,7 +132,7 @@ ip6      IP version 6
+ detect   Detect from 'destination' address.
+ </longdesc>
+ <shortdesc lang="en">Address Family</shortdesc>
+-<content type="string" default="${OCF_RESKEY_family}" />
++<content type="string" default="${OCF_RESKEY_family_default}" />
+ </parameter>
+ 
+ </parameters>
diff --git a/SOURCES/bz1683548-redis-mute-password-warning.patch b/SOURCES/bz1683548-redis-mute-password-warning.patch
new file mode 100644
index 0000000..b3b89e0
--- /dev/null
+++ b/SOURCES/bz1683548-redis-mute-password-warning.patch
@@ -0,0 +1,62 @@
+From 6303448af77d2ed64c7436a84b30cf7fa4941e19 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 30 Jan 2019 21:36:17 +0100
+Subject: [PATCH] redis: Filter warning from stderr when calling 'redis-cli -a'
+
+In some versions of redis (starting with 4.0.10) we have commits [1] and
+[2] which add a warning on stderr which will be printed out every single
+time a monitor operation takes place:
+
+  foo pacemaker-remoted[57563]:  notice: redis_monitor_20000:1930:stderr
+  [ Warning: Using a password with '-a' option on the command line interface may not be safe. ]
+
+Later on commit [3] (merged with 5.0rc4) was merged which added the option
+'--no-auth-warning' to disable said warning since it broke a bunch of
+scripts [4]. I tried to forcibly either try the command twice (first
+with --no-auth-warning and then without in case of errors) but it is
+impossible to distinguish between error due to missing param and other
+errors.
+
+So instead of inspecting the version of the redis-cli tool and doing the following:
+- >= 5.0.0 use --no-auth-warning all the time
+- >= 4.0.10 & < 5.0.0 filter the problematic line from stderr only
+- else do it like before
+
+We simply filter out from stderr the 'Using a password' message
+unconditionally while making sure we keep stdout just the same.
+
+Tested on a redis 4.0.10 cluster and confirmed that it is working as
+intended.
+
+All this horror and pain is due to the fact that redis does not support
+any other means to pass a password (we could in theory first connect to
+the server and then issue an AUTH command, but that seems even more
+complex and error prone). See [5] for more info (or [6] for extra fun)
+
+[1] https://github.com/antirez/redis/commit/c082221aefbb2a472c7193dbdbb90900256ce1a2
+[2] https://github.com/antirez/redis/commit/ef931ef93e909b4f504e8c6fbed350ed70c1c67c
+[3] https://github.com/antirez/redis/commit/a4ef94d2f71a32f73ce4ebf154580307a144b48f
+[4] https://github.com/antirez/redis/issues/5073
+[5] https://github.com/antirez/redis/issues/3483
+[6] https://github.com/antirez/redis/pull/2413
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/redis.in | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/redis.in b/heartbeat/redis.in
+index 1dff067e9..e257bcc5e 100644
+--- a/heartbeat/redis.in
++++ b/heartbeat/redis.in
+@@ -302,7 +302,9 @@ set_score()
+ redis_client() {
+ 	ocf_log debug "redis_client: '$REDIS_CLIENT' -s '$REDIS_SOCKET' $*"
+ 	if [ -n "$clientpasswd" ]; then
+-		"$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" | sed 's/\r//'
++		# Starting with 4.0.10 there is a warning on stderr when using a pass
++		# Once we stop supporting versions < 5.0.0 we can add --no-auth-warning here
++		("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//'
+ 	else
+ 		"$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//'
+ 	fi
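The file-descriptor dance in that one-liner is worth unpacking: fd 3 temporarily carries the real stdout around the pipe so that only stderr flows through grep, and the filtered stream is put back on fd 2. Isolated, with a stand-in function instead of redis-cli:

    cmd() { echo "real output"; echo "Warning: Using a password ..." >&2; }
    ( cmd 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&- ) 3>&1
    # stdout: "real output" (untouched); stderr: warning line removed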
diff --git a/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch
new file mode 100644
index 0000000..1ebb942
--- /dev/null
+++ b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch
@@ -0,0 +1,70 @@
+From d228d41c61f57f2576dd87aa7be86f9ca26e3059 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Mon, 18 Mar 2019 16:03:14 +0100
+Subject: [PATCH] Squid: fix pid file issue due to new Squid version saving the
+ PID of the parent process instead of the listener child process
+
+---
+ heartbeat/Squid.in | 21 +++++----------------
+ 1 file changed, 5 insertions(+), 16 deletions(-)
+
+diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in
+index a99892d75..0b3c8ea86 100644
+--- a/heartbeat/Squid.in
++++ b/heartbeat/Squid.in
+@@ -96,12 +96,9 @@ for a squid instance managed by this RA.
+ <content type="string" default=""/>
+ </parameter>
+ 
+-<parameter name="squid_pidfile" required="1" unique="1">
+-<longdesc lang="en">
+-This is a required parameter. This parameter specifies a process id file
+-for a squid instance managed by this RA.
+-</longdesc>
+-<shortdesc lang="en">Pidfile</shortdesc>
++<parameter name="squid_pidfile" required="0" unique="1">
++<longdesc lang="en">Deprecated - do not use anymore</longdesc>
++<shortdesc lang="en">deprecated - do not use anymore</shortdesc>
+ <content type="string" default=""/>
+ </parameter>
+ 
+@@ -175,8 +172,8 @@ get_pids()
+ 	# Seek by pattern
+ 	SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN")
+ 
+-	# Seek by pidfile
+-	SQUID_PIDS[1]=$(awk '1{print $1}' $SQUID_PIDFILE 2>/dev/null)
++	# Seek by child process
++	SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]})
+ 
+ 	if [[ -n "${SQUID_PIDS[1]}" ]]; then
+ 		typeset exe
+@@ -306,7 +303,6 @@ stop_squid()
+ 		while true; do
+ 			get_pids
+ 			if is_squid_dead; then
+-				rm -f $SQUID_PIDFILE
+ 				return $OCF_SUCCESS
+ 			fi
+ 			(( lapse_sec = lapse_sec + 1 ))
+@@ -326,7 +322,6 @@ stop_squid()
+ 		kill -KILL ${SQUID_PIDS[0]} ${SQUID_PIDS[2]}
+ 		sleep 1
+ 		if is_squid_dead; then
+-			rm -f $SQUID_PIDFILE
+ 			return $OCF_SUCCESS
+ 		fi
+ 	done
+@@ -389,12 +384,6 @@ if [[ ! -x "$SQUID_EXE" ]]; then
+ 	exit $OCF_ERR_CONFIGURED
+ fi
+ 
+-SQUID_PIDFILE="${OCF_RESKEY_squid_pidfile}"
+-if [[ -z "$SQUID_PIDFILE" ]]; then
+-	ocf_exit_reason "SQUID_PIDFILE is not defined"
+-	exit $OCF_ERR_CONFIGURED
+-fi
+-
+ SQUID_PORT="${OCF_RESKEY_squid_port}"
+ if [[ -z "$SQUID_PORT" ]]; then
+ 	ocf_exit_reason "SQUID_PORT is not defined"
diff --git a/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch
new file mode 100644
index 0000000..bb6a894
--- /dev/null
+++ b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch
@@ -0,0 +1,24 @@
+From e370845f41d39d93f76fa34502d62e2513d5eb73 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 29 May 2019 14:07:46 +0200
+Subject: [PATCH] Squid: dont run pgrep -P without PID
+
+---
+ heartbeat/Squid.in | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in
+index 0b3c8ea86..e62e7ee66 100644
+--- a/heartbeat/Squid.in
++++ b/heartbeat/Squid.in
+@@ -173,7 +173,9 @@ get_pids()
+ 	SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN")
+ 
+ 	# Seek by child process
+-	SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]})
++	if [[ -n "${SQUID_PIDS[0]}" ]]; then
++		SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]})
++	fi
+ 
+ 	if [[ -n "${SQUID_PIDS[1]}" ]]; then
+ 		typeset exe
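The guard matters because pgrep treats a missing argument as a usage error: with SQUID_PIDS[0] empty and unquoted, the child lookup degenerates to `pgrep -P` with nothing after it. Reproduced as a one-off (the squid pattern is assumed):

    parent=""
    pgrep -P $parent                         # expands to `pgrep -P`: usage error, rc=2
    parent=$(pgrep -f squid | head -n1)
    [ -n "$parent" ] && pgrep -P "$parent"   # the guarded form the fix uses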
diff --git a/SOURCES/bz1691456-gcloud-dont-detect-python2.patch b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch
new file mode 100644
index 0000000..9abbd09
--- /dev/null
+++ b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch
@@ -0,0 +1,29 @@
+diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud
+--- a/bundled/gcp/google-cloud-sdk/bin/gcloud	2019-04-04 12:01:28.838027640 +0200
++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud	2019-04-04 12:03:21.577089065 +0200
+@@ -74,24 +74,7 @@
+ 
+ # if CLOUDSDK_PYTHON is empty
+ if [ -z "$CLOUDSDK_PYTHON" ]; then
+-  # if python2 exists then plain python may point to a version != 2
+-  if _cloudsdk_which python2 >/dev/null; then
+-    CLOUDSDK_PYTHON=python2
+-  elif _cloudsdk_which python2.7 >/dev/null; then
+-    # this is what some OS X versions call their built-in Python
+-    CLOUDSDK_PYTHON=python2.7
+-  elif _cloudsdk_which python >/dev/null; then
+-    # Use unversioned python if it exists.
+-    CLOUDSDK_PYTHON=python
+-  elif _cloudsdk_which python3 >/dev/null; then
+-    # We support python3, but only want to default to it if nothing else is
+-    # found.
+-    CLOUDSDK_PYTHON=python3
+-  else
+-    # This won't work because it wasn't found above, but at this point this
+-    # is our best guess for the error message.
+-    CLOUDSDK_PYTHON=python
+-  fi
++  CLOUDSDK_PYTHON="/usr/libexec/platform-python"
+ fi
+ 
+ # $PYTHONHOME can interfere with gcloud. Users should use
diff --git a/SOURCES/bz1692413-iSCSILogicalUnit-create-iqn-when-it-doesnt-exist.patch b/SOURCES/bz1692413-iSCSILogicalUnit-create-iqn-when-it-doesnt-exist.patch
new file mode 100644
index 0000000..d50b231
--- /dev/null
+++ b/SOURCES/bz1692413-iSCSILogicalUnit-create-iqn-when-it-doesnt-exist.patch
@@ -0,0 +1,31 @@
+From 9273b83edf6ee72a59511f307e168813ca3d31fd Mon Sep 17 00:00:00 2001
+From: colttt <shadow_7@gmx.net>
+Date: Fri, 12 Oct 2018 15:29:48 +0200
+Subject: [PATCH] possible fix for #1026
+
+add an if-condition and remove a useless 'targetcli create'
+---
+ heartbeat/iSCSITarget.in | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/iSCSITarget.in b/heartbeat/iSCSITarget.in
+index e49a79016..9128fdc55 100644
+--- a/heartbeat/iSCSITarget.in
++++ b/heartbeat/iSCSITarget.in
+@@ -340,13 +340,13 @@ iSCSITarget_start() {
+ 		ocf_take_lock $TARGETLOCKFILE
+ 		ocf_release_lock_on_exit $TARGETLOCKFILE
+ 		ocf_run targetcli /iscsi set global auto_add_default_portal=false || exit $OCF_ERR_GENERIC
+-		ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
++		if ! [ -d /sys/kernel/config/target/iscsi/${OCF_RESKEY_iqn} ] ; then
++			ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
++		fi
+ 		for portal in ${OCF_RESKEY_portals}; do
+ 			if [ $portal != ${OCF_RESKEY_portals_default} ] ; then
+ 				IFS=':' read -a sep_portal <<< "$portal"
+ 				ocf_run targetcli /iscsi/${OCF_RESKEY_iqn}/tpg1/portals create "${sep_portal[0]}" "${sep_portal[1]}" || exit $OCF_ERR_GENERIC
+-			else
+-				ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
+ 			fi
+ 		done
+ 		# in lio, we can set target parameters by manipulating
diff --git a/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch
new file mode 100644
index 0000000..8899055
--- /dev/null
+++ b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch
@@ -0,0 +1,104 @@
+From 57f695d336cab33c61e754e463654ad6400f7b58 Mon Sep 17 00:00:00 2001
+From: gguifelixamz <fguilher@amazon.com>
+Date: Tue, 27 Nov 2018 17:06:05 +0000
+Subject: [PATCH 1/4] Enable --query flag in DescribeRouteTable API call to
+ avoid race condition with grep
+
+---
+ heartbeat/aws-vpc-move-ip | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 9b2043aca..d2aed7490 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,9 +167,10 @@ ec2ip_validate() {
+ ec2ip_monitor() {
+ 	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
+ 		ocf_log info "monitor: check routing table (API call)"
+-		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table"
++                cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId''
+ 		ocf_log debug "executing command: $cmd"
+-		ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_ip | awk '{ print $3 }')"
++                ROUTE_TO_INSTANCE=$($cmd)
++                ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
+ 		if [ -z "$ROUTE_TO_INSTANCE" ]; then
+ 			ROUTE_TO_INSTANCE="<unknown>"
+ 		fi
+
+From 4d6371aca5dca35b902a480e07a08c1dc3373ca5 Mon Sep 17 00:00:00 2001
+From: gguifelixamz <fguilher@amazon.com>
+Date: Thu, 29 Nov 2018 11:39:26 +0000
+Subject: [PATCH 2/4] aws-vpc-move-ip: Fixed outer quotes and removed inner
+ quotes
+
+---
+ heartbeat/aws-vpc-move-ip | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index d2aed7490..ced69bd13 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,7 +167,7 @@ ec2ip_validate() {
+ ec2ip_monitor() {
+ 	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
+ 		ocf_log info "monitor: check routing table (API call)"
+-                cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId''
++                cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId"
+ 		ocf_log debug "executing command: $cmd"
+                 ROUTE_TO_INSTANCE=$($cmd)
+                 ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
+
+From 09f4b061690a0e681aaf7314f1fc3e6f4e597cc8 Mon Sep 17 00:00:00 2001
+From: gguifelixamz <fguilher@amazon.com>
+Date: Thu, 29 Nov 2018 11:55:05 +0000
+Subject: [PATCH 3/4] aws-vpc-move-ip: Replaced indentation spaces with tabs
+ for consistency with the rest of the code
+
+---
+ heartbeat/aws-vpc-move-ip | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index ced69bd13..3e827283e 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,10 +167,10 @@ ec2ip_validate() {
+ ec2ip_monitor() {
+ 	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
+ 		ocf_log info "monitor: check routing table (API call)"
+-                cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId"
++		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId"
+ 		ocf_log debug "executing command: $cmd"
+-                ROUTE_TO_INSTANCE=$($cmd)
+-                ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
++		ROUTE_TO_INSTANCE=$($cmd)
++		ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
+ 		if [ -z "$ROUTE_TO_INSTANCE" ]; then
+ 			ROUTE_TO_INSTANCE="<unknown>"
+ 		fi
+
+From fcf85551ce70cb4fb7ce24e21c361fdbe6fcce6b Mon Sep 17 00:00:00 2001
+From: gguifelixamz <fguilher@amazon.com>
+Date: Thu, 29 Nov 2018 13:07:32 +0000
+Subject: [PATCH 4/4] aws-vpc-move-ip: In cmd variable on ec2ip_monitor():
+ replaced _address with _ip and modified to use single quotes
+
+---
+ heartbeat/aws-vpc-move-ip | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 3e827283e..331ee184f 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,7 +167,7 @@ ec2ip_validate() {
+ ec2ip_monitor() {
+ 	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
+ 		ocf_log info "monitor: check routing table (API call)"
+-		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId"
++		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId"
+ 		ocf_log debug "executing command: $cmd"
+ 		ROUTE_TO_INSTANCE=$($cmd)
+ 		ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
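The switch to --query moves the filtering into the CLI's JMESPath evaluator, so the result no longer depends on the text layout the old `grep | awk` pipeline assumed. A standalone equivalent (profile, table id, and IP are placeholders):

    aws --profile cluster --output text ec2 describe-route-tables \
        --route-table-ids rtb-0123456789abcdef0 \
        --query "RouteTables[*].Routes[?DestinationCidrBlock=='10.0.0.5/32'].InstanceId"
    # prints only the instance id currently holding the 10.0.0.5/32 route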
diff --git a/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch
new file mode 100644
index 0000000..9ad4c1d
--- /dev/null
+++ b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch
@@ -0,0 +1,46 @@
+From 17fe1dfeef1534b270e4765277cb8d7b42c4a9c4 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 5 Apr 2019 09:15:40 +0200
+Subject: [PATCH] gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding
+ issue
+
+---
+ heartbeat/gcp-vpc-move-route.in | 2 +-
+ heartbeat/gcp-vpc-move-vip.in   | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+index 591b97b1c..7dd47150d 100644
+--- a/heartbeat/gcp-vpc-move-route.in
++++ b/heartbeat/gcp-vpc-move-route.in
+@@ -193,7 +193,7 @@ def get_metadata(metadata_key, params=None, timeout=None):
+   url = '%s?%s' % (metadata_url, params)
+   request = urlrequest.Request(url, headers=METADATA_HEADERS)
+   request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
+-  return request_opener.open(request, timeout=timeout * 1.1).read()
++  return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8")
+ 
+ 
+ def validate(ctx):
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index bd6cf86cd..953d61ed7 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -106,7 +106,7 @@ def get_metadata(metadata_key, params=None, timeout=None):
+   url = '%s?%s' % (metadata_url, params)
+   request = urlrequest.Request(url, headers=METADATA_HEADERS)
+   request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
+-  return request_opener.open(request, timeout=timeout * 1.1).read()
++  return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8")
+ 
+ 
+ def get_instance(project, zone, instance):
+@@ -162,7 +162,7 @@ def get_alias(project, zone, instance):
+ 
+ def get_localhost_alias():
+   net_iface = get_metadata('instance/network-interfaces', {'recursive': True})
+-  net_iface = json.loads(net_iface.decode('utf-8'))
++  net_iface = json.loads(net_iface)
+   try:
+     return net_iface[0]['ipAliases'][0]
+   except (KeyError, IndexError):
diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch
new file mode 100644
index 0000000..b724aa3
--- /dev/null
+++ b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch
@@ -0,0 +1,122 @@
+--- a/heartbeat/aws-vpc-move-ip	2019-05-20 10:54:01.527329668 +0200
++++ b/heartbeat/aws-vpc-move-ip	2019-05-20 11:33:35.386089091 +0200
+@@ -93,11 +93,19 @@
+ <content type="string" default="" />
+ </parameter>
+ 
++<parameter name="address">
++<longdesc lang="en">
++Deprecated IP address param. Use the ip param instead.
++</longdesc>
++<shortdesc lang="en">Deprecated VPC private IP Address</shortdesc>
++<content type="string" default="" />
++</parameter>
++
+ <parameter name="routing_table" required="1">
+ <longdesc lang="en">
+-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-...
++Name of the routing table(s), where the route for the IP address should be changed. If declaring multiple routing tables they should be separated by comma. Example: rtb-XXXXXXXX,rtb-YYYYYYYYY
+ </longdesc>
+-<shortdesc lang="en">routing table name</shortdesc>
++<shortdesc lang="en">routing table name(s)</shortdesc>
+ <content type="string" default="" />
+ </parameter>
+ 
+@@ -129,6 +137,13 @@
+ END
+ }
+ 
++ec2ip_set_address_param_compat(){
++	# Include backward compatibility for the deprecated address parameter
++	if [ -z  "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then
++		OCF_RESKEY_ip="$OCF_RESKEY_address"
++	fi
++}
++
+ ec2ip_validate() {
+ 	for cmd in aws ip curl; do
+ 		check_binary "$cmd"
+@@ -150,20 +165,29 @@
+ }
+ 
+ ec2ip_monitor() {
+-	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ]; then
+-		ocf_log info "monitor: check routing table (API call)"
+-		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId"
+-		ocf_log debug "executing command: $cmd"
+-		ROUTE_TO_INSTANCE=$($cmd)
+-		ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
+-		if [ -z "$ROUTE_TO_INSTANCE" ]; then
+-			ROUTE_TO_INSTANCE="<unknown>"
+-		fi
++        MON_RES=""
++	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
++		for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
++			ocf_log info "monitor: check routing table (API call) - $rtb"
++			cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId"
++			ocf_log debug "executing command: $cmd"
++			ROUTE_TO_INSTANCE="$($cmd)"
++			ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
++			if [ -z "$ROUTE_TO_INSTANCE" ]; then
++				ROUTE_TO_INSTANCE="<unknown>"
++			fi
++
++			if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then 
++				ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb"
++				MON_RES="$MON_RES $rtb"
++			fi
++			sleep 1
++		done
+ 
+-		if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ];then 
+-			ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE"
++		if [ ! -z "$MON_RES" ]; then
+ 			return $OCF_NOT_RUNNING
+ 		fi
++
+ 	else
+ 		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
+ 	fi
+@@ -195,19 +219,23 @@
+ }
+ 
+ ec2ip_get_and_configure() {
+-	# Adjusting the routing table
+-	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile ec2 replace-route --route-table-id $OCF_RESKEY_routing_table --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID"
+-	ocf_log debug "executing command: $cmd"
+-	$cmd
+-	rc=$?
+-	if [ "$rc" != 0 ]; then
+-		ocf_log warn "command failed, rc: $rc"
+-		return $OCF_ERR_GENERIC
+-	fi
++	for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
++		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID"
++		ocf_log debug "executing command: $cmd"
++		$cmd
++		rc=$?
++		if [ "$rc" != 0 ]; then
++			ocf_log warn "command failed, rc: $rc"
++			return $OCF_ERR_GENERIC
++		fi
++		sleep 1
++	done
+ 
+ 	# Reconfigure the local ip address
+ 	ec2ip_drop
+-	ip addr add "${OCF_RESKEY_ip}/32" dev $OCF_RESKEY_interface
++	cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface"
++	ocf_log debug "executing command: $cmd"
++	$cmd
+ 	rc=$?
+ 	if [ $rc != 0 ]; then
+ 		ocf_log warn "command failed, rc: $rc"
+@@ -289,6 +317,8 @@
+ 	exit $OCF_ERR_PERM
+ fi
+ 
++ec2ip_set_address_param_compat
++
+ ec2ip_validate
+ 
+ case $__OCF_ACTION in
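The comma-separated routing_table parameter is split with sed and iterated, one replace-route call per table. The same pattern in isolation (table ids are placeholders):

    OCF_RESKEY_routing_table="rtb-aaaa1111,rtb-bbbb2222"
    for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
        echo "would call replace-route on $rtb"
    done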
diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch
new file mode 100644
index 0000000..c283801
--- /dev/null
+++ b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch
@@ -0,0 +1,221 @@
+From 9f2b9cc09f7e2df163ff95585374f860f3dc58eb Mon Sep 17 00:00:00 2001
+From: Tomas Krojzl <tomas_krojzl@cz.ibm.com>
+Date: Tue, 16 Apr 2019 18:40:29 +0200
+Subject: [PATCH 1/6] Fix for VM having multiple network interfaces
+
+---
+ heartbeat/aws-vpc-move-ip | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 090956434..a91c2dd11 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -219,8 +219,28 @@ ec2ip_drop() {
+ }
+ 
+ ec2ip_get_and_configure() {
++	cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3"
++	ocf_log debug "executing command: $cmd"
++	MAC_ADDR="$(eval $cmd)"
++	rc=$?
++	if [ $rc != 0 ]; then
++		ocf_log warn "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++  ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
++
++	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1"
++	ocf_log debug "executing command: $cmd"
++	EC2_NETWORK_INTERFACE_ID="$(eval $cmd)"
++	rc=$?
++	if [ $rc != 0 ]; then
++		ocf_log warn "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++  ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
++
+ 	for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
+-		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID"
++		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
+ 		ocf_log debug "executing command: $cmd"
+ 		$cmd
+ 		rc=$?
+
+From a871a463134ebb2456b5f37a343bf9034f5f4074 Mon Sep 17 00:00:00 2001
+From: krojzl <tomas_krojzl@cz.ibm.com>
+Date: Tue, 16 Apr 2019 18:49:32 +0200
+Subject: [PATCH 2/6] Fixing indentation
+
+---
+ heartbeat/aws-vpc-move-ip | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index a91c2dd11..a46d10d30 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -227,7 +227,7 @@ ec2ip_get_and_configure() {
+ 		ocf_log warn "command failed, rc: $rc"
+ 		return $OCF_ERR_GENERIC
+ 	fi
+-  ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
++	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 
+ 	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1"
+ 	ocf_log debug "executing command: $cmd"
+@@ -237,7 +237,7 @@ ec2ip_get_and_configure() {
+ 		ocf_log warn "command failed, rc: $rc"
+ 		return $OCF_ERR_GENERIC
+ 	fi
+-  ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
++	ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
+ 
+ 	for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
+ 		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
+
+From 068680427dff620a948ae25f090bc154b02f17b9 Mon Sep 17 00:00:00 2001
+From: krojzl <tomas_krojzl@cz.ibm.com>
+Date: Wed, 17 Apr 2019 14:22:31 +0200
+Subject: [PATCH 3/6] Requested fix to avoid using AWS API
+
+---
+ heartbeat/aws-vpc-move-ip | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index a46d10d30..2910552f2 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -229,7 +229,7 @@ ec2ip_get_and_configure() {
+ 	fi
+ 	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 
+-	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1"
++	cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id"
+ 	ocf_log debug "executing command: $cmd"
+ 	EC2_NETWORK_INTERFACE_ID="$(eval $cmd)"
+ 	rc=$?
+
+From 207a2ba66ba7196180d27674aa204980fcd25de2 Mon Sep 17 00:00:00 2001
+From: krojzl <tomas_krojzl@cz.ibm.com>
+Date: Fri, 19 Apr 2019 11:14:21 +0200
+Subject: [PATCH 4/6] More robust approach of getting MAC address
+
+---
+ heartbeat/aws-vpc-move-ip | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 2910552f2..3a848b7e3 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -219,15 +219,28 @@ ec2ip_drop() {
+ }
+ 
+ ec2ip_get_and_configure() {
+-	cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3"
+-	ocf_log debug "executing command: $cmd"
+-	MAC_ADDR="$(eval $cmd)"
+-	rc=$?
+-	if [ $rc != 0 ]; then
+-		ocf_log warn "command failed, rc: $rc"
+-		return $OCF_ERR_GENERIC
++	MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
++	if [ -f $MAC_FILE ]; then
++		cmd="cat ${MAC_FILE}"
++		ocf_log debug "executing command: $cmd"
++		MAC_ADDR="$(eval $cmd)"
++		rc=$?
++		if [ $rc != 0 ]; then
++			ocf_log warn "command failed, rc: $rc"
++			return $OCF_ERR_GENERIC
++		fi
++		ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
++	else
++		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
++		ocf_log debug "executing command: $cmd"
++		MAC_ADDR="$(eval $cmd)"
++		rc=$?
++		if [ $rc != 0 ]; then
++			ocf_log warn "command failed, rc: $rc"
++			return $OCF_ERR_GENERIC
++		fi
++		ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 	fi
+-	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 
+ 	cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id"
+ 	ocf_log debug "executing command: $cmd"
+
+From cdcc12a9c1431125b0d5298176e5242bfc9fbe29 Mon Sep 17 00:00:00 2001
+From: krojzl <tomas_krojzl@cz.ibm.com>
+Date: Fri, 19 Apr 2019 11:20:09 +0200
+Subject: [PATCH 5/6] Moving shared part outside if
+
+---
+ heartbeat/aws-vpc-move-ip | 25 +++++++++----------------
+ 1 file changed, 9 insertions(+), 16 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 3a848b7e3..bfe23e5bf 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -222,26 +222,19 @@ ec2ip_get_and_configure() {
+ 	MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
+ 	if [ -f $MAC_FILE ]; then
+ 		cmd="cat ${MAC_FILE}"
+-		ocf_log debug "executing command: $cmd"
+-		MAC_ADDR="$(eval $cmd)"
+-		rc=$?
+-		if [ $rc != 0 ]; then
+-			ocf_log warn "command failed, rc: $rc"
+-			return $OCF_ERR_GENERIC
+-		fi
+-		ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 	else
+ 		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
+-		ocf_log debug "executing command: $cmd"
+-		MAC_ADDR="$(eval $cmd)"
+-		rc=$?
+-		if [ $rc != 0 ]; then
+-			ocf_log warn "command failed, rc: $rc"
+-			return $OCF_ERR_GENERIC
+-		fi
+-		ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+ 	fi
+ 
++	ocf_log debug "executing command: $cmd"
++	MAC_ADDR="$(eval $cmd)"
++	rc=$?
++	if [ $rc != 0 ]; then
++		ocf_log warn "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
++
+ 	cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id"
+ 	ocf_log debug "executing command: $cmd"
+ 	EC2_NETWORK_INTERFACE_ID="$(eval $cmd)"
+
+From c3fc114fc64f6feb015c5342923fd2afc367ae28 Mon Sep 17 00:00:00 2001
+From: krojzl <tomas_krojzl@cz.ibm.com>
+Date: Fri, 19 Apr 2019 11:22:55 +0200
+Subject: [PATCH 6/6] Linting adjustment
+
+---
+ heartbeat/aws-vpc-move-ip | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index bfe23e5bf..2757c27d0 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -225,7 +225,6 @@ ec2ip_get_and_configure() {
+ 	else
+ 		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
+ 	fi
+-
+ 	ocf_log debug "executing command: $cmd"
+ 	MAC_ADDR="$(eval $cmd)"
+ 	rc=$?
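Taken together, the six patches above leave ec2ip_get_and_configure resolving the route target in three steps: read the interface's MAC address (preferring sysfs over "ip -br link"), translate that MAC into a network-interface ID via the EC2 instance metadata service, and point every configured route table at that interface. A minimal standalone sketch of the flow the series converges on, assuming the agent's OCF_RESKEY_* variables are already set:

    #!/bin/sh
    # Sketch only; mirrors the logic of the final patched agent.
    iface="$OCF_RESKEY_interface"

    # 1. MAC address: sysfs when available, "ip -br link" otherwise.
    if [ -f "/sys/class/net/${iface}/address" ]; then
        mac=$(cat "/sys/class/net/${iface}/address")
    else
        mac=$(ip -br link show dev "$iface" | tr -s ' ' | cut -d' ' -f3)
    fi

    # 2. MAC -> ENI ID from instance metadata (avoids an AWS API call).
    eni=$(curl -s "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${mac}/interface-id")

    # 3. Route each table to the ENI instead of the instance ID, which is
    #    what disambiguates VMs with multiple network interfaces.
    for rtb in $(echo "$OCF_RESKEY_routing_table" | sed -e 's/,/ /g'); do
        $OCF_RESKEY_awscli --profile "$OCF_RESKEY_profile" --output text \
            ec2 replace-route --route-table-id "$rtb" \
            --destination-cidr-block "${OCF_RESKEY_ip}/32" \
            --network-interface-id "$eni"
    done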
diff --git a/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch
new file mode 100644
index 0000000..4de33f1
--- /dev/null
+++ b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch
@@ -0,0 +1,32 @@
+From aae26ca70ef910e83485778c1fb450941fe79e8a Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Mon, 3 Dec 2018 16:48:14 +0100
+Subject: [PATCH] Do not log at debug log level when HA_debug is unset
+
+There might be situations (e.g. bundles) where the HA_debug variable
+is unset. It makes little sense to enable debug logging when the HA_debug env
+variable is unset.
+So let's skip debug logs when HA_debug is set to 0 or is unset.
+
+Tested inside a bundle and observed that previously seen 'ocf_log debug'
+calls are now correctly suppressed (w/ HA_debug being unset inside the
+container).
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/ocf-shellfuncs.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index 043ab9bf2..b17297e1a 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -257,7 +257,7 @@ ha_log()
+ 
+ ha_debug() {
+ 
+-        if [ "x${HA_debug}" = "x0" ] ; then
++        if [ "x${HA_debug}" = "x0" ] || [ -z "${HA_debug}" ] ; then
+                 return 0
+         fi
+ 	if tty >/dev/null; then
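The observable effect of the tightened guard on ocf_log callers, as a short sketch (assuming ocf-shellfuncs is sourced):

    unset HA_debug; ocf_log debug "hidden"   # suppressed after this patch
    HA_debug=0;     ocf_log debug "hidden"   # suppressed, as before
    HA_debug=1;     ocf_log debug "shown"    # still logged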
diff --git a/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch
new file mode 100644
index 0000000..00e34b8
--- /dev/null
+++ b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch
@@ -0,0 +1,22 @@
+From 73b35b74b743403aeebab43205475be6f2938cd5 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 12 Jun 2019 10:11:07 +0200
+Subject: [PATCH] ocf_is_true: add True to regexp
+
+---
+ heartbeat/ocf-shellfuncs.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index b17297e1a..7a97558a5 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -101,7 +101,7 @@ ocf_is_decimal() {
+ 
+ ocf_is_true() {
+ 	case "$1" in
+-	yes|true|1|YES|TRUE|ja|on|ON) true ;;
++	yes|true|1|YES|TRUE|True|ja|on|ON) true ;;
+ 	*)	false ;;
+ 	esac
+ }
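A one-word change, but it lets ocf_is_true accept the capitalised spelling that Python-based tooling tends to emit (str(True) prints "True"). Illustrative calls, with the shellfuncs install path assumed:

    . /usr/lib/ocf/lib/heartbeat/ocf-shellfuncs   # path assumed
    ocf_is_true True && echo accepted   # true only after this patch
    ocf_is_true yes  && echo accepted   # unchanged
    ocf_is_true off  || echo rejected   # still false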
diff --git a/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch
new file mode 100644
index 0000000..9cf643e
--- /dev/null
+++ b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch
@@ -0,0 +1,21 @@
+From d1fc6920718284431a2c2cc28562498d6c8ea792 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 19 Jun 2019 11:12:33 +0200
+Subject: [PATCH] Filesystem: remove removed notify-action from metadata
+
+---
+ heartbeat/Filesystem | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index 780ba63a4..c46ec3cca 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -221,7 +221,6 @@ block if unresponsive nfs mounts are in use on the system.
+ <actions>
+ <action name="start" timeout="60s" />
+ <action name="stop" timeout="60s" />
+-<action name="notify" timeout="60s" />
+ <action name="monitor" depth="0" timeout="40s" interval="20s" />
+ <action name="validate-all" timeout="5s" />
+ <action name="meta-data" timeout="5s" />
diff --git a/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch
new file mode 100644
index 0000000..82a46c1
--- /dev/null
+++ b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch
@@ -0,0 +1,28 @@
+From c8c073ed81884128b0b3955fb0b0bd23661044a2 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 12 Jun 2019 12:45:08 +0200
+Subject: [PATCH] dhcpd: keep SELinux context
+
+---
+ heartbeat/dhcpd | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/dhcpd b/heartbeat/dhcpd
+index 8b2d8b695..46027b39b 100755
+--- a/heartbeat/dhcpd
++++ b/heartbeat/dhcpd
+@@ -337,12 +337,12 @@ dhcpd_initialize_chroot() {
+     done | sort -u`
+     for i in $cplibs ; do
+ 	if [ -s "$i" ]; then
+-	    cp -pL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" ||
++	    cp -aL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" ||
+ 		{ ocf_exit_reason "could not copy $i to chroot jail"; return $OCF_ERR_GENERIC; }
+ 	fi
+     done
+ 
+-   return $OCF_SUCCESS
++    return $OCF_SUCCESS
+ }
+ 
+ # Initialize a non-chroot environment
diff --git a/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch
new file mode 100644
index 0000000..d868593
--- /dev/null
+++ b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch
@@ -0,0 +1,76 @@
+From 2dbfbd8ee3c1547f941507ab4109aa04eec0ef5a Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Mon, 16 Jul 2018 20:24:04 +0200
+Subject: [PATCH] Do not call ocf_attribute_target in the meta-data function
+
+Starting with pacemaker-1.1.19 a "crm_node -n" call triggers
+a CRM_OP_NODE_INFO cluster operation. If this is called
+from a bundle with 1.1.19 code (or later) running on a 1.1.18
+cluster, during a meta-data call we will get the following error in the
+cluster logs:
+Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected request (node-info) sent to non-DC node
+Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected <create_request_adv origin="send_node_info_request" t="crmd" version="3.0.14" subt="request" reference="node-info-crm_node-1531568127-1" crm_task="node-info" crm_sys_to="crmd" crm_sys_from="e67698a2-6f50-45fc-b8de-423c94e11c99" acl_target="root" crm_user="root" src="controller-0"/>
+
+By not calling ocf_attribute_target (which triggers a crm_node -n
+call) when polling for meta-data we do not run into this issue.
+
+This can easily get triggered when creating a resource invoking
+crm_node -n inside a 1.1.19 container with pcs, as that invokes
+the 'meta-data' action explicitly.
+
+Co-Authored-By: Damien Ciabrini <dciabrin@redhat.com>
+Suggested-By: Ken Gaillot <kgaillot@redhat.com>
+---
+ heartbeat/galera           | 6 ++++--
+ heartbeat/rabbitmq-cluster | 4 +++-
+ heartbeat/redis.in         | 4 +++-
+ 3 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index 270bdaf1b..4f341ceef 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -66,9 +66,11 @@
+ 
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+-. ${OCF_FUNCTIONS_DIR}/mysql-common.sh
+ 
+-NODENAME=$(ocf_attribute_target)
++if [ "$__OCF_ACTION" != "meta-data" ]; then
++    . ${OCF_FUNCTIONS_DIR}/mysql-common.sh
++    NODENAME=$(ocf_attribute_target)
++fi
+ 
+ # It is common for some galera instances to store
+ # check user that can be used to query status
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 9ff49e075..54a16c941 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -37,7 +37,9 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
+ RMQ_PID_DIR="/var/run/rabbitmq"
+ RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid"
+ RMQ_LOG_DIR="/var/log/rabbitmq"
+-NODENAME=$(ocf_attribute_target)
++if [ "$__OCF_ACTION" != "meta-data" ]; then
++	NODENAME=$(ocf_attribute_target)
++fi
+ 
+ # this attr represents the current active local rmq node name.
+ # when rmq stops or the node is fenced, this attr disappears
+diff --git a/heartbeat/redis.in b/heartbeat/redis.in
+index d5eb8f664..ddc62d8a7 100644
+--- a/heartbeat/redis.in
++++ b/heartbeat/redis.in
+@@ -664,7 +664,9 @@ redis_validate() {
+ 	fi
+ }
+ 
+-NODENAME=$(ocf_attribute_target)
++if [ "$__OCF_ACTION" != "meta-data" ]; then
++	NODENAME=$(ocf_attribute_target)
++fi
+ if [ -r "$REDIS_CONFIG" ]; then
+ 	clientpasswd="$(sed -n -e  's/^\s*requirepass\s*\(.*\)\s*$/\1/p' < $REDIS_CONFIG | tail -n 1)"
+ fi
diff --git a/SOURCES/findif-only-match-lines-with-netmasks.patch b/SOURCES/findif-only-match-lines-with-netmasks.patch
new file mode 100644
index 0000000..6afdd55
--- /dev/null
+++ b/SOURCES/findif-only-match-lines-with-netmasks.patch
@@ -0,0 +1,25 @@
+From 2437d3879270f8febc5353e09898dd7d0aee08af Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 1 Aug 2018 09:54:39 +0200
+Subject: [PATCH] findif: only match lines containing netmasks
+
+---
+ heartbeat/findif.sh | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
+index fc84cf0ec..66bc6d56a 100644
+--- a/heartbeat/findif.sh
++++ b/heartbeat/findif.sh
+@@ -215,9 +215,9 @@ findif()
+   fi
+   if [ -n "$nic" ] ; then
+     # NIC supports more than two.
+-    set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
++    set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
+   else
+-    set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
++    set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
+   fi
+   if [ $# = 0 ] ; then
+     case $OCF_RESKEY_ip in
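The added /\// pattern restricts the best-match scan to route entries that actually carry a /prefix, so lines printed without a netmask (a default route, for instance) can no longer win the longest-prefix comparison. A sketch of the selection over hypothetical "ip -o route" output:

    printf '%s\n' \
        'default via 10.0.0.1 dev eth0' \
        '10.0.0.0/16 dev eth0 scope link' \
        '10.0.0.0/24 dev eth0 scope link' |
    awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask);
        if (int(mask)>=best) { best=int(mask); best_ln=$0 } }
        END{print best_ln}'
    # -> 10.0.0.0/24 dev eth0 scope link  (the default line is never considered)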
diff --git a/SOURCES/lvmlockd-add-cmirrord-support.patch b/SOURCES/lvmlockd-add-cmirrord-support.patch
new file mode 100644
index 0000000..cfaf001
--- /dev/null
+++ b/SOURCES/lvmlockd-add-cmirrord-support.patch
@@ -0,0 +1,118 @@
+From d4c9de6264251e4dbc91b64aaf7f500919d08d60 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 17 Aug 2018 12:48:46 +0200
+Subject: [PATCH] lvmlockd: add cmirrord support
+
+---
+ heartbeat/lvmlockd | 53 ++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 49 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd
+index 7fe73e364..57f7fdc76 100755
+--- a/heartbeat/lvmlockd
++++ b/heartbeat/lvmlockd
+@@ -59,6 +59,14 @@ For more information, refer to manpage lvmlockd.8.
+ <shortdesc lang="en">This agent manages the lvmlockd daemon</shortdesc>
+ 
+ <parameters>
++<parameter name="with_cmirrord" unique="0" required="0">
++<longdesc lang="en">
++Start with cmirrord (cluster mirror log daemon).
++</longdesc>
++<shortdesc lang="en">activate cmirrord</shortdesc>
++<content type="boolean" default="false" />
++</parameter>
++
+ <parameter name="pidfile" unique="0">
+ <longdesc lang="en">pid file</longdesc>
+ <shortdesc lang="en">pid file</shortdesc>
+@@ -102,6 +110,7 @@ END
+ : ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"}
+ 
+ LOCKD="lvmlockd"
++CMIRRORD="cmirrord"
+ # 0.5s sleep each count
+ TIMEOUT_COUNT=20
+ 
+@@ -138,6 +147,21 @@ silent_status()
+ 
+ 	if [ -n "$pid" ] ; then
+ 		daemon_is_running "$pid"
++		rc=$?
++		mirror_rc=$rc
++
++		if ocf_is_true $OCF_RESKEY_with_cmirrord; then
++			pid=$(pgrep $CMIRRORD | head -n1)
++			daemon_is_running "$pid"
++			mirror_rc=$?
++		fi
++
++		# If these ever don't match, return error to force recovery
++		if [ $mirror_rc -ne $rc ]; then
++			return $OCF_ERR_GENERIC
++		fi
++
++		return $rc
+ 	else
+ 		# No pid file
+ 		false
+@@ -199,6 +223,16 @@ lvmlockd_start() {
+ 		return $OCF_SUCCESS
+ 	fi
+ 
++	if ocf_is_true $OCF_RESKEY_with_cmirrord; then
++		ocf_log info "starting ${CMIRRORD}..."
++		$CMIRRORD
++		rc=$?
++		if [ $rc -ne $OCF_SUCCESS ] ; then
++			ocf_exit_reason "Failed to start ${CMIRRORD}, exit code: $rc"
++			return $OCF_ERR_GENERIC
++		fi
++	fi
++
+ 	if [ ! -z "$OCF_RESKEY_socket_path" ] ; then
+ 		extras="$extras -s ${OCF_RESKEY_socket_path}"
+ 	fi
+@@ -252,10 +286,11 @@ wait_lockspaces_close()
+ 
+ kill_stop()
+ {
+-	local pid=$1
++	local proc=$1
++	local pid=$2
+ 	local retries=0
+ 
+-	ocf_log info "Killing ${LOCKD} (pid=$pid)"
++	ocf_log info "Killing $proc (pid=$pid)"
+ 	while
+ 		daemon_is_running $pid && [ $retries -lt "$TIMEOUT_COUNT" ]
+ 	do
+@@ -292,9 +327,15 @@ lvmlockd_stop() {
+ 	wait_lockspaces_close
+ 
+ 	pid=$(get_pid)
+-	kill_stop $pid
++	kill_stop $LOCKD $pid
++
++	if ocf_is_true $OCF_RESKEY_with_cmirrord; then
++		pid=$(pgrep $CMIRRORD)
++		kill_stop $CMIRRORD $pid
++	fi
++
+ 	if silent_status ; then
+-		ocf_exit_reason "Failed to stop, ${LOCKD}[$pid] still running."
++		ocf_exit_reason "Failed to stop, ${LOCKD} or ${CMIRRORD} still running."
+ 		return $OCF_ERR_GENERIC
+ 	fi
+ 
+@@ -317,6 +358,10 @@ lvmlockd_validate() {
+ 	check_binary pgrep
+ 	check_binary lvmlockctl
+ 
++	if ocf_is_true $OCF_RESKEY_with_cmirrord; then
++		check_binary $CMIRRORD
++	fi
++
+ 	return $OCF_SUCCESS
+ }
+ 
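With the new parameter the agent starts and supervises cmirrord alongside lvmlockd, and silent_status intentionally reports a failure when only one of the two daemons is still alive, so the cluster recovers the pair together. A hypothetical pcs invocation enabling it (resource name chosen for illustration):

    pcs resource create lvmlockd ocf:heartbeat:lvmlockd \
        with_cmirrord=true op monitor interval=30s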
diff --git a/SOURCES/metadata-add-missing-s-suffix.patch b/SOURCES/metadata-add-missing-s-suffix.patch
new file mode 100644
index 0000000..62ab32b
--- /dev/null
+++ b/SOURCES/metadata-add-missing-s-suffix.patch
@@ -0,0 +1,183 @@
+From 84083d83ff6049bcc99b959c00999496b3027317 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 21 Sep 2018 11:30:26 +0200
+Subject: [PATCH 1/4] IPv6Addr/nagios/sybaseASE: add missing "s"-suffix in
+ metadata
+
+---
+ heartbeat/IPv6addr.c   | 12 ++++++------
+ heartbeat/nagios       |  2 +-
+ heartbeat/sybaseASE.in |  4 ++--
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/heartbeat/IPv6addr.c b/heartbeat/IPv6addr.c
+index 68447de2e..d8562559b 100644
+--- a/heartbeat/IPv6addr.c
++++ b/heartbeat/IPv6addr.c
+@@ -863,12 +863,12 @@ meta_data_addr6(void)
+ 	"    </parameter>\n"
+ 	"  </parameters>\n"
+ 	"  <actions>\n"
+-	"    <action name=\"start\"   timeout=\"15\" />\n"
+-	"    <action name=\"stop\"    timeout=\"15\" />\n"
+-	"    <action name=\"status\"  timeout=\"15\" interval=\"15\" />\n"
+-	"    <action name=\"monitor\" timeout=\"15\" interval=\"15\" />\n"
+-	"    <action name=\"validate-all\"  timeout=\"5\" />\n"
+-	"    <action name=\"meta-data\"  timeout=\"5\" />\n"
++	"    <action name=\"start\"   timeout=\"15s\" />\n"
++	"    <action name=\"stop\"    timeout=\"15s\" />\n"
++	"    <action name=\"status\"  timeout=\"15s\" interval=\"15s\" />\n"
++	"    <action name=\"monitor\" timeout=\"15s\" interval=\"15s\" />\n"
++	"    <action name=\"validate-all\"  timeout=\"5s\" />\n"
++	"    <action name=\"meta-data\"  timeout=\"5s\" />\n"
+ 	"  </actions>\n"
+ 	"</resource-agent>\n";
+ 	printf("%s\n",meta_data);
+diff --git a/heartbeat/nagios b/heartbeat/nagios
+index 4cb462f6a..3d07b141c 100755
+--- a/heartbeat/nagios
++++ b/heartbeat/nagios
+@@ -114,7 +114,7 @@ nagios_meta_data() {
+ <action name="start" timeout="20s" />
+ <action name="stop" timeout="20s" />
+ <action name="status" timeout="20s" />
+-<action name="monitor" depth="0" timeout="20s" interval="10s" start-delay="10" />
++<action name="monitor" depth="0" timeout="20s" interval="10s" start-delay="10s" />
+ <action name="validate-all" timeout="20s" />
+ <action name="meta-data" timeout="20s" />
+ </actions>
+diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in
+index b4809ea23..9ddd429be 100755
+--- a/heartbeat/sybaseASE.in
++++ b/heartbeat/sybaseASE.in
+@@ -234,8 +234,8 @@ meta_data()
+ 		<action name="monitor" interval="30s" timeout="100s" />
+ 
+ 		<!--Checks to see if we can read from the mountpoint -->
+-		<action name="status" depth="10" timeout="100" interval="120s" />
+-		<action name="monitor" depth="10" timeout="100" interval="120s" />
++		<action name="status" depth="10" timeout="100s" interval="120s" />
++		<action name="monitor" depth="10" timeout="100s" interval="120s" />
+ 
+ 		<action name="meta-data" timeout="5s" />
+ 		<action name="validate-all" timeout="5s" />
+
+From d4bba27b171cb87698359dd300313ba5a6600cca Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 21 Sep 2018 11:34:41 +0200
+Subject: [PATCH 2/4] CI: improvements
+
+- added "check"-command to skip build-process
+- added check for "s"-suffix in agents
+- added additional file-types to also check ocf-* and C agents
+---
+ ci/build.sh | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/ci/build.sh b/ci/build.sh
+index c331e9ab4..22f4472d3 100755
+--- a/ci/build.sh
++++ b/ci/build.sh
+@@ -51,7 +51,7 @@ find_prunes() {
+ }
+ 
+ find_cmd() {
+-	echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' \) $(find_prunes)"
++	echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' -or -name '*.c' -or -name '*.in' \) $(find_prunes)"
+ }
+ 
+ check_all_executables() {
+@@ -59,6 +59,12 @@ check_all_executables() {
+ 	while read -r script; do
+ 		file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue
+ 		file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue
++		file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue
++
++		if grep -qE "\<action.*(timeout|interval|delay)=\\\?\"[0-9]+\\\?\"" "$script"; then
++			fail "$script: \"s\"-suffix missing in timeout, interval or delay"
++		fi
++
+ 		head=$(head -n1 "$script")
+ 		[[ "$head" =~ .*ruby.* ]] && continue
+ 		[[ "$head" =~ .*zsh.* ]] && continue
+@@ -67,6 +73,7 @@ check_all_executables() {
+ 		[[ "$script" =~ ^.*\.orig ]] && continue
+ 		[[ "$script" =~ ^ldirectord.in ]] && continue
+ 		check "$script"
++
+ 	done < <(eval "$(find_cmd)")
+ 	if [ $failed -gt 0 ]; then
+ 		echo "ci/build.sh: $failed failure(s) detected."
+@@ -75,8 +82,11 @@ check_all_executables() {
+ 	exit 0
+ }
+ 
+-./autogen.sh
+-./configure
+-make check
+-[ $? -eq 0 ] || failed=$((failed + 1))
++if [ "$1" != "check" ]; then
++	./autogen.sh
++	./configure
++	make check
++	[ $? -eq 0 ] || failed=$((failed + 1))
++fi
++
+ check_all_executables
+
+From 6c5f5442b4d03448aa585bf62f83876f667a76f8 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 21 Sep 2018 11:41:40 +0200
+Subject: [PATCH 3/4] ocf-shellfuncs: fixes caught when improving CI
+
+---
+ heartbeat/ocf-shellfuncs.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index 8e44f09eb..043ab9bf2 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -457,7 +457,7 @@ ocf_pidfile_status() {
+ 	return 2
+     fi
+     pid=`cat $pidfile`
+-    kill -0 $pid 2>&1 > /dev/null
++    kill -0 $pid > /dev/null 2>&1
+     if [ $? = 0 ]; then
+ 	return 0
+     fi
+@@ -761,7 +761,7 @@ maketempfile()
+ {
+ 	if [ $# = 1 -a "$1" = "-d" ]; then
+ 		mktemp -d
+-		return -0
++		return 0
+ 	elif [ $# != 0 ]; then
+ 		return 1
+ 	fi
+
+From d1579996d6f5aec57ece2bc31b106891d0bbb964 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 21 Sep 2018 11:50:08 +0200
+Subject: [PATCH 4/4] CI: fix upstream CI not detecting MIME-format correctly
+ for Makefiles
+
+---
+ ci/build.sh | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/ci/build.sh b/ci/build.sh
+index 22f4472d3..b900ddc05 100755
+--- a/ci/build.sh
++++ b/ci/build.sh
+@@ -59,7 +59,8 @@ check_all_executables() {
+ 	while read -r script; do
+ 		file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue
+ 		file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue
+-		file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue
++		# upstream CI doesn't detect MIME-format correctly for Makefiles
++		[[ "$script" =~ .*/Makefile.in ]] && continue
+ 
+ 		if grep -qE "\<action.*(timeout|interval|delay)=\\\?\"[0-9]+\\\?\"" "$script"; then
+ 			fail "$script: \"s\"-suffix missing in timeout, interval or delay"
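The CI check is a plain grep for action attributes whose timeout, interval or delay value is a bare number; the \\\? sequences unquote to an optional literal backslash, so C sources with escaped quotes (timeout=\"15\") match as well. A simplified reproduction on sample metadata lines, leaving out the backslash handling:

    printf '%s\n' \
        '<action name="start" timeout="15" />' \
        '<action name="stop"  timeout="15s" />' |
    grep -E '<action.*(timeout|interval|delay)="[0-9]+"'
    # -> prints only the first line, i.e. the one CI would flag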
diff --git a/SOURCES/nfsserver-mount-rpc_pipefs.patch b/SOURCES/nfsserver-mount-rpc_pipefs.patch
new file mode 100644
index 0000000..4dbafb5
--- /dev/null
+++ b/SOURCES/nfsserver-mount-rpc_pipefs.patch
@@ -0,0 +1,100 @@
+From c6b0104ec21a5f4ddbc3c0151a587fed2a35b773 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 20 May 2016 15:47:33 +0200
+Subject: [PATCH 1/2] nfsserver: mount based on rpcpipefs_dir variable
+
+---
+ heartbeat/nfsserver | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
+index 3d036a98a..479082169 100755
+--- a/heartbeat/nfsserver
++++ b/heartbeat/nfsserver
+@@ -177,14 +177,8 @@ esac
+ fp="$OCF_RESKEY_nfs_shared_infodir"
+ : ${OCF_RESKEY_nfs_notify_cmd="$DEFAULT_NOTIFY_CMD"}
+ : ${OCF_RESKEY_nfs_notify_foreground="$DEFAULT_NOTIFY_FOREGROUND"}
+-
+-if [ -z ${OCF_RESKEY_rpcpipefs_dir} ]; then
+-	rpcpipefs_make_dir=$fp/rpc_pipefs
+-	rpcpipefs_umount_dir=${DEFAULT_RPCPIPEFS_DIR}
+-else
+-	rpcpipefs_make_dir=${OCF_RESKEY_rpcpipefs_dir}
+-	rpcpipefs_umount_dir=${OCF_RESKEY_rpcpipefs_dir}
+-fi
++: ${OCF_RESKEY_rpcpipefs_dir="$DEFAULT_RPCPIPEFS_DIR"}
++OCF_RESKEY_rpcpipefs_dir=${OCF_RESKEY_rpcpipefs_dir%/}
+ 
+ # Use statd folder if it exists
+ if [ -d "/var/lib/nfs/statd" ]; then
+@@ -409,7 +403,7 @@ prepare_directory ()
+ 	fi
+ 
+ 	[ -d "$fp" ] || mkdir -p $fp
+-	[ -d "$rpcpipefs_make_dir" ] || mkdir -p $rpcpipefs_make_dir
++	[ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir
+ 	[ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery
+ 
+ 	[ -d "$fp/$STATD_DIR" ] || mkdir -p "$fp/$STATD_DIR"
+@@ -453,9 +447,13 @@ bind_tree ()
+ 
+ unbind_tree ()
+ {
+-	if `mount | grep -q " on $rpcpipefs_umount_dir"`; then
+-		umount -t rpc_pipefs $rpcpipefs_umount_dir
+-	fi
++	local i=1
++	while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do
++		ocf_log info "Stop: umount ($i/10 attempts)"
++		umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir
++		sleep 1
++		i=$((i + 1))
++	done
+ 	if is_bound /var/lib/nfs; then
+ 		umount /var/lib/nfs
+ 	fi
+@@ -617,6 +615,8 @@ nfsserver_start ()
+ 	prepare_directory
+ 	bind_tree
+ 
++	mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
++
+ 	# remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot.
+ 	rm -f /var/run/sm-notify.pid
+ 	#
+
+From c92e8c84b73dde3254f53665a0ef3603418538dc Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 27 Sep 2018 16:09:09 +0200
+Subject: [PATCH 2/2] nfsserver: only mount rpc_pipefs if it's not mounted
+
+Also added a space to avoid matching similarly named mounts.
+---
+ heartbeat/nfsserver | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
+index 479082169..5412f391b 100755
+--- a/heartbeat/nfsserver
++++ b/heartbeat/nfsserver
+@@ -448,7 +448,7 @@ bind_tree ()
+ unbind_tree ()
+ {
+ 	local i=1
+-	while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do
++	while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "` && [ "$i" -le 10 ]; do
+ 		ocf_log info "Stop: umount ($i/10 attempts)"
+ 		umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir
+ 		sleep 1
+@@ -615,7 +615,9 @@ nfsserver_start ()
+ 	prepare_directory
+ 	bind_tree
+ 
+-	mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
++	if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
++		mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
++	fi
+ 
+ 	# remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot.
+ 	rm -f /var/run/sm-notify.pid
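Net effect of the two commits: the agent mounts rpc_pipefs itself at the configured (or default) directory, but only when nothing is mounted there yet, and unmounting retries up to ten times; the trailing space in the grep pattern stops a mount at, say, ${OCF_RESKEY_rpcpipefs_dir}2 from matching as a prefix. The mount-if-absent idiom in isolation (default path assumed):

    pipefs_dir="${OCF_RESKEY_rpcpipefs_dir:-/var/lib/nfs/rpc_pipefs}"
    if ! mount | grep -q " on ${pipefs_dir} "; then
        mount -t rpc_pipefs sunrpc "$pipefs_dir"
    fi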
diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch
new file mode 100644
index 0000000..12b7ad5
--- /dev/null
+++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch
@@ -0,0 +1,747 @@
+diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2018-06-27 13:22:31.576628598 +0200
++++ b/doc/man/Makefile.am	2018-06-27 13:47:15.902753673 +0200
+@@ -75,6 +75,8 @@
+                           ocf_heartbeat_ManageRAID.7 \
+                           ocf_heartbeat_ManageVE.7 \
+                           ocf_heartbeat_NodeUtilization.7 \
++                          ocf_heartbeat_nova-compute-wait.7 \
++                          ocf_heartbeat_NovaEvacuate.7 \
+                           ocf_heartbeat_Pure-FTPd.7 \
+                           ocf_heartbeat_Raid1.7 \
+                           ocf_heartbeat_Route.7 \
+diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am	2018-06-27 13:22:31.574628625 +0200
++++ b/heartbeat/Makefile.am	2018-06-27 13:46:23.621453631 +0200
+@@ -29,6 +29,8 @@
+ 
+ ocfdir		        = $(OCF_RA_DIR_PREFIX)/heartbeat
+ 
++ospdir			= $(OCF_RA_DIR_PREFIX)/openstack
++
+ dtddir			= $(datadir)/$(PACKAGE_NAME)
+ dtd_DATA		= ra-api-1.dtd metadata.rng
+ 
+@@ -50,6 +52,9 @@
+ IPv6addr_LDADD          = -lplumb $(LIBNETLIBS)
+ send_ua_LDADD           = $(LIBNETLIBS)
+ 
++osp_SCRIPTS	     =  nova-compute-wait	\
++			NovaEvacuate
++
+ ocf_SCRIPTS	     =  AoEtarget		\
+ 			AudibleAlarm		\
+ 			ClusterMon		\
+diff -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+--- a/heartbeat/nova-compute-wait	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/nova-compute-wait	2018-06-27 13:27:15.166830889 +0200
+@@ -0,0 +1,317 @@
++#!/bin/sh
++# Copyright 2015 Red Hat, Inc.
++#
++# Description:  Manages compute daemons
++#
++# Authors: Andrew Beekhof
++#
++# Support:      openstack@lists.openstack.org
++# License:      Apache Software License (ASL) 2.0
++#
++
++
++#######################################################################
++# Initialization:
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++	cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="nova-compute-wait" version="1.0">
++<version>1.0</version>
++
++<longdesc lang="en">
++OpenStack Nova Compute Server.
++</longdesc>
++<shortdesc lang="en">OpenStack Nova Compute Server</shortdesc>
++
++<parameters>
++
++<parameter name="auth_url" unique="0" required="1">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="username" unique="0" required="1">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++</parameter>
++
++<parameter name="password" unique="0" required="1">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="tenant_name" unique="0" required="1">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="domain" unique="0" required="0">
++<longdesc lang="en">
++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
++</longdesc>
++<shortdesc lang="en">DNS domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="endpoint_type" unique="0" required="0">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="no_shared_storage" unique="0" required="0">
++<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
++<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="evacuation_delay" unique="0" required="0">
++<longdesc lang="en">
++How long to wait for nova to finish evacuating instances elsewhere
++before starting nova-compute.  Only used when the agent detects
++evacuations might be in progress.
++
++You may need to increase the start timeout when increasing this value.
++</longdesc>
++<shortdesc lang="en">Delay to allow evacuations time to complete</shortdesc>
++<content type="integer" default="120" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="600" />
++<action name="stop"         timeout="300" />
++<action name="monitor"      timeout="20" interval="10" depth="0"/>
++<action name="validate-all" timeout="20" />
++<action name="meta-data"    timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++	ocf_log info "They use TERM to bring us down. No such luck."
++	return
++}
++
++nova_usage() {
++	cat <<END
++usage: $0 {start|stop|monitor|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++nova_start() {
++    build_unfence_overlay
++
++    state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
++    if [ "x$state" = x ]; then
++	: never been fenced
++
++    elif [ "x$state" = xno ]; then
++	: has been evacuated, however it could have been 1s ago
++	ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
++	sleep ${OCF_RESKEY_evacuation_delay}
++
++    else
++	while [ "x$state" != "xno" ]; do
++	    ocf_log info "Waiting for pending evacuations from ${NOVA_HOST}"
++	    state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
++	    sleep 5
++	done
++
++	ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
++	sleep ${OCF_RESKEY_evacuation_delay}
++    fi
++
++    touch "$statefile"
++
++    return $OCF_SUCCESS
++}
++
++nova_stop() {
++    rm -f "$statefile"
++    return $OCF_SUCCESS
++}
++
++nova_monitor() {
++    if [ ! -f "$statefile" ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    return $OCF_SUCCESS
++}
++
++nova_notify() {
++    return $OCF_SUCCESS
++}
++
++build_unfence_overlay() {
++    fence_options=""
++
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++	candidates=$(/usr/sbin/stonith_admin -l ${NOVA_HOST})
++	for candidate in ${candidates}; do
++	    pcs stonith show $candidate | grep -q fence_compute
++	    if [ $? = 0 ]; then
++		ocf_log info "Unfencing nova based on: $candidate"
++		fence_auth=$(pcs stonith show $candidate | grep Attributes: | sed -e s/Attributes:// -e s/-/_/g -e 's/[^ ]\+=/OCF_RESKEY_\0/g' -e s/passwd/password/g)
++		eval "export $fence_auth"
++		break
++	    fi
++	done
++    fi    
++
++    # Copied from NovaEvacuate 
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++        ocf_exit_reason "auth_url not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
++
++    if [ -z "${OCF_RESKEY_username}" ]; then
++        ocf_exit_reason "username not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -l ${OCF_RESKEY_username}"
++
++    if [ -z "${OCF_RESKEY_password}" ]; then
++        ocf_exit_reason "password not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -p ${OCF_RESKEY_password}"
++
++    if [ -z "${OCF_RESKEY_tenant_name}" ]; then
++        ocf_exit_reason "tenant_name not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
++
++    if [ -n "${OCF_RESKEY_domain}" ]; then
++        fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_region_name}" ]; then
++        fence_options="${fence_options} \
++            --region-name ${OCF_RESKEY_region_name}"
++    fi
++
++    if [ -n "${OCF_RESKEY_insecure}" ]; then
++        if ocf_is_true "${OCF_RESKEY_insecure}"; then
++            fence_options="${fence_options} --insecure"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
++        if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
++            fence_options="${fence_options} --no-shared-storage"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
++        case ${OCF_RESKEY_endpoint_type} in
++            adminURL|publicURL|internalURL)
++                ;;
++            *)
++                ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
++                    "not valid. Use adminURL or publicURL or internalURL"
++                exit $OCF_ERR_CONFIGURED
++                ;;
++        esac
++        fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
++    fi
++
++    mkdir -p /run/systemd/system/openstack-nova-compute.service.d
++    cat<<EOF>/run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf
++[Service]
++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST}
++EOF
++}
++
++nova_validate() {
++    rc=$OCF_SUCCESS
++
++    check_binary crudini
++    check_binary nova-compute
++    check_binary fence_compute
++
++    if [ ! -f /etc/nova/nova.conf ]; then
++	   ocf_exit_reason "/etc/nova/nova.conf not found"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    # Is the state directory writable?
++    state_dir=$(dirname $statefile)
++    touch "$state_dir/$$"
++    if [ $? != 0 ]; then
++        ocf_exit_reason "Invalid state directory: $state_dir"
++        return $OCF_ERR_ARGS
++    fi
++    rm -f "$state_dir/$$"
++
++    NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
++    if [ $? = 1 ]; then
++        short_host=$(uname -n | awk -F. '{print $1}')
++        if [ "x${OCF_RESKEY_domain}" != x ]; then
++            NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
++        else
++            NOVA_HOST=$(uname -n)
++        fi
++    fi
++
++    if [ $rc != $OCF_SUCCESS ]; then
++	exit $rc
++    fi
++    return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++: ${OCF_RESKEY_evacuation_delay=120}
++case $__OCF_ACTION in
++meta-data)	meta_data
++		exit $OCF_SUCCESS
++		;;
++usage|help)	nova_usage
++		exit $OCF_SUCCESS
++		;;
++esac
++
++case $__OCF_ACTION in
++start)		nova_validate; nova_start;;
++stop)		nova_stop;;
++monitor)	nova_validate; nova_monitor;;
++notify)		nova_notify;;
++validate-all)	exit $OCF_SUCCESS;;
++*)		nova_usage
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
++
+diff -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+--- a/heartbeat/NovaEvacuate	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/NovaEvacuate	2018-06-27 13:27:18.835781756 +0200
+@@ -0,0 +1,388 @@
++#!/bin/sh
++#
++# Copyright 2015 Red Hat, Inc.
++#
++# Description:  Manages evacuation of nodes running nova-compute
++#
++# Authors: Andrew Beekhof
++#
++# Support:      openstack@lists.openstack.org
++# License:      Apache Software License (ASL) 2.0
++#
++
++
++#######################################################################
++# Initialization:
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++	cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="NovaEvacuate" version="1.0">
++<version>1.0</version>
++
++<longdesc lang="en">
++Facility for taking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged.
++</longdesc>
++<shortdesc lang="en">Evacuator for OpenStack Nova Compute Server</shortdesc>
++
++<parameters>
++
++<parameter name="auth_url" unique="0" required="1">
++<longdesc lang="en">
++Authorization URL for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Authorization URL</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="username" unique="0" required="1">
++<longdesc lang="en">
++Username for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Username</shortdesc>
++</parameter>
++
++<parameter name="password" unique="0" required="1">
++<longdesc lang="en">
++Password for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Password</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="tenant_name" unique="0" required="1">
++<longdesc lang="en">
++Tenant name for connecting to keystone in admin context.
++Note that with Keystone V3 tenant names are only unique within a domain.
++</longdesc>
++<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="user_domain" unique="0" required="1">
++<longdesc lang="en">
++User's domain name. Used when authenticating to Keystone.
++</longdesc>
++<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="project_domain" unique="0" required="1">
++<longdesc lang="en">
++Domain name containing project. Used when authenticating to Keystone.
++</longdesc>
++<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="endpoint_type" unique="0" required="0">
++<longdesc lang="en">
++Nova API location (internal, public or admin URL)
++</longdesc>
++<shortdesc lang="en">Nova API location (internal, public or admin URL)</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="region_name" unique="0" required="0">
++<longdesc lang="en">
++Region name for connecting to nova.
++</longdesc>
++<shortdesc lang="en">Region name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="insecure" unique="0" required="0">
++<longdesc lang="en">
++Explicitly allow client to perform "insecure" TLS (https) requests.
++The server's certificate will not be verified against any certificate authorities.
++This option should be used with caution.
++</longdesc>
++<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="no_shared_storage" unique="0" required="0">
++<longdesc lang="en">
++Disable shared storage recovery for instances. Use at your own risk!
++</longdesc>
++<shortdesc lang="en">Disable shared storage recovery for instances</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="verbose" unique="0" required="0">
++<longdesc lang="en">
++Enable extra logging from the evacuation process
++</longdesc>
++<shortdesc lang="en">Enable debug logging</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="20" />
++<action name="stop"         timeout="20" />
++<action name="monitor"      timeout="600" interval="10" depth="0"/>
++<action name="validate-all" timeout="20" />
++<action name="meta-data"    timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++	ocf_log info "They use TERM to bring us down. No such luck."
++	return
++}
++
++evacuate_usage() {
++	cat <<END
++usage: $0 {start|stop|monitor|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++evacuate_stop() {
++    rm -f "$statefile"
++    return $OCF_SUCCESS
++}
++
++evacuate_start() {
++    touch "$statefile"
++    # Do not invoke monitor here so that the start timeout can be low
++    return $?
++}
++
++update_evacuation() {
++    attrd_updater -p -n evacuate -Q -N ${1} -v ${2}
++    arc=$?
++    if [ ${arc} != 0 ]; then
++	ocf_log warn "Cannot set evacuation state of ${1} to ${2}: ${arc}"
++    fi
++    return ${arc}
++}
++
++handle_evacuations() {
++    while [ $# -gt 0 ]; do
++	node=$1 
++	state=$2
++	shift; shift;
++	need_evacuate=0
++
++	case $state in
++	    "")
++	        ;;
++	    no)
++		ocf_log debug "$node is either fine or already handled"
++		;;
++	    yes) need_evacuate=1
++		;; 
++	    *@*)
++		where=$(echo $state | awk -F@ '{print $1}')
++		when=$(echo $state | awk -F@ '{print $2}')
++		now=$(date +%s)
++
++		if [ $(($now - $when)) -gt 60 ]; then
++		    ocf_log info "Processing partial evacuation of $node by $where at $when"
++		    need_evacuate=1
++		else
++		    # Give some time for any in-flight evacuations to either complete or fail
++		    # Nova won't react well if there are two overlapping requests 
++		    ocf_log info "Deferring processing partial evacuation of $node by $where at $when"
++		fi
++		;;
++	esac
++
++	if [ $need_evacuate = 1 ]; then
++	    fence_agent="fence_compute"
++
++	    if have_binary fence_evacuate
++	    then
++		fence_agent="fence_evacuate"
++	    fi
++
++	    ocf_log notice "Initiating evacuation of $node with $fence_agent"
++	    $fence_agent ${fence_options} -o status -n ${node}
++	    if [ $? = 1 ]; then
++		ocf_log info "Nova does not know about ${node}"
++		# Don't mark as "no" because perhaps nova is unavailable right now
++		continue
++	    fi
++
++	    update_evacuation ${node} "$(uname -n)@$(date +%s)"
++	    if [ $? != 0 ]; then
++		return $OCF_SUCCESS
++	    fi
++
++	    $fence_agent ${fence_options} -o off -n $node
++	    rc=$?
++
++	    if [ $rc = 0 ]; then
++		update_evacuation ${node} no
++		ocf_log notice "Completed evacuation of $node"
++	    else
++		ocf_log warn "Evacuation of $node failed: $rc"
++		update_evacuation ${node} yes
++	    fi
++	fi
++    done
++
++    return $OCF_SUCCESS
++}
++
++evacuate_monitor() {
++    if [ ! -f "$statefile" ]; then
++	return $OCF_NOT_RUNNING
++    fi
++
++    handle_evacuations $(
++        attrd_updater -n evacuate -A |
++            sed 's/ value=""/ value="no"/' |
++            tr '="' '  ' |
++            awk '{print $4" "$6}'
++    )
++    return $OCF_SUCCESS
++}
++
++evacuate_validate() {
++    rc=$OCF_SUCCESS
++    fence_options=""
++
++    
++    if ! have_binary fence_evacuate; then
++       check_binary fence_compute
++    fi
++
++    # Is the state directory writable? 
++    state_dir=$(dirname $statefile)
++    touch "$state_dir/$$"
++    if [ $? != 0 ]; then
++	ocf_exit_reason "Invalid state directory: $state_dir"
++	return $OCF_ERR_ARGS
++    fi
++    rm -f "$state_dir/$$"
++
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++	   ocf_exit_reason "auth_url not configured"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
++
++    if [ -z "${OCF_RESKEY_username}" ]; then
++	   ocf_exit_reason "username not configured"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -l ${OCF_RESKEY_username}"
++
++    if [ -z "${OCF_RESKEY_password}" ]; then
++	   ocf_exit_reason "password not configured"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -p ${OCF_RESKEY_password}"
++
++    if [ -z "${OCF_RESKEY_tenant_name}" ]; then
++	   ocf_exit_reason "tenant_name not configured"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
++
++    if [ -n "${OCF_RESKEY_user_domain}" ]; then
++        fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_project_domain}" ]; then
++        fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_region_name}" ]; then
++        fence_options="${fence_options} \
++            --region-name ${OCF_RESKEY_region_name}"
++    fi
++
++    if [ -n "${OCF_RESKEY_insecure}" ]; then
++        if ocf_is_true "${OCF_RESKEY_insecure}"; then
++            fence_options="${fence_options} --insecure"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
++	if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
++	    fence_options="${fence_options} --no-shared-storage"
++	fi
++    fi
++
++    if [ -n "${OCF_RESKEY_verbose}" ]; then
++        if ocf_is_true "${OCF_RESKEY_verbose}"; then
++            fence_options="${fence_options} --verbose"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
++	case ${OCF_RESKEY_endpoint_type} in
++	    adminURL|publicURL|internalURL) ;;
++	    *)
++		ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type} not valid. Use adminURL or publicURL or internalURL"
++		exit $OCF_ERR_CONFIGURED
++	    ;;
++	esac
++	fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
++    fi
++
++    if [ $rc != $OCF_SUCCESS ]; then
++	exit $rc
++    fi
++    return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++case $__OCF_ACTION in
++    start)
++	evacuate_validate
++	evacuate_start
++	;;
++    stop)
++	evacuate_stop
++	;;
++    monitor)
++	evacuate_validate
++	evacuate_monitor
++	;;
++    meta-data)
++	meta_data
++	exit $OCF_SUCCESS
++	;;
++    usage|help)
++	evacuate_usage
++	exit $OCF_SUCCESS
++	;;
++    validate-all)
++	exit $OCF_SUCCESS
++	;;
++    *)
++	evacuate_usage
++	exit $OCF_ERR_UNIMPLEMENTED
++	;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
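nova-compute-wait and NovaEvacuate coordinate through a single pacemaker node attribute named evacuate: empty or absent means the node was never fenced, yes means an evacuation is requested, no means it completed, and name@epoch records an in-flight evacuation claimed by node name at that timestamp (retried by another node after 60 seconds). A sketch of querying and decoding the attribute for one node, assuming attrd_updater's value="..." output format:

    node=compute-0   # hypothetical node name
    state=$(attrd_updater -p -n evacuate -N "$node" | sed -e 's/.*value=//' | tr -d '"')
    case "$state" in
        "")   echo "$node: never fenced" ;;
        yes)  echo "$node: evacuation pending" ;;
        no)   echo "$node: evacuated (or healthy)" ;;
        *@*)  echo "$node: claimed by ${state%@*} at epoch ${state#*@}" ;;
    esac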
diff --git a/SOURCES/python3-syntax-fixes.patch b/SOURCES/python3-syntax-fixes.patch
new file mode 100644
index 0000000..a34e312
--- /dev/null
+++ b/SOURCES/python3-syntax-fixes.patch
@@ -0,0 +1,705 @@
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py	2018-10-08 12:36:31.868765636 +0200
+@@ -52,8 +52,8 @@
+                             if not filename == None:
+                                 self.exportInstanceToFile(result,filename)
+                             else:
+-                                print 'Filename is needed'
+-                    except Exception,e:
++                                print('Filename is needed')
++                    except Exception as e:
+                         print(e)
+     def _optimizeResult(self,result):
+         keys = result.keys()
+@@ -81,9 +81,9 @@
+         fp = open(fileName,'w')
+         try :
+             fp.write(json.dumps(result,indent=4))
+-            print "success"
++            print("success")
+         except IOError:
+-            print "Error: can\'t find file or read data"
++            print("Error: can\'t find file or read data")
+         finally:
+             fp.close()
+ 			
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py	2018-10-08 12:36:53.882358851 +0200
+@@ -16,7 +16,7 @@
+         if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+             filename = keyValues['--filename'][0]
+         else:
+-            print "A profile is needed! please use \'--filename\' and add the profile name."
++            print("A profile is needed! please use \'--filename\' and add the profile name.")
+         return filename
+ 
+     def getInstanceCount(self,keyValues):
+@@ -25,7 +25,7 @@
+             if  keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
+                 count = keyValues['--instancecount'][0]
+             else:
+-                print "InstanceCount should be a positive number! The default value(1) will be used!"
++                print("InstanceCount should be a positive number! The default value(1) will be used!")
+         return int(count)
+     
+     def getSubOperations(self,cmd,operation):
+@@ -65,8 +65,8 @@
+                                     _newkeyValues["RegionId"] = newkeyValues["RegionId"]
+                                     self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest)
+                                 else:
+-                                    print "InstanceId  is need!"
+-                    except Exception,e:
++                                    print("InstanceId  is need!")
++                    except Exception as e:
+                         print(e)
+ 
+     def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False):
+@@ -81,7 +81,7 @@
+                         response.display_response("error", result, "json")
+                     else:
+                         response.display_response(extraOperation, result, "json")
+-                except Exception,e:
++                except Exception as e:
+                     print(e)
+ 
+ 
+@@ -127,7 +127,7 @@
+             '''
+             if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
+                 instanceId = data['InstanceId']
+-        except Exception,e:
++        except Exception as e:
+             pass
+         finally:
+             return instanceId
+@@ -156,5 +156,5 @@
+ if __name__ == "__main__":
+     handler = EcsImportHandler()
+     handler.getKVFromJson('ttt')
+-    print handler.getKVFromJson('ttt')
++    print(handler.getKVFromJson('ttt'))
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py	2018-10-08 12:37:08.373091088 +0200
+@@ -77,8 +77,8 @@
+                         if not filename == None:
+                             self.exportInstanceToFile(result,filename)
+                         else:
+-                            print 'Filename is needed'
+-                except Exception,e:
++                            print('Filename is needed')
++                except Exception as e:
+                     print(e)
+ 
+     def exportInstanceToFile(self, result, filename):
+@@ -96,9 +96,9 @@
+         fp = open(fileName,'w')
+         try :
+             fp.write(json.dumps(result,indent=4))
+-            print "success"
++            print("success")
+         except IOError:
+-            print "Error: can\'t find file or read data"
++            print("Error: can\'t find file or read data")
+         finally:
+             fp.close()
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py	2018-10-08 12:36:20.997966509 +0200
+@@ -26,7 +26,7 @@
+                 count = keyValues[import_count][0]
+             else:
+                 pass
+-                # print "InstanceCount should be a positive number! The default value(1) will be used!"
++                # print("InstanceCount should be a positive number! The default value(1) will be used!")
+         return int(count), "InstanceCount is "+str(count)+" created."
+     
+     def getSubOperations(self,cmd,operation):
+@@ -46,7 +46,7 @@
+                     if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues):
+                         newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
+                     newkeyValues["ClientToken"] = [self.random_str()]
+-                    # print newkeyValues.keys()
++                    # print(newkeyValues.keys())
+                     # return
+                     # self._setAttr(cmdInstance, newkeyValues) # set all key values in instance
+                     # self.apiHandler.changeEndPoint(cmdInstance, newkeyValues)
+@@ -58,7 +58,7 @@
+                             response.display_response("error", result, "json")
+                         else:
+                             response.display_response(item, result, "json")
+-                    except Exception,e:
++                    except Exception as e:
+                         print(e)
+    
+     def getKVFromJson(self,filename):
+@@ -77,7 +77,7 @@
+             fp = open(fileName,'r')
+             data=json.loads(fp.read())
+             keys = data.keys()
+-            # print keys, type(data['Items']['DBInstanceAttribute'][0])
++            # print(keys, type(data['Items']['DBInstanceAttribute'][0]))
+             # instanceAttribute = data['Items']['DBInstanceAttribute'][0]
+             items = data['Items']['DBInstanceAttribute'][0]
+             keys = items.keys()
+@@ -130,7 +130,7 @@
+ if __name__ == "__main__":
+     handler = RdsImportDBInstanceHandler()
+     # handler.getKVFromJson('ttt')
+-    # print handler.getKVFromJson('ttt')
+-    print handler.random_str()
++    # print(handler.getKVFromJson('ttt'))
++    print(handler.random_str())
+ 
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py	2018-10-08 12:11:19.743703469 +0200
+@@ -24,9 +24,9 @@
+                 _value = keyValues[ProfileCmd.name][0] # use the first value
+                 self.extensionCliHandler.setUserProfile(_value)
+             else:
+-                print "Do your forget profile name? please use \'--name\' and add the profile name."
++                print("Do your forget profile name? please use \'--name\' and add the profile name.")
+         else:
+-            print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?"
++            print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?")
+ 
+     def addProfileCmd(self, cmd, keyValues):
+         userKey = ''
+@@ -52,12 +52,12 @@
+                 finally:
+                     f.close()
+         else:
+-            print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?"
++            print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?")
+ 
+ 
+ if __name__ == "__main__":
+     handler = ProfileHandler()
+     handler.handleProfileCmd("useprofile", {'--name':["profile444"]})
+-    print handler.extensionCliHandler.getUserProfile()
++    print(handler.extensionCliHandler.getUserProfile())
+     handler.addProfileCmd("addProfile", {})
+-    handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
+\ No newline at end of file
++    handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py	2018-10-08 12:12:25.602486634 +0200
+@@ -24,14 +24,14 @@
+         self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
+ 
+     def showUsage(self):
+-        print "usage: aliyuncli <command> <operation> [options and parameters]"
++        print("usage: aliyuncli <command> <operation> [options and parameters]")
+ 
+     def showExample(self):
+-        print "show example"
++        print("show example")
+ 
+     def showCmdError(self, cmd):
+         self.showUsage()
+-        print "<aliyuncli> the valid command as follows:\n"
++        print("<aliyuncli> the valid command as follows:\n")
+         cmds = self.openApiDataHandler.getApiCmds()
+         self.printAsFormat(cmds)
+ 
+@@ -44,7 +44,7 @@
+                 error.printInFormat("Wrong version", "The sdk version is not exit.")
+                 return None
+         self.showUsage()
+-        print "["+cmd+"]","valid operations as follows:\n"
++        print("["+cmd+"]","valid operations as follows:\n")
+         operations = self.openApiDataHandler.getApiOperations(cmd, version)
+         extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd)
+         operations.update(extensions)
+@@ -56,8 +56,8 @@
+         self.printAsFormat(operations)
+ 
+     def showParameterError(self, cmd, operation, parameterlist):
+-        print 'usage: aliyuncli <command> <operation> [options and parameters]'
+-        print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n'
++        print('usage: aliyuncli <command> <operation> [options and parameters]')
++        print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n')
+         self.printAsFormat(parameterlist)
+         pass
+ 
+@@ -72,7 +72,7 @@
+             tmpList.append(item)
+             count = count+1
+             if len(tmpList) == 2:
+-                print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')
++                print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10'))
+                 tmpList = list()
+             if len(tmpList) == 1 and count == len(mlist):
+-                print tmpList[0]
+\ No newline at end of file
++                print(tmpList[0])
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py	2018-10-08 12:12:42.799168903 +0200
+@@ -91,7 +91,7 @@
+                             keyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
+                         #check necessaryArgs as:accesskeyid accesskeysecret regionId
+                         if not self.handler.hasNecessaryArgs(keyValues):
+-                            print 'accesskeyid/accesskeysecret/regionId is absence'
++                            print('accesskeyid/accesskeysecret/regionId is absence')
+                             return
+                         result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest)
+                         if result is None:
+@@ -102,7 +102,7 @@
+                         else:
+                             response.display_response(operation, result, outPutFormat,keyValues)
+                     else:
+-                        print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com'
++                        print('aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com')
+             elif self.handler.isAvailableExtensionOperation(cmd, operation):
+                 if self.args.__len__() >= 3 and self.args[2] == 'help':
+                     import commandConfigure
+@@ -125,7 +125,7 @@
+     def showInstanceAttribute(self, cmd, operation, classname):
+         if self.args.__len__() >= 3 and self.args[2] == "help":
+             self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname))
+-            #print self.completer._help_to_show_instance_attribute(cmdInstance)
++            #print(self.completer._help_to_show_instance_attribute(cmdInstance))
+             return True
+         return False
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py	2018-10-08 12:12:54.764947819 +0200
+@@ -141,7 +141,7 @@
+             _key = keyValues[keystr][0]
+         if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
+             _secret = keyValues[secretstr][0]
+-        #print "accesskeyid: ", _key , "accesskeysecret: ",_secret
++        #print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
+         return _key, _secret
+ 
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py	2018-10-08 12:13:23.672413710 +0200
+@@ -161,12 +161,12 @@
+ 
+ if __name__ == "__main__":
+     upgradeHandler = aliyunCliUpgradeHandler()
+-    # print upgradeHandler.getLatestTimeFromServer()
++    # print(upgradeHandler.getLatestTimeFromServer())
+     # flag, url = upgradeHandler.isNewVersionReady()
+     # if flag:
+-    #     print url
++    #     print(url)
+     # else:
+-    #     print "current version is latest one"
+-    # print "final test:"
+-    print upgradeHandler.checkForUpgrade()
+-    print upgradeHandler.handleUserChoice("N")
++    #     print("current version is latest one")
++    # print("final test:")
++    print(upgradeHandler.checkForUpgrade())
++    print(upgradeHandler.handleUserChoice("N"))
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py	2018-10-08 12:14:46.830877248 +0200
+@@ -127,35 +127,35 @@
+ 
+ # this api will show help page when user input aliyuncli help(-h or --help)
+     def showAliyunCliHelp(self):
+-        print color.bold+"ALIYUNCLI()"+color.end
+-        print color.bold+"\nNAME"+color.end
+-        print "\taliyuncli -"
+-        print color.bold+"\nDESCRIPTION"+color.end
+-        print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. "
+-        print color.bold+"\nSYNOPSIS"+color.end
+-        print "\taliyuncli <command> <operation> [options and parameters]"
+-        print "\n\taliyuncli has supported command completion now. The detail you can check our site."
+-        print color.bold+"OPTIONS"+color.end
+-        print color.bold+"\tconfigure"+color.end
+-        print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)"
+-        print color.bold+"\n\t--output"+color.end+" (string)"
+-        print "\n\tThe formatting style for command output."
+-        print "\n\to json"
+-        print "\n\to text"
+-        print "\n\to table"
++        print(color.bold+"ALIYUNCLI()"+color.end)
++        print(color.bold+"\nNAME"+color.end)
++        print("\taliyuncli -")
++        print(color.bold+"\nDESCRIPTION"+color.end)
++        print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ")
++        print(color.bold+"\nSYNOPSIS"+color.end)
++        print("\taliyuncli <command> <operation> [options and parameters]")
++        print("\n\taliyuncli has supported command completion now. The detail you can check our site.")
++        print(color.bold+"OPTIONS"+color.end)
++        print(color.bold+"\tconfigure"+color.end)
++        print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)")
++        print(color.bold+"\n\t--output"+color.end+" (string)")
++        print("\n\tThe formatting style for command output.")
++        print("\n\to json")
++        print("\n\to text")
++        print("\n\to table")
+         
+-        print color.bold+"\n\t--secure"+color.end
+-        print "\n\tMaking secure requests(HTTPS) to service"
++        print(color.bold+"\n\t--secure"+color.end)
++        print("\n\tMaking secure requests(HTTPS) to service")
+         
+-        print color.bold+"\nAVAILABLE SERVICES"+color.end
+-        print "\n\to ecs"
+-        print "\n\to ess"
+-        print "\n\to mts"
+-        print "\n\to rds"
+-        print "\n\to slb"
++        print(color.bold+"\nAVAILABLE SERVICES"+color.end)
++        print("\n\to ecs")
++        print("\n\to ess")
++        print("\n\to mts")
++        print("\n\to rds")
++        print("\n\to slb")
+ 
+     def showCurrentVersion(self):
+-        print self._version
++        print(self._version)
+ 
+     def findConfigureFilePath(self):
+         homePath = ""
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2018-10-08 12:16:00.008525187 +0200
+@@ -39,9 +39,9 @@
+ 
+ 
+ def oss_notice():
+-    print "OSS operation in aliyuncli is not supported."
+-    print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation."
+-    print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n"
++    print("OSS operation in aliyuncli is not supported.")
++    print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.")
++    print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n")
+ 
+     
+ try:
+@@ -391,22 +391,22 @@
+             return jsonobj
+         
+         except ImportError as e:
+-            print module, 'is not exist!'
++            print(module, 'is not exist!')
+             sys.exit(1)            
+ 
+         except ServerException as e:
+             error = cliError.error()
+             error.printInFormat(e.get_error_code(), e.get_error_msg())
+-            print "Detail of Server Exception:\n"
+-            print str(e)
++            print("Detail of Server Exception:\n")
++            print(str(e))
+             sys.exit(1)
+         
+         except ClientException as e:            
+-            # print e.get_error_msg()
++            # print(e.get_error_msg())
+             error = cliError.error()
+             error.printInFormat(e.get_error_code(), e.get_error_msg())
+-            print "Detail of Client Exception:\n"
+-            print str(e)
++            print("Detail of Client Exception:\n")
++            print(str(e))
+             sys.exit(1)
+ 
+     def getSetFuncs(self,classname):
+@@ -549,6 +549,6 @@
+ 
+ if __name__ == '__main__':
+     handler = aliyunOpenApiDataHandler()
+-    print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')
+-    print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances')
+-    print "###############",handler.getExtensionOperationsFromCmd('ecs')
++    print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance'))
++    print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances'))
++    print("###############",handler.getExtensionOperationsFromCmd('ecs'))
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py	2018-10-08 12:16:14.865250686 +0200
+@@ -44,7 +44,7 @@
+             filename=self.fileName
+             self.writeCmdVersionToFile(cmd,version,filename)
+         else:
+-            print "A argument is needed! please use \'--version\' and add the sdk version."
++            print("A argument is needed! please use \'--version\' and add the sdk version.")
+             return
+     def showVersions(self,cmd,operation,stream=None):
+         configureVersion='(not configure)'
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py	2018-10-08 12:17:34.763774477 +0200
+@@ -55,7 +55,7 @@
+                 # _mlist = self.rds.extensionOptions[self.rds.exportDBInstance]
+                 self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance])
+             if operation.lower() == self.rds.importDBInstance.lower():
+-                # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance])
++                # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance]))
+                 # parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance])
+                 self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance])
+ 
+@@ -89,8 +89,8 @@
+                         importInstance:['count','filename']}
+ 
+ if __name__ == '__main__':
+-    # print type(rds.extensionOperations)
+-    # print type(rds.extensionOptions)
+-    # print rds.extensionOptions['ll']
++    # print(type(rds.extensionOperations))
++    # print(type(rds.extensionOptions))
++    # print(rds.extensionOptions['ll'])
+     configure = commandConfigure()
+-    print configure.showExtensionOperationHelp("ecs", "ExportInstance")
++    print(configure.showExtensionOperationHelp("ecs", "ExportInstance"))
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2018-10-08 12:17:59.282322043 +0200
+@@ -577,7 +577,7 @@
+                 operation = operations[i].strip()
+                 self._getKeyFromSection(profilename,operation)
+         else:
+-            print 'The correct usage:aliyuncli configure get key --profile profilename'
++            print('The correct usage:aliyuncli configure get key --profile profilename')
+             return
+ 
+     def _getKeyFromSection(self,profilename,key):
+@@ -591,7 +591,7 @@
+         elif key in _WRITE_TO_CONFIG_FILE :
+             self._getKeyFromFile(config_filename,sectionName,key)
+         else:
+-            print key,'=','None'
++            print(key,'=','None')
+     def _getKeyFromFile(self,filename,section,key):
+         if  os.path.isfile(filename):
+             with open(filename, 'r') as f:
+@@ -600,9 +600,9 @@
+                 start =  self._configWriter.hasSectionName(section,contents)[1]
+                 end = self._configWriter._getSectionEnd(start,contents)
+                 value = self._configWriter._getValueInSlice(start,end,key,contents)
+-                print key,'=',value
++                print(key,'=',value)
+         else:
+-            print key,'=None'
++            print(key,'=None')
+ 
+ 
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py	2018-10-08 12:18:25.178844179 +0200
+@@ -2,7 +2,7 @@
+ 
+ def handleEndPoint(cmd,operation,keyValues):
+     if not hasNecessaryArgs(keyValues):
+-        print 'RegionId/EndPoint is absence'
++        print('RegionId/EndPoint is absence')
+         return
+     if cmd is not None:
+         cmd = cmd.capitalize()
+@@ -25,7 +25,7 @@
+         from aliyunsdkcore.profile.region_provider import modify_point
+         modify_point(cmd,regionId,endPoint)
+     except Exception as e:
+-        print e
++        print(e)
+         pass
+ 
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py	2018-10-08 12:18:45.458469966 +0200
+@@ -111,14 +111,14 @@
+     if os.path.isfile(cfgfile):
+         ans = raw_input('File existed. Do you wish to overwrite it?(y/n)')
+         if ans.lower() != 'y':
+-            print 'Answer is No. Quit now'
++            print('Answer is No. Quit now')
+             return
+     with open(cfgfile, 'w+') as f:
+         config.write(f)
+-    print 'Your configuration is saved to %s.' % cfgfile
++    print('Your configuration is saved to %s.' % cfgfile)
+ 
+ def cmd_help(args):
+-    print HELP
++    print(HELP)
+ 
+ def add_config(parser):
+     parser.add_argument('--host', type=str, help='service host')
+@@ -161,7 +161,7 @@
+     return CMD_LIST.keys()
+ def handleOas(pars=None):
+     if  pars is None:
+-        print HELP
++        print(HELP)
+         sys.exit(0)
+     parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter)
+ 
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2018-10-08 12:18:59.713206928 +0200
+@@ -61,7 +61,7 @@
+             data = f.read()
+             return data
+     except (OSError, IOError) as e:
+-        print e
++        print(e)
+ def _getParamFromUrl(prefix,value,mode):
+ 
+     req = urllib2.Request(value)
+@@ -74,7 +74,7 @@
+             errorMsg='Get the wrong content'
+             errorClass.printInFormat(response.getcode(), errorMsg)
+     except Exception as e:
+-        print e
++        print(e)
+ 
+ PrefixMap = {'file://': _getParamFromFile,
+              'fileb://': _getParamFromFile
+@@ -86,4 +86,4 @@
+              'fileb://': {'mode': 'rb'},
+              #'http://': {},
+              #'https://': {}
+-            }
+\ No newline at end of file
++            }
+diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py
+--- a/bundled/aliyun/colorama/demos/demo07.py	2015-01-06 11:41:47.000000000 +0100
++++ b/bundled/aliyun/colorama/demos/demo07.py	2018-10-08 12:20:25.598622106 +0200
+@@ -16,10 +16,10 @@
+     3a4
+     """
+     colorama.init()
+-    print "aaa"
+-    print "aaa"
+-    print "aaa"
+-    print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4"
++    print("aaa")
++    print("aaa")
++    print("aaa")
++    print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4")
+ 
+ 
+ if __name__ == '__main__':
+diff -uNr a/bundled/aliyun/pycryptodome/Doc/conf.py b/bundled/aliyun/pycryptodome/Doc/conf.py
+--- a/bundled/aliyun/pycryptodome/Doc/conf.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/Doc/conf.py	2018-10-08 12:08:11.122188094 +0200
+@@ -15,7 +15,7 @@
+ 
+ # Modules to document with autodoc are in another directory
+ sys.path.insert(0, os.path.abspath('../lib'))
+-print sys.path
++print(sys.path)
+ 
+ # Mock existance of native modules
+ from Crypto.Util import _raw_api
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py	2018-10-08 12:08:11.123188075 +0200
+@@ -302,7 +302,7 @@
+     randfunc = kwargs.pop("randfunc", None)
+     prime_filter = kwargs.pop("prime_filter", lambda x: True)
+     if kwargs:
+-        print "Unknown parameters:", kwargs.keys()
++        print("Unknown parameters:", kwargs.keys())
+ 
+     if exact_bits is None:
+         raise ValueError("Missing exact_bits parameter")
+@@ -341,7 +341,7 @@
+     exact_bits = kwargs.pop("exact_bits", None)
+     randfunc = kwargs.pop("randfunc", None)
+     if kwargs:
+-        print "Unknown parameters:", kwargs.keys()
++        print("Unknown parameters:", kwargs.keys())
+ 
+     if randfunc is None:
+         randfunc = Random.new().read
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py	2018-10-08 12:08:11.124188057 +0200
+@@ -912,4 +912,4 @@
+     count = 30
+     for x in xrange(count):
+         _ = point * d
+-    print (time.time() - start) / count * 1000, "ms"
++    print((time.time() - start) / count * 1000, "ms")
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py	2018-10-08 12:08:11.124188057 +0200
+@@ -1276,7 +1276,7 @@
+         tests += make_block_tests(AES, "AESNI", test_data, {'use_aesni': True})
+         tests += [ TestMultipleBlocks(True) ]
+     else:
+-        print "Skipping AESNI tests"
++        print("Skipping AESNI tests")
+     return tests
+ 
+ if __name__ == '__main__':
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py	2018-10-08 12:08:11.125188038 +0200
+@@ -894,7 +894,7 @@
+         if config.get('slow_tests'):
+             tests += list_test_cases(NISTTestVectorsGCM_no_clmul)
+     else:
+-        print "Skipping test of PCLMULDQD in AES GCM"
++        print("Skipping test of PCLMULDQD in AES GCM")
+ 
+     return tests
+ 
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py	2018-10-08 12:08:11.125188038 +0200
+@@ -39,7 +39,7 @@
+     """Convert a text string with bytes in hex form to a byte string"""
+     clean = b(rws(t))
+     if len(clean)%2 == 1:
+-        print clean
++        print(clean)
+         raise ValueError("Even number of characters expected")
+     return a2b_hex(clean)
+ 
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py	2018-10-08 12:08:11.126188020 +0200
+@@ -25,11 +25,11 @@
+ 
+ slow_tests = not "--skip-slow-tests" in sys.argv
+ if not slow_tests:
+-    print "Skipping slow tests"
++    print("Skipping slow tests")
+ 
+ wycheproof_warnings = "--wycheproof-warnings" in sys.argv
+ if wycheproof_warnings:
+-    print "Printing Wycheproof warnings"
++    print("Printing Wycheproof warnings")
+ 
+ config = {'slow_tests' : slow_tests, 'wycheproof_warnings' : wycheproof_warnings }
+ SelfTest.run(stream=sys.stdout, verbosity=1, config=config)
+diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py
+--- a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py	2018-07-10 21:32:46.000000000 +0200
++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py	2018-10-08 12:08:11.126188020 +0200
+@@ -369,13 +369,13 @@
+            ]
+ 
+     for key, words in data:
+-        print 'Trying key', key
++        print('Trying key', key)
+         key=binascii.a2b_hex(key)
+         w2=key_to_english(key)
+         if w2!=words:
+-            print 'key_to_english fails on key', repr(key), ', producing', str(w2)
++            print('key_to_english fails on key', repr(key), ', producing', str(w2))
+         k2=english_to_key(words)
+         if k2!=key:
+-            print 'english_to_key fails on key', repr(key), ', producing', repr(k2)
++            print('english_to_key fails on key', repr(key), ', producing', repr(k2))
diff --git a/SOURCES/timeout-interval-add-s-suffix.patch b/SOURCES/timeout-interval-add-s-suffix.patch
new file mode 100644
index 0000000..74f584d
--- /dev/null
+++ b/SOURCES/timeout-interval-add-s-suffix.patch
@@ -0,0 +1,161 @@
+From 1c23bbf9700eda44d0d64f34bcb538d7b9e4f6f6 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Tue, 4 Sep 2018 09:19:59 +0200
+Subject: [PATCH] timeout/interval: add "s" suffix where it's missing
+
+---
+ .gitignore                    |  1 +
+ heartbeat/SAPInstance         |  2 +-
+ heartbeat/aliyun-vpc-move-ip  | 10 +++++-----
+ heartbeat/gcp-vpc-move-vip.in | 10 +++++-----
+ heartbeat/mariadb.in          | 22 +++++++++++-----------
+ heartbeat/sybaseASE.in        | 32 ++++++++++++++++----------------
+ 6 files changed, 39 insertions(+), 38 deletions(-)
+
+diff --git a/.gitignore b/.gitignore
+index bbff032c3..3a9be36e5 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -44,6 +44,7 @@ heartbeat/ocf-directories
+ heartbeat/ocf-shellfuncs
+ heartbeat/send_ua
+ heartbeat/shellfuncs
++heartbeat/*.pyc
+ include/agent_config.h
+ include/config.h
+ include/config.h.in
+diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+index e27952adb..ed446c9c1 100755
+--- a/heartbeat/aliyun-vpc-move-ip
++++ b/heartbeat/aliyun-vpc-move-ip
+@@ -155,11 +155,11 @@ Valid Aliyun CLI profile name
+ </parameter>
+ </parameters>
+ <actions>
+-<action name="start" timeout="180" />
+-<action name="stop" timeout="180" />
+-<action name="monitor" depth="0" timeout="30" interval="30" />
+-<action name="validate-all" timeout="5" />
+-<action name="meta-data" timeout="5" />
++<action name="start" timeout="180s" />
++<action name="stop" timeout="180s" />
++<action name="monitor" depth="0" timeout="30s" interval="30s" />
++<action name="validate-all" timeout="5s" />
++<action name="meta-data" timeout="5s" />
+ </actions>
+ </resource-agent>
+ END
+diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+index ba61193b6..31d84643a 100755
+--- a/heartbeat/gcp-vpc-move-vip.in
++++ b/heartbeat/gcp-vpc-move-vip.in
+@@ -77,11 +77,11 @@ METADATA = \
+     </parameter>
+   </parameters>
+   <actions>
+-    <action name="start" timeout="300" />
+-    <action name="stop" timeout="15" />
+-    <action name="monitor" timeout="15" interval="60" depth="0" />
+-    <action name="meta-data" timeout="15" />
+-    <action name="validate-all" timeout="15" />
++    <action name="start" timeout="300s" />
++    <action name="stop" timeout="15s" />
++    <action name="monitor" timeout="15s" interval="60s" depth="0" />
++    <action name="meta-data" timeout="15s" />
++    <action name="validate-all" timeout="15s" />
+   </actions>
+ </resource-agent>'''
+ 
+diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in
+index 860fea7fd..c1969d70e 100644
+--- a/heartbeat/mariadb.in
++++ b/heartbeat/mariadb.in
+@@ -250,17 +250,17 @@ The port on which the Master MariaDB instance is listening.
+ </parameters>
+ 
+ <actions>
+-<action name="start" timeout="120" />
+-<action name="stop" timeout="120" />
+-<action name="status" timeout="60" />
+-<action name="monitor" depth="0" timeout="30" interval="20" />
+-<action name="monitor" role="Master" depth="0" timeout="30" interval="10" />
+-<action name="monitor" role="Slave" depth="0" timeout="30" interval="30" />
+-<action name="promote" timeout="120" />
+-<action name="demote" timeout="120" />
+-<action name="notify" timeout="90" />
+-<action name="validate-all" timeout="5" />
+-<action name="meta-data" timeout="5" />
++<action name="start" timeout="120s" />
++<action name="stop" timeout="120s" />
++<action name="status" timeout="60s" />
++<action name="monitor" depth="0" timeout="30s" interval="20s" />
++<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
++<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
++<action name="promote" timeout="120s" />
++<action name="demote" timeout="120s" />
++<action name="notify" timeout="90s" />
++<action name="validate-all" timeout="5s" />
++<action name="meta-data" timeout="5s" />
+ </actions>
+ </resource-agent>
+ END
+diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in
+index a4a0b7a0c..b4809ea23 100755
+--- a/heartbeat/sybaseASE.in
++++ b/heartbeat/sybaseASE.in
+@@ -26,19 +26,19 @@
+ #         /$sybase_home/$sybase_ase/install/RUN_$server_name
+ #
+ # (2) You can customize the interval value in the meta-data section if needed:
+-#                <action name="start" timeout="300" />
+-#                <action name="stop" timeout="300" />
++#                <action name="start" timeout="300s" />
++#                <action name="stop" timeout="300s" />
+ #
+ #                <!-- Checks to see if it''s mounted in the right place -->
+-#                <action name="status"  interval="30" timeout="100" />
+-#                <action name="monitor" interval="30" timeout="100" />
++#                <action name="status"  interval="30s" timeout="100s" />
++#                <action name="monitor" interval="30s" timeout="100s" />
+ #
+ #                <!--Checks to see if we can read from the mountpoint -->
+-#                <action name="status" depth="10" timeout="100" interval="120" />
+-#                <action name="monitor" depth="10" timeout="100" interval="120" />
++#                <action name="status" depth="10" timeout="100s" interval="120s" />
++#                <action name="monitor" depth="10" timeout="100s" interval="120s" />
+ #
+-#                <action name="meta-data" timeout="5" />
+-#                <action name="validate-all" timeout="5" />
++#                <action name="meta-data" timeout="5s" />
++#                <action name="validate-all" timeout="5s" />
+ #     The timeout value is not supported by Redhat in RHCS5.0.
+ #
+ 
+@@ -226,19 +226,19 @@ meta_data()
+ 	</parameters>
+ 
+ 	<actions>
+-		<action name="start" timeout="300" />
+-		<action name="stop" timeout="300" />
++		<action name="start" timeout="300s" />
++		<action name="stop" timeout="300s" />
+ 
+ 		<!-- Checks to see if it''s mounted in the right place -->
+-		<action name="status"  interval="30" timeout="100" />
+-		<action name="monitor" interval="30" timeout="100" />
++		<action name="status"  interval="30s" timeout="100s" />
++		<action name="monitor" interval="30s" timeout="100s" />
+ 
+ 		<!--Checks to see if we can read from the mountpoint -->
+-		<action name="status" depth="10" timeout="100" interval="120" />
+-		<action name="monitor" depth="10" timeout="100" interval="120" />
++		<action name="status" depth="10" timeout="100s" interval="120s" />
++		<action name="monitor" depth="10" timeout="100s" interval="120s" />
+ 
+-		<action name="meta-data" timeout="5" />
+-		<action name="validate-all" timeout="5" />
++		<action name="meta-data" timeout="5s" />
++		<action name="validate-all" timeout="5s" />
+ 	</actions>
+ </resource-agent>
+ EOT
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
new file mode 100644
index 0000000..8b4b42e
--- /dev/null
+++ b/SPECS/resource-agents.spec
@@ -0,0 +1,1453 @@
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+#
+
+# Below is the script used to generate a new source tarball
+# from the resource-agents upstream git repo.
+#
+# TAG=$(git log --pretty="format:%h" -n 1)
+# distdir="ClusterLabs-resource-agents-${TAG}"
+# TARFILE="${distdir}.tar.gz"
+# rm -rf $TARFILE $distdir
+# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE
+#
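+# The short commit hash printed by "git log" above is the value used for
+# %%upstream_version below (e711383f), so the generated tarball name
+# matches Source0.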
+
+%global upstream_prefix ClusterLabs-resource-agents
+%global upstream_version e711383f
+
+# bundles
+%global bundled_lib_dir		bundled
+## google cloud
+# google-cloud-sdk bundle
+%global googlecloudsdk		google-cloud-sdk
+%global googlecloudsdk_version	241.0.0
+%global googlecloudsdk_dir	%{bundled_lib_dir}/gcp/%{googlecloudsdk}
+# python-pyroute2 bundle
+%global pyroute2		pyroute2
+%global pyroute2_version	0.4.13
+%global pyroute2_dir		%{bundled_lib_dir}/gcp/%{pyroute2}
+## alibaba cloud
+# python-colorama bundle
+%global colorama		colorama
+%global colorama_version	0.3.3
+%global colorama_dir		%{bundled_lib_dir}/aliyun/%{colorama}
+# python-pycryptodome bundle
+%global pycryptodome		pycryptodome
+%global pycryptodome_version	3.6.4
+%global pycryptodome_dir	%{bundled_lib_dir}/aliyun/%{pycryptodome}
+# python-aliyun-sdk-core bundle
+%global aliyunsdkcore		aliyun-python-sdk-core
+%global aliyunsdkcore_version	2.13.1
+%global aliyunsdkcore_dir	%{bundled_lib_dir}/aliyun/%{aliyunsdkcore}
+# python-aliyun-sdk-ecs bundle
+%global aliyunsdkecs		aliyun-python-sdk-ecs
+%global aliyunsdkecs_version	4.9.3
+%global aliyunsdkecs_dir	%{bundled_lib_dir}/aliyun/%{aliyunsdkecs}
+# python-aliyun-sdk-vpc bundle
+%global aliyunsdkvpc		aliyun-python-sdk-vpc
+%global aliyunsdkvpc_version	3.0.2
+%global aliyunsdkvpc_dir	%{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}
+# aliyuncli bundle
+%global aliyuncli		aliyun-cli
+%global aliyuncli_version	2.1.10
+%global aliyuncli_dir		%{bundled_lib_dir}/aliyun/%{aliyuncli}
+
+# determine the ras-set to process based on configure invocation
+%bcond_with rgmanager
+%bcond_without linuxha
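+# (%%bcond_with defines a "--with X" option that defaults to off, while
+# %%bcond_without defines a "--without X" option that defaults to on, so
+# by default only the linux-ha agent set is built)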
+
+Name:		resource-agents
+Summary:	Open Source HA Reusable Cluster Resource Scripts
+Version:	4.1.1
+Release:	27%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
+License:	GPLv2+ and LGPLv2+
+URL:		https://github.com/ClusterLabs/resource-agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group:		System Environment/Base
+%else
+Group:		Productivity/Clustering/HA
+%endif
+Source0:	%{upstream_prefix}-%{upstream_version}.tar.gz
+Source1:	%{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz
+Source2:	%{pyroute2}-%{pyroute2_version}.tar.gz
+Source3:	%{colorama}-%{colorama_version}.tar.gz
+Source4:	%{pycryptodome}-%{pycryptodome_version}.tar.gz
+Source5:	%{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz
+Source6:	%{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz
+Source7:	%{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz
+Source8:	%{aliyuncli}-%{aliyuncli_version}.tar.gz
+Patch0:		nova-compute-wait-NovaEvacuate.patch
+Patch1:		LVM-volume_group_check_only.patch
+Patch2:		bz1552330-vdo-vol.patch
+Patch3:		IPaddr2-monitor_retries.patch
+Patch4:		VirtualDomain-stateless-support.patch
+Patch5:		1-configure-add-python-path-detection.patch
+Patch6:		2-ci-skip-python-agents-in-shellcheck.patch
+Patch7:		3-gcp-vpc-move-vip.patch
+Patch8:		4-gcp-vpc-move-route.patch
+Patch9:		5-python-library.patch
+Patch10:	dont-use-ocf_attribute_target-for-metadata.patch
+Patch11:	LVM-activate-fix-issue-with-dashes.patch
+Patch12:	6-gcp-move-vip-filter-aggregatedlist.patch
+Patch13:	aliyun-vpc-move-ip-1.patch
+Patch14:	aliyun-vpc-move-ip-2-fixes.patch
+Patch15:	aliyun-vpc-move-ip-3-fix-manpage.patch
+Patch16:	build-add-missing-manpages.patch
+Patch17:	findif-only-match-lines-with-netmasks.patch
+Patch18:	7-gcp-stackdriver-logging-note.patch
+Patch19:	LVM-fix-missing-dash.patch
+Patch20:	lvmlockd-add-cmirrord-support.patch
+Patch21:	LVM-activate-1-warn-vg_access_mode.patch
+Patch22:	bz1607607-podman.patch
+Patch23:	aliyun-vpc-move-ip-5-improve-metadata-manpage.patch
+Patch24:	aws-vpc-move-ip-1-avoid-false-positive-monitor.patch
+Patch25:	aws-vpc-move-ip-2-avoid-false-positive-monitor.patch
+Patch26:	LVM-activate-2-parameters-access-mode-fixes.patch
+Patch27:	timeout-interval-add-s-suffix.patch
+Patch28:	metadata-add-missing-s-suffix.patch
+Patch29:	bz1631291-systemd-tmpfiles-configurable-path.patch
+Patch30:	nfsserver-mount-rpc_pipefs.patch
+Patch31:	bz1635785-redis-pidof-basename.patch
+Patch32:	bz1642027-nfsserver-var-lib-nfs-fix.patch
+Patch33:	bz1662466-vdo-vol-fix-monitor-action.patch
+Patch34:	bz1643307-LVM-activate-dont-fail-initial-probe.patch
+Patch35:	bz1658664-LVM-activate-dont-require-locking_type.patch
+Patch36:	bz1689184-Squid-1-fix-pidfile-issue.patch
+Patch37:	bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch
+Patch38:	bz1667414-2-LVM-activate-only-count-volumes.patch
+Patch39:	bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch
+Patch40:	bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch
+Patch41:	bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch
+Patch42:	bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch
+Patch43:	bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch
+Patch44:	bz1669140-Route-make-family-parameter-optional.patch
+Patch45:	bz1683548-redis-mute-password-warning.patch
+Patch46:	bz1692413-iSCSILogicalUnit-create-iqn-when-it-doesnt-exist.patch
+Patch47:	bz1689184-Squid-2-dont-run-pgrep-without-PID.patch
+Patch48:	bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch
+Patch49:	bz1707969-2-ocf_is_true-add-True-to-regexp.patch
+Patch50:	bz1717759-Filesystem-remove-notify-action-from-metadata.patch
+Patch51:	bz1719684-dhcpd-keep-SELinux-context-chroot.patch
+# bundle patches
+Patch1000:	7-gcp-bundled.patch
+Patch1001:	8-google-cloud-sdk-fixes.patch
+Patch1002:	9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch
+Patch1003:	10-gcloud-support-info.patch
+Patch1004:	bz1691456-gcloud-dont-detect-python2.patch
+Patch1005:	aliyun-vpc-move-ip-4-bundled.patch
+Patch1006:	python3-syntax-fixes.patch
+Patch1007:	aliyuncli-python3-fixes.patch
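+# (patches numbered 1000 and up only touch the bundled sources; they are
+# applied in the prep section after the corresponding tarballs have been
+# unpacked)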
+
+Obsoletes:	heartbeat-resources <= %{version}
+Provides:	heartbeat-resources = %{version}
+
+# Build dependencies
+BuildRequires: automake autoconf gcc
+BuildRequires: perl-interpreter python3-devel
+BuildRequires: libxslt glib2-devel
+BuildRequires: systemd
+BuildRequires: which
+
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+#BuildRequires: cluster-glue-libs-devel
+BuildRequires: docbook-style-xsl docbook-dtds
+%if 0%{?rhel} == 0
+BuildRequires: libnet-devel
+%endif
+%endif
+
+## Runtime deps
+# system tools shared by several agents
+Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk
+Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat
+Requires: /usr/sbin/fuser /bin/mount
+
+# Filesystem / fs.sh / netfs.sh
+Requires: /sbin/fsck
+Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
+Requires: /usr/sbin/fsck.xfs
+Requires: /sbin/mount.nfs /sbin/mount.nfs4 /usr/sbin/mount.cifs
+
+# IPaddr2
+Requires: /sbin/ip
+
+# LVM / lvm.sh
+Requires: /usr/sbin/lvm
+
+# nfsserver / netfs.sh
+Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd
+
+# ocf.py
+Requires: python3
+
+# rgmanager
+%if %{with rgmanager}
+# ip.sh
+Requires: /usr/sbin/ethtool
+Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
+
+# nfsexport.sh
+Requires: /sbin/findfs
+Requires: /sbin/quotaon /sbin/quotacheck
+%endif
+
+%description
+A set of scripts that interface with several services so they can
+operate in a High Availability environment, for both the Pacemaker
+and rgmanager service managers.
+
+%ifarch x86_64
+%package aliyun
+License:	GPLv2+ and LGPLv2+ and ASL 2.0 and BSD and MIT
+Summary:	Alibaba Cloud (Aliyun) resource agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group:		System Environment/Base
+%else
+Group:		Productivity/Clustering/HA
+%endif
+Requires:	%{name} = %{version}-%{release}
+Requires:	python3-jmespath >= 0.9.0
+Requires:	python3-urllib3
+# python-colorama bundle
+Provides:	bundled(python-%{colorama}) = %{colorama_version}
+# python-pycryptodome bundle
+Provides:	bundled(python-%{pycryptodome}) = %{pycryptodome_version}
+# python-aliyun-sdk-core bundle
+Provides:	bundled(python-aliyun-sdk-core) = %{aliyunsdkcore_version}
+# python-aliyun-sdk-ecs bundle
+Provides:	bundled(python-aliyun-sdk-ecs) = %{aliyunsdkecs_version}
+# python-aliyun-sdk-vpc bundle
+Provides:	bundled(python-aliyun-sdk-vpc) = %{aliyunsdkvpc_version}
+# aliyuncli bundle
+Provides:	bundled(aliyuncli) = %{aliyuncli_version}
+
+%description aliyun
+The Alibaba Cloud (Aliyun) resource agents allow Alibaba Cloud
+(Aliyun) instances to be managed in a cluster environment.
+%endif
+
+%ifarch x86_64
+%package gcp
+License:	GPLv2+ and LGPLv2+ and BSD and ASL 2.0 and MIT and Python
+Summary:	Google Cloud Platform resource agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group:		System Environment/Base
+%else
+Group:		Productivity/Clustering/HA
+%endif
+Requires:	%{name} = %{version}-%{release}
+Requires:	python3-google-api-client
+# google-cloud-sdk bundle
+Requires:	python3-cryptography >= 1.7.2
+Requires:	python3-dateutil >= 2.6.0
+Provides:	bundled(%{googlecloudsdk}) = %{googlecloudsdk_version}
+Provides:	bundled(python-antlr3) = 3.1.1
+Provides:	bundled(python-appdirs) = 1.4.0
+Provides:	bundled(python-argparse) = 1.2.1
+Provides:	bundled(python-chardet) = 2.3.0
+Provides:	bundled(python-dulwich) = 0.10.2
+Provides:	bundled(python-ipaddress) = 1.0.16
+Provides:	bundled(python-ipaddr) = 2.1.11
+Provides:	bundled(python-mako) = 1.0.7
+Provides:	bundled(python-oauth2client) = 3.0.0
+Provides:	bundled(python-prompt_toolkit) = 1.0.13
+Provides:	bundled(python-pyasn1) = 0.4.2
+Provides:	bundled(python-pyasn1_modules) = 0.2.1
+Provides:	bundled(python-pygments) = 2.2.0
+Provides:	bundled(python-pyparsing) = 2.1.10
+Provides:	bundled(python-requests) = 2.10.0
+Provides:	bundled(python-six) = 1.11.0
+Provides:	bundled(python-uritemplate) = 3.0.0
+Provides:	bundled(python-urllib3) = 1.15.1
+Provides:	bundled(python-websocket) = 0.47.0
+Provides:	bundled(python-yaml) = 3.12
+# python-pyroute2 bundle
+Provides:	bundled(%{pyroute2}) = %{pyroute2_version}
+
+%description gcp
+The Google Cloud Platform resource agents allow Google Cloud
+Platform instances to be managed in a cluster environment.
+%endif
+
+%prep
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
+%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
+exit 1
+%endif
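+# (in the three setup invocations below, -q unpacks Source0 quietly, -T
+# skips the default unpack, -D keeps the existing build directory, and
+# -a N additionally unpacks Source N inside it)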
+%setup -q -n %{upstream_prefix}-%{upstream_version}
+%setup -T -D -a 1 -n %{upstream_prefix}-%{upstream_version}
+%setup -T -D -a 2 -n %{upstream_prefix}-%{upstream_version}
+%patch0 -p1
+%patch1 -p1
+%patch2 -p1
+%patch3 -p1
+%patch4 -p1
+%patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch16 -p1
+%patch17 -p1
+%patch18 -p1
+%patch19 -p1
+%patch20 -p1
+%patch21 -p1
+%patch22 -p1
+%patch23 -p1
+%patch24 -p1
+%patch25 -p1
+%patch26 -p1
+%patch27 -p1
+%patch28 -p1
+%patch29 -p1
+%patch30 -p1
+%patch31 -p1
+%patch32 -p1
+%patch33 -p1
+%patch34 -p1
+%patch35 -p1
+%patch36 -p1
+%patch37 -p1
+%patch38 -p1
+%patch39 -p1
+%patch40 -p1 -F2
+%patch41 -p1
+%patch42 -p1
+%patch43 -p1
+%patch44 -p1
+%patch45 -p1
+%patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
+
+chmod 755 heartbeat/nova-compute-wait
+chmod 755 heartbeat/NovaEvacuate
+
+# bundles
+mkdir -p %{bundled_lib_dir}/gcp
+mkdir -p %{bundled_lib_dir}/aliyun
+
+# google-cloud-sdk bundle
+%ifarch x86_64
+tar -xzf %SOURCE1 -C %{bundled_lib_dir}/gcp
+# gcp*: append the bundled directory to the search path and use gcloud-ra
+%patch1000 -p1
+# google-cloud-sdk fixes
+%patch1001 -p1
+# replace python-rsa with python-cryptography
+%patch1002 -p1
+# gcloud support info
+%patch1003 -p1
+# gcloud remove python 2 detection
+%patch1004 -p1
+# rename gcloud
+mv %{googlecloudsdk_dir}/bin/gcloud %{googlecloudsdk_dir}/bin/gcloud-ra
+# keep googleapiclient
+mv %{googlecloudsdk_dir}/platform/bq/third_party/googleapiclient %{googlecloudsdk_dir}/lib/third_party
+# only keep gcloud
+rm -rf %{googlecloudsdk_dir}/bin/{bootstrapping,bq,dev_appserver.py,docker-credential-gcloud,endpointscfg.py,git-credential-gcloud.sh,gsutil,java_dev_appserver.sh} %{googlecloudsdk_dir}/{completion.*,deb,install.*,path.*,platform,properties,RELEASE_NOTES,rpm,VERSION}
+# remove Python 2 code
+rm -rf %{googlecloudsdk_dir}/lib/third_party/*/python2
+# remove python-rsa
+rm -rf %{googlecloudsdk_dir}/lib/third_party/rsa
+# remove grpc
+rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc
+# remove dateutil
+rm -rf %{googlecloudsdk_dir}/lib/third_party/dateutil
+# docs/licenses
+cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README
+cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt
+cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/httplib2/LICENSE %{googlecloudsdk}_httplib2_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/pyu2f/LICENSE %{googlecloudsdk}_pyu2f_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/LICENSE %{googlecloudsdk}_ml_sdk_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/pkg/LICENSE %{googlecloudsdk}_pkg_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/ipaddr/LICENSE %{googlecloudsdk}_ipaddr_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/urllib3/LICENSE %{googlecloudsdk}_urllib3_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/ipaddress/LICENSE %{googlecloudsdk}_ipaddress_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/requests/LICENSE %{googlecloudsdk}_requests_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/docker/LICENSE %{googlecloudsdk}_docker_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/monotonic/LICENSE %{googlecloudsdk}_monotonic_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/websocket/LICENSE %{googlecloudsdk}_websocket_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/fasteners/LICENSE %{googlecloudsdk}_fasteners_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/wcwidth/LICENSE %{googlecloudsdk}_wcwidth_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/pygments/LICENSE %{googlecloudsdk}_pygments_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/oauth2client/LICENSE %{googlecloudsdk}_oauth2client_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/uritemplate/LICENSE %{googlecloudsdk}_uritemplate_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/dulwich/LICENSE %{googlecloudsdk}_dulwich_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/mako/LICENSE %{googlecloudsdk}_mako_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/packaging/LICENSE %{googlecloudsdk}_packaging_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/socks/LICENSE %{googlecloudsdk}_socks_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/antlr3/LICENSE %{googlecloudsdk}_antlr3_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/argparse/LICENSE.txt %{googlecloudsdk}_argparse_LICENSE.txt
+cp %{googlecloudsdk_dir}/lib/third_party/chardet/LICENSE %{googlecloudsdk}_chardet_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/ruamel/LICENSE %{googlecloudsdk}_ruamel_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/appdirs/LICENSE %{googlecloudsdk}_appdirs_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/argcomplete/LICENSE %{googlecloudsdk}_argcomplete_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/pyasn1_modules/LICENSE %{googlecloudsdk}_pyasn1_modules_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/setuptools/LICENSE %{googlecloudsdk}_setuptools_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/google/LICENSE %{googlecloudsdk}_google_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/google/protobuf/LICENSE %{googlecloudsdk}_protobuf_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/six/LICENSE %{googlecloudsdk}_six_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/dns/LICENSE %{googlecloudsdk}_dns_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/enum/LICENSE %{googlecloudsdk}_enum_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/gae_ext_runtime/LICENSE %{googlecloudsdk}_gae_ext_runtime_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/fancy_urllib/LICENSE %{googlecloudsdk}_fancy_urllib_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/pyasn1/LICENSE %{googlecloudsdk}_pyasn1_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/apitools/LICENSE %{googlecloudsdk}_apitools_LICENSE
+cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloudsdk}_containerregistry_LICENSE
+
+# python-pyroute2 bundle
+tar -xzf %SOURCE2 -C %{bundled_lib_dir}/gcp
+mv %{bundled_lib_dir}/gcp/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir}
+cp %{pyroute2_dir}/README.md %{pyroute2}_README.md
+cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md
+cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2
+cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2
+
+# python-colorama bundle
+tar -xzf %SOURCE3 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{colorama}-%{colorama_version} %{colorama_dir}
+cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt
+cp %{colorama_dir}/README.rst %{colorama}_README.rst
+
+pushd %{colorama_dir}
+# remove bundled egg-info
+rm -rf *.egg-info
+popd
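+# (the sdist ships pre-generated egg-info; dropping it here makes the build
+# regenerate fresh metadata instead of reusing stale files)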
+
+# python-pycryptodome bundle
+tar -xzf %SOURCE4 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir}
+cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst
+cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst
+
+# python-aliyun-sdk-core bundle
+tar -xzf %SOURCE5 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir}
+cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst
+
+# python-aliyun-sdk-ecs bundle
+tar -xzf %SOURCE6 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir}
+cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst
+
+# python-aliyun-sdk-vpc bundle
+tar -xzf %SOURCE7 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir}
+cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst
+
+# aliyuncli bundle
+tar -xzf %SOURCE8 -C %{bundled_lib_dir}/aliyun
+mv %{bundled_lib_dir}/aliyun/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir}
+cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst
+cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE
+# aliyun*: use bundled libraries
+%patch1005 -p1
+
+# aliyun Python 3 fixes
+%patch1006 -p1
+%patch1007 -p1
+%endif
+
+%build
+if [ ! -f configure ]; then
+	./autogen.sh
+fi
+
+%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5
+CFLAGS="$(echo '%{optflags}')"
+%global conf_opt_fatal "--enable-fatal-warnings=no"
+%else
+CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
+%global conf_opt_fatal "--enable-fatal-warnings=yes"
+%endif
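+# Note: the $(echo '...') wrapper just lets the shell re-emit the expanded macro
+# (a historical idiom for trimming stray whitespace). On current targets the
+# branch above effectively sets, e.g.:
+#   CFLAGS="-O2 -g ..."   # distribution optflags; exact value varies by target
+# and disables fatal warnings, while older targets append RPM_OPT_FLAGS and keep
+# fatal warnings enabled.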
+
+%if %{with rgmanager}
+%global rasset rgmanager
+%endif
+%if %{with linuxha}
+%global rasset linux-ha
+%endif
+%if %{with rgmanager} && %{with linuxha}
+%global rasset all
+%endif
+
+export CFLAGS
+
+%configure BASH_SHELL="/bin/bash" \
+	PYTHON="%{__python3}" \
+	%{conf_opt_fatal} \
+%if %{defined _unitdir}
+    --with-systemdsystemunitdir=%{_unitdir} \
+%endif
+%if %{defined _tmpfilesdir}
+    --with-systemdtmpfilesdir=%{_tmpfilesdir} \
+    --with-rsctmpdir=/run/resource-agents \
+%endif
+	--with-pkg-name=%{name} \
+	--with-ras-set=%{rasset}
+
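+# The configure macro above expands to ./configure with the distribution's
+# standard prefix/sysconfdir/libdir flags, so the effective call is roughly
+# (illustrative; exact flags vary by target):
+#   ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
+#     BASH_SHELL=/bin/bash PYTHON=/usr/bin/python3 --enable-fatal-warnings=no \
+#     --with-systemdsystemunitdir=/usr/lib/systemd/system \
+#     --with-systemdtmpfilesdir=/usr/lib/tmpfiles.d \
+#     --with-rsctmpdir=/run/resource-agents \
+#     --with-pkg-name=resource-agents --with-ras-set=linux-ha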
+%if %{defined jobs}
+JFLAGS="$(echo '-j%{jobs}')"
+%else
+JFLAGS="$(echo '%{_smp_mflags}')"
+%endif
+
+make $JFLAGS
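+# JFLAGS carries the parallel-make flags, so the line above typically runs
+# something like "make -j8" (the job count comes from the jobs macro when set,
+# otherwise from the build host's CPU count).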
+
+# python-pyroute2 bundle
+%ifarch x86_64
+pushd %{pyroute2_dir}
+%{__python3} setup.py build
+popd
+
+# python-colorama bundle
+pushd %{colorama_dir}
+%{__python3} setup.py build
+popd
+
+# python-pycryptodome bundle
+pushd %{pycryptodome_dir}
+%{__python3} setup.py build
+popd
+
+# python-aliyun-sdk-core bundle
+pushd %{aliyunsdkcore_dir}
+%{__python3} setup.py build
+popd
+
+# python-aliyun-sdk-ecs bundle
+pushd %{aliyunsdkecs_dir}
+%{__python3} setup.py build
+popd
+
+# python-aliyun-sdk-vpc bundle
+pushd %{aliyunsdkvpc_dir}
+%{__python3} setup.py build
+popd
+
+# aliyuncli bundle
+pushd %{aliyuncli_dir}
+%{__python3} setup.py build
+popd
+%endif
+
+%install
+rm -rf %{buildroot}
+make install DESTDIR=%{buildroot}
+
+# byte compile ocf.py
+%py_byte_compile %{__python3} %{buildroot}%{_usr}/lib/ocf/lib/heartbeat
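+# Byte-compiling pre-generates the .pyc cache for the shared Python library,
+# e.g. /usr/lib/ocf/lib/heartbeat/__pycache__/ocf.cpython-3*.pyc (illustrative),
+# so agents never need to write it at runtime.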
+
+# google-cloud-sdk bundle
+%ifarch x86_64
+pushd %{googlecloudsdk_dir}
+mkdir -p %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir}
+cp -a bin data lib %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir}
+mkdir %{buildroot}/%{_bindir}
+ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir}
+popd
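+# Net effect (paths illustrative): the SDK tree stays private under
+# /usr/lib/resource-agents/lib/gcp/google-cloud-sdk/ and only the gcloud-ra
+# wrapper is exposed on PATH via the /usr/bin/gcloud-ra symlink.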
+
+# python-pyroute2 bundle
+pushd %{pyroute2_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/gcp
+popd
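+# --root stages the files under the build root while --install-lib redirects the
+# modules into the package-private bundle directory rather than the system
+# site-packages, so the bundled pyroute2 never shadows a distro copy; the same
+# pattern repeats for each bundle below.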
+
+# python-colorama bundle
+pushd %{colorama_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+popd
+
+# python-pycryptodome bundle
+pushd %{pycryptodome_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+popd
+
+# python-aliyun-sdk-core bundle
+pushd %{aliyunsdkcore_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+popd
+
+# python-aliyun-sdk-ecs bundle
+pushd %{aliyunsdkecs_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+popd
+
+# python-aliyun-sdk-vpc bundle
+pushd %{aliyunsdkvpc_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+popd
+
+# aliyuncli bundle
+pushd %{aliyuncli_dir}
+%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+sed -i -e "/^import sys/asys.path.insert(0, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun')\nsys.path.insert(1, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyuncli')" %{buildroot}/%{_bindir}/aliyuncli
+mv %{buildroot}/%{_bindir}/aliyuncli %{buildroot}/%{_bindir}/aliyuncli-ra
+# drop unused completion helpers (aliyun_completer / aliyun_zsh_complete.sh)
+rm %{buildroot}/%{_bindir}/aliyun_*
+popd
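+# The sed edit above injects two sys.path.insert() calls directly after the
+# script's "import sys", so the renamed aliyuncli-ra prefers the bundled SDKs.
+# The installed script then begins roughly like (paths illustrative):
+#   import sys
+#   sys.path.insert(0, '/usr/lib/resource-agents/lib/aliyun')
+#   sys.path.insert(1, '/usr/lib/resource-agents/lib/aliyun/aliyuncli')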
+%endif
+
+## tree fixup
+# remove docs (only one file is installed there, and docs should come from the %doc entries in %files)
+rm -rf %{buildroot}/usr/share/doc/resource-agents
+
+##
+# Create symbolic link from IPaddr to IPaddr2
+##
+rm -f %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr
+ln -s /usr/lib/ocf/resource.d/heartbeat/IPaddr2 %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr
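+# Result: .../heartbeat/IPaddr is now a symlink to the IPaddr2 agent, so legacy
+# cluster configurations that still reference IPaddr transparently run IPaddr2.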
+
+%clean
+rm -rf %{buildroot}
+
+%files
+%defattr(-,root,root)
+%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
+%if %{with linuxha}
+%doc heartbeat/README.galera
+%doc doc/README.webapps
+%doc %{_datadir}/%{name}/ra-api-1.dtd
+%doc %{_datadir}/%{name}/metadata.rng
+%endif
+
+%if %{with rgmanager}
+%{_datadir}/cluster
+%{_sbindir}/rhev-check.sh
+%endif
+
+%if %{with linuxha}
+%dir %{_usr}/lib/ocf
+%dir %{_usr}/lib/ocf/resource.d
+%dir %{_usr}/lib/ocf/lib
+
+%{_usr}/lib/ocf/lib/heartbeat
+
+%{_usr}/lib/ocf/resource.d/heartbeat
+%{_usr}/lib/ocf/resource.d/openstack
+%if %{with rgmanager}
+%{_usr}/lib/ocf/resource.d/redhat
+%endif
+
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
+
+%dir %{_datadir}/%{name}
+%dir %{_datadir}/%{name}/ocft
+%{_datadir}/%{name}/ocft/configs
+%{_datadir}/%{name}/ocft/caselib
+%{_datadir}/%{name}/ocft/README
+%{_datadir}/%{name}/ocft/README.zh_CN
+%{_datadir}/%{name}/ocft/helpers.sh
+%exclude %{_datadir}/%{name}/ocft/runocft
+%exclude %{_datadir}/%{name}/ocft/runocft.prereq
+
+%{_sbindir}/ocft
+
+%{_includedir}/heartbeat
+
+%if %{defined _tmpfilesdir}
+%dir %attr (1755, root, root)	/run/resource-agents
+%else
+%dir %attr (1755, root, root)	%{_var}/run/resource-agents
+%endif
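+# The leading 1 in mode 1755 is the sticky bit: inside the state directory only
+# a file's owner (or root) may delete or rename entries, on top of rwxr-xr-x.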
+
+%{_mandir}/man7/*.7*
+
+###
+# Supported, but in another subpackage
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
+%exclude %{_mandir}/man7/*aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
+%exclude %{_mandir}/man7/*gcp*
+%exclude /usr/lib/%{name}/%{bundled_lib_dir}
+
+###
+# Moved to separate packages
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/SAP*
+%exclude /usr/lib/ocf/lib/heartbeat/sap*
+%exclude %{_mandir}/man7/*SAP*
+
+###
+# Unsupported
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/clvm
+%exclude /usr/lib/ocf/resource.d/heartbeat/LVM
+%exclude /usr/lib/ocf/resource.d/heartbeat/AoEtarget
+%exclude /usr/lib/ocf/resource.d/heartbeat/AudibleAlarm
+%exclude /usr/lib/ocf/resource.d/heartbeat/ClusterMon
+%exclude /usr/lib/ocf/resource.d/heartbeat/EvmsSCC
+%exclude /usr/lib/ocf/resource.d/heartbeat/Evmsd
+%exclude /usr/lib/ocf/resource.d/heartbeat/ICP
+%exclude /usr/lib/ocf/resource.d/heartbeat/LinuxSCSI
+%exclude /usr/lib/ocf/resource.d/heartbeat/ManageRAID
+%exclude /usr/lib/ocf/resource.d/heartbeat/ManageVE
+%exclude /usr/lib/ocf/resource.d/heartbeat/Pure-FTPd
+%exclude /usr/lib/ocf/resource.d/heartbeat/Raid1
+%exclude /usr/lib/ocf/resource.d/heartbeat/ServeRAID
+%exclude /usr/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon
+%exclude /usr/lib/ocf/resource.d/heartbeat/Stateful
+%exclude /usr/lib/ocf/resource.d/heartbeat/SysInfo
+%exclude /usr/lib/ocf/resource.d/heartbeat/VIPArip
+%exclude /usr/lib/ocf/resource.d/heartbeat/WAS
+%exclude /usr/lib/ocf/resource.d/heartbeat/WAS6
+%exclude /usr/lib/ocf/resource.d/heartbeat/WinPopup
+%exclude /usr/lib/ocf/resource.d/heartbeat/Xen
+%exclude /usr/lib/ocf/resource.d/heartbeat/anything
+%exclude /usr/lib/ocf/resource.d/heartbeat/asterisk
+%exclude /usr/lib/ocf/resource.d/heartbeat/aws-vpc-route53
+%exclude /usr/lib/ocf/resource.d/heartbeat/dnsupdate
+%exclude /usr/lib/ocf/resource.d/heartbeat/eDir88
+%exclude /usr/lib/ocf/resource.d/heartbeat/fio
+%exclude /usr/lib/ocf/resource.d/heartbeat/ids
+%exclude /usr/lib/ocf/resource.d/heartbeat/iface-bridge
+%exclude /usr/lib/ocf/resource.d/heartbeat/ipsec
+%exclude /usr/lib/ocf/resource.d/heartbeat/jira
+%exclude /usr/lib/ocf/resource.d/heartbeat/kamailio
+%exclude /usr/lib/ocf/resource.d/heartbeat/lxd-info
+%exclude /usr/lib/ocf/resource.d/heartbeat/machine-info
+%exclude /usr/lib/ocf/resource.d/heartbeat/mariadb
+%exclude /usr/lib/ocf/resource.d/heartbeat/minio
+%exclude /usr/lib/ocf/resource.d/heartbeat/mpathpersist
+%exclude /usr/lib/ocf/resource.d/heartbeat/iscsi
+%exclude /usr/lib/ocf/resource.d/heartbeat/jboss
+%exclude /usr/lib/ocf/resource.d/heartbeat/ldirectord
+%exclude /usr/lib/ocf/resource.d/heartbeat/lxc
+%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-cinder-volume
+%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-floating-ip
+%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-info
+%exclude /usr/lib/ocf/resource.d/heartbeat/ovsmonitor
+%exclude /usr/lib/ocf/resource.d/heartbeat/pgagent
+%exclude /usr/lib/ocf/resource.d/heartbeat/pingd
+%exclude /usr/lib/ocf/resource.d/heartbeat/pound
+%exclude /usr/lib/ocf/resource.d/heartbeat/proftpd
+%exclude /usr/lib/ocf/resource.d/heartbeat/rkt
+%exclude /usr/lib/ocf/resource.d/heartbeat/scsi2reservation
+%exclude /usr/lib/ocf/resource.d/heartbeat/sfex
+%exclude /usr/lib/ocf/resource.d/heartbeat/sg_persist
+%exclude /usr/lib/ocf/resource.d/heartbeat/syslog-ng
+%exclude /usr/lib/ocf/resource.d/heartbeat/varnish
+%exclude /usr/lib/ocf/resource.d/heartbeat/vmware
+%exclude /usr/lib/ocf/resource.d/heartbeat/zabbixserver
+%exclude /usr/lib/ocf/resource.d/heartbeat/mysql-proxy
+%exclude /usr/lib/ocf/resource.d/heartbeat/rsyslog
+%exclude /usr/lib/ocf/resource.d/heartbeat/vsftpd
+%exclude /usr/lib/ocf/resource.d/heartbeat/ZFS
+%exclude %{_mandir}/man7/ocf_heartbeat_clvm
+%exclude %{_mandir}/man7/ocf_heartbeat_LVM
+%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_aws-vpc-route53.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-cinder-volume.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-floating-ip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz
+
+###
+# Other excluded files.
+###
+# This tool has to be updated for the new pacemaker lrmd.
+%exclude %{_sbindir}/ocf-tester
+%exclude %{_mandir}/man8/ocf-tester.8*
+# ldirectord is not supported
+%exclude /etc/ha.d/resource.d/ldirectord
+%exclude /etc/init.d/ldirectord
+%exclude %{_unitdir}/ldirectord.service
+%exclude /etc/logrotate.d/ldirectord
+%exclude /usr/sbin/ldirectord
+%exclude %{_mandir}/man8/ldirectord.8.gz
+
+# For compatibility with pre-existing agents
+%dir %{_sysconfdir}/ha.d
+%{_sysconfdir}/ha.d/shellfuncs
+
+%{_libexecdir}/heartbeat
+%endif
+
+%if %{with rgmanager}
+%post -n resource-agents
+ccs_update_schema > /dev/null 2>&1 ||:
+%endif
+
+%ifarch x86_64
+%files aliyun
+%doc %{aliyuncli}_README.rst %{colorama}_README.rst %{pycryptodome}_README.rst aliyun*_README*
+%license %{aliyuncli}_LICENSE %{colorama}_LICENSE.txt %{pycryptodome}_LICENSE.rst
+%defattr(-,root,root)
+/usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
+%{_mandir}/man7/*aliyun-vpc-move-ip*
+# bundle
+%{_bindir}/aliyuncli-ra
+%dir /usr/lib/%{name}
+/usr/lib/%{name}/%{bundled_lib_dir}/aliyun
+%endif
+
+%ifarch x86_64
+%files gcp
+%doc %{googlecloudsdk}_*README*
+%license %{googlecloudsdk}_*LICENSE*
+%doc %{pyroute2}_README*
+%license %{pyroute2}_LICENSE*
+%defattr(-,root,root)
+/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-vip*
+%{_mandir}/man7/*gcp-vpc-move-vip*
+/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-route*
+%{_mandir}/man7/*gcp-vpc-move-route*
+# bundle
+%{_bindir}/gcloud-ra
+%dir /usr/lib/%{name}
+/usr/lib/%{name}/%{bundled_lib_dir}/gcp
+%endif
+
+%changelog
+* Wed Jun 19 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-27
+- ocf_log: do not log debug messages when HA_debug unset
+- Filesystem: remove notify-action from metadata
+- dhcpd: keep SELinux context in chroot
+
+  Resolves: rhbz#1707969
+  Resolves: rhbz#1717759
+  Resolves: rhbz#1719684
+
+* Tue Jun 11 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-26
+- sap/sap-hana: split subpackages into separate packages
+
+  Resolves: rhbz#1705767
+
+* Wed May 29 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-24
+- Squid: fix PID file issue
+
+  Resolves: rhbz#1689184
+
+* Tue May 28 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-23
+- Route: make family parameter optional
+- redis: mute password warning
+- iSCSILogicalUnit: create iqn when it doesn't exist
+
+  Resolves: rhbz#1669140
+  Resolves: rhbz#1683548
+  Resolves: rhbz#1692413
+
+* Thu May 23 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-22
+- aws-vpc-move-ip: add multi route-table support and fix issue
+  w/multiple NICs
+
+  Resolves: rhbz#1697559
+
+* Fri Apr  5 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-21
+- gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding issue
+
+  Resolves: rhbz#1695656
+
+* Mon Apr  1 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-20
+- LVM/clvm: remove manpages for excluded agents
+- aws-vpc-move-ip: use "--query" to avoid a possible race condition
+- gcloud-ra: fix Python 3 issue and remove Python 2 detection
+
+  Resolves: rhbz#1694392
+  Resolves: rhbz#1693662
+  Resolves: rhbz#1691456
+
+* Thu Mar 21 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-19
+- Add CI gating tests
+- LVM-activate: support LVs from same VG
+- tomcat: use systemd when catalina.sh is unavailable
+- Fixed python-devel/perl build dependencies
+
+  Resolves: rhbz#1682136
+  Resolves: rhbz#1667414
+  Resolves: rhbz#1666691
+  Resolves: rhbz#1595854
+
+* Thu Mar  7 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-18
+- aliyun-vpc-move-ip: exclude from main package
+- aliyuncli-ra: upgrade bundled python-aliyun-sdk-core and fix Python 3 issues
+- ocf.py: byte compile
+
+  Resolves: rhbz#1677204
+  Resolves: rhbz#1677981
+  Resolves: rhbz#1678874
+
+* Tue Feb  5 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-17
+- LVM-activate: don't require locking_type
+
+  Resolves: rhbz#1658664
+
+* Fri Jan 11 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-16
+- vdo-vol: fix monitor-action
+- LVM-activate: don't fail initial probe
+
+  Resolves: rhbz#1662466
+  Resolves: rhbz#1643307
+
+* Tue Oct 23 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-15
+- nfsserver: fix start issues when the nfs_shared_infodir parameter is
+  changed
+
+  Resolves: rhbz#1642027
+
+* Mon Oct  8 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-14
+- redis: use basename in pidof to avoid issues in containers
+
+  Resolves: rhbz#1635785
+
+* Wed Sep 26 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-11
+- Remove grpc from bundle
+
+  Resolves: rhbz#1630627
+
+* Fri Sep 21 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-10
+- systemd-tmpfiles: change path to /run/resource-agents
+
+  Resolves: rhbz#1631291
+
+* Fri Aug 24 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-9
+- podman: new resource agent
+
+  Resolves: rhbz#1607607
+
+* Wed Aug 22 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-8
+- LVM: fix missing dash in activate_options
+- LVM-activate: warn about incorrect vg_access_mode
+- lvmlockd: add cmirrord support
+
+* Wed Aug  1 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-7
+- findif: only match lines containing netmasks
+
+* Mon Jul 30 2018 Florian Weimer <fweimer@redhat.com> - 4.1.1-6
+- Rebuild with fixed binutils
+
+* Fri Jul 27 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-5
+- vdo-vol: new resource agent
+
+  Resolves: rhbz#1552330
+
+* Wed Jul  4 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-4
+- VirtualDomain: add stateless support
+- Exclude unsupported agents
+
+* Thu Jun 28 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-3
+- Added SAPHana and OpenStack agents
+
+* Fri May 25 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-2
+- Remove unsupported clvm and LVM agents
+
+* Tue Mar 13 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-1
+- Rebase to resource-agents 4.1.1 upstream release.
+
+* Mon Feb 19 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.0-2
+- Add gcc to BuildRequires
+
+* Fri Feb 09 2018 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 4.1.0-1.1
+- Escape macros in %%changelog
+
+* Wed Jan 10 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.0-1
+- Rebase to resource-agents 4.1.0 upstream release.
+
+* Thu Aug 03 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Sat Feb 11 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Thu Feb  2 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.1-1
+- Rebase to resource-agents 4.0.1 upstream release.
+
+* Wed Feb  1 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.0-2
+- galera: remove "long SST monitoring" support due to corner-case issues
+
+* Tue Jan 31 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.0-1
+- Rebase to resource-agents 4.0.0 upstream release.
+
+* Thu Dec 15 2016 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.7-6
+- Add netstat dependency
+
+* Tue Feb  9 2016 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.7-4
+- Rebase to resource-agents 3.9.7 upstream release.
+
+* Thu Feb 04 2016 Fedora Release Engineering <releng@fedoraproject.org> - 3.9.6-2.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Thu Jun 18 2015 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.6-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Mon Apr 20 2015 David Vossel <dvossel@redhat.com> - 3.9.6-2
+- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent
+
+* Fri Feb 13 2015 David Vossel <dvossel@redhat.com> - 3.9.6-1
+- Rebase to resource-agents 3.9.6 upstream release.
+
+* Sun Aug 17 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-12.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Sun Jun 08 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-12.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Wed Apr 30 2014 David Vossel <dvossel@redhat.com> - 3.9.5-12
+- Sync with latest upstream.
+
+* Thu Jan 2 2014 David Vossel <dvossel@redhat.com> - 3.9.5-11
+- Sync with latest upstream.
+
+* Sun Oct 20 2013 David Vossel <dvossel@redhat.com> - 3.9.5-10
+- Fix build system for rawhide.
+
+* Wed Oct 16 2013 David Vossel <dvossel@redhat.com> - 3.9.5-9
+- Remove rgmanager agents from build. 
+
+* Sun Aug 04 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild
+
+* Wed Jul 17 2013 Petr Pisar <ppisar@redhat.com> - 3.9.5-7
+- Perl 5.18 rebuild
+
+* Tue Jun 18 2013 David Vossel <dvossel@redhat.com> - 3.9.5-6
+- Restores rsctmp directory to upstream default.
+
+* Tue Jun 18 2013 David Vossel <dvossel@redhat.com> - 3.9.5-5
+- Merges redhat provider into heartbeat provider. Remove
+  rgmanager's redhat provider.
+
+  Resolves: rhbz#917681
+  Resolves: rhbz#928890
+  Resolves: rhbz#952716
+  Resolves: rhbz#960555
+
+* Tue Mar 12 2013 David Vossel <dvossel@redhat.com> - 3.9.5-3
+- Fixes build system error with conditional logic involving
+  IPv6addr and updates spec file to build against rhel 7 as
+  well as fedora 19.
+
+* Mon Mar 11 2013 David Vossel <dvossel@redhat.com> - 3.9.5-2
+- Resolves rhbz#915050
+
+* Mon Mar 11 2013 David Vossel <dvossel@redhat.com> - 3.9.5-1
+- New upstream release.
+
+* Fri Jan 25 2013 Kevin Fenzi <kevin@scrye.com> - 3.9.2-5
+- Fix cifs mount requires
+
+* Mon Nov 12 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-4
+- Removed version number after dist
+
+* Mon Oct 29 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-3.8
+- Remove cluster-glue-libs-devel
+- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to
+  disappearance of cluster-glue in F18)
+
+* Sat Jul 21 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.2-3.5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu Jul 05 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-3.4
+- Fix location of lvm (change from /sbin to /usr/sbin)
+
+* Wed Apr 04 2012 Jon Ciesla <limburgher@gmail.com> - 3.9.2-3.3
+- Rebuilt to fix rawhide dependency issues (caused by move of fsck from
+  /sbin to /usr/sbin).
+
+* Fri Mar 30 2012 Jon Ciesla <limburgher@gmail.com> - 3.9.2-3.1
+- libnet rebuild.
+
+* Sat Jan 14 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.2-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Fri Jul  8 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.2-2
+- add post call to resource-agents to integrate with cluster 3.1.4
+
+* Thu Jun 30 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.2-1
+- new upstream release
+- fix 2 regressions from 3.9.1
+
+* Mon Jun 20 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.1-1
+- new upstream release
+- import spec file from upstream
+
+* Tue Mar  1 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.1.1-1
+- new upstream release 3.1.1 and 1.0.4
+
+* Wed Feb 09 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.1.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Dec  2 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.1.0-1
+- new upstream release
+- spec file update:
+  Update upstream URL
+  Update source URL
+  use standard configure macro
+  use standard make invocation
+
+* Thu Oct  7 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.17-1
+- new upstream release
+  Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013
+  Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733
+  Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718
+  Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943
+  Resolves: rhbz#639018
+
+* Thu Oct  7 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.16-2
+- new upstream release of the Pacemaker agents: 71b1377f907c
+
+* Thu Sep  2 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.16-1
+- new upstream release
+  Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680
+  Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844
+  Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691
+  Resolves: rhbz#622576
+
+* Thu Jul 29 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.14-1
+- new upstream release
+  Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003
+  Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547
+  Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368
+  Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989
+  Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181
+  Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110
+  Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356
+  Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202
+  Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566
+  Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814
+
+* Mon Jun  7 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.13-1
+- new upstream release
+  Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626
+  Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002
+  Resolves: rhbz#599643
+
+* Tue May 18 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.12-2
+- libnet is not available on RHEL
+- Do not package ldirectord on RHEL
+  Resolves: rhbz#577264
+
+* Mon May 10 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.12-1
+- new upstream release
+  Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753
+  Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890
+  Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010
+  Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823
+
+* Mon May 10 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.12-1
+- New pacemaker agents upstream release: a7c0f35916bf
+  + High: pgsql: properly implement pghost parameter
+  + High: RA: mysql: fix syntax error
+  + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371)
+  + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378)
+  + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data
+  + Medium: IPaddr: return the correct code if interface delete failed
+  + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis)
+  + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815)
+  + Medium: pgsql: implement "config" parameter
+  + Medium: RA: iSCSITarget: follow changed IET access policy
+
+* Wed Apr 21 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.11-1
+- new upstream release
+  Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017
+  Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017
+  Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533
+- Switch to file based Requires.
+  Also address several other problems related to missing runtime
+  components in different agents.
+  With the current Requires: set, we guarantee all basic functionalities
+  out of the box for lvm/fs/clusterfs/netfs/networking.
+  Resolves: rhbz#570008
+
+* Sat Apr 17 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.10-2
+- New pacemaker agents upstream release
+  + High: RA: vmware: fix set_environment() invocation (LF 2342)
+  + High: RA: vmware: update to version 0.2
+  + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388)
+  + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg)
+  + Medium: IPsrcaddr: modify the interface route (lf#2367)
+  + Medium: ldirectord: Allow multiple email addresses (LF 2168)
+  + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328)
+  + Medium: meta-data: improve timeouts in most resource agents
+  + Medium: nfsserver: use default values (lf#2321)
+  + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal
+  + Medium: ocf-shellfuncs: don't output to stderr if using syslog
+  + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid
+  + Medium: RA: iSCSILogicalUnit: fix monitor for STGT
+  + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284)
+  + Medium: RA: ManageRAID: require bash
+  + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988)
+  + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION
+  + Medium: RA: VirtualDomain: improve error messages
+  + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name
+  + Medium: Route: add route table parameter (lf#2335)
+  + Medium: sfex: don't use pid file (lf#2363,bnc#585416)
+  + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416)
+
+* Fri Apr  9 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.10-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027
+  Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335
+  Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249
+  Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626
+  Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626
+  Resolves: rhbz#579059
+
+* Wed Mar 24 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.9-2
+- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page
+- Resolves: rhbz#574732 - Add libnet-devel as a dependency to ensure IPaddrv6 is built
+
+* Mon Mar  1 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.9-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902
+  Resolves: rhbz#512171, rhbz#519491
+
+* Mon Feb 22 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.8-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901
+  Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157
+  Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201
+  Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363
+  Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630
+  Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047
+  Resolves: rhbz#554968, rhbz#555047
+- spec file update:
+  * update spec file copyright date
+  * use bz2 tarball
+
+* Fri Jan 15 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.7-2
+- Add python as BuildRequires
+
+* Mon Jan 11 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.7-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#526286, rhbz#533461
+
+* Mon Jan 11 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.6-2
+- Update Pacemaker agents to upstream version: c76b4a6eb576
+  + High: RA: VirtualDomain: fix forceful stop (LF 2283)
+  + High: apache: monitor operation of depth 10 for web applications (LF 2234)
+  + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281)
+  + Medium: RA: Route: improve validate (LF 2232)
+  + Medium: mark obsolete RAs as deprecated (LF 2244)
+  + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work
+
+* Mon Dec 7 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.6-1
+- New rgmanager resource agents upstream release
+- spec file update:
+  * use global instead of define
+  * use new Source0 url
+  * use %%name macro more aggressively
+
+* Mon Dec 7 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.5-2
+- Update Pacemaker agents to upstream version: bc00c0b065d9
+  + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239)
+  + High: doc: add man pages for all RAs (LF2237)
+  + High: syslog-ng: new RA
+  + High: vmware: make meta-data work and several cleanups (LF 2212)
+  + Medium: .ocf-shellfuncs: add ocf_is_probe function
+  + Medium: Dev: make RAs executable (LF2239)
+  + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034)
+  + Medium: add mercurial repository version information to .ocf-shellfuncs
+  + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469)
+  + Medium: iSCSITarget, iSCSILogicalUnit: support LIO
+  + Medium: nfsserver: use check_binary properly in validate (LF 2211)
+  + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219)
+  + Medium: oracle/oralsnr: export variables properly
+  + Medium: pgsql: remove the previous backup_label if it exists
+  + Medium: postfix: fix double stop (thanks to Dinh N. Quoc)
+  + RA: LVM: Make monitor operation quiet in logs (bnc#546353)
+  + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968)
+  + ldirectord: OCF agent: overhaul
+
+* Fri Nov 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.5-1
+- New rgmanager resource agents upstream release
+- Allow pacemaker to use rgmanager resource agents
+
+* Wed Oct 28 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.4-2
+- Update Pacemaker agents to upstream version: e2338892f59f
+  + High: send_arp - turn on unsolicited mode for compatibility with the libnet version's exit codes
+  + High: Trap sigterm for compatibility with the libnet version of send_arp
+  + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down
+  + Medium: IPv6addr: recognize network masks properly
+  + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define"
+
+* Wed Oct 21 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.4-1
+- New rgmanager resource agents upstream release
+
+* Mon Oct 12 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.3-3
+- Update Pacemaker agents to upstream version: 099c0e5d80db
+  + Add the ha_parameter function back into .ocf-shellfuncs.
+  + Bug bnc#534803 - Provide a default for MAILCMD
+  + Fix use of undefined macro @HA_NOARCHDATAHBDIR@
+  + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff)
+  + Import shellfuncs from heartbeat as badly written RAs use it
+  + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate
+  + Medium: RA: Filesystem: implement monitor operation
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum)
+  + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID
+  + Medium: RA: iSCSITarget: be more persistent deleting targets on stop
+  + Medium: RA: portblock: add per-IP filtering capability
+  + Medium: mysql-proxy: log_level and keepalive parameters
+  + Medium: oracle: drop spurious output from sqlplus
+  + RA: Filesystem: allow configuring smbfs mounts as clones
+
+* Wed Sep 23 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.3-1
+- New rgmanager resource agents upstream release
+
+* Thu Aug 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.1-1
+- New rgmanager resource agents upstream release
+
+* Tue Aug 18 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.0-16
+- Create an ldirectord package
+- Update Pacemaker agents to upstream version: 2198dc90bec4
+  + Build: Import ldirectord.
+  + Ensure HA_VARRUNDIR has a value to substitute
+  + High: Add findif tool (mandatory for IPaddr/IPaddr2)
+  + High: IPv6addr: new nic and cidr_netmask parameters
+  + High: postfix: new resource agent
+  + Include license information
+  + Low (LF 2159): Squid: make the regexp match more precisely output of netstat
+  + Low: configure: Fix package name.
+  + Low: ldirectord: add dependency on $remote_fs.
+  + Low: ldirectord: add mandatory required header to init script.
+  + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp
+  + Medium: VirtualDomain: destroy domain shortly before timeout expiry
+  + Medium: shellfuncs: Make the mktemp wrappers work.
+  + Remove references to Echo function
+  + Remove references to heartbeat shellfuncs.
+  + Remove useless path lookups
+  + findif: actually include the right header. Simplify configure.
+  + ldirectord: Remove superfluous configure artifact.
+  + ocf-tester: Fix package reference and path to DTD.
+
+* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 3.0.0-15
+- Use bzipped upstream hg tarball.
+
+* Wed Jul 29 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-14
+- Merge Pacemaker cluster resource agents:
+  * Add Source1.
+  * Drop noarch. We have real binaries now.
+  * Update BuildRequires.
+  * Update all relevant prep/build/install/files/description sections.
+
+* Sun Jul 26 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.0.0-13
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul  8 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-12
+- spec file updates:
+  * Update copyright header
+  * final release.. undefine alphatag
+
+* Thu Jul  2 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-11.rc4
+- New upstream release.
+
+* Sat Jun 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-10.rc3
+- New upstream release.
+
+* Wed Jun 10 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-9.rc2
+- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970
+
+* Tue Mar 24 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-8.rc1
+- New upstream release.
+- Update BuildRoot usage to preferred versions/names
+
+* Mon Mar  9 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-7.beta1
+- New upstream release.
+
+* Fri Mar  6 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-6.alpha7
+- New upstream release.
+
+* Tue Mar  3 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-5.alpha6
+- New upstream release.
+
+* Tue Feb 24 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-4.alpha5
+- Drop Conflicts with rgmanager.
+
+* Mon Feb 23 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-3.alpha5
+- New upstream release.
+
+* Thu Feb 19 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-2.alpha4
+- Add comments on how to build this package.
+
+* Thu Feb  5 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-1.alpha4
+- New upstream release.
+- Fix datadir/cluster directory ownership.
+
+* Tue Jan 27 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-1.alpha3
+- Initial packaging